13 Commits
0.2.0 ... 0.3.2

Author SHA1 Message Date
AbdBarho
499143009a Update Versions (#39) 2022-09-07 19:45:03 +02:00
AbdBarho
c614625f04 Update AUTOMATIC1111 (#37) 2022-09-06 20:16:28 +02:00
abdullah
ccd6e238b2 executable 2022-09-06 12:17:19 +02:00
Abdullah Barhoum
829864af9b Add Font 2022-09-05 21:05:43 +02:00
AbdBarho
ccc7306f48 Add AUTOMATIC1111 and lstein WebUIs (#32)
* Lstein

* Add AUTOMATIC1111 and lstein UIs

* Update Workflow
2022-09-05 19:51:22 +02:00
AbdBarho
082876aab3 SHA as Build ARG (#30) 2022-09-04 09:12:07 +02:00
AbdBarho
ae834cb764 Update Core to bb765f1 (#29) 2022-09-04 08:46:14 +02:00
AbdBarho
5f6d9fbb03 CI / Build Image (#27) 2022-09-03 17:36:19 +02:00
AbdBarho
d4da252343 Update README (#26) 2022-09-03 14:39:18 +02:00
AbdBarho
5af482ed8c Update Core to c84748a (#25) 2022-09-03 13:15:25 +02:00
AbdBarho
ce4e190f8f Force LF (#24) 2022-09-03 06:59:16 +02:00
AbdBarho
bae3590980 Make mount.sh executable in the git index (#22) 2022-09-02 16:31:55 +02:00
AbdBarho
1588d1eecf Force LF endings (#19) 2022-09-02 12:16:21 +02:00
19 changed files with 379 additions and 116 deletions

9
.editorconfig Normal file

@@ -0,0 +1,9 @@
root = true
[*]
end_of_line = lf
indent_style = space
indent_size = 2
charset = utf-8
insert_final_newline = true
trim_trailing_whitespace = true

1
.gitattributes vendored Normal file

@@ -0,0 +1 @@
* text=auto eol=lf
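This attribute is what the "Force LF" commits above rely on: Git normalizes all text files to LF in the repository. As a minimal sketch (not part of this changeset), files that were already committed would typically be renormalized like this:
```
# re-apply the line-ending rules from .gitattributes to files already tracked by git
git add --renormalize .
git commit -m "Normalize line endings"
```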

24
.github/workflows/docker.yml vendored Normal file

@@ -0,0 +1,24 @@
name: Build Image
on: [push]
# TODO: how to cache intermediate images?
jobs:
  build_hlky:
    runs-on: ubuntu-latest
    name: hlky
    steps:
      - uses: actions/checkout@v3
      - run: docker compose build --progress plain
  build_AUTOMATIC1111:
    runs-on: ubuntu-latest
    name: AUTOMATIC1111
    steps:
      - uses: actions/checkout@v3
      - run: cd AUTOMATIC1111 && docker compose build --progress plain
  build_lstein:
    runs-on: ubuntu-latest
    name: lstein
    steps:
      - uses: actions/checkout@v3
      - run: cd lstein && docker compose build --progress plain

22
.github/workflows/executable.yml vendored Normal file

@@ -0,0 +1,22 @@
name: Check executable
on: [push]
jobs:
  check:
    runs-on: ubuntu-latest
    name: Check all sh
    steps:
      - run: git config --global core.fileMode true
      - uses: actions/checkout@v3
      - shell: bash
        run: |
          shopt -s globstar;
          FAIL=0
          for file in **/*.sh; do
            if [ -f "${file}" ] && [ -r "${file}" ] && [ ! -x "${file}" ]; then
              echo "$file" is not executable;
              FAIL=1
            fi
          done
          exit ${FAIL}
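This check fails whenever a tracked `*.sh` file is missing the executable bit (compare the "Make mount.sh executable in the git index" commit above). A hedged sketch of the usual fix, using `AUTOMATIC1111/mount.sh` as an example path:
```
# set the executable bit directly in the git index so the workflow passes
git update-index --chmod=+x AUTOMATIC1111/mount.sh
git commit -m "Make mount.sh executable"
```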

55
AUTOMATIC1111/Dockerfile Normal file

@@ -0,0 +1,55 @@
# syntax=docker/dockerfile:1
FROM alpine/git:2.36.2 as download
RUN <<EOF
# who knows
git config --global http.postBuffer 1048576000
git clone https://github.com/sczhou/CodeFormer.git repositories/CodeFormer
git clone https://github.com/CompVis/stable-diffusion.git repositories/stable-diffusion
git clone https://github.com/CompVis/taming-transformers.git repositories/taming-transformers
rm -rf repositories/taming-transformers/data repositories/taming-transformers/assets
EOF
FROM pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime
SHELL ["/bin/bash", "-ceuxo", "pipefail"]
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install git fonts-dejavu-core -y && apt-get clean
RUN <<EOF
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
cd stable-diffusion-webui
git reset --hard db6db585eb9ee48e7315e28603e18531dbc87067
pip install -U --prefer-binary --no-cache-dir -r requirements.txt
EOF
# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step
ARG SHA=e92d4cf7476f1897fce376916dfb40755ea7920f
RUN <<EOF
cd stable-diffusion-webui
git pull
git reset --hard ${SHA}
pip install -U --prefer-binary --no-cache-dir -r requirements.txt
EOF
ENV ROOT=/workspace/stable-diffusion-webui \
WORKDIR=/workspace/stable-diffusion-webui/repositories/stable-diffusion
COPY --from=download /git/ ${ROOT}
RUN <<EOF
pip install --prefer-binary -U --no-cache-dir -r ${ROOT}/repositories/CodeFormer/requirements.txt
pip install --prefer-binary -U --no-cache-dir opencv-python-headless markupsafe==2.0.1
EOF
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
COPY . /docker
RUN chmod +x /docker/mount.sh && python3 /docker/info.py ${ROOT}/modules/ui.py
WORKDIR ${WORKDIR}
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD /docker/mount.sh && python3 -u ../../webui.py --listen ${CLI_ARGS}
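The two-stage install above is a layer-caching trick: the first `RUN` installs requirements at a fixed old commit and stays cached, while the `ARG SHA` layer only pulls the delta. To build against a different upstream commit, the build argument can be overridden; a rough, untested sketch assuming Docker Compose v2 and a placeholder `<commit sha>`:
```
cd AUTOMATIC1111
# override the pinned commit of AUTOMATIC1111/stable-diffusion-webui at build time
docker compose build --build-arg SHA=<commit sha>
```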

14
AUTOMATIC1111/README.md Normal file

@@ -0,0 +1,14 @@
# WebUI for AUTOMATIC1111
The WebUI of [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) as a docker container!
## Setup
Clone this repo, download the `model.ckpt` and `GFPGANv1.3.pth` and put them into the `models` folder as mentioned in [the main README](../README.md), then run
```
cd AUTOMATIC1111
docker compose up --build
```
You can change the CLI parameters in `AUTOMATIC1111/docker-compose.yml`. The full list of CLI parameters can be found [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/shared.py).
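For a quick experiment it is not strictly necessary to edit the file. Assuming Docker Compose v2 and the `model` service name used in the compose file below, a one-off run with different flags could look roughly like this (untested sketch):
```
cd AUTOMATIC1111
# override CLI_ARGS for a single run without editing docker-compose.yml
docker compose run --rm --service-ports -e CLI_ARGS="--medvram" model
```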

4
AUTOMATIC1111/config.json Normal file

@@ -0,0 +1,4 @@
{
"outdir_samples": "/output",
"font": "DejaVuSans.ttf"
}

20
AUTOMATIC1111/docker-compose.yml Normal file

@@ -0,0 +1,20 @@
version: '3.9'
services:
  model:
    build: .
    ports:
      - "7860:7860"
    volumes:
      - ../cache:/cache
      - ../output:/output
      - ../models:/models
    environment:
      - CLI_ARGS=--medvram --opt-split-attention
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['0']
              capabilities: [gpu]
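The `deploy` section pins the container to GPU `0`. On a multi-GPU host, a different index can be placed in `device_ids`; assuming the NVIDIA driver and container toolkit are installed, the available indices can be listed with:
```
# list GPUs and their indices; use one of them in device_ids above
nvidia-smi -L
```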

14
AUTOMATIC1111/info.py Normal file

@@ -0,0 +1,14 @@
import sys
from pathlib import Path

# Patch the given UI file (modules/ui.py) in place to append a small credit line to the interface.
file = Path(sys.argv[1])
file.write_text(
    file.read_text()\
    .replace(' return demo', """
    with demo:
        gr.Markdown(
            'Created by [AUTOMATIC1111 / stable-diffusion-webui-docker](https://github.com/AbdBarho/stable-diffusion-webui-docker/tree/master/AUTOMATIC1111)'
        )
    return demo
""", 1)
)

32
AUTOMATIC1111/mount.sh Executable file

@@ -0,0 +1,32 @@
#!/bin/bash
declare -A MODELS
MODELS["${WORKDIR}/models/ldm/stable-diffusion-v1/model.ckpt"]=model.ckpt
MODELS["${ROOT}/GFPGANv1.3.pth"]=GFPGANv1.3.pth
for path in "${!MODELS[@]}"; do
  name=${MODELS[$path]}
  base=$(dirname "${path}")
  from_path="/models/${name}"
  if test -f "${from_path}"; then
    mkdir -p "${base}" && ln -sf "${from_path}" "${path}" && echo "Mounted ${name}"
  else
    echo "Skipping ${name}"
  fi
done
# force realesrgan cache
rm -rf /opt/conda/lib/python3.7/site-packages/realesrgan/weights
ln -s -T /models /opt/conda/lib/python3.7/site-packages/realesrgan/weights
# force facexlib cache
mkdir -p /cache/weights/ ${WORKDIR}/gfpgan/
ln -sf /cache/weights/ ${WORKDIR}/gfpgan/
# code former cache
rm -rf ${ROOT}/repositories/CodeFormer/weights/CodeFormer ${ROOT}/repositories/CodeFormer/weights/facelib
ln -sf -T /cache/weights ${ROOT}/repositories/CodeFormer/weights/CodeFormer
ln -sf -T /cache/weights ${ROOT}/repositories/CodeFormer/weights/facelib
# mount config
ln -sf /docker/config.json ${WORKDIR}/config.json
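The `MODELS` associative array at the top of this script maps a target path inside the container to a file name under the mounted `/models` folder. A purely hypothetical sketch of how one more weight file could be wired in next to the existing entries (the path and file name are made up for illustration):
```
# hypothetical extra entry, added next to model.ckpt and GFPGANv1.3.pth above:
# symlinks /models/SomeModel.pth to the target path if the file exists
MODELS["${ROOT}/SomeModel.pth"]=SomeModel.pth
```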

171
README.md

@@ -1,82 +1,89 @@
# Stable Diffusion WebUI Docker
Run Stable Diffusion on your machine with a nice UI without any hassle!
This repository provides the [WebUI](https://github.com/hlky/stable-diffusion-webui) as a docker image for easy setup and deployment. Please note that the WebUI is experimental and evolving quickly, so expect some bugs.
## Features
- Interactive UI with many features, and more on the way!
- Support for 6GB GPU cards.
- GFPGAN for face reconstruction, RealESRGAN for super-sampling.
- Experimental:
- [Textual Inversion](https://github.com/hlky/sd-enable-textual-inversion)
- Latent Diffusion Super Resolution
- GoBig
- GoLatent
- many more!
## Setup
Make sure you have an **up to date** version of docker installed. Download this repo and run:
```
docker compose build
```
You can let it build in the background while you download the different models:
- [Stable Diffusion v1.4 (4GB)](https://www.googleapis.com/storage/v1/b/aai-blog-files/o/sd-v1-4.ckpt?alt=media), rename to `model.ckpt`
- (Optional) [GFPGANv1.3.pth (333MB)](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth).
- (Optional) [RealESRGAN_x4plus.pth (64MB)](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth) and [RealESRGAN_x4plus_anime_6B.pth (18MB)](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth).
- (Optional) [LDSR (2GB)](https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1) and [its configuration](https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1), rename to `LDSR.ckpt` and `LDSR.yaml` respectively.
<!-- - (Optional) [RealESRGAN_x2plus.pth (64MB)](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth)
- TODO: (I still need to find the RealESRGAN_x2plus_6b.pth) -->
Put all of the downloaded files in the `models` folder; it should look something like this:
```
models/
├── model.ckpt
├── GFPGANv1.3.pth
├── RealESRGAN_x4plus.pth
├── RealESRGAN_x4plus_anime_6B.pth
├── LDSR.ckpt
└── LDSR.yaml
```
## Run
After the build is done, you can run the app with:
```
docker compose up --build
```
This will start the app on http://localhost:7860/
Note: the first start will take some time, as some additional models are downloaded; these are cached in the `cache` folder, so subsequent runs are faster.
## Config
In the `docker-compose.yml` you can change the `CLI_ARGS` variable, which contains the arguments passed to the WebUI. By default, `--extra-models-cpu --optimized-turbo` are given, which allow you to use the model on a 6GB GPU. However, some features might not be available in this mode.
[You can find the full list of arguments here.](https://github.com/hlky/stable-diffusion/blob/d667ff52a36b4e79526f01555bfbf85428f334ce/scripts/webui.py)
### FAQ
You can find fixes to common issues [in the wiki page.](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Main)
# Disclaimer
The authors of this project are not responsible for any content generated using this interface.
The license of this software forbids you from sharing any content that violates any laws, causes harm to a person, disseminates personal information intended to cause harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions, please read [the license](./LICENSE).
# Thanks
Special thanks to everyone behind these awesome projects, without them, none of this would have been possible:
- [hlky/stable-diffusion-webui](https://github.com/hlky/stable-diffusion-webui)
- [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion)
# Stable Diffusion WebUI Docker
Run Stable Diffusion on your machine with a nice UI without any hassle!
This repository provides the [WebUI](https://github.com/hlky/stable-diffusion-webui) as a docker image for easy setup and deployment.
Now with experimental support for 2 other forks:
- [AUTOMATIC1111](./AUTOMATIC1111/) (Stable, very few bugs!)
- [lstein](./lstein/)
## Features
- Interactive UI with many features, and more on the way!
- Support for 6GB GPU cards.
- GFPGAN for face reconstruction, RealESRGAN for super-sampling.
- Experimental:
- Latent Diffusion Super Resolution
- GoBig
- GoLatent
- many more!
## Setup
Make sure you have an **up to date** version of docker installed. Download this repo and run:
```
docker compose build
```
You can let it build in the background while you download the different models:
- [Stable Diffusion v1.4 (4GB)](https://www.googleapis.com/storage/v1/b/aai-blog-files/o/sd-v1-4.ckpt?alt=media), rename to `model.ckpt`
- (Optional) [GFPGANv1.3.pth (333MB)](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth).
- (Optional) [RealESRGAN_x4plus.pth (64MB)](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth) and [RealESRGAN_x4plus_anime_6B.pth (18MB)](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth).
- (Optional) [LDSR (2GB)](https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1) and [its configuration](https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1), rename to `LDSR.ckpt` and `LDSR.yaml` respectively.
<!-- - (Optional) [RealESRGAN_x2plus.pth (64MB)](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth)
- TODO: (I still need to find the RealESRGAN_x2plus_6b.pth) -->
Put all of the downloaded files in the `models` folder; it should look something like this:
```
models/
├── model.ckpt
├── GFPGANv1.3.pth
├── RealESRGAN_x4plus.pth
├── RealESRGAN_x4plus_anime_6B.pth
├── LDSR.ckpt
└── LDSR.yaml
```
## Run
After the build is done, you can run the app with:
```
docker compose up --build
```
This will start the app on http://localhost:7860/
Note: the first start will take some time, as some additional models are downloaded; these are cached in the `cache` folder, so subsequent runs are faster.
### FAQ
You can find fixes to common issues [in the wiki page.](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/FAQ)
## Config
In the `docker-compose.yml` you can change the `CLI_ARGS` variable, which contains the arguments passed to the WebUI. By default, `--extra-models-cpu --optimized-turbo` are given, which allow you to use the model on a 6GB GPU. However, some features might not be available in this mode. [You can find the full list of arguments here.](https://github.com/hlky/stable-diffusion-webui/blob/2b1ac8daf7ea82c6c56eabab7e80ec1c33106a98/scripts/webui.py)
You can set the `WEBUI_SHA` to [any SHA from the main repo](https://github.com/hlky/stable-diffusion/commits/main); this will build the container against that commit. Use at your own risk.
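Assuming standard Compose behavior, where a build argument left empty in `docker-compose.yml` is filled from the host environment, pinning the build to a specific commit could look roughly like this (placeholder SHA, untested sketch):
```
WEBUI_SHA=<commit sha from hlky/stable-diffusion> docker compose build
```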
# Disclaimer
The authors of this project are not responsible for any content generated using this interface.
The license of this software forbids you from sharing any content that violates any laws, causes harm to a person, disseminates personal information intended to cause harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions, please read [the license](./LICENSE).
# Thanks
Special thanks to everyone behind these awesome projects, without them, none of this would have been possible:
- [hlky/stable-diffusion-webui](https://github.com/hlky/stable-diffusion-webui)
- [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- [lstein/stable-diffusion](https://github.com/lstein/stable-diffusion)
- [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion)
- [hlky/sd-enable-textual-inversion](https://github.com/hlky/sd-enable-textual-inversion)
- [devilismyfriend/latent-diffusion](https://github.com/devilismyfriend/latent-diffusion)

docker-compose.yml

@@ -2,7 +2,12 @@ version: '3.9'
services:
  model:
    build: ./build/
    build:
      context: ./hlky/
      args:
        # You can choose any commit sha from https://github.com/hlky/stable-diffusion/commits/main
        # USE AT YOUR OWN RISK! otherwise just leave it empty.
        WEBUI_SHA:
    restart: on-failure
    ports:
      - "7860:7860"

build/Dockerfile → hlky/Dockerfile

@@ -2,37 +2,31 @@
FROM continuumio/miniconda3:4.12.0
RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y
RUN git clone https://github.com/hlky/stable-diffusion.git && cd stable-diffusion && git reset --hard ff8c2d0b709f1e4180fb19fa5c27ec28c414cedd
RUN conda env update --file stable-diffusion/environment.yaml --name base && conda clean -a -y
# Fix: Module PIL has not attribute "Resampling"
RUN conda install -c anaconda pillow==9.2.0 && conda clean -a -y
SHELL ["/bin/bash", "-ceuxo", "pipefail"]
# fonts for generating the grid
RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y
RUN apt-get update && apt install fonts-dejavu-core rsync -y && apt-get clean
RUN <<EOF
git clone https://github.com/sd-webui/stable-diffusion-webui.git stable-diffusion
cd stable-diffusion
git reset --hard 2b1ac8daf7ea82c6c56eabab7e80ec1c33106a98
conda env update --file environment.yaml -n base
conda clean -a -y
EOF
# new dependency, should be added to the environment.yaml
RUN pip install -U --no-cache-dir pyperclip
# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step
RUN cd stable-diffusion && git pull && git reset --hard d667ff52a36b4e79526f01555bfbf85428f334ce && \
ARG WEBUI_SHA=2b1ac8daf7ea82c6c56eabab7e80ec1c33106a98
RUN cd stable-diffusion && git pull && git reset --hard ${WEBUI_SHA} && \
conda env update --file environment.yaml --name base && conda clean -a -y
# download dev UI version, update the sha below in case you want some other version
# RUN <<EOF
# git clone https://github.com/hlky/stable-diffusion-webui.git
# cd stable-diffusion-webui
# # map to this file: https://github.com/hlky/stable-diffusion-webui/blob/master/.github/sync.yml
# git reset --hard 49e6178fd82ca736f9bbc621c6b12487c300e493
# cp -t /stable-diffusion/scripts/ webui.py relauncher.py txt2img.yaml
# cp -t /stable-diffusion/configs/webui webui.yaml
# cp -t /stable-diffusion/frontend/ frontend/*
# cd / && rm -rf stable-diffusion-webui
# EOF
# Textual inversion
RUN <<EOF
git clone https://github.com/hlky/sd-enable-textual-inversion.git &&
@@ -45,7 +39,7 @@ EOF
RUN <<EOF
git clone https://github.com/devilismyfriend/latent-diffusion &&
cd /latent-diffusion &&
git reset --hard 4119cf038fb953360fb004e48adb9913eed3594a &&
git reset --hard 6d61fc03f15273a457950f2cdc10dddf53ba6809 &&
# hacks all the way down
mv ldm ldm_latent &&
sed -i -- 's/from ldm/from ldm_latent/g' *.py
@@ -55,10 +49,10 @@ EOF
# add info
COPY . /docker/
RUN python /docker/info.py /stable-diffusion/frontend/frontend.py
RUN python /docker/info.py /stable-diffusion/frontend/frontend.py && chmod +x /docker/mount.sh
WORKDIR /stable-diffusion
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD /docker/mount.sh && python3 -u scripts/webui.py --outdir /output --ckpt /models/model.ckpt --ldsr-dir /latent-diffusion --save-metadata ${CLI_ARGS}
CMD /docker/mount.sh && python3 -u scripts/webui.py --outdir /output --ckpt /models/model.ckpt --ldsr-dir /latent-diffusion ${CLI_ARGS}

build/info.py → hlky/info.py

@@ -9,7 +9,5 @@ file.write_text(
Created using <a href="https://github.com/AbdBarho/stable-diffusion-webui-docker">stable-diffusion-webui-docker</a>.
</p>
<p>For help and advanced usage guides,
""", 1)\
.replace('img2img_cfg = gr.Slider(minimum=1.0, maximum=30.0', 'img2img_cfg = gr.Slider(minimum=1.0, maximum=60.0')
)
""", 1)
)

5
build/mount.sh → hlky/mount.sh Normal file → Executable file

@@ -26,6 +26,5 @@ if test -f /models/LDSR.yaml; then
fi
# force facexlib cache
mkdir -p /cache/weights/
rm -rf /stable-diffusion/src/facexlib/facexlib/weights
ln -sf /cache/weights/ /stable-diffusion/src/facexlib/facexlib/
mkdir -p /cache/weights/ /stable-diffusion/gfpgan/
ln -sf /cache/weights/ /stable-diffusion/gfpgan/

29
lstein/Dockerfile Normal file

@@ -0,0 +1,29 @@
# syntax=docker/dockerfile:1
FROM continuumio/miniconda3:4.12.0
SHELL ["/bin/bash", "-ceuxo", "pipefail"]
RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y
RUN apt-get update && apt install fonts-dejavu-core rsync -y && apt-get clean
RUN <<EOF
git clone https://github.com/lstein/stable-diffusion.git
cd stable-diffusion
git reset --hard 751283a2de81bee4bb571fbabe4adb19f1d85b97
conda env update --file environment.yaml -n base
conda clean -a -y
EOF
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
WORKDIR /stable-diffusion
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD mkdir -p /stable-diffusion/models/ldm/stable-diffusion-v1/ && \
ln -sf /models/model.ckpt /stable-diffusion/models/ldm/stable-diffusion-v1/model.ckpt && \
python3 -u scripts/dream.py --outdir /output --web --host 0.0.0.0 --port 7860 ${CLI_ARGS}

14
lstein/README.md Normal file

@@ -0,0 +1,14 @@
# WebUI for lstein
The WebUI of [lstein/stable-diffusion](https://github.com/lstein/stable-diffusion) as a docker container!
Although it is a simple UI, the project has a lot of potential.
## Setup
Clone this repo, download the `model.ckpt` and put it into the `models` folder as mentioned in [the main README](../README.md), then run
```
cd lstein
docker compose up --build
```

21
lstein/docker-compose.yml Normal file

@@ -0,0 +1,21 @@
version: '3.9'
services:
  model:
    build: .
    restart: on-failure
    ports:
      - "7860:7860"
    volumes:
      - ../cache:/cache
      - ../output:/output
      - ../models:/models
    environment:
      - CLI_ARGS=
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['0']
              capabilities: [gpu]

1
models/.gitignore vendored

@@ -1,5 +1,6 @@
/model.ckpt
/GFPGANv1.3.pth
/RealESRGAN_x2plus.pth
/RealESRGAN_x4plus.pth
/RealESRGAN_x4plus_anime_6B.pth
/LDSR.ckpt