13 Commits
0.2.1 ... 0.3.4

Author            SHA1        Message                                    Date
AbdBarho          e32a48f42a  Update Versions (#43)                      2022-09-09 19:05:19 +02:00
AbdBarho          76989b39a6  Update Versions (#42)                      2022-09-08 17:56:15 +02:00
                                * Update hlky
                                * Update automatic
AbdBarho          4d9fc381bb  Remove restart on failure (#41)            2022-09-07 22:38:02 +02:00
AbdBarho          bcee253fe0  Update README.md                           2022-09-07 19:52:43 +02:00
AbdBarho          499143009a  Update Versions (#39)                      2022-09-07 19:45:03 +02:00
AbdBarho          c614625f04  Update AUTOMATIC1111 (#37)                 2022-09-06 20:16:28 +02:00
abdullah          ccd6e238b2  executable                                 2022-09-06 12:17:19 +02:00
Abdullah Barhoum  829864af9b  Add Font                                   2022-09-05 21:05:43 +02:00
AbdBarho          ccc7306f48  Add AUTOMATIC1111 and lstein WebUIs (#32)  2022-09-05 19:51:22 +02:00
                                * Lstein
                                * Add AUTOMATIC1111 and lstein UIs
                                * Update Workflow
AbdBarho          082876aab3  SHA as Build ARG (#30)                     2022-09-04 09:12:07 +02:00
AbdBarho          ae834cb764  Update Core to bb765f1 (#29)               2022-09-04 08:46:14 +02:00
AbdBarho          5f6d9fbb03  CI / Build Image (#27)                     2022-09-03 17:36:19 +02:00
AbdBarho          d4da252343  Update README(#26)                         2022-09-03 14:39:18 +02:00
17 changed files with 291 additions and 38 deletions

.github/workflows/docker.yml (new file, vendored, 24 lines)

@@ -0,0 +1,24 @@
name: Build Image
on: [push]
# TODO: how to cache intermediate images?
jobs:
  build_hlky:
    runs-on: ubuntu-latest
    name: hlky
    steps:
      - uses: actions/checkout@v3
      - run: docker compose build --progress plain
  build_AUTOMATIC1111:
    runs-on: ubuntu-latest
    name: AUTOMATIC1111
    steps:
      - uses: actions/checkout@v3
      - run: cd AUTOMATIC1111 && docker compose build --progress plain
  build_lstein:
    runs-on: ubuntu-latest
    name: lstein
    steps:
      - uses: actions/checkout@v3
      - run: cd lstein && docker compose build --progress plain
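One possible answer to the TODO above, sketched as an assumption rather than something this workflow does: build through `docker buildx` with the GitHub Actions cache backend so intermediate layers survive between runs (this presumes buildx is wired up earlier in the job, e.g. via docker/setup-buildx-action).

```bash
# Hypothetical replacement for one of the build steps above, assuming buildx
# and the GitHub Actions cache backend are available to the runner:
docker buildx build \
  --cache-from type=gha \
  --cache-to type=gha,mode=max \
  --progress plain \
  AUTOMATIC1111/
```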

.github/workflows/executable.yml (new file, vendored, 22 lines)

@@ -0,0 +1,22 @@
name: Check executable
on: [push]
jobs:
  check:
    runs-on: ubuntu-latest
    name: Check all sh
    steps:
      - run: git config --global core.fileMode true
      - uses: actions/checkout@v3
      - shell: bash
        run: |
          shopt -s globstar;
          FAIL=0
          for file in **/*.sh; do
            if [ -f "${file}" ] && [ -r "${file}" ] && [ ! -x "${file}" ]; then
              echo "$file" is not executable;
              FAIL=1
            fi
          done
          exit ${FAIL}
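If the check fails, the usual local fix is to mark the script executable and have git record the file-mode bit; a sketch (the path is a placeholder, not a file in this repo):

```bash
chmod +x path/to/script.sh
# record the executable bit in the index, even on filesystems that ignore file modes
git update-index --chmod=+x path/to/script.sh
git commit -m "make script executable"
```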

AUTOMATIC1111/Dockerfile (new file, 53 lines)

@@ -0,0 +1,53 @@
# syntax=docker/dockerfile:1
FROM alpine/git:2.36.2 as download
RUN <<EOF
# who knows
git config --global http.postBuffer 1048576000
git clone https://github.com/sczhou/CodeFormer.git repositories/CodeFormer
git clone https://github.com/CompVis/stable-diffusion.git repositories/stable-diffusion
git clone https://github.com/CompVis/taming-transformers.git repositories/taming-transformers
rm -rf repositories/taming-transformers/data repositories/taming-transformers/assets
EOF
FROM pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime
SHELL ["/bin/bash", "-ceuxo", "pipefail"]
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install git fonts-dejavu-core -y && apt-get clean
RUN <<EOF
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
cd stable-diffusion-webui
git reset --hard db6db585eb9ee48e7315e28603e18531dbc87067
pip install -U --prefer-binary --no-cache-dir -r requirements.txt
EOF
ENV ROOT=/workspace/stable-diffusion-webui \
WORKDIR=/workspace/stable-diffusion-webui/repositories/stable-diffusion
COPY --from=download /git/ ${ROOT}
RUN pip install --prefer-binary -U --no-cache-dir -r ${ROOT}/repositories/CodeFormer/requirements.txt
# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step
ARG SHA=17a7477c7282b0ff16fa1232e5922c0a645e4459
RUN <<EOF
cd stable-diffusion-webui
git pull
git reset --hard ${SHA}
pip install --prefer-binary --no-cache-dir -r requirements.txt
EOF
RUN pip install --prefer-binary -U --no-cache-dir opencv-python-headless markupsafe==2.0.1
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
COPY . /docker
RUN chmod +x /docker/mount.sh && python3 /docker/info.py ${ROOT}/modules/ui.py
WORKDIR ${WORKDIR}
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD /docker/mount.sh && python3 -u ../../webui.py --listen --port 7860 ${CLI_ARGS}
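The image is normally built through the compose file further down, but it can also be built directly; a minimal sketch, assuming BuildKit (which the `# syntax` heredocs require) and reusing the `SHA` build arg defined above. The image tag is arbitrary.

```bash
# build the AUTOMATIC1111 image on its own, pinned to the sha from the Dockerfile
DOCKER_BUILDKIT=1 docker build \
  --build-arg SHA=17a7477c7282b0ff16fa1232e5922c0a645e4459 \
  -t webui-automatic1111 AUTOMATIC1111/
```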

AUTOMATIC1111/README.md (new file, 14 lines)

@@ -0,0 +1,14 @@
# WebUI for AUTOMATIC1111
The WebUI of [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) as a docker container!
## Setup
Clone this repo, download the `model.ckpt` and `GFPGANv1.3.pth` and put them into the `models` folder as mentioned in [the main README](../README.md), then run
```
cd AUTOMATIC1111
docker compose up --build
```
You can change the cli parameters in `AUTOMATIC1111/docker-compose.yml`. The full list of cli parameters can be found [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/shared.py)
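Spelled out end to end, the setup above looks roughly like this (a sketch; the `/path/to/` locations are placeholders, and the weight files are obtained separately as described in the main README):

```bash
git clone https://github.com/AbdBarho/stable-diffusion-webui-docker.git
cd stable-diffusion-webui-docker
# model.ckpt and GFPGANv1.3.pth are downloaded manually beforehand
cp /path/to/model.ckpt /path/to/GFPGANv1.3.pth models/
cd AUTOMATIC1111
docker compose up --build
```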

AUTOMATIC1111/config.json (new file)

@@ -0,0 +1 @@
{"outdir_samples": "/output", "outdir_txt2img_samples": "/output/txt2img-images", "outdir_img2img_samples": "/output/img2img-images", "outdir_extras_samples": "/output/extras-images", "outdir_txt2img_grids": "/output/txt2img-grids", "outdir_img2img_grids": "/output/img2img-grids", "outdir_save": "/output/saved", "__WARNING__": "DON'T CHANGE ANYTHING BEFORE THIS", "outdir_grids": "", "save_to_dirs": false, "save_to_dirs_prompt_len": 10, "samples_save": true, "samples_format": "png", "grid_save": true, "return_grid": true, "grid_format": "png", "grid_extended_filename": false, "grid_only_if_multiple": true, "n_rows": -1, "jpeg_quality": 80, "export_for_4chan": true, "enable_pnginfo": true, "font": "DejaVuSans.ttf", "enable_emphasis": true, "save_txt": false, "ESRGAN_tile": 192, "ESRGAN_tile_overlap": 8, "random_artist_categories": [], "upscale_at_full_resolution_padding": 16, "show_progressbar": true, "show_progress_every_n_steps": 0, "multiple_tqdm": true, "face_restoration_model": "CodeFormer", "code_former_weight": 0.5}

AUTOMATIC1111/docker-compose.yml (new file, 21 lines)

@@ -0,0 +1,21 @@
version: '3.9'
services:
  model:
    build: .
    ports:
      - "7860:7860"
    volumes:
      - ../cache:/cache
      - ../output:/output
      - ../models:/models
      - ./config.json:/docker/config.json
    environment:
      - CLI_ARGS=--medvram --opt-split-attention
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['0']
              capabilities: [gpu]
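`CLI_ARGS` and `device_ids` are the two knobs this file exposes. To try different WebUI flags without editing the file, one option (an assumption about workflow, not something documented in this repo) is to override the environment for a one-off run; `--lowvram` is one of the flags listed in the linked `shared.py`:

```bash
# start a throwaway container with different CLI args on the published port
docker compose run --rm --service-ports -e CLI_ARGS="--lowvram" model
```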

AUTOMATIC1111/info.py (new file, 14 lines)

@@ -0,0 +1,14 @@
import sys
from pathlib import Path

# Patch the UI file given as the first argument so the rendered page links
# back to this project.
file = Path(sys.argv[1])
file.write_text(
    file.read_text()\
        .replace(' return demo', """
    with demo:
        gr.Markdown(
            'Created by [AUTOMATIC1111 / stable-diffusion-webui-docker](https://github.com/AbdBarho/stable-diffusion-webui-docker/tree/master/AUTOMATIC1111)'
        )
    return demo
""", 1)
)

AUTOMATIC1111/mount.sh (new executable file, 32 lines)

@@ -0,0 +1,32 @@
#!/bin/bash

declare -A MODELS

MODELS["${WORKDIR}/models/ldm/stable-diffusion-v1/model.ckpt"]=model.ckpt
MODELS["${ROOT}/GFPGANv1.3.pth"]=GFPGANv1.3.pth

for path in "${!MODELS[@]}"; do
  name=${MODELS[$path]}
  base=$(dirname "${path}")
  from_path="/models/${name}"
  if test -f "${from_path}"; then
    mkdir -p "${base}" && ln -sf "${from_path}" "${path}" && echo "Mounted ${name}"
  else
    echo "Skipping ${name}"
  fi
done

# force realesrgan cache
rm -rf /opt/conda/lib/python3.7/site-packages/realesrgan/weights
ln -s -T /models /opt/conda/lib/python3.7/site-packages/realesrgan/weights

# force facexlib cache
mkdir -p /cache/weights/ ${WORKDIR}/gfpgan/
ln -sf /cache/weights/ ${WORKDIR}/gfpgan/

# code former cache
rm -rf ${ROOT}/repositories/CodeFormer/weights/CodeFormer ${ROOT}/repositories/CodeFormer/weights/facelib
ln -sf -T /cache/weights ${ROOT}/repositories/CodeFormer/weights/CodeFormer
ln -sf -T /cache/weights ${ROOT}/repositories/CodeFormer/weights/facelib

# mount config
ln -sf /docker/config.json ${WORKDIR}/config.json
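The script links rather than copies the weights, so the large files stay in the host `models` folder. A quick sanity check from the host while the service is running (a sketch; `model` is the service name from the compose file, and the path comes from the `WORKDIR` set in the Dockerfile):

```bash
docker compose exec model ls -l \
  /workspace/stable-diffusion-webui/repositories/stable-diffusion/models/ldm/stable-diffusion-v1/
# model.ckpt should appear as a symlink pointing at /models/model.ckpt
```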

README.md

@@ -1,8 +1,15 @@
# Stable Diffusion WebUI Docker
Run Stable Diffusion on your machine with a nice UI without any hassle!
This repository provides the [WebUI](https://github.com/hlky/stable-diffusion-webui) as a docker image for easy setup and deployment. Please note that the WebUI is experimental and evolving quickly, so expect some bugs.
This repository provides the [WebUI](https://github.com/hlky/stable-diffusion-webui) as a docker image for easy setup and deployment.
Now with experimental support for 2 other forks:
- [AUTOMATIC1111](./AUTOMATIC1111/) (Stable, very few bugs!)
- [lstein](./lstein/)
NOTE: big update coming up!
## Features
@@ -10,7 +17,6 @@ This repository provides the [WebUI](https://github.com/hlky/stable-diffusion-we
- Support for 6GB GPU cards.
- GFPGAN for face reconstruction, RealESRGAN for super-sampling.
- Experimental:
- [Textual Inversion](https://github.com/hlky/sd-enable-textual-inversion)
- Latent Diffusion Super Resolution
- GoBig
- GoLatent
@@ -57,15 +63,15 @@ Will start the app on http://localhost:7860/
Note: the first start will take some time, as some other models will be downloaded; these will be cached in the `cache` folder, so subsequent runs are faster.
## Config
in the `docker-compose.yml` you can change the `CLI_ARGS` variable, which contains the arguments that will be passed to the WebUI. By default: `--extra-models-cpu --optimized-turbo` are given, which allow you to use this model on a 6GB GPU. However, some features might not be available in the mode.
[You can find the full list of arguments here.](https://github.com/hlky/stable-diffusion/blob/d667ff52a36b4e79526f01555bfbf85428f334ce/scripts/webui.py)
### FAQ
You can find fixes to common issues [in the wiki page.](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Main)
You can find fixes to common issues [in the wiki page.](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/FAQ)
## Config
in the `docker-compose.yml` you can change the `CLI_ARGS` variable, which contains the arguments that will be passed to the WebUI. By default: `--extra-models-cpu --optimized-turbo` are given, which allow you to use this model on a 6GB GPU. However, some features might not be available in the mode. [You can find the full list of arguments here.](https://github.com/hlky/stable-diffusion-webui/blob/2b1ac8daf7ea82c6c56eabab7e80ec1c33106a98/scripts/webui.py)
You can set the `WEBUI_SHA` to [any SHA from the main repo](https://github.com/hlky/stable-diffusion/commits/main), this will build the container against that commit. Use at your own risk.
# Disclaimer
@@ -79,4 +85,7 @@ Special thanks to everyone behind these awesome projects, without them, none of
- [hlky/stable-diffusion-webui](https://github.com/hlky/stable-diffusion-webui)
- [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- [lstein/stable-diffusion](https://github.com/lstein/stable-diffusion)
- [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion)
- [hlky/sd-enable-textual-inversion](https://github.com/hlky/sd-enable-textual-inversion)
- [devilismyfriend/latent-diffusion](https://github.com/devilismyfriend/latent-diffusion)

docker-compose.yml

@@ -2,8 +2,12 @@ version: '3.9'
services:
  model:
    build: ./build/
    restart: on-failure
    build:
      context: ./hlky/
      args:
        # You can choose any commit sha from https://github.com/hlky/stable-diffusion/commits/main
        # USE AT YOUR OWN RISK! otherwise just leave it empty.
        WEBUI_SHA:
    ports:
      - "7860:7860"
    volumes:
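Because `WEBUI_SHA` is listed under `args` without a value, compose forwards whatever is set in the calling environment. A sketch of pinning the hlky WebUI this way (the sha shown is just the default baked into the Dockerfile; any sha from the linked commit list can be substituted, at your own risk):

```bash
# pin the build to a specific upstream commit, then start it
WEBUI_SHA=b9d97c9816251933d094f1dae43d2c631a07db7a docker compose build
docker compose up
```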

hlky/Dockerfile

@@ -2,34 +2,30 @@
FROM continuumio/miniconda3:4.12.0
RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y
RUN git clone https://github.com/hlky/stable-diffusion.git && cd stable-diffusion && git reset --hard ff8c2d0b709f1e4180fb19fa5c27ec28c414cedd
RUN conda env update --file stable-diffusion/environment.yaml --name base && conda clean -a -y
SHELL ["/bin/bash", "-ceuxo", "pipefail"]
# fonts for generating the grid
RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y
RUN apt-get update && apt install fonts-dejavu-core rsync -y && apt-get clean
RUN <<EOF
git clone https://github.com/sd-webui/stable-diffusion-webui.git stable-diffusion
cd stable-diffusion
git reset --hard 2b1ac8daf7ea82c6c56eabab7e80ec1c33106a98
conda env update --file environment.yaml -n base
conda clean -a -y
EOF
# new dependency, should be added to the environment.yaml
RUN pip install -U --no-cache-dir pyperclip
# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step
RUN cd stable-diffusion && git pull && git reset --hard c84748aa6802c2f934687883a79bde745d2a58a6 && \
conda env update --file environment.yaml --name base && conda clean -a -y \
&& pip install -U --no-cache-dir pyperclip
# download dev UI version, update the sha below in case you want some other version
# RUN <<EOF
# git clone https://github.com/hlky/stable-diffusion-webui.git
# cd stable-diffusion-webui
# # map to this file: https://github.com/hlky/stable-diffusion-webui/blob/master/.github/sync.yml
# git reset --hard 49e6178fd82ca736f9bbc621c6b12487c300e493
# cp -t /stable-diffusion/scripts/ webui.py relauncher.py txt2img.yaml
# cp -t /stable-diffusion/configs/webui webui.yaml
# cp -t /stable-diffusion/frontend/ frontend/*
# cd / && rm -rf stable-diffusion-webui
# EOF
ARG WEBUI_SHA=b9d97c9816251933d094f1dae43d2c631a07db7a
RUN cd stable-diffusion && git pull && git reset --hard ${WEBUI_SHA} && \
conda env update --file environment.yaml --name base && conda clean -a -y
# Textual inversion
RUN <<EOF
@@ -53,7 +49,7 @@ EOF
# add info
COPY . /docker/
RUN python /docker/info.py /stable-diffusion/frontend/frontend.py
RUN python /docker/info.py /stable-diffusion/frontend/frontend.py && chmod +x /docker/mount.sh
WORKDIR /stable-diffusion
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""

hlky/mount.sh

@@ -26,6 +26,5 @@ if test -f /models/LDSR.yaml; then
fi
# force facexlib cache
mkdir -p /cache/weights/
rm -rf /stable-diffusion/src/facexlib/facexlib/weights
ln -sf /cache/weights/ /stable-diffusion/src/facexlib/facexlib/
mkdir -p /cache/weights/ /stable-diffusion/gfpgan/
ln -sf /cache/weights/ /stable-diffusion/gfpgan/

lstein/Dockerfile (new file, 29 lines)

@@ -0,0 +1,29 @@
# syntax=docker/dockerfile:1
FROM continuumio/miniconda3:4.12.0
SHELL ["/bin/bash", "-ceuxo", "pipefail"]
RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y
RUN apt-get update && apt install fonts-dejavu-core rsync -y && apt-get clean
RUN <<EOF
git clone https://github.com/lstein/stable-diffusion.git
cd stable-diffusion
git reset --hard 751283a2de81bee4bb571fbabe4adb19f1d85b97
conda env update --file environment.yaml -n base
conda clean -a -y
EOF
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
WORKDIR /stable-diffusion
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD mkdir -p /stable-diffusion/models/ldm/stable-diffusion-v1/ && \
ln -sf /models/model.ckpt /stable-diffusion/models/ldm/stable-diffusion-v1/model.ckpt && \
python3 -u scripts/dream.py --outdir /output --web --host 0.0.0.0 --port 7860 ${CLI_ARGS}
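The CMD starts `dream.py` in web mode on port 7860; once the container is up, a quick check from the host (a sketch, assuming the default port mapping from the compose file below):

```bash
# fails loudly if the web UI is not answering yet
curl -sSf http://localhost:7860/ >/dev/null && echo "UI is up"
```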

lstein/README.md (new file, 14 lines)

@@ -0,0 +1,14 @@
# WebUI for lstein
The WebUI of [lstein/stable-diffusion](https://github.com/lstein/stable-diffusion) as a docker container!
Although it is a simple UI, the project has a lot of potential.
## Setup
Clone this repo, download the `model.ckpt` and put it into the `models` folder as mentioned in [the main README](../README.md), then run
```
cd lstein
docker compose up --build
```

lstein/docker-compose.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
version: '3.9'
services:
  model:
    build: .
    ports:
      - "7860:7860"
    volumes:
      - ../cache:/cache
      - ../output:/output
      - ../models:/models
    environment:
      - CLI_ARGS=
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['0']
              capabilities: [gpu]
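The `deploy` block reserves GPU `0` through the NVIDIA container runtime. A quick way to confirm the container actually sees it (a sketch; run while the service is up, and it assumes the runtime exposes the usual driver utilities inside the container):

```bash
docker compose exec model nvidia-smi
```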

models/.gitignore (vendored)

@@ -1,5 +1,6 @@
/model.ckpt
/GFPGANv1.3.pth
/RealESRGAN_x2plus.pth
/RealESRGAN_x4plus.pth
/RealESRGAN_x4plus_anime_6B.pth
/LDSR.ckpt