mirror of
https://github.com/AbdBarho/stable-diffusion-webui-docker.git
synced 2025-11-01 10:43:22 -04:00
[BREAKING] Rename UIs (#254)
Rename the UIs in docker compose to their new names. Changes folder names and the output folder structure. Closes issue #263. Adds a `sygil-sl` service instead of a docker compose flag.
This commit is contained in:
58
services/invoke/Dockerfile
Normal file
58
services/invoke/Dockerfile
Normal file
@@ -0,0 +1,58 @@
# syntax=docker/dockerfile:1

# Image for the InvokeAI web UI service.
FROM python:3.10-slim

# Every RUN uses bash with -e (exit on error), -u (unset vars are errors),
# -x (trace), -o pipefail, so heredoc RUN blocks below fail fast.
SHELL ["/bin/bash", "-ceuxo", "pipefail"]

ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1 PIP_NO_CACHE_DIR=1

# Install the CUDA 11.6 build of torch in its own layer so it stays cached.
RUN pip install torch==1.12.0+cu116 --extra-index-url https://download.pytorch.org/whl/cu116

RUN apt-get update && apt-get install git -y && apt-get clean

RUN git clone https://github.com/invoke-ai/InvokeAI.git /stable-diffusion

WORKDIR /stable-diffusion

# Pin an older known-good commit and pre-install its requirements; the later
# checkout (ARG SHA below) then only installs the requirements diff, which
# presumably keeps rebuilds fast when the pinned SHA moves — the interface
# (requirements file path) must match between the two commits.
RUN <<EOF
git reset --hard 5c31feb3a1096d437c94b6e1c3224eb7a7224a85
# Large clone objects can trip the default HTTP post buffer.
git config --global http.postBuffer 1048576000
pip install -r binary_installer/py3.10-linux-x86_64-cuda-reqs.txt
EOF

# patch match:
# https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md
RUN <<EOF
apt-get update
# apt-get install build-essential python3-opencv libopencv-dev -y
apt-get install make g++ libopencv-dev -y
apt-get clean
# pypatchmatch's build looks for opencv.pc; Debian ships it as opencv4.pc.
cd /usr/lib/x86_64-linux-gnu/pkgconfig/
ln -sf opencv4.pc opencv.pc
EOF

# The commit actually deployed; overridable at build time without editing.
ARG BRANCH=main SHA=38cd968130e386d188bdef68f9fbfbbbfabb2da0
RUN <<EOF
git fetch
git reset --hard
git checkout ${BRANCH}
git reset --hard ${SHA}
pip install -r binary_installer/py3.10-linux-x86_64-cuda-reqs.txt
EOF

# Headless OpenCV avoids X11 deps; the import proves patchmatch compiled.
RUN pip install --force-reinstall opencv-python-headless && python3 -c "from patchmatch import patch_match"

COPY . /docker/
RUN <<EOF
# Stamp the frontend page with a "deployed with" footer.
python3 /docker/info.py /stable-diffusion/frontend/dist/index.html
# Marker file expected by InvokeAI at startup.
touch ~/.invokeai
EOF

ENV ROOT=/stable-diffusion PYTHONPATH="${PYTHONPATH}:${ROOT}" PRELOAD=false CLI_ARGS=""
EXPOSE 7860

ENTRYPOINT ["/docker/entrypoint.sh"]
# Shell form on purpose: ${ROOT} and ${CLI_ARGS} must expand at runtime.
CMD python3 -u scripts/invoke.py --web --host 0.0.0.0 --port 7860 --config /docker/models.yaml --root_dir ${ROOT} --outdir /output/invoke ${CLI_ARGS}
47
services/invoke/entrypoint.sh
Executable file
47
services/invoke/entrypoint.sh
Executable file
@@ -0,0 +1,47 @@
#!/bin/bash
# Entry point for the InvokeAI container: symlinks model/cache locations the
# app expects into the shared /data volume, optionally preloads models, then
# execs the container CMD. Requires ROOT and PRELOAD in the environment
# (both set by the Dockerfile).

set -Eeuo pipefail

# Map: in-container path the app reads -> backing path on the /data volume.
# A value ending in '/' denotes a directory that must exist before linking.
declare -A MOUNTS

# cache
MOUNTS["/root/.cache"]=/data/.cache/

# ui specific
MOUNTS["${ROOT}/models/codeformer"]=/data/Codeformer/

MOUNTS["${ROOT}/models/gfpgan/GFPGANv1.4.pth"]=/data/GFPGAN/GFPGANv1.4.pth
MOUNTS["${ROOT}/models/gfpgan/weights"]=/data/.cache/

MOUNTS["${ROOT}/models/realesrgan"]=/data/RealESRGAN/

MOUNTS["${ROOT}/models/bert-base-uncased"]=/data/.cache/huggingface/transformers/
MOUNTS["${ROOT}/models/openai/clip-vit-large-patch14"]=/data/.cache/huggingface/transformers/
MOUNTS["${ROOT}/models/CompVis/stable-diffusion-safety-checker"]=/data/.cache/huggingface/transformers/

MOUNTS["${ROOT}/embeddings"]=/data/embeddings/

# hacks
MOUNTS["${ROOT}/models/clipseg"]=/data/.cache/invoke/clipseg/

for to_path in "${!MOUNTS[@]}"; do
  from_path="${MOUNTS[${to_path}]}"
  # Replace whatever the image shipped at to_path with a symlink into /data.
  rm -rf "${to_path}"
  mkdir -p "$(dirname "${to_path}")"
  # ends with slash, make it!
  if [[ "$from_path" == */ ]]; then
    mkdir -vp "$from_path"
  fi

  ln -sT "${from_path}" "${to_path}"
  printf 'Mounted %s\n' "$(basename "${from_path}")"
done

# BUG FIX: the original `if "${PRELOAD}" == "true"; then` executed $PRELOAD as
# a *command* with arguments `==` and `true` — it only worked by accident for
# the literal values true/false. Use a real string comparison.
if [[ "${PRELOAD}" == "true" ]]; then
  python3 -u scripts/preload_models.py --no-interactive --root "${ROOT}" --config_file /docker/models.yaml
fi

# Hand off to the CMD so it runs as PID 1 and receives signals.
exec "$@"
13
services/invoke/info.py
Normal file
13
services/invoke/info.py
Normal file
@@ -0,0 +1,13 @@
"""Patch the InvokeAI frontend index.html with a deployment attribution note.

Usage: python3 info.py <path/to/index.html>
"""
import sys
from pathlib import Path


def patch_index_html(path):
    """Rewrite ``path`` in place, appending a "Deployed with" footer.

    Replaces the first occurrence of the frontend's root placeholder
    (``' <div id="root"></div>'``, note the leading space) with the same div
    plus an attribution link. Idempotent in effect: after one run the
    leading-space pattern no longer matches, so a second run is a no-op.
    """
    file = Path(path)
    file.write_text(
        file.read_text().replace(' <div id="root"></div>', """
<div id="root"></div>
<div>
Deployed with <a href="https://github.com/AbdBarho/stable-diffusion-webui-docker/">stable-diffusion-webui-docker</a>
</div>
""", 1)
    )


# Guarded so the module can be imported without side effects; CLI behavior
# is unchanged (the Dockerfile invokes this file with the index.html path).
if __name__ == "__main__":
    patch_index_html(sys.argv[1])
23
services/invoke/models.yaml
Normal file
23
services/invoke/models.yaml
Normal file
@@ -0,0 +1,23 @@
# This file describes the alternative machine learning models
# available to InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
stable-diffusion-1.5:
  description: Stable Diffusion version 1.5
  weights: /data/StableDiffusion/v1-5-pruned-emaonly.ckpt
  vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  width: 512
  height: 512
  # Model loaded when none is requested explicitly.
  default: true
inpainting-1.5:
  description: RunwayML SD 1.5 model optimized for inpainting
  weights: /data/StableDiffusion/sd-v1-5-inpainting.ckpt
  vae: /data/VAE/vae-ft-mse-840000-ema-pruned.ckpt
  config: ./configs/stable-diffusion/v1-inpainting-inference.yaml
  width: 512
  height: 512
  default: false
Reference in New Issue
Block a user