Mirror of https://github.com/AbdBarho/stable-diffusion-webui-docker.git (synced 2025-10-27 08:14:26 -04:00)

Compare commits
7 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | a1c16942ff |  |
|  | 6ae3473214 |  |
|  | 5d731cb43c |  |
|  | c1fa2f1457 |  |
|  | d8cfdd3af5 |  |
|  | 03d12cbcd9 |  |
|  | 2e76b6c4e7 |  |
.github/ISSUE_TEMPLATE/bug.md (vendored): 11 changed lines
@@ -7,13 +7,17 @@ assignees: ''

 ---

-**Has this issue been opened before? Check the [FAQ](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Main), the [issues](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues?q=is%3Aissue) and in [the issues in the WebUI repo](https://github.com/hlky/stable-diffusion-webui)**
+**Has this issue been opened before? Check the [FAQ](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Main), the [issues](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues?q=is%3Aissue)**

 **Describe the bug**

+**Which UI**
+
+hlky or auto or auto-cpu or lstein?
+
 **Steps to Reproduce**
 1. Go to '...'
 2. Click on '....'

@@ -22,8 +26,11 @@ assignees: ''

 **Hardware / Software:**
 - OS: [e.g. Windows / Ubuntu and version]
+- RAM:
 - GPU: [Nvidia 1660 / No GPU]
-- Version [e.g. 22]
+- VRAM:
+- Docker Version, Docker compose version
+- Release version [e.g. 1.0.1]

 **Additional context**
 Any other context about the problem here. If applicable, add screenshots to help explain your problem.
.github/workflows/docker.yml (vendored): 16 changed lines
@@ -3,13 +3,17 @@ name: Build Images

 on: [push]

 jobs:
-  build_all:
+  build:
+    strategy:
+      matrix:
+        profile:
+          - auto
+          - hlky
+          - lstein
+          - download
     runs-on: ubuntu-latest
-    name: All
+    name: ${{ matrix.profile }}
     steps:
       - uses: actions/checkout@v3
       # better caching?
-      - run: docker compose --profile auto build --progress plain
-      - run: docker compose --profile hlky build --progress plain
-      - run: docker compose --profile lstein build --progress plain
-      - run: docker compose --profile download build --progress plain
+      - run: docker compose --profile ${{ matrix.profile }} build --progress plain
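With this change, the workflow no longer builds all four profiles sequentially in a single job; each profile becomes its own matrix job, so the images build in parallel and a failure in one profile does not block the others. A rough reconstruction of the resulting workflow file, with the indentation and the unchanged `name`/`on` lines assumed from the hunk context:

```yaml
# Sketch of .github/workflows/docker.yml after this change
# (indentation and unchanged lines reconstructed from the hunk context).
name: Build Images

on: [push]

jobs:
  build:
    strategy:
      matrix:
        profile:
          - auto
          - hlky
          - lstein
          - download
    runs-on: ubuntu-latest
    name: ${{ matrix.profile }}
    steps:
      - uses: actions/checkout@v3
      # better caching?
      - run: docker compose --profile ${{ matrix.profile }} build --progress plain
```

Each matrix entry corresponds to a Compose profile defined in the repository's docker-compose.yml, so the same command works locally, e.g. `docker compose --profile hlky build --progress plain`.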
@@ -47,6 +47,9 @@ Screenshots:

 Visit the wiki for [Setup](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Setup) and [Usage](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Usage) instructions, check out the [FAQ](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/FAQ) page if you face any problems, or create a new issue!

+## Contributing
+
+Contributions are welcome! Create an issue describing what you want to contribute (before you implement anything) so we can talk about it.

 ## Disclaimer

 The authors of this project are not responsible for any content generated using this interface.
@@ -41,7 +41,8 @@ RUN pip install --prefer-binary --no-cache-dir -r ${ROOT}/repositories/CodeFormer/requirements.txt

 # Note: don't update the sha of previous versions because the install will take forever
 # instead, update the repo state in a later step
-ARG SHA=744ac1f89a075be4535146279feef800214c35a8
+ARG SHA=2ddaeb318a9626502ef4bf949a312253d8021ff0
 RUN <<EOF
 cd stable-diffusion-webui
 git pull --rebase

@@ -49,7 +50,7 @@ git reset --hard ${SHA}
 pip install --prefer-binary --no-cache-dir -r requirements.txt
 EOF

-RUN pip install --prefer-binary -U --no-cache-dir opencv-python-headless markupsafe==2.0.1 gfpgan==1.3.5
+RUN pip install --prefer-binary -U --no-cache-dir opencv-python-headless

 ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
@@ -7,7 +7,7 @@ file.write_text(

 .replace(' return demo', """
 with demo:
     gr.Markdown(
-    'Created by [AUTOMATIC1111 / stable-diffusion-webui-docker](https://github.com/AbdBarho/stable-diffusion-webui-docker/tree/master/AUTOMATIC1111)'
+    'Created by [AUTOMATIC1111 / stable-diffusion-webui-docker](https://github.com/AbdBarho/stable-diffusion-webui-docker/)'
     )
     return demo
 """, 1)
@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive

 RUN conda install python=3.8.5 && conda clean -a -y
 RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y

-RUN apt-get update && apt install fonts-dejavu-core rsync -y && apt-get clean
+RUN apt-get update && apt install fonts-dejavu-core rsync gcc -y && apt-get clean


 RUN <<EOF
@@ -26,7 +26,9 @@ RUN pip install -U --no-cache-dir pyperclip

 # Note: don't update the sha of previous versions because the install will take forever
 # instead, update the repo state in a later step
 ARG BRANCH=master
-ARG SHA=7623a5734740025d79b710f3744bff9276e1467b
+ARG SHA=833a91047df999302f699637768741cecee9c37b
+# ARG BRANCH=dev
+# ARG SHA=b4de6caf697d311c1238c15a4c863fa529a35522
 RUN <<EOF
 cd stable-diffusion
 git fetch
@@ -56,4 +58,6 @@ WORKDIR /stable-diffusion

 ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
 EXPOSE 7860
 # run, -u to not buffer stdout / stderr
-CMD /docker/mount.sh && python3 -u scripts/webui.py --outdir /output --ckpt /cache/models/model.ckpt --ldsr-dir /latent-diffusion ${CLI_ARGS}
+CMD /docker/mount.sh && \
+  python3 -u scripts/webui.py --outdir /output --ckpt /cache/models/model.ckpt --ldsr-dir /latent-diffusion --inbrowser ${CLI_ARGS}
+# STREAMLIT_SERVER_PORT=7860 python -m streamlit run scripts/webui_streamlit.py
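The `CLI_ARGS` variable declared in the `ENV` line above is expanded at the end of the launch command, so extra webui flags can be supplied at run time without rebuilding the image. A minimal, hypothetical Compose override illustrating the idea; the service name and the example flag are assumptions, not taken from this diff:

```yaml
# Hypothetical docker-compose.override.yml sketch: pass extra flags to the
# webui launch command through CLI_ARGS instead of editing the Dockerfile CMD.
# The service name "hlky" and the flag "--optimized-turbo" are assumptions.
services:
  hlky:
    environment:
      - CLI_ARGS=--optimized-turbo
```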
@@ -33,3 +33,6 @@ fi

 # force facexlib cache
 mkdir -p /cache/weights/ /stable-diffusion/gfpgan/
 ln -sf /cache/weights/ /stable-diffusion/gfpgan/
+
+# streamlit config
+ln -sf /docker/webui_streamlit.yaml /stable-diffusion/configs/webui/webui_streamlit.yaml
services/hlky/webui_streamlit.yaml (new file): 102 lines
@@ -0,0 +1,102 @@
# UI defaults configuration file. It is automatically loaded if located at configs/webui/webui_streamlit.yaml.
general:
  gpu: 0
  outdir: /outputs
  ckpt: "/cache/models/model.ckpt"
  fp:
    name: "embeddings/alex/embeddings_gs-11000.pt"
  GFPGAN_dir: "./src/gfpgan"
  RealESRGAN_dir: "./src/realesrgan"
  RealESRGAN_model: "RealESRGAN_x4plus"
  outdir_txt2img: /outputs/txt2img-samples
  outdir_img2img: /outputs/img2img-samples
  gfpgan_cpu: False
  esrgan_cpu: False
  extra_models_cpu: False
  extra_models_gpu: False
  save_metadata: True
  skip_grid: False
  skip_save: False
  grid_format: "jpg:95"
  save_format: "png"
  n_rows: -1
  no_verify_input: False
  no_half: False
  precision: "autocast"
  optimized: False
  optimized_turbo: False
  update_preview: True
  update_preview_frequency: 1

txt2img:
  prompt:
  height: 512
  width: 512
  cfg_scale: 5.0
  seed: ""
  batch_count: 1
  batch_size: 1
  sampling_steps: 50
  default_sampler: "k_lms"
  separate_prompts: False
  normalize_prompt_weights: True
  save_individual_images: True
  save_grid: True
  group_by_prompt: True
  save_as_jpg: False
  use_GFPGAN: True
  use_RealESRGAN: True
  RealESRGAN_model: "RealESRGAN_x4plus"
  variant_amount: 0.0
  variant_seed: ""

img2img:
  prompt:
  sampling_steps: 50
  # Adding an int to toggles enables the corresponding feature.
  # 0: Create prompt matrix (separate multiple prompts using |, and get all combinations of them)
  # 1: Normalize Prompt Weights (ensure sum of weights add up to 1.0)
  # 2: Loopback (use images from previous batch when creating next batch)
  # 3: Random loopback seed
  # 4: Save individual images
  # 5: Save grid
  # 6: Sort samples by prompt
  # 7: Write sample info files
  # 8: jpg samples
  # 9: Fix faces using GFPGAN
  # 10: Upscale images using Real-ESRGAN
  sampler_name: k_lms
  denoising_strength: 0.45
  # 0: Keep masked area
  # 1: Regenerate only masked area
  mask_mode: 0
  # 0: Just resize
  # 1: Crop and resize
  # 2: Resize and fill
  resize_mode: 0
  # Leave blank for random seed:
  seed: ""
  ddim_eta: 0.0
  cfg_scale: 5.0
  batch_count: 1
  batch_size: 1
  height: 512
  width: 512
  # Textual inversion embeddings file path:
  fp: ""
  loopback: True
  random_seed_loopback: True
  separate_prompts: False
  normalize_prompt_weights: True
  save_individual_images: True
  save_grid: True
  group_by_prompt: True
  save_as_jpg: False
  use_GFPGAN: True
  use_RealESRGAN: True
  RealESRGAN_model: "RealESRGAN_x4plus"
  variant_amount: 0.0
  variant_seed: ""

gfpgan:
  strength: 100
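The numbered comments in the img2img section describe a list of toggles: each integer added to the list enables the corresponding feature. The key itself is not visible in the captured hunk, so the snippet below is only an illustration of how such a list would presumably be written, assuming the option is literally named `toggles`:

```yaml
# Hypothetical illustration: the "toggles" key is assumed from the numbered
# comments in the file above and does not appear in the captured hunk.
img2img:
  # 1: normalize prompt weights, 4: save individual images,
  # 5: save grid, 9: fix faces using GFPGAN
  toggles: [1, 4, 5, 9]
```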
@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive

 RUN conda install python=3.8.5 && conda clean -a -y
 RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y

-RUN apt-get update && apt install fonts-dejavu-core rsync -y && apt-get clean
+RUN apt-get update && apt install fonts-dejavu-core rsync gcc -y && apt-get clean


 RUN <<EOF