Mirror of https://github.com/AbdBarho/stable-diffusion-webui-docker.git (synced 2025-11-03 10:43:37 -05:00)

Compare commits

71 Commits
| SHA1 |
|---|
| 83b78fe504 |
| 84f9cb84e7 |
| 6a66ff6abb |
| 59892da866 |
| fceb83c2b0 |
| 17b01a7627 |
| b96d7c30d0 |
| aae83bb8f2 |
| 10763a8f61 |
| 64e8f093d2 |
| 3e0a137c23 |
| a1c16942ff |
| 6ae3473214 |
| 5d731cb43c |
| c1fa2f1457 |
| d8cfdd3af5 |
| 03d12cbcd9 |
| 2e76b6c4e7 |
| 5eae2076ce |
| 725e1f39ba |
| ab651fe0d7 |
| f76f8d4671 |
| e32a48f42a |
| 76989b39a6 |
| 4d9fc381bb |
| bcee253fe0 |
| 499143009a |
| c614625f04 |
| ccd6e238b2 |
| 829864af9b |
| ccc7306f48 |
| 082876aab3 |
| ae834cb764 |
| 5f6d9fbb03 |
| d4da252343 |
| 5af482ed8c |
| ce4e190f8f |
| bae3590980 |
| 1588d1eecf |
| 9cbd58b3f4 |
| 089fc524d8 |
| 0d8b7d4ac8 |
| 561664ea6e |
| 77c2b2d217 |
| 6c0c610f27 |
| dc730b7f6b |
| 15952906a1 |
| 4aaf38970a |
| 61bd38dfe4 |
| bec4997639 |
| 44903ca3aa |
| af409daa0e |
| 19f9402076 |
| eef83a318c |
| bc8ec0fe55 |
| f5e9997a55 |
| fec97dc9e1 |
| 134130a3b2 |
| 2ecfb0fe59 |
| 89d8a17064 |
| 05829c5ae3 |
| 66c0658255 |
| b3fa72a614 |
| 3efd62aac2 |
| b5537dbc78 |
| 76bd1bf3cb |
| 43492b0ba1 |
| 6178aee73b |
| 3492e308f3 |
| 28fbfe4aa8 |
| 919b7adb2c |
							
								
								
									
.editorconfig (new file, 9 lines)
@@ -0,0 +1,9 @@
root = true

[*]
end_of_line = lf
indent_style = space
indent_size = 2
charset = utf-8
insert_final_newline = true
trim_trailing_whitespace = true
							
								
								
									
.gitattributes (vendored, new file, 1 line)
@@ -0,0 +1 @@
* text=auto eol=lf
							
								
								
									
.github/ISSUE_TEMPLATE/bug.md (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
---
name: Bug
about: Report a bug
title: ''
labels: bug
assignees: ''

---

**Has this issue been opened before? Check the [FAQ](https://github.com/AbdBarho/stable-diffusion-webui-docker/wiki/Main), the [issues](https://github.com/AbdBarho/stable-diffusion-webui-docker/issues?q=is%3Aissue)**



**Describe the bug**


**Which UI**

hlky or auto or auto-cpu or lstein?

**Steps to Reproduce**
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Hardware / Software:**
 - OS: [e.g. Windows / Ubuntu and version]
 - RAM:
 - GPU: [Nvidia 1660 / No GPU]
 - VRAM:
 - Docker Version, Docker compose version
 - Release version [e.g. 1.0.1]

**Additional context**
Any other context about the problem here. If applicable, add screenshots to help explain your problem.
							
								
								
									
.github/pull_request_template.md (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
### Update versions

- auto: https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/
- hlky: https://github.com/sd-webui/stable-diffusion-webui/commit/
- lstein: https://github.com/lstein/stable-diffusion/commit/
							
								
								
									
.github/workflows/docker.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
name: Build Images

on: [push]

jobs:
  build:
    strategy:
      matrix:
        profile:
          - auto
          - hlky
          - lstein
          - download
    runs-on: ubuntu-latest
    name: ${{ matrix.profile }}
    steps:
      - uses: actions/checkout@v3
      # better caching?
      - run: docker compose --profile ${{ matrix.profile }} build --progress plain
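The workflow above simply builds every compose profile on push. A minimal local equivalent, assuming Docker Compose v2 and the compose file introduced later in this diff (the profile names are the ones defined there; the exact command is an assumption, not part of the change):

```bash
# Build one profile's image locally, mirroring a single CI matrix entry.
docker compose --profile hlky build --progress plain
```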
							
								
								
									
.github/workflows/executable.yml1 (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
name: Check executable

on: [push]

jobs:
  check:
    runs-on: ubuntu-latest
    name: Check all sh
    steps:
      - run: git config --global core.fileMode true
      - uses: actions/checkout@v3
      - shell: bash
        run: |
          shopt -s globstar;
          FAIL=0
          for file in **/*.sh; do
              if [ -f "${file}" ] && [ -r "${file}" ] && [ ! -x "${file}" ]; then
                  echo "$file" is not executable;
                  FAIL=1
              fi
          done
          exit ${FAIL}
							
								
								
									
.github/workflows/stale.yml (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
name: 'Close stale issues and PRs'
on:
  schedule:
    - cron: '30 1 * * *'

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v5
        with:
          only-labels: awaiting-response
          stale-issue-message: This issue is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 7 days.
          stale-pr-message: This PR is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 7 days.
          close-issue-message: This issue was closed because it has been stalled for 7 days with no activity.
          close-pr-message: This PR was closed because it has been stalled for 7 days with no activity.
          days-before-issue-stale: 14
          days-before-pr-stale: 14
          days-before-issue-close: 7
          days-before-pr-close: 7
							
								
								
									
.gitignore (vendored, 1 change)
@@ -1 +1,2 @@
 /dev
+/.devcontainer
							
								
								
									
.vscode/launch.json (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
{
  // Use IntelliSense to learn about possible attributes.
  // Hover to view descriptions of existing attributes.
  // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
  "version": "0.2.0",
  "configurations": [
    {
      "name": "WebUI",
      "type": "python",
      "request": "launch",
      "program": "${file}",
      "cwd": "/stable-diffusion",
      "args": ["--ckpt", "${workspaceFolder}/models/model.ckpt", "--gfpgan-dir", "${workspaceFolder}/models/", "--extra-models-cpu"],
      "env": {
        "TRANSFORMERS_CACHE":"${workspaceFolder}/cache/transformers",
        "TORCH_HOME":"${workspaceFolder}/cache/torch"
      },
      "console": "integratedTerminal",
      "justMyCode": false
    }
  ]
}
Deleted file (name not shown in this view, 31 lines removed)
@@ -1,31 +0,0 @@
-# syntax=docker/dockerfile:1
-
-FROM continuumio/miniconda3:4.12.0
-
-
-RUN conda install python=3.8.5 && conda clean -a -y
-RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y
-RUN git clone https://github.com/hlky/stable-diffusion.git && cd stable-diffusion && git reset --hard 554bd068e6f2f6bc55449a67fe017ddd77090f28
-RUN conda env update --file stable-diffusion/environment.yaml --name base && conda clean -a -y
-
-
-RUN git clone https://github.com/hlky/stable-diffusion-webui.git && cd stable-diffusion-webui && \
-  git reset --hard b2dc4539d4171ab4fc78471a5bb9425d6a5d5445 && \
-  cp -t /stable-diffusion/scripts/  txt2img.yaml webui.py webui.yaml webui_playground.py  && \
-  cd / && rm -rf stable-diffusion-webui
-
-
-
-ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""
-
-WORKDIR /stable-diffusion
-
-EXPOSE 7860
-CMD ln -sf /models/model.ckpt /stable-diffusion/models/ldm/stable-diffusion-v1/model.ckpt && \
-  ln -sf /models/GFPGANv1.3.pth   /stable-diffusion/src/gfpgan/experiments/pretrained_models/GFPGANv1.3.pth && \
-  ln -sf /models/RealESRGAN_x4plus.pth   /stable-diffusion/src/realesrgan/experiments/pretrained_models/RealESRGAN_x4plus.pth && \
-  # force facexlib cache
-  mkdir -p /cache/weights/ && rm -rf /opt/conda/lib/python3.8/site-packages/facexlib/weights && \
-  ln -sf  /cache/weights/ /opt/conda/lib/python3.8/site-packages/facexlib/ && \
-  # run, -u to not buffer stdout / stderr
-  python3 -u scripts/webui.py ${CLI_ARGS}
							
								
								
									
cache/.gitignore (vendored, 2 changes)
@@ -1,3 +1,5 @@
 /torch
 /transformers
 /weights
+/models
+/custom-models
Modified file (name not shown in this view)
@@ -1,18 +1,11 @@
 version: '3.9'
 
-services:
-  model:
-    build: ./build/
-    restart: always
+x-base_service: &base_service
     ports:
       - "7860:7860"
     volumes:
-      - ./cache:/cache
-      - ./output:/output
-      - ./models:/models
-    environment:
-      - CLI_ARGS=--outdir /output --save-metadata --ckpt /models/model.ckpt
-      #--extra-models-cpu --optimized
+      - &v1 ./cache:/cache
+      - &v2 ./output:/output
     deploy:
       resources:
         reservations:
@@ -20,3 +13,45 @@ services:
               - driver: nvidia
                 device_ids: ['0']
                 capabilities: [gpu]
+
+name: webui-docker
+
+services:
+  download:
+    build: ./services/download/
+    profiles: ["download"]
+    volumes:
+      - *v1
+
+  hlky:
+    <<: *base_service
+    profiles: ["hlky"]
+    build: ./services/hlky/
+    environment:
+      - CLI_ARGS=--optimized-turbo
+
+  automatic1111: &automatic
+    <<: *base_service
+    profiles: ["auto"]
+    build: ./services/AUTOMATIC1111
+    volumes:
+      - *v1
+      - *v2
+      - ./services/AUTOMATIC1111/config.json:/stable-diffusion-webui/config.json
+    environment:
+      - CLI_ARGS=--medvram
+
+  automatic1111-cpu:
+    <<: *automatic
+    profiles: ["auto-cpu"]
+    deploy: {}
+    environment:
+      - CLI_ARGS=--no-half --precision full
+
+  lstein:
+    <<: *base_service
+    profiles: ["lstein"]
+    build: ./services/lstein/
+    environment:
+      - PRELOAD=false
+      - CLI_ARGS=
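A short usage sketch for the compose file above, assuming Docker Compose v2 and that the model files already exist under ./cache (profile and service names are taken from the diff; the exact commands are an assumption, not part of the change):

```bash
# Start one of the UIs defined above; the &v1/&v2 anchors bind-mount ./cache and ./output from the host.
docker compose --profile auto up --build       # AUTOMATIC1111 UI on port 7860
docker compose --profile auto-cpu up --build   # same service without the nvidia device reservation
docker compose --profile lstein up --build     # lstein UI, also on port 7860
```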
							
								
								
									
models/.gitignore (vendored, 4 lines removed)
@@ -1,4 +0,0 @@
-/model.ckpt
-/GFPGANv1.3.pth
-/RealESRGAN_x4plus.pth
-/RealESRGAN_x4plus_anime_6B.pth
							
								
								
									
scripts/chmod.sh (new executable file, 5 lines)
@@ -0,0 +1,5 @@
#!/bin/bash

set -Eeuo pipefail

find . -name "*.sh" -exec git update-index --chmod=+x {} \;
							
								
								
									
services/AUTOMATIC1111/Dockerfile (new file, 67 lines)
@@ -0,0 +1,67 @@
# syntax=docker/dockerfile:1

FROM alpine/git:2.36.2 as download


RUN git clone --depth 1 https://github.com/CompVis/stable-diffusion.git repositories/stable-diffusion
RUN git clone --depth 1 https://github.com/sczhou/CodeFormer.git repositories/CodeFormer
RUN git clone --depth 1 https://github.com/salesforce/BLIP.git repositories/BLIP

RUN <<EOF
# because taming-transformers is huge
git config --global http.postBuffer 1048576000
git clone --depth 1 https://github.com/CompVis/taming-transformers.git repositories/taming-transformers
rm -rf repositories/taming-transformers/data repositories/taming-transformers/assets
EOF


FROM continuumio/miniconda3:4.12.0

SHELL ["/bin/bash", "-ceuxo", "pipefail"]

ENV DEBIAN_FRONTEND=noninteractive

RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y

RUN apt-get update && apt install fonts-dejavu-core rsync -y && apt-get clean


RUN <<EOF
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
cd stable-diffusion-webui
git reset --hard 7e77938230d4fefb6edccdba0b80b61d8416673e
pip install --prefer-binary --no-cache-dir -r requirements.txt
EOF

ENV ROOT=/stable-diffusion-webui \
  WORKDIR=/stable-diffusion-webui/repositories/stable-diffusion


COPY --from=download /git/ ${ROOT}
RUN pip install --prefer-binary --no-cache-dir -r ${ROOT}/repositories/CodeFormer/requirements.txt

# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step

ARG SHA=dd911a47b3c3313b3938b700eb26cbd5bb3e1c95
RUN <<EOF
cd stable-diffusion-webui
git pull --rebase
git reset --hard ${SHA}
pip install --prefer-binary --no-cache-dir -r requirements.txt
EOF

RUN pip install --prefer-binary -U --no-cache-dir opencv-python-headless

ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch CLI_ARGS=""

COPY . /docker
RUN chmod +x /docker/mount.sh && python3 /docker/info.py ${ROOT}/modules/ui.py


WORKDIR ${WORKDIR}
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD /docker/mount.sh && \
  python3 -u ../../webui.py --listen --port 7860 --hide-ui-dir-config --ckpt-dir /cache/custom-models --ckpt /cache/models/model.ckpt ${CLI_ARGS}
							
								
								
									
services/AUTOMATIC1111/config.json (new file, 58 lines)
@@ -0,0 +1,58 @@
{
  "outdir_samples": "/output",
  "outdir_txt2img_samples": "/output/txt2img-images",
  "outdir_img2img_samples": "/output/img2img-images",
  "outdir_extras_samples": "/output/extras-images",
  "outdir_txt2img_grids": "/output/txt2img-grids",
  "outdir_img2img_grids": "/output/img2img-grids",
  "outdir_save": "/output/saved",
  "font": "DejaVuSans.ttf",
  "__WARNING__": "DON'T CHANGE ANYTHING BEFORE THIS",

  "samples_filename_format": "",
  "outdir_grids": "",
  "save_to_dirs": false,
  "grid_save_to_dirs": false,
  "save_to_dirs_prompt_len": 10,
  "samples_save": true,
  "samples_format": "png",
  "grid_save": true,
  "return_grid": true,
  "grid_format": "png",
  "grid_extended_filename": false,
  "grid_only_if_multiple": true,
  "n_rows": -1,
  "jpeg_quality": 80,
  "export_for_4chan": true,
  "enable_pnginfo": true,
  "add_model_hash_to_info": false,
  "enable_emphasis": true,
  "save_txt": false,
  "ESRGAN_tile": 192,
  "ESRGAN_tile_overlap": 8,
  "random_artist_categories": [],
  "upscale_at_full_resolution_padding": 16,
  "show_progressbar": true,
  "show_progress_every_n_steps": 7,
  "multiple_tqdm": true,
  "face_restoration_model": null,
  "code_former_weight": 0.5,
  "save_images_before_face_restoration": false,
  "face_restoration_unload": false,
  "interrogate_keep_models_in_memory": false,
  "interrogate_use_builtin_artists": true,
  "interrogate_clip_num_beams": 1,
  "interrogate_clip_min_length": 24,
  "interrogate_clip_max_length": 48,
  "interrogate_clip_dict_limit": 1500.0,
  "samples_filename_pattern": "",
  "directories_filename_pattern": "",
  "save_selected_only": false,
  "filter_nsfw": false,
  "img2img_color_correction": false,
  "img2img_fix_steps": false,
  "enable_quantization": false,
  "enable_batch_seeds": true,
  "memmon_poll_rate": 8,
  "sd_model_checkpoint": null
}
							
								
								
									
services/AUTOMATIC1111/info.py (new file, 14 lines)
@@ -0,0 +1,14 @@
import sys
from pathlib import Path

file = Path(sys.argv[1])
file.write_text(
  file.read_text()\
  .replace('    return demo', """
    with demo:
        gr.Markdown(
          'Created by [AUTOMATIC1111 / stable-diffusion-webui-docker](https://github.com/AbdBarho/stable-diffusion-webui-docker/)'
        )
    return demo
""", 1)
)
							
								
								
									
services/AUTOMATIC1111/mount.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/bin/bash

set -e

declare -A MODELS

MODELS["${WORKDIR}/models/ldm/stable-diffusion-v1/model.ckpt"]=model.ckpt
MODELS["${ROOT}/GFPGANv1.3.pth"]=GFPGANv1.3.pth

MODELS_DIR=/cache/models

for path in "${!MODELS[@]}"; do
  name=${MODELS[$path]}
  base=$(dirname "${path}")
  from_path="${MODELS_DIR}/${name}"
  if test -f "${from_path}"; then
    mkdir -p "${base}" && ln -sf "${from_path}" "${path}" && echo "Mounted ${name}"
  else
    echo "Skipping ${name}"
  fi
done

# force realesrgan cache
rm -rf /opt/conda/lib/python3.8/site-packages/realesrgan/weights
ln -s -T "${MODELS_DIR}" /opt/conda/lib/python3.8/site-packages/realesrgan/weights

# force facexlib cache
mkdir -p /cache/weights/ ${WORKDIR}/gfpgan/
ln -sf /cache/weights/ ${WORKDIR}/gfpgan/
# code former cache
rm -rf ${ROOT}/repositories/CodeFormer/weights/CodeFormer ${ROOT}/repositories/CodeFormer/weights/facelib
ln -sf -T /cache/weights ${ROOT}/repositories/CodeFormer/weights/CodeFormer
ln -sf -T /cache/weights ${ROOT}/repositories/CodeFormer/weights/facelib

mkdir -p /cache/torch /cache/transformers /cache/weights /cache/models /cache/custom-models
							
								
								
									
services/download/Dockerfile (new file, 6 lines)
@@ -0,0 +1,6 @@
FROM bash:alpine3.15

RUN apk add parallel aria2
COPY . /docker
RUN chmod +x /docker/download.sh
ENTRYPOINT ["/docker/download.sh"]
							
								
								
									
services/download/checksums.sha256 (new file, 6 lines)
@@ -0,0 +1,6 @@
fe4efff1e174c627256e44ec2991ba279b3816e364b49f9be2abc0b3ff3f8556  /cache/models/model.ckpt
c953a88f2727c85c3d9ae72e2bd4846bbaf59fe6972ad94130e23e7017524a70  /cache/models/GFPGANv1.3.pth
4fa0d38905f75ac06eb49a7951b426670021be3018265fd191d2125df9d682f1  /cache/models/RealESRGAN_x4plus.pth
f872d837d3c90ed2e05227bed711af5671a6fd1c9f7d7e91c911a61f155e99da  /cache/models/RealESRGAN_x4plus_anime_6B.pth
c209caecac2f97b4bb8f4d726b70ac2ac9b35904b7fc99801e1f5e61f9210c13  /cache/models/LDSR.ckpt
9d6ad53c5dafeb07200fb712db14b813b527edd262bc80ea136777bdb41be2ba  /cache/models/LDSR.yaml
							
								
								
									
services/download/download.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

set -Eeuo pipefail

mkdir -p /cache/torch /cache/transformers /cache/weights /cache/models /cache/custom-models

echo "Downloading, this might take a while..."

aria2c --input-file /docker/links.txt --dir /cache/models --continue

echo "Checking SHAs..."

parallel --will-cite -a /docker/checksums.sha256 "echo -n {} | sha256sum -c"
							
								
								
									
services/download/links.txt (new file, 12 lines)
@@ -0,0 +1,12 @@
https://www.googleapis.com/storage/v1/b/aai-blog-files/o/sd-v1-4.ckpt?alt=media
  out=model.ckpt
https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth
  out=GFPGANv1.3.pth
https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth
  out=RealESRGAN_x4plus.pth
https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth
  out=RealESRGAN_x4plus_anime_6B.pth
https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1
  out=LDSR.yaml
https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1
  out=LDSR.ckpt
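A hedged sketch of how the download service ties together: aria2c fetches every URL in links.txt into /cache/models (the ./cache bind mount from the compose file), and the checksums above are then verified with sha256sum. The invocation below is an assumption based on the profile name, not part of the diff:

```bash
# One-off run of the download profile; files land in ./cache/models on the host.
docker compose --profile download up --build
# Afterwards, ./cache/models should contain model.ckpt, GFPGANv1.3.pth,
# RealESRGAN_x4plus.pth, RealESRGAN_x4plus_anime_6B.pth, LDSR.ckpt and LDSR.yaml.
```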
							
								
								
									
services/hlky/Dockerfile (new file, 62 lines)
@@ -0,0 +1,62 @@
# syntax=docker/dockerfile:1

FROM continuumio/miniconda3:4.12.0

SHELL ["/bin/bash", "-ceuxo", "pipefail"]

ENV DEBIAN_FRONTEND=noninteractive

RUN conda install python=3.8.5 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y

RUN apt-get update && apt install fonts-dejavu-core rsync gcc -y && apt-get clean


RUN <<EOF
git config --global http.postBuffer 1048576000
git clone https://github.com/sd-webui/stable-diffusion-webui.git stable-diffusion
cd stable-diffusion
git reset --hard 7623a5734740025d79b710f3744bff9276e1467b
conda env update --file environment.yaml -n base
conda clean -a -y
EOF

# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step
ARG BRANCH=master
# ARG SHA=833a91047df999302f699637768741cecee9c37b
# ARG BRANCH=dev
ARG SHA=17748cbc9c34df44d0381c42e4f0fe1903089438
RUN <<EOF
cd stable-diffusion
git fetch
git checkout ${BRANCH}
git reset --hard ${SHA}
conda env update --file environment.yaml -n base
conda clean -a -y
EOF

RUN pip uninstall transformers -y && pip install -U --no-cache-dir pyperclip transformers==4.22

# Latent diffusion
RUN <<EOF
git clone --depth 1 https://github.com/Hafiidz/latent-diffusion.git
cd latent-diffusion
# hacks all the way down
mv ldm ldm_latent &&
sed -i -- 's/from ldm/from ldm_latent/g' *.py
# dont forget to update the yaml!!
EOF


# add info
COPY . /docker/
RUN python /docker/info.py /stable-diffusion/frontend/frontend.py && chmod +x /docker/mount.sh

WORKDIR /stable-diffusion
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch PYTHONPATH="${PYTHONPATH}:/stable-diffusion" CLI_ARGS=""
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD /docker/mount.sh && \
  python3 -u scripts/webui.py --outdir /output --ckpt /cache/models/model.ckpt --ldsr-dir /latent-diffusion --inbrowser ${CLI_ARGS}
# STREAMLIT_SERVER_PORT=7860 python -m streamlit run scripts/webui_streamlit.py
							
								
								
									
services/hlky/info.py (new file, 13 lines)
@@ -0,0 +1,13 @@
import sys
from pathlib import Path

file = Path(sys.argv[1])
file.write_text(
  file.read_text()\
  .replace('<p>For help and advanced usage guides,', """
  <p>
    Created using <a href="https://github.com/AbdBarho/stable-diffusion-webui-docker">stable-diffusion-webui-docker</a>.
  </p>
  <p>For help and advanced usage guides,
""", 1)
)
							
								
								
									
services/hlky/mount.sh (new executable file, 38 lines)
@@ -0,0 +1,38 @@
#!/bin/bash

set -e

declare -A MODELS

ROOT=/stable-diffusion/src

MODELS["${ROOT}/gfpgan/experiments/pretrained_models/GFPGANv1.3.pth"]=GFPGANv1.3.pth
MODELS["${ROOT}/realesrgan/experiments/pretrained_models/RealESRGAN_x4plus.pth"]=RealESRGAN_x4plus.pth
MODELS["${ROOT}/realesrgan/experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth"]=RealESRGAN_x4plus_anime_6B.pth
MODELS["/latent-diffusion/experiments/pretrained_models/model.ckpt"]=LDSR.ckpt
# MODELS["/latent-diffusion/experiments/pretrained_models/project.yaml"]=LDSR.yaml

MODELS_DIR=/cache/models

for path in "${!MODELS[@]}"; do
  name=${MODELS[$path]}
  base=$(dirname "${path}")
  from_path="${MODELS_DIR}/${name}"
  if test -f "${from_path}"; then
    mkdir -p "${base}" && ln -sf "${from_path}" "${path}" && echo "Mounted ${name}"
  else
    echo "Skipping ${name}"
  fi
done

# hack for latent-diffusion
if test -f "${MODELS_DIR}/LDSR.yaml"; then
  sed 's/ldm\./ldm_latent\./g' "${MODELS_DIR}/LDSR.yaml" >/latent-diffusion/experiments/pretrained_models/project.yaml
fi

# force facexlib cache
mkdir -p /cache/weights/ /stable-diffusion/gfpgan/
ln -sf /cache/weights/ /stable-diffusion/gfpgan/

# streamlit config
ln -sf /docker/webui_streamlit.yaml /stable-diffusion/configs/webui/webui_streamlit.yaml
							
								
								
									
services/hlky/webui_streamlit.yaml (new file, 155 lines)
@@ -0,0 +1,155 @@
# UI defaults configuration file. It is automatically loaded if located at configs/webui/webui_streamlit.yaml.
general:
  gpu: 0
  outdir: /outputs

  default_model: "Stable Diffusion v1.4"
  default_model_config: "configs/stable-diffusion/v1-inference.yaml"
  default_model_path: "/cache/models/model.ckpt"
  fp:
    name:
  GFPGAN_dir: "./src/gfpgan"
  RealESRGAN_dir: "./src/realesrgan"
  RealESRGAN_model: "RealESRGAN_x4plus"
  outdir_txt2img: /outputs/txt2img-samples
  outdir_img2img: /outputs/img2img-samples
  gfpgan_cpu: False
  esrgan_cpu: False
  extra_models_cpu: False
  extra_models_gpu: False
  save_metadata: True
  save_format: "png"
  skip_grid: False
  skip_save: False
  grid_format: "jpg:95"
  n_rows: -1
  no_verify_input: False
  no_half: False
  use_float16: False
  precision: "autocast"
  optimized: False
  optimized_turbo: True
  optimized_config: "optimizedSD/v1-inference.yaml"
  update_preview: True
  update_preview_frequency: 5

txt2img:
  prompt:
  height: 512
  width: 512
  cfg_scale: 7.5
  seed: ""
  batch_count: 1
  batch_size: 1
  sampling_steps: 30
  default_sampler: "k_euler"
  separate_prompts: False
  update_preview: True
  update_preview_frequency: 5
  normalize_prompt_weights: True
  save_individual_images: True
  save_grid: True
  group_by_prompt: True
  save_as_jpg: False
  use_GFPGAN: False
  use_RealESRGAN: False
  RealESRGAN_model: "RealESRGAN_x4plus"
  variant_amount: 0.0
  variant_seed: ""
  write_info_files: True

txt2vid:
  default_model: "CompVis/stable-diffusion-v1-4"
  custom_models_list:
    [
      "CompVis/stable-diffusion-v1-4",
      "naclbit/trinart_stable_diffusion_v2",
      "hakurei/waifu-diffusion",
      "osanseviero/BigGAN-deep-128",
    ]
  prompt:
  height: 512
  width: 512
  cfg_scale: 7.5
  seed: ""
  batch_count: 1
  batch_size: 1
  sampling_steps: 30
  num_inference_steps: 200
  default_sampler: "k_euler"
  scheduler_name: "klms"
  separate_prompts: False
  update_preview: True
  update_preview_frequency: 5
  dynamic_preview_frequency: True
  normalize_prompt_weights: True
  save_individual_images: True
  save_video: True
  group_by_prompt: True
  write_info_files: True
  do_loop: False
  save_as_jpg: False
  use_GFPGAN: False
  use_RealESRGAN: False
  RealESRGAN_model: "RealESRGAN_x4plus"
  variant_amount: 0.0
  variant_seed: ""
  beta_start: 0.00085
  beta_end: 0.012
  beta_scheduler_type: "linear"
  max_frames: 1000

img2img:
  prompt:
  sampling_steps: 30
  # Adding an int to toggles enables the corresponding feature.
  # 0: Create prompt matrix (separate multiple prompts using |, and get all combinations of them)
  # 1: Normalize Prompt Weights (ensure sum of weights add up to 1.0)
  # 2: Loopback (use images from previous batch when creating next batch)
  # 3: Random loopback seed
  # 4: Save individual images
  # 5: Save grid
  # 6: Sort samples by prompt
  # 7: Write sample info files
  # 8: jpg samples
  # 9: Fix faces using GFPGAN
  # 10: Upscale images using Real-ESRGAN
  sampler_name: "k_euler"
  denoising_strength: 0.45
  # 0: Keep masked area
  # 1: Regenerate only masked area
  mask_mode: 0
  mask_restore: False
  # 0: Just resize
  # 1: Crop and resize
  # 2: Resize and fill
  resize_mode: 0
  # Leave blank for random seed:
  seed: ""
  ddim_eta: 0.0
  cfg_scale: 7.5
  batch_count: 1
  batch_size: 1
  height: 512
  width: 512
  # Textual inversion embeddings file path:
  fp: ""
  loopback: True
  random_seed_loopback: True
  separate_prompts: False
  update_preview: True
  update_preview_frequency: 5
  normalize_prompt_weights: True
  save_individual_images: True
  save_grid: True
  group_by_prompt: True
  save_as_jpg: False
  use_GFPGAN: False
  use_RealESRGAN: False
  RealESRGAN_model: "RealESRGAN_x4plus"
  variant_amount: 0.0
  variant_seed: ""
  write_info_files: True

gfpgan:
  strength: 100
							
								
								
									
services/lstein/Dockerfile (new file, 54 lines)
@@ -0,0 +1,54 @@
# syntax=docker/dockerfile:1

FROM continuumio/miniconda3:4.12.0

SHELL ["/bin/bash", "-ceuxo", "pipefail"]

ENV DEBIAN_FRONTEND=noninteractive

# now it requires python3.9
RUN conda install python=3.9 && conda clean -a -y
RUN conda install pytorch==1.11.0 torchvision==0.12.0 cudatoolkit=11.3 -c pytorch && conda clean -a -y

RUN apt-get update && apt install fonts-dejavu-core rsync gcc -y && apt-get clean


RUN <<EOF
git clone https://github.com/lstein/stable-diffusion.git
cd stable-diffusion
git reset --hard e994073b5bdfa3c77313681c5944be1544eb65b6
sed -i -- 's/python=3.8.5/python=3.9/g' environment.yaml
conda env update --file environment.yaml -n base
conda clean -a -y
EOF


ARG BRANCH=development SHA=50d607ffea3734072a80e38b09ba0c3758af5d40
# ARG BRANCH=main SHA=89da371f4841f7e05da5a1672459d700c3920784
RUN <<EOF
cd stable-diffusion
git fetch
git reset --hard
git checkout ${BRANCH}
git reset --hard ${SHA}
conda env update --file environment.yaml -n base
conda clean -a -y
EOF

RUN pip uninstall opencv-python -y && pip install --prefer-binary --force-reinstall --no-cache-dir opencv-python-headless transformers==4.22

COPY . /docker/
RUN <<EOF
python3 /docker/info.py /stable-diffusion/static/dream_web/index.html
chmod +x /docker/mount.sh
sed -i -- 's/outputs\//\/output/g' /stable-diffusion/backend/server.py
EOF


ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch PRELOAD=false CLI_ARGS=""
WORKDIR /stable-diffusion
EXPOSE 7860

CMD /docker/mount.sh && \
  python3 -u scripts/dream.py --outdir /output --web --host 0.0.0.0 --port 7860 ${CLI_ARGS}
  #python3 -u backend/server.py
							
								
								
									
services/lstein/info.py (new file, 10 lines)
@@ -0,0 +1,10 @@
import sys
from pathlib import Path

file = Path(sys.argv[1])
file.write_text(
  file.read_text()\
  .replace('GitHub site</a>', """
  GitHub site</a>, Deployed with <a href="https://github.com/AbdBarho/stable-diffusion-webui-docker/">stable-diffusion-webui-docker</a>
""", 1)
)
							
								
								
									
services/lstein/mount.sh (new executable file, 30 lines)
@@ -0,0 +1,30 @@
#!/bin/bash

set -eu

ROOT=/stable-diffusion

mkdir -p "${ROOT}/models/ldm/stable-diffusion-v1/"
ln -sf /cache/models/model.ckpt "${ROOT}/models/ldm/stable-diffusion-v1/model.ckpt"

if test -f /cache/models/GFPGANv1.3.pth; then
  base="${ROOT}/src/gfpgan/experiments/pretrained_models/"
  mkdir -p "${base}"
  ln -sf /cache/models/GFPGANv1.3.pth "${base}/GFPGANv1.3.pth"
  echo "Mounted GFPGANv1.3.pth"
fi

# facexlib
FACEX_WEIGHTS=/opt/conda/lib/python3.9/site-packages/facexlib/weights

rm -rf "${FACEX_WEIGHTS}"
mkdir -p /cache/weights
ln -sf -T /cache/weights "${FACEX_WEIGHTS}"

REALESRGAN_WEIGHTS=/opt/conda/lib/python3.9/site-packages/realesrgan/weights
rm -rf "${REALESRGAN_WEIGHTS}"
ln -sf -T /cache/weights "${REALESRGAN_WEIGHTS}"

if "${PRELOAD}" == "true"; then
  python3 -u scripts/preload_models.py
fi
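The compose file sets PRELOAD=false for the lstein service, so the preload_models.py step above is skipped by default. A hedged sketch of enabling it for a one-off run (the -e override is a Docker Compose v2 feature and an assumption on my part, not part of this diff):

```bash
# Run the lstein service once with model preloading enabled.
docker compose --profile lstein run --service-ports -e PRELOAD=true lstein
```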