Mirror of https://github.com/mealie-recipes/mealie.git
	feat: improve idle memory usage (#1758)
* health check as python script
* install crfpp model via python
* drop curl from final container
* use uvicorn by default w/ gunicorn as opt-in
* recommend setting mem limit for container
Dockerfile (10 lines changed)
@@ -95,10 +95,9 @@ ENV TESTING=false
 ARG COMMIT
 ENV GIT_COMMIT_HASH=$COMMIT

-# curl for used by healthcheck
 RUN apt-get update \
     && apt-get install --no-install-recommends -y \
-    curl gosu \
+    gosu \
     tesseract-ocr-all \
     && apt-get autoremove \
     && rm -rf /var/lib/apt/lists/*
@@ -107,9 +106,6 @@ RUN apt-get update \
 COPY --from=builder-base $POETRY_HOME $POETRY_HOME
 COPY --from=builder-base $PYSETUP_PATH $PYSETUP_PATH

-# copy CRF++ Binary from crfpp
-ENV CRF_MODEL_URL=https://github.com/mealie-recipes/nlp-model/releases/download/v1.0.0/model.crfmodel
-
 ENV LD_LIBRARY_PATH=/usr/local/lib
 COPY --from=crfpp /usr/local/lib/ /usr/local/lib
 COPY --from=crfpp /usr/local/bin/crf_learn /usr/local/bin/crf_learn
@@ -130,14 +126,14 @@ RUN . $VENV_PATH/bin/activate && poetry install -E pgsql --no-dev
 WORKDIR /

 # Grab CRF++ Model Release
-RUN curl -L0 $CRF_MODEL_URL --output $MEALIE_HOME/mealie/services/parser_services/crfpp/model.crfmodel
+RUN python $MEALIE_HOME/mealie/scripts/install_model.py

 VOLUME [ "$MEALIE_HOME/data/" ]
 ENV APP_PORT=9000

 EXPOSE ${APP_PORT}

-HEALTHCHECK CMD curl -f http://localhost:${APP_PORT}/docs || exit 1
+HEALTHCHECK CMD python $MEALIE_HOME/mealie/scripts/healthcheck.py || exit 1

 RUN chmod +x $MEALIE_HOME/mealie/run.sh
 ENTRYPOINT $MEALIE_HOME/mealie/run.sh

@@ -3,6 +3,10 @@ services:
   mealie-frontend:
     container_name: mealie-frontend
     image: mealie-frontend:dev
+    deploy:
+      resources:
+        limits:
+          memory: 500M
     build:
       context: ./frontend
       dockerfile: Dockerfile
@@ -34,6 +38,10 @@ services:
       - THEME_DARK_ERROR=#EF5350
   mealie:
     container_name: mealie-api
+    deploy:
+      resources:
+        limits:
+          memory: 1000M
     build:
       context: ./
       target: production
@@ -57,6 +65,7 @@ services:

       # =====================================
       # Web Concurrency
+      WEB_GUNICORN: true
       WORKERS_PER_CORE: 0.5
       MAX_WORKERS: 1
       WEB_CONCURRENCY: 1

@@ -54,6 +54,7 @@ Changing the webworker settings may cause unforeseen memory leak issues with Mea

 | Variables        | Default | Description                                                                                                                        |
 | ---------------- | :-----: | ---------------------------------------------------------------------------------------------------------------------------------- |
+| WEB_GUNICORN     |  false  | Enables Gunicorn to manage Uvicorn so multiple worker processes can be used                                                        |
 | WORKERS_PER_CORE |    1    | Set the number of workers to the number of CPU cores multiplied by this value (Value \* CPUs). More info [here][workers_per_core]  |
 | MAX_WORKERS      |    1    | Set the maximum number of workers to use. Default is not set meaning unlimited. More info [here][max_workers]                      |
 | WEB_CONCURRENCY  |    1    | Override the automatic definition of number of workers. More info [here][web_concurrency]                                          |

@@ -25,6 +25,10 @@ services:
   mealie-api:
     image: hkotel/mealie:api-v1.0.0beta-4
     container_name: mealie-api
+    deploy:
+      resources:
+        limits:
+          memory: 1000M # (4)
     depends_on:
       - postgres
     volumes:
@@ -66,3 +70,4 @@ volumes:
     <br/> <br/> **Note** that both containers must be on the same docker-network for this to work.
 2.  To access the mealie interface you only need to expose port 3000 on the mealie-frontend container. Here we expose port 9925 on the host, feel free to change this to any port you like.
 3.  Mounting the data directory to the frontend is now required to access the images/assets directory. This can be mounted read-only. Internally the frontend container runs a Caddy proxy server that serves the requested assets to reduce load on the backend API.
+4.  Setting an explicit memory limit is recommended. Python can pre-allocate more memory than necessary on machines with a lot of RAM, which can cause the container to idle at high memory usage. Setting a memory limit improves idle performance.

@@ -25,6 +25,10 @@ services:
   mealie-api:
     image: hkotel/mealie:api-v1.0.0beta-4
     container_name: mealie-api
+    deploy:
+      resources:
+        limits:
+          memory: 1000M # (4)
     volumes:
       - mealie-data:/app/data/
     environment:
@@ -49,3 +53,4 @@ volumes:
     <br/> <br/> **Note** that both containers must be on the same docker-network for this to work.
 2.  To access the mealie interface you only need to expose port 3000 on the mealie-frontend container. Here we expose port 9925 on the host, feel free to change this to any port you like.
 3.  Mounting the data directory to the frontend is now required to access the images/assets directory. This can be mounted read-only. Internally the frontend container runs a Caddy proxy server that serves the requested assets to reduce load on the backend API.
+4.  Setting an explicit memory limit is recommended. Python can pre-allocate more memory than necessary on machines with a lot of RAM, which can cause the container to idle at high memory usage. Setting a memory limit improves idle performance.

@@ -41,11 +41,6 @@ init() {
     poetry run python /app/mealie/db/init_db.py
 }

-# Migrations
-# TODO
-# Migrations
-# Set Port from ENV Variable
-
 if [ "$ARG1" == "reload" ]; then
     echo "Hot Reload!"

@@ -63,6 +58,11 @@ else
     GUNICORN_PORT=${API_PORT:-9000}

     # Start API
-    # uvicorn mealie.app:app --host 0.0.0.0 --port 9000
-    gunicorn mealie.app:app -b 0.0.0.0:$GUNICORN_PORT -k uvicorn.workers.UvicornWorker -c /app/gunicorn_conf.py --preload
+
+    if [ $WEB_GUNICORN == 'true' ]; then
+        echo "Starting Gunicorn"
+        gunicorn mealie.app:app -b 0.0.0.0:$GUNICORN_PORT -k uvicorn.workers.UvicornWorker -c /app/gunicorn_conf.py --preload
+    else
+        uvicorn mealie.app:app --host 0.0.0.0 --port $GUNICORN_PORT
+    fi
 fi

mealie/scripts/healthcheck.py (new file, 23 lines)
@@ -0,0 +1,23 @@
+import os
+
+import requests
+
+
+def main():
+    port = os.getenv("API_PORT")
+
+    if port is None:
+        port = 9000
+
+    url = f"http://127.0.0.1:{port}/api/app/about"
+
+    r = requests.get(url)
+
+    if r.status_code == 200:
+        exit(0)
+    else:
+        exit(1)
+
+
+if __name__ == "__main__":
+    main()
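
The healthcheck script exits 0 when the /api/app/about endpoint answers with HTTP 200 and 1 otherwise; if the API is not reachable at all, requests.get raises an exception, which also yields a non-zero exit code, so Docker still marks the container unhealthy. A minimal hardening sketch (hypothetical, not part of this commit) would add a request timeout and explicit error handling:

# Hypothetical hardened variant of the healthcheck above (not part of this commit).
# Adds a request timeout and treats connection errors as an unhealthy result
# instead of relying on an unhandled exception for the non-zero exit code.
import os
import sys

import requests


def main() -> int:
    port = os.getenv("API_PORT") or 9000
    url = f"http://127.0.0.1:{port}/api/app/about"

    try:
        r = requests.get(url, timeout=5)  # don't let the Docker healthcheck hang
    except requests.RequestException:
        return 1  # API unreachable -> unhealthy

    return 0 if r.status_code == 200 else 1


if __name__ == "__main__":
    sys.exit(main())

Docker only inspects the exit code, so the HEALTHCHECK instruction in the Dockerfile works unchanged with either version.
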
							
								
								
									
mealie/scripts/install_model.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+import requests
+
+from mealie.services.parser_services import crfpp
+
+MODEL_URL = "https://github.com/mealie-recipes/nlp-model/releases/download/v1.0.0/model.crfmodel"
+
+
+def main():
+    """
+    Install the model into the crfpp directory
+    """
+
+    r = requests.get(MODEL_URL, stream=True, allow_redirects=True)
+    with open(crfpp.MODEL_PATH, "wb") as f:
+        for chunk in r.iter_content(chunk_size=1024):
+            if chunk:
+                f.write(chunk)
+
+
+if __name__ == "__main__":
+    main()
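
The installer streams the released CRF++ model to crfpp.MODEL_PATH in 1 KB chunks, replacing the curl download the Dockerfile used to run at build time. A sketch of a possible variant (hypothetical, not the commit's code) that fails fast on a bad HTTP status and skips the download when the model file is already present:

# Hypothetical variant of the installer above (not part of this commit).
# Fails fast on a bad HTTP status and skips the download if the model file
# already exists, so repeated runs are cheap and build errors surface early.
from pathlib import Path

import requests

from mealie.services.parser_services import crfpp

MODEL_URL = "https://github.com/mealie-recipes/nlp-model/releases/download/v1.0.0/model.crfmodel"


def main():
    model_path = Path(crfpp.MODEL_PATH)
    if model_path.is_file():
        print(f"model already present at {model_path}, skipping download")
        return

    r = requests.get(MODEL_URL, stream=True, allow_redirects=True)
    r.raise_for_status()  # abort the image build if the release asset can't be fetched

    with open(model_path, "wb") as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)


if __name__ == "__main__":
    main()
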