Merge pull request #4 from lutzapps/docker-bake-and-docs

Docker bake and docs
Madiator2011 2024-11-03 19:03:36 +01:00, committed by GitHub
commit 45b3e1f247
8 changed files with 178 additions and 102 deletions

View file

@@ -24,10 +24,18 @@
"envFiles": ["${workspaceFolder}/.env"], // pass additional env-vars (hf_token, civitai token, ssh public-key) from ".env" file to container
"env": { // these ENV vars go into the docker container to support local debugging
    "LOCAL_DEBUG": "True", // change app to localhost Urls and local Websockets (unsecured)
    // if you do NOT want this behaviour, then set `LOCAL_DEBUG=False` [default],
    // which is the same as NOT setting this ENV var at all.
    "FLASK_APP": "app/app.py",
    "FLASK_ENV": "development", // changed from "production" [default],
    // only needed when "LOCAL_DEBUG=True", otherwise this ENV var can be omitted
    "GEVENT_SUPPORT": "True" // gevent monkey-patching is being used, enable gevent support in the debugger,
    // only needed when "LOCAL_DEBUG=True", otherwise this ENV var can be omitted
    // "FLASK_DEBUG": "0" // "1" allows debugging in Chrome, but then the VSCode debugger does not work; "0" is the [default], which is the same as NOT setting this ENV var at all
},
"volumes": [
    {

View file

@@ -1,7 +1,7 @@
# lutzapps - use the specified CUDA version
ARG BASE_IMAGE
FROM ${BASE_IMAGE:-madiator2011/better-base:cuda12.4} AS base
#FROM madiator2011/better-base:cuda12.4 AS base

# lutzapps - prepare for local development and debugging
# needed to change the ORDER of "apt-get commands" and move the "update-alternatives" for python3
@@ -10,8 +10,10 @@
# Install Python 3.11, set it as default, and remove Python 3.10
RUN apt-get update && \
    # removed: 2x git, nginx, ffmpeg (as they are already installed with the base image)
    # added: zip (for easier folder compression)
    apt-get install -y python3.11 python3.11-venv python3.11-dev python3.11-distutils aria2 zip \
    pv rsync zstd libtcmalloc-minimal4 bc && \
    apt-get remove -y python3.10 python3.10-minimal libpython3.10-minimal libpython3.10-stdlib && \
    update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 && \
    update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 && \
@@ -63,9 +65,6 @@ EXPOSE 7222
# lutzapps - added a "app/tests" folder with script and testdata and readme file
# lutzapps - grouped NGINX files in a sub-folder for cleaner view
# lutzapps - removed: COPY nginx/README.md /usr/share/nginx/html/README.md

# NGINX configuration
COPY nginx/nginx.conf /etc/nginx/nginx.conf
COPY nginx/readme.html /usr/share/nginx/html/readme.html
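
The `BASE_IMAGE` build argument introduced above can also be overridden when building the Dockerfile directly instead of via `docker buildx bake`. A sketch only; the extra `scripts` build context mirrors what `docker-bake.hcl` passes, and the paths may differ in your checkout:

```bash
# build with the ARG default base image (cuda12.4)
docker buildx build \
  --build-context scripts=../../container-template \
  -t madiator2011/better-launcher:dev .

# or override the base image, e.g. for the CUDA 12.1 variant
docker buildx build \
  --build-context scripts=../../container-template \
  --build-arg BASE_IMAGE=madiator2011/better-base:cuda12.1 \
  -t madiator2011/better-launcher:dev-cuda121 .
```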

View file

@@ -1,99 +1,144 @@
# better-ai-launcher

Better AI Launcher Image for RunPod and local development.

## RunPod Better App Manager

Welcome to the RunPod Better App Manager!
This image allows you to easily manage and run various AI applications on your RunPod instance.

### Features

- Easy installation of pre-configured AI applications.
- Start, stop, and monitor running applications.
- View application logs in real-time.
- Force kill applications if needed.
- Download Manager for **HuggingFace** and **CivitAI** with `token` support for private and gated models.
- Shared Models Management for **downloading and sharing all models of all types to all installed AI applications**!

### Supported Applications

- Better Comfy UI
- Better Forge
- Better A1111
- more apps coming soon (AI trainers such as `Kohya` and `ai-toolkit` are planned)

### Getting Started

- Access the Better App Manager interface through your RunPod instance URL.
- Install the desired application by clicking the **Install** button.
- Once installed, use the **Start** button to launch the application.
- Access the running application using the **Open App** button.

### Troubleshooting

If you encounter any issues:

- Check the application logs for error messages.
- Try stopping and restarting the application.
- Use the `Force Kill` option if an application becomes unresponsive.
- Refer to the RunPod documentation or contact support for further assistance.

For more detailed information and guides, please visit the <a href="https://docs.runpod.io/">RunPod Documentation</a>.

Part of the `madiator-docker-runpod` family of **RunPod Docker Containers for RunPod**.

## Github

https://github.com/kodxana/madiator-docker-runpod<br>
found under the directory `official-templates/better-ai-launcher`

### Build Vars

IMAGE_BASE=madiator2011/better-launcher<br>
IMAGE_TAG=dev

## Build Options

To build with the default options, run `docker buildx bake`; to build a specific target, run `docker buildx bake <target>`.
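
For example, with the two targets defined in this repository's `docker-bake.hcl`:

```bash
# build all default targets (CUDA 12.4 and CUDA 12.1 images)
docker buildx bake

# build only the CUDA 12.4 image
docker buildx bake better-ai-launcher-cuda124

# build only the CUDA 12.1 image
docker buildx bake better-ai-launcher-cuda121
```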
## Ports (System)

- 22/tcp (SSH)
- 7222/http (App-Manager)
- 7777/http (VSCode-Server)
- 8181/http (File-Browser)

## Ports (Apps)

- 3000/http (ComfyUI)
- 7862/http (Forge) aka Stable-Diffusion-WebUI-Forge
- 7863/http (A1111) aka Stable-Diffusion-WebUI

*coming soon*

- 7864/http (Kohya-ss)
- 6006/http (Tensorboard)

## ENV Vars (System)

These ENV vars go into the docker container to support local debugging;<br>
see also the explanation in `".vscode/tasks.json"` or `"docker-compose.debug.yml"`. A `docker run` sketch follows the list below.

- LOCAL_DEBUG=True

  Changes the app to localhost Urls and local Websockets (unsecured) for local debugging.<br>
  **TODO**: you also need to set up a `bind workspace` in `".vscode/tasks.json"` or `"docker-compose.debug.yml"`.<br>
  If you do **NOT** want this behaviour, set `LOCAL_DEBUG=False` [default],<br>
  which is the same as NOT setting this ENV var at all.

- FLASK_ENV=development

  Changed from "`production`" [default].<br>
  Only needed when `LOCAL_DEBUG=True`, otherwise this ENV var can be omitted.

- GEVENT_SUPPORT=True

  gevent monkey-patching is being used, so enable gevent support in the debugger.<br>
  Only needed when `LOCAL_DEBUG=True`, otherwise this ENV var can be omitted.

- FLASK_DEBUG=0

  "1" allows debugging in Chrome, but then the VSCode debugger will not work.<br>
  "0" is the [default], which is the same as NOT setting this ENV var at all.
### APP specific Vars

- DISABLE_PULLBACK_MODELS=False

  The default is that app model files which are found locally (in only one app) get automatically `pulled back` into the `"/workspace/shared_models"` folder.<br>
  From there they are re-linked not only into their own `pulled-back` model-type folder, but also into all other corresponding app model-type folders.<br>
  So the `pulled-back` model is automatically shared to all installed apps (see the sketch below).<br>
  If you do **NOT** want this behaviour, set `DISABLE_PULLBACK_MODELS=True`;<br>
  otherwise set `DISABLE_PULLBACK_MODELS=False` [default], which is the same as NOT setting this ENV var at all.
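
Illustratively, the pull-back amounts to something like the following (sketch only; the app and model-type folder names below are hypothetical examples, and the launcher performs these steps automatically):

```bash
# a model found locally in only one app is moved ("pulled back") into the shared folder ...
mv /workspace/ComfyUI/models/checkpoints/model.safetensors /workspace/shared_models/ckpt/

# ... then linked back into its own model-type folder ...
ln -s /workspace/shared_models/ckpt/model.safetensors /workspace/ComfyUI/models/checkpoints/model.safetensors

# ... and into the corresponding model-type folder of every other installed app
ln -s /workspace/shared_models/ckpt/model.safetensors /workspace/stable-diffusion-webui-forge/models/Stable-diffusion/model.safetensors
```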
## ENV Vars (User and Secret Tokens)

**TODO: rename the file `"env.txt"` to `".env"` and adjust the ENV vars to your personal settings (see the sketch after this list).**

- PUBLIC_KEY=ssh-ed25519 xxx...xxx usermail@domain.com

  Your `PUBLIC ssh-key`.<br>
  **Note**: make sure to use the **full line content** from your `"*.pub"` key file!

- HF_TOKEN=hf_xxx...xxx

  Your `HuggingFace` token.<br>
  Can be a `READ` scoped token for downloading your `private` models, or `gated models` such as `Flux.1 Dev` or META's `Llama LLM models`.<br>
  The HF_TOKEN needs to be a `READ/WRITE` scoped token if you also plan to **UPLOAD** models to `HuggingFace` later, when we have Trainer Apps like `Kohya` or `ai-toolkit`.

- CIVITAI_API_TOKEN=xxx...xxx

  Your `CivitAI` API token.<br>
  **Note**: CivitAI currently only provides a `FULL` user token, acting as `you`, so be careful how you set up this token and with whom you share it!
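
For local use, the TODO above boils down to something like this (sketch; all values are placeholders):

```bash
# copy (or rename) the template and fill in your own values
cp env.txt .env

# then edit .env:
#   PUBLIC_KEY=ssh-ed25519 xxx...xxx usermail@domain.com
#   HF_TOKEN=hf_xxx...xxx
#   CIVITAI_API_TOKEN=xxx...xxx
```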
**SECURITY TIP:**

These three user-specific and **security-sensitive environment vars** should be stored as RUNPOD **`SECRETS`** and referenced directly in your POD Template in the format `{{ RUNPOD_SECRET_MYENVVAR }}`.

From https://docs.runpod.io/pods/templates/secrets:

You can reference your Secret directly in the Environment Variables section of your Pod template. To reference your Secret, reference its key appended to the `RUNPOD_SECRET_` prefix.

That means, for this template/image, you should use these formats to pass the above ENV vars into the docker container:

- `{{ RUNPOD_SECRET_PUBLIC_KEY }}`
- `{{ RUNPOD_SECRET_HF_TOKEN }}`
- `{{ RUNPOD_SECRET_CIVITAI_API_TOKEN }}`

(c) 2024 RunPod Better App Manager. Created by Madiator2011.

View file

@@ -1,6 +1,6 @@
#!/bin/bash

# please use the "./README-SHARED_MODELS.txt" before you extract these TARs!!!

# Testcase #1
tar -xzf /app/tests/testdata_shared_models_link.tar.gz /workspace

View file

@@ -1,16 +1,28 @@
group "default" {
  targets = [
    "better-ai-launcher-cuda124",
    "better-ai-launcher-cuda121"
  ]
}

target "better-ai-launcher-cuda124" {
  dockerfile = "Dockerfile"
  args = {
    BASE_IMAGE = "madiator2011/better-base:cuda12.4",
  }
  contexts = {
    scripts = "../../container-template"
  }
  tags = ["madiator2011/better-launcher:dev"]
}

target "better-ai-launcher-cuda121" {
  dockerfile = "Dockerfile"
  args = {
    BASE_IMAGE = "madiator2011/better-base:cuda12.1",
  }
  contexts = {
    scripts = "../../container-template"
    # removed: proxy and logo contexts
  }
  tags = ["madiator2011/better-launcher:dev-cuda121"]
}
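
To check how these targets resolve (image tags, build args, contexts) before building, the bake definition can be inspected; a sketch:

```bash
# print the fully resolved build definition for one target
docker buildx bake --print better-ai-launcher-cuda124
```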

View file

@@ -14,10 +14,18 @@ services:
      - .env # pass additional env-vars (hf_token, civitai token, ssh public-key) from ".env" file to container
    environment:
      - LOCAL_DEBUG=True # change app to localhost Urls and local Websockets (unsecured)
      # if you do NOT want this behaviour, then set `LOCAL_DEBUG=False` [default],
      # which is the same as NOT setting this ENV var at all.
      - FLASK_APP=app/app.py
      - FLASK_ENV=development # changed from "production" [default],
      # only needed when "LOCAL_DEBUG=True", otherwise this ENV var can be omitted
      - GEVENT_SUPPORT=True # gevent monkey-patching is being used, enable gevent support in the debugger,
      # only needed when "LOCAL_DEBUG=True", otherwise this ENV var can be omitted
      #- "FLASK_DEBUG": "0" # "1" allows debugging in Chrome, but then the VSCode debugger does not work; "0" is the [default], which is the same as NOT setting this ENV var at all
    volumes:
      - ./app:/app:rw
      - ${HOME}/Projects/Docker/madiator:/workspace:rw # TODO: create the below folder before you run!

View file

@@ -22,12 +22,23 @@ DISABLE_PULLBACK_MODELS=False
# but also will be linked back into all other corresponding app model-type folders.
# So the "pulled-back" model is automatically shared to all installed apps.
#
# if you do NOT want this behaviour, then set DISABLE_PULLBACK_MODELS=True,
# otherwise set DISABLE_PULLBACK_MODELS=False [default], which is the same as NOT setting this ENV var at all.

### USER specific Vars and Secrets (Tokens) - TODO: adjust this for your personal settings ###

PUBLIC_KEY=ssh-ed25519 XXX...XXX usermail@domain.com
# Note: make sure to use the **full line content** from your "*.pub" key file!

HF_TOKEN=hf_XXX...XXX
# Your `HuggingFace` token.
# Can be a `READ` scoped token for downloading your "private" models, or "gated models" as e.g. `Flux.1 Dev` or META's `Llama LLM models`.
# The HF_TOKEN needs to be a `READ/WRITE` scoped token, if you also plan to UPLOAD models to "HuggingFace" later,
# when we have Trainer Apps like "Kohya" or "ai-toolkit".

CIVITAI_API_TOKEN=XXX.XXX
# Your `CivitAI` API token.
# Note: CivitAI currently only provides a `FULL` user token, acting as `you`,
# so be careful how you set up this token and with whom you share it!

### RUNPOD specific Vars ###
RUNPOD_PUBLIC_IP=127.0.0.1

View file

@@ -1,7 +0,0 @@
## Build Options
To build with default options, run `docker buildx bake`, to build a specific target, run `docker buildx bake <target>`.
## Ports
- 22/tcp (SSH)