From 763d32abf39158391462d8591cd4d44f917aa3a6 Mon Sep 17 00:00:00 2001 From: lutzapps Date: Thu, 28 Nov 2024 18:21:51 +0100 Subject: [PATCH 1/2] dev-beta-v0.9.0 App Enhancements --- .../better-ai-launcher/.dockerignore | 3 +- .../better-ai-launcher/.vscode/tasks.json | 9 +- .../better-ai-launcher/Dockerfile | 41 +- .../better-ai-launcher/README.md | 171 +- .../better-ai-launcher/app/app.py | 255 +- .../app/templates/index.html | 2795 +++++++++-------- .../app/tests/README-SHARED_MODELS.txt | 198 +- .../app/tests/populate_testdata.sh | 6 +- .../tests/testdata_installed_apps_pull.tar.gz | Bin 5073 -> 495 bytes .../tests/testdata_shared_models_link.tar.gz | Bin 1085 -> 1295 bytes ...estdata_stable-diffusion-webui_pull.tar.gz | Bin 2207 -> 377 bytes .../app/utils/app_configs.py | 635 ++-- .../better-ai-launcher/app/utils/app_utils.py | 1432 ++++++--- .../app/utils/model_utils.py | 157 +- .../app/utils/shared_models.py | 146 +- .../docker-compose.debug.yml | 7 + official-templates/better-ai-launcher/env.txt | 31 +- .../better-ai-launcher/nginx/nginx.conf | 13 + .../better-ai-launcher/nginx/readme.html | 2 +- .../better-ai-launcher/tasks.json | 112 + 20 files changed, 3761 insertions(+), 2252 deletions(-) create mode 100644 official-templates/better-ai-launcher/tasks.json diff --git a/official-templates/better-ai-launcher/.dockerignore b/official-templates/better-ai-launcher/.dockerignore index 566bea3..7309fe5 100644 --- a/official-templates/better-ai-launcher/.dockerignore +++ b/official-templates/better-ai-launcher/.dockerignore @@ -27,5 +27,4 @@ **/obj **/secrets.dev.yaml **/values.dev.yaml -LICENSE -README.md +LICENSE \ No newline at end of file diff --git a/official-templates/better-ai-launcher/.vscode/tasks.json b/official-templates/better-ai-launcher/.vscode/tasks.json index 53cce64..f51df4c 100644 --- a/official-templates/better-ai-launcher/.vscode/tasks.json +++ b/official-templates/better-ai-launcher/.vscode/tasks.json @@ -26,15 +26,22 @@ "LOCAL_DEBUG": 
"True", // change app to localhost Urls and local Websockets (unsecured) // if you NOT want need this behaviour, then set `LOCAL_DEBUG=False` [default], // which is the same as NOT setting this ENV var at all. + + "PYTHONDONTWRITEBYTECODE": 1, + // keep Python from generating .pyc files in the container + // this should however be removed for production as it disables bytecode caching "FLASK_APP": "app/app.py", "FLASK_ENV": "development", // changed from "production" [default], // only needed when "LOCAL_DEBUG=True", otherwise this ENV var can be obmitted - "GEVENT_SUPPORT": "True" // gevent monkey-patching is being used, enable gevent support in the debugger, + "GEVENT_SUPPORT": "True", // gevent monkey-patching is being used, enable gevent support in the debugger, // only needed when "LOCAL_DEBUG=True", otherwise this ENV var can be obmitted + "GIT_PYTHON_TRACE": "full" // enables full logging for the GitPython code, used for cloning the apps, + // bcomfy custom_nodes, and refreshing the apps via git fetch/merge = git pull + // "FLASK_DEBUG": "0" // "1" allows debugging in Chrome, but then VSCode debugger not works, "0" is the [default], which is the same as NOT setting this ENV var at all }, "volumes": [ diff --git a/official-templates/better-ai-launcher/Dockerfile b/official-templates/better-ai-launcher/Dockerfile index 764a7b9..692884e 100644 --- a/official-templates/better-ai-launcher/Dockerfile +++ b/official-templates/better-ai-launcher/Dockerfile @@ -20,7 +20,7 @@ RUN apt-get update && \ ### ---> needed Tools for Installer # removed: 2x git nginx ffmpeg (as they are already installed with the base image) # added: pigz (for parallel execution of TAR files); zip (for easier folder compression) - apt-get install -y aria2 pigz zip pv rsync zstd libtcmalloc-minimal4 bc \ + apt-get install -y aria2 pigz zip pv rsync zstd tree libtcmalloc-minimal4 bc \ # add Python3.11 as system Python version, serving the Python Flask App python3.11 python3.11-venv 
python3.11-dev python3.11-distutils && \ # not remove Python3.10, as we need it for "official" app support (e.g. for kohya_ss VENV) @@ -85,6 +85,9 @@ RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \ python3.11 get-pip.py && \ rm get-pip.py +# set the default python_cmd (and version) for the apps 'ba1111' and 'bforge' default launch script 'webui.sh' +ENV python_cmd=python3.11 + # Set the working directory WORKDIR /app @@ -102,15 +105,33 @@ COPY app . RUN curl -fsSL https://raw.githubusercontent.com/filebrowser/get/master/get.sh | bash # Set environment variables for developent/production -# overwrite this ENV in "tasks.json" docker run "env" section or overwrite in your ".env" file to "development" +# ENV vars alternatively could also stay in two different Dockerfiles: 'Dockerfile-Prod' and 'Dockerfile-Dev') + +### PROD START: +# +# ENV APP_CONFIGS_MANIFEST_URL=https://better.s3.madiator.com/app_configs.json +# +# overwrite this ENV in "tasks.json" docker run "env" section, docker-compose-debug.yml, or overwrite in your ".env" file to "development" ENV FLASK_ENV=production +# +### PROD END +### DEV START: +# +ENV APP_CONFIGS_MANIFEST_URL=https://better-dev.s3.madiator.com/app_configs.json +# +# alternativly add the following ENV vars in "tasks.json" docker run "env" section, docker-compose-debug.yml, +# or populate them in in your ".env" file, instead of setting them here in the Dockerfile +# # gevent monkey-patching is being used, enable gevent support in the debugger with GEVENT_SUPPORT=True -# add this ENV in "tasks.json" docker run "env" section or populate in your ".env" file # ENV GEVENT_SUPPORT=True +# +# keep Python from generating .pyc files in the container +# this should however be removed for production as it disables bytecode caching +# ENV PYTHONDONTWRITEBYTECODE=1 +# +### DEV END -# lutzapps - keep Python from generating .pyc files in the container -ENV PYTHONDONTWRITEBYTECODE=1 # Turns off buffering for easier container logging ENV 
PYTHONUNBUFFERED=1 @@ -128,14 +149,20 @@ EXPOSE 7222 COPY nginx/nginx.conf /etc/nginx/nginx.conf COPY nginx/readme.html /usr/share/nginx/html/readme.html +# HELP configuration ('/static/' is configured be hosting '/app/static/' content in nginx.conf) +COPY README.md /app/static/README.md +COPY README-Development.txt /app/static/README-Development.txt +COPY app/tests/README-SHARED_MODELS.txt /app/static/README-SHARED_MODELS.txt + + # Copy all necessary scripts -COPY --from=scripts start.sh / +#COPY --from=scripts start.sh / # --from=scripts is defined as a "shared" location in "docker-bake.hcl" in the "contexts" dict: # scripts = "../../container-template" # the local "start.sh" is (intentionally) empty # to build all from *one* location, copy "start.sh" here into the project workspace folder first # cp ../../container-template/scripts/start.sh start.sh -#COPY start.sh / +COPY start.sh / COPY pre_start.sh / # lutzapps - add execution flags to added "/app/tests/populate_testdata.sh" diff --git a/official-templates/better-ai-launcher/README.md b/official-templates/better-ai-launcher/README.md index e75ec3d..b9a8ac2 100644 --- a/official-templates/better-ai-launcher/README.md +++ b/official-templates/better-ai-launcher/README.md @@ -8,17 +8,19 @@ This image allows you to easily manage and run various AI applications on your R ### Features - Easy installation of pre-configured AI applications. -- Start, stop, and monitor running applications. +- Start, Stop, Delete, Check, Refresh and monitor running applications. +- Multiple Versions per App can be selected. - View application logs in real-time. - Force kill applications if needed. - Download Manager for **HuggingFace** and **CivitAI** with `token` support for privat and gated models. - Shared Models Management for **Downloading and Sharing all models of all types to all installed AI applications**! 
### Supported Applications -- Better Comfy UI +- Better ComfyUI - Better Forge - Better A1111 -- more Apps coming soon (AI Trainers as `Kohya` and `ai-toolkit` are planned) +- Better Kohya +- more Apps are planned (AI Trainer `ai-toolkit` and `joy-caption-batch` captioner) ### Getting Started - Access the Better App Manager interface through your RunPod instance URL. @@ -36,7 +38,6 @@ If you encounter any issues: For more detailed information and guides, please visit the RunPod Documentation. - Part of the `madiator-docker-runpod` familiy of **RunPod Docker Containers for RunPod** ## Github @@ -60,11 +61,17 @@ BASE_IMAGE=madiator2011/better-base:cuda12.4 ## Ports (Apps) -- 3000/http (ComfyUI) -- 6006/http (Tensorboard [needed by kohya_ss]) -- 7862/http (Forge) aka Stable-Diffiusion-WebUI-Forge -- 7863/http (A1111) aka Stable-Diffiusion-WebUI -- 7864/http (Kohya-ss) with FLUX.1 support +- 3000/http (ComfyUI)
+- 7862/http (Forge) aka Stable-Diffusion-WebUI-Forge
+- 7863/http (A1111) aka Stable-Diffusion-WebUI
+ +**New**: +- [6006/http] tensorboard (supporting kohya_ss) - provided at the '/tensorboard/' sub-url +- [7864/http] kohya-ss with FLUX.1 support - provided as a gradio link
+`Kohya` is currently configured to run via a `public gradio link` (*.gradio.live domain)
+`Tensorboard` currently is pre-started with bkohya and available at `http://localhost:6006/tensorboard/`
+**Note**: Both Urls will be automatically opened, when you click the `Open Application` button.
+Make sure to disable popup-blocker settings in your browser for these 2 additional domains! ## ENV Vars (System) @@ -89,12 +96,17 @@ see also explanantion in `".vscode/tasks.json"` or `"docker-compose.debug.yml"` gevent monkey-patching is being used, enable gevent support in the debugger.
only needed when `LOCAL_DEBUG=True`, otherwise this ENV var can be obmitted. +- GIT_PYTHON_TRACE=full + + enables full logging for the GitPython code, used for cloning the apps,
+ bcomfy custom_nodes, and refreshing the apps via git fetch/merge = git pull. + - FLASK_DEBUG=0 "1" allows debugging in Chrome, but then the VSCode debugger will not works.
"0" is the [default], which is the same as NOT setting this ENV var at all. -### APP specific Vars +## APP specific Vars - DISABLE_PULLBACK_MODELS=False The default is, that app model files, which are found locally (in only one app), get automatically `pulled-back` into the `"/workspace/shared_models"` folder.
@@ -104,6 +116,141 @@ see also explanantion in `".vscode/tasks.json"` or `"docker-compose.debug.yml"` If you **NOT** want this behaviour, then set `DISABLE_PULLBACK_MODELS=True`,
otherwise set `DISABLE_PULLBACK_MODELS=False` [default], which is the same as NOT setting this ENV var at all. +## APP specific USER Vars +All apps can be provisioned in at least 2 Virtual Environment versions:
+- 'official' - This setup is 'to the point' as defined and recommended by the app owners on GitHub.
+- 'latest' - This setup extends the 'official' Setup with the latest PyTorch and Cuda libraries, or
+ - in the case of ComfyUI - provides also an additional set of pre-installed Custom-Nodes. + +The user can choose from all available versions during Setup, or pre-select the VENV_VERSION, which should be installed via following ENV vars in the format `VENV_VERSION_`. + +If these ENV vars are not set/passed into the container, the App-Manager will provide an UI for selecting them during Setup: + +BCOMFY 'official' 5.43 GB (APP 75.3 MB):
+ + Python 3.11 && Cuda 12.4 && Recommended torch-2.5.1+cu124-cp311-cp311-linux_x86_64 && ComfyUI-Manager && comfy CLI + +BCOMFY 'latest' 6.59 GB (APP 400.22 MB):
+ + Python 3.11 && Cuda 12.4 && Recommended torch-2.5.1+cu124-cp311-cp311-linux_x86_64 && ComfyUI-Manager && comfy CLI && 12x Custom Nodes (see below) + +BFORGE 'official' 6.41 GB (APP 106.31 MB):
+ + Python 3.11 && Cuda 12.1 && Recommended torch-2.3.1+cu121-cp311-cp311-linux_x86_64 + +BFORGE 'latest' 6.62 GB (APP 105.58 MB):
+ + Python 3.11 && Cuda 12.4 && Upgraded to torch-2.5.1+cu124-cp311-cp311-linux_x86_64 && xformers + +BA111 'official' 4.85 GB (APP 41.78 MB):
+ + Python 3.11 && Cuda 12.1 && Recommended torch-2.1.2+cu121-cp311-cp311-linux_x86_64 + +BA111 'latest' 5.88 GB (APP 40.65 MB):
+ + Python 3.11 && Cuda 12.4 && Upgraded to torch-2.5.1+cu124-cp311-cp311-linux_x86_64 && xformers + +BKOHYA 'official':
+ + This does **NOT** exist + +BKOHYA 'latest' 11.61 GB (APP 58.57 MB):
+ + Python 3.10 && FLUX.1 version with torch-2.5.0+cu124 (setup-runpod.sh with requirements_runpod.txt) + (kohya_ss 'sd3-flux.1' branch and sd-scripts 'SD3' branch) + +Example ENV vars to 'pre-select' a specific APP version:
+ +VENV_VERSION_BCOMFY=latest
+VENV_VERSION_BFORGE=latest
+VENV_VERSION_BA1111=latest
+VENV_VERSION_BKOHYA=latest
+ +**NOTE**: Kohya currently is only available as the 'latest' (FLUX-)Version, and has **NO** 'official' version!

+**NOTE**: The selected VENV also controls the setup of the App,
+e.g. for BCOMFY 'latest', it also installs and activates 12 additional popular workflow 'custom nodes':
+- https://github.com/cubiq/ComfyUI_essentials +- https://github.com/rgthree/rgthree-comfy +- https://github.com/WASasquatch/was-node-suite-comfyui +- https://github.com/Fannovel16/comfyui_controlnet_aux +- https://github.com/XLabs-AI/x-flux-comfyui +- https://github.com/city96/ComfyUI-GGUF +- https://github.com/kijai/ComfyUI-Florence2 +- https://github.com/kijai/ComfyUI-KJNodes +- https://github.com/ssitu/ComfyUI_UltimateSDUpscale +- https://github.com/gseth/ControlAltAI-Nodes +- https://github.com/yolain/ComfyUI-Easy-Use +- https://github.com/ltdrdata/ComfyUI-Impact-Pack + +**NOTE**: If it makes sense, additional app versions will be added to the MANIFEST later, e.g. 'experimental' versions ;-)
+ +## Future plans + +**We also plan for additional Apps**:
+- @ostris `ai-toolkit` - another popular Trainer app, see https://github.com/ostris/ai-toolkit +- @MNeMoNiCuZ `joy-caption-batch` - a popular captioning app, see https://github.com/MNeMoNiCuZ/joy-caption-batch +for captioning with https://www.aimodels.fyi/models/huggingFace/llama-joycaption-alpha-two-hf-llava-fancyfeast + +Such a captioning app adds very nicely with the need to have good captions for Trainers like `kohya_ss` and `ai-toolkit`, specifically when training custom LoRA `Flux.1` models. + +## Better AI-Launcher Features + +All Apps can now be also `refreshed` any time - with their `'Refresh Application'` button - to the latest GitHub state of their corresponding Repos. This will include refreshing repo sub-modules (as in the case with 'kohya_ss'), and also will refresh 'custom_nodes' (in the case of 'ComfyUI's installed 12 custom nodes). In the case of 'ComfyUI' also all custom module requirements will be updated to their latest definitions.
+Note however, that refreshing an app needs to `reset` its status to the state, as when it was last installed/cloned!
+That means that any changes in the `app_path` (existing files edited or new files added) get lost, including local model downloads into the various `models` sub-folders of the app!
+ +Before refreshing, the `Refresh Symlinks` code will be called to `pull-back` any locally downloaded model files, +and save them into the `'shared_models'` workspace folder, before the actual `reset` is done.
+So this operation is not 'light' and you should plan for that accordingly!
+ +Every App also can be `deleted` and installed as another version with its `'Delete Application'` button.
+When `deleting` an app, the same logic applies as during `refreshing` and app, and the same `Refresh Symlinks` code will be called to `pull-back` any locally downloaded model files, and save them into the `'shared_models'` workspace folder, before the actual deletion of the app is done.
+This should make it easier to switch between app versions, if needed ;-)
+ +`Downloading` and `Unpacking` of app versions runs with the fastest available options: +- Downloads: + ATM we use `aria2c --max-connection-per-server=16 --max-concurrent-downloads=16 --split=16` for downloading app version `TAR.gz` archives from a central `S3 bucket location`.
+- Unpacking: + The `TAR.gz` archives are compressed with `'gzip -9'` option to achieve the lowest possible archive file size during download, which at the same time still provides fast Unpacking rates.
+ Unpacking the archives is done also as fast as possible with `PIGZ`, a parallel version of gzip. Although it only uses a single thread for decompression, it starts 3 additional threads for reading, writing, and check calculation. +- Verification: + All downloaded TAR archives are `SHA256 hash` checked for possible download corruptions.
+ + After Unpacking and after Cloning/Installing the app, both the `app_path` and also the `venv_path` of the app are checked for correct and expected folder sizes. That should help to detect corrupted installations, which - for any possible reason - did not finish their corresponding stage.
+ +This last verification part can also be done later at any time with the `'Check Application'` button of the app.
+ +If the check code detects wrong sizes for the APP or VENV folders, which are UNDER an expected minimum size of the app_path and venv_path, it offers to `delete` the app. `'Check Application'` shows a verification summary of the expected and actual APP and VENV folder sizes, and it also shows which version is currently installed and when it was last refeshed. It even shows you, when an updated app version exists online. + +### Shared Models +`'Shared Models'` provides a very powerful and completely configurable `'mapping'` for all kind of 'model files, be it Checkpoints, LoRAs, Embeddings and many more, between a `'shared_models'` workspace folder, and **all** installed applications, be it the currently supported applications or **any custom app**. +The mapping is completely transparent, and can be configures with 3 different kind of `mapping JSON files`. +One map for the kinds of model types to share, another map for the installed app-path locations, and the third map `connecting` these two other maps. This allows **any** mapping to **any** app ;-)
+ +`'Shared Models'` supports file-symlinks for single-file models, but also folder-symlinks for folder-based models (e.g. most LLM models are provided as folders). The mapping supports both types of symlinks. + +To further 'get started' with `'Shared Models'`, make sure to read the separate `README-SHARED-MODELS.txt` which also provides 3 sample scenarios in the form of 3 installable small TAR archives with 'test-dummy' models and a bash-script to install these test data files into your `'/workspace'` folder. +This readme file, bash-script and 3 archives can be found in the `'/app/tests'` folder within the container (or source-code): + + $ tree /app/tests + /app/tests + ├── README-SHARED_MODELS.txt + ├── populate_testdata.sh + ├── testdata_installed_apps_pull.tar.gz + ├── testdata_shared_models_link.tar.gz + └── testdata_stable-diffusion-webui_pull.tar.gz + + 1 directory, 5 files + +### Model Downloader +We also provide an intelligent `Model Downloader` to download all types of models directly into the `'shared_models'` workspace, from where these models will be automatically shared across all installed application, and mapped intelligently into their according (different named) local app model folders.
+This `Model Downloader` currently supports `HuggingFace` and `CivitAI` download Urls and - in the case of CivitAI - has a very smart `CivitAi Model and Version Picker Dialog`, to choose between all available 'Versions', and from any selected Version between all its available 'Files', of a specified given CivitAI Model Id Url.
+ +On the `'Models'` tab of the App-Manager, some `Example URLs` for popular Models are provided both for `HuggingFace` and for `CivitAI`. + +The `Model Downloader` supports also the use of `HuggingFace` and/or `CivitAI` `security tokens`, which can be provided as `ENV vars` (see below), or stored in hidden files in the workspace, or as one-time security tokens only available in memory in the web-form during model download.
+This allows downloading `private models` and also `gated models` from both `HuggingFace` and `CivitAI`. + ## ENV Vars (User and Secret Tokens) **TODO: rename the file `"env.txt"` to `".env"` and adjust the ENV vars for your personal settings** @@ -116,7 +263,7 @@ see also explanantion in `".vscode/tasks.json"` or `"docker-compose.debug.yml"` Your `HuggingFace` token.

Can be a `READ` scoped token for downloading your `private` models, or `gated models` as e.g. `Flux.1 Dev` or METAs `Llama LLM models`.
- The HF_TOKEN need to be a `READ/WRITE` scoped token, if you plan also to **UPLOAD** models to `HuggingFace` later, when we have Trainer Apps like `Kohya` or `ai-toolkit`. + The HF_TOKEN need to be a `READ/WRITE` scoped token, if you plan also to **UPLOAD** models to `HuggingFace` later, when we support direct uploads of your trained models from Trainer Apps like `kohya_ss` or later from `ai-toolkit`. - CIVITAI_API_TOKEN=xxx...xxx @@ -140,4 +287,4 @@ That mean, for this template/image, you should use these formats to pass the abo - `{{ RUNPOD_SECRET_CIVITAI_API_TOKEN }}` -(c) 2024 RunPod Better App Manager. Created by Madiator2011. \ No newline at end of file +(c) 2024 RunPod Better App Manager. Created by Madiator2011 & lutzapps. \ No newline at end of file diff --git a/official-templates/better-ai-launcher/app/app.py b/official-templates/better-ai-launcher/app/app.py index 8114c79..3113978 100644 --- a/official-templates/better-ai-launcher/app/app.py +++ b/official-templates/better-ai-launcher/app/app.py @@ -6,22 +6,28 @@ import threading import time from flask import Flask, render_template, jsonify, request from flask_sock import Sock +import re import json import signal import shutil import subprocess import traceback +import logging from utils.ssh_utils import setup_ssh, save_ssh_password, get_ssh_password, check_ssh_config, SSH_CONFIG_FILE from utils.filebrowser_utils import configure_filebrowser, start_filebrowser, stop_filebrowser, get_filebrowser_status, FILEBROWSER_PORT from utils.app_utils import ( - run_app, update_process_status, check_app_directories, get_app_status, - force_kill_process_by_name, update_webui_user_sh, save_install_status, - get_install_status, download_and_unpack_venv, fix_custom_nodes, is_process_running, install_app, # update_model_symlinks - get_bkohya_launch_url # lutzapps - support dynamic generated gradio url + run_app, run_bash_cmd, update_process_status, check_app_directories, get_app_status, + force_kill_process_by_name, 
find_and_kill_process_by_port, update_webui_user_sh, + fix_custom_nodes, is_process_running, install_app, + get_available_venvs, get_bkohya_launch_url, init_app_status, # lutzapps - support dynamic generated gradio url and venv_size checks + delete_app_installation, check_app_installation, refresh_app_installation # lutzapps - new app features for check and refresh app ) +from utils.websocket_utils import send_websocket_message, active_websockets +from utils.app_configs import get_app_configs, add_app_config, remove_app_config, app_configs, DEBUG_SETTINGS, APP_CONFIGS_MANIFEST_URL +from utils.model_utils import download_model, check_civitai_url, check_huggingface_url, format_size #, SHARED_MODELS_DIR # lutzapps - SHARED_MODELS_DIR is owned by shared_models module now -# lutzapps - CHANGE #1 +# lutzapps LOCAL_DEBUG = os.environ.get('LOCAL_DEBUG', 'False') # support local browsing for development/debugging # use the new "utils.shared_models" module for app model sharing @@ -55,10 +61,18 @@ from utils.shared_models import ( # global module 'SHARED_MODEL_APP_MAP' dict: 'model_type' -> 'app_name:app_model_dir' (relative path) # which does a default mapping from app code or (if exists) from external JSON 'SHARED_MODEL_APP_MAP_FILE' file +""" +from flask import Flask +import logging -from utils.websocket_utils import send_websocket_message, active_websockets -from utils.app_configs import get_app_configs, add_app_config, remove_app_config, app_configs -from utils.model_utils import download_model, check_civitai_url, check_huggingface_url, format_size #, SHARED_MODELS_DIR # lutzapps - SHARED_MODELS_DIR is owned by shared_models module now +logging.basicConfig(filename='record.log', level=logging.DEBUG) +app = Flask(__name__) + +if __name__ == '__main__': + app.run(debug=True) +""" + +#logging.basicConfig(filename='better-ai-launcher.log', level=logging.INFO) # CRITICAL, ERROR, WARNING, INFO, DEBUG app = Flask(__name__) sock = Sock(app) @@ -69,8 +83,6 @@ running_processes = 
{} app_configs = get_app_configs() -#S3_BASE_URL = "https://better.s3.madiator.com/" # unused now - SETTINGS_FILE = '/workspace/.app_settings.json' CIVITAI_TOKEN_FILE = '/workspace/.civitai_token' @@ -108,35 +120,26 @@ def index(): ssh_password = get_ssh_password() ssh_password_status = 'set' if ssh_password else 'not_set' - app_status = {} - for app_name, config in app_configs.items(): - dirs_ok, message = check_app_directories(app_name, app_configs) - status = get_app_status(app_name, running_processes) - install_status = get_install_status(app_name) - app_status[app_name] = { - 'name': config['name'], - 'dirs_ok': dirs_ok, - 'message': message, - 'port': config['port'], - 'status': status, - 'installed': dirs_ok, - 'install_status': install_status, - 'is_bcomfy': app_name == 'bcomfy' - } - filebrowser_status = get_filebrowser_status() + app_status = init_app_status(running_processes) + return render_template('index.html', - apps=app_configs, - app_status=app_status, - pod_id=RUNPOD_POD_ID, - RUNPOD_PUBLIC_IP=os.environ.get('RUNPOD_PUBLIC_IP'), - RUNPOD_TCP_PORT_22=os.environ.get('RUNPOD_TCP_PORT_22'), - enable_unsecure_localhost=os.environ.get('LOCAL_DEBUG'), - settings=settings, - current_auth_method=current_auth_method, - ssh_password=ssh_password, - ssh_password_status=ssh_password_status, - filebrowser_status=filebrowser_status) + apps=app_configs, + app_status=app_status, + pod_id=RUNPOD_POD_ID, + RUNPOD_PUBLIC_IP=os.environ.get('RUNPOD_PUBLIC_IP'), + RUNPOD_TCP_PORT_22=os.environ.get('RUNPOD_TCP_PORT_22'), + + # lutzapps - allow localhost Url for unsecure "http" and "ws" WebSockets protocol, + # according to 'LOCAL_DEBUG' ENV var + enable_unsecure_localhost=os.environ.get('LOCAL_DEBUG'), + app_configs_manifest_url=APP_CONFIGS_MANIFEST_URL, + + settings=settings, + current_auth_method=current_auth_method, + ssh_password=ssh_password, + ssh_password_status=ssh_password_status, + filebrowser_status=filebrowser_status) @app.route('/start/') def 
start_app(app_name): @@ -150,11 +153,38 @@ def start_app(app_name): update_webui_user_sh(app_name, app_configs) command = app_configs[app_name]['command'] + + # bkohya enhancements + if app_name == 'bkohya': + # the --noverify flag currently is NOT supported anymore, need to check, in the meantime disable it + # if DEBUG_SETTINGS['bkohya_noverify']: + # # Use regex to search & replace command variable to launch bkohya + # #command = re.sub(r'kohya_gui.py', 'kohya_gui.py --noverify', command) + # print(f"launch bkohya with patched command '{command}'") + + if DEBUG_SETTINGS['bkohya_run_tensorboard']: # default == True + # auto-launch tensorboard together with bkohya app + app_config = app_configs.get(app_name) # get bkohya app_config + app_path = app_config['app_path'] + cmd_key = 'run-tensorboard' # read the tensorboard launch command from the 'run-tensorboard' cmd_key + + ### run_app() variant, but need to define as app + # tensorboard_command = app_config['bash_cmds'][cmd_key] # get the bash_cmd value from app_config + # message = f"Launch Tensorboard together with kohya_ss: cmd_key='{cmd_key}' ..." 
+ # print(message) + # app_name = 'tensorboard' + # threading.Thread(target=run_app, args=(app_name, tensorboard_command, running_processes)).start() + + ### run_bash_cmd() variant + #run_bash_cmd(app_config, app_path, cmd_key=cmd_key) + threading.Thread(target=run_bash_cmd, args=(app_config, app_path, cmd_key)).start() + + threading.Thread(target=run_app, args=(app_name, command, running_processes)).start() return jsonify({'status': 'started'}) return jsonify({'status': 'already_running'}) -@app.route('/stop/') +@app.route('/stop/', methods=['GET']) def stop_app(app_name): if app_name in running_processes and get_app_status(app_name, running_processes) == 'running': try: @@ -206,10 +236,11 @@ def kill_all(): if get_app_status(app_key, running_processes) == 'running': stop_app(app_key) return jsonify({'status': 'success'}) + except Exception as e: return jsonify({'status': 'error', 'message': str(e)}) -@app.route('/force_kill/', methods=['POST']) +@app.route('/force_kill/', methods=['GET']) def force_kill_app(app_name): try: success, message = force_kill_process_by_name(app_name, app_configs) @@ -217,9 +248,91 @@ def force_kill_app(app_name): return jsonify({'status': 'killed', 'message': message}) else: return jsonify({'status': 'error', 'message': message}) + except Exception as e: return jsonify({'status': 'error', 'message': str(e)}) +@app.route('/force_kill_by_port/', methods=['GET']) +def force_kill_by_port_route(port:int): + try: + success = find_and_kill_process_by_port(port) + message = '' + if success: + return jsonify({'status': 'killed', 'message': message}) + else: + return jsonify({'status': 'error', 'message': message}) + + except Exception as e: + return jsonify({'status': 'error', 'message': str(e)}) + + +# lutzapps - added check app feature + +@app.route('/delete_app/', methods=['GET']) +def delete_app_installation_route(app_name:str): + try: + def progress_callback(message_type:str, message_data:str): + try: + send_websocket_message(message_type, 
message_data) + print(message_data) # additionally print to output + except Exception as e: + print(f"Error sending progress update: {str(e)}") + # Continue even if websocket fails + pass + + success, message = delete_app_installation(app_name, app_configs, progress_callback) + if success: + return jsonify({'status': 'deleted', 'message': message}) + else: + return jsonify({'status': 'error', 'message': message}) + + except Exception as e: + return jsonify({'status': 'error', 'message': str(e)}) + +@app.route('/check_installation/', methods=['GET']) +def check_app_installation_route(app_name:str): + try: + def progress_callback(message_type, message_data): + try: + send_websocket_message(message_type, message_data) + print(message_data) # additionally print to output + except Exception as e: + print(f"Error sending progress update: {str(e)}") + # Continue even if websocket fails + pass + + success, message = check_app_installation(app_name, app_configs, progress_callback) + if success: + return jsonify({'status': 'checked', 'message': message}) + else: + return jsonify({'status': 'error', 'message': message}) + + except Exception as e: + return jsonify({'status': 'error', 'message': str(e)}) + +# lutzapps - added refresh app feature +@app.route('/refresh_installation/', methods=['GET']) +def refresh_app_installation_route(app_name:str): + try: + def progress_callback(message_type, message_data): + try: + send_websocket_message(message_type, message_data) + print(message_data) # additionally print to output + except Exception as e: + print(f"Error sending progress update: {str(e)}") + # Continue even if websocket fails + pass + + success, message = refresh_app_installation(app_name, app_configs, progress_callback) + if success: + return jsonify({'status': 'refreshed', 'message': message}) + else: + return jsonify({'status': 'error', 'message': message}) + + except Exception as e: + return jsonify({'status': 'error', 'message': str(e)}) + + from gevent.lock import 
RLock websocket_lock = RLock() @@ -272,8 +385,24 @@ def send_heartbeat(): # Start heartbeat thread threading.Thread(target=send_heartbeat, daemon=True).start() -@app.route('/install/', methods=['POST']) -def install_app_route(app_name): + +@app.route('/available_venvs/', methods=['GET']) +def available_venvs_route(app_name): + try: + success, venvs = get_available_venvs(app_name) + if success: + return jsonify({'status': 'success', 'available_venvs': venvs}) + else: + return jsonify({'status': 'error', 'error': venvs}) + + except Exception as e: + error_message = f"Error for {app_name}: {str(e)}\n{traceback.format_exc()}" + app.logger.error(error_message) + return jsonify({'status': 'error', 'message': error_message}), 500 + +# lutzapps - added venv_version +@app.route('/install//', methods=['GET']) +def install_app_route(app_name, venv_version): try: def progress_callback(message_type, message_data): try: @@ -283,19 +412,20 @@ def install_app_route(app_name): # Continue even if websocket fails pass - success, message = install_app(app_name, app_configs, progress_callback) + success, message = install_app(app_name, venv_version, progress_callback) if success: return jsonify({'status': 'success', 'message': message}) else: return jsonify({'status': 'error', 'message': message}) + except Exception as e: error_message = f"Installation error for {app_name}: {str(e)}\n{traceback.format_exc()}" app.logger.error(error_message) return jsonify({'status': 'error', 'message': error_message}), 500 -@app.route('/fix_custom_nodes/', methods=['POST']) +@app.route('/fix_custom_nodes/', methods=['GET']) def fix_custom_nodes_route(app_name): - success, message = fix_custom_nodes(app_name) + success, message = fix_custom_nodes(app_name, app_configs) if success: return jsonify({'status': 'success', 'message': message}) else: @@ -395,17 +525,17 @@ def start_symlink_update_thread(): thread.start() # modified function -@app.route('/recreate_symlinks', methods=['POST']) 
+@app.route('/recreate_symlinks', methods=['GET']) def recreate_symlinks_route(): - # lutzapps - CHANGE #7 - use the new "shared_models" module for app model sharing + # lutzapps - use the new "shared_models" module for app model sharing jsonResult = update_model_symlinks() return jsonResult # modified function -@app.route('/create_shared_folders', methods=['POST']) +@app.route('/create_shared_folders', methods=['GET']) def create_shared_folders(): - # lutzapps - CHANGE #8 - use the new "shared_models" module for app model sharing + # lutzapps - use the new "shared_models" module for app model sharing jsonResult = ensure_shared_models_folders() return jsonResult @@ -414,7 +544,7 @@ def save_civitai_token(token): json.dump({'token': token}, f) # lutzapps - added function - 'HF_TOKEN' ENV var -def load_huggingface_token(): +def load_huggingface_token()->str: # look FIRST for Huggingface token passed in as 'HF_TOKEN' ENV var HF_TOKEN = os.environ.get('HF_TOKEN', '') @@ -441,7 +571,7 @@ def load_huggingface_token(): return None # lutzapps - modified function - support 'CIVITAI_API_TOKEN' ENV var -def load_civitai_token(): +def load_civitai_token()->str: # look FIRST for CivitAI token passed in as 'CIVITAI_API_TOKEN' ENV var CIVITAI_API_TOKEN = os.environ.get('CIVITAI_API_TOKEN', '') @@ -517,32 +647,33 @@ def get_model_types_route(): @app.route('/download_model', methods=['POST']) def download_model_route(): + # this function will be called first from the model downloader, which only passes the url, + but does not parse for already existing version_id or file_index + # if we ignore the already wanted version_id, the user will end up with the model-picker dialog + just to select the wanted version_id again, and then the model-picker calls also into this function, + but now with a non-blank version_id + try: data = request.json url = data.get('url') model_name = data.get('model_name') model_type = data.get('model_type') - civitai_token = data.get('civitai_token') - 
hf_token = data.get('hf_token') + civitai_token = data.get('civitai_token') or load_civitai_token() # If no token provided in request, try to read from ENV and lastly from file + hf_token = data.get('hf_token') or load_huggingface_token() # If no token provided in request, try to read from ENV and lastly from file version_id = data.get('version_id') file_index = data.get('file_index') - # If no token provided in request, try to read from file - if not civitai_token: - try: - if os.path.exists('/workspace/.civitai_token'): - with open('/workspace/.civitai_token', 'r') as f: - token_data = json.load(f) - civitai_token = token_data.get('token') - except Exception as e: - app.logger.error(f"Error reading token file: {str(e)}") + is_civitai, _, url_model_id, url_version_id = check_civitai_url(url) + if version_id == None: # model-picker dialog not used already + version_id = url_version_id # get a possible version_id from the copy-pasted url - is_civitai, _, _, _ = check_civitai_url(url) is_huggingface, _, _, _, _ = check_huggingface_url(url) + # only CivitAI or Huggingface model downloads are supported for now if not (is_civitai or is_huggingface): return jsonify({'status': 'error', 'message': 'Unsupported URL. Please use Civitai or Hugging Face URLs.'}), 400 + # CivitAI downloads require an API Token (e.g. 
for model variant downloads and private models) if is_civitai and not civitai_token: return jsonify({'status': 'error', 'message': 'Civitai token is required for downloading from Civitai.'}), 400 diff --git a/official-templates/better-ai-launcher/app/templates/index.html b/official-templates/better-ai-launcher/app/templates/index.html index ab1b578..32b35a0 100644 --- a/official-templates/better-ai-launcher/app/templates/index.html +++ b/official-templates/better-ai-launcher/app/templates/index.html @@ -423,6 +423,9 @@ } .button-group button, + .check-installation-app-button, + .refresh-installation-app-button, + .delete-installation-app-button, .fix-custom-nodes-button { border: none; color: white; @@ -440,11 +443,18 @@ margin-top: 10px; } + .check-installation-app-button, + .refresh-installation-app-button, + .delete-installation-app-button, .fix-custom-nodes-button { - width: 100%; /* Keep fix custom nodes button full width */ + background-color: #616261; /* darker background-color for Chrome */ + width: 100%; /* make the buttons full width */ margin-top: 10px; } + .check-installation-app-button:hover, + .refresh-installation-app-button:hover, + .delete-installation-app-button:hover, .fix-custom-nodes-button:hover { background-color: #7B1FA2; transform: translateY(-2px); @@ -563,6 +573,8 @@ } .button-group button, + .check-installation-app-button, + .refresh-installation-app-button, .fix-custom-nodes-button, .install-button { border: none; @@ -2415,11 +2427,20 @@ View Logs + + + {% if app_key == 'bcomfy' %} +
+
Install/Refresh Progress:
+
+
0%
+
+
@@ -2472,6 +2499,16 @@