Reworked ComfyUI template

Madiator2011 authored 2024-07-27 11:02:51 +02:00 (committed by GitHub)
parent 3832394c7e
commit de61e8452e
3 changed files with 108 additions and 40 deletions

Changed file: Dockerfile

@@ -1,13 +1,8 @@
# Stage 1: Base Image
FROM madiator2011/better-pytorch:cuda12.1 as base
FROM madiator2011/better-base:cuda12.1 as base
ARG PYTHON_VERSION1
ARG TORCH
# Install PyTorch if specified
RUN if [ -n "${TORCH}" ]; then \
pip install --upgrade --no-cache-dir ${TORCH}; \
fi
ARG PYTHON_VERSION1=3.10
ARG TORCH=torch==2.3.0+cu121
# Stage 2: ComfyUI Installation
FROM base as comfyui-install
@@ -15,46 +10,74 @@ FROM base as comfyui-install
# Create and activate virtual environment for ComfyUI installation
RUN python -m venv /venv
ENV PATH="/venv/bin:$PATH"
ENV VIRTUAL_ENV="/venv"
# Install PyTorch within the virtual environment
RUN if [ -n "${TORCH}" ]; then \
/venv/bin/pip install --upgrade --no-cache-dir ${TORCH}; \
fi
RUN git clone https://github.com/comfyanonymous/ComfyUI.git && \
cd /ComfyUI && \
pip install -r requirements.txt && \
/venv/bin/pip install -r requirements.txt && \
git clone https://github.com/ltdrdata/ComfyUI-Manager.git custom_nodes/ComfyUI-Manager && \
cd custom_nodes/ComfyUI-Manager && \
pip install -r requirements.txt && \
pip install -U xformers --index-url https://download.pytorch.org/whl/cu121
/venv/bin/pip install -r requirements.txt && \
/venv/bin/pip install -U xformers --index-url https://download.pytorch.org/whl/cu121 && \
/venv/bin/pip install -U accelerate wheel
# Stage 3: Model Setup and Final Image
FROM comfyui-install as final
# Stage 3: Model Setup
FROM comfyui-install as model-setup
# Create model and cache directories
RUN mkdir -p /root/.cache/huggingface && mkdir -p /comfy-models
# Download models directly
RUN wget -q --show-progress https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors -O /comfy-models/v1-5-pruned-emaonly.safetensors && \
wget -q --show-progress https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors -O /comfy-models/v2-1_768-ema-pruned.safetensors && \
wget -q --show-progress https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -O /comfy-models/sd_xl_base_1.0.safetensors && \
wget -q --show-progress https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -O /comfy-models/sd_xl_refiner_1.0.safetensors
ARG INCLUDE_MODELS=false
# Download each model in a separate layer
RUN if [ "${INCLUDE_MODELS}" = "true" ]; then \
wget -q --show-progress https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors -O /comfy-models/v1-5-pruned-emaonly.safetensors; \
fi
RUN if [ "${INCLUDE_MODELS}" = "true" ]; then \
wget -q --show-progress https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors -O /comfy-models/v2-1_768-ema-pruned.safetensors; \
fi
RUN if [ "${INCLUDE_MODELS}" = "true" ]; then \
wget -q --show-progress https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -O /comfy-models/sd_xl_base_1.0.safetensors; \
fi
RUN if [ "${INCLUDE_MODELS}" = "true" ]; then \
wget -q --show-progress https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -O /comfy-models/sd_xl_refiner_1.0.safetensors; \
fi
# Verify models were downloaded
RUN ls -lh /comfy-models
RUN if [ "${INCLUDE_MODELS}" = "true" ]; then \
ls -lh /comfy-models; \
fi
# NGINX Proxy
COPY --from=proxy nginx.conf /etc/nginx/nginx.conf
COPY --from=proxy readme.html /usr/share/nginx/html/readme.html
# Stage 4: Final Image
FROM comfyui-install as final
# Copy the README.md
COPY README.md /usr/share/nginx/html/README.md
# Copy models if they were included
COPY --from=model-setup /comfy-models /comfy-models
# Set environment variables for runtime
ENV PATH="/workspace/venv/bin:$PATH"
ENV VIRTUAL_ENV="/workspace/venv"
# Start Scripts
COPY pre_start.sh /pre_start.sh
# Copy the README.md
COPY README.md /usr/share/nginx/html/README.md
# Start Scripts
# NGINX Proxy
COPY --from=proxy nginx.conf /etc/nginx/nginx.conf
COPY --from=proxy readme.html /usr/share/nginx/html/readme.html
# Copy all necessary scripts
COPY pre_start.sh /pre_start.sh
COPY --from=scripts start.sh /
RUN chmod +x /start.sh
RUN chmod +x /pre_start.sh
CMD [ "/start.sh" ]
# CMD
CMD [ "/start.sh" ]

Changed file: Docker Bake build definition (HCL)

@@ -1,18 +1,59 @@
group "default" {
targets = ["py310-cuda121"]
targets = ["full-version", "light-version", "light-experimental"]
}
target "py310-cuda121" {
target "base" {
dockerfile = "Dockerfile"
args = {
BASE_IMAGE = "madiator2011/better-base:cuda12.1",
TORCH = "torch==2.3.0+cu121 -f https://download.pytorch.org/whl/torch_stable.html",
PYTHON_VERSION1 = "3.10"
}
contexts = {
scripts = "../../container-template"
proxy = "../../container-template/proxy"
logo = "../../container-template"
}
}
target "full-version" {
inherits = ["base"]
args = {
INCLUDE_MODELS = "true"
}
contexts = {
scripts = "../../container-template"
proxy = "../../container-template/proxy"
logo = "../../container-template"
}
tags = ["madiator2011/better-comfyui:full"]
}
target "light-version" {
inherits = ["base"]
args = {
INCLUDE_MODELS = "false"
}
contexts = {
scripts = "../../container-template"
proxy = "../../container-template/proxy"
logo = "../../container-template"
}
tags = ["madiator2011/better-comfyui:light"]
}
target "light-experimental" {
dockerfile = "Dockerfile"
args = {
BASE_IMAGE = "nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04",
BASE_IMAGE = "madiator2011/better-base:light-cuda",
TORCH = "torch==2.3.0+cu121 -f https://download.pytorch.org/whl/torch_stable.html",
PYTHON_VERSION = "3.10"
PYTHON_VERSION1 = "3.10",
INCLUDE_MODELS = "false"
}
tags = ["madiator2011/better-comfyui:cuda12.1"]
contexts = {
scripts = "../../container-template"
proxy = "../../container-template/proxy"
logo = "../../container-template"
}
tags = ["madiator2011/better-comfyui:light-experimental"]
}
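With this bake definition, the default group covers all three variants; a sketch of typical invocations, assuming the file is picked up as the bake definition for this directory:
# Build everything in the default group (full, light, light-experimental)
docker buildx bake

# Build a single target, or inspect the resolved configuration without building
docker buildx bake light-version
docker buildx bake --print full-version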

Changed file: pre_start.sh (ComfyUI startup script)

@@ -10,31 +10,35 @@ print_feedback() {
echo -e "${GREEN}[ComfyUI Startup]:${NC} $1"
}
# Function to run rsync with progress bar
# Function to run rsync with progress bar and optimizations
rsync_with_progress() {
stdbuf -i0 -o0 -e0 rsync -au --info=progress2 "$@" | stdbuf -i0 -o0 -e0 tr '\r' '\n' | stdbuf -i0 -o0 -e0 grep -oP '\d+%|\d+.\d+[mMgG]' | tqdm --bar-format='{l_bar}{bar}' --total=100 --unit='%' > /dev/null
rsync -aHvx --info=progress2 "$@"
}
# Check for required commands
for cmd in stdbuf rsync tr grep tqdm; do
for cmd in rsync; do
if ! command -v $cmd &> /dev/null; then
echo "$cmd could not be found, please install it."
exit 1
fi
done
LOG_FILE="/workspace/comfyui.log"
print_feedback "Starting ComfyUI setup..."
print_feedback "Syncing virtual environment..."
rsync_with_progress /venv/ /workspace/venv/
print_feedback "Activating virtual environment..."
export VIRTUAL_ENV="/workspace/venv"
export PATH="$VIRTUAL_ENV/bin:$PATH"
source /workspace/venv/bin/activate
export PYTHONUNBUFFERED=1
print_feedback "Syncing ComfyUI files..."
rsync_with_progress --remove-source-files /ComfyUI/ /workspace/ComfyUI/
rsync_with_progress /ComfyUI/ /workspace/ComfyUI/
print_feedback "Creating symbolic links for model checkpoints..."
ln -sf /comfy-models/* /workspace/ComfyUI/models/checkpoints/
@@ -47,7 +51,7 @@ print_feedback "ComfyUI will be available at http://0.0.0.0:3000"
# Check if CUSTOM_ARGS is set and not empty
if [ -n "$CUSTOM_ARGS" ]; then
exec /workspace/venv/bin/python main.py --listen --port 3000 $CUSTOM_ARGS
exec /workspace/venv/bin/python main.py --listen --port 3000 $CUSTOM_ARGS 2>&1 | tee -a $LOG_FILE
else
exec /workspace/venv/bin/python main.py --listen --port 3000
fi
exec /workspace/venv/bin/python main.py --listen --port 3000 2>&1 | tee -a $LOG_FILE
fi
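A sketch of how the CUSTOM_ARGS hook and the new log file might be used inside a running container; --lowvram is only an example ComfyUI launch flag, and invoking /pre_start.sh directly is an assumption about how the start script chains into it:
# Example only: extra launch flags are appended via CUSTOM_ARGS
export CUSTOM_ARGS="--lowvram"
/pre_start.sh

# stdout/stderr are also tee'd to the log file defined in the script
tail -f /workspace/comfyui.log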