Skip to content

Instantly share code, notes, and snippets.

@gyorgygutai
Last active February 21, 2026 21:12
Show Gist options
  • Select an option

  • Save gyorgygutai/2f91578294714fc104c65d7d59824ac4 to your computer and use it in GitHub Desktop.
#!/bin/bash
# Download model weights for ComfyUI (Z-Image Turbo, Wan 2.2, LTX-2) into a
# RunPod network volume. Requires network access, huggingface-cli auth for
# gated repos (Gemma), and roughly 60+ GB of free space on the volume.
set -euo pipefail

readonly BASE=/runpod-volume/models

# Private staging area for in-flight downloads; removed on any exit so
# partial files never accumulate (the original left /tmp/z, /tmp/wan,
# /tmp/ltx behind).
STAGING=$(mktemp -d)
cleanup() { rm -rf -- "$STAGING"; }
trap cleanup EXIT

mkdir -p "$BASE"/{diffusion_models,text_encoders,vae,clip_vision,loras}

pip install -q huggingface_hub

#######################################
# Download files from a Hugging Face repo and move them into place.
# Staging through $STAGING keeps partially-downloaded files out of $BASE.
# Globals:   STAGING (read)
# Arguments: $1 - HF repo id, $2 - destination directory, $3.. - repo paths
#######################################
fetch() {
  local repo=$1 dest=$2
  shift 2
  huggingface-cli download "$repo" "$@" --local-dir "$STAGING/dl"
  local f
  for f in "$@"; do
    mv -- "$STAGING/dl/$f" "$dest/"
  done
}

echo "=== Z-Image Turbo ==="
# ~13GB BF16 checkpoint. NOTE(review): the original comment claimed FP8 was
# being used to save space, but the file fetched is the BF16 one — switch to
# the fp8 variant in the repo if volume space matters.
fetch Comfy-Org/z_image_turbo "$BASE/diffusion_models" \
  split_files/diffusion_models/z_image_turbo_bf16.safetensors
fetch Comfy-Org/z_image_turbo "$BASE/text_encoders" \
  split_files/text_encoders/qwen_3_4b.safetensors
fetch Comfy-Org/z_image_turbo "$BASE/vae" \
  split_files/vae/ae.safetensors

echo "=== Wan 2.2 — T2V + I2V FP8 (14B dual expert) ==="
fetch Comfy-Org/Wan_2.2_ComfyUI_Repackaged "$BASE/diffusion_models" \
  split_files/diffusion_models/wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors \
  split_files/diffusion_models/wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors \
  split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors \
  split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors
fetch Comfy-Org/Wan_2.2_ComfyUI_Repackaged "$BASE/text_encoders" \
  split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors
fetch Comfy-Org/Wan_2.2_ComfyUI_Repackaged "$BASE/vae" \
  split_files/vae/wan_2.1_vae.safetensors
fetch Comfy-Org/Wan_2.2_ComfyUI_Repackaged "$BASE/clip_vision" \
  split_files/clip_vision/clip_vision_h.safetensors

echo "=== LTX-2 ==="
fetch Lightricks/LTX-2 "$BASE/diffusion_models" \
  ltx-2-19b-dev-fp8.safetensors
fetch Lightricks/LTX-2 "$BASE/loras" \
  ltx-2-19b-distilled-lora-384.safetensors

# LTX-2 Gemma text encoder (large, ~7GB quantized) — whole-repo download,
# so it goes straight into its own directory (created by --local-dir).
huggingface-cli download google/gemma-3-12b-it-qat-q4_0-unquantized \
  --local-dir "$BASE/text_encoders/gemma_3_12B_it_qat_q4_0_unquantized"

echo "=== Done ==="
du -sh "$BASE"/*
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment