#!/bin/bash
#
# Storage-optimized model setup.
#
# Downloads only the small, essential wav2vec2 model to local disk, and for
# the two large 14B models writes placeholder config.json stubs that tell the
# application to stream weights from the Hugging Face Hub at runtime instead
# of storing them locally (keeps total disk usage under the storage limit).
#
# Requires: pip, network access to huggingface.co.

set -euo pipefail

echo "Downloading models with storage optimization..."

# Create the root directory for all model artifacts.
mkdir -p pretrained_models

# Install the Hugging Face Hub CLI if not already installed
# (provides the `huggingface-cli` command used below).
pip install "huggingface_hub[cli]"

# Only download the most essential model files to stay under the storage limit.
echo "Downloading wav2vec2-base-960h (essential for audio processing)..."
huggingface-cli download facebook/wav2vec2-base-960h \
  --local-dir ./pretrained_models/wav2vec2-base-960h

#######################################
# Write a hub-streaming placeholder config for a large model.
# Arguments:
#   $1 - local model directory (created if missing)
#   $2 - value for the "model_type" field
#   $3 - Hugging Face Hub model id (e.g. "OmniAvatar/OmniAvatar-14B")
# Outputs:
#   writes $1/config.json
#######################################
write_streaming_config() {
  local dir=$1
  local model_type=$2
  local hub_id=$3
  mkdir -p "$dir"
  cat > "$dir/config.json" <<EOF
{
  "model_type": "$model_type",
  "hub_model_id": "$hub_id",
  "use_streaming": true,
  "cache_dir": "/tmp/hf_cache"
}
EOF
}

# For the large models, create placeholder configs that make the app stream
# weights from the HF Hub directly rather than from local disk.
echo "Setting up OmniAvatar-14B for hub streaming..."
# NOTE(review): "omnivatar" looks like a typo for "omniavatar", but it is kept
# byte-identical because the consumer of this config is not visible here —
# confirm against the loader before changing it.
write_streaming_config ./pretrained_models/OmniAvatar-14B \
  "omnivatar" "OmniAvatar/OmniAvatar-14B"

echo "Setting up Wan2.1-T2V-14B for hub streaming..."
write_streaming_config ./pretrained_models/Wan2.1-T2V-14B \
  "wan_t2v" "Wan-AI/Wan2.1-T2V-14B"

echo "Storage-optimized model setup completed!"
echo "Large models will be streamed from HF Hub to minimize storage usage."