# OmniAvatar-14B Inference Configuration

model:
  base_model_path: "./pretrained_models/Wan2.1-T2V-14B"    # Wan2.1 text-to-video base model
  omni_model_path: "./pretrained_models/OmniAvatar-14B"    # OmniAvatar audio-conditioned avatar weights
  wav2vec_path: "./pretrained_models/wav2vec2-base-960h"   # wav2vec 2.0 audio feature extractor

inference:
  output_dir: "./outputs"
  max_tokens: 30000             # upper bound on video tokens per generation pass
  guidance_scale: 4.5           # classifier-free guidance scale for the text prompt
  audio_scale: 3.0              # guidance scale for the audio condition; higher values tighten lip sync
  num_steps: 25                 # denoising steps; more steps improve quality at the cost of speed
  overlap_frame: 13             # frames overlapped between segments when generating long videos
  tea_cache_l1_thresh: 0.14     # TeaCache threshold; larger values speed up inference but reduce quality

device:
  use_cuda: true
  dtype: "bfloat16"

generation:
  resolution: "480p"
  frame_rate: 25
  duration_seconds: 10
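
# Usage sketch (assumption: this file is saved as configs/inference.yaml and consumed by
# the repository's inference entry point; the script path, flags, and input-file name
# below are illustrative and may differ in your checkout):
#
#   torchrun --standalone --nproc_per_node=1 scripts/inference.py \
#       --config configs/inference.yaml \
#       --input_file examples/infer_samples.txt
#
# Per-run overrides of values defined above (e.g. num_steps or guidance_scale) are often
# exposed as command-line hyperparameter flags; check the inference script's --help.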