├── .github └── workflows │ ├── logo.gif │ └── publish.yaml ├── .gitignore ├── LICENSE ├── README.md ├── README_zh.md ├── diffsynth ├── __init__.py ├── configs │ ├── __init__.py │ ├── model_configs.py │ └── vram_management_module_maps.py ├── core │ ├── __init__.py │ ├── attention │ │ ├── __init__.py │ │ └── attention.py │ ├── data │ │ ├── __init__.py │ │ ├── operators.py │ │ └── unified_dataset.py │ ├── gradient │ │ ├── __init__.py │ │ └── gradient_checkpoint.py │ ├── loader │ │ ├── __init__.py │ │ ├── config.py │ │ ├── file.py │ │ └── model.py │ └── vram │ │ ├── __init__.py │ │ ├── disk_map.py │ │ ├── initialization.py │ │ └── layers.py ├── diffusion │ ├── __init__.py │ ├── base_pipeline.py │ ├── flow_match.py │ ├── logger.py │ ├── loss.py │ ├── parsers.py │ ├── runner.py │ └── training_module.py ├── models │ ├── flux2_dit.py │ ├── flux2_text_encoder.py │ ├── flux2_vae.py │ ├── flux_controlnet.py │ ├── flux_dit.py │ ├── flux_infiniteyou.py │ ├── flux_ipadapter.py │ ├── flux_lora_encoder.py │ ├── flux_lora_patcher.py │ ├── flux_text_encoder_clip.py │ ├── flux_text_encoder_t5.py │ ├── flux_vae.py │ ├── flux_value_control.py │ ├── general_modules.py │ ├── longcat_video_dit.py │ ├── model_loader.py │ ├── nexus_gen.py │ ├── nexus_gen_ar_model.py │ ├── nexus_gen_projector.py │ ├── qwen_image_controlnet.py │ ├── qwen_image_dit.py │ ├── qwen_image_text_encoder.py │ ├── qwen_image_vae.py │ ├── sd_text_encoder.py │ ├── step1x_connector.py │ ├── step1x_text_encoder.py │ ├── wan_video_animate_adapter.py │ ├── wan_video_camera_controller.py │ ├── wan_video_dit.py │ ├── wan_video_dit_s2v.py │ ├── wan_video_image_encoder.py │ ├── wan_video_mot.py │ ├── wan_video_motion_controller.py │ ├── wan_video_text_encoder.py │ ├── wan_video_vace.py │ ├── wan_video_vae.py │ ├── wav2vec.py │ ├── z_image_dit.py │ └── z_image_text_encoder.py ├── pipelines │ ├── flux2_image.py │ ├── flux_image.py │ ├── qwen_image.py │ ├── wan_video.py │ └── z_image.py └── utils │ ├── controlnet │ ├── __init__.py │ ├── annotator.py │ └── controlnet_input.py │ ├── data │ └── __init__.py │ ├── lora │ ├── __init__.py │ ├── flux.py │ ├── general.py │ └── merge.py │ ├── state_dict_converters │ ├── __init__.py │ ├── flux2_text_encoder.py │ ├── flux_controlnet.py │ ├── flux_dit.py │ ├── flux_infiniteyou.py │ ├── flux_ipadapter.py │ ├── flux_text_encoder_clip.py │ ├── flux_text_encoder_t5.py │ ├── flux_vae.py │ ├── nexus_gen.py │ ├── nexus_gen_projector.py │ ├── qwen_image_text_encoder.py │ ├── step1x_connector.py │ ├── wan_video_animate_adapter.py │ ├── wan_video_dit.py │ ├── wan_video_image_encoder.py │ ├── wan_video_mot.py │ ├── wan_video_vace.py │ ├── wan_video_vae.py │ └── wans2v_audio_encoder.py │ └── xfuser │ ├── __init__.py │ └── xdit_context_parallel.py ├── docs ├── en │ ├── API_Reference │ │ └── core │ │ │ ├── attention.md │ │ │ ├── data.md │ │ │ ├── gradient.md │ │ │ ├── loader.md │ │ │ └── vram.md │ ├── Developer_Guide │ │ ├── Building_a_Pipeline.md │ │ ├── Enabling_VRAM_management.md │ │ ├── Integrating_Your_Model.md │ │ └── Training_Diffusion_Models.md │ ├── Model_Details │ │ ├── FLUX.md │ │ ├── FLUX2.md │ │ ├── Overview.md │ │ ├── Qwen-Image.md │ │ ├── Wan.md │ │ └── Z-Image.md │ ├── Pipeline_Usage │ │ ├── Environment_Variables.md │ │ ├── Model_Inference.md │ │ ├── Model_Training.md │ │ ├── Setup.md │ │ └── VRAM_management.md │ ├── QA.md │ ├── README.md │ └── Training │ │ ├── Differential_LoRA.md │ │ ├── Direct_Distill.md │ │ ├── FP8_Precision.md │ │ ├── Split_Training.md │ │ ├── Supervised_Fine_Tuning.md │ │ └── 
Understanding_Diffusion_models.md └── zh │ ├── API_Reference │ └── core │ │ ├── attention.md │ │ ├── data.md │ │ ├── gradient.md │ │ ├── loader.md │ │ └── vram.md │ ├── Developer_Guide │ ├── Building_a_Pipeline.md │ ├── Enabling_VRAM_management.md │ ├── Integrating_Your_Model.md │ └── Training_Diffusion_Models.md │ ├── Model_Details │ ├── FLUX.md │ ├── FLUX2.md │ ├── Overview.md │ ├── Qwen-Image.md │ ├── Wan.md │ └── Z-Image.md │ ├── Pipeline_Usage │ ├── Environment_Variables.md │ ├── Model_Inference.md │ ├── Model_Training.md │ ├── Setup.md │ └── VRAM_management.md │ ├── QA.md │ ├── README.md │ └── Training │ ├── Differential_LoRA.md │ ├── Direct_Distill.md │ ├── FP8_Precision.md │ ├── Split_Training.md │ ├── Supervised_Fine_Tuning.md │ └── Understanding_Diffusion_models.md ├── examples ├── dev_tools │ ├── fix_path.py │ └── unit_test.py ├── flux │ ├── model_inference │ │ ├── FLEX.2-preview.py │ │ ├── FLUX.1-Kontext-dev.py │ │ ├── FLUX.1-Krea-dev.py │ │ ├── FLUX.1-dev-AttriCtrl.py │ │ ├── FLUX.1-dev-Controlnet-Inpainting-Beta.py │ │ ├── FLUX.1-dev-Controlnet-Union-alpha.py │ │ ├── FLUX.1-dev-Controlnet-Upscaler.py │ │ ├── FLUX.1-dev-EliGen.py │ │ ├── FLUX.1-dev-IP-Adapter.py │ │ ├── FLUX.1-dev-InfiniteYou.py │ │ ├── FLUX.1-dev-LoRA-Encoder.py │ │ ├── FLUX.1-dev-LoRA-Fusion.py │ │ ├── FLUX.1-dev.py │ │ ├── Nexus-Gen-Editing.py │ │ ├── Nexus-Gen-Generation.py │ │ └── Step1X-Edit.py │ ├── model_inference_low_vram │ │ ├── FLEX.2-preview.py │ │ ├── FLUX.1-Kontext-dev.py │ │ ├── FLUX.1-Krea-dev.py │ │ ├── FLUX.1-dev-AttriCtrl.py │ │ ├── FLUX.1-dev-Controlnet-Inpainting-Beta.py │ │ ├── FLUX.1-dev-Controlnet-Union-alpha.py │ │ ├── FLUX.1-dev-Controlnet-Upscaler.py │ │ ├── FLUX.1-dev-EliGen.py │ │ ├── FLUX.1-dev-IP-Adapter.py │ │ ├── FLUX.1-dev-InfiniteYou.py │ │ ├── FLUX.1-dev-LoRA-Encoder.py │ │ ├── FLUX.1-dev-LoRA-Fusion.py │ │ ├── FLUX.1-dev.py │ │ ├── Nexus-Gen-Editing.py │ │ ├── Nexus-Gen-Generation.py │ │ └── Step1X-Edit.py │ └── model_training │ │ ├── full │ │ ├── FLEX.2-preview.sh │ │ ├── FLUX.1-Kontext-dev.sh │ │ ├── FLUX.1-Krea-dev.sh │ │ ├── FLUX.1-dev-AttriCtrl.sh │ │ ├── FLUX.1-dev-Controlnet-Inpainting-Beta.sh │ │ ├── FLUX.1-dev-Controlnet-Union-alpha.sh │ │ ├── FLUX.1-dev-Controlnet-Upscaler.sh │ │ ├── FLUX.1-dev-IP-Adapter.sh │ │ ├── FLUX.1-dev-InfiniteYou.sh │ │ ├── FLUX.1-dev-LoRA-Encoder.sh │ │ ├── FLUX.1-dev.sh │ │ ├── Nexus-Gen.sh │ │ ├── Step1X-Edit.sh │ │ ├── accelerate_config.yaml │ │ └── accelerate_config_zero2offload.yaml │ │ ├── lora │ │ ├── FLEX.2-preview.sh │ │ ├── FLUX.1-Kontext-dev.sh │ │ ├── FLUX.1-Krea-dev.sh │ │ ├── FLUX.1-dev-AttriCtrl.sh │ │ ├── FLUX.1-dev-Controlnet-Inpainting-Beta.sh │ │ ├── FLUX.1-dev-Controlnet-Union-alpha.sh │ │ ├── FLUX.1-dev-Controlnet-Upscaler.sh │ │ ├── FLUX.1-dev-EliGen.sh │ │ ├── FLUX.1-dev-IP-Adapter.sh │ │ ├── FLUX.1-dev-InfiniteYou.sh │ │ ├── FLUX.1-dev.sh │ │ ├── Nexus-Gen.sh │ │ └── Step1X-Edit.sh │ │ ├── train.py │ │ ├── validate_full │ │ ├── FLEX.2-preview.py │ │ ├── FLUX.1-Kontext-dev.py │ │ ├── FLUX.1-Krea-dev.py │ │ ├── FLUX.1-dev-AttriCtrl.py │ │ ├── FLUX.1-dev-Controlnet-Inpainting-Beta.py │ │ ├── FLUX.1-dev-Controlnet-Union-alpha.py │ │ ├── FLUX.1-dev-Controlnet-Upscaler.py │ │ ├── FLUX.1-dev-IP-Adapter.py │ │ ├── FLUX.1-dev-InfiniteYou.py │ │ ├── FLUX.1-dev-LoRA-Encoder.py │ │ ├── FLUX.1-dev.py │ │ ├── Nexus-Gen.py │ │ └── Step1X-Edit.py │ │ └── validate_lora │ │ ├── FLEX.2-preview.py │ │ ├── FLUX.1-Kontext-dev.py │ │ ├── FLUX.1-Krea-dev.py │ │ ├── FLUX.1-dev-AttriCtrl.py │ │ ├── FLUX.1-dev-Controlnet-Inpainting-Beta.py 
│ │ ├── FLUX.1-dev-Controlnet-Union-alpha.py │ │ ├── FLUX.1-dev-Controlnet-Upscaler.py │ │ ├── FLUX.1-dev-EliGen.py │ │ ├── FLUX.1-dev-IP-Adapter.py │ │ ├── FLUX.1-dev-InfiniteYou.py │ │ ├── FLUX.1-dev.py │ │ ├── Nexus-Gen.py │ │ └── Step1X-Edit.py ├── flux2 │ ├── model_inference │ │ └── FLUX.2-dev.py │ ├── model_inference_low_vram │ │ └── FLUX.2-dev.py │ └── model_training │ │ ├── lora │ │ └── FLUX.2-dev.sh │ │ ├── train.py │ │ └── validate_lora │ │ └── FLUX.2-dev.py ├── qwen_image │ ├── model_inference │ │ ├── Qwen-Image-Blockwise-ControlNet-Canny.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Depth.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Inpaint.py │ │ ├── Qwen-Image-Distill-DMD2.py │ │ ├── Qwen-Image-Distill-Full.py │ │ ├── Qwen-Image-Distill-LoRA.py │ │ ├── Qwen-Image-Edit-2509.py │ │ ├── Qwen-Image-Edit-Lowres-Fix.py │ │ ├── Qwen-Image-Edit.py │ │ ├── Qwen-Image-EliGen-Poster.py │ │ ├── Qwen-Image-EliGen-V2.py │ │ ├── Qwen-Image-EliGen.py │ │ ├── Qwen-Image-In-Context-Control-Union.py │ │ └── Qwen-Image.py │ ├── model_inference_low_vram │ │ ├── Qwen-Image-Blockwise-ControlNet-Canny.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Depth.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Inpaint.py │ │ ├── Qwen-Image-Distill-DMD2.py │ │ ├── Qwen-Image-Distill-Full.py │ │ ├── Qwen-Image-Distill-LoRA.py │ │ ├── Qwen-Image-Edit-2509.py │ │ ├── Qwen-Image-Edit-Lowres-Fix.py │ │ ├── Qwen-Image-Edit.py │ │ ├── Qwen-Image-EliGen-Poster.py │ │ ├── Qwen-Image-EliGen-V2.py │ │ ├── Qwen-Image-EliGen.py │ │ ├── Qwen-Image-In-Context-Control-Union.py │ │ └── Qwen-Image.py │ └── model_training │ │ ├── full │ │ ├── Qwen-Image-Blockwise-ControlNet-Canny.sh │ │ ├── Qwen-Image-Blockwise-ControlNet-Depth.sh │ │ ├── Qwen-Image-Blockwise-ControlNet-Inpaint.sh │ │ ├── Qwen-Image-Distill-Full.sh │ │ ├── Qwen-Image-Edit-2509.sh │ │ ├── Qwen-Image-Edit.sh │ │ ├── Qwen-Image.sh │ │ ├── accelerate_config.yaml │ │ └── accelerate_config_zero2offload.yaml │ │ ├── lora │ │ ├── Qwen-Image-Blockwise-ControlNet-Canny.sh │ │ ├── Qwen-Image-Blockwise-ControlNet-Depth.sh │ │ ├── Qwen-Image-Blockwise-ControlNet-Inpaint.sh │ │ ├── Qwen-Image-Distill-Full.sh │ │ ├── Qwen-Image-Distill-LoRA.sh │ │ ├── Qwen-Image-Edit-2509.sh │ │ ├── Qwen-Image-Edit.sh │ │ ├── Qwen-Image-EliGen-Poster.sh │ │ ├── Qwen-Image-EliGen.sh │ │ ├── Qwen-Image-In-Context-Control-Union.sh │ │ └── Qwen-Image.sh │ │ ├── scripts │ │ ├── Qwen-Image-Blockwise-ControlNet-Initialize.py │ │ └── Qwen-Image-Blockwise-ControlNet-Inpaint-Initialize.py │ │ ├── special │ │ ├── differential_training │ │ │ └── Qwen-Image-LoRA.sh │ │ ├── fp8_training │ │ │ ├── Qwen-Image-LoRA.sh │ │ │ └── validate.py │ │ ├── low_vram_training │ │ │ └── Qwen-Image-LoRA.sh │ │ ├── simple │ │ │ └── train.py │ │ └── split_training │ │ │ ├── Qwen-Image-LoRA.sh │ │ │ └── validate.py │ │ ├── train.py │ │ ├── validate_full │ │ ├── Qwen-Image-Blockwise-ControlNet-Canny.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Depth.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Inpaint.py │ │ ├── Qwen-Image-Distill-Full.py │ │ ├── Qwen-Image-Edit-2509.py │ │ ├── Qwen-Image-Edit.py │ │ └── Qwen-Image.py │ │ └── validate_lora │ │ ├── Qwen-Image-Blockwise-ControlNet-Canny.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Depth.py │ │ ├── Qwen-Image-Blockwise-ControlNet-Inpaint.py │ │ ├── Qwen-Image-Distill-Full.py │ │ ├── Qwen-Image-Distill-LoRA.py │ │ ├── Qwen-Image-Edit-2509.py │ │ ├── Qwen-Image-Edit.py │ │ ├── Qwen-Image-EliGen-Poster.py │ │ ├── Qwen-Image-EliGen.py │ │ ├── Qwen-Image-In-Context-Control-Union.py │ │ └── Qwen-Image.py 
├── wanvideo │ ├── model_inference │ │ ├── LongCat-Video.py │ │ ├── Video-As-Prompt-Wan2.1-14B.py │ │ ├── Wan2.1-1.3b-speedcontrol-v1.py │ │ ├── Wan2.1-FLF2V-14B-720P.py │ │ ├── Wan2.1-Fun-1.3B-Control.py │ │ ├── Wan2.1-Fun-1.3B-InP.py │ │ ├── Wan2.1-Fun-14B-Control.py │ │ ├── Wan2.1-Fun-14B-InP.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-InP.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control.py │ │ ├── Wan2.1-Fun-V1.1-14B-InP.py │ │ ├── Wan2.1-I2V-14B-480P.py │ │ ├── Wan2.1-I2V-14B-720P.py │ │ ├── Wan2.1-T2V-1.3B.py │ │ ├── Wan2.1-T2V-14B.py │ │ ├── Wan2.1-VACE-1.3B-Preview.py │ │ ├── Wan2.1-VACE-1.3B.py │ │ ├── Wan2.1-VACE-14B.py │ │ ├── Wan2.2-Animate-14B.py │ │ ├── Wan2.2-Fun-A14B-Control-Camera.py │ │ ├── Wan2.2-Fun-A14B-Control.py │ │ ├── Wan2.2-Fun-A14B-InP.py │ │ ├── Wan2.2-I2V-A14B.py │ │ ├── Wan2.2-S2V-14B.py │ │ ├── Wan2.2-S2V-14B_multi_clips.py │ │ ├── Wan2.2-T2V-A14B.py │ │ ├── Wan2.2-TI2V-5B.py │ │ ├── Wan2.2-VACE-Fun-A14B.py │ │ └── krea-realtime-video.py │ ├── model_inference_low_vram │ │ ├── LongCat-Video.py │ │ ├── Video-As-Prompt-Wan2.1-14B.py │ │ ├── Wan2.1-1.3b-speedcontrol-v1.py │ │ ├── Wan2.1-FLF2V-14B-720P.py │ │ ├── Wan2.1-Fun-1.3B-Control.py │ │ ├── Wan2.1-Fun-1.3B-InP.py │ │ ├── Wan2.1-Fun-14B-Control.py │ │ ├── Wan2.1-Fun-14B-InP.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-InP.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control.py │ │ ├── Wan2.1-Fun-V1.1-14B-InP.py │ │ ├── Wan2.1-I2V-14B-480P.py │ │ ├── Wan2.1-I2V-14B-720P.py │ │ ├── Wan2.1-T2V-1.3B.py │ │ ├── Wan2.1-T2V-14B.py │ │ ├── Wan2.1-VACE-1.3B-Preview.py │ │ ├── Wan2.1-VACE-1.3B.py │ │ ├── Wan2.1-VACE-14B.py │ │ ├── Wan2.2-Animate-14B.py │ │ ├── Wan2.2-Fun-A14B-Control-Camera.py │ │ ├── Wan2.2-Fun-A14B-Control.py │ │ ├── Wan2.2-Fun-A14B-InP.py │ │ ├── Wan2.2-I2V-A14B.py │ │ ├── Wan2.2-S2V-14B.py │ │ ├── Wan2.2-S2V-14B_multi_clips.py │ │ ├── Wan2.2-T2V-A14B.py │ │ ├── Wan2.2-TI2V-5B.py │ │ ├── Wan2.2-VACE-Fun-A14B.py │ │ └── krea-realtime-video.py │ └── model_training │ │ ├── full │ │ ├── LongCat-Video.sh │ │ ├── Video-As-Prompt-Wan2.1-14B.sh │ │ ├── Wan2.1-1.3b-speedcontrol-v1.sh │ │ ├── Wan2.1-FLF2V-14B-720P.sh │ │ ├── Wan2.1-Fun-1.3B-Control.sh │ │ ├── Wan2.1-Fun-1.3B-InP.sh │ │ ├── Wan2.1-Fun-14B-Control.sh │ │ ├── Wan2.1-Fun-14B-InP.sh │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control.sh │ │ ├── Wan2.1-Fun-V1.1-1.3B-InP.sh │ │ ├── Wan2.1-Fun-V1.1-14B-Control-Camera.sh │ │ ├── Wan2.1-Fun-V1.1-14B-Control.sh │ │ ├── Wan2.1-Fun-V1.1-14B-InP.sh │ │ ├── Wan2.1-I2V-14B-480P.sh │ │ ├── Wan2.1-I2V-14B-720P.sh │ │ ├── Wan2.1-T2V-1.3B.sh │ │ ├── Wan2.1-T2V-14B.sh │ │ ├── Wan2.1-VACE-1.3B-Preview.sh │ │ ├── Wan2.1-VACE-1.3B.sh │ │ ├── Wan2.1-VACE-14B.sh │ │ ├── Wan2.2-Animate-14B.sh │ │ ├── Wan2.2-Fun-A14B-Control-Camera.sh │ │ ├── Wan2.2-Fun-A14B-Control.sh │ │ ├── Wan2.2-Fun-A14B-InP.sh │ │ ├── Wan2.2-I2V-A14B.sh │ │ ├── Wan2.2-S2V-14B.sh │ │ ├── Wan2.2-T2V-A14B.sh │ │ ├── Wan2.2-TI2V-5B.sh │ │ ├── Wan2.2-VACE-Fun-A14B.sh │ │ ├── accelerate_config_14B.yaml │ │ └── krea-realtime-video.sh │ │ ├── lora │ │ ├── LongCat-Video.sh │ │ ├── Video-As-Prompt-Wan2.1-14B.sh │ │ ├── Wan2.1-1.3b-speedcontrol-v1.sh │ │ ├── Wan2.1-FLF2V-14B-720P.sh │ │ ├── Wan2.1-Fun-1.3B-Control.sh │ │ ├── Wan2.1-Fun-1.3B-InP.sh │ │ ├── Wan2.1-Fun-14B-Control.sh │ │ ├── Wan2.1-Fun-14B-InP.sh │ 
│ ├── Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control.sh │ │ ├── Wan2.1-Fun-V1.1-1.3B-InP.sh │ │ ├── Wan2.1-Fun-V1.1-14B-Control-Camera.sh │ │ ├── Wan2.1-Fun-V1.1-14B-Control.sh │ │ ├── Wan2.1-Fun-V1.1-14B-InP.sh │ │ ├── Wan2.1-I2V-14B-480P.sh │ │ ├── Wan2.1-I2V-14B-720P.sh │ │ ├── Wan2.1-T2V-1.3B.sh │ │ ├── Wan2.1-T2V-14B.sh │ │ ├── Wan2.1-VACE-1.3B-Preview.sh │ │ ├── Wan2.1-VACE-1.3B.sh │ │ ├── Wan2.1-VACE-14B.sh │ │ ├── Wan2.2-Animate-14B.sh │ │ ├── Wan2.2-Fun-A14B-Control-Camera.sh │ │ ├── Wan2.2-Fun-A14B-Control.sh │ │ ├── Wan2.2-Fun-A14B-InP.sh │ │ ├── Wan2.2-I2V-A14B.sh │ │ ├── Wan2.2-S2V-14B.sh │ │ ├── Wan2.2-T2V-A14B.sh │ │ ├── Wan2.2-TI2V-5B.sh │ │ ├── Wan2.2-VACE-Fun-A14B.sh │ │ └── krea-realtime-video.sh │ │ ├── special │ │ ├── direct_distill │ │ │ ├── Wan2.1-T2V-1.3B.sh │ │ │ └── validate.py │ │ ├── fp8_training │ │ │ ├── Wan2.1-I2V-14B-480P.sh │ │ │ └── validate.py │ │ ├── low_vram_training │ │ │ ├── Wan2.1-I2V-14B-480P.sh │ │ │ └── validate.py │ │ └── split_training │ │ │ ├── Wan2.1-I2V-14B-480P.sh │ │ │ └── validate.py │ │ ├── train.py │ │ ├── validate_full │ │ ├── LongCat-Video.py │ │ ├── Video-As-Prompt-Wan2.1-14B.py │ │ ├── Wan2.1-1.3b-speedcontrol-v1.py │ │ ├── Wan2.1-FLF2V-14B-720P.py │ │ ├── Wan2.1-Fun-1.3B-Control.py │ │ ├── Wan2.1-Fun-1.3B-InP.py │ │ ├── Wan2.1-Fun-14B-Control.py │ │ ├── Wan2.1-Fun-14B-InP.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-InP.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control.py │ │ ├── Wan2.1-Fun-V1.1-14B-InP.py │ │ ├── Wan2.1-I2V-14B-480P.py │ │ ├── Wan2.1-I2V-14B-720P.py │ │ ├── Wan2.1-T2V-1.3B.py │ │ ├── Wan2.1-T2V-14B.py │ │ ├── Wan2.1-VACE-1.3B-Preview.py │ │ ├── Wan2.1-VACE-1.3B.py │ │ ├── Wan2.1-VACE-14B.py │ │ ├── Wan2.2-Animate-14B.py │ │ ├── Wan2.2-Fun-A14B-Control-Camera.py │ │ ├── Wan2.2-Fun-A14B-Control.py │ │ ├── Wan2.2-Fun-A14B-InP.py │ │ ├── Wan2.2-I2V-A14B.py │ │ ├── Wan2.2-S2V-14B.py │ │ ├── Wan2.2-T2V-A14B.py │ │ ├── Wan2.2-TI2V-5B.py │ │ ├── Wan2.2-VACE-Fun-A14B.py │ │ └── krea-realtime-video.py │ │ └── validate_lora │ │ ├── LongCat-Video.py │ │ ├── Video-As-Prompt-Wan2.1-14B.py │ │ ├── Wan2.1-1.3b-speedcontrol-v1.py │ │ ├── Wan2.1-FLF2V-14B-720P.py │ │ ├── Wan2.1-Fun-1.3B-Control.py │ │ ├── Wan2.1-Fun-1.3B-InP.py │ │ ├── Wan2.1-Fun-14B-Control.py │ │ ├── Wan2.1-Fun-14B-InP.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-Control.py │ │ ├── Wan2.1-Fun-V1.1-1.3B-InP.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control-Camera.py │ │ ├── Wan2.1-Fun-V1.1-14B-Control.py │ │ ├── Wan2.1-Fun-V1.1-14B-InP.py │ │ ├── Wan2.1-I2V-14B-480P.py │ │ ├── Wan2.1-I2V-14B-720P.py │ │ ├── Wan2.1-T2V-1.3B.py │ │ ├── Wan2.1-T2V-14B.py │ │ ├── Wan2.1-VACE-1.3B-Preview.py │ │ ├── Wan2.1-VACE-1.3B.py │ │ ├── Wan2.1-VACE-14B.py │ │ ├── Wan2.2-Animate-14B.py │ │ ├── Wan2.2-Fun-A14B-Control-Camera.py │ │ ├── Wan2.2-Fun-A14B-Control.py │ │ ├── Wan2.2-Fun-A14B-InP.py │ │ ├── Wan2.2-I2V-A14B.py │ │ ├── Wan2.2-S2V-14B.py │ │ ├── Wan2.2-T2V-A14B.py │ │ ├── Wan2.2-TI2V-5B.py │ │ ├── Wan2.2-VACE-Fun-A14B.py │ │ └── krea-realtime-video.py └── z_image │ ├── model_inference │ └── Z-Image-Turbo.py │ ├── model_inference_low_vram │ └── Z-Image-Turbo.py │ └── model_training │ ├── full │ ├── Z-Image-Turbo.sh │ └── accelerate_config.yaml │ ├── lora │ └── Z-Image-Turbo.sh │ ├── special │ ├── differential_training │ │ ├── Z-Image-Turbo.sh │ │ └── validate.py │ └── trajectory_imitation │ │ ├── Z-Image-Turbo.sh │ │ 
└── validate.py │ ├── train.py │ ├── validate_full │ └── Z-Image-Turbo.py │ └── validate_lora │ └── Z-Image-Turbo.py └── pyproject.toml

After the tree, the dump lists one stub entry per file. Each stub repeats the file path and, in place of the file body, gives a link of the form https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/<path>. Only two files in this section carry their content inline: /diffsynth/__init__.py, whose entire body is "from .core import *", and /diffsynth/utils/state_dict_converters/__init__.py, which is empty. The stub listing reproduced here runs from /.github/workflows/logo.gif through /docs/zh/Training/Direct_Distill.md, where the section is cut off.
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/docs/zh/Training/Direct_Distill.md -------------------------------------------------------------------------------- /docs/zh/Training/FP8_Precision.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/docs/zh/Training/FP8_Precision.md -------------------------------------------------------------------------------- /docs/zh/Training/Split_Training.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/docs/zh/Training/Split_Training.md -------------------------------------------------------------------------------- /docs/zh/Training/Supervised_Fine_Tuning.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/docs/zh/Training/Supervised_Fine_Tuning.md -------------------------------------------------------------------------------- /docs/zh/Training/Understanding_Diffusion_models.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/docs/zh/Training/Understanding_Diffusion_models.md -------------------------------------------------------------------------------- /examples/dev_tools/fix_path.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/dev_tools/fix_path.py -------------------------------------------------------------------------------- /examples/dev_tools/unit_test.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/dev_tools/unit_test.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLEX.2-preview.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLEX.2-preview.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-Kontext-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-Kontext-dev.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-Krea-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-Krea-dev.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-AttriCtrl.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-AttriCtrl.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-Controlnet-Inpainting-Beta.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-Controlnet-Inpainting-Beta.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-Controlnet-Union-alpha.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-Controlnet-Union-alpha.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-Controlnet-Upscaler.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-Controlnet-Upscaler.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-EliGen.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-EliGen.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-IP-Adapter.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-IP-Adapter.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-InfiniteYou.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-InfiniteYou.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-LoRA-Encoder.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-LoRA-Encoder.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev-LoRA-Fusion.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev-LoRA-Fusion.py -------------------------------------------------------------------------------- /examples/flux/model_inference/FLUX.1-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/FLUX.1-dev.py -------------------------------------------------------------------------------- /examples/flux/model_inference/Nexus-Gen-Editing.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/Nexus-Gen-Editing.py -------------------------------------------------------------------------------- /examples/flux/model_inference/Nexus-Gen-Generation.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/Nexus-Gen-Generation.py -------------------------------------------------------------------------------- /examples/flux/model_inference/Step1X-Edit.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference/Step1X-Edit.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLEX.2-preview.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLEX.2-preview.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-Kontext-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-Kontext-dev.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-Krea-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-Krea-dev.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-AttriCtrl.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-AttriCtrl.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-Controlnet-Inpainting-Beta.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-Controlnet-Inpainting-Beta.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-Controlnet-Union-alpha.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-Controlnet-Union-alpha.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-Controlnet-Upscaler.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-Controlnet-Upscaler.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-EliGen.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-EliGen.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-IP-Adapter.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-IP-Adapter.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-InfiniteYou.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-InfiniteYou.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-LoRA-Encoder.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-LoRA-Encoder.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev-LoRA-Fusion.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev-LoRA-Fusion.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/FLUX.1-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/FLUX.1-dev.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/Nexus-Gen-Editing.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/Nexus-Gen-Editing.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/Nexus-Gen-Generation.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/Nexus-Gen-Generation.py -------------------------------------------------------------------------------- /examples/flux/model_inference_low_vram/Step1X-Edit.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_inference_low_vram/Step1X-Edit.py -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLEX.2-preview.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLEX.2-preview.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-Kontext-dev.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-Kontext-dev.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-Krea-dev.sh: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-Krea-dev.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev-AttriCtrl.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev-AttriCtrl.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev-Controlnet-Inpainting-Beta.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Inpainting-Beta.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev-Controlnet-Union-alpha.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Union-alpha.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev-Controlnet-Upscaler.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Upscaler.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev-IP-Adapter.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev-IP-Adapter.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev-InfiniteYou.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev-InfiniteYou.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev-LoRA-Encoder.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev-LoRA-Encoder.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/FLUX.1-dev.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/FLUX.1-dev.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/Nexus-Gen.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/Nexus-Gen.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/Step1X-Edit.sh: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/Step1X-Edit.sh -------------------------------------------------------------------------------- /examples/flux/model_training/full/accelerate_config.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/accelerate_config.yaml -------------------------------------------------------------------------------- /examples/flux/model_training/full/accelerate_config_zero2offload.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/full/accelerate_config_zero2offload.yaml -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLEX.2-preview.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLEX.2-preview.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-Kontext-dev.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-Kontext-dev.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-Krea-dev.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-Krea-dev.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev-AttriCtrl.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev-AttriCtrl.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Inpainting-Beta.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Inpainting-Beta.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Union-alpha.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Union-alpha.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Upscaler.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Upscaler.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev-EliGen.sh: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev-EliGen.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev-IP-Adapter.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev-IP-Adapter.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev-InfiniteYou.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev-InfiniteYou.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/FLUX.1-dev.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/FLUX.1-dev.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/Nexus-Gen.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/Nexus-Gen.sh -------------------------------------------------------------------------------- /examples/flux/model_training/lora/Step1X-Edit.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/lora/Step1X-Edit.sh -------------------------------------------------------------------------------- /examples/flux/model_training/train.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/train.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLEX.2-preview.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLEX.2-preview.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-Kontext-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-Kontext-dev.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-Krea-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-Krea-dev.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev-AttriCtrl.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev-AttriCtrl.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Inpainting-Beta.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Inpainting-Beta.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Union-alpha.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Union-alpha.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Upscaler.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Upscaler.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev-IP-Adapter.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev-IP-Adapter.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev-InfiniteYou.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev-InfiniteYou.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev-LoRA-Encoder.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev-LoRA-Encoder.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/FLUX.1-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/FLUX.1-dev.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/Nexus-Gen.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/Nexus-Gen.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_full/Step1X-Edit.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_full/Step1X-Edit.py -------------------------------------------------------------------------------- 
/examples/flux/model_training/validate_lora/FLEX.2-preview.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLEX.2-preview.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-Kontext-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-Kontext-dev.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-Krea-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-Krea-dev.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev-AttriCtrl.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev-AttriCtrl.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Inpainting-Beta.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Inpainting-Beta.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Union-alpha.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Union-alpha.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Upscaler.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Upscaler.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev-EliGen.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev-EliGen.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev-IP-Adapter.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev-IP-Adapter.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev-InfiniteYou.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev-InfiniteYou.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/FLUX.1-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/FLUX.1-dev.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/Nexus-Gen.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/Nexus-Gen.py -------------------------------------------------------------------------------- /examples/flux/model_training/validate_lora/Step1X-Edit.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux/model_training/validate_lora/Step1X-Edit.py -------------------------------------------------------------------------------- /examples/flux2/model_inference/FLUX.2-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux2/model_inference/FLUX.2-dev.py -------------------------------------------------------------------------------- /examples/flux2/model_inference_low_vram/FLUX.2-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux2/model_inference_low_vram/FLUX.2-dev.py -------------------------------------------------------------------------------- /examples/flux2/model_training/lora/FLUX.2-dev.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux2/model_training/lora/FLUX.2-dev.sh -------------------------------------------------------------------------------- /examples/flux2/model_training/train.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux2/model_training/train.py -------------------------------------------------------------------------------- /examples/flux2/model_training/validate_lora/FLUX.2-dev.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/flux2/model_training/validate_lora/FLUX.2-dev.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Canny.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Canny.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Depth.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Depth.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Inpaint.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Inpaint.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Distill-DMD2.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Distill-DMD2.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Distill-Full.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Distill-Full.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Distill-LoRA.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Distill-LoRA.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Edit-2509.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Edit-2509.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Edit-Lowres-Fix.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Edit-Lowres-Fix.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-Edit.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-Edit.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-EliGen-Poster.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-EliGen-Poster.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-EliGen-V2.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-EliGen-V2.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-EliGen.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-EliGen.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image-In-Context-Control-Union.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image-In-Context-Control-Union.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference/Qwen-Image.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference/Qwen-Image.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Canny.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Canny.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Depth.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Depth.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Inpaint.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Inpaint.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-DMD2.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-DMD2.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-Full.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-Full.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-LoRA.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-LoRA.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-2509.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-2509.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-Lowres-Fix.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-Lowres-Fix.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen-Poster.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen-Poster.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen-V2.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen-V2.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image-In-Context-Control-Union.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image-In-Context-Control-Union.py -------------------------------------------------------------------------------- /examples/qwen_image/model_inference_low_vram/Qwen-Image.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_inference_low_vram/Qwen-Image.py -------------------------------------------------------------------------------- /examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Canny.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Canny.sh -------------------------------------------------------------------------------- /examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Depth.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Depth.sh 
(Each example file below is linked as https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD<path>.)

/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Inpaint.sh
/examples/qwen_image/model_training/full/Qwen-Image-Distill-Full.sh
/examples/qwen_image/model_training/full/Qwen-Image-Edit-2509.sh
/examples/qwen_image/model_training/full/Qwen-Image-Edit.sh
/examples/qwen_image/model_training/full/Qwen-Image.sh
/examples/qwen_image/model_training/full/accelerate_config.yaml
/examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml

/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Canny.sh
/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Depth.sh
/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Inpaint.sh
/examples/qwen_image/model_training/lora/Qwen-Image-Distill-Full.sh
/examples/qwen_image/model_training/lora/Qwen-Image-Distill-LoRA.sh
/examples/qwen_image/model_training/lora/Qwen-Image-Edit-2509.sh
/examples/qwen_image/model_training/lora/Qwen-Image-Edit.sh
/examples/qwen_image/model_training/lora/Qwen-Image-EliGen-Poster.sh
/examples/qwen_image/model_training/lora/Qwen-Image-EliGen.sh
/examples/qwen_image/model_training/lora/Qwen-Image-In-Context-Control-Union.sh
/examples/qwen_image/model_training/lora/Qwen-Image.sh

/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Initialize.py
/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Inpaint-Initialize.py

/examples/qwen_image/model_training/special/differential_training/Qwen-Image-LoRA.sh
/examples/qwen_image/model_training/special/fp8_training/Qwen-Image-LoRA.sh
/examples/qwen_image/model_training/special/fp8_training/validate.py
/examples/qwen_image/model_training/special/low_vram_training/Qwen-Image-LoRA.sh
/examples/qwen_image/model_training/special/simple/train.py
/examples/qwen_image/model_training/special/split_training/Qwen-Image-LoRA.sh
/examples/qwen_image/model_training/special/split_training/validate.py

/examples/qwen_image/model_training/train.py

/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Canny.py
/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Depth.py
/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Inpaint.py
/examples/qwen_image/model_training/validate_full/Qwen-Image-Distill-Full.py
/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit-2509.py
/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit.py
/examples/qwen_image/model_training/validate_full/Qwen-Image.py

/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Canny.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Depth.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Inpaint.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-Distill-Full.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-Distill-LoRA.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit-2509.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-EliGen-Poster.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-EliGen.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image-In-Context-Control-Union.py
/examples/qwen_image/model_training/validate_lora/Qwen-Image.py

/examples/wanvideo/model_inference/LongCat-Video.py
/examples/wanvideo/model_inference/Video-As-Prompt-Wan2.1-14B.py
/examples/wanvideo/model_inference/Wan2.1-1.3b-speedcontrol-v1.py
/examples/wanvideo/model_inference/Wan2.1-FLF2V-14B-720P.py
/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-Control.py
/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-InP.py
/examples/wanvideo/model_inference/Wan2.1-Fun-14B-Control.py
/examples/wanvideo/model_inference/Wan2.1-Fun-14B-InP.py
/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py
/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-InP.py
/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control-Camera.py
/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control.py
/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-InP.py
/examples/wanvideo/model_inference/Wan2.1-I2V-14B-480P.py
/examples/wanvideo/model_inference/Wan2.1-I2V-14B-720P.py
/examples/wanvideo/model_inference/Wan2.1-T2V-1.3B.py
/examples/wanvideo/model_inference/Wan2.1-T2V-14B.py
/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B-Preview.py
/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B.py
/examples/wanvideo/model_inference/Wan2.1-VACE-14B.py
/examples/wanvideo/model_inference/Wan2.2-Animate-14B.py
/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control-Camera.py
/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control.py
/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-InP.py
/examples/wanvideo/model_inference/Wan2.2-I2V-A14B.py
/examples/wanvideo/model_inference/Wan2.2-S2V-14B.py
/examples/wanvideo/model_inference/Wan2.2-S2V-14B_multi_clips.py
/examples/wanvideo/model_inference/Wan2.2-T2V-A14B.py
/examples/wanvideo/model_inference/Wan2.2-TI2V-5B.py
/examples/wanvideo/model_inference/Wan2.2-VACE-Fun-A14B.py
/examples/wanvideo/model_inference/krea-realtime-video.py

/examples/wanvideo/model_inference_low_vram/LongCat-Video.py
/examples/wanvideo/model_inference_low_vram/Video-As-Prompt-Wan2.1-14B.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-1.3b-speedcontrol-v1.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-FLF2V-14B-720P.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-1.3B-Control.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-1.3B-InP.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-14B-Control.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-14B-InP.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-V1.1-1.3B-Control.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-V1.1-1.3B-InP.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-V1.1-14B-Control-Camera.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-V1.1-14B-Control.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-Fun-V1.1-14B-InP.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-I2V-14B-480P.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-I2V-14B-720P.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-T2V-1.3B.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-T2V-14B.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-VACE-1.3B-Preview.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-VACE-1.3B.py
/examples/wanvideo/model_inference_low_vram/Wan2.1-VACE-14B.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-Animate-14B.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-Fun-A14B-Control-Camera.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-Fun-A14B-Control.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-Fun-A14B-InP.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-I2V-A14B.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-S2V-14B.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-S2V-14B_multi_clips.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-T2V-A14B.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-TI2V-5B.py
/examples/wanvideo/model_inference_low_vram/Wan2.2-VACE-Fun-A14B.py
/examples/wanvideo/model_inference_low_vram/krea-realtime-video.py

/examples/wanvideo/model_training/full/LongCat-Video.sh
/examples/wanvideo/model_training/full/Video-As-Prompt-Wan2.1-14B.sh
/examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh
/examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-InP.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control-Camera.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh
/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-InP.sh
/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh
/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh
/examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh
/examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh
/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B-Preview.sh
/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B.sh
/examples/wanvideo/model_training/full/Wan2.1-VACE-14B.sh
/examples/wanvideo/model_training/full/Wan2.2-Animate-14B.sh
/examples/wanvideo/model_training/full/Wan2.2-Fun-A14B-Control-Camera.sh
/examples/wanvideo/model_training/full/Wan2.2-Fun-A14B-Control.sh
/examples/wanvideo/model_training/full/Wan2.2-Fun-A14B-InP.sh
/examples/wanvideo/model_training/full/Wan2.2-I2V-A14B.sh
/examples/wanvideo/model_training/full/Wan2.2-S2V-14B.sh
/examples/wanvideo/model_training/full/Wan2.2-T2V-A14B.sh
/examples/wanvideo/model_training/full/Wan2.2-TI2V-5B.sh
/examples/wanvideo/model_training/full/Wan2.2-VACE-Fun-A14B.sh
/examples/wanvideo/model_training/full/accelerate_config_14B.yaml
/examples/wanvideo/model_training/full/krea-realtime-video.sh

/examples/wanvideo/model_training/lora/LongCat-Video.sh
/examples/wanvideo/model_training/lora/Video-As-Prompt-Wan2.1-14B.sh
/examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh
/examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-InP.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control-Camera.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh
/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-InP.sh
/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh
/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh
/examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh
/examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh
/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B-Preview.sh
/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B.sh
/examples/wanvideo/model_training/lora/Wan2.1-VACE-14B.sh
/examples/wanvideo/model_training/lora/Wan2.2-Animate-14B.sh
/examples/wanvideo/model_training/lora/Wan2.2-Fun-A14B-Control-Camera.sh
/examples/wanvideo/model_training/lora/Wan2.2-Fun-A14B-Control.sh
/examples/wanvideo/model_training/lora/Wan2.2-Fun-A14B-InP.sh
/examples/wanvideo/model_training/lora/Wan2.2-I2V-A14B.sh
/examples/wanvideo/model_training/lora/Wan2.2-S2V-14B.sh
/examples/wanvideo/model_training/lora/Wan2.2-T2V-A14B.sh
/examples/wanvideo/model_training/lora/Wan2.2-TI2V-5B.sh
/examples/wanvideo/model_training/lora/Wan2.2-VACE-Fun-A14B.sh
/examples/wanvideo/model_training/lora/krea-realtime-video.sh

/examples/wanvideo/model_training/special/direct_distill/Wan2.1-T2V-1.3B.sh
/examples/wanvideo/model_training/special/direct_distill/validate.py
/examples/wanvideo/model_training/special/fp8_training/Wan2.1-I2V-14B-480P.sh
/examples/wanvideo/model_training/special/fp8_training/validate.py
/examples/wanvideo/model_training/special/low_vram_training/Wan2.1-I2V-14B-480P.sh
/examples/wanvideo/model_training/special/low_vram_training/validate.py
/examples/wanvideo/model_training/special/split_training/Wan2.1-I2V-14B-480P.sh
/examples/wanvideo/model_training/special/split_training/validate.py

/examples/wanvideo/model_training/train.py

/examples/wanvideo/model_training/validate_full/LongCat-Video.py
/examples/wanvideo/model_training/validate_full/Video-As-Prompt-Wan2.1-14B.py
/examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py
/examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control-Camera.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py
/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-InP.py
/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py
/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py
/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py
/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py
/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B-Preview.py
/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B.py
-------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.1-VACE-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-Animate-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-Animate-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-Fun-A14B-Control-Camera.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-Fun-A14B-Control-Camera.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-Fun-A14B-Control.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-Fun-A14B-Control.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-Fun-A14B-InP.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-Fun-A14B-InP.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-I2V-A14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-I2V-A14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-S2V-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-S2V-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-T2V-A14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-T2V-A14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-TI2V-5B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-TI2V-5B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/Wan2.2-VACE-Fun-A14B.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/Wan2.2-VACE-Fun-A14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_full/krea-realtime-video.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_full/krea-realtime-video.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/LongCat-Video.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/LongCat-Video.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Video-As-Prompt-Wan2.1-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Video-As-Prompt-Wan2.1-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-InP.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-InP.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control-Camera.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control-Camera.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-InP.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-InP.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B-Preview.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B-Preview.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-Animate-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-Animate-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-Fun-A14B-Control-Camera.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-Fun-A14B-Control-Camera.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-Fun-A14B-Control.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-Fun-A14B-Control.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-Fun-A14B-InP.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-Fun-A14B-InP.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-I2V-A14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-I2V-A14B.py 
-------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-S2V-14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-S2V-14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-T2V-A14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-T2V-A14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-TI2V-5B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-TI2V-5B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/Wan2.2-VACE-Fun-A14B.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/Wan2.2-VACE-Fun-A14B.py -------------------------------------------------------------------------------- /examples/wanvideo/model_training/validate_lora/krea-realtime-video.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/wanvideo/model_training/validate_lora/krea-realtime-video.py -------------------------------------------------------------------------------- /examples/z_image/model_inference/Z-Image-Turbo.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_inference/Z-Image-Turbo.py -------------------------------------------------------------------------------- /examples/z_image/model_inference_low_vram/Z-Image-Turbo.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_inference_low_vram/Z-Image-Turbo.py -------------------------------------------------------------------------------- /examples/z_image/model_training/full/Z-Image-Turbo.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/full/Z-Image-Turbo.sh -------------------------------------------------------------------------------- /examples/z_image/model_training/full/accelerate_config.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/full/accelerate_config.yaml -------------------------------------------------------------------------------- /examples/z_image/model_training/lora/Z-Image-Turbo.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/lora/Z-Image-Turbo.sh 
-------------------------------------------------------------------------------- /examples/z_image/model_training/special/differential_training/Z-Image-Turbo.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/special/differential_training/Z-Image-Turbo.sh -------------------------------------------------------------------------------- /examples/z_image/model_training/special/differential_training/validate.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/special/differential_training/validate.py -------------------------------------------------------------------------------- /examples/z_image/model_training/special/trajectory_imitation/Z-Image-Turbo.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/special/trajectory_imitation/Z-Image-Turbo.sh -------------------------------------------------------------------------------- /examples/z_image/model_training/special/trajectory_imitation/validate.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/special/trajectory_imitation/validate.py -------------------------------------------------------------------------------- /examples/z_image/model_training/train.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/train.py -------------------------------------------------------------------------------- /examples/z_image/model_training/validate_full/Z-Image-Turbo.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/validate_full/Z-Image-Turbo.py -------------------------------------------------------------------------------- /examples/z_image/model_training/validate_lora/Z-Image-Turbo.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/examples/z_image/model_training/validate_lora/Z-Image-Turbo.py -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/DiffSynth-Studio/HEAD/pyproject.toml --------------------------------------------------------------------------------