├── .github ├── ISSUE_TEMPLATE │ ├── bug-report.yml │ ├── config.yml │ ├── feature_request.md │ ├── feedback.md │ └── new-model-addition.yml ├── actions │ └── setup-miniconda │ │ └── action.yml └── workflows │ ├── build_docker_images.yml │ ├── build_documentation.yml │ ├── build_pr_documentation.yml │ ├── delete_doc_comment.yml │ ├── delete_doc_comment_trigger.yml │ ├── nightly_tests.yml │ ├── pr_quality.yml │ ├── pr_tests.yml │ ├── push_tests.yml │ ├── push_tests_fast.yml │ ├── push_tests_mps.yml │ ├── stale.yml │ ├── typos.yml │ └── upload_pr_documentation.yml ├── .gitignore ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── PHILOSOPHY.md ├── README.md ├── _typos.toml ├── docker ├── diffusers-flax-cpu │ └── Dockerfile ├── diffusers-flax-tpu │ └── Dockerfile ├── diffusers-onnxruntime-cpu │ └── Dockerfile ├── diffusers-onnxruntime-cuda │ └── Dockerfile ├── diffusers-pytorch-cpu │ └── Dockerfile └── diffusers-pytorch-cuda │ └── Dockerfile ├── docs ├── README.md ├── TRANSLATING.md └── source │ ├── _config.py │ ├── en │ ├── _toctree.yml │ ├── api │ │ ├── attnprocessor.mdx │ │ ├── configuration.mdx │ │ ├── diffusion_pipeline.mdx │ │ ├── experimental │ │ │ └── rl.mdx │ │ ├── image_processor.mdx │ │ ├── loaders.mdx │ │ ├── logging.mdx │ │ ├── models.mdx │ │ ├── outputs.mdx │ │ ├── pipelines │ │ │ ├── alt_diffusion.mdx │ │ │ ├── attend_and_excite.mdx │ │ │ ├── audio_diffusion.mdx │ │ │ ├── audioldm.mdx │ │ │ ├── controlnet.mdx │ │ │ ├── cycle_diffusion.mdx │ │ │ ├── dance_diffusion.mdx │ │ │ ├── ddim.mdx │ │ │ ├── ddpm.mdx │ │ │ ├── diffedit.mdx │ │ │ ├── dit.mdx │ │ │ ├── if.mdx │ │ │ ├── kandinsky.mdx │ │ │ ├── latent_diffusion.mdx │ │ │ ├── latent_diffusion_uncond.mdx │ │ │ ├── model_editing.mdx │ │ │ ├── overview.mdx │ │ │ ├── paint_by_example.mdx │ │ │ ├── panorama.mdx │ │ │ ├── pix2pix.mdx │ │ │ ├── pix2pix_zero.mdx │ │ │ ├── pndm.mdx │ │ │ ├── repaint.mdx │ │ │ ├── score_sde_ve.mdx │ │ │ ├── self_attention_guidance.mdx │ │ │ ├── semantic_stable_diffusion.mdx │ │ │ ├── spectrogram_diffusion.mdx │ │ │ ├── stable_diffusion │ │ │ │ ├── depth2img.mdx │ │ │ │ ├── image_variation.mdx │ │ │ │ ├── img2img.mdx │ │ │ │ ├── inpaint.mdx │ │ │ │ ├── latent_upscale.mdx │ │ │ │ ├── overview.mdx │ │ │ │ ├── stable_diffusion_2.mdx │ │ │ │ ├── stable_diffusion_safe.mdx │ │ │ │ ├── text2img.mdx │ │ │ │ └── upscale.mdx │ │ │ ├── stable_unclip.mdx │ │ │ ├── stochastic_karras_ve.mdx │ │ │ ├── text_to_video.mdx │ │ │ ├── text_to_video_zero.mdx │ │ │ ├── unclip.mdx │ │ │ ├── unidiffuser.mdx │ │ │ ├── versatile_diffusion.mdx │ │ │ └── vq_diffusion.mdx │ │ ├── schedulers │ │ │ ├── ddim.mdx │ │ │ ├── ddim_inverse.mdx │ │ │ ├── ddpm.mdx │ │ │ ├── deis.mdx │ │ │ ├── dpm_discrete.mdx │ │ │ ├── dpm_discrete_ancestral.mdx │ │ │ ├── dpm_sde.mdx │ │ │ ├── euler.mdx │ │ │ ├── euler_ancestral.mdx │ │ │ ├── heun.mdx │ │ │ ├── ipndm.mdx │ │ │ ├── lms_discrete.mdx │ │ │ ├── multistep_dpm_solver.mdx │ │ │ ├── multistep_dpm_solver_inverse.mdx │ │ │ ├── overview.mdx │ │ │ ├── pndm.mdx │ │ │ ├── repaint.mdx │ │ │ ├── score_sde_ve.mdx │ │ │ ├── score_sde_vp.mdx │ │ │ ├── singlestep_dpm_solver.mdx │ │ │ ├── stochastic_karras_ve.mdx │ │ │ ├── unipc.mdx │ │ │ └── vq_diffusion.mdx │ │ └── utilities.mdx │ ├── conceptual │ │ ├── contribution.mdx │ │ ├── ethical_guidelines.mdx │ │ ├── evaluation.mdx │ │ └── philosophy.mdx │ ├── imgs │ │ ├── access_request.png │ │ └── diffusers_library.jpg │ ├── index.mdx │ ├── installation.mdx │ ├── optimization │ │ ├── coreml.mdx │ │ ├── fp16.mdx │ │ 
├── habana.mdx │ │ ├── mps.mdx │ │ ├── onnx.mdx │ │ ├── open_vino.mdx │ │ ├── opt_overview.mdx │ │ ├── tome.mdx │ │ ├── torch2.0.mdx │ │ └── xformers.mdx │ ├── quicktour.mdx │ ├── stable_diffusion.mdx │ ├── training │ │ ├── adapt_a_model.mdx │ │ ├── controlnet.mdx │ │ ├── create_dataset.mdx │ │ ├── custom_diffusion.mdx │ │ ├── distributed_inference.mdx │ │ ├── dreambooth.mdx │ │ ├── instructpix2pix.mdx │ │ ├── lora.mdx │ │ ├── overview.mdx │ │ ├── text2image.mdx │ │ ├── text_inversion.mdx │ │ └── unconditional_training.mdx │ ├── tutorials │ │ ├── basic_training.mdx │ │ └── tutorial_overview.mdx │ └── using-diffusers │ │ ├── audio.mdx │ │ ├── conditional_image_generation.mdx │ │ ├── contribute_pipeline.mdx │ │ ├── controlling_generation.mdx │ │ ├── custom_pipeline_examples.mdx │ │ ├── custom_pipeline_overview.mdx │ │ ├── depth2img.mdx │ │ ├── img2img.mdx │ │ ├── inpaint.mdx │ │ ├── loading.mdx │ │ ├── loading_overview.mdx │ │ ├── other-formats.mdx │ │ ├── other-modalities.mdx │ │ ├── pipeline_overview.mdx │ │ ├── reproducibility.mdx │ │ ├── reusing_seeds.mdx │ │ ├── rl.mdx │ │ ├── schedulers.mdx │ │ ├── stable_diffusion_jax_how_to.mdx │ │ ├── textual_inversion_inference.mdx │ │ ├── unconditional_image_generation.mdx │ │ ├── using_safetensors │ │ ├── using_safetensors.mdx │ │ ├── weighted_prompts.mdx │ │ └── write_own_pipeline.mdx │ ├── ko │ ├── _toctree.yml │ ├── in_translation.mdx │ ├── index.mdx │ ├── installation.mdx │ ├── optimization │ │ ├── fp16.mdx │ │ ├── habana.mdx │ │ ├── mps.mdx │ │ ├── onnx.mdx │ │ ├── open_vino.mdx │ │ └── xformers.mdx │ ├── quicktour.mdx │ └── training │ │ ├── dreambooth.mdx │ │ ├── lora.mdx │ │ └── text2image.mdx │ └── zh │ ├── _toctree.yml │ ├── index.mdx │ ├── installation.mdx │ └── quicktour.mdx ├── examples ├── README.md ├── community │ ├── README.md │ ├── bit_diffusion.py │ ├── checkpoint_merger.py │ ├── clip_guided_images_mixing_stable_diffusion.py │ ├── clip_guided_stable_diffusion.py │ ├── clip_guided_stable_diffusion_img2img.py │ ├── composable_stable_diffusion.py │ ├── ddim_noise_comparative_analysis.py │ ├── edict_pipeline.py │ ├── imagic_stable_diffusion.py │ ├── img2img_inpainting.py │ ├── interpolate_stable_diffusion.py │ ├── lpw_stable_diffusion.py │ ├── lpw_stable_diffusion_onnx.py │ ├── magic_mix.py │ ├── mixture_canvas.py │ ├── mixture_tiling.py │ ├── multilingual_stable_diffusion.py │ ├── one_step_unet.py │ ├── sd_text2img_k_diffusion.py │ ├── seed_resize_stable_diffusion.py │ ├── speech_to_image_diffusion.py │ ├── stable_diffusion_comparison.py │ ├── stable_diffusion_controlnet_img2img.py │ ├── stable_diffusion_controlnet_inpaint.py │ ├── stable_diffusion_controlnet_inpaint_img2img.py │ ├── stable_diffusion_controlnet_reference.py │ ├── stable_diffusion_ipex.py │ ├── stable_diffusion_mega.py │ ├── stable_diffusion_reference.py │ ├── stable_diffusion_repaint.py │ ├── stable_diffusion_tensorrt_img2img.py │ ├── stable_diffusion_tensorrt_inpaint.py │ ├── stable_diffusion_tensorrt_txt2img.py │ ├── stable_unclip.py │ ├── text_inpainting.py │ ├── tiled_upscaling.py │ ├── unclip_image_interpolation.py │ ├── unclip_text_interpolation.py │ └── wildcard_stable_diffusion.py ├── conftest.py ├── controlnet │ ├── README.md │ ├── requirements.txt │ ├── requirements_flax.txt │ ├── train_controlnet.py │ └── train_controlnet_flax.py ├── custom_diffusion │ ├── README.md │ ├── requirements.txt │ ├── retrieve.py │ └── train_custom_diffusion.py ├── dreambooth │ ├── README.md │ ├── requirements.txt │ ├── requirements_flax.txt │ ├── train_dreambooth.py │ ├── 
train_dreambooth_flax.py │ └── train_dreambooth_lora.py ├── inference │ ├── README.md │ ├── image_to_image.py │ └── inpainting.py ├── instruct_pix2pix │ ├── README.md │ ├── requirements.txt │ └── train_instruct_pix2pix.py ├── research_projects │ ├── README.md │ ├── colossalai │ │ ├── README.md │ │ ├── inference.py │ │ ├── requirement.txt │ │ └── train_dreambooth_colossalai.py │ ├── dreambooth_inpaint │ │ ├── README.md │ │ ├── requirements.txt │ │ ├── train_dreambooth_inpaint.py │ │ └── train_dreambooth_inpaint_lora.py │ ├── intel_opts │ │ ├── README.md │ │ ├── inference_bf16.py │ │ ├── textual_inversion │ │ │ ├── README.md │ │ │ ├── requirements.txt │ │ │ └── textual_inversion_bf16.py │ │ └── textual_inversion_dfq │ │ │ ├── README.md │ │ │ ├── requirements.txt │ │ │ ├── text2images.py │ │ │ └── textual_inversion.py │ ├── lora │ │ ├── README.md │ │ ├── requirements.txt │ │ └── train_text_to_image_lora.py │ ├── mulit_token_textual_inversion │ │ ├── README.md │ │ ├── multi_token_clip.py │ │ ├── requirements.txt │ │ ├── requirements_flax.txt │ │ ├── textual_inversion.py │ │ └── textual_inversion_flax.py │ ├── multi_subject_dreambooth │ │ ├── README.md │ │ ├── requirements.txt │ │ └── train_multi_subject_dreambooth.py │ └── onnxruntime │ │ ├── README.md │ │ ├── text_to_image │ │ ├── README.md │ │ ├── requirements.txt │ │ └── train_text_to_image.py │ │ ├── textual_inversion │ │ ├── README.md │ │ ├── requirements.txt │ │ └── textual_inversion.py │ │ └── unconditional_image_generation │ │ ├── README.md │ │ ├── requirements.txt │ │ └── train_unconditional.py ├── rl │ ├── README.md │ └── run_diffuser_locomotion.py ├── test_examples.py ├── text_to_image │ ├── README.md │ ├── requirements.txt │ ├── requirements_flax.txt │ ├── train_text_to_image.py │ ├── train_text_to_image_flax.py │ └── train_text_to_image_lora.py ├── textual_inversion │ ├── README.md │ ├── requirements.txt │ ├── requirements_flax.txt │ ├── textual_inversion.py │ └── textual_inversion_flax.py └── unconditional_image_generation │ ├── README.md │ ├── requirements.txt │ └── train_unconditional.py ├── pyproject.toml ├── scripts ├── __init__.py ├── change_naming_configs_and_checkpoints.py ├── conversion_ldm_uncond.py ├── convert_dance_diffusion_to_diffusers.py ├── convert_ddpm_original_checkpoint_to_diffusers.py ├── convert_diffusers_to_original_stable_diffusion.py ├── convert_dit_to_diffusers.py ├── convert_if.py ├── convert_k_upscaler_to_diffusers.py ├── convert_kakao_brain_unclip_to_diffusers.py ├── convert_kandinsky_to_diffusers.py ├── convert_ldm_original_checkpoint_to_diffusers.py ├── convert_lora_safetensor_to_diffusers.py ├── convert_models_diffuser_to_diffusers.py ├── convert_ms_text_to_video_to_diffusers.py ├── convert_music_spectrogram_to_diffusers.py ├── convert_ncsnpp_original_checkpoint_to_diffusers.py ├── convert_original_audioldm_to_diffusers.py ├── convert_original_controlnet_to_diffusers.py ├── convert_original_stable_diffusion_to_diffusers.py ├── convert_stable_diffusion_checkpoint_to_onnx.py ├── convert_unclip_txt2img_to_image_variation.py ├── convert_unidiffuser_to_diffusers.py ├── convert_vae_diff_to_onnx.py ├── convert_vae_pt_to_diffusers.py ├── convert_versatile_diffusion_to_diffusers.py ├── convert_vq_diffusion_to_diffusers.py └── generate_logits.py ├── setup.cfg ├── setup.py ├── src └── diffusers │ ├── __init__.py │ ├── commands │ ├── __init__.py │ ├── diffusers_cli.py │ └── env.py │ ├── configuration_utils.py │ ├── dependency_versions_check.py │ ├── dependency_versions_table.py │ ├── experimental │ ├── README.md 
│ ├── __init__.py │ └── rl │ │ ├── __init__.py │ │ └── value_guided_sampling.py │ ├── image_processor.py │ ├── loaders.py │ ├── models │ ├── README.md │ ├── __init__.py │ ├── activations.py │ ├── attention.py │ ├── attention_flax.py │ ├── attention_processor.py │ ├── autoencoder_kl.py │ ├── controlnet.py │ ├── controlnet_flax.py │ ├── cross_attention.py │ ├── dual_transformer_2d.py │ ├── embeddings.py │ ├── embeddings_flax.py │ ├── modeling_flax_pytorch_utils.py │ ├── modeling_flax_utils.py │ ├── modeling_pytorch_flax_utils.py │ ├── modeling_utils.py │ ├── prior_transformer.py │ ├── resnet.py │ ├── resnet_flax.py │ ├── t5_film_transformer.py │ ├── transformer_2d.py │ ├── transformer_temporal.py │ ├── unet_1d.py │ ├── unet_1d_blocks.py │ ├── unet_2d.py │ ├── unet_2d_blocks.py │ ├── unet_2d_blocks_flax.py │ ├── unet_2d_condition.py │ ├── unet_2d_condition_flax.py │ ├── unet_3d_blocks.py │ ├── unet_3d_condition.py │ ├── vae.py │ ├── vae_flax.py │ └── vq_model.py │ ├── optimization.py │ ├── pipeline_utils.py │ ├── pipelines │ ├── README.md │ ├── __init__.py │ ├── alt_diffusion │ │ ├── __init__.py │ │ ├── modeling_roberta_series.py │ │ ├── pipeline_alt_diffusion.py │ │ └── pipeline_alt_diffusion_img2img.py │ ├── audio_diffusion │ │ ├── __init__.py │ │ ├── mel.py │ │ └── pipeline_audio_diffusion.py │ ├── audioldm │ │ ├── __init__.py │ │ └── pipeline_audioldm.py │ ├── controlnet │ │ ├── __init__.py │ │ ├── multicontrolnet.py │ │ ├── pipeline_controlnet.py │ │ ├── pipeline_controlnet_img2img.py │ │ ├── pipeline_controlnet_inpaint.py │ │ └── pipeline_flax_controlnet.py │ ├── dance_diffusion │ │ ├── __init__.py │ │ └── pipeline_dance_diffusion.py │ ├── ddim │ │ ├── __init__.py │ │ └── pipeline_ddim.py │ ├── ddpm │ │ ├── __init__.py │ │ └── pipeline_ddpm.py │ ├── deepfloyd_if │ │ ├── __init__.py │ │ ├── pipeline_if.py │ │ ├── pipeline_if_img2img.py │ │ ├── pipeline_if_img2img_superresolution.py │ │ ├── pipeline_if_inpainting.py │ │ ├── pipeline_if_inpainting_superresolution.py │ │ ├── pipeline_if_superresolution.py │ │ ├── safety_checker.py │ │ ├── timesteps.py │ │ └── watermark.py │ ├── dit │ │ ├── __init__.py │ │ └── pipeline_dit.py │ ├── kandinsky │ │ ├── __init__.py │ │ ├── pipeline_kandinsky.py │ │ ├── pipeline_kandinsky_img2img.py │ │ ├── pipeline_kandinsky_inpaint.py │ │ ├── pipeline_kandinsky_prior.py │ │ └── text_encoder.py │ ├── latent_diffusion │ │ ├── __init__.py │ │ ├── pipeline_latent_diffusion.py │ │ └── pipeline_latent_diffusion_superresolution.py │ ├── latent_diffusion_uncond │ │ ├── __init__.py │ │ └── pipeline_latent_diffusion_uncond.py │ ├── onnx_utils.py │ ├── paint_by_example │ │ ├── __init__.py │ │ ├── image_encoder.py │ │ └── pipeline_paint_by_example.py │ ├── pipeline_flax_utils.py │ ├── pipeline_utils.py │ ├── pndm │ │ ├── __init__.py │ │ └── pipeline_pndm.py │ ├── repaint │ │ ├── __init__.py │ │ └── pipeline_repaint.py │ ├── score_sde_ve │ │ ├── __init__.py │ │ └── pipeline_score_sde_ve.py │ ├── semantic_stable_diffusion │ │ ├── __init__.py │ │ └── pipeline_semantic_stable_diffusion.py │ ├── spectrogram_diffusion │ │ ├── __init__.py │ │ ├── continous_encoder.py │ │ ├── midi_utils.py │ │ ├── notes_encoder.py │ │ └── pipeline_spectrogram_diffusion.py │ ├── stable_diffusion │ │ ├── README.md │ │ ├── __init__.py │ │ ├── convert_from_ckpt.py │ │ ├── pipeline_cycle_diffusion.py │ │ ├── pipeline_flax_stable_diffusion.py │ │ ├── pipeline_flax_stable_diffusion_controlnet.py │ │ ├── pipeline_flax_stable_diffusion_img2img.py │ │ ├── pipeline_flax_stable_diffusion_inpaint.py │ │ ├── 
pipeline_onnx_stable_diffusion.py │ │ ├── pipeline_onnx_stable_diffusion_img2img.py │ │ ├── pipeline_onnx_stable_diffusion_inpaint.py │ │ ├── pipeline_onnx_stable_diffusion_inpaint_legacy.py │ │ ├── pipeline_onnx_stable_diffusion_upscale.py │ │ ├── pipeline_stable_diffusion.py │ │ ├── pipeline_stable_diffusion_attend_and_excite.py │ │ ├── pipeline_stable_diffusion_controlnet.py │ │ ├── pipeline_stable_diffusion_depth2img.py │ │ ├── pipeline_stable_diffusion_diffedit.py │ │ ├── pipeline_stable_diffusion_image_variation.py │ │ ├── pipeline_stable_diffusion_img2img.py │ │ ├── pipeline_stable_diffusion_inpaint.py │ │ ├── pipeline_stable_diffusion_inpaint_legacy.py │ │ ├── pipeline_stable_diffusion_instruct_pix2pix.py │ │ ├── pipeline_stable_diffusion_k_diffusion.py │ │ ├── pipeline_stable_diffusion_latent_upscale.py │ │ ├── pipeline_stable_diffusion_model_editing.py │ │ ├── pipeline_stable_diffusion_panorama.py │ │ ├── pipeline_stable_diffusion_pix2pix_zero.py │ │ ├── pipeline_stable_diffusion_sag.py │ │ ├── pipeline_stable_diffusion_upscale.py │ │ ├── pipeline_stable_unclip.py │ │ ├── pipeline_stable_unclip_img2img.py │ │ ├── safety_checker.py │ │ ├── safety_checker_flax.py │ │ └── stable_unclip_image_normalizer.py │ ├── stable_diffusion_safe │ │ ├── __init__.py │ │ ├── pipeline_stable_diffusion_safe.py │ │ └── safety_checker.py │ ├── stochastic_karras_ve │ │ ├── __init__.py │ │ └── pipeline_stochastic_karras_ve.py │ ├── text_to_video_synthesis │ │ ├── __init__.py │ │ ├── pipeline_text_to_video_synth.py │ │ └── pipeline_text_to_video_zero.py │ ├── unclip │ │ ├── __init__.py │ │ ├── pipeline_unclip.py │ │ ├── pipeline_unclip_image_variation.py │ │ └── text_proj.py │ ├── unidiffuser │ │ ├── __init__.py │ │ ├── modeling_text_decoder.py │ │ ├── modeling_uvit.py │ │ └── pipeline_unidiffuser.py │ ├── versatile_diffusion │ │ ├── __init__.py │ │ ├── modeling_text_unet.py │ │ ├── pipeline_versatile_diffusion.py │ │ ├── pipeline_versatile_diffusion_dual_guided.py │ │ ├── pipeline_versatile_diffusion_image_variation.py │ │ └── pipeline_versatile_diffusion_text_to_image.py │ └── vq_diffusion │ │ ├── __init__.py │ │ └── pipeline_vq_diffusion.py │ ├── schedulers │ ├── README.md │ ├── __init__.py │ ├── scheduling_ddim.py │ ├── scheduling_ddim_flax.py │ ├── scheduling_ddim_inverse.py │ ├── scheduling_ddpm.py │ ├── scheduling_ddpm_flax.py │ ├── scheduling_deis_multistep.py │ ├── scheduling_dpmsolver_multistep.py │ ├── scheduling_dpmsolver_multistep_flax.py │ ├── scheduling_dpmsolver_multistep_inverse.py │ ├── scheduling_dpmsolver_sde.py │ ├── scheduling_dpmsolver_singlestep.py │ ├── scheduling_euler_ancestral_discrete.py │ ├── scheduling_euler_discrete.py │ ├── scheduling_heun_discrete.py │ ├── scheduling_ipndm.py │ ├── scheduling_k_dpm_2_ancestral_discrete.py │ ├── scheduling_k_dpm_2_discrete.py │ ├── scheduling_karras_ve.py │ ├── scheduling_karras_ve_flax.py │ ├── scheduling_lms_discrete.py │ ├── scheduling_lms_discrete_flax.py │ ├── scheduling_pndm.py │ ├── scheduling_pndm_flax.py │ ├── scheduling_repaint.py │ ├── scheduling_sde_ve.py │ ├── scheduling_sde_ve_flax.py │ ├── scheduling_sde_vp.py │ ├── scheduling_unclip.py │ ├── scheduling_unipc_multistep.py │ ├── scheduling_utils.py │ ├── scheduling_utils_flax.py │ └── scheduling_vq_diffusion.py │ ├── training_utils.py │ └── utils │ ├── __init__.py │ ├── accelerate_utils.py │ ├── constants.py │ ├── deprecation_utils.py │ ├── doc_utils.py │ ├── dummy_flax_and_transformers_objects.py │ ├── dummy_flax_objects.py │ ├── dummy_note_seq_objects.py │ ├── 
dummy_onnx_objects.py │ ├── dummy_pt_objects.py │ ├── dummy_torch_and_librosa_objects.py │ ├── dummy_torch_and_scipy_objects.py │ ├── dummy_torch_and_torchsde_objects.py │ ├── dummy_torch_and_transformers_and_k_diffusion_objects.py │ ├── dummy_torch_and_transformers_and_onnx_objects.py │ ├── dummy_torch_and_transformers_objects.py │ ├── dummy_transformers_and_torch_and_note_seq_objects.py │ ├── dynamic_modules_utils.py │ ├── hub_utils.py │ ├── import_utils.py │ ├── logging.py │ ├── model_card_template.md │ ├── outputs.py │ ├── pil_utils.py │ ├── testing_utils.py │ └── torch_utils.py ├── tests ├── __init__.py ├── conftest.py ├── fixtures │ ├── custom_pipeline │ │ ├── pipeline.py │ │ └── what_ever.py │ └── elise_format0.mid ├── models │ ├── __init__.py │ ├── test_activations.py │ ├── test_attention_processor.py │ ├── test_layers_utils.py │ ├── test_lora_layers.py │ ├── test_modeling_common.py │ ├── test_modeling_common_flax.py │ ├── test_models_unet_1d.py │ ├── test_models_unet_2d.py │ ├── test_models_unet_2d_condition.py │ ├── test_models_unet_2d_flax.py │ ├── test_models_unet_3d_condition.py │ ├── test_models_vae.py │ ├── test_models_vae_flax.py │ ├── test_models_vq.py │ ├── test_unet_2d_blocks.py │ └── test_unet_blocks_common.py ├── others │ ├── test_check_copies.py │ ├── test_check_dummies.py │ ├── test_config.py │ ├── test_ema.py │ ├── test_hub_utils.py │ ├── test_image_processor.py │ ├── test_outputs.py │ ├── test_training.py │ └── test_utils.py ├── pipelines │ ├── __init__.py │ ├── altdiffusion │ │ ├── __init__.py │ │ ├── test_alt_diffusion.py │ │ └── test_alt_diffusion_img2img.py │ ├── audio_diffusion │ │ ├── __init__.py │ │ └── test_audio_diffusion.py │ ├── audioldm │ │ ├── __init__.py │ │ └── test_audioldm.py │ ├── controlnet │ │ ├── __init__.py │ │ ├── test_controlnet.py │ │ ├── test_controlnet_img2img.py │ │ ├── test_controlnet_inpaint.py │ │ └── test_flax_controlnet.py │ ├── dance_diffusion │ │ ├── __init__.py │ │ └── test_dance_diffusion.py │ ├── ddim │ │ ├── __init__.py │ │ └── test_ddim.py │ ├── ddpm │ │ ├── __init__.py │ │ └── test_ddpm.py │ ├── deepfloyd_if │ │ ├── __init__.py │ │ ├── test_if.py │ │ ├── test_if_img2img.py │ │ ├── test_if_img2img_superresolution.py │ │ ├── test_if_inpainting.py │ │ ├── test_if_inpainting_superresolution.py │ │ └── test_if_superresolution.py │ ├── dit │ │ ├── __init__.py │ │ └── test_dit.py │ ├── kandinsky │ │ ├── __init__.py │ │ ├── test_kandinsky.py │ │ ├── test_kandinsky_img2img.py │ │ ├── test_kandinsky_inpaint.py │ │ └── test_kandinsky_prior.py │ ├── karras_ve │ │ ├── __init__.py │ │ └── test_karras_ve.py │ ├── latent_diffusion │ │ ├── __init__.py │ │ ├── test_latent_diffusion.py │ │ ├── test_latent_diffusion_superresolution.py │ │ └── test_latent_diffusion_uncond.py │ ├── paint_by_example │ │ ├── __init__.py │ │ └── test_paint_by_example.py │ ├── pipeline_params.py │ ├── pndm │ │ ├── __init__.py │ │ └── test_pndm.py │ ├── repaint │ │ ├── __init__.py │ │ └── test_repaint.py │ ├── score_sde_ve │ │ ├── __init__.py │ │ └── test_score_sde_ve.py │ ├── semantic_stable_diffusion │ │ ├── __init__.py │ │ └── test_semantic_diffusion.py │ ├── spectrogram_diffusion │ │ ├── __init__.py │ │ └── test_spectrogram_diffusion.py │ ├── stable_diffusion │ │ ├── __init__.py │ │ ├── test_cycle_diffusion.py │ │ ├── test_onnx_stable_diffusion.py │ │ ├── test_onnx_stable_diffusion_img2img.py │ │ ├── test_onnx_stable_diffusion_inpaint.py │ │ ├── test_onnx_stable_diffusion_inpaint_legacy.py │ │ ├── test_onnx_stable_diffusion_upscale.py │ │ ├── 
test_stable_diffusion.py │ │ ├── test_stable_diffusion_image_variation.py │ │ ├── test_stable_diffusion_img2img.py │ │ ├── test_stable_diffusion_inpaint.py │ │ ├── test_stable_diffusion_inpaint_legacy.py │ │ ├── test_stable_diffusion_instruction_pix2pix.py │ │ ├── test_stable_diffusion_k_diffusion.py │ │ ├── test_stable_diffusion_model_editing.py │ │ ├── test_stable_diffusion_panorama.py │ │ ├── test_stable_diffusion_pix2pix_zero.py │ │ └── test_stable_diffusion_sag.py │ ├── stable_diffusion_2 │ │ ├── __init__.py │ │ ├── test_stable_diffusion.py │ │ ├── test_stable_diffusion_attend_and_excite.py │ │ ├── test_stable_diffusion_depth.py │ │ ├── test_stable_diffusion_diffedit.py │ │ ├── test_stable_diffusion_flax.py │ │ ├── test_stable_diffusion_flax_inpaint.py │ │ ├── test_stable_diffusion_inpaint.py │ │ ├── test_stable_diffusion_latent_upscale.py │ │ ├── test_stable_diffusion_upscale.py │ │ └── test_stable_diffusion_v_pred.py │ ├── stable_diffusion_safe │ │ ├── __init__.py │ │ └── test_safe_diffusion.py │ ├── stable_unclip │ │ ├── __init__.py │ │ ├── test_stable_unclip.py │ │ └── test_stable_unclip_img2img.py │ ├── test_pipeline_utils.py │ ├── test_pipelines.py │ ├── test_pipelines_common.py │ ├── test_pipelines_flax.py │ ├── test_pipelines_onnx_common.py │ ├── text_to_video │ │ ├── __init__.py │ │ ├── test_text_to_video.py │ │ └── test_text_to_video_zero.py │ ├── unclip │ │ ├── __init__.py │ │ ├── test_unclip.py │ │ └── test_unclip_image_variation.py │ ├── unidiffuser │ │ ├── __init__.py │ │ └── test_unidiffuser.py │ ├── versatile_diffusion │ │ ├── __init__.py │ │ ├── test_versatile_diffusion_dual_guided.py │ │ ├── test_versatile_diffusion_image_variation.py │ │ ├── test_versatile_diffusion_mega.py │ │ └── test_versatile_diffusion_text_to_image.py │ └── vq_diffusion │ │ ├── __init__.py │ │ └── test_vq_diffusion.py └── schedulers │ ├── __init__.py │ ├── test_scheduler_ddim.py │ ├── test_scheduler_ddpm.py │ ├── test_scheduler_deis.py │ ├── test_scheduler_dpm_multi.py │ ├── test_scheduler_dpm_sde.py │ ├── test_scheduler_dpm_single.py │ ├── test_scheduler_euler.py │ ├── test_scheduler_euler_ancestral.py │ ├── test_scheduler_flax.py │ ├── test_scheduler_heun.py │ ├── test_scheduler_ipndm.py │ ├── test_scheduler_kdpm2_ancestral.py │ ├── test_scheduler_kdpm2_discrete.py │ ├── test_scheduler_lms.py │ ├── test_scheduler_pndm.py │ ├── test_scheduler_score_sde_ve.py │ ├── test_scheduler_unclip.py │ ├── test_scheduler_unipc.py │ ├── test_scheduler_vq_diffusion.py │ └── test_schedulers.py └── utils ├── check_config_docstrings.py ├── check_copies.py ├── check_doc_toc.py ├── check_dummies.py ├── check_inits.py ├── check_repo.py ├── check_table.py ├── custom_init_isort.py ├── get_modified_files.py ├── overwrite_expected_slice.py ├── print_env.py ├── release.py └── stale.py /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | contact_links: 2 | - name: Blank issue 3 | url: https://github.com/huggingface/diffusers/issues/new 4 | about: Other 5 | - name: Forum 6 | url: https://discuss.huggingface.co/ 7 | about: General usage questions and community discussions -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F680 Feature request" 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature 
request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feedback.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "💬 Feedback about API Design" 3 | about: Give feedback about the current API design 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **What API design would you like to have changed or added to the library? Why?** 11 | 12 | **What use case would this enable or better enable? Can you give us a code example?** 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/new-model-addition.yml: -------------------------------------------------------------------------------- 1 | name: "\U0001F31F New model/pipeline/scheduler addition" 2 | description: Submit a proposal/request to implement a new diffusion model / pipeline / scheduler 3 | labels: [ "New model/pipeline/scheduler" ] 4 | 5 | body: 6 | - type: textarea 7 | id: description-request 8 | validations: 9 | required: true 10 | attributes: 11 | label: Model/Pipeline/Scheduler description 12 | description: | 13 | Put any and all important information relative to the model/pipeline/scheduler 14 | 15 | - type: checkboxes 16 | id: information-tasks 17 | attributes: 18 | label: Open source status 19 | description: | 20 | Please note that if the model implementation isn't available or if the weights aren't open-source, we are less likely to implement it in `diffusers`. 21 | options: 22 | - label: "The model implementation is available" 23 | - label: "The model weights are available (Only relevant if addition is not a scheduler)." 24 | 25 | - type: textarea 26 | id: additional-info 27 | attributes: 28 | label: Provide useful links for the implementation 29 | description: | 30 | Please provide information regarding the implementation, the weights, and the authors. 31 | Please mention the authors by @gh-username if you're aware of their usernames. 
32 | -------------------------------------------------------------------------------- /.github/workflows/build_docker_images.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker images (nightly) 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 0 * * *" # every day at midnight 7 | 8 | concurrency: 9 | group: docker-image-builds 10 | cancel-in-progress: false 11 | 12 | env: 13 | REGISTRY: diffusers 14 | 15 | jobs: 16 | build-docker-images: 17 | runs-on: ubuntu-latest 18 | 19 | permissions: 20 | contents: read 21 | packages: write 22 | 23 | strategy: 24 | fail-fast: false 25 | matrix: 26 | image-name: 27 | - diffusers-pytorch-cpu 28 | - diffusers-pytorch-cuda 29 | - diffusers-flax-cpu 30 | - diffusers-flax-tpu 31 | - diffusers-onnxruntime-cpu 32 | - diffusers-onnxruntime-cuda 33 | 34 | steps: 35 | - name: Checkout repository 36 | uses: actions/checkout@v3 37 | 38 | - name: Login to Docker Hub 39 | uses: docker/login-action@v2 40 | with: 41 | username: ${{ env.REGISTRY }} 42 | password: ${{ secrets.DOCKERHUB_TOKEN }} 43 | 44 | - name: Build and push 45 | uses: docker/build-push-action@v3 46 | with: 47 | no-cache: true 48 | context: ./docker/${{ matrix.image-name }} 49 | push: true 50 | tags: ${{ env.REGISTRY }}/${{ matrix.image-name }}:latest 51 | -------------------------------------------------------------------------------- /.github/workflows/build_documentation.yml: -------------------------------------------------------------------------------- 1 | name: Build documentation 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - doc-builder* 8 | - v*-patch 9 | 10 | jobs: 11 | build: 12 | uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main 13 | with: 14 | commit_sha: ${{ github.sha }} 15 | package: diffusers 16 | notebook_folder: diffusers_doc 17 | languages: en ko 18 | secrets: 19 | token: ${{ secrets.HUGGINGFACE_PUSH }} 20 | hf_token: ${{ secrets.HF_DOC_PUSH }} 21 | -------------------------------------------------------------------------------- /.github/workflows/build_pr_documentation.yml: -------------------------------------------------------------------------------- 1 | name: Build PR Documentation 2 | 3 | on: 4 | pull_request: 5 | 6 | concurrency: 7 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 8 | cancel-in-progress: true 9 | 10 | jobs: 11 | build: 12 | uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main 13 | with: 14 | commit_sha: ${{ github.event.pull_request.head.sha }} 15 | pr_number: ${{ github.event.number }} 16 | package: diffusers 17 | languages: en ko 18 | -------------------------------------------------------------------------------- /.github/workflows/delete_doc_comment.yml: -------------------------------------------------------------------------------- 1 | name: Delete doc comment 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["Delete doc comment trigger"] 6 | types: 7 | - completed 8 | 9 | 10 | jobs: 11 | delete: 12 | uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main 13 | secrets: 14 | comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/delete_doc_comment_trigger.yml: -------------------------------------------------------------------------------- 1 | name: Delete doc comment trigger 2 | 3 | on: 4 | pull_request: 5 | types: [ closed ] 6 | 7 | 8 | jobs: 9 | delete: 10 | uses: 
huggingface/doc-builder/.github/workflows/delete_doc_comment_trigger.yml@main 11 | with: 12 | pr_number: ${{ github.event.number }} 13 | -------------------------------------------------------------------------------- /.github/workflows/pr_quality.yml: -------------------------------------------------------------------------------- 1 | name: Run code quality checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | check_code_quality: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Set up Python 21 | uses: actions/setup-python@v4 22 | with: 23 | python-version: "3.7" 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade pip 27 | pip install .[quality] 28 | - name: Check quality 29 | run: | 30 | black --check examples tests src utils scripts 31 | ruff examples tests src utils scripts 32 | doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source 33 | 34 | check_repository_consistency: 35 | runs-on: ubuntu-latest 36 | steps: 37 | - uses: actions/checkout@v3 38 | - name: Set up Python 39 | uses: actions/setup-python@v4 40 | with: 41 | python-version: "3.7" 42 | - name: Install dependencies 43 | run: | 44 | python -m pip install --upgrade pip 45 | pip install .[quality] 46 | - name: Check quality 47 | run: | 48 | python utils/check_copies.py 49 | python utils/check_dummies.py 50 | make deps_table_check_updated 51 | -------------------------------------------------------------------------------- /.github/workflows/push_tests_mps.yml: -------------------------------------------------------------------------------- 1 | name: Fast mps tests on main 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | env: 9 | DIFFUSERS_IS_CI: yes 10 | HF_HOME: /mnt/cache 11 | OMP_NUM_THREADS: 8 12 | MKL_NUM_THREADS: 8 13 | PYTEST_TIMEOUT: 600 14 | RUN_SLOW: no 15 | 16 | jobs: 17 | run_fast_tests_apple_m1: 18 | name: Fast PyTorch MPS tests on MacOS 19 | runs-on: [ self-hosted, apple-m1 ] 20 | 21 | steps: 22 | - name: Checkout diffusers 23 | uses: actions/checkout@v3 24 | with: 25 | fetch-depth: 2 26 | 27 | - name: Clean checkout 28 | shell: arch -arch arm64 bash {0} 29 | run: | 30 | git clean -fxd 31 | 32 | - name: Setup miniconda 33 | uses: ./.github/actions/setup-miniconda 34 | with: 35 | python-version: 3.9 36 | 37 | - name: Install dependencies 38 | shell: arch -arch arm64 bash {0} 39 | run: | 40 | ${CONDA_RUN} python -m pip install --upgrade pip 41 | ${CONDA_RUN} python -m pip install -e .[quality,test] 42 | ${CONDA_RUN} python -m pip install torch torchvision torchaudio 43 | ${CONDA_RUN} python -m pip install accelerate --upgrade 44 | ${CONDA_RUN} python -m pip install transformers --upgrade 45 | 46 | - name: Environment 47 | shell: arch -arch arm64 bash {0} 48 | run: | 49 | ${CONDA_RUN} python utils/print_env.py 50 | 51 | - name: Run fast PyTorch tests on M1 (MPS) 52 | shell: arch -arch arm64 bash {0} 53 | env: 54 | HF_HOME: /System/Volumes/Data/mnt/cache 55 | HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} 56 | run: | 57 | ${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/ 58 | 59 | - name: Failure short reports 60 | if: ${{ failure() }} 61 | run: cat reports/tests_torch_mps_failures_short.txt 62 | 63 | - name: Test suite reports artifacts 64 | if: ${{ always() }} 65 | uses: 
actions/upload-artifact@v2 66 | with: 67 | name: pr_torch_mps_test_reports 68 | path: reports 69 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Stale Bot 2 | 3 | on: 4 | schedule: 5 | - cron: "0 15 * * *" 6 | 7 | jobs: 8 | close_stale_issues: 9 | name: Close Stale Issues 10 | if: github.repository == 'huggingface/diffusers' 11 | runs-on: ubuntu-latest 12 | env: 13 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 14 | steps: 15 | - uses: actions/checkout@v2 16 | 17 | - name: Setup Python 18 | uses: actions/setup-python@v1 19 | with: 20 | python-version: 3.7 21 | 22 | - name: Install requirements 23 | run: | 24 | pip install PyGithub 25 | - name: Close stale issues 26 | run: | 27 | python utils/stale.py 28 | -------------------------------------------------------------------------------- /.github/workflows/typos.yml: -------------------------------------------------------------------------------- 1 | name: Check typos 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - uses: actions/checkout@v3 12 | 13 | - name: typos-action 14 | uses: crate-ci/typos@v1.12.4 15 | -------------------------------------------------------------------------------- /.github/workflows/upload_pr_documentation.yml: -------------------------------------------------------------------------------- 1 | name: Upload PR Documentation 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["Build PR Documentation"] 6 | types: 7 | - completed 8 | 9 | jobs: 10 | build: 11 | uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main 12 | with: 13 | package_name: diffusers 14 | secrets: 15 | hf_token: ${{ secrets.HF_DOC_PUSH }} 16 | comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | title: 'Diffusers: State-of-the-art diffusion models' 3 | message: >- 4 | If you use this software, please cite it using the 5 | metadata from this file. 6 | type: software 7 | authors: 8 | - given-names: Patrick 9 | family-names: von Platen 10 | - given-names: Suraj 11 | family-names: Patil 12 | - given-names: Anton 13 | family-names: Lozhkov 14 | - given-names: Pedro 15 | family-names: Cuenca 16 | - given-names: Nathan 17 | family-names: Lambert 18 | - given-names: Kashif 19 | family-names: Rasul 20 | - given-names: Mishig 21 | family-names: Davaadorj 22 | - given-names: Thomas 23 | family-names: Wolf 24 | repository-code: 'https://github.com/huggingface/diffusers' 25 | abstract: >- 26 | Diffusers provides pretrained diffusion models across 27 | multiple modalities, such as vision and audio, and serves 28 | as a modular toolbox for inference and training of 29 | diffusion models. 
30 | keywords: 31 | - deep-learning 32 | - pytorch 33 | - image-generation 34 | - diffusion 35 | - text2image 36 | - image2image 37 | - score-based-generative-modeling 38 | - stable-diffusion 39 | license: Apache-2.0 40 | version: 0.12.1 41 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include src/diffusers/utils/model_card_template.md 3 | -------------------------------------------------------------------------------- /_typos.toml: -------------------------------------------------------------------------------- 1 | # Files for typos 2 | # Instruction: https://github.com/marketplace/actions/typos-action#getting-started 3 | 4 | [default.extend-identifiers] 5 | 6 | [default.extend-words] 7 | NIN="NIN" # NIN is used in scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py 8 | nd="np" # nd may be np (numpy) 9 | parms="parms" # parms is used in scripts/convert_original_stable_diffusion_to_diffusers.py 10 | 11 | 12 | [files] 13 | extend-exclude = ["_typos.toml"] 14 | -------------------------------------------------------------------------------- /docker/diffusers-flax-cpu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | LABEL maintainer="Hugging Face" 3 | LABEL repository="diffusers" 4 | 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | 7 | RUN apt update && \ 8 | apt install -y bash \ 9 | build-essential \ 10 | git \ 11 | git-lfs \ 12 | curl \ 13 | ca-certificates \ 14 | libsndfile1-dev \ 15 | python3.8 \ 16 | python3-pip \ 17 | python3.8-venv && \ 18 | rm -rf /var/lib/apt/lists 19 | 20 | # make sure to use venv 21 | RUN python3 -m venv /opt/venv 22 | ENV PATH="/opt/venv/bin:$PATH" 23 | 24 | # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) 25 | # follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container 26 | RUN python3 -m pip install --no-cache-dir --upgrade pip && \ 27 | python3 -m pip install --upgrade --no-cache-dir \ 28 | clu \ 29 | "jax[cpu]>=0.2.16,!=0.3.2" \ 30 | "flax>=0.4.1" \ 31 | "jaxlib>=0.1.65" && \ 32 | python3 -m pip install --no-cache-dir \ 33 | accelerate \ 34 | datasets \ 35 | hf-doc-builder \ 36 | huggingface-hub \ 37 | Jinja2 \ 38 | librosa \ 39 | numpy \ 40 | scipy \ 41 | tensorboard \ 42 | transformers 43 | 44 | CMD ["/bin/bash"] -------------------------------------------------------------------------------- /docker/diffusers-flax-tpu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | LABEL maintainer="Hugging Face" 3 | LABEL repository="diffusers" 4 | 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | 7 | RUN apt update && \ 8 | apt install -y bash \ 9 | build-essential \ 10 | git \ 11 | git-lfs \ 12 | curl \ 13 | ca-certificates \ 14 | libsndfile1-dev \ 15 | python3.8 \ 16 | python3-pip \ 17 | python3.8-venv && \ 18 | rm -rf /var/lib/apt/lists 19 | 20 | # make sure to use venv 21 | RUN python3 -m venv /opt/venv 22 | ENV PATH="/opt/venv/bin:$PATH" 23 | 24 | # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) 25 | # follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container 26 | RUN python3 -m pip install --no-cache-dir --upgrade pip && \ 27 | python3 -m pip install --no-cache-dir 
\ 28 | "jax[tpu]>=0.2.16,!=0.3.2" \ 29 | -f https://storage.googleapis.com/jax-releases/libtpu_releases.html && \ 30 | python3 -m pip install --upgrade --no-cache-dir \ 31 | clu \ 32 | "flax>=0.4.1" \ 33 | "jaxlib>=0.1.65" && \ 34 | python3 -m pip install --no-cache-dir \ 35 | accelerate \ 36 | datasets \ 37 | hf-doc-builder \ 38 | huggingface-hub \ 39 | Jinja2 \ 40 | librosa \ 41 | numpy \ 42 | scipy \ 43 | tensorboard \ 44 | transformers 45 | 46 | CMD ["/bin/bash"] -------------------------------------------------------------------------------- /docker/diffusers-onnxruntime-cpu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | LABEL maintainer="Hugging Face" 3 | LABEL repository="diffusers" 4 | 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | 7 | RUN apt update && \ 8 | apt install -y bash \ 9 | build-essential \ 10 | git \ 11 | git-lfs \ 12 | curl \ 13 | ca-certificates \ 14 | libsndfile1-dev \ 15 | python3.8 \ 16 | python3-pip \ 17 | python3.8-venv && \ 18 | rm -rf /var/lib/apt/lists 19 | 20 | # make sure to use venv 21 | RUN python3 -m venv /opt/venv 22 | ENV PATH="/opt/venv/bin:$PATH" 23 | 24 | # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) 25 | RUN python3 -m pip install --no-cache-dir --upgrade pip && \ 26 | python3 -m pip install --no-cache-dir \ 27 | torch \ 28 | torchvision \ 29 | torchaudio \ 30 | onnxruntime \ 31 | --extra-index-url https://download.pytorch.org/whl/cpu && \ 32 | python3 -m pip install --no-cache-dir \ 33 | accelerate \ 34 | datasets \ 35 | hf-doc-builder \ 36 | huggingface-hub \ 37 | Jinja2 \ 38 | librosa \ 39 | numpy \ 40 | scipy \ 41 | tensorboard \ 42 | transformers 43 | 44 | CMD ["/bin/bash"] -------------------------------------------------------------------------------- /docker/diffusers-onnxruntime-cuda/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04 2 | LABEL maintainer="Hugging Face" 3 | LABEL repository="diffusers" 4 | 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | 7 | RUN apt update && \ 8 | apt install -y bash \ 9 | build-essential \ 10 | git \ 11 | git-lfs \ 12 | curl \ 13 | ca-certificates \ 14 | libsndfile1-dev \ 15 | python3.8 \ 16 | python3-pip \ 17 | python3.8-venv && \ 18 | rm -rf /var/lib/apt/lists 19 | 20 | # make sure to use venv 21 | RUN python3 -m venv /opt/venv 22 | ENV PATH="/opt/venv/bin:$PATH" 23 | 24 | # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) 25 | RUN python3 -m pip install --no-cache-dir --upgrade pip && \ 26 | python3 -m pip install --no-cache-dir \ 27 | torch \ 28 | torchvision \ 29 | torchaudio \ 30 | "onnxruntime-gpu>=1.13.1" \ 31 | --extra-index-url https://download.pytorch.org/whl/cu117 && \ 32 | python3 -m pip install --no-cache-dir \ 33 | accelerate \ 34 | datasets \ 35 | hf-doc-builder \ 36 | huggingface-hub \ 37 | Jinja2 \ 38 | librosa \ 39 | numpy \ 40 | scipy \ 41 | tensorboard \ 42 | transformers 43 | 44 | CMD ["/bin/bash"] -------------------------------------------------------------------------------- /docker/diffusers-pytorch-cpu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | LABEL maintainer="Hugging Face" 3 | LABEL repository="diffusers" 4 | 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | 7 | RUN apt update && \ 8 | apt install -y bash \ 9 | build-essential \ 10 | git \ 11 | git-lfs \ 12 | 
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio \
    --extra-index-url https://download.pytorch.org/whl/cpu && \
    python3 -m pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers

CMD ["/bin/bash"]
-------------------------------------------------------------------------------- /docker/diffusers-pytorch-cuda/Dockerfile: --------------------------------------------------------------------------------
FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio && \
    python3 -m pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers \
    omegaconf \
    pytorch-lightning \
    xformers

CMD ["/bin/bash"]
-------------------------------------------------------------------------------- /docs/source/_config.py: --------------------------------------------------------------------------------
# docstyle-ignore
INSTALL_CONTENT = """
# Diffusers installation
! pip install diffusers transformers datasets accelerate
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/diffusers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
-------------------------------------------------------------------------------- /docs/source/en/api/attnprocessor.mdx: --------------------------------------------------------------------------------
# Attention Processor

An attention processor is a class for applying different types of attention mechanisms.
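The sketch below shows, under stated assumptions, how a processor can be swapped at runtime: it loads only the UNet of a Stable Diffusion checkpoint (the model ID is just an example) and replaces the default processor with [`AttnProcessor2_0`]. This is a minimal sketch, not a prescribed workflow:

```python
import torch

from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor2_0

# Load only the UNet of a Stable Diffusion checkpoint (example model ID).
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
)

# Swap in the PyTorch 2.0 scaled-dot-product-attention processor
# (requires PyTorch >= 2.0).
unet.set_attn_processor(AttnProcessor2_0())

# The processors currently in use, keyed by attention module name.
print(list(unet.attn_processors)[:3])
```

`set_attn_processor` also accepts a dictionary mapping attention module names to processors, in case different blocks should use different mechanisms.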
## AttnProcessor
[[autodoc]] models.attention_processor.AttnProcessor

## AttnProcessor2_0
[[autodoc]] models.attention_processor.AttnProcessor2_0

## LoRAAttnProcessor
[[autodoc]] models.attention_processor.LoRAAttnProcessor

## LoRAAttnProcessor2_0
[[autodoc]] models.attention_processor.LoRAAttnProcessor2_0

## CustomDiffusionAttnProcessor
[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor

## AttnAddedKVProcessor
[[autodoc]] models.attention_processor.AttnAddedKVProcessor

## AttnAddedKVProcessor2_0
[[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0

## LoRAAttnAddedKVProcessor
[[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor

## XFormersAttnProcessor
[[autodoc]] models.attention_processor.XFormersAttnProcessor

## LoRAXFormersAttnProcessor
[[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor

## CustomDiffusionXFormersAttnProcessor
[[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor

## SlicedAttnProcessor
[[autodoc]] models.attention_processor.SlicedAttnProcessor

## SlicedAttnAddedKVProcessor
[[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor
-------------------------------------------------------------------------------- /docs/source/en/api/configuration.mdx: --------------------------------------------------------------------------------
# Configuration

Schedulers from [`~schedulers.scheduling_utils.SchedulerMixin`] and models from [`ModelMixin`] inherit from [`ConfigMixin`], which conveniently stores all the parameters that are passed to their respective `__init__` methods in a JSON configuration file.

## ConfigMixin

[[autodoc]] ConfigMixin
    - load_config
    - from_config
    - save_config
    - to_json_file
    - to_json_string
-------------------------------------------------------------------------------- /docs/source/en/api/diffusion_pipeline.mdx: --------------------------------------------------------------------------------
# Pipelines

The [`DiffusionPipeline`] is the easiest way to load any pretrained diffusion pipeline from the [Hub](https://huggingface.co/models?library=diffusers) and use it for inference.

You shouldn't use the [`DiffusionPipeline`] class for training or finetuning a diffusion model. Individual components (for example, [`UNet2DModel`] and [`UNet2DConditionModel`]) of diffusion pipelines are usually trained individually, so we suggest working with them directly instead.

The pipeline type (for example [`StableDiffusionPipeline`]) of any diffusion pipeline loaded with [`~DiffusionPipeline.from_pretrained`] is automatically detected, and the pipeline components are loaded and passed to the `__init__` function of the pipeline.

Any pipeline object can be saved locally with [`~DiffusionPipeline.save_pretrained`]. A short loading sketch is included after the API listing below.

## DiffusionPipeline

[[autodoc]] DiffusionPipeline
    - all
    - __call__
    - device
    - to
    - components
-------------------------------------------------------------------------------- /docs/source/en/api/experimental/rl.mdx: --------------------------------------------------------------------------------
# TODO

Coming soon!
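As a usage sketch for the [`ConfigMixin`] and [`DiffusionPipeline`] APIs documented above (the checkpoint name is only an example, and the sketch relies solely on the documented methods):

```python
from diffusers import DiffusionPipeline

# The concrete pipeline class (here a Stable Diffusion pipeline) is detected
# automatically from the checkpoint's configuration.
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Every component inherits from ConfigMixin, so the arguments passed to its
# __init__ are recorded in a config that can be inspected or saved as JSON.
print(pipeline.scheduler.config)

# The components dict can be reused to assemble other pipelines without
# reloading weights, and the whole pipeline can be saved locally.
print(list(pipeline.components))
pipeline.save_pretrained("./my-pipeline")
```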
-------------------------------------------------------------------------------- /docs/source/en/api/image_processor.mdx: --------------------------------------------------------------------------------
# Image Processor for VAE

The image processor provides a unified API for Stable Diffusion pipelines to prepare their image inputs for VAE encoding, and to post-process the outputs once they're decoded. This includes transformations such as resizing, normalization, and conversion between PIL Image, PyTorch tensor, and NumPy array formats.

All pipelines with a VAE image processor accept image inputs as PIL Images, PyTorch tensors, or NumPy arrays, and can return outputs in any of these formats based on the `output_type` argument. You can also pass encoded image latents directly to a pipeline, or ask a pipeline to return latents as output with the `output_type="latent"` argument. This lets you take the latents generated by one pipeline and pass them to another pipeline as input without ever leaving the latent space, and it makes it much easier to use multiple pipelines together by passing PyTorch tensors directly between them.

## VaeImageProcessor

[[autodoc]] image_processor.VaeImageProcessor
-------------------------------------------------------------------------------- /docs/source/en/api/loaders.mdx: --------------------------------------------------------------------------------
# Loaders

There are many ways to train adapter neural networks for diffusion models, such as
- [Textual Inversion](./training/text_inversion.mdx)
- [LoRA](https://github.com/cloneofsimo/lora)
- [Hypernetworks](https://arxiv.org/abs/1609.09106)

Such adapter neural networks often consist of only a fraction of the weights of the pretrained model and as such are very portable. The Diffusers library offers an easy-to-use API to load such adapter neural networks via the [`loaders.py` module](https://github.com/huggingface/diffusers/blob/main/src/diffusers/loaders.py).

**Note**: This module is still highly experimental and prone to future changes.

## LoaderMixins

### UNet2DConditionLoadersMixin

[[autodoc]] loaders.UNet2DConditionLoadersMixin

### TextualInversionLoaderMixin

[[autodoc]] loaders.TextualInversionLoaderMixin

### LoraLoaderMixin

[[autodoc]] loaders.LoraLoaderMixin

### FromCkptMixin

[[autodoc]] loaders.FromCkptMixin
-------------------------------------------------------------------------------- /docs/source/en/api/outputs.mdx: --------------------------------------------------------------------------------
# BaseOutputs

All models have outputs that are subclasses of [`~utils.BaseOutput`]. Those are data structures containing all the information returned by the model, but they can also be used as tuples or dictionaries.

For example:

```python
from diffusers import DDIMPipeline

pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
outputs = pipeline()
```

The `outputs` object is a [`~pipelines.ImagePipelineOutput`], which means it has an `images` attribute.
You can access each attribute as you normally would, or with a keyword lookup; if an attribute is not returned by the model, you will get `None`:

```python
outputs.images
outputs["images"]
```

When considering the `outputs` object as a tuple, it only considers the attributes that don't have `None` values. For instance, slicing off the first element returns the tuple `(outputs.images,)`:

```python
outputs[:1]
```

To check a specific pipeline or model output, refer to its corresponding API documentation.

## BaseOutput

[[autodoc]] utils.BaseOutput
    - to_tuple

## ImagePipelineOutput

[[autodoc]] pipelines.ImagePipelineOutput

## FlaxImagePipelineOutput

[[autodoc]] pipelines.pipeline_flax_utils.FlaxImagePipelineOutput

## AudioPipelineOutput

[[autodoc]] pipelines.AudioPipelineOutput

## ImageTextPipelineOutput

[[autodoc]] ImageTextPipelineOutput
-------------------------------------------------------------------------------- /docs/source/en/api/pipelines/dance_diffusion.mdx: --------------------------------------------------------------------------------
# Dance Diffusion

## Overview

[Dance Diffusion](https://github.com/Harmonai-org/sample-generator) by Zach Evans.

Dance Diffusion is the first in a suite of generative audio tools for producers and musicians to be released by Harmonai. For more info or to get involved in the development of these tools, please visit https://harmonai.org and fill out the form on the front page.

The original codebase of this implementation can be found [here](https://github.com/Harmonai-org/sample-generator).

## Available Pipelines:

| Pipeline | Tasks | Colab |
|---|---|:---:|
| [pipeline_dance_diffusion.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py) | *Unconditional Audio Generation* | - |

## DanceDiffusionPipeline

[[autodoc]] DanceDiffusionPipeline
    - all
    - __call__
-------------------------------------------------------------------------------- /docs/source/en/api/pipelines/ddim.mdx: --------------------------------------------------------------------------------
# DDIM

## Overview

[Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon.

The abstract of the paper is the following:

Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from.
We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space. 22 | 23 | The original codebase of this paper can be found here: [ermongroup/ddim](https://github.com/ermongroup/ddim). 24 | For questions, feel free to contact the author on [tsong.me](https://tsong.me/). 25 | 26 | ## Available Pipelines: 27 | 28 | | Pipeline | Tasks | Colab 29 | |---|---|:---:| 30 | | [pipeline_ddim.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddim/pipeline_ddim.py) | *Unconditional Image Generation* | - | 31 | 32 | 33 | ## DDIMPipeline 34 | [[autodoc]] DDIMPipeline 35 | - all 36 | - __call__ 37 | -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/ddpm.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # DDPM 14 | 15 | ## Overview 16 | 17 | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) 18 | (DDPM) by Jonathan Ho, Ajay Jain and Pieter Abbeel proposes the diffusion-based model of the same name, but in the context of the 🤗 Diffusers library, DDPM refers to the discrete denoising scheduler from the paper as well as the pipeline. 19 | 20 | The abstract of the paper is the following: 21 | 22 | We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. 23 | 24 | The original codebase of this paper can be found [here](https://github.com/hojonathanho/diffusion). 25 | 26 | 27 | ## Available Pipelines: 28 | 29 | | Pipeline | Tasks | Colab 30 | |---|---|:---:| 31 | | [pipeline_ddpm.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddpm/pipeline_ddpm.py) | *Unconditional Image Generation* | - | 32 | 33 | 34 | ## DDPMPipeline 35 | [[autodoc]] DDPMPipeline 36 | - all 37 | - __call__ 38 | -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/stable_diffusion/depth2img.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Depth-to-Image Generation 14 | 15 | ## StableDiffusionDepth2ImgPipeline 16 | 17 | The depth-guided Stable Diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), and [LAION](https://laion.ai/), as part of Stable Diffusion 2.0. It uses [MiDaS](https://github.com/isl-org/MiDaS) to infer depth from an image. 18 | 19 | [`StableDiffusionDepth2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images, as well as a `depth_map` to preserve the image's structure.
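As a quick illustration, here is a minimal sketch of calling the pipeline with the checkpoint listed below. The input image URL and prompt are only illustrative placeholders; any RGB image works, and when no `depth_map` is passed the pipeline estimates one itself:

```python
import torch
import requests
from PIL import Image

from diffusers import StableDiffusionDepth2ImgPipeline

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

# example input image; substitute your own
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = Image.open(requests.get(url, stream=True).raw)

# without an explicit `depth_map`, depth is inferred from `image`
image = pipe(prompt="two tigers", image=init_image, strength=0.7).images[0]
```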
20 | 21 | The original codebase can be found here: 22 | - *Stable Diffusion v2*: [Stability-AI/stablediffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) 23 | 24 | Available checkpoints are: 25 | - *stable-diffusion-2-depth*: [stabilityai/stable-diffusion-2-depth](https://huggingface.co/stabilityai/stable-diffusion-2-depth) 26 | 27 | [[autodoc]] StableDiffusionDepth2ImgPipeline 28 | - all 29 | - __call__ 30 | - enable_attention_slicing 31 | - disable_attention_slicing 32 | - enable_xformers_memory_efficient_attention 33 | - disable_xformers_memory_efficient_attention 34 | - load_textual_inversion 35 | - load_lora_weights 36 | - save_lora_weights 37 | -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/stable_diffusion/image_variation.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Image Variation 14 | 15 | ## StableDiffusionImageVariationPipeline 16 | 17 | [`StableDiffusionImageVariationPipeline`] lets you generate variations from an input image using Stable Diffusion. It uses a fine-tuned version of the Stable Diffusion model, trained by [Justin Pinkney](https://www.justinpinkney.com/) (@Buntworthy) at [Lambda](https://lambdalabs.com/). 18 | 19 | The original codebase can be found here: 20 | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) 21 | 22 | Available checkpoints are: 23 | - *sd-image-variations-diffusers*: [lambdalabs/sd-image-variations-diffusers](https://huggingface.co/lambdalabs/sd-image-variations-diffusers) 24 | 25 | [[autodoc]] StableDiffusionImageVariationPipeline 26 | - all 27 | - __call__ 28 | - enable_attention_slicing 29 | - disable_attention_slicing 30 | - enable_xformers_memory_efficient_attention 31 | - disable_xformers_memory_efficient_attention 32 | -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/stable_diffusion/img2img.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Image-to-Image Generation 14 | 15 | ## StableDiffusionImg2ImgPipeline 16 | 17 | The Stable Diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), [runway](https://github.com/runwayml), and [LAION](https://laion.ai/). The [`StableDiffusionImg2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images using Stable Diffusion. 18 | 19 | The original codebase can be found here: [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion/blob/main/scripts/img2img.py) 20 | 21 | [`StableDiffusionImg2ImgPipeline`] is compatible with all Stable Diffusion checkpoints for [Text-to-Image](./text2img). 22 | 23 | The pipeline uses the diffusion-denoising mechanism proposed in SDEdit ([SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations](https://arxiv.org/abs/2108.01073) 24 | by Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, and Stefano Ermon).
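To make this concrete, here is a minimal sketch (the input image URL is only an example; substitute any RGB image):

```python
import torch
import requests
from io import BytesIO
from PIL import Image

from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
init_image = Image.open(BytesIO(requests.get(url).content)).convert("RGB").resize((768, 512))

# `strength` controls how much of the initial image is destroyed by noise
# before denoising (0.0 keeps it unchanged, 1.0 ignores it entirely)
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
).images[0]
```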
25 | 26 | [[autodoc]] StableDiffusionImg2ImgPipeline 27 | - all 28 | - __call__ 29 | - enable_attention_slicing 30 | - disable_attention_slicing 31 | - enable_xformers_memory_efficient_attention 32 | - disable_xformers_memory_efficient_attention 33 | - load_textual_inversion 34 | - from_ckpt 35 | - load_lora_weights 36 | - save_lora_weights 37 | 38 | [[autodoc]] FlaxStableDiffusionImg2ImgPipeline 39 | - all 40 | - __call__ 41 | -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/stable_diffusion/inpaint.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Text-Guided Image Inpainting 14 | 15 | ## StableDiffusionInpaintPipeline 16 | 17 | The Stable Diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), [runway](https://github.com/runwayml), and [LAION](https://laion.ai/). The [`StableDiffusionInpaintPipeline`] lets you edit specific parts of an image by providing a mask and a text prompt using Stable Diffusion. 18 | 19 | The original codebase can be found here: 20 | - *Stable Diffusion v1*: [runwayml/stable-diffusion](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) 21 | - *Stable Diffusion v2*: [Stability-AI/stablediffusion](https://github.com/Stability-AI/stablediffusion#image-inpainting-with-stable-diffusion) 22 | 23 | Available checkpoints are: 24 | - *stable-diffusion-inpainting (512x512 resolution)*: [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting) 25 | - *stable-diffusion-2-inpainting (512x512 resolution)*: [stabilityai/stable-diffusion-2-inpainting](https://huggingface.co/stabilityai/stable-diffusion-2-inpainting) 26 | 27 | [[autodoc]] StableDiffusionInpaintPipeline 28 | - all 29 | - __call__ 30 | - enable_attention_slicing 31 | - disable_attention_slicing 32 | - enable_xformers_memory_efficient_attention 33 | - disable_xformers_memory_efficient_attention 34 | - load_textual_inversion 35 | - load_lora_weights 36 | - save_lora_weights 37 | 38 | [[autodoc]] FlaxStableDiffusionInpaintPipeline 39 | - all 40 | - __call__ 41 | -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/stable_diffusion/latent_upscale.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Stable Diffusion Latent Upscaler 14 | 15 | ## StableDiffusionLatentUpscalePipeline 16 | 17 | The Stable Diffusion Latent Upscaler model was created by [Katherine Crowson](https://github.com/crowsonkb/k-diffusion) in collaboration with [Stability AI](https://stability.ai/). It can be used on top of any [`StableDiffusionUpscalePipeline`] checkpoint to enhance its output image resolution by a factor of 2.
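For example, here is a minimal sketch of chaining a text-to-image pipeline with the latent upscaler while staying in latent space (the base checkpoint and prompt are illustrative):

```python
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
generator = torch.manual_seed(33)

# keep the base output in latent space so it can be fed straight to the upscaler
low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

upscaled_image = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]
```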
18 | 19 | A notebook that demonstrates the original implementation can be found here: 20 | - [Stable Diffusion Upscaler Demo](https://colab.research.google.com/drive/1o1qYJcFeywzCIdkfKJy7cTpgZTCM2EI4) 21 | 22 | Available checkpoints are: 23 | - *stabilityai/latent-upscaler*: [stabilityai/sd-x2-latent-upscaler](https://huggingface.co/stabilityai/sd-x2-latent-upscaler) 24 | 25 | 26 | [[autodoc]] StableDiffusionLatentUpscalePipeline 27 | - all 28 | - __call__ 29 | - enable_sequential_cpu_offload 30 | - enable_attention_slicing 31 | - disable_attention_slicing 32 | - enable_xformers_memory_efficient_attention 33 | - disable_xformers_memory_efficient_attention -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/stable_diffusion/upscale.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Super-Resolution 14 | 15 | ## StableDiffusionUpscalePipeline 16 | 17 | The upscaler diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), and [LAION](https://laion.ai/), as part of Stable Diffusion 2.0. [`StableDiffusionUpscalePipeline`] can be used to enhance the resolution of input images by a factor of 4. 18 | 19 | The original codebase can be found here: 20 | - *Stable Diffusion v2*: [Stability-AI/stablediffusion](https://github.com/Stability-AI/stablediffusion#image-upscaling-with-stable-diffusion) 21 | 22 | Available checkpoints are: 23 | - *stabilityai/stable-diffusion-x4-upscaler (x4 resolution)*: [stable-diffusion-x4-upscaler](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler) 24 | 25 | 26 | [[autodoc]] StableDiffusionUpscalePipeline 27 | - all 28 | - __call__ 29 | - enable_attention_slicing 30 | - disable_attention_slicing 31 | - enable_xformers_memory_efficient_attention 32 | - disable_xformers_memory_efficient_attention -------------------------------------------------------------------------------- /docs/source/en/api/pipelines/stochastic_karras_ve.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Stochastic Karras VE 14 | 15 | ## Overview 16 | 17 | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) by Tero Karras, Miika Aittala, Timo Aila and Samuli Laine. 18 | 19 | The abstract of the paper is the following: 20 | 21 | We argue that the theory and practice of diffusion-based generative models are currently unnecessarily convoluted and seek to remedy the situation by presenting a design space that clearly separates the concrete design choices. This lets us identify several changes to both the sampling and training processes, as well as preconditioning of the score networks. Together, our improvements yield new state-of-the-art FID of 1.79 for CIFAR-10 in a class-conditional setting and 1.97 in an unconditional setting, with much faster sampling (35 network evaluations per image) than prior designs. To further demonstrate their modular nature, we show that our design changes dramatically improve both the efficiency and quality obtainable with pre-trained score networks from previous work, including improving the FID of an existing ImageNet-64 model from 2.07 to near-SOTA 1.55. 22 | 23 | This pipeline implements the stochastic sampling method tailored to Variance Exploding (VE) models.
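A minimal sketch of unconditional sampling with this pipeline is shown below; the checkpoint id is a placeholder, so substitute a repository that provides a compatible `UNet2DModel` and `KarrasVeScheduler`:

```python
from diffusers import KarrasVePipeline

# placeholder model id for illustration only
pipe = KarrasVePipeline.from_pretrained("path/to/karras-ve-checkpoint")

image = pipe(num_inference_steps=50).images[0]
image.save("karras_ve_sample.png")
```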
24 | 25 | 26 | ## Available Pipelines: 27 | 28 | | Pipeline | Tasks | Colab 29 | |---|---|:---:| 30 | | [pipeline_stochastic_karras_ve.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py) | *Unconditional Image Generation* | - | 31 | 32 | 33 | ## KarrasVePipeline 34 | [[autodoc]] KarrasVePipeline 35 | - all 36 | - __call__ 37 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/ddim_inverse.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Inverse Denoising Diffusion Implicit Models (DDIMInverse) 14 | 15 | ## Overview 16 | 17 | This scheduler is the inverted scheduler of [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon. 18 | The implementation is mostly based on the DDIM inversion definition of [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://arxiv.org/pdf/2211.09794.pdf). 19 | 20 | ## DDIMInverseScheduler 21 | [[autodoc]] DDIMInverseScheduler 22 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/ddpm.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Denoising Diffusion Probabilistic Models (DDPM) 14 | 15 | ## Overview 16 | 17 | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) 18 | (DDPM) by Jonathan Ho, Ajay Jain and Pieter Abbeel proposes the diffusion-based model of the same name, but in the context of the 🤗 Diffusers library, DDPM refers to the discrete denoising scheduler from the paper as well as the pipeline. 19 | 20 | The abstract of the paper is the following: 21 | 22 | We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. 23 | 24 | The original paper can be found [here](https://arxiv.org/abs/2006.11239). 25 | 26 | ## DDPMScheduler 27 | [[autodoc]] DDPMScheduler 28 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/deis.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # DEIS 14 | 15 | Fast Sampling of Diffusion Models with Exponential Integrator. 16 | 17 | ## Overview 18 | 19 | The original paper can be found [here](https://arxiv.org/abs/2204.13902). The original implementation can be found [here](https://github.com/qsh-zh/deis). 20 | 21 | ## DEISMultistepScheduler 22 | [[autodoc]] DEISMultistepScheduler 23 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/dpm_discrete.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # DPM Discrete Scheduler inspired by the Karras et
al. paper 14 | 15 | ## Overview 16 | 17 | Inspired by [Karras et al.](https://arxiv.org/abs/2206.00364). The scheduler was ported from @crowsonkb's https://github.com/crowsonkb/k-diffusion library. 18 | 19 | All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/). 20 | 21 | ## KDPM2DiscreteScheduler 22 | [[autodoc]] KDPM2DiscreteScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/dpm_discrete_ancestral.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # DPM Discrete Scheduler with ancestral sampling inspired by the Karras et al. paper 14 | 15 | ## Overview 16 | 17 | Inspired by [Karras et al.](https://arxiv.org/abs/2206.00364). The scheduler was ported from @crowsonkb's https://github.com/crowsonkb/k-diffusion library. 18 | 19 | All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/). 20 | 21 | ## KDPM2AncestralDiscreteScheduler 22 | [[autodoc]] KDPM2AncestralDiscreteScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/dpm_sde.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # DPM Stochastic Scheduler inspired by the Karras et al. paper 14 | 15 | ## Overview 16 | 17 | Inspired by the stochastic sampler from [Karras et al.](https://arxiv.org/abs/2206.00364). 18 | The scheduler was ported from @crowsonkb's https://github.com/crowsonkb/k-diffusion library. 19 | 20 | All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/). 21 | 22 | ## DPMSolverSDEScheduler 23 | [[autodoc]] DPMSolverSDEScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/euler.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Euler scheduler 14 | 15 | ## Overview 16 | 17 | Euler scheduler (Algorithm 2) from the paper [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) by Karras et al. (2022). Based on the original [k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L51) implementation by Katherine Crowson. 18 | A fast scheduler which often generates good outputs in 20-30 steps. 19 | 20 | ## EulerDiscreteScheduler 21 | [[autodoc]] EulerDiscreteScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/euler_ancestral.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Euler Ancestral scheduler 14 | 15 | ## Overview 16 | 17 | Ancestral sampling with Euler method steps. Based on the original [k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L72) implementation by Katherine Crowson. 18 | A fast scheduler which often generates good outputs in 20-30 steps. 19 | 20 | ## EulerAncestralDiscreteScheduler 21 | [[autodoc]] EulerAncestralDiscreteScheduler 22 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/heun.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Heun scheduler inspired by the Karras et
al. paper 14 | 15 | ## Overview 16 | 17 | Implements Algorithm 1 of [Karras et al.](https://arxiv.org/abs/2206.00364). 18 | The scheduler was ported from @crowsonkb's https://github.com/crowsonkb/k-diffusion library. 19 | 20 | All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/). 21 | 22 | ## HeunDiscreteScheduler 23 | [[autodoc]] HeunDiscreteScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/ipndm.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Improved pseudo numerical methods for diffusion models (iPNDM) 14 | 15 | ## Overview 16 | 17 | The original implementation can be found [here](https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296). 18 | 19 | ## IPNDMScheduler 20 | [[autodoc]] IPNDMScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/lms_discrete.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Linear multistep scheduler for discrete beta schedules 14 | 15 | ## Overview 16 | 17 | The original paper can be found [here](https://arxiv.org/abs/2206.00364). 18 | 19 | ## LMSDiscreteScheduler 20 | [[autodoc]] LMSDiscreteScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/multistep_dpm_solver.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Multistep DPM-Solver 14 | 15 | ## Overview 16 | 17 | The original paper can be found [here](https://arxiv.org/abs/2206.00927), along with the [improved version](https://arxiv.org/abs/2211.01095). The original implementation can be found [here](https://github.com/LuChengTHU/dpm-solver). 18 | 19 | ## DPMSolverMultistepScheduler 20 | [[autodoc]] DPMSolverMultistepScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/multistep_dpm_solver_inverse.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Inverse Multistep DPM-Solver (DPMSolverMultistepInverse) 14 | 15 | ## Overview 16 | 17 | This scheduler is the inverted scheduler of [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://arxiv.org/abs/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://arxiv.org/abs/2211.01095) 18 | by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu. 19 | The implementation is mostly based on the DDIM inversion definition of [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://arxiv.org/pdf/2211.09794.pdf) and the ad-hoc notebook implementation for DiffEdit latent inversion [here](https://github.com/Xiang-cd/DiffEdit-stable-diffusion/blob/main/diffedit.ipynb).
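As a rough sketch of where this scheduler fits, assuming a DiffEdit-style setup (the checkpoint id is illustrative), it is typically paired with its forward counterpart:

```python
import torch
from diffusers import (
    StableDiffusionDiffEditPipeline,
    DPMSolverMultistepScheduler,
    DPMSolverMultistepInverseScheduler,
)

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")

# forward scheduler for sampling, inverse scheduler for latent inversion
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
```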
20 | 21 | ## DPMSolverMultistepInverseScheduler 22 | [[autodoc]] DPMSolverMultistepInverseScheduler 23 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/pndm.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Pseudo numerical methods for diffusion models (PNDM) 14 | 15 | ## Overview 16 | 17 | The original implementation can be found [here](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181). 18 | 19 | ## PNDMScheduler 20 | [[autodoc]] PNDMScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/repaint.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # RePaint scheduler 14 | 15 | ## Overview 16 | 17 | A DDPM-based inpainting scheduler for unsupervised inpainting with extreme masks. 18 | Intended for use with [`RePaintPipeline`]. 19 | Based on the paper [RePaint: Inpainting using Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2201.09865) 20 | and the original implementation by Andreas Lugmayr et al.: https://github.com/andreas128/RePaint 21 | 22 | ## RePaintScheduler 23 | [[autodoc]] RePaintScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/score_sde_ve.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Variance Exploding Stochastic Differential Equation (VE-SDE) scheduler 14 | 15 | ## Overview 16 | 17 | The original paper can be found [here](https://arxiv.org/abs/2011.13456). 18 | 19 | ## ScoreSdeVeScheduler 20 | [[autodoc]] ScoreSdeVeScheduler 21 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/score_sde_vp.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Variance Preserving Stochastic Differential Equation (VP-SDE) scheduler 14 | 15 | ## Overview 16 | 17 | The original paper can be found [here](https://arxiv.org/abs/2011.13456). 18 | 19 | 20 | 21 | Score SDE-VP is under construction. 22 | 23 | 24 | 25 | ## ScoreSdeVpScheduler 26 | [[autodoc]] schedulers.scheduling_sde_vp.ScoreSdeVpScheduler 27 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/singlestep_dpm_solver.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Singlestep DPM-Solver 14 | 15 | ## Overview 16 | 17 | The original paper can be found [here](https://arxiv.org/abs/2206.00927), along with the [improved version](https://arxiv.org/abs/2211.01095). The original implementation can be found [here](https://github.com/LuChengTHU/dpm-solver). 18 | 19 | ## DPMSolverSinglestepScheduler 20 | [[autodoc]] DPMSolverSinglestepScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/stochastic_karras_ve.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Variance exploding, stochastic sampling from Karras et al. 14 | 15 | ## Overview 16 | 17 | The original paper can be found [here](https://arxiv.org/abs/2206.00364).
18 | 19 | ## KarrasVeScheduler 20 | [[autodoc]] KarrasVeScheduler -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/unipc.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # UniPC 14 | 15 | ## Overview 16 | 17 | UniPC is a training-free framework designed for fast sampling of diffusion models. It consists of a corrector (UniC) and a predictor (UniP) that share a unified analytical form and support arbitrary orders. 18 | 19 | For more details about the method, please refer to the [paper](https://arxiv.org/abs/2302.04867) and the [code](https://github.com/wl-zhao/UniPC). 20 | 21 | ## UniPCMultistepScheduler 22 | [[autodoc]] UniPCMultistepScheduler 23 | -------------------------------------------------------------------------------- /docs/source/en/api/schedulers/vq_diffusion.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # VQDiffusionScheduler 14 | 15 | ## Overview 16 | 17 | The original paper can be found [here](https://arxiv.org/abs/2111.14822). 18 | 19 | ## VQDiffusionScheduler 20 | [[autodoc]] VQDiffusionScheduler -------------------------------------------------------------------------------- /docs/source/en/api/utilities.mdx: -------------------------------------------------------------------------------- 1 | # Utilities 2 | 3 | Utility and helper functions for working with 🤗 Diffusers. 4 | 5 | ## randn_tensor 6 | 7 | [[autodoc]] diffusers.utils.randn_tensor 8 | 9 | ## numpy_to_pil 10 | 11 | [[autodoc]] utils.pil_utils.numpy_to_pil 12 | 13 | ## pt_to_pil 14 | 15 | [[autodoc]] utils.pil_utils.pt_to_pil 16 | 17 | ## load_image 18 | 19 | [[autodoc]] utils.testing_utils.load_image 20 | 21 | ## export_to_video 22 | 23 | [[autodoc]] utils.testing_utils.export_to_video -------------------------------------------------------------------------------- /docs/source/en/imgs/access_request.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/docs/source/en/imgs/access_request.png -------------------------------------------------------------------------------- /docs/source/en/imgs/diffusers_library.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/docs/source/en/imgs/diffusers_library.jpg -------------------------------------------------------------------------------- /docs/source/en/optimization/open_vino.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | 14 | # How to use OpenVINO for inference 15 | 16 | 🤗 [Optimum](https://github.com/huggingface/optimum-intel) provides a Stable Diffusion pipeline compatible with OpenVINO. You can now easily perform inference with OpenVINO Runtime on a variety of Intel processors (see the [full list of supported devices](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html)).
17 | 18 | ## Installation 19 | 20 | Install 🤗 Optimum Intel with the following command: 21 | 22 | ``` 23 | pip install optimum["openvino"] 24 | ``` 25 | 26 | ## Stable Diffusion Inference 27 | 28 | To load an OpenVINO model and run inference with OpenVINO Runtime, you need to replace `StableDiffusionPipeline` with `OVStableDiffusionPipeline`. In case you want to load a PyTorch model and convert it to the OpenVINO format on-the-fly, you can set `export=True`. 29 | 30 | ```python 31 | from optimum.intel.openvino import OVStableDiffusionPipeline 32 | 33 | model_id = "runwayml/stable-diffusion-v1-5" 34 | pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True) 35 | prompt = "a photo of an astronaut riding a horse on mars" 36 | image = pipe(prompt).images[0] 37 | ``` 38 | 39 | You can find more examples (such as static reshaping and model compilation) in the [Optimum documentation](https://huggingface.co/docs/optimum/intel/inference#export-and-inference-of-stable-diffusion-models). 40 | -------------------------------------------------------------------------------- /docs/source/en/optimization/opt_overview.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Overview 14 | 15 | Generating high-quality outputs is computationally intensive, especially during each iterative step where you go from a noisy output to a less noisy output. One of 🧨 Diffusers' goals is to make this technology widely accessible to everyone, which includes enabling fast inference on consumer and specialized hardware. 16 | 17 | This section will cover tips and tricks - like half-precision weights and sliced attention - for optimizing inference speed and reducing memory consumption. You can also learn how to speed up your PyTorch code with [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) or [ONNX Runtime](https://onnxruntime.ai/docs/), and enable memory-efficient attention with [xFormers](https://facebookresearch.github.io/xformers/). There are also guides for running inference on specific hardware like Apple Silicon, and Intel or Habana processors. -------------------------------------------------------------------------------- /docs/source/en/optimization/xformers.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Installing xFormers 14 | 15 | We recommend the use of [xFormers](https://github.com/facebookresearch/xformers) for both inference and training. In our tests, the optimizations performed in the attention blocks allow for both faster speed and reduced memory consumption. 16 | 17 | Starting from version `0.0.16` of xFormers, released in January 2023, installation can be easily performed using pre-built pip wheels: 18 | 19 | ```bash 20 | pip install xformers 21 | ``` 22 | 23 | 24 | 25 | The xFormers pip package requires the latest version of PyTorch (1.13.1 as of xFormers 0.0.16). If you need to use a previous version of PyTorch, then we recommend you install xFormers from source using [the project instructions](https://github.com/facebookresearch/xformers#installing-xformers). 26 | 27 | 28 | 29 | After xFormers is installed, you can use `enable_xformers_memory_efficient_attention()` for faster inference and reduced memory consumption, as discussed [here](fp16#memory-efficient-attention).
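For instance, a minimal sketch, assuming a CUDA GPU and an example checkpoint id:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# enables memory-efficient attention from xFormers for all attention blocks
pipe.enable_xformers_memory_efficient_attention()

image = pipe("an astronaut riding a horse").images[0]
```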
30 | 31 | 32 | 33 | According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training (fine-tuning or DreamBooth) on some GPUs. If you observe that problem, please install a development version as indicated in that comment. 34 | 35 | 36 | -------------------------------------------------------------------------------- /docs/source/en/training/adapt_a_model.mdx: -------------------------------------------------------------------------------- 1 | # Adapt a model to a new task 2 | 3 | Many diffusion systems share the same components, allowing you to adapt a pretrained model for one task to an entirely different task. 4 | 5 | This guide will show you how to adapt a pretrained text-to-image model for inpainting by initializing and modifying the architecture of a pretrained [`UNet2DConditionModel`]. 6 | 7 | ## Configure UNet2DConditionModel parameters 8 | 9 | A [`UNet2DConditionModel`] by default accepts 4 channels in the [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels). For example, load a pretrained text-to-image model like [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) and take a look at the number of `in_channels`: 10 | 11 | ```py 12 | from diffusers import StableDiffusionPipeline 13 | 14 | pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") 15 | pipeline.unet.config["in_channels"] 16 | 4 17 | ``` 18 | 19 | Inpainting requires 9 channels in the input sample. You can check this value in a pretrained inpainting model like [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting): 20 | 21 | ```py 22 | from diffusers import StableDiffusionPipeline 23 | 24 | pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") 25 | pipeline.unet.config["in_channels"] 26 | 9 27 | ``` 28 | 29 | To adapt your text-to-image model for inpainting, you'll need to change the number of `in_channels` from 4 to 9. 30 | 31 | Initialize a [`UNet2DConditionModel`] with the pretrained text-to-image model weights, and change `in_channels` to 9. Changing the number of `in_channels` means you need to set `ignore_mismatched_sizes=True` and `low_cpu_mem_usage=False` to avoid a size mismatch error, because the shape of the input layer is different now. 32 | 33 | ```py 34 | from diffusers import UNet2DConditionModel 35 | 36 | model_id = "runwayml/stable-diffusion-v1-5" 37 | unet = UNet2DConditionModel.from_pretrained( 38 | model_id, subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True 39 | ) 40 | ``` 41 | 42 | The pretrained weights of the other components from the text-to-image model are initialized from their checkpoints, but the input channel weights (`conv_in.weight`) of the `unet` are randomly initialized. It is important to fine-tune the model for inpainting, because otherwise the model returns noise. 43 | -------------------------------------------------------------------------------- /docs/source/en/tutorials/tutorial_overview.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Overview 14 | 15 | Welcome to 🧨 Diffusers! If you're new to diffusion models and generative AI, and want to learn more, then you've come to the right place.
These beginner-friendly tutorials are designed to provide a gentle introduction to diffusion models and help you understand the library fundamentals - the core components and how 🧨 Diffusers is meant to be used. 16 | 17 | You'll learn how to use a pipeline for inference to rapidly generate samples, and then deconstruct that pipeline to really understand how to use the library as a modular toolbox for building your own diffusion systems. In the next lesson, you'll learn how to train your own diffusion model to generate what you want. 18 | 19 | After completing the tutorials, you'll have gained the necessary skills to start exploring the library on your own and see how to use it for your own projects and applications. 20 | 21 | Feel free to join our community on [Discord](https://discord.com/invite/JfAtkvEtRb) or the [forums](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) to connect and collaborate with other users and developers! 22 | 23 | Let's start diffusing! 🧨 -------------------------------------------------------------------------------- /docs/source/en/using-diffusers/audio.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Using Diffusers for audio 14 | 15 | [`DanceDiffusionPipeline`] and [`AudioDiffusionPipeline`] can be used to generate 16 | audio rapidly! More coming soon! -------------------------------------------------------------------------------- /docs/source/en/using-diffusers/loading_overview.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Overview 14 | 15 | 🧨 Diffusers offers many pipelines, models, and schedulers for generative tasks. To make loading these components as simple as possible, we provide a single and unified method - `from_pretrained()` - that loads any of these components from either the Hugging Face [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) or your local machine. Whenever you load a pipeline or model, the latest files are automatically downloaded and cached so you can quickly reuse them next time without redownloading the files. 16 | 17 | This section will show you everything you need to know about loading pipelines, how to load different components in a pipeline, how to load checkpoint variants, and how to load community pipelines. You'll also learn how to load schedulers and compare the speed and quality trade-offs of using different schedulers. Finally, you'll see how to convert and load KerasCV checkpoints so you can use them in PyTorch with 🧨 Diffusers. -------------------------------------------------------------------------------- /docs/source/en/using-diffusers/other-modalities.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Using Diffusers with other modalities 14 | 15 | Diffusers is in the process of expanding to modalities other than images. 16 | 17 | Example type | Colab | Pipeline | 18 | :-------------------------:|:-------------------------:|:-------------------------:| 19 | [Molecule conformation](https://www.nature.com/subjects/molecular-conformation#:~:text=Definition,to%20changes%20in%20their%20environment.) generation | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/geodiff_molecule_conformation.ipynb) | ❌ 20 | 21 | More coming soon!
-------------------------------------------------------------------------------- /docs/source/en/using-diffusers/pipeline_overview.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Overview 14 | 15 | A pipeline is an end-to-end class that provides a quick and easy way to use a diffusion system for inference by bundling independently trained models and schedulers together. Certain combinations of models and schedulers define specific pipeline types, like [`StableDiffusionPipeline`] or [`StableDiffusionControlNetPipeline`], with specific capabilities. All pipeline types inherit from the base [`DiffusionPipeline`] class; pass it any checkpoint, and it'll automatically detect the pipeline type and load the necessary components. 16 | 17 | This section introduces you to some of the tasks supported by our pipelines, such as unconditional image generation and different techniques and variations of text-to-image generation. You'll also learn how to gain more control over the generation process by setting a seed for reproducibility and weighting prompts to adjust the influence certain words in the prompt have over the output. Finally, you'll see how you can create a community pipeline for a custom task like generating images from speech. -------------------------------------------------------------------------------- /docs/source/en/using-diffusers/rl.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # Using Diffusers for reinforcement learning 14 | 15 | Support for one RL model and its related pipelines is included in the `experimental` module of Diffusers. 16 | More models and examples coming soon! 17 | 18 | ## Diffuser Value-guided Planning 19 | 20 | You can run the model from [*Planning with Diffusion for Flexible Behavior Synthesis*](https://arxiv.org/abs/2205.09991) with Diffusers. 21 | The script is located in the [RL Examples](https://github.com/huggingface/diffusers/tree/main/examples/rl) folder. 22 | 23 | Or, run this example in Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb) 24 | 25 | [[autodoc]] diffusers.experimental.ValueGuidedRLPipeline -------------------------------------------------------------------------------- /docs/source/en/using-diffusers/using_safetensors: -------------------------------------------------------------------------------- 1 | # What is safetensors? 2 | 3 | [safetensors](https://github.com/huggingface/safetensors) is a different format 4 | from the classic `.bin` format used by PyTorch, which relies on pickle. 5 | 6 | Pickle is notoriously unsafe: it allows a malicious file to execute arbitrary code. 7 | The Hub itself tries to prevent issues with it, but it's not a silver bullet. 8 | 9 | The first and foremost goal of `safetensors` is to make loading machine learning models *safe*, 10 | in the sense that your computer cannot be taken over. 11 | 12 | # Why use safetensors? 13 | 14 | **Safety** can be one reason, if you're attempting to use a model that is not well known and 15 | you're not sure about the source of the file. 16 | 17 | A secondary reason is **the speed of loading**. safetensors can load models much faster 18 | than regular pickle files. If you spend a lot of time switching models, this can be 19 | a huge time saver.
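As a quick sketch, assuming a version of Diffusers recent enough to support the `use_safetensors` flag and a checkpoint that actually ships `.safetensors` weights:

```python
from diffusers import StableDiffusionPipeline

# raises an error if the repository does not provide .safetensors weights
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", use_safetensors=True
)
```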
20 | -------------------------------------------------------------------------------- /docs/source/ko/_toctree.yml: -------------------------------------------------------------------------------- 1 | - sections: 2 | - local: index 3 | title: "🧨 Diffusers" 4 | - local: quicktour 5 | title: "훑어보기" 6 | - local: in_translation 7 | title: Stable Diffusion 8 | - local: installation 9 | title: "설치" 10 | title: "시작하기" 11 | 12 | - sections: 13 | - sections: 14 | - local: in_translation 15 | title: 개요 16 | - local: in_translation 17 | title: Unconditional 이미지 생성 18 | - local: in_translation 19 | title: Textual Inversion 20 | - local: training/dreambooth 21 | title: DreamBooth 22 | - local: training/text2image 23 | title: Text-to-image 24 | - local: training/lora 25 | title: Low-Rank Adaptation of Large Language Models (LoRA) 26 | - local: in_translation 27 | title: ControlNet 28 | - local: in_translation 29 | title: InstructPix2Pix 학습 30 | title: 학습 31 | - sections: 32 | - local: in_translation 33 | title: 개요 34 | - local: optimization/fp16 35 | title: 메모리와 속도 36 | - local: in_translation 37 | title: Torch2.0 지원 38 | - local: optimization/xformers 39 | title: xFormers 40 | - local: optimization/onnx 41 | title: ONNX 42 | - local: optimization/open_vino 43 | title: OpenVINO 44 | - local: optimization/mps 45 | title: MPS 46 | - local: optimization/habana 47 | title: Habana Gaudi 48 | title: 최적화/특수 하드웨어 -------------------------------------------------------------------------------- /docs/source/ko/in_translation.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # 번역중 14 | 15 | 열심히 번역을 진행중입니다. 조금만 기다려주세요. 16 | 감사합니다! -------------------------------------------------------------------------------- /docs/source/ko/optimization/onnx.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | 14 | # 추론을 위해 ONNX 런타임을 사용하는 방법 15 | 16 | 🤗 Diffusers는 ONNX Runtime과 호환되는 Stable Diffusion 파이프라인을 제공합니다. 이를 통해 ONNX(CPU 포함)를 지원하고 PyTorch의 가속 버전을 사용할 수 없는 모든 하드웨어에서 Stable Diffusion을 실행할 수 있습니다. 17 | 18 | ## 설치 19 | 20 | 다음 명령어로 ONNX Runtime을 지원하는 🤗 Optimum을 설치합니다: 21 | 22 | ``` 23 | pip install optimum["onnxruntime"] 24 | ``` 25 | 26 | ## Stable Diffusion 추론 27 | 28 | 아래 코드는 ONNX 런타임을 사용하는 방법을 보여줍니다. `StableDiffusionPipeline` 대신 `OnnxStableDiffusionPipeline`을 사용해야 합니다. 29 | PyTorch 모델을 불러오고 즉시 ONNX 형식으로 변환하려는 경우 `export=True`로 설정합니다. 30 | 31 | ```python 32 | from optimum.onnxruntime import ORTStableDiffusionPipeline 33 | 34 | model_id = "runwayml/stable-diffusion-v1-5" 35 | pipe = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True) 36 | prompt = "a photo of an astronaut riding a horse on mars" 37 | image = pipe(prompt).images[0] 38 | pipe.save_pretrained("./onnx-stable-diffusion-v1-5") 39 | ``` 40 | 41 | 파이프라인을 ONNX 형식으로 오프라인으로 내보내고 나중에 추론에 사용하려는 경우, 42 | [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) 명령어를 사용할 수 있습니다: 43 | 44 | ```bash 45 | optimum-cli export onnx --model runwayml/stable-diffusion-v1-5 sd_v15_onnx/ 46 | ``` 47 | 48 | 그 다음 추론을 수행합니다: 49 | 50 | ```python 51 | from optimum.onnxruntime import ORTStableDiffusionPipeline 52 | 53 | model_id = "sd_v15_onnx" 54 | pipe = ORTStableDiffusionPipeline.from_pretrained(model_id) 55 | prompt = "a photo of an astronaut riding a horse on mars" 56 | image = pipe(prompt).images[0] 57 | ``` 58 | 59 | 위에서는 `export=True`를 지정할 필요가 없었다는 점에 유의하세요.
60 | 61 | [Optimum 문서](https://huggingface.co/docs/optimum/)에서 더 많은 예시를 찾을 수 있습니다. 62 | 63 | ## 알려진 이슈들 64 | 65 | - 여러 프롬프트를 배치로 생성하면 너무 많은 메모리가 사용되는 것 같습니다. 이를 조사하는 동안, 배치 대신 반복 방법이 필요할 수도 있습니다. 66 | -------------------------------------------------------------------------------- /docs/source/ko/optimization/open_vino.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # 추론을 위한 OpenVINO 사용 방법 14 | 15 | 🤗 [Optimum](https://github.com/huggingface/optimum-intel)은 OpenVINO와 호환되는 Stable Diffusion 파이프라인을 제공합니다. 16 | 이제 다양한 Intel 프로세서에서 OpenVINO Runtime으로 쉽게 추론을 수행할 수 있습니다. ([여기](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html)서 지원되는 전 기기 목록을 확인하세요). 17 | 18 | ## 설치 19 | 20 | 다음 명령어로 🤗 Optimum을 설치합니다: 21 | 22 | ``` 23 | pip install optimum["openvino"] 24 | ``` 25 | 26 | ## Stable Diffusion 추론 27 | 28 | OpenVINO 모델을 불러오고 OpenVINO 런타임으로 추론을 실행하려면 `StableDiffusionPipeline`을 `OVStableDiffusionPipeline`으로 교체해야 합니다. PyTorch 모델을 불러오고 즉시 OpenVINO 형식으로 변환하려는 경우 `export=True`로 설정합니다. 29 | 30 | ```python 31 | from optimum.intel.openvino import OVStableDiffusionPipeline 32 | 33 | model_id = "runwayml/stable-diffusion-v1-5" 34 | pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True) 35 | prompt = "a photo of an astronaut riding a horse on mars" 36 | image = pipe(prompt).images[0] 37 | ``` 38 | 39 | [Optimum 문서](https://huggingface.co/docs/optimum/intel/inference#export-and-inference-of-stable-diffusion-models)에서 (정적 reshaping과 모델 컴파일 등의) 더 많은 예시들을 찾을 수 있습니다. 40 | -------------------------------------------------------------------------------- /docs/source/ko/optimization/xformers.mdx: -------------------------------------------------------------------------------- 1 | 12 | 13 | # xFormers 설치하기 14 | 15 | 추론과 학습 모두에 [xFormers](https://github.com/facebookresearch/xformers)를 사용하는 것이 좋습니다. 16 | 자체 테스트로 어텐션 블록에서 수행된 최적화가 더 빠른 속도와 적은 메모리 소비를 확인했습니다. 17 | 18 | 2023년 1월에 출시된 xFormers 버전 '0.0.16'부터 사전 빌드된 pip wheel을 사용하여 쉽게 설치할 수 있습니다: 19 | 20 | ```bash 21 | pip install xformers 22 | ``` 23 | 24 | 25 | 26 | xFormers PIP 패키지에는 최신 버전의 PyTorch(xFormers 0.0.16에 1.13.1)가 필요합니다. 이전 버전의 PyTorch를 사용해야 하는 경우 [프로젝트 지침](https://github.com/facebookresearch/xformers#installing-xformers)의 소스를 사용해 xFormers를 설치하는 것이 좋습니다. 27 | 28 | 29 | 30 | xFormers를 설치하면, [여기](fp16#memory-efficient-attention)서 설명한 것처럼 'enable_xformers_memory_efficient_attention()'을 사용하여 추론 속도를 높이고 메모리 소비를 줄일 수 있습니다. 31 | 32 | 33 | 34 | [이 이슈](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212)에 따르면 xFormers `v0.0.16`에서 GPU를 사용한 학습(파인 튜닝 또는 Dreambooth)을 할 수 없습니다. 해당 문제가 발견되면, 해당 코멘트를 참고해 development 버전을 설치하세요.
35 | 36 | 37 | -------------------------------------------------------------------------------- /examples/community/one_step_unet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import torch 3 | 4 | from diffusers import DiffusionPipeline 5 | 6 | 7 | class UnetSchedulerOneForwardPipeline(DiffusionPipeline): 8 | def __init__(self, unet, scheduler): 9 | super().__init__() 10 | 11 | self.register_modules(unet=unet, scheduler=scheduler) 12 | 13 | def __call__(self): 14 | image = torch.randn( 15 | (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), 16 | ) 17 | timestep = 1 18 | 19 | model_output = self.unet(image, timestep).sample 20 | scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample 21 | 22 | result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output) 23 | 24 | return result 25 | -------------------------------------------------------------------------------- /examples/conftest.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # tests directory-specific settings - this file is run automatically 16 | # by pytest before any tests are run 17 | 18 | import sys 19 | import warnings 20 | from os.path import abspath, dirname, join 21 | 22 | 23 | # allow having multiple repository checkouts and not needing to remember to rerun 24 | # 'pip install -e .[dev]' when switching between checkouts and running tests. 25 | git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src")) 26 | sys.path.insert(1, git_repo_path) 27 | 28 | 29 | # silence FutureWarning warnings in tests since often we can't act on them until 30 | # they become normal warnings - i.e. 
the tests still need to test the current functionality 31 | warnings.simplefilter(action="ignore", category=FutureWarning) 32 | 33 | 34 | def pytest_addoption(parser): 35 | from diffusers.utils.testing_utils import pytest_addoption_shared 36 | 37 | pytest_addoption_shared(parser) 38 | 39 | 40 | def pytest_terminal_summary(terminalreporter): 41 | from diffusers.utils.testing_utils import pytest_terminal_summary_main 42 | 43 | make_reports = terminalreporter.config.getoption("--make-reports") 44 | if make_reports: 45 | pytest_terminal_summary_main(terminalreporter, id=make_reports) 46 | -------------------------------------------------------------------------------- /examples/controlnet/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | ftfy 5 | tensorboard 6 | datasets 7 | -------------------------------------------------------------------------------- /examples/controlnet/requirements_flax.txt: -------------------------------------------------------------------------------- 1 | transformers>=4.25.1 2 | datasets 3 | flax 4 | optax 5 | torch 6 | torchvision 7 | ftfy 8 | tensorboard 9 | Jinja2 10 | -------------------------------------------------------------------------------- /examples/custom_diffusion/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate 2 | torchvision 3 | transformers>=4.25.1 4 | ftfy 5 | tensorboard 6 | Jinja2 7 | -------------------------------------------------------------------------------- /examples/dreambooth/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | ftfy 5 | tensorboard 6 | Jinja2 7 | -------------------------------------------------------------------------------- /examples/dreambooth/requirements_flax.txt: -------------------------------------------------------------------------------- 1 | transformers>=4.25.1 2 | flax 3 | optax 4 | torch 5 | torchvision 6 | ftfy 7 | tensorboard 8 | Jinja2 9 | -------------------------------------------------------------------------------- /examples/inference/README.md: -------------------------------------------------------------------------------- 1 | # Inference Examples 2 | 3 | **The inference examples folder is deprecated and will be removed in a future version**. 4 | **Officially supported inference examples can be found in the [Pipelines folder](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines)**. 
5 | 6 | - For `Image-to-Image text-guided generation with Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples) 7 | - For `In-painting using Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples) 8 | - For `Tweak prompts reusing seeds and latents`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples) 9 | -------------------------------------------------------------------------------- /examples/inference/image_to_image.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401 4 | 5 | 6 | warnings.warn( 7 | "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" 8 | " StableDiffusionImg2ImgPipeline` instead." 9 | ) 10 | -------------------------------------------------------------------------------- /examples/inference/inpainting.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from diffusers import StableDiffusionInpaintPipeline  # noqa F401 4 | 5 | 6 | warnings.warn( 7 | "The `inpainting.py` script is outdated. Please use directly `from diffusers import" 8 | " StableDiffusionInpaintPipeline` instead." 9 | ) 10 | -------------------------------------------------------------------------------- /examples/instruct_pix2pix/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | datasets 5 | ftfy 6 | tensorboard -------------------------------------------------------------------------------- /examples/research_projects/README.md: -------------------------------------------------------------------------------- 1 | # Research projects 2 | 3 | This folder contains various research projects using 🧨 Diffusers. 4 | They are not actively maintained by the core maintainers of this library and often require a specific version of Diffusers that is indicated in the requirements file of each folder. 5 | Updating them to the most recent version of the library will require some work. 6 | 7 | To use any of them, just run the command 8 | 9 | ``` 10 | pip install -r requirements.txt 11 | ``` 12 | inside the folder of your choice. 13 | 14 | If you need help with any of those, please open an issue where you directly ping the author(s), as indicated at the top of the README of each folder.
15 | -------------------------------------------------------------------------------- /examples/research_projects/colossalai/inference.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from diffusers import StableDiffusionPipeline 4 | 5 | 6 | model_id = "path-to-your-trained-model" 7 | pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") 8 | 9 | prompt = "A photo of sks dog in a bucket" 10 | image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] 11 | 12 | image.save("dog-bucket.png") 13 | -------------------------------------------------------------------------------- /examples/research_projects/colossalai/requirement.txt: -------------------------------------------------------------------------------- 1 | diffusers 2 | torch 3 | torchvision 4 | ftfy 5 | tensorboard 6 | Jinja2 7 | transformers -------------------------------------------------------------------------------- /examples/research_projects/dreambooth_inpaint/requirements.txt: -------------------------------------------------------------------------------- 1 | diffusers==0.9.0 2 | accelerate>=0.16.0 3 | torchvision 4 | transformers>=4.21.0 5 | ftfy 6 | tensorboard 7 | Jinja2 8 | -------------------------------------------------------------------------------- /examples/research_projects/intel_opts/README.md: -------------------------------------------------------------------------------- 1 | ## Diffusers examples with Intel optimizations 2 | 3 | **This research project is not actively maintained by the diffusers team. For any questions or comments, please make sure to tag @hshen14.** 4 | 5 | This aims to provide diffusers examples with Intel optimizations such as Bfloat16 for training/fine-tuning acceleration and 8-bit integer (INT8) for inference acceleration on Intel platforms. 6 | 7 | ## Accelerating the fine-tuning for textual inversion 8 | 9 | We accelerate the fine-tuning for textual inversion with Intel Extension for PyTorch. The [examples](textual_inversion) enable both single-node and multi-node distributed training with Bfloat16 support on Intel Xeon Scalable Processors. 10 | 11 | ## Accelerating the inference for Stable Diffusion using Bfloat16 12 | 13 | We accelerate inference with Bfloat16 using Intel Extension for PyTorch. The [script](inference_bf16.py) is designed to support standard Stable Diffusion models running in Bfloat16. 14 | ```bash 15 | pip install diffusers transformers accelerate scipy safetensors 16 | 17 | export KMP_BLOCKTIME=1 18 | export KMP_SETTINGS=1 19 | export KMP_AFFINITY=granularity=fine,compact,1,0 20 | 21 | # Intel OpenMP 22 | export OMP_NUM_THREADS=<cores to use> 23 | export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libiomp5.so 24 | # Jemalloc is a recommended malloc implementation that emphasizes fragmentation avoidance and scalable concurrency support. 25 | export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libjemalloc.so 26 | export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:9000000000" 27 | 28 | # Launch with default DDIM 29 | numactl --membind <node> -C <cpu list> python inference_bf16.py 30 | # Launch with DPMSolverMultistepScheduler 31 | numactl --membind <node> -C <cpu list> python inference_bf16.py --dpm 32 | 33 | ``` 34 | 35 | ## Accelerating the inference for Stable Diffusion using INT8 36 | 37 | Coming soon ...
38 | -------------------------------------------------------------------------------- /examples/research_projects/intel_opts/inference_bf16.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import intel_extension_for_pytorch as ipex 4 | import torch 5 | 6 | from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline 7 | 8 | 9 | parser = argparse.ArgumentParser("Stable Diffusion script with Intel optimization", add_help=False) 10 | parser.add_argument("--dpm", action="store_true", help="Enable DPMSolverMultistepScheduler") 11 | parser.add_argument("--steps", default=None, type=int, help="Num inference steps") 12 | args = parser.parse_args() 13 | 14 | 15 | device = "cpu" 16 | prompt = "a lovely girl in a red dress and hat, in the snowy and bright night, with many brightly lit buildings" 17 | 18 | model_id = "path-to-your-trained-model" 19 | pipe = StableDiffusionPipeline.from_pretrained(model_id) 20 | if args.dpm: 21 | pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) 22 | pipe = pipe.to(device) 23 | 24 | # convert modules to the channels-last memory format 25 | pipe.unet = pipe.unet.to(memory_format=torch.channels_last) 26 | pipe.vae = pipe.vae.to(memory_format=torch.channels_last) 27 | pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last) 28 | if pipe.requires_safety_checker: 29 | pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last) 30 | 31 | # optimize with IPEX, passing a sample input for the UNet when supported 32 | sample = torch.randn(2, 4, 64, 64) 33 | timestep = torch.rand(1) * 999 34 | encoder_hidden_states = torch.randn(2, 77, 768) 35 | input_example = (sample, timestep, encoder_hidden_states) 36 | try: 37 | pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example) 38 | except Exception: 39 | pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True) 40 | pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True) 41 | pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True) 42 | if pipe.requires_safety_checker: 43 | pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True) 44 | 45 | # compute 46 | seed = 666 47 | generator = torch.Generator(device).manual_seed(seed) 48 | generate_kwargs = {"generator": generator} 49 | if args.steps is not None: 50 | generate_kwargs["num_inference_steps"] = args.steps 51 | 52 | with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16): 53 | image = pipe(prompt, **generate_kwargs).images[0] 54 | 55 | # save image 56 | image.save("generated.png") 57 | -------------------------------------------------------------------------------- /examples/research_projects/intel_opts/textual_inversion/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.21.0 4 | ftfy 5 | tensorboard 6 | Jinja2 7 | intel_extension_for_pytorch>=1.13 8 | -------------------------------------------------------------------------------- /examples/research_projects/intel_opts/textual_inversion_dfq/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate 2 | torchvision 3 | transformers>=4.25.0 4 | ftfy 5 | tensorboard 6 | modelcards 7 | neural-compressor -------------------------------------------------------------------------------- /examples/research_projects/lora/requirements.txt:
-------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | datasets 5 | ftfy 6 | tensorboard 7 | Jinja2 8 | git+https://github.com/huggingface/peft.git -------------------------------------------------------------------------------- /examples/research_projects/mulit_token_textual_inversion/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | ftfy 5 | tensorboard 6 | Jinja2 7 | -------------------------------------------------------------------------------- /examples/research_projects/mulit_token_textual_inversion/requirements_flax.txt: -------------------------------------------------------------------------------- 1 | transformers>=4.25.1 2 | flax 3 | optax 4 | torch 5 | torchvision 6 | ftfy 7 | tensorboard 8 | Jinja2 9 | -------------------------------------------------------------------------------- /examples/research_projects/multi_subject_dreambooth/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | ftfy 5 | tensorboard 6 | Jinja2 -------------------------------------------------------------------------------- /examples/research_projects/onnxruntime/README.md: -------------------------------------------------------------------------------- 1 | ## Diffusers examples with ONNXRuntime optimizations 2 | 3 | **This research project is not actively maintained by the diffusers team. For any questions or comments, please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub.** 4 | 5 | This aims to provide diffusers examples with ONNXRuntime optimizations for training/fine-tuning unconditional image generation, text-to-image, and textual inversion. Please see individual directories for more details on how to run each task using ONNXRuntime. -------------------------------------------------------------------------------- /examples/research_projects/onnxruntime/text_to_image/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | datasets 5 | ftfy 6 | tensorboard 7 | modelcards 8 | -------------------------------------------------------------------------------- /examples/research_projects/onnxruntime/textual_inversion/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | ftfy 5 | tensorboard 6 | modelcards 7 | -------------------------------------------------------------------------------- /examples/research_projects/onnxruntime/unconditional_image_generation/README.md: -------------------------------------------------------------------------------- 1 | ## Training examples 2 | 3 | Creating a training image set is [described in a different document](https://huggingface.co/docs/datasets/image_process#image-datasets).
4 | 5 | ### Installing the dependencies 6 | 7 | Before running the scripts, make sure to install the library's training dependencies: 8 | 9 | **Important** 10 | 11 | To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: 12 | ```bash 13 | git clone https://github.com/huggingface/diffusers 14 | cd diffusers 15 | pip install . 16 | ``` 17 | 18 | Then cd into the example folder and run 19 | ```bash 20 | pip install -r requirements.txt 21 | ``` 22 | 23 | 24 | And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: 25 | 26 | ```bash 27 | accelerate config 28 | ``` 29 | 30 | #### Use ONNXRuntime to accelerate training 31 | 32 | To leverage ONNX Runtime to accelerate training, use `train_unconditional_ort.py`. 33 | 34 | The command to train a DDPM UNet model on the Oxford Flowers dataset with ONNX Runtime: 35 | 36 | ```bash 37 | accelerate launch train_unconditional_ort.py \ 38 | --dataset_name="huggan/flowers-102-categories" \ 39 | --resolution=64 --center_crop --random_flip \ 40 | --output_dir="ddpm-ema-flowers-64" \ 41 | --use_ema \ 42 | --train_batch_size=16 \ 43 | --num_epochs=1 \ 44 | --gradient_accumulation_steps=1 \ 45 | --learning_rate=1e-4 \ 46 | --lr_warmup_steps=500 \ 47 | --mixed_precision=fp16 48 | ``` 49 | 50 | Please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub with any questions. 51 | -------------------------------------------------------------------------------- /examples/research_projects/onnxruntime/unconditional_image_generation/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | datasets 4 | tensorboard -------------------------------------------------------------------------------- /examples/rl/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | These examples show how to run [Diffuser](https://arxiv.org/abs/2205.09991) in Diffusers. 4 | There are two ways to use the script `run_diffuser_locomotion.py`. 5 | 6 | The key option is the variable `n_guide_steps`. 7 | When `n_guide_steps=0`, the trajectories are sampled from the diffusion model but are not fine-tuned to maximize reward in the environment. 8 | By default, `n_guide_steps=2` to match the original implementation.
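As a minimal sketch, switching guidance off is just a config change (the dict below mirrors the `config` defined in `run_diffuser_locomotion.py`; treat the values as illustrative):

```python
# Illustrative: the same config used by run_diffuser_locomotion.py.
# n_guide_steps=0 skips the value network and samples trajectories from
# the diffusion model alone; n_guide_steps=2 is the reward-guided default.
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 0,  # set back to 2 for reward-guided sampling
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
```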
9 | 10 | 11 | You will need some RL specific requirements to run the examples: 12 | 13 | ``` 14 | pip install -f https://download.pytorch.org/whl/torch_stable.html \ 15 | free-mujoco-py \ 16 | einops \ 17 | gym==0.24.1 \ 18 | protobuf==3.20.1 \ 19 | git+https://github.com/rail-berkeley/d4rl.git \ 20 | mediapy \ 21 | Pillow==9.0.0 22 | ``` 23 | -------------------------------------------------------------------------------- /examples/rl/run_diffuser_locomotion.py: -------------------------------------------------------------------------------- 1 | import d4rl # noqa 2 | import gym 3 | import tqdm 4 | from diffusers.experimental import ValueGuidedRLPipeline 5 | 6 | 7 | config = { 8 | "n_samples": 64, 9 | "horizon": 32, 10 | "num_inference_steps": 20, 11 | "n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network 12 | "scale_grad_by_std": True, 13 | "scale": 0.1, 14 | "eta": 0.0, 15 | "t_grad_cutoff": 2, 16 | "device": "cpu", 17 | } 18 | 19 | 20 | if __name__ == "__main__": 21 | env_name = "hopper-medium-v2" 22 | env = gym.make(env_name) 23 | 24 | pipeline = ValueGuidedRLPipeline.from_pretrained( 25 | "bglick13/hopper-medium-v2-value-function-hor32", 26 | env=env, 27 | ) 28 | 29 | env.seed(0) 30 | obs = env.reset() 31 | total_reward = 0 32 | total_score = 0 33 | T = 1000 34 | rollout = [obs.copy()] 35 | try: 36 | for t in tqdm.tqdm(range(T)): 37 | # call the policy 38 | denorm_actions = pipeline(obs, planning_horizon=32) 39 | 40 | # execute action in environment 41 | next_observation, reward, terminal, _ = env.step(denorm_actions) 42 | score = env.get_normalized_score(total_reward) 43 | 44 | # update return 45 | total_reward += reward 46 | total_score += score 47 | print( 48 | f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" 49 | f" {total_score}" 50 | ) 51 | 52 | # save observations for rendering 53 | rollout.append(next_observation.copy()) 54 | 55 | obs = next_observation 56 | except KeyboardInterrupt: 57 | pass 58 | 59 | print(f"Total reward: {total_reward}") 60 | -------------------------------------------------------------------------------- /examples/text_to_image/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | datasets 5 | ftfy 6 | tensorboard 7 | Jinja2 8 | -------------------------------------------------------------------------------- /examples/text_to_image/requirements_flax.txt: -------------------------------------------------------------------------------- 1 | transformers>=4.25.1 2 | datasets 3 | flax 4 | optax 5 | torch 6 | torchvision 7 | ftfy 8 | tensorboard 9 | Jinja2 10 | -------------------------------------------------------------------------------- /examples/textual_inversion/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | transformers>=4.25.1 4 | ftfy 5 | tensorboard 6 | Jinja2 7 | -------------------------------------------------------------------------------- /examples/textual_inversion/requirements_flax.txt: -------------------------------------------------------------------------------- 1 | transformers>=4.25.1 2 | flax 3 | optax 4 | torch 5 | torchvision 6 | ftfy 7 | tensorboard 8 | Jinja2 9 | -------------------------------------------------------------------------------- /examples/unconditional_image_generation/requirements.txt: 
-------------------------------------------------------------------------------- 1 | accelerate>=0.16.0 2 | torchvision 3 | datasets 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 119 3 | target-version = ['py37'] 4 | 5 | [tool.ruff] 6 | # Never enforce `E501` (line length violations). 7 | ignore = ["C901", "E501", "E741", "W605"] 8 | select = ["C", "E", "F", "I", "W"] 9 | line-length = 119 10 | 11 | # Ignore import violations in all `__init__.py` files. 12 | [tool.ruff.per-file-ignores] 13 | "__init__.py" = ["E402", "F401", "F403", "F811"] 14 | "src/diffusers/utils/dummy_*.py" = ["F401"] 15 | 16 | [tool.ruff.isort] 17 | lines-after-imports = 2 18 | known-first-party = ["diffusers"] 19 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/scripts/__init__.py -------------------------------------------------------------------------------- /scripts/conversion_ldm_uncond.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from omegaconf import OmegaConf 4 | import torch 5 | 6 | from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel 7 | 8 | 9 | def convert_ldm_original(checkpoint_path, config_path, output_path): 10 | config = OmegaConf.load(config_path) 11 | state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] 12 | keys = list(state_dict.keys()) 13 | 14 | # extract state_dict for VQVAE 15 | first_stage_dict = {} 16 | first_stage_key = "first_stage_model." 17 | for key in keys: 18 | if key.startswith(first_stage_key): 19 | first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key] 20 | 21 | # extract state_dict for UNetLDM 22 | unet_state_dict = {} 23 | unet_key = "model.diffusion_model."
24 | for key in keys: 25 | if key.startswith(unet_key): 26 | unet_state_dict[key.replace(unet_key, "")] = state_dict[key] 27 | 28 | vqvae_init_args = config.model.params.first_stage_config.params 29 | unet_init_args = config.model.params.unet_config.params 30 | 31 | vqvae = VQModel(**vqvae_init_args).eval() 32 | vqvae.load_state_dict(first_stage_dict) 33 | 34 | unet = UNetLDMModel(**unet_init_args).eval() 35 | unet.load_state_dict(unet_state_dict) 36 | 37 | noise_scheduler = DDIMScheduler( 38 | num_train_timesteps=config.model.params.timesteps, 39 | beta_schedule="scaled_linear", 40 | beta_start=config.model.params.linear_start, 41 | beta_end=config.model.params.linear_end, 42 | clip_sample=False, 43 | ) 44 | 45 | pipeline = LDMPipeline(vqvae, unet, noise_scheduler) 46 | pipeline.save_pretrained(output_path) 47 | 48 | 49 | if __name__ == "__main__": 50 | parser = argparse.ArgumentParser() 51 | parser.add_argument("--checkpoint_path", type=str, required=True) 52 | parser.add_argument("--config_path", type=str, required=True) 53 | parser.add_argument("--output_path", type=str, required=True) 54 | args = parser.parse_args() 55 | 56 | convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) 57 | -------------------------------------------------------------------------------- /scripts/convert_unclip_txt2img_to_image_variation.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection 4 | 5 | from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline 6 | 7 | 8 | if __name__ == "__main__": 9 | parser = argparse.ArgumentParser() 10 | 11 | parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") 12 | 13 | parser.add_argument( 14 | "--txt2img_unclip", 15 | default="kakaobrain/karlo-v1-alpha", 16 | type=str, 17 | required=False, 18 | help="The pretrained txt2img unclip.", 19 | ) 20 | 21 | args = parser.parse_args() 22 | 23 | txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip) 24 | 25 | feature_extractor = CLIPImageProcessor() 26 | image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") 27 | 28 | img2img = UnCLIPImageVariationPipeline( 29 | decoder=txt2img.decoder, 30 | text_encoder=txt2img.text_encoder, 31 | tokenizer=txt2img.tokenizer, 32 | text_proj=txt2img.text_proj, 33 | feature_extractor=feature_extractor, 34 | image_encoder=image_encoder, 35 | super_res_first=txt2img.super_res_first, 36 | super_res_last=txt2img.super_res_last, 37 | decoder_scheduler=txt2img.decoder_scheduler, 38 | super_res_scheduler=txt2img.super_res_scheduler, 39 | ) 40 | 41 | img2img.save_pretrained(args.dump_path) 42 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | default_section = FIRSTPARTY 3 | ensure_newline_before_comments = True 4 | force_grid_wrap = 0 5 | include_trailing_comma = True 6 | known_first_party = diffusers 7 | known_third_party = 8 | numpy 9 | torch 10 | torch_xla 11 | 12 | line_length = 119 13 | lines_after_imports = 2 14 | multi_line_output = 3 15 | use_parentheses = True 16 | 17 | [flake8] 18 | ignore = E203, E722, E501, E741, W503, W605 19 | max-line-length = 119 20 | per-file-ignores = __init__.py:F401 21 | --------------------------------------------------------------------------------
/src/diffusers/commands/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from abc import ABC, abstractmethod 16 | from argparse import ArgumentParser 17 | 18 | 19 | class BaseDiffusersCLICommand(ABC): 20 | @staticmethod 21 | @abstractmethod 22 | def register_subcommand(parser: ArgumentParser): 23 | raise NotImplementedError() 24 | 25 | @abstractmethod 26 | def run(self): 27 | raise NotImplementedError() 28 | -------------------------------------------------------------------------------- /src/diffusers/commands/diffusers_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2023 The HuggingFace Team. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | from argparse import ArgumentParser 17 | 18 | from .env import EnvironmentCommand 19 | 20 | 21 | def main(): 22 | parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]") 23 | commands_parser = parser.add_subparsers(help="diffusers-cli command helpers") 24 | 25 | # Register commands 26 | EnvironmentCommand.register_subcommand(commands_parser) 27 | 28 | # Let's go 29 | args = parser.parse_args() 30 | 31 | if not hasattr(args, "func"): 32 | parser.print_help() 33 | exit(1) 34 | 35 | # Run 36 | service = args.func(args) 37 | service.run() 38 | 39 | 40 | if __name__ == "__main__": 41 | main() 42 | -------------------------------------------------------------------------------- /src/diffusers/dependency_versions_check.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | import sys 15 | 16 | from .dependency_versions_table import deps 17 | from .utils.versions import require_version, require_version_core 18 | 19 | 20 | # define which module versions we always want to check at run time 21 | # (usually the ones defined in `install_requires` in setup.py) 22 | # 23 | # order specific notes: 24 | # - tqdm must be checked before tokenizers 25 | 26 | pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split() 27 | if sys.version_info < (3, 7): 28 | pkgs_to_check_at_runtime.append("dataclasses") 29 | if sys.version_info < (3, 8): 30 | pkgs_to_check_at_runtime.append("importlib_metadata") 31 | 32 | for pkg in pkgs_to_check_at_runtime: 33 | if pkg in deps: 34 | if pkg == "tokenizers": 35 | # must be loaded here, or else tqdm check may fail 36 | from .utils import is_tokenizers_available 37 | 38 | if not is_tokenizers_available(): 39 | continue  # not required, check version only if installed 40 | 41 | require_version_core(deps[pkg]) 42 | else: 43 | raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") 44 | 45 | 46 | def dep_version_check(pkg, hint=None): 47 | require_version(deps[pkg], hint) 48 | -------------------------------------------------------------------------------- /src/diffusers/dependency_versions_table.py: -------------------------------------------------------------------------------- 1 | # THIS FILE HAS BEEN AUTOGENERATED. To update: 2 | # 1. modify the `_deps` dict in setup.py 3 | # 2. run `make deps_table_update` 4 | deps = { 5 | "Pillow": "Pillow", 6 | "accelerate": "accelerate>=0.11.0", 7 | "compel": "compel==0.1.8", 8 | "black": "black~=23.1", 9 | "datasets": "datasets", 10 | "filelock": "filelock", 11 | "flax": "flax>=0.4.1", 12 | "hf-doc-builder": "hf-doc-builder>=0.3.0", 13 | "huggingface-hub": "huggingface-hub>=0.13.2", 14 | "requests-mock": "requests-mock==1.10.0", 15 | "importlib_metadata": "importlib_metadata", 16 | "isort": "isort>=5.5.4", 17 | "jax": "jax>=0.2.8,!=0.3.2", 18 | "jaxlib": "jaxlib>=0.1.65", 19 | "Jinja2": "Jinja2", 20 | "k-diffusion": "k-diffusion>=0.0.12", 21 | "librosa": "librosa", 22 | "numpy": "numpy", 23 | "omegaconf": "omegaconf", 24 | "parameterized": "parameterized", 25 | "protobuf": "protobuf>=3.20.3,<4", 26 | "pytest": "pytest", 27 | "pytest-timeout": "pytest-timeout", 28 | "pytest-xdist": "pytest-xdist", 29 | "ruff": "ruff>=0.0.241", 30 | "safetensors": "safetensors", 31 | "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", 32 | "scipy": "scipy", 33 | "regex": "regex!=2019.12.17", 34 | "requests": "requests", 35 | "tensorboard": "tensorboard", 36 | "torch": "torch>=1.4", 37 | "torchvision": "torchvision", 38 | "transformers": "transformers>=4.25.1", 39 | "urllib3": "urllib3<=2.0.0", 40 | } 41 | -------------------------------------------------------------------------------- /src/diffusers/experimental/README.md: -------------------------------------------------------------------------------- 1 | # 🧨 Diffusers Experimental 2 | 3 | We are adding experimental code to support novel applications and usages of the Diffusers library. 4 | Currently, the following experiments are supported: 5 | * Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model.
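As a quick orientation, a minimal usage sketch (assuming a D4RL gym environment and the value-function checkpoint used in `examples/rl/run_diffuser_locomotion.py`):

```python
import d4rl  # noqa: registers the offline RL environments with gym
import gym

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32", env=env
)

obs = env.reset()
action = pipeline(obs, planning_horizon=32)  # denormalized action, ready for env.step
```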
-------------------------------------------------------------------------------- /src/diffusers/experimental/__init__.py: -------------------------------------------------------------------------------- 1 | from .rl import ValueGuidedRLPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/experimental/rl/__init__.py: -------------------------------------------------------------------------------- 1 | from .value_guided_sampling import ValueGuidedRLPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/models/README.md: -------------------------------------------------------------------------------- 1 | # Models 2 | 3 | For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models). -------------------------------------------------------------------------------- /src/diffusers/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from ..utils import is_flax_available, is_torch_available 16 | 17 | 18 | if is_torch_available(): 19 | from .autoencoder_kl import AutoencoderKL 20 | from .controlnet import ControlNetModel 21 | from .dual_transformer_2d import DualTransformer2DModel 22 | from .modeling_utils import ModelMixin 23 | from .prior_transformer import PriorTransformer 24 | from .t5_film_transformer import T5FilmDecoder 25 | from .transformer_2d import Transformer2DModel 26 | from .unet_1d import UNet1DModel 27 | from .unet_2d import UNet2DModel 28 | from .unet_2d_condition import UNet2DConditionModel 29 | from .unet_3d_condition import UNet3DConditionModel 30 | from .vq_model import VQModel 31 | 32 | if is_flax_available(): 33 | from .controlnet_flax import FlaxControlNetModel 34 | from .unet_2d_condition_flax import FlaxUNet2DConditionModel 35 | from .vae_flax import FlaxAutoencoderKL 36 | -------------------------------------------------------------------------------- /src/diffusers/models/activations.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | def get_activation(act_fn): 5 | if act_fn in ["swish", "silu"]: 6 | return nn.SiLU() 7 | elif act_fn == "mish": 8 | return nn.Mish() 9 | elif act_fn == "gelu": 10 | return nn.GELU() 11 | else: 12 | raise ValueError(f"Unsupported activation function: {act_fn}") 13 | -------------------------------------------------------------------------------- /src/diffusers/pipeline_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | # NOTE: This file is deprecated and will be removed in a future version. 17 | # It only exists temporarily so that `from diffusers.pipelines import DiffusionPipeline` works 18 | 19 | from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401 20 | from .utils import deprecate 21 | 22 | 23 | deprecate( 24 | "pipelines_utils", 25 | "0.22.0", 26 | "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.", 27 | standard_warn=False, 28 | stacklevel=3, 29 | ) 30 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/alt_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import PIL 6 | from PIL import Image 7 | 8 | from ...utils import BaseOutput, is_torch_available, is_transformers_available 9 | 10 | 11 | @dataclass 12 | # Copied from diffusers.pipelines.stable_diffusion.__init__.StableDiffusionPipelineOutput with Stable->Alt 13 | class AltDiffusionPipelineOutput(BaseOutput): 14 | """ 15 | Output class for Alt Diffusion pipelines. 16 | 17 | Args: 18 | images (`List[PIL.Image.Image]` or `np.ndarray`) 19 | List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, 20 | num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 21 | nsfw_content_detected (`List[bool]`) 22 | List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" 23 | (nsfw) content, or `None` if safety checking could not be performed.
24 | """ 25 | 26 | images: Union[List[PIL.Image.Image], np.ndarray] 27 | nsfw_content_detected: Optional[List[bool]] 28 | 29 | 30 | if is_transformers_available() and is_torch_available(): 31 | from .modeling_roberta_series import RobertaSeriesModelWithTransformation 32 | from .pipeline_alt_diffusion import AltDiffusionPipeline 33 | from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline 34 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/audio_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from .mel import Mel 2 | from .pipeline_audio_diffusion import AudioDiffusionPipeline 3 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/audioldm/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | AudioLDMPipeline, 15 | ) 16 | else: 17 | from .pipeline_audioldm import AudioLDMPipeline 18 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/controlnet/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_flax_available, 4 | is_torch_available, 5 | is_transformers_available, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available()): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 14 | else: 15 | from .multicontrolnet import MultiControlNetModel 16 | from .pipeline_controlnet import StableDiffusionControlNetPipeline 17 | from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline 18 | from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline 19 | 20 | 21 | if is_transformers_available() and is_flax_available(): 22 | from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline 23 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/dance_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_dance_diffusion import DanceDiffusionPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/ddim/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_ddim import DDIMPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/ddpm/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_ddpm import DDPMPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/deepfloyd_if/__init__.py: -------------------------------------------------------------------------------- 1 | from 
dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import PIL 6 | 7 | from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 8 | from .timesteps import ( 9 | fast27_timesteps, 10 | smart27_timesteps, 11 | smart50_timesteps, 12 | smart100_timesteps, 13 | smart185_timesteps, 14 | super27_timesteps, 15 | super40_timesteps, 16 | super100_timesteps, 17 | ) 18 | 19 | 20 | @dataclass 21 | class IFPipelineOutput(BaseOutput): 22 | """ 23 | Output class for DeepFloyd IF pipelines. 24 | Args: 25 | images (`List[PIL.Image.Image]` or `np.ndarray`) 26 | List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, 27 | num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 28 | nsfw_detected (`List[bool]`) 29 | List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" 30 | (nsfw) content. `None` if safety checking could not be performed. 31 | watermark_detected (`List[bool]`) 32 | List of flags denoting whether the corresponding generated image likely has a watermark. `None` if safety 33 | checking could not be performed. 34 | """ 35 | 36 | images: Union[List[PIL.Image.Image], np.ndarray] 37 | nsfw_detected: Optional[List[bool]] 38 | watermark_detected: Optional[List[bool]] 39 | 40 | 41 | try: 42 | if not (is_transformers_available() and is_torch_available()): 43 | raise OptionalDependencyNotAvailable() 44 | except OptionalDependencyNotAvailable: 45 | from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403 46 | else: 47 | from .pipeline_if import IFPipeline 48 | from .pipeline_if_img2img import IFImg2ImgPipeline 49 | from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline 50 | from .pipeline_if_inpainting import IFInpaintingPipeline 51 | from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline 52 | from .pipeline_if_superresolution import IFSuperResolutionPipeline 53 | from .safety_checker import IFSafetyChecker 54 | from .watermark import IFWatermarker 55 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/deepfloyd_if/safety_checker.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel 5 | 6 | from ...utils import logging 7 | 8 | 9 | logger = logging.get_logger(__name__) 10 | 11 | 12 | class IFSafetyChecker(PreTrainedModel): 13 | config_class = CLIPConfig 14 | 15 | _no_split_modules = ["CLIPEncoderLayer"] 16 | 17 | def __init__(self, config: CLIPConfig): 18 | super().__init__(config) 19 | 20 | self.vision_model = CLIPVisionModelWithProjection(config.vision_config) 21 | 22 | self.p_head = nn.Linear(config.vision_config.projection_dim, 1) 23 | self.w_head = nn.Linear(config.vision_config.projection_dim, 1) 24 | 25 | @torch.no_grad() 26 | def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5): 27 | image_embeds = self.vision_model(clip_input)[0] 28 | 29 | nsfw_detected = self.p_head(image_embeds) 30 | nsfw_detected = nsfw_detected.flatten() 31 | nsfw_detected = nsfw_detected > p_threshold 32 | nsfw_detected = nsfw_detected.tolist() 33 | 34 | if any(nsfw_detected): 35 | logger.warning( 36 | "Potential NSFW content was detected
in one or more images. A black image will be returned instead." 37 | " Try again with a different prompt and/or seed." 38 | ) 39 | 40 | for idx, nsfw_detected_ in enumerate(nsfw_detected): 41 | if nsfw_detected_: 42 | images[idx] = np.zeros(images[idx].shape) 43 | 44 | watermark_detected = self.w_head(image_embeds) 45 | watermark_detected = watermark_detected.flatten() 46 | watermark_detected = watermark_detected > w_threshold 47 | watermark_detected = watermark_detected.tolist() 48 | 49 | if any(watermark_detected): 50 | logger.warning( 51 | "Potential watermarked content was detected in one or more images. A black image will be returned instead." 52 | " Try again with a different prompt and/or seed." 53 | ) 54 | 55 | for idx, watermark_detected_ in enumerate(watermark_detected): 56 | if watermark_detected_: 57 | images[idx] = np.zeros(images[idx].shape) 58 | 59 | return images, nsfw_detected, watermark_detected 60 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/deepfloyd_if/watermark.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import PIL 4 | import torch 5 | from PIL import Image 6 | 7 | from ...configuration_utils import ConfigMixin 8 | from ...models.modeling_utils import ModelMixin 9 | from ...utils import PIL_INTERPOLATION 10 | 11 | 12 | class IFWatermarker(ModelMixin, ConfigMixin): 13 | def __init__(self): 14 | super().__init__() 15 | 16 | self.register_buffer("watermark_image", torch.zeros((62, 62, 4))) 17 | self.watermark_image_as_pil = None 18 | 19 | def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None): 20 | # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287 21 | 22 | h = images[0].height 23 | w = images[0].width 24 | 25 | sample_size = sample_size or h 26 | 27 | coef = min(h / sample_size, w / sample_size) 28 | img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w) 29 | 30 | S1, S2 = 1024**2, img_w * img_h 31 | K = (S2 / S1) ** 0.5 32 | wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K) 33 | 34 | if self.watermark_image_as_pil is None: 35 | watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy() 36 | watermark_image = Image.fromarray(watermark_image, mode="RGBA") 37 | self.watermark_image_as_pil = watermark_image 38 | 39 | wm_img = self.watermark_image_as_pil.resize( 40 | (wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None 41 | ) 42 | 43 | for pil_img in images: 44 | pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1]) 45 | 46 | return images 47 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/dit/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_dit import DiTPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/kandinsky/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available()): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from 
...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline 14 | else: 15 | from .pipeline_kandinsky import KandinskyPipeline 16 | from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline 17 | from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline 18 | from .pipeline_kandinsky_prior import KandinskyPriorPipeline 19 | from .text_encoder import MultilingualCLIP 20 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/kandinsky/text_encoder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel 3 | 4 | 5 | class MCLIPConfig(XLMRobertaConfig): 6 | model_type = "M-CLIP" 7 | 8 | def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs): 9 | self.transformerDimensions = transformerDimSize 10 | self.numDims = imageDimSize 11 | super().__init__(**kwargs) 12 | 13 | 14 | class MultilingualCLIP(PreTrainedModel): 15 | config_class = MCLIPConfig 16 | 17 | def __init__(self, config, *args, **kwargs): 18 | super().__init__(config, *args, **kwargs) 19 | self.transformer = XLMRobertaModel(config) 20 | self.LinearTransformation = torch.nn.Linear( 21 | in_features=config.transformerDimensions, out_features=config.numDims 22 | ) 23 | 24 | def forward(self, input_ids, attention_mask): 25 | embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0] 26 | embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] 27 | return self.LinearTransformation(embs2), embs 28 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/latent_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import is_transformers_available 2 | from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline 3 | 4 | 5 | if is_transformers_available(): 6 | from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline 7 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/latent_diffusion_uncond/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_latent_diffusion_uncond import LDMPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/paint_by_example/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import PIL 6 | from PIL import Image 7 | 8 | from ...utils import is_torch_available, is_transformers_available 9 | 10 | 11 | if is_transformers_available() and is_torch_available(): 12 | from .image_encoder import PaintByExampleImageEncoder 13 | from .pipeline_paint_by_example import PaintByExamplePipeline 14 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/pndm/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_pndm import PNDMPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/repaint/__init__.py: -------------------------------------------------------------------------------- 1 | from 
.pipeline_repaint import RePaintPipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/score_sde_ve/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_score_sde_ve import ScoreSdeVePipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/semantic_stable_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from enum import Enum 3 | from typing import List, Optional, Union 4 | 5 | import numpy as np 6 | import PIL 7 | from PIL import Image 8 | 9 | from ...utils import BaseOutput, is_torch_available, is_transformers_available 10 | 11 | 12 | @dataclass 13 | class SemanticStableDiffusionPipelineOutput(BaseOutput): 14 | """ 15 | Output class for Stable Diffusion pipelines. 16 | 17 | Args: 18 | images (`List[PIL.Image.Image]` or `np.ndarray`) 19 | List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, 20 | num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 21 | nsfw_content_detected (`List[bool]`) 22 | List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" 23 | (nsfw) content, or `None` if safety checking could not be performed. 24 | """ 25 | 26 | images: Union[List[PIL.Image.Image], np.ndarray] 27 | nsfw_content_detected: Optional[List[bool]] 28 | 29 | 30 | if is_transformers_available() and is_torch_available(): 31 | from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline 32 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/spectrogram_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | from ...utils import is_note_seq_available, is_transformers_available, is_torch_available 3 | from ...utils import OptionalDependencyNotAvailable 4 | 5 | 6 | try: 7 | if not (is_transformers_available() and is_torch_available()): 8 | raise OptionalDependencyNotAvailable() 9 | except OptionalDependencyNotAvailable: 10 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 11 | else: 12 | from .notes_encoder import SpectrogramNotesEncoder 13 | from .continous_encoder import SpectrogramContEncoder 14 | from .pipeline_spectrogram_diffusion import ( 15 | SpectrogramContEncoder, 16 | SpectrogramDiffusionPipeline, 17 | T5FilmDecoder, 18 | ) 19 | 20 | try: 21 | if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): 22 | raise OptionalDependencyNotAvailable() 23 | except OptionalDependencyNotAvailable: 24 | from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 25 | else: 26 | from .midi_utils import MidiProcessor 27 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_controlnet.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # NOTE: This file is deprecated and will be removed in a future version. 16 | # It only exists temporarily so that `from diffusers.pipelines import DiffusionPipeline` works 17 | 18 | from ...utils import deprecate 19 | from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401 20 | 21 | 22 | deprecate( 23 | "stable diffusion controlnet", 24 | "0.22.0", 25 | "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.", 26 | standard_warn=False, 27 | stacklevel=3, 28 | ) 29 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # NOTE: This file is deprecated and will be removed in a future version. 16 | # It only exists temporarily so that `from diffusers.pipelines import DiffusionPipeline` works 17 | from ...utils import deprecate 18 | from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401 19 | from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401 20 | 21 | 22 | deprecate( 23 | "stable diffusion controlnet", 24 | "0.22.0", 25 | "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.", 26 | standard_warn=False, 27 | stacklevel=3, 28 | ) 29 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from typing import Optional, Union 16 | 17 | import torch 18 | from torch import nn 19 | 20 | from ...configuration_utils import ConfigMixin, register_to_config 21 | from ...models.modeling_utils import ModelMixin 22 | 23 | 24 | class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin): 25 | """ 26 | This class is used to hold the mean and standard deviation of the CLIP embedder used in stable unCLIP. 27 | 28 | It is used to normalize the image embeddings before the noise is applied and un-normalize the noised image 29 | embeddings. 30 | """ 31 | 32 | @register_to_config 33 | def __init__( 34 | self, 35 | embedding_dim: int = 768, 36 | ): 37 | super().__init__() 38 | 39 | self.mean = nn.Parameter(torch.zeros(1, embedding_dim)) 40 | self.std = nn.Parameter(torch.ones(1, embedding_dim)) 41 | 42 | def to( 43 | self, 44 | torch_device: Optional[Union[str, torch.device]] = None, 45 | torch_dtype: Optional[torch.dtype] = None, 46 | ): 47 | self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype)) 48 | self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype)) 49 | return self 50 | 51 | def scale(self, embeds): 52 | embeds = (embeds - self.mean) * 1.0 / self.std 53 | return embeds 54 | 55 | def unscale(self, embeds): 56 | embeds = (embeds * self.std) + self.mean 57 | return embeds 58 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/stochastic_karras_ve/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_stochastic_karras_ve import KarrasVePipeline 2 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/text_to_video_synthesis/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import torch 6 | 7 | from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 8 | 9 | 10 | @dataclass 11 | class TextToVideoSDPipelineOutput(BaseOutput): 12 | """ 13 | Output class for text to video pipelines. 14 | 15 | Args: 16 | frames (`List[np.ndarray]` or `torch.FloatTensor`) 17 | List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or as 18 | a `torch` tensor. NumPy array present the denoised images of the diffusion pipeline. The length of the list 19 | denotes the video length i.e., the number of frames. 
20 | """ 21 | 22 | frames: Union[List[np.ndarray], torch.FloatTensor] 23 | 24 | 25 | try: 26 | if not (is_transformers_available() and is_torch_available()): 27 | raise OptionalDependencyNotAvailable() 28 | except OptionalDependencyNotAvailable: 29 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 30 | else: 31 | from .pipeline_text_to_video_synth import TextToVideoSDPipeline # noqa: F401 32 | from .pipeline_text_to_video_zero import TextToVideoZeroPipeline 33 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/unclip/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline 14 | else: 15 | from .pipeline_unclip import UnCLIPPipeline 16 | from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline 17 | from .text_proj import UnCLIPTextProjModel 18 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/unidiffuser/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available()): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | ImageTextPipelineOutput, 15 | UniDiffuserPipeline, 16 | ) 17 | else: 18 | from .modeling_text_decoder import UniDiffuserTextDecoder 19 | from .modeling_uvit import UniDiffuserModel, UTransformer2DModel 20 | from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline 21 | -------------------------------------------------------------------------------- /src/diffusers/pipelines/versatile_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | VersatileDiffusionDualGuidedPipeline, 15 | VersatileDiffusionImageVariationPipeline, 16 | VersatileDiffusionPipeline, 17 | VersatileDiffusionTextToImagePipeline, 18 | ) 19 | else: 20 | from .modeling_text_unet import UNetFlatConditionModel 21 | from .pipeline_versatile_diffusion import VersatileDiffusionPipeline 22 | from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline 23 | from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline 24 | from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline 25 | 
-------------------------------------------------------------------------------- /src/diffusers/pipelines/vq_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import is_torch_available, is_transformers_available 2 | 3 | 4 | if is_transformers_available() and is_torch_available(): 5 | from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline 6 | -------------------------------------------------------------------------------- /src/diffusers/schedulers/README.md: -------------------------------------------------------------------------------- 1 | # Schedulers 2 | 3 | For more information on the schedulers, please refer to the [docs](https://huggingface.co/docs/diffusers/api/schedulers/overview). -------------------------------------------------------------------------------- /src/diffusers/utils/accelerate_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """ 15 | Accelerate utilities: Utilities related to accelerate 16 | """ 17 | 18 | from packaging import version 19 | 20 | from .import_utils import is_accelerate_available 21 | 22 | 23 | if is_accelerate_available(): 24 | import accelerate 25 | 26 | 27 | def apply_forward_hook(method): 28 | """ 29 | Decorator that applies a registered CpuOffload hook to an arbitrary function rather than `forward`. This is useful 30 | for cases where a PyTorch module provides functions other than `forward` that should trigger a move to the 31 | appropriate acceleration device. This is the case for `encode` and `decode` in [`AutoencoderKL`]. 32 | 33 | This decorator looks inside the internal `_hf_hook` property to find a registered offload hook. 34 | 35 | :param method: The method to decorate. This method should be a method of a PyTorch module. 36 | """ 37 | if not is_accelerate_available(): 38 | return method 39 | accelerate_version = version.parse(accelerate.__version__).base_version 40 | if version.parse(accelerate_version) < version.parse("0.17.0"): 41 | return method 42 | 43 | def wrapper(self, *args, **kwargs): 44 | if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"): 45 | self._hf_hook.pre_forward(self) 46 | return method(self, *args, **kwargs) 47 | 48 | return wrapper 49 | -------------------------------------------------------------------------------- /src/diffusers/utils/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | 16 | from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home 17 | 18 | 19 | default_cache_path = HUGGINGFACE_HUB_CACHE 20 | 21 | 22 | CONFIG_NAME = "config.json" 23 | WEIGHTS_NAME = "diffusion_pytorch_model.bin" 24 | FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack" 25 | ONNX_WEIGHTS_NAME = "model.onnx" 26 | SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors" 27 | ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb" 28 | HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co" 29 | DIFFUSERS_CACHE = default_cache_path 30 | DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules" 31 | HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) 32 | DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"] 33 | TEXT_ENCODER_ATTN_MODULE = ".self_attn" 34 | -------------------------------------------------------------------------------- /src/diffusers/utils/deprecation_utils.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import warnings 3 | from typing import Any, Dict, Optional, Union 4 | 5 | from packaging import version 6 | 7 | 8 | def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2): 9 | from .. import __version__ 10 | 11 | deprecated_kwargs = take_from 12 | values = () 13 | if not isinstance(args[0], tuple): 14 | args = (args,) 15 | 16 | for attribute, version_name, message in args: 17 | if version.parse(version.parse(__version__).base_version) >= version.parse(version_name): 18 | raise ValueError( 19 | f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" 20 | f" version {__version__} is >= {version_name}" 21 | ) 22 | 23 | warning = None 24 | if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs: 25 | values += (deprecated_kwargs.pop(attribute),) 26 | warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." 27 | elif hasattr(deprecated_kwargs, attribute): 28 | values += (getattr(deprecated_kwargs, attribute),) 29 | warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." 30 | elif deprecated_kwargs is None: 31 | warning = f"`{attribute}` is deprecated and will be removed in version {version_name}." 
32 | 33 | if warning is not None: 34 | warning = warning + " " if standard_warn else "" 35 | warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel) 36 | 37 | if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0: 38 | call_frame = inspect.getouterframes(inspect.currentframe())[1] 39 | filename = call_frame.filename 40 | line_number = call_frame.lineno 41 | function = call_frame.function 42 | key, value = next(iter(deprecated_kwargs.items())) 43 | raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`") 44 | 45 | if len(values) == 0: 46 | return 47 | elif len(values) == 1: 48 | return values[0] 49 | return values 50 | -------------------------------------------------------------------------------- /src/diffusers/utils/doc_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """ 15 | Doc utilities: Utilities related to documentation 16 | """ 17 | import re 18 | 19 | 20 | def replace_example_docstring(example_docstring): 21 | def docstring_decorator(fn): 22 | func_doc = fn.__doc__ 23 | lines = func_doc.split("\n") 24 | i = 0 25 | while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None: 26 | i += 1 27 | if i < len(lines): 28 | lines[i] = example_docstring 29 | func_doc = "\n".join(lines) 30 | else: 31 | raise ValueError( 32 | f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, " 33 | f"current docstring is:\n{func_doc}" 34 | ) 35 | fn.__doc__ = func_doc 36 | return fn 37 | 38 | return docstring_decorator 39 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_flax_and_transformers_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 
2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject): 6 | _backends = ["flax", "transformers"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["flax", "transformers"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["flax", "transformers"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["flax", "transformers"]) 18 | 19 | 20 | class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): 21 | _backends = ["flax", "transformers"] 22 | 23 | def __init__(self, *args, **kwargs): 24 | requires_backends(self, ["flax", "transformers"]) 25 | 26 | @classmethod 27 | def from_config(cls, *args, **kwargs): 28 | requires_backends(cls, ["flax", "transformers"]) 29 | 30 | @classmethod 31 | def from_pretrained(cls, *args, **kwargs): 32 | requires_backends(cls, ["flax", "transformers"]) 33 | 34 | 35 | class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject): 36 | _backends = ["flax", "transformers"] 37 | 38 | def __init__(self, *args, **kwargs): 39 | requires_backends(self, ["flax", "transformers"]) 40 | 41 | @classmethod 42 | def from_config(cls, *args, **kwargs): 43 | requires_backends(cls, ["flax", "transformers"]) 44 | 45 | @classmethod 46 | def from_pretrained(cls, *args, **kwargs): 47 | requires_backends(cls, ["flax", "transformers"]) 48 | 49 | 50 | class FlaxStableDiffusionPipeline(metaclass=DummyObject): 51 | _backends = ["flax", "transformers"] 52 | 53 | def __init__(self, *args, **kwargs): 54 | requires_backends(self, ["flax", "transformers"]) 55 | 56 | @classmethod 57 | def from_config(cls, *args, **kwargs): 58 | requires_backends(cls, ["flax", "transformers"]) 59 | 60 | @classmethod 61 | def from_pretrained(cls, *args, **kwargs): 62 | requires_backends(cls, ["flax", "transformers"]) 63 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_note_seq_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class MidiProcessor(metaclass=DummyObject): 6 | _backends = ["note_seq"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["note_seq"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["note_seq"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["note_seq"]) 18 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_onnx_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 
2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class OnnxRuntimeModel(metaclass=DummyObject): 6 | _backends = ["onnx"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["onnx"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["onnx"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["onnx"]) 18 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_torch_and_librosa_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class AudioDiffusionPipeline(metaclass=DummyObject): 6 | _backends = ["torch", "librosa"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["torch", "librosa"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["torch", "librosa"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["torch", "librosa"]) 18 | 19 | 20 | class Mel(metaclass=DummyObject): 21 | _backends = ["torch", "librosa"] 22 | 23 | def __init__(self, *args, **kwargs): 24 | requires_backends(self, ["torch", "librosa"]) 25 | 26 | @classmethod 27 | def from_config(cls, *args, **kwargs): 28 | requires_backends(cls, ["torch", "librosa"]) 29 | 30 | @classmethod 31 | def from_pretrained(cls, *args, **kwargs): 32 | requires_backends(cls, ["torch", "librosa"]) 33 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_torch_and_scipy_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class LMSDiscreteScheduler(metaclass=DummyObject): 6 | _backends = ["torch", "scipy"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["torch", "scipy"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["torch", "scipy"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["torch", "scipy"]) 18 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_torch_and_torchsde_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 
2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class DPMSolverSDEScheduler(metaclass=DummyObject): 6 |     _backends = ["torch", "torchsde"] 7 | 8 |     def __init__(self, *args, **kwargs): 9 |         requires_backends(self, ["torch", "torchsde"]) 10 | 11 |     @classmethod 12 |     def from_config(cls, *args, **kwargs): 13 |         requires_backends(cls, ["torch", "torchsde"]) 14 | 15 |     @classmethod 16 |     def from_pretrained(cls, *args, **kwargs): 17 |         requires_backends(cls, ["torch", "torchsde"]) 18 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class StableDiffusionKDiffusionPipeline(metaclass=DummyObject): 6 |     _backends = ["torch", "transformers", "k_diffusion"] 7 | 8 |     def __init__(self, *args, **kwargs): 9 |         requires_backends(self, ["torch", "transformers", "k_diffusion"]) 10 | 11 |     @classmethod 12 |     def from_config(cls, *args, **kwargs): 13 |         requires_backends(cls, ["torch", "transformers", "k_diffusion"]) 14 | 15 |     @classmethod 16 |     def from_pretrained(cls, *args, **kwargs): 17 |         requires_backends(cls, ["torch", "transformers", "k_diffusion"]) 18 | -------------------------------------------------------------------------------- /src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class SpectrogramDiffusionPipeline(metaclass=DummyObject): 6 |     _backends = ["transformers", "torch", "note_seq"] 7 | 8 |     def __init__(self, *args, **kwargs): 9 |         requires_backends(self, ["transformers", "torch", "note_seq"]) 10 | 11 |     @classmethod 12 |     def from_config(cls, *args, **kwargs): 13 |         requires_backends(cls, ["transformers", "torch", "note_seq"]) 14 | 15 |     @classmethod 16 |     def from_pretrained(cls, *args, **kwargs): 17 |         requires_backends(cls, ["transformers", "torch", "note_seq"]) 18 | -------------------------------------------------------------------------------- /src/diffusers/utils/model_card_template.md: -------------------------------------------------------------------------------- 1 | --- 2 | {{ card_data }} 3 | --- 4 | 5 | <!-- This model card has been generated automatically according to the information the training script had access to. You 6 | should probably proofread and complete it, then remove this comment. --> 7 | 8 | # {{ model_name | default("Diffusion Model") }} 9 | 10 | ## Model description 11 | 12 | This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library 13 | on the `{{ dataset_name }}` dataset.
14 | 15 | ## Intended uses & limitations 16 | 17 | #### How to use 18 | 19 | ```python 20 | # TODO: add an example code snippet for running this diffusion pipeline 21 | ``` 22 | 23 | #### Limitations and bias 24 | 25 | [TODO: provide examples of latent issues and potential remediations] 26 | 27 | ## Training data 28 | 29 | [TODO: describe the data used to train the model] 30 | 31 | ### Training hyperparameters 32 | 33 | The following hyperparameters were used during training: 34 | - learning_rate: {{ learning_rate }} 35 | - train_batch_size: {{ train_batch_size }} 36 | - eval_batch_size: {{ eval_batch_size }} 37 | - gradient_accumulation_steps: {{ gradient_accumulation_steps }} 38 | - optimizer: AdamW with betas=({{ adam_beta1 }}, {{ adam_beta2 }}), weight_decay={{ adam_weight_decay }} and epsilon={{ adam_epsilon }} 39 | - lr_scheduler: {{ lr_scheduler }} 40 | - lr_warmup_steps: {{ lr_warmup_steps }} 41 | - ema_inv_gamma: {{ ema_inv_gamma }} 42 | - ema_power: {{ ema_power }} 43 | - ema_max_decay: {{ ema_max_decay }} 44 | - mixed_precision: {{ mixed_precision }} 45 | 46 | ### Training results 47 | 48 | 📈 [TensorBoard logs](https://huggingface.co/{{ repo_name }}/tensorboard?#scalars) 49 | 50 | 51 | -------------------------------------------------------------------------------- /src/diffusers/utils/pil_utils.py: -------------------------------------------------------------------------------- 1 | import PIL.Image 2 | import PIL.ImageOps 3 | from packaging import version 4 | from PIL import Image 5 | 6 | 7 | if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): 8 |     PIL_INTERPOLATION = { 9 |         "linear": PIL.Image.Resampling.BILINEAR, 10 |         "bilinear": PIL.Image.Resampling.BILINEAR, 11 |         "bicubic": PIL.Image.Resampling.BICUBIC, 12 |         "lanczos": PIL.Image.Resampling.LANCZOS, 13 |         "nearest": PIL.Image.Resampling.NEAREST, 14 |     } 15 | else: 16 |     PIL_INTERPOLATION = { 17 |         "linear": PIL.Image.LINEAR, 18 |         "bilinear": PIL.Image.BILINEAR, 19 |         "bicubic": PIL.Image.BICUBIC, 20 |         "lanczos": PIL.Image.LANCZOS, 21 |         "nearest": PIL.Image.NEAREST, 22 |     } 23 | 24 | 25 | def pt_to_pil(images): 26 |     """ 27 |     Convert a batch of torch images to a list of PIL images. 28 |     """ 29 |     images = (images / 2 + 0.5).clamp(0, 1) 30 |     images = images.cpu().permute(0, 2, 3, 1).float().numpy() 31 |     images = numpy_to_pil(images) 32 |     return images 33 | 34 | 35 | def numpy_to_pil(images): 36 |     """ 37 |     Convert a NumPy image or a batch of images to a list of PIL images. 38 |     """ 39 |     if images.ndim == 3: 40 |         images = images[None, ...] 41 |     images = (images * 255).round().astype("uint8") 42 |     if images.shape[-1] == 1: 43 |         # special case for grayscale (single channel) images 44 |         pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] 45 |     else: 46 |         pil_images = [Image.fromarray(image) for image in images] 47 | 48 |     return pil_images 49 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved.
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # tests directory-specific settings - this file is run automatically 16 | # by pytest before any tests are run 17 | 18 | import sys 19 | import warnings 20 | from os.path import abspath, dirname, join 21 | 22 | 23 | # allow having multiple repository checkouts and not needing to remember to rerun 24 | # 'pip install -e .[dev]' when switching between checkouts and running tests. 25 | git_repo_path = abspath(join(dirname(dirname(__file__)), "src")) 26 | sys.path.insert(1, git_repo_path) 27 | 28 | # silence FutureWarning warnings in tests since often we can't act on them until 29 | # they become normal warnings - i.e. the tests still need to test the current functionality 30 | warnings.simplefilter(action="ignore", category=FutureWarning) 31 | 32 | 33 | def pytest_addoption(parser): 34 | from diffusers.utils.testing_utils import pytest_addoption_shared 35 | 36 | pytest_addoption_shared(parser) 37 | 38 | 39 | def pytest_terminal_summary(terminalreporter): 40 | from diffusers.utils.testing_utils import pytest_terminal_summary_main 41 | 42 | make_reports = terminalreporter.config.getoption("--make-reports") 43 | if make_reports: 44 | pytest_terminal_summary_main(terminalreporter, id=make_reports) 45 | -------------------------------------------------------------------------------- /tests/fixtures/elise_format0.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/fixtures/elise_format0.mid -------------------------------------------------------------------------------- /tests/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/models/__init__.py -------------------------------------------------------------------------------- /tests/models/test_activations.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | from torch import nn 5 | 6 | from diffusers.models.activations import get_activation 7 | 8 | 9 | class ActivationsTests(unittest.TestCase): 10 | def test_swish(self): 11 | act = get_activation("swish") 12 | 13 | self.assertIsInstance(act, nn.SiLU) 14 | 15 | self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) 16 | self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) 17 | self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) 18 | self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) 19 | 20 | def test_silu(self): 21 | act = get_activation("silu") 22 | 23 | self.assertIsInstance(act, nn.SiLU) 24 | 25 | self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) 26 | self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) 27 | 
self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) 28 | self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) 29 | 30 | def test_mish(self): 31 | act = get_activation("mish") 32 | 33 | self.assertIsInstance(act, nn.Mish) 34 | 35 | self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0) 36 | self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) 37 | self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) 38 | self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) 39 | 40 | def test_gelu(self): 41 | act = get_activation("gelu") 42 | 43 | self.assertIsInstance(act, nn.GELU) 44 | 45 | self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) 46 | self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) 47 | self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) 48 | self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) 49 | -------------------------------------------------------------------------------- /tests/models/test_models_vae_flax.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from diffusers import FlaxAutoencoderKL 4 | from diffusers.utils import is_flax_available 5 | from diffusers.utils.testing_utils import require_flax 6 | 7 | from .test_modeling_common_flax import FlaxModelTesterMixin 8 | 9 | 10 | if is_flax_available(): 11 | import jax 12 | 13 | 14 | @require_flax 15 | class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase): 16 | model_class = FlaxAutoencoderKL 17 | 18 | @property 19 | def dummy_input(self): 20 | batch_size = 4 21 | num_channels = 3 22 | sizes = (32, 32) 23 | 24 | prng_key = jax.random.PRNGKey(0) 25 | image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes)) 26 | 27 | return {"sample": image, "prng_key": prng_key} 28 | 29 | def prepare_init_args_and_inputs_for_common(self): 30 | init_dict = { 31 | "block_out_channels": [32, 64], 32 | "in_channels": 3, 33 | "out_channels": 3, 34 | "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], 35 | "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], 36 | "latent_channels": 4, 37 | } 38 | inputs_dict = self.dummy_input 39 | return init_dict, inputs_dict 40 | -------------------------------------------------------------------------------- /tests/others/test_hub_utils.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2023 HuggingFace Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | import unittest 16 | from pathlib import Path 17 | from tempfile import TemporaryDirectory 18 | from unittest.mock import Mock, patch 19 | 20 | import diffusers.utils.hub_utils 21 | 22 | 23 | class CreateModelCardTest(unittest.TestCase): 24 |     @patch("diffusers.utils.hub_utils.get_full_repo_name") 25 |     def test_create_model_card(self, repo_name_mock: Mock) -> None: 26 |         repo_name_mock.return_value = "full_repo_name" 27 |         with TemporaryDirectory() as tmpdir: 28 |             # Dummy argument values 29 |             args = Mock() 30 |             args.output_dir = tmpdir 31 |             args.local_rank = 0 32 |             args.hub_token = "hub_token" 33 |             args.dataset_name = "dataset_name" 34 |             args.learning_rate = 0.01 35 |             args.train_batch_size = 100000 36 |             args.eval_batch_size = 10000 37 |             args.gradient_accumulation_steps = 0.01 38 |             args.adam_beta1 = 0.02 39 |             args.adam_beta2 = 0.03 40 |             args.adam_weight_decay = 0.0005 41 |             args.adam_epsilon = 0.000001 42 |             args.lr_scheduler = 1 43 |             args.lr_warmup_steps = 10 44 |             args.ema_inv_gamma = 0.001 45 |             args.ema_power = 0.1 46 |             args.ema_max_decay = 0.2 47 |             args.mixed_precision = True 48 | 49 |             # Model card must be rendered and saved 50 |             diffusers.utils.hub_utils.create_model_card(args, model_name="model_name") 51 |             self.assertTrue((Path(tmpdir) / "README.md").is_file()) 52 | -------------------------------------------------------------------------------- /tests/others/test_outputs.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from dataclasses import dataclass 3 | from typing import List, Union 4 | 5 | import numpy as np 6 | import PIL.Image 7 | 8 | from diffusers.utils.outputs import BaseOutput 9 | 10 | 11 | @dataclass 12 | class CustomOutput(BaseOutput): 13 |     images: Union[List[PIL.Image.Image], np.ndarray] 14 | 15 | 16 | class ConfigTester(unittest.TestCase): 17 |     def test_outputs_single_attribute(self): 18 |         outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4)) 19 | 20 |         # check every way of getting the attribute 21 |         assert isinstance(outputs.images, np.ndarray) 22 |         assert outputs.images.shape == (1, 3, 4, 4) 23 |         assert isinstance(outputs["images"], np.ndarray) 24 |         assert outputs["images"].shape == (1, 3, 4, 4) 25 |         assert isinstance(outputs[0], np.ndarray) 26 |         assert outputs[0].shape == (1, 3, 4, 4) 27 | 28 |         # test with a non-tensor attribute 29 |         outputs = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) 30 | 31 |         # check every way of getting the attribute 32 |         assert isinstance(outputs.images, list) 33 |         assert isinstance(outputs.images[0], PIL.Image.Image) 34 |         assert isinstance(outputs["images"], list) 35 |         assert isinstance(outputs["images"][0], PIL.Image.Image) 36 |         assert isinstance(outputs[0], list) 37 |         assert isinstance(outputs[0][0], PIL.Image.Image) 38 | 39 |     def test_outputs_dict_init(self): 40 |         # test output reinitialization with a `dict` for compatibility with `accelerate` 41 |         outputs = CustomOutput({"images": np.random.rand(1, 3, 4, 4)}) 42 | 43 |         # check every way of getting the attribute 44 |         assert isinstance(outputs.images, np.ndarray) 45 |         assert outputs.images.shape == (1, 3, 4, 4) 46 |         assert isinstance(outputs["images"], np.ndarray) 47 |         assert outputs["images"].shape == (1, 3, 4, 4) 48 |         assert isinstance(outputs[0], np.ndarray) 49 |         assert outputs[0].shape == (1, 3, 4, 4) 50 | 51 |         # test with a non-tensor attribute 52 |         outputs = CustomOutput({"images": [PIL.Image.new("RGB", (4, 4))]}) 53 | 54 |         # check every way of getting the attribute 55 |         assert isinstance(outputs.images, list) 56 |
assert isinstance(outputs.images[0], PIL.Image.Image) 57 | assert isinstance(outputs["images"], list) 58 | assert isinstance(outputs["images"][0], PIL.Image.Image) 59 | assert isinstance(outputs[0], list) 60 | assert isinstance(outputs[0][0], PIL.Image.Image) 61 | -------------------------------------------------------------------------------- /tests/pipelines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/altdiffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/altdiffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/audio_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/audio_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/audioldm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/audioldm/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/controlnet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/controlnet/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/dance_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/dance_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/ddim/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/ddim/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/ddpm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/ddpm/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/dit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/dit/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/kandinsky/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/kandinsky/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/karras_ve/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/karras_ve/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/latent_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/latent_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/paint_by_example/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/paint_by_example/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/pndm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/pndm/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/repaint/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/repaint/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/score_sde_ve/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/score_sde_ve/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/semantic_stable_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/semantic_stable_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/spectrogram_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/spectrogram_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/stable_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/stable_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/stable_diffusion_2/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/stable_diffusion_2/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/stable_diffusion_safe/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/stable_diffusion_safe/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/stable_unclip/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/stable_unclip/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/test_pipelines_onnx_common.py: -------------------------------------------------------------------------------- 1 | from diffusers.utils.testing_utils import require_onnxruntime 2 | 3 | 4 | @require_onnxruntime 5 | class OnnxPipelineTesterMixin: 6 | """ 7 | This mixin is designed to be used with unittest.TestCase classes. 8 | It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline, 9 | equivalence of dict and tuple outputs, etc. 10 | """ 11 | 12 | pass 13 | -------------------------------------------------------------------------------- /tests/pipelines/text_to_video/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/text_to_video/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/text_to_video/test_text_to_video_zero.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2023 HuggingFace Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | import unittest 17 | 18 | import torch 19 | 20 | from diffusers import DDIMScheduler, TextToVideoZeroPipeline 21 | from diffusers.utils import load_pt, require_torch_gpu, slow 22 | 23 | from ..test_pipelines_common import assert_mean_pixel_difference 24 | 25 | 26 | @slow 27 | @require_torch_gpu 28 | class TextToVideoZeroPipelineSlowTests(unittest.TestCase): 29 | def test_full_model(self): 30 | model_id = "runwayml/stable-diffusion-v1-5" 31 | pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") 32 | pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) 33 | generator = torch.Generator(device="cuda").manual_seed(0) 34 | 35 | prompt = "A bear is playing a guitar on Times Square" 36 | result = pipe(prompt=prompt, generator=generator).images 37 | 38 | expected_result = load_pt( 39 | "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text-to-video/A bear is playing a guitar on Times Square.pt" 40 | ) 41 | 42 | assert_mean_pixel_difference(result, expected_result) 43 | -------------------------------------------------------------------------------- /tests/pipelines/unclip/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/unclip/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/unidiffuser/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/unidiffuser/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/versatile_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/versatile_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2023 HuggingFace Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | import unittest 17 | 18 | import numpy as np 19 | import torch 20 | 21 | from diffusers import VersatileDiffusionImageVariationPipeline 22 | from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device 23 | 24 | 25 | torch.backends.cuda.matmul.allow_tf32 = False 26 | 27 | 28 | class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase): 29 | pass 30 | 31 | 32 | @slow 33 | @require_torch_gpu 34 | class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase): 35 | def test_inference_image_variations(self): 36 | pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion") 37 | pipe.to(torch_device) 38 | pipe.set_progress_bar_config(disable=None) 39 | 40 | image_prompt = load_image( 41 | "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" 42 | ) 43 | generator = torch.manual_seed(0) 44 | image = pipe( 45 | image=image_prompt, 46 | generator=generator, 47 | guidance_scale=7.5, 48 | num_inference_steps=50, 49 | output_type="numpy", 50 | ).images 51 | 52 | image_slice = image[0, 253:256, 253:256, -1] 53 | 54 | assert image.shape == (1, 512, 512, 3) 55 | expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945]) 56 | 57 | assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 58 | -------------------------------------------------------------------------------- /tests/pipelines/vq_diffusion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/pipelines/vq_diffusion/__init__.py -------------------------------------------------------------------------------- /tests/schedulers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/takuma104/diffusers/f523b11a1023a07d5aaa21a68d69ce6d9b71d36e/tests/schedulers/__init__.py -------------------------------------------------------------------------------- /tests/schedulers/test_scheduler_vq_diffusion.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | 4 | from diffusers import VQDiffusionScheduler 5 | 6 | from .test_schedulers import SchedulerCommonTest 7 | 8 | 9 | class VQDiffusionSchedulerTest(SchedulerCommonTest): 10 | scheduler_classes = (VQDiffusionScheduler,) 11 | 12 | def get_scheduler_config(self, **kwargs): 13 | config = { 14 | "num_vec_classes": 4097, 15 | "num_train_timesteps": 100, 16 | } 17 | 18 | config.update(**kwargs) 19 | return config 20 | 21 | def dummy_sample(self, num_vec_classes): 22 | batch_size = 4 23 | height = 8 24 | width = 8 25 | 26 | sample = torch.randint(0, num_vec_classes, (batch_size, height * width)) 27 | 28 | return sample 29 | 30 | @property 31 | def dummy_sample_deter(self): 32 | assert False 33 | 34 | def dummy_model(self, num_vec_classes): 35 | def model(sample, t, *args): 36 | batch_size, num_latent_pixels = sample.shape 37 | logits = torch.rand((batch_size, num_vec_classes - 1, num_latent_pixels)) 38 | return_value = F.log_softmax(logits.double(), dim=1).float() 39 | return return_value 40 | 41 | return model 42 | 43 | def test_timesteps(self): 44 | for timesteps in [2, 5, 100, 1000]: 45 | self.check_over_configs(num_train_timesteps=timesteps) 46 | 47 | def test_num_vec_classes(self): 48 | for 
num_vec_classes in [5, 100, 1000, 4000]: 49 | self.check_over_configs(num_vec_classes=num_vec_classes) 50 | 51 | def test_time_indices(self): 52 | for t in [0, 50, 99]: 53 | self.check_over_forward(time_step=t) 54 | 55 | def test_add_noise_device(self): 56 | pass 57 | -------------------------------------------------------------------------------- /utils/get_modified_files.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2023 The HuggingFace Inc. team. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: 17 | # python ./utils/get_modified_files.py utils src tests examples 18 | # 19 | # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered 20 | # since the output of this script is fed into Makefile commands it doesn't print a newline after the results 21 | 22 | import re 23 | import subprocess 24 | import sys 25 | 26 | 27 | fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") 28 | modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split() 29 | 30 | joined_dirs = "|".join(sys.argv[1:]) 31 | regex = re.compile(rf"^({joined_dirs}).*?\.py$") 32 | 33 | relevant_modified_files = [x for x in modified_files if regex.match(x)] 34 | print(" ".join(relevant_modified_files), end="") 35 | -------------------------------------------------------------------------------- /utils/print_env.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # coding=utf-8 4 | # Copyright 2023 The HuggingFace Inc. team. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | 18 | # this script dumps information about the environment 19 | 20 | import os 21 | import platform 22 | import sys 23 | 24 | 25 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 26 | 27 | print("Python version:", sys.version) 28 | 29 | print("OS platform:", platform.platform()) 30 | print("OS architecture:", platform.machine()) 31 | 32 | try: 33 | import torch 34 | 35 | print("Torch version:", torch.__version__) 36 | print("Cuda available:", torch.cuda.is_available()) 37 | print("Cuda version:", torch.version.cuda) 38 | print("CuDNN version:", torch.backends.cudnn.version()) 39 | print("Number of GPUs available:", torch.cuda.device_count()) 40 | except ImportError: 41 | print("Torch version:", None) 42 | 43 | try: 44 | import transformers 45 | 46 | print("transformers version:", transformers.__version__) 47 | except ImportError: 48 | print("transformers version:", None) 49 | --------------------------------------------------------------------------------
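As a closing reference, two hedged usage sketches for utilities shown earlier in this dump. First, the `deprecate` helper from `deprecation_utils.py`: the function and keyword names below are illustrative, but the call pattern follows the implementation above. Note that "1.0.0" simply has to be a not-yet-released diffusers version, since `deprecate` raises a ValueError once the installed version reaches the removal target.

from diffusers.utils import deprecate


def generate(prompt, **kwargs):
    # Pop the retired `seed` kwarg (an illustrative name) and emit a FutureWarning.
    # `deprecate` returns the popped value, or None when the caller did not pass it;
    # any *other* keys left in `kwargs` trigger a TypeError, per the source above.
    seed = deprecate("seed", "1.0.0", "Pass a `torch.Generator` via `generator` instead.", take_from=kwargs)
    if seed is not None:
        ...  # translate the legacy argument into its replacement

Second, a round-trip check for `StableUnCLIPImageNormalizer` from `stable_unclip_image_normalizer.py`: `unscale` inverts `scale` up to floating-point error, since scale(x) = (x - mean) / std and unscale(y) = y * std + mean.

import torch

from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

# With the default config the parameters are mean = 0 and std = 1, so both maps
# are the identity; the round-trip property holds for any learned values as well.
normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(2, 768)
assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-5)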