├── README.md ├── diffusers ├── README.md ├── setup.py └── src │ ├── diffusers.egg-info │ ├── PKG-INFO │ ├── SOURCES.txt │ ├── dependency_links.txt │ ├── entry_points.txt │ ├── requires.txt │ └── top_level.txt │ └── diffusers │ ├── __init__.py │ ├── commands │ ├── __init__.py │ ├── diffusers_cli.py │ ├── env.py │ └── fp16_safetensors.py │ ├── configuration_utils.py │ ├── dependency_versions_check.py │ ├── dependency_versions_table.py │ ├── experimental │ ├── README.md │ ├── __init__.py │ └── rl │ │ ├── __init__.py │ │ └── value_guided_sampling.py │ ├── image_processor.py │ ├── loaders.py │ ├── models │ ├── README.md │ ├── __init__.py │ ├── activations.py │ ├── adapter.py │ ├── attention.py │ ├── attention_flax.py │ ├── attention_processor.py │ ├── autoencoder_asym_kl.py │ ├── autoencoder_kl.py │ ├── autoencoder_tiny.py │ ├── controlnet.py │ ├── controlnet_flax.py │ ├── dual_transformer_2d.py │ ├── embeddings.py │ ├── embeddings_flax.py │ ├── lora.py │ ├── modeling_flax_pytorch_utils.py │ ├── modeling_flax_utils.py │ ├── modeling_pytorch_flax_utils.py │ ├── modeling_utils.py │ ├── prior_transformer.py │ ├── resnet.py │ ├── resnet_flax.py │ ├── t5_film_transformer.py │ ├── transformer_2d.py │ ├── transformer_temporal.py │ ├── unet_1d.py │ ├── unet_1d_blocks.py │ ├── unet_2d.py │ ├── unet_2d_blocks.py │ ├── unet_2d_blocks_flax.py │ ├── unet_2d_condition.py │ ├── unet_2d_condition_flax.py │ ├── unet_3d_blocks.py │ ├── unet_3d_condition.py │ ├── vae.py │ ├── vae_flax.py │ └── vq_model.py │ ├── optimization.py │ ├── pipeline_utils.py │ ├── pipelines │ ├── README.md │ ├── __init__.py │ ├── alt_diffusion │ │ ├── __init__.py │ │ ├── modeling_roberta_series.py │ │ ├── pipeline_alt_diffusion.py │ │ └── pipeline_alt_diffusion_img2img.py │ ├── audio_diffusion │ │ ├── __init__.py │ │ ├── mel.py │ │ └── pipeline_audio_diffusion.py │ ├── audioldm │ │ ├── __init__.py │ │ └── pipeline_audioldm.py │ ├── audioldm2 │ │ ├── __init__.py │ │ ├── modeling_audioldm2.py │ │ └── pipeline_audioldm2.py │ ├── auto_pipeline.py │ ├── consistency_models │ │ ├── __init__.py │ │ └── pipeline_consistency_models.py │ ├── controlnet │ │ ├── __init__.py │ │ ├── multicontrolnet.py │ │ ├── pipeline_controlnet.py │ │ ├── pipeline_controlnet_img2img.py │ │ ├── pipeline_controlnet_inpaint.py │ │ ├── pipeline_controlnet_sd_xl.py │ │ └── pipeline_flax_controlnet.py │ ├── dance_diffusion │ │ ├── __init__.py │ │ └── pipeline_dance_diffusion.py │ ├── ddim │ │ ├── __init__.py │ │ └── pipeline_ddim.py │ ├── ddpm │ │ ├── __init__.py │ │ └── pipeline_ddpm.py │ ├── deepfloyd_if │ │ ├── __init__.py │ │ ├── pipeline_if.py │ │ ├── pipeline_if_img2img.py │ │ ├── pipeline_if_img2img_superresolution.py │ │ ├── pipeline_if_inpainting.py │ │ ├── pipeline_if_inpainting_superresolution.py │ │ ├── pipeline_if_superresolution.py │ │ ├── safety_checker.py │ │ ├── timesteps.py │ │ └── watermark.py │ ├── dit │ │ ├── __init__.py │ │ └── pipeline_dit.py │ ├── kandinsky │ │ ├── __init__.py │ │ ├── pipeline_kandinsky.py │ │ ├── pipeline_kandinsky_combined.py │ │ ├── pipeline_kandinsky_img2img.py │ │ ├── pipeline_kandinsky_inpaint.py │ │ ├── pipeline_kandinsky_prior.py │ │ └── text_encoder.py │ ├── kandinsky2_2 │ │ ├── __init__.py │ │ ├── pipeline_kandinsky2_2.py │ │ ├── pipeline_kandinsky2_2_combined.py │ │ ├── pipeline_kandinsky2_2_controlnet.py │ │ ├── pipeline_kandinsky2_2_controlnet_img2img.py │ │ ├── pipeline_kandinsky2_2_img2img.py │ │ ├── pipeline_kandinsky2_2_inpainting.py │ │ ├── pipeline_kandinsky2_2_prior.py │ │ └── 
pipeline_kandinsky2_2_prior_emb2emb.py │ ├── latent_diffusion │ │ ├── __init__.py │ │ ├── pipeline_latent_diffusion.py │ │ └── pipeline_latent_diffusion_superresolution.py │ ├── latent_diffusion_uncond │ │ ├── __init__.py │ │ └── pipeline_latent_diffusion_uncond.py │ ├── musicldm │ │ ├── __init__.py │ │ └── pipeline_musicldm.py │ ├── onnx_utils.py │ ├── paint_by_example │ │ ├── __init__.py │ │ ├── image_encoder.py │ │ └── pipeline_paint_by_example.py │ ├── pipeline_flax_utils.py │ ├── pipeline_utils.py │ ├── pndm │ │ ├── __init__.py │ │ └── pipeline_pndm.py │ ├── repaint │ │ ├── __init__.py │ │ └── pipeline_repaint.py │ ├── score_sde_ve │ │ ├── __init__.py │ │ └── pipeline_score_sde_ve.py │ ├── semantic_stable_diffusion │ │ ├── __init__.py │ │ └── pipeline_semantic_stable_diffusion.py │ ├── shap_e │ │ ├── __init__.py │ │ ├── camera.py │ │ ├── pipeline_shap_e.py │ │ ├── pipeline_shap_e_img2img.py │ │ └── renderer.py │ ├── spectrogram_diffusion │ │ ├── __init__.py │ │ ├── continous_encoder.py │ │ ├── midi_utils.py │ │ ├── notes_encoder.py │ │ └── pipeline_spectrogram_diffusion.py │ ├── stable_diffusion │ │ ├── README.md │ │ ├── __init__.py │ │ ├── convert_from_ckpt.py │ │ ├── pipeline_cycle_diffusion.py │ │ ├── pipeline_flax_stable_diffusion.py │ │ ├── pipeline_flax_stable_diffusion_controlnet.py │ │ ├── pipeline_flax_stable_diffusion_img2img.py │ │ ├── pipeline_flax_stable_diffusion_inpaint.py │ │ ├── pipeline_onnx_stable_diffusion.py │ │ ├── pipeline_onnx_stable_diffusion_img2img.py │ │ ├── pipeline_onnx_stable_diffusion_inpaint.py │ │ ├── pipeline_onnx_stable_diffusion_inpaint_legacy.py │ │ ├── pipeline_onnx_stable_diffusion_upscale.py │ │ ├── pipeline_stable_diffusion.py │ │ ├── pipeline_stable_diffusion_attend_and_excite.py │ │ ├── pipeline_stable_diffusion_controlnet.py │ │ ├── pipeline_stable_diffusion_depth2img.py │ │ ├── pipeline_stable_diffusion_diffedit.py │ │ ├── pipeline_stable_diffusion_gligen.py │ │ ├── pipeline_stable_diffusion_image_variation.py │ │ ├── pipeline_stable_diffusion_img2img.py │ │ ├── pipeline_stable_diffusion_inpaint.py │ │ ├── pipeline_stable_diffusion_inpaint_legacy.py │ │ ├── pipeline_stable_diffusion_instruct_pix2pix.py │ │ ├── pipeline_stable_diffusion_k_diffusion.py │ │ ├── pipeline_stable_diffusion_latent_upscale.py │ │ ├── pipeline_stable_diffusion_ldm3d.py │ │ ├── pipeline_stable_diffusion_model_editing.py │ │ ├── pipeline_stable_diffusion_panorama.py │ │ ├── pipeline_stable_diffusion_paradigms.py │ │ ├── pipeline_stable_diffusion_pix2pix_zero.py │ │ ├── pipeline_stable_diffusion_sag.py │ │ ├── pipeline_stable_diffusion_upscale.py │ │ ├── pipeline_stable_unclip.py │ │ ├── pipeline_stable_unclip_img2img.py │ │ ├── safety_checker.py │ │ ├── safety_checker_flax.py │ │ └── stable_unclip_image_normalizer.py │ ├── stable_diffusion_safe │ │ ├── __init__.py │ │ ├── pipeline_stable_diffusion_safe.py │ │ └── safety_checker.py │ ├── stable_diffusion_xl │ │ ├── __init__.py │ │ ├── pipeline_stable_diffusion_xl.py │ │ ├── pipeline_stable_diffusion_xl_img2img.py │ │ ├── pipeline_stable_diffusion_xl_inpaint.py │ │ ├── pipeline_stable_diffusion_xl_instruct_pix2pix.py │ │ └── watermark.py │ ├── stochastic_karras_ve │ │ ├── __init__.py │ │ └── pipeline_stochastic_karras_ve.py │ ├── t2i_adapter │ │ ├── __init__.py │ │ └── pipeline_stable_diffusion_adapter.py │ ├── text_to_video_synthesis │ │ ├── __init__.py │ │ ├── pipeline_text_to_video_synth.py │ │ ├── pipeline_text_to_video_synth_img2img.py │ │ └── pipeline_text_to_video_zero.py │ ├── unclip │ │ ├── __init__.py │ │ ├── 
pipeline_unclip.py │ │ ├── pipeline_unclip_image_variation.py │ │ └── text_proj.py │ ├── unidiffuser │ │ ├── __init__.py │ │ ├── modeling_text_decoder.py │ │ ├── modeling_uvit.py │ │ └── pipeline_unidiffuser.py │ ├── versatile_diffusion │ │ ├── __init__.py │ │ ├── modeling_text_unet.py │ │ ├── pipeline_versatile_diffusion.py │ │ ├── pipeline_versatile_diffusion_dual_guided.py │ │ ├── pipeline_versatile_diffusion_image_variation.py │ │ └── pipeline_versatile_diffusion_text_to_image.py │ └── vq_diffusion │ │ ├── __init__.py │ │ └── pipeline_vq_diffusion.py │ ├── schedulers │ ├── README.md │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ ├── scheduling_consistency_models.cpython-38.pyc │ │ ├── scheduling_ddim.cpython-38.pyc │ │ ├── scheduling_ddim_inverse.cpython-38.pyc │ │ ├── scheduling_ddim_parallel.cpython-38.pyc │ │ ├── scheduling_ddpm.cpython-38.pyc │ │ ├── scheduling_ddpm_parallel.cpython-38.pyc │ │ ├── scheduling_deis_multistep.cpython-38.pyc │ │ ├── scheduling_dpmsolver_multistep.cpython-38.pyc │ │ ├── scheduling_dpmsolver_multistep_inverse.cpython-38.pyc │ │ ├── scheduling_dpmsolver_sde.cpython-38.pyc │ │ ├── scheduling_dpmsolver_singlestep.cpython-38.pyc │ │ ├── scheduling_euler_ancestral_discrete.cpython-38.pyc │ │ ├── scheduling_euler_discrete.cpython-38.pyc │ │ ├── scheduling_heun_discrete.cpython-38.pyc │ │ ├── scheduling_ipndm.cpython-38.pyc │ │ ├── scheduling_k_dpm_2_ancestral_discrete.cpython-38.pyc │ │ ├── scheduling_k_dpm_2_discrete.cpython-38.pyc │ │ ├── scheduling_karras_ve.cpython-38.pyc │ │ ├── scheduling_lms_discrete.cpython-38.pyc │ │ ├── scheduling_pndm.cpython-38.pyc │ │ ├── scheduling_repaint.cpython-38.pyc │ │ ├── scheduling_sde_ve.cpython-38.pyc │ │ ├── scheduling_sde_vp.cpython-38.pyc │ │ ├── scheduling_unclip.cpython-38.pyc │ │ ├── scheduling_unipc_multistep.cpython-38.pyc │ │ ├── scheduling_utils.cpython-38.pyc │ │ └── scheduling_vq_diffusion.cpython-38.pyc │ ├── scheduling_consistency_models.py │ ├── scheduling_ddim.py │ ├── scheduling_ddim_flax.py │ ├── scheduling_ddim_inverse.py │ ├── scheduling_ddim_parallel.py │ ├── scheduling_ddpm.py │ ├── scheduling_ddpm_flax.py │ ├── scheduling_ddpm_parallel.py │ ├── scheduling_deis_multistep.py │ ├── scheduling_dpmsolver_multistep.py │ ├── scheduling_dpmsolver_multistep_flax.py │ ├── scheduling_dpmsolver_multistep_inverse.py │ ├── scheduling_dpmsolver_sde.py │ ├── scheduling_dpmsolver_singlestep.py │ ├── scheduling_euler_ancestral_discrete.py │ ├── scheduling_euler_discrete.py │ ├── scheduling_heun_discrete.py │ ├── scheduling_ipndm.py │ ├── scheduling_k_dpm_2_ancestral_discrete.py │ ├── scheduling_k_dpm_2_discrete.py │ ├── scheduling_karras_ve.py │ ├── scheduling_karras_ve_flax.py │ ├── scheduling_lms_discrete.py │ ├── scheduling_lms_discrete_flax.py │ ├── scheduling_pndm.py │ ├── scheduling_pndm_flax.py │ ├── scheduling_repaint.py │ ├── scheduling_sde_ve.py │ ├── scheduling_sde_ve_flax.py │ ├── scheduling_sde_vp.py │ ├── scheduling_unclip.py │ ├── scheduling_unipc_multistep.py │ ├── scheduling_utils.py │ ├── scheduling_utils_flax.py │ └── scheduling_vq_diffusion.py │ ├── training_utils.py │ └── utils │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-311.pyc │ ├── __init__.cpython-38.pyc │ ├── accelerate_utils.cpython-311.pyc │ ├── accelerate_utils.cpython-38.pyc │ ├── constants.cpython-311.pyc │ ├── constants.cpython-38.pyc │ ├── deprecation_utils.cpython-311.pyc │ ├── deprecation_utils.cpython-38.pyc │ ├── doc_utils.cpython-311.pyc │ ├── doc_utils.cpython-38.pyc │ ├── 
dummy_flax_and_transformers_objects.cpython-311.pyc │ ├── dummy_flax_and_transformers_objects.cpython-38.pyc │ ├── dummy_flax_objects.cpython-311.pyc │ ├── dummy_flax_objects.cpython-38.pyc │ ├── dummy_note_seq_objects.cpython-311.pyc │ ├── dummy_note_seq_objects.cpython-38.pyc │ ├── dummy_onnx_objects.cpython-311.pyc │ ├── dummy_onnx_objects.cpython-38.pyc │ ├── dummy_pt_objects.cpython-311.pyc │ ├── dummy_torch_and_librosa_objects.cpython-311.pyc │ ├── dummy_torch_and_librosa_objects.cpython-38.pyc │ ├── dummy_torch_and_scipy_objects.cpython-311.pyc │ ├── dummy_torch_and_scipy_objects.cpython-38.pyc │ ├── dummy_torch_and_torchsde_objects.cpython-311.pyc │ ├── dummy_torch_and_torchsde_objects.cpython-38.pyc │ ├── dummy_torch_and_transformers_and_invisible_watermark_objects.cpython-38.pyc │ ├── dummy_torch_and_transformers_and_k_diffusion_objects.cpython-311.pyc │ ├── dummy_torch_and_transformers_and_k_diffusion_objects.cpython-38.pyc │ ├── dummy_torch_and_transformers_and_onnx_objects.cpython-311.pyc │ ├── dummy_torch_and_transformers_and_onnx_objects.cpython-38.pyc │ ├── dummy_torch_and_transformers_objects.cpython-311.pyc │ ├── dummy_torch_and_transformers_objects.cpython-38.pyc │ ├── dummy_transformers_and_torch_and_note_seq_objects.cpython-311.pyc │ ├── dummy_transformers_and_torch_and_note_seq_objects.cpython-38.pyc │ ├── dynamic_modules_utils.cpython-311.pyc │ ├── dynamic_modules_utils.cpython-38.pyc │ ├── hub_utils.cpython-311.pyc │ ├── hub_utils.cpython-38.pyc │ ├── import_utils.cpython-311.pyc │ ├── import_utils.cpython-38.pyc │ ├── logging.cpython-311.pyc │ ├── logging.cpython-38.pyc │ ├── outputs.cpython-311.pyc │ ├── outputs.cpython-38.pyc │ ├── pil_utils.cpython-311.pyc │ ├── pil_utils.cpython-38.pyc │ ├── testing_utils.cpython-311.pyc │ ├── testing_utils.cpython-38.pyc │ ├── torch_utils.cpython-311.pyc │ └── torch_utils.cpython-38.pyc │ ├── accelerate_utils.py │ ├── constants.py │ ├── deprecation_utils.py │ ├── doc_utils.py │ ├── dummy_flax_and_transformers_objects.py │ ├── dummy_flax_objects.py │ ├── dummy_note_seq_objects.py │ ├── dummy_onnx_objects.py │ ├── dummy_pt_objects.py │ ├── dummy_torch_and_librosa_objects.py │ ├── dummy_torch_and_scipy_objects.py │ ├── dummy_torch_and_torchsde_objects.py │ ├── dummy_torch_and_transformers_and_k_diffusion_objects.py │ ├── dummy_torch_and_transformers_and_onnx_objects.py │ ├── dummy_torch_and_transformers_objects.py │ ├── dummy_transformers_and_torch_and_note_seq_objects.py │ ├── dynamic_modules_utils.py │ ├── hub_utils.py │ ├── import_utils.py │ ├── logging.py │ ├── model_card_template.md │ ├── outputs.py │ ├── pil_utils.py │ ├── testing_utils.py │ └── torch_utils.py ├── diffusers_test ├── ContentImages │ ├── imgs0 │ │ ├── 0000.png │ │ ├── 0001.png │ │ ├── 0002.png │ │ ├── 0003.png │ │ └── 0004.png │ ├── imgs1 │ │ ├── 0000.png │ │ ├── 0001.png │ │ ├── 0002.png │ │ ├── 0003.png │ │ └── 0004.png │ ├── imgs2 │ │ ├── 0000.png │ │ ├── 0001.png │ │ ├── 0002.png │ │ ├── 0003.png │ │ └── 0004.png │ ├── imgs3 │ │ ├── 0000.png │ │ ├── 0001.png │ │ ├── 0002.png │ │ ├── 0003.png │ │ └── 0004.png │ ├── imgs4 │ │ ├── 0000.png │ │ ├── 0001.png │ │ ├── 0002.png │ │ ├── 0003.png │ │ └── 0004.png │ ├── imgs5 │ │ ├── 0000.png │ │ ├── 0001.png │ │ ├── 0002.png │ │ ├── 0003.png │ │ └── 0004.png │ └── imgs_and_hyperparameters │ │ ├── ContentImages │ │ ├── 0.png │ │ ├── 1.png │ │ ├── 10.png │ │ ├── 11.png │ │ ├── 12.png │ │ ├── 2.png │ │ ├── 3.png │ │ ├── 4.png │ │ ├── 5.png │ │ ├── 6.png │ │ ├── 7.png │ │ ├── 8.png │ │ └── 9.png │ │ ├── img00 Cyberpunk 
--n 160 --b 2.8 --s 1.1 │ │ └── output.png │ │ ├── img00 Embroidery Art --n 160 --b 1.8 --s 0.8 │ │ └── output.png │ │ ├── img00 Pixel Punk --n 160 --b 2.8 --s 0.8 │ │ └── output.png │ │ ├── img00 Sketching --n 160 --b 2.8 --s 0.8 │ │ └── output.png │ │ ├── img00 Studio Ghibli --n 160 --b 2.8 --s 0.8 │ │ └── 00000_03.png │ │ ├── img00 Wasteland --n 160 --b 2.8 --s 0.8 │ │ └── output.png │ │ ├── img00 Watercolor Painting --n 160 --b 2.9 --s 0.3 │ │ └── output.png │ │ ├── img00 chineseink --n 320 --b 2.5 --s 0.5 │ │ └── output.png │ │ ├── img00 oilpainting --n 160 --b 2.8 --s 0.4 │ │ └── output.png │ │ ├── img01 Cyberpunk --n 320 --b 2.8 --s 0.8 │ │ └── output.png │ │ ├── img01 Embroidery Art--n 320 --b 2.0 --s 1.2 │ │ └── output.png │ │ ├── img01 Oil Painting --n 160 --b 3.0 --s 1.4 │ │ └── output.png │ │ ├── img01 Pixel Punk --n 320 --b 2.8 --s 0.8 │ │ └── output.png │ │ ├── img01 Sketching --n 320 --b 2.6 --s 1.0 │ │ └── output.png │ │ ├── img01 Studio Ghibli --n 160 --b 2.5 --s 1.2 │ │ └── output.png │ │ ├── img01 Wasteland --n 320 --b 2.8 --s 0.8 │ │ └── output.png │ │ ├── img01 Watercolor Painting --n 160 --b 3.0 --s 1.4 │ │ └── output.png │ │ ├── img01 chineseink--n 320 --b 2.5 --s 0.6 │ │ └── 00000_00.png │ │ ├── img02 Cyberpunk --n 160 --b 2.8 --s 1.6 │ │ └── output.png │ │ ├── img02 Embroidery Art --n 160 --b 2.4 --s 1.7 │ │ └── output.png │ │ ├── img02 OilPainting --n 160 --b 2.6 --s 1.8 │ │ └── output.png │ │ ├── img02 Sketching --n 160 --b 2.8 --s 1.2 │ │ └── output.png │ │ ├── img02 Studio Ghibli --n 160 --b 2.8 --s 1.6 │ │ └── output.png │ │ ├── img02 chineseink --n 160 --b 2.8 --s 1.1 │ │ └── output.png │ │ ├── img02 pixel punk --n 160 --b 2.8 --s 1.4 │ │ └── output.png │ │ ├── img02 wasteland --n 160 --b 2.8 --s 1.4 │ │ └── output.png │ │ ├── img02 watercolor painting --n 160 --b 2.6 --s 0.6 │ │ └── output.png │ │ ├── img03 Cyberpunk --n 160 --b 2.2 --s 1.0 │ │ └── output.png │ │ ├── img03 Embroidery Art --n 160 --b 2.8 --s 1.2 │ │ └── output.png │ │ ├── img03 Oil painting --n 160 --b 2.8 --s 1.4 │ │ └── 00000_02.png │ │ ├── img03 Pixel punk --n 160 --b 2.2 --s 1.6 │ │ └── output.png │ │ ├── img03 Sketching --n 160 --b 2.8 --s 2.5 │ │ └── output.png │ │ ├── img03 Studio Ghibli --n 160 --b 2.8 --s 1.8 │ │ └── output.png │ │ ├── img03 Watercolor Painting --n 160 --b 2.8 --s 1.4 │ │ └── output.png │ │ ├── img03 chinese ink --n 160 --b 3.0 --s 0.6 │ │ └── output.png │ │ ├── img03 wasteland --n 160 --b 2.8 --s 0.5 │ │ └── output.png │ │ ├── img04 Chinese Ink --n 160 --b 2.8 --s 1.5 │ │ └── output.png │ │ ├── img04 Cyberpunk --n 160 --b 1.8 --s 1.6 │ │ └── output.png │ │ ├── img04 Embroidery Art --n 160 --b 2.6 --s 0.8 │ │ └── output.png │ │ ├── img04 OilPainting --n 160 --b 2.6 --s 0.6 │ │ └── output.png │ │ ├── img04 Pixel punk --n 160 --b 2.2 --s 0.8 │ │ └── output.png │ │ ├── img04 Sketching --n 320 --b 2.8 --s 1.0 │ │ └── output.png │ │ ├── img04 Studio Ghibli --n 160 --b 3.0 --s 0.5 │ │ └── output.png │ │ ├── img04 Wasteland --n 160 --b 2.8 --s 1.4 │ │ └── output.png │ │ ├── img04 Watercolor Painting --n 160 --b 2.6 --s 0.6 │ │ └── output.png │ │ ├── img05 Cyberpunk --n 160 --b 2.8 --s 0.7 │ │ └── output.png │ │ ├── img05 Chinese Ink --n 160 --b 2.8 --s 1.6 │ │ └── output.png │ │ ├── img05 Embroidery Art --n 160 --b 2.8 --s 0.6 │ │ └── output.png │ │ ├── img05 Oilpainting --n 160 --b 2.8 --s 1.2 │ │ └── output.png │ │ ├── img05 Pixel punk --n 160 --b 2.8 --s 1.4 │ │ └── output.png │ │ ├── img05 Sketching --n 160 --b 3.0 --s 1.6 │ │ └── output.png │ │ ├── img05 Studio Ghibli 
--n 160 --b 2.8 --s 1.6 │ │ └── output.png │ │ ├── img05 Wasteland --n 160 --b 2.8 --s 0.6 │ │ └── output.png │ │ ├── img05 Watercolor painting --n 160 --b 2.8 --s 1.2 │ │ └── output.png │ │ ├── img10 Illumination --n 320 --b 3.5 --s 2.2 │ │ └── output.png │ │ ├── img10 JOJO --n 160 --b 3.0 --s 0.8 │ │ └── output.png │ │ ├── img10 Studio Ghibli --n 160 --b 3.0 --s 1.1 │ │ └── output.png │ │ ├── img10 Ufotable --n 320 --b 2.8 --s 1.6 │ │ └── output.png │ │ ├── img11 Illumination --n 160 --b 2.6 --s 1.2 │ │ └── output.png │ │ ├── img11 JOJO --n 160 --b 2.8 --s 0.6 │ │ └── output.png │ │ ├── img11 Studio Ghibli --n 160 --b 2.8 --s 1.2 │ │ └── output.png │ │ ├── img11 Ufotable --n 160 --b 3.0 --s 1.1 │ │ └── output.png │ │ ├── img17 JOJO --n 160 --b 3.0 --s 0.8 │ │ └── output.png │ │ ├── img17 Ufotable --n 160 --b 3.0 --s 1.4 │ │ └── output.png │ │ ├── img17 Ufotable --n 160 --b 3.5 --s 0.8 │ │ └── output.png │ │ ├── img18 Children Crayon Painting --n 160 --b 2.2 --s 1.4 │ │ └── output.png │ │ ├── img18 LEGO Toy --n 160 --b 2.2 --s 1.4 │ │ └── output.png │ │ ├── img18 Origami --n 160 --b 2.2 --s 1.4 │ │ └── output.png │ │ ├── img18 Pixel --n 160 --b 1.8 --s 1.4 │ │ └── output.png │ │ ├── img19 Children Crayon Painting --n 160 --b 2.4 --s 1.4 │ │ └── output.png │ │ ├── img19 LEGO Toy --n 160 --b 2.8 --s 1.4 │ │ └── output.png │ │ ├── img19 Pixel --n 160 --b 2.4 --s 1.4 │ │ └── output.png │ │ ├── img20 Children Crayon Painting --n 160 --b 2.2 --s 1.4 │ │ └── output.png │ │ ├── img20 LEGO Toy --n 160 --b 2.2 --s 1.6 │ │ └── output.png │ │ ├── img20 Origami --n 160 --b 2.8 --s 0.8 │ │ └── output.png │ │ ├── img20 Pixel --n 160 --b 2.5 --s 1.6 │ │ └── output.png │ │ ├── img21 Children Crayon Painting --n 160 --b 2.4 --s 1.4 │ │ └── output.png │ │ ├── img21 LEGO Toy --n 160 --b 1.8 --s 1.4 │ │ └── output.png │ │ ├── img21 Origami --n 160 --b 1.0 --s 1.4 │ │ └── output.png │ │ ├── img21 Pixel --n 160 --b 2.5 --s 1.2 │ │ └── output.png │ │ └── t ├── __pycache__ │ ├── pipeline_stable_diffusion_img2img.cpython-38.pyc │ ├── pipeline_stable_diffusion_xl.cpython-311.pyc │ └── pipeline_stable_diffusion_xl.cpython-38.pyc ├── centercrop.py ├── pipeline_stable_diffusion_img2img.py ├── pipeline_stable_diffusion_xl.py ├── stable_diffusion_xl_test.py ├── style_prompt0.json ├── style_prompt1.json ├── style_prompt2.json ├── style_prompt3.json ├── style_prompt4.json └── style_prompt5.json └── imgs └── teaser.png /diffusers/src/diffusers.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers.egg-info/entry_points.txt: -------------------------------------------------------------------------------- 1 | [console_scripts] 2 | diffusers-cli = diffusers.commands.diffusers_cli:main 3 | -------------------------------------------------------------------------------- /diffusers/src/diffusers.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | importlib_metadata 2 | filelock 3 | huggingface-hub>=0.13.2 4 | numpy 5 | regex!=2019.12.17 6 | requests 7 | safetensors>=0.3.1 8 | Pillow 9 | 10 | [dev] 11 | urllib3<=2.0.0 12 | black~=23.1 13 | isort>=5.5.4 14 | ruff==0.0.280 15 | hf-doc-builder>=0.3.0 16 | compel==0.1.8 17 | datasets 18 | Jinja2 19 | invisible-watermark>=0.2.0 20 | k-diffusion>=0.0.12 21 | librosa 22 | omegaconf 23 | parameterized 24 | pytest 25 | pytest-timeout 26 | pytest-xdist 27 | 
requests-mock==1.10.0 28 | safetensors>=0.3.1 29 | sentencepiece!=0.1.92,>=0.1.91 30 | scipy 31 | torchvision 32 | transformers>=4.25.1 33 | accelerate>=0.11.0 34 | protobuf<4,>=3.20.3 35 | tensorboard 36 | torch>=1.4 37 | jax!=0.3.2,>=0.2.8 38 | jaxlib>=0.1.65 39 | flax>=0.4.1 40 | 41 | [docs] 42 | hf-doc-builder>=0.3.0 43 | 44 | [flax] 45 | jax!=0.3.2,>=0.2.8 46 | jaxlib>=0.1.65 47 | flax>=0.4.1 48 | 49 | [quality] 50 | urllib3<=2.0.0 51 | black~=23.1 52 | isort>=5.5.4 53 | ruff==0.0.280 54 | hf-doc-builder>=0.3.0 55 | 56 | [test] 57 | compel==0.1.8 58 | datasets 59 | Jinja2 60 | invisible-watermark>=0.2.0 61 | k-diffusion>=0.0.12 62 | librosa 63 | omegaconf 64 | parameterized 65 | pytest 66 | pytest-timeout 67 | pytest-xdist 68 | requests-mock==1.10.0 69 | safetensors>=0.3.1 70 | sentencepiece!=0.1.92,>=0.1.91 71 | scipy 72 | torchvision 73 | transformers>=4.25.1 74 | 75 | [torch] 76 | torch>=1.4 77 | accelerate>=0.11.0 78 | 79 | [training] 80 | accelerate>=0.11.0 81 | datasets 82 | protobuf<4,>=3.20.3 83 | tensorboard 84 | Jinja2 85 | -------------------------------------------------------------------------------- /diffusers/src/diffusers.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | diffusers 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/commands/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from abc import ABC, abstractmethod 16 | from argparse import ArgumentParser 17 | 18 | 19 | class BaseDiffusersCLICommand(ABC): 20 | @staticmethod 21 | @abstractmethod 22 | def register_subcommand(parser: ArgumentParser): 23 | raise NotImplementedError() 24 | 25 | @abstractmethod 26 | def run(self): 27 | raise NotImplementedError() 28 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/commands/diffusers_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2023 The HuggingFace Team. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
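# Entry point for the `diffusers-cli` console script (registered in entry_points.txt above as
# `diffusers-cli = diffusers.commands.diffusers_cli:main`): `main()` below registers the `env`
# and `fp16_safetensors` subcommands and dispatches to the selected command's `run()`.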
15 | 16 | from argparse import ArgumentParser 17 | 18 | from .env import EnvironmentCommand 19 | from .fp16_safetensors import FP16SafetensorsCommand 20 | 21 | 22 | def main(): 23 | parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli []") 24 | commands_parser = parser.add_subparsers(help="diffusers-cli command helpers") 25 | 26 | # Register commands 27 | EnvironmentCommand.register_subcommand(commands_parser) 28 | FP16SafetensorsCommand.register_subcommand(commands_parser) 29 | 30 | # Let's go 31 | args = parser.parse_args() 32 | 33 | if not hasattr(args, "func"): 34 | parser.print_help() 35 | exit(1) 36 | 37 | # Run 38 | service = args.func(args) 39 | service.run() 40 | 41 | 42 | if __name__ == "__main__": 43 | main() 44 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/commands/env.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import platform 16 | from argparse import ArgumentParser 17 | 18 | import huggingface_hub 19 | 20 | from .. import __version__ as version 21 | from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available 22 | from . 
import BaseDiffusersCLICommand 23 | 24 | 25 | def info_command_factory(_): 26 | return EnvironmentCommand() 27 | 28 | 29 | class EnvironmentCommand(BaseDiffusersCLICommand): 30 | @staticmethod 31 | def register_subcommand(parser: ArgumentParser): 32 | download_parser = parser.add_parser("env") 33 | download_parser.set_defaults(func=info_command_factory) 34 | 35 | def run(self): 36 | hub_version = huggingface_hub.__version__ 37 | 38 | pt_version = "not installed" 39 | pt_cuda_available = "NA" 40 | if is_torch_available(): 41 | import torch 42 | 43 | pt_version = torch.__version__ 44 | pt_cuda_available = torch.cuda.is_available() 45 | 46 | transformers_version = "not installed" 47 | if is_transformers_available(): 48 | import transformers 49 | 50 | transformers_version = transformers.__version__ 51 | 52 | accelerate_version = "not installed" 53 | if is_accelerate_available(): 54 | import accelerate 55 | 56 | accelerate_version = accelerate.__version__ 57 | 58 | xformers_version = "not installed" 59 | if is_xformers_available(): 60 | import xformers 61 | 62 | xformers_version = xformers.__version__ 63 | 64 | info = { 65 | "`diffusers` version": version, 66 | "Platform": platform.platform(), 67 | "Python version": platform.python_version(), 68 | "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", 69 | "Huggingface_hub version": hub_version, 70 | "Transformers version": transformers_version, 71 | "Accelerate version": accelerate_version, 72 | "xFormers version": xformers_version, 73 | "Using GPU in script?": "", 74 | "Using distributed or parallel set-up in script?": "", 75 | } 76 | 77 | print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n") 78 | print(self.format_dict(info)) 79 | 80 | return info 81 | 82 | @staticmethod 83 | def format_dict(d): 84 | return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" 85 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/dependency_versions_check.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
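# Import-time sanity check: verifies that the core runtime dependencies (as pinned in
# dependency_versions_table.py) are installed in compatible versions; optional packages
# such as `tokenizers` are only version-checked when they are actually installed.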
14 | import sys 15 | 16 | from .dependency_versions_table import deps 17 | from .utils.versions import require_version, require_version_core 18 | 19 | 20 | # define which module versions we always want to check at run time 21 | # (usually the ones defined in `install_requires` in setup.py) 22 | # 23 | # order specific notes: 24 | # - tqdm must be checked before tokenizers 25 | 26 | pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split() 27 | if sys.version_info < (3, 7): 28 | pkgs_to_check_at_runtime.append("dataclasses") 29 | if sys.version_info < (3, 8): 30 | pkgs_to_check_at_runtime.append("importlib_metadata") 31 | 32 | for pkg in pkgs_to_check_at_runtime: 33 | if pkg in deps: 34 | if pkg == "tokenizers": 35 | # must be loaded here, or else tqdm check may fail 36 | from .utils import is_tokenizers_available 37 | 38 | if not is_tokenizers_available(): 39 | continue # not required, check version only if installed 40 | 41 | require_version_core(deps[pkg]) 42 | else: 43 | raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") 44 | 45 | 46 | def dep_version_check(pkg, hint=None): 47 | require_version(deps[pkg], hint) 48 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/dependency_versions_table.py: -------------------------------------------------------------------------------- 1 | # THIS FILE HAS BEEN AUTOGENERATED. To update: 2 | # 1. modify the `_deps` dict in setup.py 3 | # 2. run `make deps_table_update`` 4 | deps = { 5 | "Pillow": "Pillow", 6 | "accelerate": "accelerate>=0.11.0", 7 | "compel": "compel==0.1.8", 8 | "black": "black~=23.1", 9 | "datasets": "datasets", 10 | "filelock": "filelock", 11 | "flax": "flax>=0.4.1", 12 | "hf-doc-builder": "hf-doc-builder>=0.3.0", 13 | "huggingface-hub": "huggingface-hub>=0.13.2", 14 | "requests-mock": "requests-mock==1.10.0", 15 | "importlib_metadata": "importlib_metadata", 16 | "invisible-watermark": "invisible-watermark>=0.2.0", 17 | "isort": "isort>=5.5.4", 18 | "jax": "jax>=0.2.8,!=0.3.2", 19 | "jaxlib": "jaxlib>=0.1.65", 20 | "Jinja2": "Jinja2", 21 | "k-diffusion": "k-diffusion>=0.0.12", 22 | "torchsde": "torchsde", 23 | "note_seq": "note_seq", 24 | "librosa": "librosa", 25 | "numpy": "numpy", 26 | "omegaconf": "omegaconf", 27 | "parameterized": "parameterized", 28 | "protobuf": "protobuf>=3.20.3,<4", 29 | "pytest": "pytest", 30 | "pytest-timeout": "pytest-timeout", 31 | "pytest-xdist": "pytest-xdist", 32 | "ruff": "ruff==0.0.280", 33 | "safetensors": "safetensors>=0.3.1", 34 | "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", 35 | "scipy": "scipy", 36 | "onnx": "onnx", 37 | "regex": "regex!=2019.12.17", 38 | "requests": "requests", 39 | "tensorboard": "tensorboard", 40 | "torch": "torch>=1.4", 41 | "torchvision": "torchvision", 42 | "transformers": "transformers>=4.25.1", 43 | "urllib3": "urllib3<=2.0.0", 44 | } 45 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/experimental/README.md: -------------------------------------------------------------------------------- 1 | # 🧨 Diffusers Experimental 2 | 3 | We are adding experimental code to support novel applications and usages of the Diffusers library. 4 | Currently, the following experiments are supported: 5 | * Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model. 
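
A minimal usage sketch for the exported pipeline (the environment, checkpoint id, and call arguments below are illustrative assumptions rather than documented API; see `rl/value_guided_sampling.py` for the actual interface):

```python
# Hypothetical sketch: the experimental RL pipeline is re-exported at the package
# level and follows the usual `DiffusionPipeline` loading pattern.
import gym  # assumption: a D4RL-style locomotion environment is installed

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")  # hypothetical environment id
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32",  # illustrative checkpoint
    env=env,
)

obs = env.reset()
action = pipeline(obs, planning_horizon=32)  # plan a trajectory, return the next action
```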
-------------------------------------------------------------------------------- /diffusers/src/diffusers/experimental/__init__.py: -------------------------------------------------------------------------------- 1 | from .rl import ValueGuidedRLPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/experimental/rl/__init__.py: -------------------------------------------------------------------------------- 1 | from .value_guided_sampling import ValueGuidedRLPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/models/README.md: -------------------------------------------------------------------------------- 1 | # Models 2 | 3 | For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models/overview). -------------------------------------------------------------------------------- /diffusers/src/diffusers/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from ..utils import is_flax_available, is_torch_available 16 | 17 | 18 | if is_torch_available(): 19 | from .adapter import MultiAdapter, T2IAdapter 20 | from .autoencoder_asym_kl import AsymmetricAutoencoderKL 21 | from .autoencoder_kl import AutoencoderKL 22 | from .autoencoder_tiny import AutoencoderTiny 23 | from .controlnet import ControlNetModel 24 | from .dual_transformer_2d import DualTransformer2DModel 25 | from .modeling_utils import ModelMixin 26 | from .prior_transformer import PriorTransformer 27 | from .t5_film_transformer import T5FilmDecoder 28 | from .transformer_2d import Transformer2DModel 29 | from .unet_1d import UNet1DModel 30 | from .unet_2d import UNet2DModel 31 | from .unet_2d_condition import UNet2DConditionModel 32 | from .unet_3d_condition import UNet3DConditionModel 33 | from .vq_model import VQModel 34 | 35 | if is_flax_available(): 36 | from .controlnet_flax import FlaxControlNetModel 37 | from .unet_2d_condition_flax import FlaxUNet2DConditionModel 38 | from .vae_flax import FlaxAutoencoderKL 39 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/models/activations.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | def get_activation(act_fn): 5 | if act_fn in ["swish", "silu"]: 6 | return nn.SiLU() 7 | elif act_fn == "mish": 8 | return nn.Mish() 9 | elif act_fn == "gelu": 10 | return nn.GELU() 11 | elif act_fn == "relu": 12 | return nn.ReLU() 13 | else: 14 | raise ValueError(f"Unsupported activation function: {act_fn}") 15 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/models/embeddings_flax.py: 
-------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import math 15 | 16 | import flax.linen as nn 17 | import jax.numpy as jnp 18 | 19 | 20 | def get_sinusoidal_embeddings( 21 | timesteps: jnp.ndarray, 22 | embedding_dim: int, 23 | freq_shift: float = 1, 24 | min_timescale: float = 1, 25 | max_timescale: float = 1.0e4, 26 | flip_sin_to_cos: bool = False, 27 | scale: float = 1.0, 28 | ) -> jnp.ndarray: 29 | """Returns the positional encoding (same as Tensor2Tensor). 30 | 31 | Args: 32 | timesteps: a 1-D Tensor of N indices, one per batch element. 33 | These may be fractional. 34 | embedding_dim: The number of output channels. 35 | min_timescale: The smallest time unit (should probably be 0.0). 36 | max_timescale: The largest time unit. 37 | Returns: 38 | a Tensor of timing signals [N, num_channels] 39 | """ 40 | assert timesteps.ndim == 1, "Timesteps should be a 1d-array" 41 | assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even" 42 | num_timescales = float(embedding_dim // 2) 43 | log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift) 44 | inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment) 45 | emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0) 46 | 47 | # scale embeddings 48 | scaled_time = scale * emb 49 | 50 | if flip_sin_to_cos: 51 | signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1) 52 | else: 53 | signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1) 54 | signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim]) 55 | return signal 56 | 57 | 58 | class FlaxTimestepEmbedding(nn.Module): 59 | r""" 60 | Time step Embedding Module. Learns embeddings for input time steps. 
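Internally this is a small two-layer MLP: two `nn.Dense` projections with a `silu` nonlinearity in between, mapping the input time embedding to `time_embed_dim` channels.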
61 | 62 | Args: 63 | time_embed_dim (`int`, *optional*, defaults to `32`): 64 | Time step embedding dimension 65 | dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): 66 | Parameters `dtype` 67 | """ 68 | time_embed_dim: int = 32 69 | dtype: jnp.dtype = jnp.float32 70 | 71 | @nn.compact 72 | def __call__(self, temb): 73 | temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb) 74 | temb = nn.silu(temb) 75 | temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb) 76 | return temb 77 | 78 | 79 | class FlaxTimesteps(nn.Module): 80 | r""" 81 | Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239 82 | 83 | Args: 84 | dim (`int`, *optional*, defaults to `32`): 85 | Time step embedding dimension 86 | """ 87 | dim: int = 32 88 | flip_sin_to_cos: bool = False 89 | freq_shift: float = 1 90 | 91 | @nn.compact 92 | def __call__(self, timesteps): 93 | return get_sinusoidal_embeddings( 94 | timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift 95 | ) 96 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/models/lora.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from typing import Optional 16 | 17 | import torch.nn.functional as F 18 | from torch import nn 19 | 20 | 21 | class LoRALinearLayer(nn.Module): 22 | def __init__(self, in_features, out_features, rank=4, network_alpha=None, device=None, dtype=None): 23 | super().__init__() 24 | 25 | self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype) 26 | self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype) 27 | # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. 
28 | # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning 29 | self.network_alpha = network_alpha 30 | self.rank = rank 31 | 32 | nn.init.normal_(self.down.weight, std=1 / rank) 33 | nn.init.zeros_(self.up.weight) 34 | 35 | def forward(self, hidden_states): 36 | orig_dtype = hidden_states.dtype 37 | dtype = self.down.weight.dtype 38 | 39 | down_hidden_states = self.down(hidden_states.to(dtype)) 40 | up_hidden_states = self.up(down_hidden_states) 41 | 42 | if self.network_alpha is not None: 43 | up_hidden_states *= self.network_alpha / self.rank 44 | 45 | return up_hidden_states.to(orig_dtype) 46 | 47 | 48 | class LoRAConv2dLayer(nn.Module): 49 | def __init__( 50 | self, in_features, out_features, rank=4, kernel_size=(1, 1), stride=(1, 1), padding=0, network_alpha=None 51 | ): 52 | super().__init__() 53 | 54 | self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) 55 | # according to the official kohya_ss trainer kernel_size are always fixed for the up layer 56 | # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129 57 | self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False) 58 | 59 | # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. 60 | # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning 61 | self.network_alpha = network_alpha 62 | self.rank = rank 63 | 64 | nn.init.normal_(self.down.weight, std=1 / rank) 65 | nn.init.zeros_(self.up.weight) 66 | 67 | def forward(self, hidden_states): 68 | orig_dtype = hidden_states.dtype 69 | dtype = self.down.weight.dtype 70 | 71 | down_hidden_states = self.down(hidden_states.to(dtype)) 72 | up_hidden_states = self.up(down_hidden_states) 73 | 74 | if self.network_alpha is not None: 75 | up_hidden_states *= self.network_alpha / self.rank 76 | 77 | return up_hidden_states.to(orig_dtype) 78 | 79 | 80 | class LoRACompatibleConv(nn.Conv2d): 81 | """ 82 | A convolutional layer that can be used with LoRA. 83 | """ 84 | 85 | def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs): 86 | super().__init__(*args, **kwargs) 87 | self.lora_layer = lora_layer 88 | 89 | def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]): 90 | self.lora_layer = lora_layer 91 | 92 | def forward(self, x): 93 | if self.lora_layer is None: 94 | # make sure to the functional Conv2D function as otherwise torch.compile's graph will break 95 | # see: https://github.com/huggingface/diffusers/pull/4315 96 | return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) 97 | else: 98 | return super().forward(x) + self.lora_layer(x) 99 | 100 | 101 | class LoRACompatibleLinear(nn.Linear): 102 | """ 103 | A Linear layer that can be used with LoRA. 
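If a LoRA layer has been attached via `set_lora_layer`, its low-rank output is added to the regular `nn.Linear` projection in `forward`; with no LoRA layer set, the module behaves exactly like a standard `nn.Linear`.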
104 | """ 105 | 106 | def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs): 107 | super().__init__(*args, **kwargs) 108 | self.lora_layer = lora_layer 109 | 110 | def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]): 111 | self.lora_layer = lora_layer 112 | 113 | def forward(self, x): 114 | if self.lora_layer is None: 115 | return super().forward(x) 116 | else: 117 | return super().forward(x) + self.lora_layer(x) 118 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/models/resnet_flax.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import flax.linen as nn 15 | import jax 16 | import jax.numpy as jnp 17 | 18 | 19 | class FlaxUpsample2D(nn.Module): 20 | out_channels: int 21 | dtype: jnp.dtype = jnp.float32 22 | 23 | def setup(self): 24 | self.conv = nn.Conv( 25 | self.out_channels, 26 | kernel_size=(3, 3), 27 | strides=(1, 1), 28 | padding=((1, 1), (1, 1)), 29 | dtype=self.dtype, 30 | ) 31 | 32 | def __call__(self, hidden_states): 33 | batch, height, width, channels = hidden_states.shape 34 | hidden_states = jax.image.resize( 35 | hidden_states, 36 | shape=(batch, height * 2, width * 2, channels), 37 | method="nearest", 38 | ) 39 | hidden_states = self.conv(hidden_states) 40 | return hidden_states 41 | 42 | 43 | class FlaxDownsample2D(nn.Module): 44 | out_channels: int 45 | dtype: jnp.dtype = jnp.float32 46 | 47 | def setup(self): 48 | self.conv = nn.Conv( 49 | self.out_channels, 50 | kernel_size=(3, 3), 51 | strides=(2, 2), 52 | padding=((1, 1), (1, 1)), # padding="VALID", 53 | dtype=self.dtype, 54 | ) 55 | 56 | def __call__(self, hidden_states): 57 | # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim 58 | # hidden_states = jnp.pad(hidden_states, pad_width=pad) 59 | hidden_states = self.conv(hidden_states) 60 | return hidden_states 61 | 62 | 63 | class FlaxResnetBlock2D(nn.Module): 64 | in_channels: int 65 | out_channels: int = None 66 | dropout_prob: float = 0.0 67 | use_nin_shortcut: bool = None 68 | dtype: jnp.dtype = jnp.float32 69 | 70 | def setup(self): 71 | out_channels = self.in_channels if self.out_channels is None else self.out_channels 72 | 73 | self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5) 74 | self.conv1 = nn.Conv( 75 | out_channels, 76 | kernel_size=(3, 3), 77 | strides=(1, 1), 78 | padding=((1, 1), (1, 1)), 79 | dtype=self.dtype, 80 | ) 81 | 82 | self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype) 83 | 84 | self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5) 85 | self.dropout = nn.Dropout(self.dropout_prob) 86 | self.conv2 = nn.Conv( 87 | out_channels, 88 | kernel_size=(3, 3), 89 | strides=(1, 1), 90 | padding=((1, 1), (1, 1)), 91 | dtype=self.dtype, 92 | ) 93 | 94 | use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else 
self.use_nin_shortcut 95 | 96 | self.conv_shortcut = None 97 | if use_nin_shortcut: 98 | self.conv_shortcut = nn.Conv( 99 | out_channels, 100 | kernel_size=(1, 1), 101 | strides=(1, 1), 102 | padding="VALID", 103 | dtype=self.dtype, 104 | ) 105 | 106 | def __call__(self, hidden_states, temb, deterministic=True): 107 | residual = hidden_states 108 | hidden_states = self.norm1(hidden_states) 109 | hidden_states = nn.swish(hidden_states) 110 | hidden_states = self.conv1(hidden_states) 111 | 112 | temb = self.time_emb_proj(nn.swish(temb)) 113 | temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1) 114 | hidden_states = hidden_states + temb 115 | 116 | hidden_states = self.norm2(hidden_states) 117 | hidden_states = nn.swish(hidden_states) 118 | hidden_states = self.dropout(hidden_states, deterministic) 119 | hidden_states = self.conv2(hidden_states) 120 | 121 | if self.conv_shortcut is not None: 122 | residual = self.conv_shortcut(residual) 123 | 124 | return hidden_states + residual 125 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipeline_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | 14 | # limitations under the License. 15 | 16 | # NOTE: This file is deprecated and will be removed in a future version. 17 | # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works 18 | 19 | from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 20 | from .utils import deprecate 21 | 22 | 23 | deprecate( 24 | "pipelines_utils", 25 | "0.22.0", 26 | "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.", 27 | standard_warn=False, 28 | stacklevel=3, 29 | ) 30 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/alt_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import PIL 6 | from PIL import Image 7 | 8 | from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 9 | 10 | 11 | @dataclass 12 | # Copied from diffusers.pipelines.stable_diffusion.__init__.StableDiffusionPipelineOutput with Stable->Alt 13 | class AltDiffusionPipelineOutput(BaseOutput): 14 | """ 15 | Output class for Alt Diffusion pipelines. 16 | 17 | Args: 18 | images (`List[PIL.Image.Image]` or `np.ndarray`) 19 | List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, 20 | num_channels)`. 
21 | nsfw_content_detected (`List[bool]`) 22 | List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or 23 | `None` if safety checking could not be performed. 24 | """ 25 | 26 | images: Union[List[PIL.Image.Image], np.ndarray] 27 | nsfw_content_detected: Optional[List[bool]] 28 | 29 | 30 | try: 31 | if not (is_transformers_available() and is_torch_available()): 32 | raise OptionalDependencyNotAvailable() 33 | except OptionalDependencyNotAvailable: 34 | from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline 35 | else: 36 | from .modeling_roberta_series import RobertaSeriesModelWithTransformation 37 | from .pipeline_alt_diffusion import AltDiffusionPipeline 38 | from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline 39 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/audio_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from .mel import Mel 2 | from .pipeline_audio_diffusion import AudioDiffusionPipeline 3 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/audioldm/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | AudioLDMPipeline, 15 | ) 16 | else: 17 | from .pipeline_audioldm import AudioLDMPipeline 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/audioldm2/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | AudioLDM2Pipeline, 15 | AudioLDM2ProjectionModel, 16 | AudioLDM2UNet2DConditionModel, 17 | ) 18 | else: 19 | from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel 20 | from .pipeline_audioldm2 import AudioLDM2Pipeline 21 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/consistency_models/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_consistency_models import ConsistencyModelPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/controlnet/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_flax_available, 4 | is_torch_available, 5 | is_transformers_available, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and 
is_torch_available()): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 14 | else: 15 | from .multicontrolnet import MultiControlNetModel 16 | from .pipeline_controlnet import StableDiffusionControlNetPipeline 17 | from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline 18 | from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline 19 | from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline 20 | 21 | 22 | if is_transformers_available() and is_flax_available(): 23 | from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline 24 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/dance_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_dance_diffusion import DanceDiffusionPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/ddim/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_ddim import DDIMPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/ddpm/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_ddpm import DDPMPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/deepfloyd_if/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import PIL 6 | 7 | from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 8 | from .timesteps import ( 9 | fast27_timesteps, 10 | smart27_timesteps, 11 | smart50_timesteps, 12 | smart100_timesteps, 13 | smart185_timesteps, 14 | super27_timesteps, 15 | super40_timesteps, 16 | super100_timesteps, 17 | ) 18 | 19 | 20 | @dataclass 21 | class IFPipelineOutput(BaseOutput): 22 | """ 23 | Args: 24 | Output class for Stable Diffusion pipelines. 25 | images (`List[PIL.Image.Image]` or `np.ndarray`) 26 | List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, 27 | num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 28 | nsfw_detected (`List[bool]`) 29 | List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" 30 | (nsfw) content or a watermark. `None` if safety checking could not be performed. 31 | watermark_detected (`List[bool]`) 32 | List of flags denoting whether the corresponding generated image likely has a watermark. `None` if safety 33 | checking could not be performed. 
34 | """ 35 | 36 | images: Union[List[PIL.Image.Image], np.ndarray] 37 | nsfw_detected: Optional[List[bool]] 38 | watermark_detected: Optional[List[bool]] 39 | 40 | 41 | try: 42 | if not (is_transformers_available() and is_torch_available()): 43 | raise OptionalDependencyNotAvailable() 44 | except OptionalDependencyNotAvailable: 45 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 46 | else: 47 | from .pipeline_if import IFPipeline 48 | from .pipeline_if_img2img import IFImg2ImgPipeline 49 | from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline 50 | from .pipeline_if_inpainting import IFInpaintingPipeline 51 | from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline 52 | from .pipeline_if_superresolution import IFSuperResolutionPipeline 53 | from .safety_checker import IFSafetyChecker 54 | from .watermark import IFWatermarker 55 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/deepfloyd_if/safety_checker.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel 5 | 6 | from ...utils import logging 7 | 8 | 9 | logger = logging.get_logger(__name__) 10 | 11 | 12 | class IFSafetyChecker(PreTrainedModel): 13 | config_class = CLIPConfig 14 | 15 | _no_split_modules = ["CLIPEncoderLayer"] 16 | 17 | def __init__(self, config: CLIPConfig): 18 | super().__init__(config) 19 | 20 | self.vision_model = CLIPVisionModelWithProjection(config.vision_config) 21 | 22 | self.p_head = nn.Linear(config.vision_config.projection_dim, 1) 23 | self.w_head = nn.Linear(config.vision_config.projection_dim, 1) 24 | 25 | @torch.no_grad() 26 | def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5): 27 | image_embeds = self.vision_model(clip_input)[0] 28 | 29 | nsfw_detected = self.p_head(image_embeds) 30 | nsfw_detected = nsfw_detected.flatten() 31 | nsfw_detected = nsfw_detected > p_threshold 32 | nsfw_detected = nsfw_detected.tolist() 33 | 34 | if any(nsfw_detected): 35 | logger.warning( 36 | "Potential NSFW content was detected in one or more images. A black image will be returned instead." 37 | " Try again with a different prompt and/or seed." 38 | ) 39 | 40 | for idx, nsfw_detected_ in enumerate(nsfw_detected): 41 | if nsfw_detected_: 42 | images[idx] = np.zeros(images[idx].shape) 43 | 44 | watermark_detected = self.w_head(image_embeds) 45 | watermark_detected = watermark_detected.flatten() 46 | watermark_detected = watermark_detected > w_threshold 47 | watermark_detected = watermark_detected.tolist() 48 | 49 | if any(watermark_detected): 50 | logger.warning( 51 | "Potential watermarked content was detected in one or more images. A black image will be returned instead." 52 | " Try again with a different prompt and/or seed." 
53 | ) 54 | 55 | for idx, watermark_detected_ in enumerate(watermark_detected): 56 | if watermark_detected_: 57 | images[idx] = np.zeros(images[idx].shape) 58 | 59 | return images, nsfw_detected, watermark_detected 60 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/deepfloyd_if/watermark.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import PIL 4 | import torch 5 | from PIL import Image 6 | 7 | from ...configuration_utils import ConfigMixin 8 | from ...models.modeling_utils import ModelMixin 9 | from ...utils import PIL_INTERPOLATION 10 | 11 | 12 | class IFWatermarker(ModelMixin, ConfigMixin): 13 | def __init__(self): 14 | super().__init__() 15 | 16 | self.register_buffer("watermark_image", torch.zeros((62, 62, 4))) 17 | self.watermark_image_as_pil = None 18 | 19 | def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None): 20 | # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287 21 | 22 | h = images[0].height 23 | w = images[0].width 24 | 25 | sample_size = sample_size or h 26 | 27 | coef = min(h / sample_size, w / sample_size) 28 | img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w) 29 | 30 | S1, S2 = 1024**2, img_w * img_h 31 | K = (S2 / S1) ** 0.5 32 | wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K) 33 | 34 | if self.watermark_image_as_pil is None: 35 | watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy() 36 | watermark_image = Image.fromarray(watermark_image, mode="RGBA") 37 | self.watermark_image_as_pil = watermark_image 38 | 39 | wm_img = self.watermark_image_as_pil.resize( 40 | (wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None 41 | ) 42 | 43 | for pil_img in images: 44 | pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1]) 45 | 46 | return images 47 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/dit/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_dit import DiTPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/kandinsky/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | ) 6 | 7 | 8 | try: 9 | if not (is_transformers_available() and is_torch_available()): 10 | raise OptionalDependencyNotAvailable() 11 | except OptionalDependencyNotAvailable: 12 | from ...utils.dummy_torch_and_transformers_objects import * 13 | else: 14 | from .pipeline_kandinsky import KandinskyPipeline 15 | from .pipeline_kandinsky_combined import ( 16 | KandinskyCombinedPipeline, 17 | KandinskyImg2ImgCombinedPipeline, 18 | KandinskyInpaintCombinedPipeline, 19 | ) 20 | from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline 21 | from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline 22 | from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput 23 | from .text_encoder import MultilingualCLIP 24 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/kandinsky/text_encoder.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel 3 | 4 | 5 | class MCLIPConfig(XLMRobertaConfig): 6 | model_type = "M-CLIP" 7 | 8 | def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs): 9 | self.transformerDimensions = transformerDimSize 10 | self.numDims = imageDimSize 11 | super().__init__(**kwargs) 12 | 13 | 14 | class MultilingualCLIP(PreTrainedModel): 15 | config_class = MCLIPConfig 16 | 17 | def __init__(self, config, *args, **kwargs): 18 | super().__init__(config, *args, **kwargs) 19 | self.transformer = XLMRobertaModel(config) 20 | self.LinearTransformation = torch.nn.Linear( 21 | in_features=config.transformerDimensions, out_features=config.numDims 22 | ) 23 | 24 | def forward(self, input_ids, attention_mask): 25 | embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0] 26 | embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] 27 | return self.LinearTransformation(embs2), embs 28 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/kandinsky2_2/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | ) 6 | 7 | 8 | try: 9 | if not (is_transformers_available() and is_torch_available()): 10 | raise OptionalDependencyNotAvailable() 11 | except OptionalDependencyNotAvailable: 12 | from ...utils.dummy_torch_and_transformers_objects import * 13 | else: 14 | from .pipeline_kandinsky2_2 import KandinskyV22Pipeline 15 | from .pipeline_kandinsky2_2_combined import ( 16 | KandinskyV22CombinedPipeline, 17 | KandinskyV22Img2ImgCombinedPipeline, 18 | KandinskyV22InpaintCombinedPipeline, 19 | ) 20 | from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline 21 | from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline 22 | from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline 23 | from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline 24 | from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline 25 | from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline 26 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/latent_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 2 | from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline 3 | 4 | 5 | try: 6 | if not (is_transformers_available() and is_torch_available()): 7 | raise OptionalDependencyNotAvailable() 8 | except OptionalDependencyNotAvailable: 9 | from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline 10 | else: 11 | from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline 12 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_latent_diffusion_uncond import LDMPipeline 2 | 
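The `MultilingualCLIP.forward` method earlier in this section pools the XLM-R token embeddings with a masked mean (`embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]`) before the linear projection. A minimal self-contained sketch of that pooling step, using toy tensor sizes rather than a real XLM-R encoder, behaves like this:

import torch

# Toy stand-in for the pooling in MultilingualCLIP.forward: average only the
# token embeddings that the attention mask marks as real (non-padding) tokens.
batch_size, seq_len, hidden = 2, 4, 8
embs = torch.randn(batch_size, seq_len, hidden)   # token embeddings from the transformer
attention_mask = torch.tensor([[1, 1, 1, 0],      # first sequence has 3 real tokens
                               [1, 1, 0, 0]])     # second sequence has 2 real tokens

masked_sum = (embs * attention_mask.unsqueeze(2)).sum(dim=1)
pooled = masked_sum / attention_mask.sum(dim=1)[:, None]   # same formula as in forward()

# Sanity check: the first row equals the plain mean over that sequence's real tokens.
assert torch.allclose(pooled[0], embs[0, :3].mean(dim=0), atol=1e-6)
print(pooled.shape)   # torch.Size([2, 8])

The pooled vector is what the `LinearTransformation` layer in the class above then projects to the image embedding dimension.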
-------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/musicldm/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | MusicLDMPipeline, 15 | ) 16 | else: 17 | from .pipeline_musicldm import MusicLDMPipeline 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/paint_by_example/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import PIL 6 | from PIL import Image 7 | 8 | from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 9 | 10 | 11 | try: 12 | if not (is_transformers_available() and is_torch_available()): 13 | raise OptionalDependencyNotAvailable() 14 | except OptionalDependencyNotAvailable: 15 | from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline 16 | else: 17 | from .image_encoder import PaintByExampleImageEncoder 18 | from .pipeline_paint_by_example import PaintByExamplePipeline 19 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/paint_by_example/image_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from torch import nn 16 | from transformers import CLIPPreTrainedModel, CLIPVisionModel 17 | 18 | from ...models.attention import BasicTransformerBlock 19 | from ...utils import logging 20 | 21 | 22 | logger = logging.get_logger(__name__) # pylint: disable=invalid-name 23 | 24 | 25 | class PaintByExampleImageEncoder(CLIPPreTrainedModel): 26 | def __init__(self, config, proj_size=None): 27 | super().__init__(config) 28 | self.proj_size = proj_size or getattr(config, "projection_dim", 768) 29 | 30 | self.model = CLIPVisionModel(config) 31 | self.mapper = PaintByExampleMapper(config) 32 | self.final_layer_norm = nn.LayerNorm(config.hidden_size) 33 | self.proj_out = nn.Linear(config.hidden_size, self.proj_size) 34 | 35 | # uncondition for scaling 36 | self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size))) 37 | 38 | def forward(self, pixel_values, return_uncond_vector=False): 39 | clip_output = self.model(pixel_values=pixel_values) 40 | latent_states = clip_output.pooler_output 41 | latent_states = self.mapper(latent_states[:, None]) 42 | latent_states = self.final_layer_norm(latent_states) 43 | latent_states = self.proj_out(latent_states) 44 | if return_uncond_vector: 45 | return latent_states, self.uncond_vector 46 | 47 | return latent_states 48 | 49 | 50 | class PaintByExampleMapper(nn.Module): 51 | def __init__(self, config): 52 | super().__init__() 53 | num_layers = (config.num_hidden_layers + 1) // 5 54 | hid_size = config.hidden_size 55 | num_heads = 1 56 | self.blocks = nn.ModuleList( 57 | [ 58 | BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True) 59 | for _ in range(num_layers) 60 | ] 61 | ) 62 | 63 | def forward(self, hidden_states): 64 | for block in self.blocks: 65 | hidden_states = block(hidden_states) 66 | 67 | return hidden_states 68 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/pndm/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_pndm import PNDMPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/repaint/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_repaint import RePaintPipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/score_sde_ve/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_score_sde_ve import ScoreSdeVePipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from typing import List, Optional, Tuple, Union 16 | 17 | import torch 18 | 19 | from ...models import UNet2DModel 20 | from ...schedulers import ScoreSdeVeScheduler 21 | from ...utils import randn_tensor 22 | from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput 23 | 24 | 25 | class ScoreSdeVePipeline(DiffusionPipeline): 26 | r""" 27 | Pipeline for unconditional image generation. 28 | 29 | This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods 30 | implemented for all pipelines (downloading, saving, running on a particular device, etc.). 31 | 32 | Parameters: 33 | unet ([`UNet2DModel`]): 34 | A `UNet2DModel` to denoise the encoded image. 35 | scheduler ([`ScoreSdeVeScheduler`]): 36 | A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. 37 | """ 38 | unet: UNet2DModel 39 | scheduler: ScoreSdeVeScheduler 40 | 41 | def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler): 42 | super().__init__() 43 | self.register_modules(unet=unet, scheduler=scheduler) 44 | 45 | @torch.no_grad() 46 | def __call__( 47 | self, 48 | batch_size: int = 1, 49 | num_inference_steps: int = 2000, 50 | generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, 51 | output_type: Optional[str] = "pil", 52 | return_dict: bool = True, 53 | **kwargs, 54 | ) -> Union[ImagePipelineOutput, Tuple]: 55 | r""" 56 | The call function to the pipeline for generation. 57 | 58 | Args: 59 | batch_size (`int`, *optional*, defaults to 1): 60 | The number of images to generate. 61 | generator (`torch.Generator`, `optional`): 62 | A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make 63 | generation deterministic. 64 | output_type (`str`, `optional`, defaults to `"pil"`): 65 | The output format of the generated image. Choose between `PIL.Image` or `np.array`. 66 | return_dict (`bool`, *optional*, defaults to `True`): 67 | Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. 68 | 69 | Returns: 70 | [`~pipelines.ImagePipelineOutput`] or `tuple`: 71 | If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is 72 | returned where the first element is a list with the generated images. 
73 | """ 74 | 75 | img_size = self.unet.config.sample_size 76 | shape = (batch_size, 3, img_size, img_size) 77 | 78 | model = self.unet 79 | 80 | sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma 81 | sample = sample.to(self.device) 82 | 83 | self.scheduler.set_timesteps(num_inference_steps) 84 | self.scheduler.set_sigmas(num_inference_steps) 85 | 86 | for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): 87 | sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device) 88 | 89 | # correction step 90 | for _ in range(self.scheduler.config.correct_steps): 91 | model_output = self.unet(sample, sigma_t).sample 92 | sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample 93 | 94 | # prediction step 95 | model_output = model(sample, sigma_t).sample 96 | output = self.scheduler.step_pred(model_output, t, sample, generator=generator) 97 | 98 | sample, sample_mean = output.prev_sample, output.prev_sample_mean 99 | 100 | sample = sample_mean.clamp(0, 1) 101 | sample = sample.cpu().permute(0, 2, 3, 1).numpy() 102 | if output_type == "pil": 103 | sample = self.numpy_to_pil(sample) 104 | 105 | if not return_dict: 106 | return (sample,) 107 | 108 | return ImagePipelineOutput(images=sample) 109 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from enum import Enum 3 | from typing import List, Optional, Union 4 | 5 | import numpy as np 6 | import PIL 7 | from PIL import Image 8 | 9 | from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 10 | 11 | 12 | @dataclass 13 | class SemanticStableDiffusionPipelineOutput(BaseOutput): 14 | """ 15 | Output class for Stable Diffusion pipelines. 16 | 17 | Args: 18 | images (`List[PIL.Image.Image]` or `np.ndarray`) 19 | List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, 20 | num_channels)`. 21 | nsfw_content_detected (`List[bool]`) 22 | List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content or 23 | `None` if safety checking could not be performed. 
24 | """ 25 | 26 | images: Union[List[PIL.Image.Image], np.ndarray] 27 | nsfw_content_detected: Optional[List[bool]] 28 | 29 | 30 | try: 31 | if not (is_transformers_available() and is_torch_available()): 32 | raise OptionalDependencyNotAvailable() 33 | except OptionalDependencyNotAvailable: 34 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 35 | else: 36 | from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline 37 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/shap_e/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available()): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline 14 | else: 15 | from .camera import create_pan_cameras 16 | from .pipeline_shap_e import ShapEPipeline 17 | from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline 18 | from .renderer import ( 19 | BoundingBoxVolume, 20 | ImportanceRaySampler, 21 | MLPNeRFModelOutput, 22 | MLPNeRSTFModel, 23 | ShapEParamsProjModel, 24 | ShapERenderer, 25 | StratifiedRaySampler, 26 | VoidNeRFModel, 27 | ) 28 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/spectrogram_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | from ...utils import is_note_seq_available, is_transformers_available, is_torch_available 3 | from ...utils import OptionalDependencyNotAvailable 4 | 5 | 6 | try: 7 | if not (is_transformers_available() and is_torch_available()): 8 | raise OptionalDependencyNotAvailable() 9 | except OptionalDependencyNotAvailable: 10 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 11 | else: 12 | from .notes_encoder import SpectrogramNotesEncoder 13 | from .continous_encoder import SpectrogramContEncoder 14 | from .pipeline_spectrogram_diffusion import ( 15 | SpectrogramContEncoder, 16 | SpectrogramDiffusionPipeline, 17 | T5FilmDecoder, 18 | ) 19 | 20 | try: 21 | if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): 22 | raise OptionalDependencyNotAvailable() 23 | except OptionalDependencyNotAvailable: 24 | from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 25 | else: 26 | from .midi_utils import MidiProcessor 27 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/spectrogram_diffusion/continous_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Music Spectrogram Diffusion Authors. 2 | # Copyright 2023 The HuggingFace Team. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | import torch 17 | import torch.nn as nn 18 | from transformers.modeling_utils import ModuleUtilsMixin 19 | from transformers.models.t5.modeling_t5 import ( 20 | T5Block, 21 | T5Config, 22 | T5LayerNorm, 23 | ) 24 | 25 | from ...configuration_utils import ConfigMixin, register_to_config 26 | from ...models import ModelMixin 27 | 28 | 29 | class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): 30 | @register_to_config 31 | def __init__( 32 | self, 33 | input_dims: int, 34 | targets_context_length: int, 35 | d_model: int, 36 | dropout_rate: float, 37 | num_layers: int, 38 | num_heads: int, 39 | d_kv: int, 40 | d_ff: int, 41 | feed_forward_proj: str, 42 | is_decoder: bool = False, 43 | ): 44 | super().__init__() 45 | 46 | self.input_proj = nn.Linear(input_dims, d_model, bias=False) 47 | 48 | self.position_encoding = nn.Embedding(targets_context_length, d_model) 49 | self.position_encoding.weight.requires_grad = False 50 | 51 | self.dropout_pre = nn.Dropout(p=dropout_rate) 52 | 53 | t5config = T5Config( 54 | d_model=d_model, 55 | num_heads=num_heads, 56 | d_kv=d_kv, 57 | d_ff=d_ff, 58 | feed_forward_proj=feed_forward_proj, 59 | dropout_rate=dropout_rate, 60 | is_decoder=is_decoder, 61 | is_encoder_decoder=False, 62 | ) 63 | self.encoders = nn.ModuleList() 64 | for lyr_num in range(num_layers): 65 | lyr = T5Block(t5config) 66 | self.encoders.append(lyr) 67 | 68 | self.layer_norm = T5LayerNorm(d_model) 69 | self.dropout_post = nn.Dropout(p=dropout_rate) 70 | 71 | def forward(self, encoder_inputs, encoder_inputs_mask): 72 | x = self.input_proj(encoder_inputs) 73 | 74 | # terminal relative positional encodings 75 | max_positions = encoder_inputs.shape[1] 76 | input_positions = torch.arange(max_positions, device=encoder_inputs.device) 77 | 78 | seq_lens = encoder_inputs_mask.sum(-1) 79 | input_positions = torch.roll(input_positions.unsqueeze(0), tuple(seq_lens.tolist()), dims=0) 80 | x += self.position_encoding(input_positions) 81 | 82 | x = self.dropout_pre(x) 83 | 84 | # inverted the attention mask 85 | input_shape = encoder_inputs.size() 86 | extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) 87 | 88 | for lyr in self.encoders: 89 | x = lyr(x, extended_attention_mask)[0] 90 | x = self.layer_norm(x) 91 | 92 | return self.dropout_post(x), encoder_inputs_mask 93 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/spectrogram_diffusion/notes_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Music Spectrogram Diffusion Authors. 2 | # Copyright 2023 The HuggingFace Team. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | import torch 17 | import torch.nn as nn 18 | from transformers.modeling_utils import ModuleUtilsMixin 19 | from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm 20 | 21 | from ...configuration_utils import ConfigMixin, register_to_config 22 | from ...models import ModelMixin 23 | 24 | 25 | class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): 26 | @register_to_config 27 | def __init__( 28 | self, 29 | max_length: int, 30 | vocab_size: int, 31 | d_model: int, 32 | dropout_rate: float, 33 | num_layers: int, 34 | num_heads: int, 35 | d_kv: int, 36 | d_ff: int, 37 | feed_forward_proj: str, 38 | is_decoder: bool = False, 39 | ): 40 | super().__init__() 41 | 42 | self.token_embedder = nn.Embedding(vocab_size, d_model) 43 | 44 | self.position_encoding = nn.Embedding(max_length, d_model) 45 | self.position_encoding.weight.requires_grad = False 46 | 47 | self.dropout_pre = nn.Dropout(p=dropout_rate) 48 | 49 | t5config = T5Config( 50 | vocab_size=vocab_size, 51 | d_model=d_model, 52 | num_heads=num_heads, 53 | d_kv=d_kv, 54 | d_ff=d_ff, 55 | dropout_rate=dropout_rate, 56 | feed_forward_proj=feed_forward_proj, 57 | is_decoder=is_decoder, 58 | is_encoder_decoder=False, 59 | ) 60 | 61 | self.encoders = nn.ModuleList() 62 | for lyr_num in range(num_layers): 63 | lyr = T5Block(t5config) 64 | self.encoders.append(lyr) 65 | 66 | self.layer_norm = T5LayerNorm(d_model) 67 | self.dropout_post = nn.Dropout(p=dropout_rate) 68 | 69 | def forward(self, encoder_input_tokens, encoder_inputs_mask): 70 | x = self.token_embedder(encoder_input_tokens) 71 | 72 | seq_length = encoder_input_tokens.shape[1] 73 | inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device) 74 | x += self.position_encoding(inputs_positions) 75 | 76 | x = self.dropout_pre(x) 77 | 78 | # inverted the attention mask 79 | input_shape = encoder_input_tokens.size() 80 | extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) 81 | 82 | for lyr in self.encoders: 83 | x = lyr(x, extended_attention_mask)[0] 84 | x = self.layer_norm(x) 85 | 86 | return self.dropout_post(x), encoder_inputs_mask 87 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_controlnet.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # NOTE: This file is deprecated and will be removed in a future version. 16 | # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works 17 | 18 | from ...utils import deprecate 19 | from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 20 | 21 | 22 | deprecate( 23 | "stable diffusion controlnet", 24 | "0.22.0", 25 | "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.pipeline_flax_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.", 26 | standard_warn=False, 27 | stacklevel=3, 28 | ) 29 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # NOTE: This file is deprecated and will be removed in a future version. 16 | # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works 17 | from ...utils import deprecate 18 | from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 19 | from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 20 | 21 | 22 | deprecate( 23 | "stable diffusion controlnet", 24 | "0.22.0", 25 | "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.", 26 | standard_warn=False, 27 | stacklevel=3, 28 | ) 29 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | 15 | from typing import Optional, Tuple 16 | 17 | import jax 18 | import jax.numpy as jnp 19 | from flax import linen as nn 20 | from flax.core.frozen_dict import FrozenDict 21 | from transformers import CLIPConfig, FlaxPreTrainedModel 22 | from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule 23 | 24 | 25 | def jax_cosine_distance(emb_1, emb_2, eps=1e-12): 26 | norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T 27 | norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T 28 | return jnp.matmul(norm_emb_1, norm_emb_2.T) 29 | 30 | 31 | class FlaxStableDiffusionSafetyCheckerModule(nn.Module): 32 | config: CLIPConfig 33 | dtype: jnp.dtype = jnp.float32 34 | 35 | def setup(self): 36 | self.vision_model = FlaxCLIPVisionModule(self.config.vision_config) 37 | self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype) 38 | 39 | self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)) 40 | self.special_care_embeds = self.param( 41 | "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim) 42 | ) 43 | 44 | self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,)) 45 | self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,)) 46 | 47 | def __call__(self, clip_input): 48 | pooled_output = self.vision_model(clip_input)[1] 49 | image_embeds = self.visual_projection(pooled_output) 50 | 51 | special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds) 52 | cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds) 53 | 54 | # increase this value to create a stronger `nfsw` filter 55 | # at the cost of increasing the possibility of filtering benign image inputs 56 | adjustment = 0.0 57 | 58 | special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment 59 | special_scores = jnp.round(special_scores, 3) 60 | is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True) 61 | # Use a lower threshold if an image has any special care concept 62 | special_adjustment = is_special_care * 0.01 63 | 64 | concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment 65 | concept_scores = jnp.round(concept_scores, 3) 66 | has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1) 67 | 68 | return has_nsfw_concepts 69 | 70 | 71 | class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel): 72 | config_class = CLIPConfig 73 | main_input_name = "clip_input" 74 | module_class = FlaxStableDiffusionSafetyCheckerModule 75 | 76 | def __init__( 77 | self, 78 | config: CLIPConfig, 79 | input_shape: Optional[Tuple] = None, 80 | seed: int = 0, 81 | dtype: jnp.dtype = jnp.float32, 82 | _do_init: bool = True, 83 | **kwargs, 84 | ): 85 | if input_shape is None: 86 | input_shape = (1, 224, 224, 3) 87 | module = self.module_class(config=config, dtype=dtype, **kwargs) 88 | super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) 89 | 90 | def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: 91 | # init input tensor 92 | clip_input = jax.random.normal(rng, input_shape) 93 | 94 | params_rng, dropout_rng = jax.random.split(rng) 95 | rngs = {"params": params_rng, "dropout": dropout_rng} 96 | 97 | random_params = self.module.init(rngs, clip_input)["params"] 98 
| 99 | return random_params 100 | 101 | def __call__( 102 | self, 103 | clip_input, 104 | params: dict = None, 105 | ): 106 | clip_input = jnp.transpose(clip_input, (0, 2, 3, 1)) 107 | 108 | return self.module.apply( 109 | {"params": params or self.params}, 110 | jnp.array(clip_input, dtype=jnp.float32), 111 | rngs={}, 112 | ) 113 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from typing import Optional, Union 16 | 17 | import torch 18 | from torch import nn 19 | 20 | from ...configuration_utils import ConfigMixin, register_to_config 21 | from ...models.modeling_utils import ModelMixin 22 | 23 | 24 | class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin): 25 | """ 26 | This class is used to hold the mean and standard deviation of the CLIP embedder used in stable unCLIP. 27 | 28 | It is used to normalize the image embeddings before the noise is applied and un-normalize the noised image 29 | embeddings. 
30 | """ 31 | 32 | @register_to_config 33 | def __init__( 34 | self, 35 | embedding_dim: int = 768, 36 | ): 37 | super().__init__() 38 | 39 | self.mean = nn.Parameter(torch.zeros(1, embedding_dim)) 40 | self.std = nn.Parameter(torch.ones(1, embedding_dim)) 41 | 42 | def to( 43 | self, 44 | torch_device: Optional[Union[str, torch.device]] = None, 45 | torch_dtype: Optional[torch.dtype] = None, 46 | ): 47 | self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype)) 48 | self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype)) 49 | return self 50 | 51 | def scale(self, embeds): 52 | embeds = (embeds - self.mean) * 1.0 / self.std 53 | return embeds 54 | 55 | def unscale(self, embeds): 56 | embeds = (embeds * self.std) + self.mean 57 | return embeds 58 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from enum import Enum 3 | from typing import List, Optional, Union 4 | 5 | import numpy as np 6 | import PIL 7 | from PIL import Image 8 | 9 | from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 10 | 11 | 12 | @dataclass 13 | class SafetyConfig(object): 14 | WEAK = { 15 | "sld_warmup_steps": 15, 16 | "sld_guidance_scale": 20, 17 | "sld_threshold": 0.0, 18 | "sld_momentum_scale": 0.0, 19 | "sld_mom_beta": 0.0, 20 | } 21 | MEDIUM = { 22 | "sld_warmup_steps": 10, 23 | "sld_guidance_scale": 1000, 24 | "sld_threshold": 0.01, 25 | "sld_momentum_scale": 0.3, 26 | "sld_mom_beta": 0.4, 27 | } 28 | STRONG = { 29 | "sld_warmup_steps": 7, 30 | "sld_guidance_scale": 2000, 31 | "sld_threshold": 0.025, 32 | "sld_momentum_scale": 0.5, 33 | "sld_mom_beta": 0.7, 34 | } 35 | MAX = { 36 | "sld_warmup_steps": 0, 37 | "sld_guidance_scale": 5000, 38 | "sld_threshold": 1.0, 39 | "sld_momentum_scale": 0.5, 40 | "sld_mom_beta": 0.7, 41 | } 42 | 43 | 44 | @dataclass 45 | class StableDiffusionSafePipelineOutput(BaseOutput): 46 | """ 47 | Output class for Safe Stable Diffusion pipelines. 48 | 49 | Args: 50 | images (`List[PIL.Image.Image]` or `np.ndarray`) 51 | List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, 52 | num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 53 | nsfw_content_detected (`List[bool]`) 54 | List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" 55 | (nsfw) content, or `None` if safety checking could not be performed. 56 | images (`List[PIL.Image.Image]` or `np.ndarray`) 57 | List of denoised PIL images that were flagged by the safety checker any may contain "not-safe-for-work" 58 | (nsfw) content, or `None` if no safety check was performed or no images were flagged. 
59 | applied_safety_concept (`str`) 60 | The safety concept that was applied for safety guidance, or `None` if safety guidance was disabled 61 | """ 62 | 63 | images: Union[List[PIL.Image.Image], np.ndarray] 64 | nsfw_content_detected: Optional[List[bool]] 65 | unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]] 66 | applied_safety_concept: Optional[str] 67 | 68 | 69 | try: 70 | if not (is_transformers_available() and is_torch_available()): 71 | raise OptionalDependencyNotAvailable() 72 | except OptionalDependencyNotAvailable: 73 | from ...utils.dummy_torch_and_transformers_objects import * 74 | else: 75 | from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe 76 | from .safety_checker import SafeStableDiffusionSafetyChecker 77 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stable_diffusion_xl/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import PIL 6 | 7 | from ...utils import ( 8 | BaseOutput, 9 | OptionalDependencyNotAvailable, 10 | is_torch_available, 11 | is_transformers_available, 12 | ) 13 | 14 | 15 | @dataclass 16 | class StableDiffusionXLPipelineOutput(BaseOutput): 17 | """ 18 | Output class for Stable Diffusion pipelines. 19 | 20 | Args: 21 | images (`List[PIL.Image.Image]` or `np.ndarray`) 22 | List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, 23 | num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 24 | """ 25 | 26 | images: Union[List[PIL.Image.Image], np.ndarray] 27 | 28 | 29 | try: 30 | if not (is_transformers_available() and is_torch_available()): 31 | raise OptionalDependencyNotAvailable() 32 | except OptionalDependencyNotAvailable: 33 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 34 | else: 35 | from .pipeline_stable_diffusion_xl import StableDiffusionXLPipeline 36 | from .pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipeline 37 | from .pipeline_stable_diffusion_xl_inpaint import StableDiffusionXLInpaintPipeline 38 | from .pipeline_stable_diffusion_xl_instruct_pix2pix import StableDiffusionXLInstructPix2PixPipeline 39 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from ...utils import is_invisible_watermark_available 5 | 6 | 7 | if is_invisible_watermark_available(): 8 | from imwatermark import WatermarkEncoder 9 | 10 | 11 | # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 12 | WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 13 | # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 14 | WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] 15 | 16 | 17 | class StableDiffusionXLWatermarker: 18 | def __init__(self): 19 | self.watermark = WATERMARK_BITS 20 | self.encoder = WatermarkEncoder() 21 | 22 | self.encoder.set_watermark("bits", self.watermark) 23 | 24 | def apply_watermark(self, images: torch.FloatTensor): 25 | # can't encode images that are smaller than 256 26 | if images.shape[-1] < 
256: 27 | return images 28 | 29 | images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy() 30 | 31 | images = [self.encoder.encode(image, "dwtDct") for image in images] 32 | 33 | images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2) 34 | 35 | images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0) 36 | return images 37 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/stochastic_karras_ve/__init__.py: -------------------------------------------------------------------------------- 1 | from .pipeline_stochastic_karras_ve import KarrasVePipeline 2 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/t2i_adapter/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | ) 6 | 7 | 8 | try: 9 | if not (is_transformers_available() and is_torch_available()): 10 | raise OptionalDependencyNotAvailable() 11 | except OptionalDependencyNotAvailable: 12 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 13 | else: 14 | from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline 15 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/text_to_video_synthesis/__init__.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import List, Optional, Union 3 | 4 | import numpy as np 5 | import torch 6 | 7 | from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 8 | 9 | 10 | @dataclass 11 | class TextToVideoSDPipelineOutput(BaseOutput): 12 | """ 13 | Output class for text-to-video pipelines. 14 | 15 | Args: 16 | frames (`List[np.ndarray]` or `torch.FloatTensor`) 17 | List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or as 18 | a `torch` tensor. The length of the list denotes the video length (the number of frames). 
19 | """ 20 | 21 | frames: Union[List[np.ndarray], torch.FloatTensor] 22 | 23 | 24 | try: 25 | if not (is_transformers_available() and is_torch_available()): 26 | raise OptionalDependencyNotAvailable() 27 | except OptionalDependencyNotAvailable: 28 | from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 29 | else: 30 | from .pipeline_text_to_video_synth import TextToVideoSDPipeline 31 | from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline # noqa: F401 32 | from .pipeline_text_to_video_zero import TextToVideoZeroPipeline 33 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/unclip/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline 14 | else: 15 | from .pipeline_unclip import UnCLIPPipeline 16 | from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline 17 | from .text_proj import UnCLIPTextProjModel 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/unclip/text_proj.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Kakao Brain and The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import torch 16 | from torch import nn 17 | 18 | from ...configuration_utils import ConfigMixin, register_to_config 19 | from ...models import ModelMixin 20 | 21 | 22 | class UnCLIPTextProjModel(ModelMixin, ConfigMixin): 23 | """ 24 | Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the 25 | decoder. 
26 | 27 | For more details, see the original paper: https://arxiv.org/abs/2204.06125 section 2.1 28 | """ 29 | 30 | @register_to_config 31 | def __init__( 32 | self, 33 | *, 34 | clip_extra_context_tokens: int = 4, 35 | clip_embeddings_dim: int = 768, 36 | time_embed_dim: int, 37 | cross_attention_dim, 38 | ): 39 | super().__init__() 40 | 41 | self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim)) 42 | 43 | # parameters for additional clip time embeddings 44 | self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim) 45 | self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim) 46 | 47 | # parameters for encoder hidden states 48 | self.clip_extra_context_tokens = clip_extra_context_tokens 49 | self.clip_extra_context_tokens_proj = nn.Linear( 50 | clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim 51 | ) 52 | self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim) 53 | self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim) 54 | 55 | def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance): 56 | if do_classifier_free_guidance: 57 | # Add the classifier free guidance embeddings to the image embeddings 58 | image_embeddings_batch_size = image_embeddings.shape[0] 59 | classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0) 60 | classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand( 61 | image_embeddings_batch_size, -1 62 | ) 63 | image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0) 64 | 65 | # The image embeddings batch size and the text embeddings batch size are equal 66 | assert image_embeddings.shape[0] == prompt_embeds.shape[0] 67 | 68 | batch_size = prompt_embeds.shape[0] 69 | 70 | # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and 71 | # adding CLIP embeddings to the existing timestep embedding, ... 72 | time_projected_prompt_embeds = self.embedding_proj(prompt_embeds) 73 | time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings) 74 | additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds 75 | 76 | # ... 
and by projecting CLIP embeddings into four 77 | # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" 78 | clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings) 79 | clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens) 80 | clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1) 81 | 82 | text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states) 83 | text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states) 84 | text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1) 85 | 86 | return text_encoder_hidden_states, additive_clip_time_embeddings 87 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/unidiffuser/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available()): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | ImageTextPipelineOutput, 15 | UniDiffuserPipeline, 16 | ) 17 | else: 18 | from .modeling_text_decoder import UniDiffuserTextDecoder 19 | from .modeling_uvit import UniDiffuserModel, UTransformer2DModel 20 | from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline 21 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/versatile_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import ( 2 | OptionalDependencyNotAvailable, 3 | is_torch_available, 4 | is_transformers_available, 5 | is_transformers_version, 6 | ) 7 | 8 | 9 | try: 10 | if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): 11 | raise OptionalDependencyNotAvailable() 12 | except OptionalDependencyNotAvailable: 13 | from ...utils.dummy_torch_and_transformers_objects import ( 14 | VersatileDiffusionDualGuidedPipeline, 15 | VersatileDiffusionImageVariationPipeline, 16 | VersatileDiffusionPipeline, 17 | VersatileDiffusionTextToImagePipeline, 18 | ) 19 | else: 20 | from .modeling_text_unet import UNetFlatConditionModel 21 | from .pipeline_versatile_diffusion import VersatileDiffusionPipeline 22 | from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline 23 | from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline 24 | from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline 25 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/pipelines/vq_diffusion/__init__.py: -------------------------------------------------------------------------------- 1 | from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available 2 | 3 | 4 | try: 5 | if not (is_transformers_available() and is_torch_available()): 6 | raise OptionalDependencyNotAvailable() 7 | except OptionalDependencyNotAvailable: 8 | from 
...utils.dummy_torch_and_transformers_objects import * 9 | else: 10 | from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline 11 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/README.md: -------------------------------------------------------------------------------- 1 | # Schedulers 2 | 3 | For more information on the schedulers, please refer to the [docs](https://huggingface.co/docs/diffusers/api/schedulers/overview). -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from ..utils import ( 17 | OptionalDependencyNotAvailable, 18 | is_flax_available, 19 | is_scipy_available, 20 | is_torch_available, 21 | is_torchsde_available, 22 | ) 23 | 24 | 25 | try: 26 | if not is_torch_available(): 27 | raise OptionalDependencyNotAvailable() 28 | except OptionalDependencyNotAvailable: 29 | from ..utils.dummy_pt_objects import * # noqa F403 30 | else: 31 | from .scheduling_consistency_models import CMStochasticIterativeScheduler 32 | from .scheduling_ddim import DDIMScheduler 33 | from .scheduling_ddim_inverse import DDIMInverseScheduler 34 | from .scheduling_ddim_parallel import DDIMParallelScheduler 35 | from .scheduling_ddpm import DDPMScheduler 36 | from .scheduling_ddpm_parallel import DDPMParallelScheduler 37 | from .scheduling_deis_multistep import DEISMultistepScheduler 38 | from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler 39 | from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler 40 | from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler 41 | from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler 42 | from .scheduling_euler_discrete import EulerDiscreteScheduler 43 | from .scheduling_heun_discrete import HeunDiscreteScheduler 44 | from .scheduling_ipndm import IPNDMScheduler 45 | from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler 46 | from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler 47 | from .scheduling_karras_ve import KarrasVeScheduler 48 | from .scheduling_pndm import PNDMScheduler 49 | from .scheduling_repaint import RePaintScheduler 50 | from .scheduling_sde_ve import ScoreSdeVeScheduler 51 | from .scheduling_sde_vp import ScoreSdeVpScheduler 52 | from .scheduling_unclip import UnCLIPScheduler 53 | from .scheduling_unipc_multistep import UniPCMultistepScheduler 54 | from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin 55 | from .scheduling_vq_diffusion import VQDiffusionScheduler 56 | 57 | try: 58 | if not is_flax_available(): 59 | raise OptionalDependencyNotAvailable() 60 | except 
OptionalDependencyNotAvailable: 61 | from ..utils.dummy_flax_objects import * # noqa F403 62 | else: 63 | from .scheduling_ddim_flax import FlaxDDIMScheduler 64 | from .scheduling_ddpm_flax import FlaxDDPMScheduler 65 | from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler 66 | from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler 67 | from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler 68 | from .scheduling_pndm_flax import FlaxPNDMScheduler 69 | from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler 70 | from .scheduling_utils_flax import ( 71 | FlaxKarrasDiffusionSchedulers, 72 | FlaxSchedulerMixin, 73 | FlaxSchedulerOutput, 74 | broadcast_to_shape_from_left, 75 | ) 76 | 77 | 78 | try: 79 | if not (is_torch_available() and is_scipy_available()): 80 | raise OptionalDependencyNotAvailable() 81 | except OptionalDependencyNotAvailable: 82 | from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 83 | else: 84 | from .scheduling_lms_discrete import LMSDiscreteScheduler 85 | 86 | try: 87 | if not (is_torch_available() and is_torchsde_available()): 88 | raise OptionalDependencyNotAvailable() 89 | except OptionalDependencyNotAvailable: 90 | from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 91 | else: 92 | from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler 93 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_consistency_models.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_consistency_models.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddim.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddim.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddim_inverse.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddim_inverse.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddim_parallel.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddim_parallel.cpython-38.pyc 
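The pipeline and scheduler `__init__.py` files above all follow the same optional-dependency guard: probe for a backend, raise `OptionalDependencyNotAvailable` when it is missing, fall back to the autogenerated dummy objects, and only import the real implementations in the `else` branch. A minimal sketch of the pattern, assuming it lives inside a diffusers subpackage (`MyScheduler` and `scheduling_my_scheduler` are illustrative placeholders, not real modules):

    from ..utils import OptionalDependencyNotAvailable, is_torch_available

    try:
        if not is_torch_available():
            # Torch is missing, so the real implementation cannot be imported.
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        # Expose stand-in classes that raise a helpful ImportError only when used.
        from ..utils.dummy_pt_objects import *  # noqa F403
    else:
        # Torch is present: expose the real implementation.
        from .scheduling_my_scheduler import MyScheduler

The point of the indirection is that `import diffusers` always succeeds; the cost of a missing backend is deferred to the moment a guarded class is actually used.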
-------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddpm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddpm.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddpm_parallel.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_ddpm_parallel.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_deis_multistep.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_deis_multistep.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_multistep_inverse.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_multistep_inverse.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_sde.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_sde.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_singlestep.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_dpmsolver_singlestep.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_euler_discrete.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_euler_discrete.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_heun_discrete.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_heun_discrete.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_ipndm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_ipndm.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_k_dpm_2_ancestral_discrete.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_k_dpm_2_ancestral_discrete.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_k_dpm_2_discrete.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_k_dpm_2_discrete.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_karras_ve.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_karras_ve.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_lms_discrete.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_lms_discrete.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_pndm.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_pndm.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_repaint.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_repaint.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_sde_ve.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_sde_ve.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_sde_vp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_sde_vp.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_unclip.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_unclip.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_unipc_multistep.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_unipc_multistep.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/__pycache__/scheduling_vq_diffusion.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/schedulers/__pycache__/scheduling_vq_diffusion.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/schedulers/scheduling_sde_vp.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch 16 | 17 | import math 18 | from typing import Union 19 | 20 | import torch 21 | 22 | from ..configuration_utils import ConfigMixin, register_to_config 23 | from ..utils import randn_tensor 24 | from .scheduling_utils import SchedulerMixin 25 | 26 | 27 | class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): 28 | """ 29 | `ScoreSdeVpScheduler` is a variance preserving stochastic differential equation (SDE) scheduler. 30 | 31 | This scheduler inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic 32 | methods the library implements for all schedulers such as loading and saving. 33 | 34 | Args: 35 | num_train_timesteps (`int`, defaults to 2000): 36 | The number of diffusion steps to train the model. 37 | beta_min (`float`, defaults to 0.1): The initial value of the noise schedule `beta(t)` of the variance-preserving SDE at `t = 0`. 38 | beta_max (`float`, defaults to 20): The final value of the noise schedule `beta(t)` at `t = 1`. 39 | sampling_eps (`float`, defaults to 1e-3): 40 | The end value of sampling where timesteps decrease progressively from 1 to epsilon. 41 | """ 42 | 43 | order = 1 44 | 45 | @register_to_config 46 | def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3): 47 | self.sigmas = None 48 | self.discrete_sigmas = None 49 | self.timesteps = None 50 | 51 | def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None): 52 | """ 53 | Sets the continuous timesteps used for the diffusion chain (to be run before inference). 54 | 55 | Args: 56 | num_inference_steps (`int`): 57 | The number of diffusion steps used when generating samples with a pre-trained model. 58 | device (`str` or `torch.device`, *optional*): 59 | The device to which the timesteps should be moved. If `None`, the timesteps are not moved. 60 | """ 61 | self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device) 62 | 63 | def step_pred(self, score, x, t, generator=None): 64 | """ 65 | Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion 66 | process from the learned model outputs (most often the predicted noise). 67 | 68 | Args: 69 | score (`torch.FloatTensor`): The direct output of the learned diffusion model (usually the predicted noise); it is rescaled to a score internally by dividing by the marginal standard deviation. 70 | x (`torch.FloatTensor`): The current instance of the sample created by the diffusion process. 71 | t (`torch.FloatTensor`): The current continuous timestep in the diffusion chain. 72 | generator (`torch.Generator`, *optional*): 73 | A random number generator.
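        Example (a minimal sketch of the reverse-diffusion loop; `score_model` stands in for a trained
        score network and is not part of this file):

            import torch
            from diffusers.schedulers import ScoreSdeVpScheduler

            scheduler = ScoreSdeVpScheduler()
            scheduler.set_timesteps(num_inference_steps=1000)

            x = torch.randn(1, 3, 64, 64)  # start from pure noise
            for t in scheduler.timesteps:
                score = score_model(x, t)  # assumed pretrained model call
                x, x_mean = scheduler.step_pred(score, x, t)
            # x_mean is the noise-free mean of the last step and is commonly taken as the final sample.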
74 | """ 75 | if self.timesteps is None: 76 | raise ValueError( 77 | "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" 78 | ) 79 | 80 | # TODO(Patrick) better comments + non-PyTorch 81 | # postprocess model score 82 | log_mean_coeff = ( 83 | -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min 84 | ) 85 | std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) 86 | std = std.flatten() 87 | while len(std.shape) < len(score.shape): 88 | std = std.unsqueeze(-1) 89 | score = -score / std 90 | 91 | # compute 92 | dt = -1.0 / len(self.timesteps) 93 | 94 | beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) 95 | beta_t = beta_t.flatten() 96 | while len(beta_t.shape) < len(x.shape): 97 | beta_t = beta_t.unsqueeze(-1) 98 | drift = -0.5 * beta_t * x 99 | 100 | diffusion = torch.sqrt(beta_t) 101 | drift = drift - diffusion**2 * score 102 | x_mean = x + drift * dt 103 | 104 | # add noise 105 | noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype) 106 | x = x_mean + diffusion * math.sqrt(-dt) * noise 107 | 108 | return x, x_mean 109 | 110 | def __len__(self): 111 | return self.config.num_train_timesteps 112 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import os 17 | 18 | from packaging import version 19 | 20 | from .. 
import __version__ 21 | from .accelerate_utils import apply_forward_hook 22 | from .constants import ( 23 | CONFIG_NAME, 24 | DEPRECATED_REVISION_ARGS, 25 | DIFFUSERS_CACHE, 26 | DIFFUSERS_DYNAMIC_MODULE_NAME, 27 | FLAX_WEIGHTS_NAME, 28 | HF_MODULES_CACHE, 29 | HUGGINGFACE_CO_RESOLVE_ENDPOINT, 30 | ONNX_EXTERNAL_WEIGHTS_NAME, 31 | ONNX_WEIGHTS_NAME, 32 | SAFETENSORS_WEIGHTS_NAME, 33 | WEIGHTS_NAME, 34 | ) 35 | from .deprecation_utils import deprecate 36 | from .doc_utils import replace_example_docstring 37 | from .dynamic_modules_utils import get_class_from_dynamic_module 38 | from .hub_utils import ( 39 | HF_HUB_OFFLINE, 40 | PushToHubMixin, 41 | _add_variant, 42 | _get_model_file, 43 | extract_commit_hash, 44 | http_user_agent, 45 | ) 46 | from .import_utils import ( 47 | BACKENDS_MAPPING, 48 | ENV_VARS_TRUE_AND_AUTO_VALUES, 49 | ENV_VARS_TRUE_VALUES, 50 | USE_JAX, 51 | USE_TF, 52 | USE_TORCH, 53 | DummyObject, 54 | OptionalDependencyNotAvailable, 55 | is_accelerate_available, 56 | is_accelerate_version, 57 | is_bs4_available, 58 | is_flax_available, 59 | is_ftfy_available, 60 | is_inflect_available, 61 | is_invisible_watermark_available, 62 | is_k_diffusion_available, 63 | is_k_diffusion_version, 64 | is_librosa_available, 65 | is_note_seq_available, 66 | is_omegaconf_available, 67 | is_onnx_available, 68 | is_scipy_available, 69 | is_tensorboard_available, 70 | is_tf_available, 71 | is_torch_available, 72 | is_torch_version, 73 | is_torchsde_available, 74 | is_transformers_available, 75 | is_transformers_version, 76 | is_unidecode_available, 77 | is_wandb_available, 78 | is_xformers_available, 79 | requires_backends, 80 | ) 81 | from .logging import get_logger 82 | from .outputs import BaseOutput 83 | from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil 84 | from .torch_utils import is_compiled_module, randn_tensor 85 | 86 | 87 | if is_torch_available(): 88 | from .testing_utils import ( 89 | floats_tensor, 90 | load_hf_numpy, 91 | load_image, 92 | load_numpy, 93 | load_pt, 94 | nightly, 95 | parse_flag_from_env, 96 | print_tensor_test, 97 | require_torch_2, 98 | require_torch_gpu, 99 | skip_mps, 100 | slow, 101 | torch_all_close, 102 | torch_device, 103 | ) 104 | from .torch_utils import maybe_allow_in_graph 105 | 106 | from .testing_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video 107 | 108 | 109 | logger = get_logger(__name__) 110 | 111 | 112 | def check_min_version(min_version): 113 | if version.parse(__version__) < version.parse(min_version): 114 | if "dev" in min_version: 115 | error_message = ( 116 | "This example requires a source install from HuggingFace diffusers (see " 117 | "`https://huggingface.co/docs/diffusers/installation#install-from-source`)," 118 | ) 119 | else: 120 | error_message = f"This example requires a minimum version of {min_version}," 121 | error_message += f" but the version found is {__version__}.\n" 122 | raise ImportError(error_message) 123 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/__init__.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/accelerate_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/accelerate_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/accelerate_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/accelerate_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/constants.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/constants.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/constants.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/constants.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/deprecation_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/deprecation_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/deprecation_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/deprecation_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/doc_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/doc_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/doc_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/doc_utils.cpython-38.pyc 
-------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_flax_and_transformers_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_flax_and_transformers_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_flax_and_transformers_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_flax_and_transformers_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_flax_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_flax_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_flax_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_flax_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_note_seq_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_note_seq_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_note_seq_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_note_seq_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_onnx_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_onnx_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_onnx_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_onnx_objects.cpython-38.pyc -------------------------------------------------------------------------------- 
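The `check_min_version` helper defined in `utils/__init__.py` above is meant to be called once at the top of an example or training script; the version string below is purely illustrative:

    from diffusers.utils import check_min_version

    # Raises an ImportError if the installed diffusers version is older than the one requested.
    check_min_version("0.20.0")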
/diffusers/src/diffusers/utils/__pycache__/dummy_pt_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_pt_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_librosa_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_librosa_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_librosa_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_librosa_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_scipy_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_scipy_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_scipy_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_scipy_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_torchsde_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_torchsde_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_torchsde_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_torchsde_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_invisible_watermark_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_invisible_watermark_objects.cpython-38.pyc 
-------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_k_diffusion_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_k_diffusion_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_k_diffusion_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_k_diffusion_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_onnx_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_onnx_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_onnx_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_and_onnx_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_objects.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_torch_and_transformers_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_transformers_and_torch_and_note_seq_objects.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_transformers_and_torch_and_note_seq_objects.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dummy_transformers_and_torch_and_note_seq_objects.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dummy_transformers_and_torch_and_note_seq_objects.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dynamic_modules_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dynamic_modules_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/dynamic_modules_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/dynamic_modules_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/hub_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/hub_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/hub_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/hub_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/import_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/import_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/import_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/import_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/logging.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/logging.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/logging.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/logging.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/outputs.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/outputs.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/outputs.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/outputs.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/pil_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/pil_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/pil_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/pil_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/testing_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/testing_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/testing_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/testing_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/torch_utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/torch_utils.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/__pycache__/torch_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers/src/diffusers/utils/__pycache__/torch_utils.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/accelerate_utils.py: 
-------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """ 15 | Accelerate utilities: Utilities related to accelerate 16 | """ 17 | 18 | from packaging import version 19 | 20 | from .import_utils import is_accelerate_available 21 | 22 | 23 | if is_accelerate_available(): 24 | import accelerate 25 | 26 | 27 | def apply_forward_hook(method): 28 | """ 29 | Decorator that applies a registered CpuOffload hook to an arbitrary function rather than `forward`. This is useful 30 | for cases where a PyTorch module provides functions other than `forward` that should trigger a move to the 31 | appropriate acceleration device. This is the case for `encode` and `decode` in [`AutoencoderKL`]. 32 | 33 | This decorator looks inside the internal `_hf_hook` property to find a registered offload hook. 34 | 35 | :param method: The method to decorate. This method should be a method of a PyTorch module. 36 | """ 37 | if not is_accelerate_available(): 38 | return method 39 | accelerate_version = version.parse(accelerate.__version__).base_version 40 | if version.parse(accelerate_version) < version.parse("0.17.0"): 41 | return method 42 | 43 | def wrapper(self, *args, **kwargs): 44 | if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"): 45 | self._hf_hook.pre_forward(self) 46 | return method(self, *args, **kwargs) 47 | 48 | return wrapper 49 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
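# Illustrative sketch of `apply_forward_hook` from accelerate_utils.py above; `TinyAutoencoder` is a
# made-up module name used only for illustration:
#
#     import torch
#     from diffusers.utils import apply_forward_hook
#
#     class TinyAutoencoder(torch.nn.Module):
#         @apply_forward_hook
#         def encode(self, x):
#             # If accelerate >= 0.17.0 attached a CpuOffload hook (`_hf_hook`), its `pre_forward`
#             # runs here first, so weights are on the execution device even though this is not `forward`.
#             return x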
14 | import os 15 | 16 | from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home 17 | 18 | 19 | default_cache_path = HUGGINGFACE_HUB_CACHE 20 | 21 | 22 | CONFIG_NAME = "config.json" 23 | WEIGHTS_NAME = "diffusion_pytorch_model.bin" 24 | FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack" 25 | ONNX_WEIGHTS_NAME = "model.onnx" 26 | SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors" 27 | ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb" 28 | HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co" 29 | DIFFUSERS_CACHE = default_cache_path 30 | DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules" 31 | HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) 32 | DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"] 33 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/deprecation_utils.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import warnings 3 | from typing import Any, Dict, Optional, Union 4 | 5 | from packaging import version 6 | 7 | 8 | def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2): 9 | from .. import __version__ 10 | 11 | deprecated_kwargs = take_from 12 | values = () 13 | if not isinstance(args[0], tuple): 14 | args = (args,) 15 | 16 | for attribute, version_name, message in args: 17 | if version.parse(version.parse(__version__).base_version) >= version.parse(version_name): 18 | raise ValueError( 19 | f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" 20 | f" version {__version__} is >= {version_name}" 21 | ) 22 | 23 | warning = None 24 | if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs: 25 | values += (deprecated_kwargs.pop(attribute),) 26 | warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." 27 | elif hasattr(deprecated_kwargs, attribute): 28 | values += (getattr(deprecated_kwargs, attribute),) 29 | warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." 30 | elif deprecated_kwargs is None: 31 | warning = f"`{attribute}` is deprecated and will be removed in version {version_name}." 32 | 33 | if warning is not None: 34 | warning = warning + " " if standard_warn else "" 35 | warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel) 36 | 37 | if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0: 38 | call_frame = inspect.getouterframes(inspect.currentframe())[1] 39 | filename = call_frame.filename 40 | line_number = call_frame.lineno 41 | function = call_frame.function 42 | key, value = next(iter(deprecated_kwargs.items())) 43 | raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`") 44 | 45 | if len(values) == 0: 46 | return 47 | elif len(values) == 1: 48 | return values[0] 49 | return values 50 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/doc_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """ 15 | Doc utilities: Utilities related to documentation 16 | """ 17 | import re 18 | 19 | 20 | def replace_example_docstring(example_docstring): 21 | def docstring_decorator(fn): 22 | func_doc = fn.__doc__ 23 | lines = func_doc.split("\n") 24 | i = 0 25 | while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None: 26 | i += 1 27 | if i < len(lines): 28 | lines[i] = example_docstring 29 | func_doc = "\n".join(lines) 30 | else: 31 | raise ValueError( 32 | f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, " 33 | f"current docstring is:\n{func_doc}" 34 | ) 35 | fn.__doc__ = func_doc 36 | return fn 37 | 38 | return docstring_decorator 39 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject): 6 | _backends = ["flax", "transformers"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["flax", "transformers"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["flax", "transformers"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["flax", "transformers"]) 18 | 19 | 20 | class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): 21 | _backends = ["flax", "transformers"] 22 | 23 | def __init__(self, *args, **kwargs): 24 | requires_backends(self, ["flax", "transformers"]) 25 | 26 | @classmethod 27 | def from_config(cls, *args, **kwargs): 28 | requires_backends(cls, ["flax", "transformers"]) 29 | 30 | @classmethod 31 | def from_pretrained(cls, *args, **kwargs): 32 | requires_backends(cls, ["flax", "transformers"]) 33 | 34 | 35 | class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject): 36 | _backends = ["flax", "transformers"] 37 | 38 | def __init__(self, *args, **kwargs): 39 | requires_backends(self, ["flax", "transformers"]) 40 | 41 | @classmethod 42 | def from_config(cls, *args, **kwargs): 43 | requires_backends(cls, ["flax", "transformers"]) 44 | 45 | @classmethod 46 | def from_pretrained(cls, *args, **kwargs): 47 | requires_backends(cls, ["flax", "transformers"]) 48 | 49 | 50 | class FlaxStableDiffusionPipeline(metaclass=DummyObject): 51 | _backends = ["flax", "transformers"] 52 | 53 | def __init__(self, *args, **kwargs): 54 | requires_backends(self, ["flax", "transformers"]) 55 | 56 | @classmethod 57 | def from_config(cls, *args, **kwargs): 58 | requires_backends(cls, ["flax", "transformers"]) 59 | 60 | @classmethod 61 | def from_pretrained(cls, *args, **kwargs): 62 | requires_backends(cls, ["flax", "transformers"]) 63 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_note_seq_objects.py: 
-------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class MidiProcessor(metaclass=DummyObject): 6 | _backends = ["note_seq"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["note_seq"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["note_seq"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["note_seq"]) 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_onnx_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class OnnxRuntimeModel(metaclass=DummyObject): 6 | _backends = ["onnx"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["onnx"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["onnx"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["onnx"]) 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_torch_and_librosa_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class AudioDiffusionPipeline(metaclass=DummyObject): 6 | _backends = ["torch", "librosa"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["torch", "librosa"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["torch", "librosa"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["torch", "librosa"]) 18 | 19 | 20 | class Mel(metaclass=DummyObject): 21 | _backends = ["torch", "librosa"] 22 | 23 | def __init__(self, *args, **kwargs): 24 | requires_backends(self, ["torch", "librosa"]) 25 | 26 | @classmethod 27 | def from_config(cls, *args, **kwargs): 28 | requires_backends(cls, ["torch", "librosa"]) 29 | 30 | @classmethod 31 | def from_pretrained(cls, *args, **kwargs): 32 | requires_backends(cls, ["torch", "librosa"]) 33 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 
2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class LMSDiscreteScheduler(metaclass=DummyObject): 6 | _backends = ["torch", "scipy"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["torch", "scipy"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["torch", "scipy"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["torch", "scipy"]) 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_torch_and_torchsde_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class DPMSolverSDEScheduler(metaclass=DummyObject): 6 | _backends = ["torch", "torchsde"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["torch", "torchsde"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["torch", "torchsde"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["torch", "torchsde"]) 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class StableDiffusionKDiffusionPipeline(metaclass=DummyObject): 6 | _backends = ["torch", "transformers", "k_diffusion"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["torch", "transformers", "k_diffusion"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["torch", "transformers", "k_diffusion"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["torch", "transformers", "k_diffusion"]) 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 
2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): 6 | _backends = ["torch", "transformers", "onnx"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["torch", "transformers", "onnx"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["torch", "transformers", "onnx"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["torch", "transformers", "onnx"]) 18 | 19 | 20 | class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject): 21 | _backends = ["torch", "transformers", "onnx"] 22 | 23 | def __init__(self, *args, **kwargs): 24 | requires_backends(self, ["torch", "transformers", "onnx"]) 25 | 26 | @classmethod 27 | def from_config(cls, *args, **kwargs): 28 | requires_backends(cls, ["torch", "transformers", "onnx"]) 29 | 30 | @classmethod 31 | def from_pretrained(cls, *args, **kwargs): 32 | requires_backends(cls, ["torch", "transformers", "onnx"]) 33 | 34 | 35 | class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): 36 | _backends = ["torch", "transformers", "onnx"] 37 | 38 | def __init__(self, *args, **kwargs): 39 | requires_backends(self, ["torch", "transformers", "onnx"]) 40 | 41 | @classmethod 42 | def from_config(cls, *args, **kwargs): 43 | requires_backends(cls, ["torch", "transformers", "onnx"]) 44 | 45 | @classmethod 46 | def from_pretrained(cls, *args, **kwargs): 47 | requires_backends(cls, ["torch", "transformers", "onnx"]) 48 | 49 | 50 | class OnnxStableDiffusionPipeline(metaclass=DummyObject): 51 | _backends = ["torch", "transformers", "onnx"] 52 | 53 | def __init__(self, *args, **kwargs): 54 | requires_backends(self, ["torch", "transformers", "onnx"]) 55 | 56 | @classmethod 57 | def from_config(cls, *args, **kwargs): 58 | requires_backends(cls, ["torch", "transformers", "onnx"]) 59 | 60 | @classmethod 61 | def from_pretrained(cls, *args, **kwargs): 62 | requires_backends(cls, ["torch", "transformers", "onnx"]) 63 | 64 | 65 | class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject): 66 | _backends = ["torch", "transformers", "onnx"] 67 | 68 | def __init__(self, *args, **kwargs): 69 | requires_backends(self, ["torch", "transformers", "onnx"]) 70 | 71 | @classmethod 72 | def from_config(cls, *args, **kwargs): 73 | requires_backends(cls, ["torch", "transformers", "onnx"]) 74 | 75 | @classmethod 76 | def from_pretrained(cls, *args, **kwargs): 77 | requires_backends(cls, ["torch", "transformers", "onnx"]) 78 | 79 | 80 | class StableDiffusionOnnxPipeline(metaclass=DummyObject): 81 | _backends = ["torch", "transformers", "onnx"] 82 | 83 | def __init__(self, *args, **kwargs): 84 | requires_backends(self, ["torch", "transformers", "onnx"]) 85 | 86 | @classmethod 87 | def from_config(cls, *args, **kwargs): 88 | requires_backends(cls, ["torch", "transformers", "onnx"]) 89 | 90 | @classmethod 91 | def from_pretrained(cls, *args, **kwargs): 92 | requires_backends(cls, ["torch", "transformers", "onnx"]) 93 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by the command `make fix-copies`, do not edit. 
2 | from ..utils import DummyObject, requires_backends 3 | 4 | 5 | class SpectrogramDiffusionPipeline(metaclass=DummyObject): 6 | _backends = ["transformers", "torch", "note_seq"] 7 | 8 | def __init__(self, *args, **kwargs): 9 | requires_backends(self, ["transformers", "torch", "note_seq"]) 10 | 11 | @classmethod 12 | def from_config(cls, *args, **kwargs): 13 | requires_backends(cls, ["transformers", "torch", "note_seq"]) 14 | 15 | @classmethod 16 | def from_pretrained(cls, *args, **kwargs): 17 | requires_backends(cls, ["transformers", "torch", "note_seq"]) 18 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/model_card_template.md: -------------------------------------------------------------------------------- 1 | --- 2 | {{ card_data }} 3 | --- 4 | 5 | 7 | 8 | # {{ model_name | default("Diffusion Model") }} 9 | 10 | ## Model description 11 | 12 | This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library 13 | on the `{{ dataset_name }}` dataset. 14 | 15 | ## Intended uses & limitations 16 | 17 | #### How to use 18 | 19 | ```python 20 | # TODO: add an example code snippet for running this diffusion pipeline 21 | ``` 22 | 23 | #### Limitations and bias 24 | 25 | [TODO: provide examples of latent issues and potential remediations] 26 | 27 | ## Training data 28 | 29 | [TODO: describe the data used to train the model] 30 | 31 | ### Training hyperparameters 32 | 33 | The following hyperparameters were used during training: 34 | - learning_rate: {{ learning_rate }} 35 | - train_batch_size: {{ train_batch_size }} 36 | - eval_batch_size: {{ eval_batch_size }} 37 | - gradient_accumulation_steps: {{ gradient_accumulation_steps }} 38 | - optimizer: AdamW with betas=({{ adam_beta1 }}, {{ adam_beta2 }}), weight_decay={{ adam_weight_decay }} and epsilon={{ adam_epsilon }} 39 | - lr_scheduler: {{ lr_scheduler }} 40 | - lr_warmup_steps: {{ lr_warmup_steps }} 41 | - ema_inv_gamma: {{ ema_inv_gamma }} 42 | - ema_power: {{ ema_power }} 43 | - ema_max_decay: {{ ema_max_decay }} 44 | - mixed_precision: {{ mixed_precision }} 45 | 46 | ### Training results 47 | 48 | 📈 [TensorBoard logs](https://huggingface.co/{{ repo_name }}/tensorboard?#scalars) 49 | 50 | 51 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/outputs.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """ 15 | Generic utilities 16 | """ 17 | 18 | from collections import OrderedDict 19 | from dataclasses import fields 20 | from typing import Any, Tuple 21 | 22 | import numpy as np 23 | 24 | from .import_utils import is_torch_available 25 | 26 | 27 | def is_tensor(x): 28 | """ 29 | Tests if `x` is a `torch.Tensor` or `np.ndarray`.
30 | """ 31 | if is_torch_available(): 32 | import torch 33 | 34 | if isinstance(x, torch.Tensor): 35 | return True 36 | 37 | return isinstance(x, np.ndarray) 38 | 39 | 40 | class BaseOutput(OrderedDict): 41 | """ 42 | Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a 43 | tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular 44 | Python dictionary. 45 | 46 | 47 | 48 | You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple 49 | first. 50 | 51 | 52 | """ 53 | 54 | def __post_init__(self): 55 | class_fields = fields(self) 56 | 57 | # Safety and consistency checks 58 | if not len(class_fields): 59 | raise ValueError(f"{self.__class__.__name__} has no fields.") 60 | 61 | first_field = getattr(self, class_fields[0].name) 62 | other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) 63 | 64 | if other_fields_are_none and isinstance(first_field, dict): 65 | for key, value in first_field.items(): 66 | self[key] = value 67 | else: 68 | for field in class_fields: 69 | v = getattr(self, field.name) 70 | if v is not None: 71 | self[field.name] = v 72 | 73 | def __delitem__(self, *args, **kwargs): 74 | raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") 75 | 76 | def setdefault(self, *args, **kwargs): 77 | raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") 78 | 79 | def pop(self, *args, **kwargs): 80 | raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") 81 | 82 | def update(self, *args, **kwargs): 83 | raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") 84 | 85 | def __getitem__(self, k): 86 | if isinstance(k, str): 87 | inner_dict = dict(self.items()) 88 | return inner_dict[k] 89 | else: 90 | return self.to_tuple()[k] 91 | 92 | def __setattr__(self, name, value): 93 | if name in self.keys() and value is not None: 94 | # Don't call self.__setitem__ to avoid recursion errors 95 | super().__setitem__(name, value) 96 | super().__setattr__(name, value) 97 | 98 | def __setitem__(self, key, value): 99 | # Will raise a KeyException if needed 100 | super().__setitem__(key, value) 101 | # Don't call self.__setattr__ to avoid recursion errors 102 | super().__setattr__(key, value) 103 | 104 | def to_tuple(self) -> Tuple[Any]: 105 | """ 106 | Convert self to a tuple containing all the attributes/keys that are not `None`. 
107 | """ 108 | return tuple(self[k] for k in self.keys()) 109 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/pil_utils.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import PIL.Image 4 | import PIL.ImageOps 5 | from packaging import version 6 | from PIL import Image 7 | 8 | 9 | if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): 10 | PIL_INTERPOLATION = { 11 | "linear": PIL.Image.Resampling.BILINEAR, 12 | "bilinear": PIL.Image.Resampling.BILINEAR, 13 | "bicubic": PIL.Image.Resampling.BICUBIC, 14 | "lanczos": PIL.Image.Resampling.LANCZOS, 15 | "nearest": PIL.Image.Resampling.NEAREST, 16 | } 17 | else: 18 | PIL_INTERPOLATION = { 19 | "linear": PIL.Image.LINEAR, 20 | "bilinear": PIL.Image.BILINEAR, 21 | "bicubic": PIL.Image.BICUBIC, 22 | "lanczos": PIL.Image.LANCZOS, 23 | "nearest": PIL.Image.NEAREST, 24 | } 25 | 26 | 27 | def pt_to_pil(images): 28 | """ 29 | Convert a torch image to a PIL image. 30 | """ 31 | images = (images / 2 + 0.5).clamp(0, 1) 32 | images = images.cpu().permute(0, 2, 3, 1).float().numpy() 33 | images = numpy_to_pil(images) 34 | return images 35 | 36 | 37 | def numpy_to_pil(images): 38 | """ 39 | Convert a numpy image or a batch of images to a PIL image. 40 | """ 41 | if images.ndim == 3: 42 | images = images[None, ...] 43 | images = (images * 255).round().astype("uint8") 44 | if images.shape[-1] == 1: 45 | # special case for grayscale (single channel) images 46 | pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] 47 | else: 48 | pil_images = [Image.fromarray(image) for image in images] 49 | 50 | return pil_images 51 | 52 | 53 | def make_image_grid(images: List[PIL.Image.Image], rows: int, cols: int, resize: int = None) -> PIL.Image.Image: 54 | """ 55 | Prepares a single grid of images. Useful for visualization purposes. 56 | """ 57 | assert len(images) == rows * cols 58 | 59 | if resize is not None: 60 | images = [img.resize((resize, resize)) for img in images] 61 | 62 | w, h = images[0].size 63 | grid = Image.new("RGB", size=(cols * w, rows * h)) 64 | 65 | for i, img in enumerate(images): 66 | grid.paste(img, box=(i % cols * w, i // cols * h)) 67 | return grid 68 | -------------------------------------------------------------------------------- /diffusers/src/diffusers/utils/torch_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 The HuggingFace Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """ 15 | PyTorch utilities: Utilities related to PyTorch 16 | """ 17 | from typing import List, Optional, Tuple, Union 18 | 19 | from . 
import logging 20 | from .import_utils import is_torch_available, is_torch_version 21 | 22 | 23 | if is_torch_available(): 24 | import torch 25 | 26 | logger = logging.get_logger(__name__)  # pylint: disable=invalid-name 27 | 28 | try: 29 | from torch._dynamo import allow_in_graph as maybe_allow_in_graph 30 | except (ImportError, ModuleNotFoundError): 31 | 32 | def maybe_allow_in_graph(cls): 33 | return cls 34 | 35 | 36 | def randn_tensor( 37 | shape: Union[Tuple, List], 38 | generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None, 39 | device: Optional["torch.device"] = None, 40 | dtype: Optional["torch.dtype"] = None, 41 | layout: Optional["torch.layout"] = None, 42 | ): 43 | """A helper function to create random tensors on the desired `device` with the desired `dtype`. When 44 | passing a list of generators, you can seed each batch element individually. If CPU generators are passed, the tensor 45 | is always created on the CPU. 46 | """ 47 | # device on which tensor is created defaults to device 48 | rand_device = device 49 | batch_size = shape[0] 50 | 51 | layout = layout or torch.strided 52 | device = device or torch.device("cpu") 53 | 54 | if generator is not None: 55 | gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type 56 | if gen_device_type != device.type and gen_device_type == "cpu": 57 | rand_device = "cpu" 58 | if device != "mps": 59 | logger.info( 60 | f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." 61 | f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" 62 | f" slightly speed up this function by passing a generator that was created on the {device} device." 63 | ) 64 | elif gen_device_type != device.type and gen_device_type == "cuda": 65 | raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") 66 | 67 | # make sure generator list of length 1 is treated like a non-list 68 | if isinstance(generator, list) and len(generator) == 1: 69 | generator = generator[0] 70 | 71 | if isinstance(generator, list): 72 | shape = (1,) + shape[1:] 73 | latents = [ 74 | torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) 75 | for i in range(batch_size) 76 | ] 77 | latents = torch.cat(latents, dim=0).to(device) 78 | else: 79 | latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) 80 | 81 | return latents 82 | 83 | 84 | def is_compiled_module(module): 85 | """Check whether the module was compiled with torch.compile()""" 86 | if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): 87 | return False 88 | return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) 89 | -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs0/0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs0/0000.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs0/0001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs0/0001.png
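The two helper modules listed above (`pil_utils.py` and `torch_utils.py`) can be exercised together. Below is a minimal usage sketch, assuming this tree is installed as the `diffusers` package and that `torch` and `Pillow` are available; the latent shape, seeds, grid size, and output filename are illustrative choices, not part of the library.

```python
import torch

from diffusers.utils.pil_utils import make_image_grid, pt_to_pil
from diffusers.utils.torch_utils import randn_tensor

# One CPU generator per batch element makes each sample individually reproducible;
# randn_tensor draws the noise on CPU and then moves the result to the target device.
generators = [torch.Generator("cpu").manual_seed(i) for i in range(4)]
latents = randn_tensor(
    (4, 3, 64, 64),
    generator=generators,
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    dtype=torch.float32,
)

# pt_to_pil maps the [-1, 1] range to [0, 1] and converts each sample to a PIL image;
# make_image_grid then tiles the four images into a single 2x2 picture.
grid = make_image_grid(pt_to_pil(latents), rows=2, cols=2)
grid.save("noise_grid.png")
```

Passing one generator per batch element is what keeps each sample reproducible on its own; a single generator would seed the whole batch at once.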
-------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs0/0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs0/0002.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs0/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs0/0003.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs0/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs0/0004.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs1/0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs1/0000.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs1/0001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs1/0001.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs1/0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs1/0002.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs1/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs1/0003.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs1/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs1/0004.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs2/0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs2/0000.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs2/0001.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs2/0001.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs2/0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs2/0002.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs2/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs2/0003.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs2/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs2/0004.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs3/0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs3/0000.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs3/0001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs3/0001.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs3/0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs3/0002.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs3/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs3/0003.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs3/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs3/0004.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs4/0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs4/0000.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs4/0001.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs4/0001.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs4/0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs4/0002.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs4/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs4/0003.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs4/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs4/0004.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs5/0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs5/0000.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs5/0001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs5/0001.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs5/0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs5/0002.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs5/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs5/0003.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs5/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs5/0004.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/0.png 
-------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/1.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/10.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/11.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/12.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/2.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/3.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/4.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/5.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/6.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/6.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/7.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/8.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/ContentImages/9.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Cyberpunk --n 160 --b 2.8 --s 1.1/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Cyberpunk --n 160 --b 2.8 --s 1.1/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Embroidery Art --n 160 --b 1.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Embroidery Art --n 160 --b 1.8 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Pixel Punk --n 160 --b 2.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Pixel Punk --n 160 --b 2.8 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Sketching --n 160 --b 2.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Sketching --n 160 --b 2.8 --s 0.8/output.png -------------------------------------------------------------------------------- 
/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Studio Ghibli --n 160 --b 2.8 --s 0.8/00000_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Studio Ghibli --n 160 --b 2.8 --s 0.8/00000_03.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Wasteland --n 160 --b 2.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Wasteland --n 160 --b 2.8 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Watercolor Painting --n 160 --b 2.9 --s 0.3/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 Watercolor Painting --n 160 --b 2.9 --s 0.3/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 chineseink --n 320 --b 2.5 --s 0.5/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 chineseink --n 320 --b 2.5 --s 0.5/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img00 oilpainting --n 160 --b 2.8 --s 0.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img00 oilpainting --n 160 --b 2.8 --s 0.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Cyberpunk --n 320 --b 2.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Cyberpunk --n 320 --b 2.8 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Embroidery Art--n 320 --b 2.0 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Embroidery Art--n 320 --b 2.0 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Oil Painting --n 160 --b 3.0 --s 1.4/output.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Oil Painting --n 160 --b 3.0 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Pixel Punk --n 320 --b 2.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Pixel Punk --n 320 --b 2.8 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Sketching --n 320 --b 2.6 --s 1.0/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Sketching --n 320 --b 2.6 --s 1.0/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Studio Ghibli --n 160 --b 2.5 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Studio Ghibli --n 160 --b 2.5 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Wasteland --n 320 --b 2.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Wasteland --n 320 --b 2.8 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Watercolor Painting --n 160 --b 3.0 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 Watercolor Painting --n 160 --b 3.0 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img01 chineseink--n 320 --b 2.5 --s 0.6/00000_00.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img01 chineseink--n 320 --b 2.5 --s 0.6/00000_00.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Cyberpunk --n 160 --b 2.8 --s 1.6/output.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Cyberpunk --n 160 --b 2.8 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Embroidery Art --n 160 --b 2.4 --s 1.7/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Embroidery Art --n 160 --b 2.4 --s 1.7/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 OilPainting --n 160 --b 2.6 --s 1.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 OilPainting --n 160 --b 2.6 --s 1.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Sketching --n 160 --b 2.8 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Sketching --n 160 --b 2.8 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Studio Ghibli --n 160 --b 2.8 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 Studio Ghibli --n 160 --b 2.8 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 chineseink --n 160 --b 2.8 --s 1.1/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 chineseink --n 160 --b 2.8 --s 1.1/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 pixel punk --n 160 --b 2.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 pixel punk --n 160 --b 2.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 wasteland --n 160 --b 2.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 wasteland --n 160 
--b 2.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img02 watercolor painting --n 160 --b 2.6 --s 0.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img02 watercolor painting --n 160 --b 2.6 --s 0.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Cyberpunk --n 160 --b 2.2 --s 1.0/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Cyberpunk --n 160 --b 2.2 --s 1.0/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Embroidery Art --n 160 --b 2.8 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Embroidery Art --n 160 --b 2.8 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Oil painting --n 160 --b 2.8 --s 1.4/00000_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Oil painting --n 160 --b 2.8 --s 1.4/00000_02.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Pixel punk --n 160 --b 2.2 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Pixel punk --n 160 --b 2.2 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Sketching --n 160 --b 2.8 --s 2.5/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Sketching --n 160 --b 2.8 --s 2.5/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Studio Ghibli --n 160 --b 2.8 --s 1.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Studio Ghibli --n 160 --b 2.8 --s 1.8/output.png -------------------------------------------------------------------------------- 
/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Watercolor Painting --n 160 --b 2.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 Watercolor Painting --n 160 --b 2.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 chinese ink --n 160 --b 3.0 --s 0.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 chinese ink --n 160 --b 3.0 --s 0.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img03 wasteland --n 160 --b 2.8 --s 0.5/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img03 wasteland --n 160 --b 2.8 --s 0.5/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Chinese Ink --n 160 --b 2.8 --s 1.5/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Chinese Ink --n 160 --b 2.8 --s 1.5/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Cyberpunk --n 160 --b 1.8 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Cyberpunk --n 160 --b 1.8 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Embroidery Art --n 160 --b 2.6 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Embroidery Art --n 160 --b 2.6 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 OilPainting --n 160 --b 2.6 --s 0.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 OilPainting --n 160 --b 2.6 --s 0.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Pixel punk --n 160 --b 2.2 --s 0.8/output.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Pixel punk --n 160 --b 2.2 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Sketching --n 320 --b 2.8 --s 1.0/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Sketching --n 320 --b 2.8 --s 1.0/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Studio Ghibli --n 160 --b 3.0 --s 0.5/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Studio Ghibli --n 160 --b 3.0 --s 0.5/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Wasteland --n 160 --b 2.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Wasteland --n 160 --b 2.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Watercolor Painting --n 160 --b 2.6 --s 0.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img04 Watercolor Painting --n 160 --b 2.6 --s 0.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Cyberpunk --n 160 --b 2.8 --s 0.7/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Cyberpunk --n 160 --b 2.8 --s 0.7/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Chinese Ink --n 160 --b 2.8 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Chinese Ink --n 160 --b 2.8 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Embroidery Art --n 160 --b 2.8 --s 0.6/output.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Embroidery Art --n 160 --b 2.8 --s 0.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Oilpainting --n 160 --b 2.8 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Oilpainting --n 160 --b 2.8 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Pixel punk --n 160 --b 2.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Pixel punk --n 160 --b 2.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Sketching --n 160 --b 3.0 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Sketching --n 160 --b 3.0 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Studio Ghibli --n 160 --b 2.8 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Studio Ghibli --n 160 --b 2.8 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Wasteland --n 160 --b 2.8 --s 0.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Wasteland --n 160 --b 2.8 --s 0.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Watercolor painting --n 160 --b 2.8 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img05 Watercolor painting --n 160 --b 2.8 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img10 Illumination --n 320 --b 3.5 --s 2.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img10 
Illumination --n 320 --b 3.5 --s 2.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img10 JOJO --n 160 --b 3.0 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img10 JOJO --n 160 --b 3.0 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img10 Studio Ghibli --n 160 --b 3.0 --s 1.1/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img10 Studio Ghibli --n 160 --b 3.0 --s 1.1/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img10 Ufotable --n 320 --b 2.8 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img10 Ufotable --n 320 --b 2.8 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img11 Illumination --n 160 --b 2.6 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img11 Illumination --n 160 --b 2.6 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img11 JOJO --n 160 --b 2.8 --s 0.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img11 JOJO --n 160 --b 2.8 --s 0.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img11 Studio Ghibli --n 160 --b 2.8 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img11 Studio Ghibli --n 160 --b 2.8 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img11 Ufotable --n 160 --b 3.0 --s 1.1/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img11 Ufotable --n 160 --b 3.0 --s 1.1/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img17 JOJO --n 160 --b 3.0 
--s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img17 JOJO --n 160 --b 3.0 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img17 Ufotable --n 160 --b 3.0 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img17 Ufotable --n 160 --b 3.0 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img17 Ufotable --n 160 --b 3.5 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img17 Ufotable --n 160 --b 3.5 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img18 Children Crayon Painting --n 160 --b 2.2 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img18 Children Crayon Painting --n 160 --b 2.2 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img18 LEGO Toy --n 160 --b 2.2 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img18 LEGO Toy --n 160 --b 2.2 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img18 Origami --n 160 --b 2.2 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img18 Origami --n 160 --b 2.2 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img18 Pixel --n 160 --b 1.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img18 Pixel --n 160 --b 1.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img19 Children Crayon Painting --n 160 --b 2.4 --s 1.4/output.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img19 Children Crayon Painting --n 160 --b 2.4 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img19 LEGO Toy --n 160 --b 2.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img19 LEGO Toy --n 160 --b 2.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img19 Pixel --n 160 --b 2.4 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img19 Pixel --n 160 --b 2.4 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img20 Children Crayon Painting --n 160 --b 2.2 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img20 Children Crayon Painting --n 160 --b 2.2 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img20 LEGO Toy --n 160 --b 2.2 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img20 LEGO Toy --n 160 --b 2.2 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img20 Origami --n 160 --b 2.8 --s 0.8/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img20 Origami --n 160 --b 2.8 --s 0.8/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img20 Pixel --n 160 --b 2.5 --s 1.6/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img20 Pixel --n 160 --b 2.5 --s 1.6/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img21 Children Crayon Painting --n 160 --b 2.4 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img21 Children 
Crayon Painting --n 160 --b 2.4 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img21 LEGO Toy --n 160 --b 1.8 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img21 LEGO Toy --n 160 --b 1.8 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img21 Origami --n 160 --b 1.0 --s 1.4/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img21 Origami --n 160 --b 1.0 --s 1.4/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/img21 Pixel --n 160 --b 2.5 --s 1.2/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/ContentImages/imgs_and_hyperparameters/img21 Pixel --n 160 --b 2.5 --s 1.2/output.png -------------------------------------------------------------------------------- /diffusers_test/ContentImages/imgs_and_hyperparameters/t: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /diffusers_test/__pycache__/pipeline_stable_diffusion_img2img.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/__pycache__/pipeline_stable_diffusion_img2img.cpython-38.pyc -------------------------------------------------------------------------------- /diffusers_test/__pycache__/pipeline_stable_diffusion_xl.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/__pycache__/pipeline_stable_diffusion_xl.cpython-311.pyc -------------------------------------------------------------------------------- /diffusers_test/__pycache__/pipeline_stable_diffusion_xl.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/diffusers_test/__pycache__/pipeline_stable_diffusion_xl.cpython-38.pyc --------------------------------------------------------------------------------
/diffusers_test/centercrop.py:
--------------------------------------------------------------------------------
from PIL import Image

def crop_to_square_and_save(inputpath, outputpath):
    # Open the image
    with Image.open(inputpath) as img:
        # Get the image width and height
        width, height = img.size

        # The new edge length for the crop is the smaller of the width and height
        new_edge_length = min(width, height)

        # Compute the top-left and bottom-right coordinates of the centered crop box
        left = (width - new_edge_length) // 2
        top = (height - new_edge_length) // 2
        right = (width + new_edge_length) // 2
        bottom = (height + new_edge_length) // 2

        # Crop the image
        img_cropped = img.crop((left, top, right, bottom))

        # Save the cropped image as PNG
        img_cropped.save(outputpath, "PNG")

if __name__ == "__main__":
    crop_to_square_and_save("path/to/your/input/image.jpg", "path/to/your/output/image.png")
--------------------------------------------------------------------------------
/diffusers_test/style_prompt0.json:
--------------------------------------------------------------------------------
[
    "Oil painting style"
]
--------------------------------------------------------------------------------
/diffusers_test/style_prompt1.json:
--------------------------------------------------------------------------------
[
    "Origami Art Style"
]
--------------------------------------------------------------------------------
/diffusers_test/style_prompt2.json:
--------------------------------------------------------------------------------
[
    "Van Gogh Starry Sky Style"
]
--------------------------------------------------------------------------------
/diffusers_test/style_prompt3.json:
--------------------------------------------------------------------------------
[
    "studio Ghibli style"
]
--------------------------------------------------------------------------------
/diffusers_test/style_prompt4.json:
--------------------------------------------------------------------------------
[
    "Cyberpunk style"
]
--------------------------------------------------------------------------------
/diffusers_test/style_prompt5.json:
--------------------------------------------------------------------------------
[
    "Children Crayon Drawing Style"
]
--------------------------------------------------------------------------------
/imgs/teaser.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/FreeStyleFreeLunch/FreeStyle/c55481d4ab32ef0f96d72838253721707fede5c2/imgs/teaser.png
--------------------------------------------------------------------------------
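A minimal sketch of how the crop_to_square_and_save helper from diffusers_test/centercrop.py might be applied to a whole folder of content images before running the test pipelines; the input_dir and output_dir paths below are illustrative assumptions, not paths taken from this repository.

import os

# Helper defined in diffusers_test/centercrop.py; assumes this script runs from that directory.
from centercrop import crop_to_square_and_save

# Hypothetical input/output locations; adjust to your own layout.
input_dir = "ContentImages/raw"
output_dir = "ContentImages/imgs"
os.makedirs(output_dir, exist_ok=True)

for name in sorted(os.listdir(input_dir)):
    # Only process common image formats.
    if name.lower().endswith((".jpg", ".jpeg", ".png")):
        src = os.path.join(input_dir, name)
        dst = os.path.join(output_dir, os.path.splitext(name)[0] + ".png")
        crop_to_square_and_save(src, dst)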