├── models
├── vae
│ └── put_vae_here
├── loras
│ └── put_loras_here
├── inpaint
│ └── put_inpaint_here
├── unet
│ └── put_unet_files_here
├── gligen
│ └── put_gligen_models_here
├── checkpoints
│ └── put_checkpoints_here
├── diffusers
│ └── put_diffusers_models_here
├── hypernetworks
│ └── put_hypernetworks_here
├── clip
│ └── put_clip_or_text_encoder_models_here
├── clip_vision
│ └── put_clip_vision_models_here
├── controlnet
│ └── put_controlnets_and_t2i_here
├── style_models
│ └── put_t2i_style_model_here
├── prompt_expansion
│ ├── put_prompt_expansion_here
│ └── fooocus_expansion
│ │ ├── special_tokens_map.json
│ │ ├── tokenizer_config.json
│ │ └── config.json
├── safety_checker
│ └── put_safety_checker_models_here
├── upscale_models
│ └── put_esrgan_and_other_upscale_models_here
├── embeddings
│ └── put_embeddings_or_textual_inversion_concepts_here
├── vae_approx
│ └── put_taesd_encoder_pth_and_taesd_decoder_pth_here
└── configs
│ ├── v2-inference.yaml
│ ├── v2-inference_fp32.yaml
│ ├── v2-inference-v.yaml
│ ├── v2-inference-v_fp32.yaml
│ ├── v1-inference.yaml
│ ├── v1-inference_fp16.yaml
│ ├── anything_v3.yaml
│ ├── v1-inference_clip_skip_2.yaml
│ ├── v1-inference_clip_skip_2_fp16.yaml
│ └── v1-inpainting-inference.yaml
├── modules
├── __init__.py
├── constants.py
├── html.py
├── ops.py
├── upscaler.py
├── model_loader.py
├── auth.py
├── extra_utils.py
├── style_sorter.py
├── localization.py
└── patch_precision.py
├── ldm_patched
├── pfn
│ ├── __init__.py
│ ├── architecture
│ │ ├── __init__.py
│ │ ├── timm
│ │ │ └── helpers.py
│ │ ├── OmniSR
│ │ │ ├── pixelshuffle.py
│ │ │ ├── OSAG.py
│ │ │ └── layernorm.py
│ │ ├── LICENSE-HAT
│ │ ├── LICENSE-RealESRGAN
│ │ └── face
│ │ │ ├── LICENSE-codeformer
│ │ │ └── fused_act.py
│ └── types.py
├── ldm
│ └── modules
│ │ ├── encoders
│ │ ├── __init__.py
│ │ └── noise_aug_modules.py
│ │ ├── diffusionmodules
│ │ └── __init__.py
│ │ └── distributions
│ │ └── __init__.py
├── modules
│ ├── options.py
│ ├── checkpoint_pickle.py
│ ├── clip_vision_config_g.json
│ ├── clip_vision_config_h.json
│ ├── clip_vision_config_vitl.json
│ ├── sd1_tokenizer
│ │ ├── special_tokens_map.json
│ │ └── tokenizer_config.json
│ ├── clip_config_bigg.json
│ ├── sd2_clip_config.json
│ ├── sd1_clip_config.json
│ ├── sd2_clip.py
│ ├── diffusers_load.py
│ └── conds.py
├── licenses-3rd
│ ├── kdiffusion
│ ├── taesd
│ ├── ldm
│ └── chainer
└── contrib
│ ├── external_sdupscale.py
│ ├── external_perpneg.py
│ └── external_align_your_steps.py
├── shared.py
├── fooocus_version.py
├── .github
├── CODEOWNERS
├── dependabot.yml
├── ISSUE_TEMPLATE
│ ├── config.yml
│ └── feature_request.yml
└── workflows
│ └── build_container.yml
├── wildcards
├── color_flower.txt
├── .gitignore
├── color.txt
├── animal.txt
└── extended-color.txt
├── requirements_docker.txt
├── extras
├── BLIP
│ ├── models
│ │ └── bert_tokenizer
│ │ │ ├── tokenizer_config.json
│ │ │ └── config.json
│ └── configs
│ │ ├── retrieval_msrvtt.yaml
│ │ ├── nocaps.yaml
│ │ ├── nlvr.yaml
│ │ ├── bert_config.json
│ │ ├── med_config.json
│ │ ├── pretrain.yaml
│ │ ├── vqa.yaml
│ │ ├── caption_coco.yaml
│ │ ├── retrieval_coco.yaml
│ │ └── retrieval_flickr.yaml
├── facexlib
│ ├── utils
│ │ └── __init__.py
│ ├── parsing
│ │ ├── __init__.py
│ │ └── resnet.py
│ └── detection
│ │ └── __init__.py
├── safety_checker
│ └── configs
│ │ └── preprocessor_config.json
├── GroundingDINO
│ └── config
│ │ └── GroundingDINO_SwinT_OGC.py
└── face_crop.py
├── notification-example.mp3
├── sdxl_styles
└── samples
│ ├── baroque.jpg
│ ├── cubism.jpg
│ ├── dadaism.jpg
│ ├── fauvism.jpg
│ ├── glo_fi.jpg
│ ├── idyllic.jpg
│ ├── op_art.jpg
│ ├── rococo.jpg
│ ├── academia.jpg
│ ├── art_deco.jpg
│ ├── futurism.jpg
│ ├── game_gta.jpg
│ ├── mk_mosaic.jpg
│ ├── mk_palekh.jpg
│ ├── mre_anime.jpg
│ ├── mre_comic.jpg
│ ├── mre_manga.jpg
│ ├── photo_hdr.jpg
│ ├── pop_art_2.jpg
│ ├── sai_anime.jpg
│ ├── sketchup.jpg
│ ├── terragen.jpg
│ ├── ads_luxury.jpg
│ ├── ads_retail.jpg
│ ├── art_nouveau.jpg
│ ├── astral_aura.jpg
│ ├── avant_garde.jpg
│ ├── caricature.jpg
│ ├── dark_fantasy.jpg
│ ├── doodle_art.jpg
│ ├── flat_2d_art.jpg
│ ├── fooocus_pony.jpg
│ ├── fooocus_v2.jpg
│ ├── game_mario.jpg
│ ├── game_pokemon.jpg
│ ├── game_zelda.jpg
│ ├── glitchcore.jpg
│ ├── graffiti_art.jpg
│ ├── high_fashion.jpg
│ ├── logo_design.jpg
│ ├── mandola_art.jpg
│ ├── medievalism.jpg
│ ├── minimalism.jpg
│ ├── misc_disco.jpg
│ ├── misc_gothic.jpg
│ ├── misc_grunge.jpg
│ ├── misc_horror.jpg
│ ├── misc_kawaii.jpg
│ ├── misc_macabre.jpg
│ ├── misc_manga.jpg
│ ├── misc_space.jpg
│ ├── misc_tribal.jpg
│ ├── mk_atompunk.jpg
│ ├── mk_basquiat.jpg
│ ├── mk_dayak_art.jpg
│ ├── mk_de_stijl.jpg
│ ├── mk_gyotaku.jpg
│ ├── mk_herbarium.jpg
│ ├── mk_pollock.jpg
│ ├── mk_scrimshaw.jpg
│ ├── mk_shibori.jpg
│ ├── mk_ukiyo_e.jpg
│ ├── mk_van_gogh.jpg
│ ├── neo_baroque.jpg
│ ├── neo_futurism.jpg
│ ├── neo_rococo.jpg
│ ├── pebble_art.jpg
│ ├── photo_alien.jpg
│ ├── random_style.jpg
│ ├── sai_3d_model.jpg
│ ├── sai_enhance.jpg
│ ├── sai_line_art.jpg
│ ├── sai_lowpoly.jpg
│ ├── sai_neonpunk.jpg
│ ├── sai_origami.jpg
│ ├── sai_texture.jpg
│ ├── steampunk_2.jpg
│ ├── suprematism.jpg
│ ├── surrealism.jpg
│ ├── watercolor_2.jpg
│ ├── action_figure.jpg
│ ├── ads_automotive.jpg
│ ├── ads_corporate.jpg
│ ├── cel_shaded_art.jpg
│ ├── cinematic_diva.jpg
│ ├── classicism_art.jpg
│ ├── conceptual_art.jpg
│ ├── constructivism.jpg
│ ├── dmt_art_style.jpg
│ ├── expressionism.jpg
│ ├── fooocus_sharp.jpg
│ ├── game_minecraft.jpg
│ ├── impressionism.jpg
│ ├── marker_drawing.jpg
│ ├── misc_dystopian.jpg
│ ├── misc_nautical.jpg
│ ├── misc_zentangle.jpg
│ ├── mk_andy_warhol.jpg
│ ├── mk_chicano_art.jpg
│ ├── mk_embroidery.jpg
│ ├── mk_luminogram.jpg
│ ├── mk_mokume_gane.jpg
│ ├── mk_suminagashi.jpg
│ ├── mk_tlingit_art.jpg
│ ├── mre_bad_dream.jpg
│ ├── mre_brave_art.jpg
│ ├── mre_dark_dream.jpg
│ ├── mre_gloomy_art.jpg
│ ├── mre_space_art.jpg
│ ├── mre_undead_art.jpg
│ ├── neo_byzantine.jpg
│ ├── neoclassicism.jpg
│ ├── photo_glamour.jpg
│ ├── sai_cinematic.jpg
│ ├── sai_comic_book.jpg
│ ├── sai_craft_clay.jpg
│ ├── sai_isometric.jpg
│ ├── sai_pixel_art.jpg
│ ├── silhouette_art.jpg
│ ├── adorable_kawaii.jpg
│ ├── ads_advertising.jpg
│ ├── ads_real_estate.jpg
│ ├── artstyle_abstract.jpg
│ ├── artstyle_art_deco.jpg
│ ├── artstyle_cubist.jpg
│ ├── artstyle_graffiti.jpg
│ ├── artstyle_pop_art.jpg
│ ├── double_exposure.jpg
│ ├── fooocus_cinematic.jpg
│ ├── fooocus_enhance.jpg
│ ├── fooocus_negative.jpg
│ ├── futuristic_sci_fi.jpg
│ ├── game_retro_arcade.jpg
│ ├── game_retro_game.jpg
│ ├── googie_art_style.jpg
│ ├── macro_photography.jpg
│ ├── misc_dreamscape.jpg
│ ├── misc_fairy_tale.jpg
│ ├── misc_lovecraftian.jpg
│ ├── misc_metropolis.jpg
│ ├── misc_minimalist.jpg
│ ├── misc_monochrome.jpg
│ ├── mk_adnate_style.jpg
│ ├── mk_afrofuturism.jpg
│ ├── mk_albumen_print.jpg
│ ├── mk_aquatint_print.jpg
│ ├── mk_bauhaus_style.jpg
│ ├── mk_bromoil_print.jpg
│ ├── mk_calotype_print.jpg
│ ├── mk_carnival_glass.jpg
│ ├── mk_coloring_book.jpg
│ ├── mk_constructivism.jpg
│ ├── mk_fayum_portrait.jpg
│ ├── mk_gond_painting.jpg
│ ├── mk_halftone_print.jpg
│ ├── mk_inuit_carving.jpg
│ ├── mk_lite_brite_art.jpg
│ ├── mk_one_line_art.jpg
│ ├── mk_pictorialism.jpg
│ ├── mk_punk_collage.jpg
│ ├── mk_singer_sargent.jpg
│ ├── mre_elemental_art.jpg
│ ├── mre_underground.jpg
│ ├── neo_impressionism.jpg
│ ├── photo_film_noir.jpg
│ ├── photo_neon_noir.jpg
│ ├── photo_silhouette.jpg
│ ├── photo_tilt_shift.jpg
│ ├── sai_analog_film.jpg
│ ├── sai_digital_art.jpg
│ ├── sai_fantasy_art.jpg
│ ├── sai_photographic.jpg
│ ├── simple_vector_art.jpg
│ ├── sticker_designs.jpg
│ ├── vibrant_rim_light.jpg
│ ├── artstyle_steampunk.jpg
│ ├── artstyle_surrealist.jpg
│ ├── artstyle_typography.jpg
│ ├── artstyle_watercolor.jpg
│ ├── colored_pencil_art.jpg
│ ├── fooocus_masterpiece.jpg
│ ├── fooocus_photograph.jpg
│ ├── fortnite_art_style.jpg
│ ├── game_bubble_bobble.jpg
│ ├── game_cyberpunk_game.jpg
│ ├── game_fighting_game.jpg
│ ├── game_strategy_game.jpg
│ ├── game_streetfighter.jpg
│ ├── infographic_drawing.jpg
│ ├── luxurious_elegance.jpg
│ ├── misc_architectural.jpg
│ ├── misc_stained_glass.jpg
│ ├── mk_alcohol_ink_art.jpg
│ ├── mk_anthotype_print.jpg
│ ├── mk_blacklight_paint.jpg
│ ├── mk_color_sketchnote.jpg
│ ├── mk_cross_stitching.jpg
│ ├── mk_cyanotype_print.jpg
│ ├── mk_encaustic_paint.jpg
│ ├── mk_pichwai_painting.jpg
│ ├── mk_vitreous_enamel.jpg
│ ├── mre_artistic_vision.jpg
│ ├── mre_dark_cyberpunk.jpg
│ ├── mre_heroic_fantasy.jpg
│ ├── mre_sumi_e_detailed.jpg
│ ├── mre_sumi_e_symbolic.jpg
│ ├── papercraft_collage.jpg
│ ├── papercraft_kirigami.jpg
│ ├── photo_long_exposure.jpg
│ ├── volumetric_lighting.jpg
│ ├── abstract_expressionism.jpg
│ ├── adorable_3d_character.jpg
│ ├── ads_fashion_editorial.jpg
│ ├── ads_food_photography.jpg
│ ├── artstyle_art_nouveau.jpg
│ ├── artstyle_expressionist.jpg
│ ├── artstyle_hyperrealism.jpg
│ ├── artstyle_impressionist.jpg
│ ├── artstyle_pointillism.jpg
│ ├── artstyle_psychedelic.jpg
│ ├── artstyle_renaissance.jpg
│ ├── bauhaus_style_poster.jpg
│ ├── character_design_sheet.jpg
│ ├── color_field_painting.jpg
│ ├── dark_moody_atmosphere.jpg
│ ├── faded_polaroid_photo.jpg
│ ├── fooocus_semi_realistic.jpg
│ ├── futuristic_cybernetic.jpg
│ ├── futuristic_futuristic.jpg
│ ├── futuristic_vaporwave.jpg
│ ├── game_rpg_fantasy_game.jpg
│ ├── harlem_renaissance_art.jpg
│ ├── ink_dripping_drawing.jpg
│ ├── japanese_ink_drawing.jpg
│ ├── knolling_photography.jpg
│ ├── misc_techwear_fashion.jpg
│ ├── mk_chromolithography.jpg
│ ├── mk_cibulak_porcelain.jpg
│ ├── mk_kalighat_painting.jpg
│ ├── mk_madhubani_painting.jpg
│ ├── mk_patachitra_painting.jpg
│ ├── mk_ron_english_style.jpg
│ ├── mk_samoan_art_inspired.jpg
│ ├── mre_cinematic_dynamic.jpg
│ ├── mre_lyrical_geometry.jpg
│ ├── mre_surreal_painting.jpg
│ ├── ornate_and_intricate.jpg
│ ├── papercraft_paper_mache.jpg
│ ├── pencil_sketch_drawing.jpg
│ ├── whimsical_and_playful.jpg
│ ├── artstyle_constructivist.jpg
│ ├── futuristic_biomechanical.jpg
│ ├── light_cheery_atmosphere.jpg
│ ├── mk_dufaycolor_photograph.jpg
│ ├── mk_shepard_fairey_style.jpg
│ ├── mk_vintage_travel_poster.jpg
│ ├── mre_ancient_illustration.jpg
│ ├── mre_dynamic_illustration.jpg
│ ├── mre_spontaneous_picture.jpg
│ ├── papercraft_flat_papercut.jpg
│ ├── blueprint_schematic_drawing.jpg
│ ├── dripping_paint_splatter_art.jpg
│ ├── futuristic_cybernetic_robot.jpg
│ ├── futuristic_retro_cyberpunk.jpg
│ ├── futuristic_retro_futurism.jpg
│ ├── mk_cross_processing_print.jpg
│ ├── mk_illuminated_manuscript.jpg
│ ├── mk_vintage_airline_poster.jpg
│ ├── papercraft_paper_quilling.jpg
│ ├── papercraft_papercut_collage.jpg
│ ├── papercraft_stacked_papercut.jpg
│ ├── photo_iphone_photographic.jpg
│ ├── ads_gourmet_food_photography.jpg
│ ├── tranquil_relaxing_atmosphere.jpg
│ ├── artstyle_abstract_expressionism.jpg
│ ├── futuristic_cyberpunk_cityscape.jpg
│ ├── papercraft_papercut_shadow_box.jpg
│ ├── futuristic_biomechanical_cyberpunk.jpg
│ └── papercraft_thick_layered_papercut.jpg
├── tests
├── __init__.py
└── test_extra_utils.py
├── .gitattributes
├── environment.yaml
├── language
└── example.json
├── auth-example.json
├── presets
├── .gitignore
├── playground_v2.5.json
├── pony_v6.json
├── lightning.json
├── lcm.json
├── anime.json
├── realistic.json
├── sai.json
└── default.json
├── experiments_face.py
├── experiments_expansion.py
├── development.md
├── experiments_interrogate.py
├── requirements_versions.txt
├── experiments_mask_generation.py
├── entrypoint.sh
├── fooocus_colab.ipynb
├── .dockerignore
├── .gitignore
├── Dockerfile
├── entry_with_update.py
└── docker-compose.yml
/models/vae/put_vae_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/loras/put_loras_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/shared.py:
--------------------------------------------------------------------------------
1 | gradio_root = None
--------------------------------------------------------------------------------
/models/inpaint/put_inpaint_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/unet/put_unet_files_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/fooocus_version.py:
--------------------------------------------------------------------------------
1 | version = '2.5.5'
--------------------------------------------------------------------------------
/models/gligen/put_gligen_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @lllyasviel
2 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/checkpoints/put_checkpoints_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/diffusers/put_diffusers_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/hypernetworks/put_hypernetworks_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ldm_patched/ldm/modules/encoders/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/clip/put_clip_or_text_encoder_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/clip_vision/put_clip_vision_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/controlnet/put_controlnets_and_t2i_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/style_models/put_t2i_style_model_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ldm_patched/ldm/modules/diffusionmodules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ldm_patched/ldm/modules/distributions/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/prompt_expansion/put_prompt_expansion_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/safety_checker/put_safety_checker_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/wildcards/color_flower.txt:
--------------------------------------------------------------------------------
1 | __color__ __flower__
2 |
--------------------------------------------------------------------------------
/models/upscale_models/put_esrgan_and_other_upscale_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/embeddings/put_embeddings_or_textual_inversion_concepts_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/vae_approx/put_taesd_encoder_pth_and_taesd_decoder_pth_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements_docker.txt:
--------------------------------------------------------------------------------
1 | torch==2.1.0
2 | torchvision==0.16.0
3 |
--------------------------------------------------------------------------------
/extras/BLIP/models/bert_tokenizer/tokenizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "do_lower_case": true
3 | }
4 |
--------------------------------------------------------------------------------
/notification-example.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/notification-example.mp3
--------------------------------------------------------------------------------
/sdxl_styles/samples/baroque.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/baroque.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/cubism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/cubism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/dadaism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/dadaism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fauvism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fauvism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/glo_fi.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/glo_fi.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/idyllic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/idyllic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/op_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/op_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/rococo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/rococo.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/academia.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/academia.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/art_deco.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/art_deco.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futurism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futurism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_gta.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_gta.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_mosaic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_mosaic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_palekh.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_palekh.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_anime.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_anime.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_comic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_comic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_manga.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_manga.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_hdr.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_hdr.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/pop_art_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/pop_art_2.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_anime.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_anime.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sketchup.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sketchup.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/terragen.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/terragen.jpg
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pathlib
3 |
4 | sys.path.append(pathlib.Path(f'{__file__}/../modules').parent.resolve())
5 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Ensure that shell scripts always use lf line endings, e.g. entrypoint.sh for docker
2 | * text=auto
3 | *.sh text eol=lf
--------------------------------------------------------------------------------
/environment.yaml:
--------------------------------------------------------------------------------
1 | name: fooocus
2 | channels:
3 | - defaults
4 | dependencies:
5 | - python=3.10
6 | - pip=23.0
7 | - packaging
8 |
--------------------------------------------------------------------------------
/modules/constants.py:
--------------------------------------------------------------------------------
1 | # as in k-diffusion (sampling.py)
2 | MIN_SEED = 0
3 | MAX_SEED = 2**63 - 1
4 |
5 | AUTH_FILENAME = 'auth.json'
6 |
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_luxury.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_luxury.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_retail.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_retail.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/art_nouveau.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/art_nouveau.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/astral_aura.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/astral_aura.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/avant_garde.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/avant_garde.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/caricature.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/caricature.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/dark_fantasy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/dark_fantasy.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/doodle_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/doodle_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/flat_2d_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/flat_2d_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_pony.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_pony.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_v2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_v2.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_mario.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_mario.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_pokemon.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_pokemon.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_zelda.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_zelda.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/glitchcore.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/glitchcore.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/graffiti_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/graffiti_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/high_fashion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/high_fashion.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/logo_design.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/logo_design.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mandola_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mandola_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/medievalism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/medievalism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/minimalism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/minimalism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_disco.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_disco.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_gothic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_gothic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_grunge.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_grunge.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_horror.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_horror.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_kawaii.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_kawaii.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_macabre.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_macabre.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_manga.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_manga.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_space.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_space.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_tribal.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_tribal.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_atompunk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_atompunk.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_basquiat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_basquiat.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_dayak_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_dayak_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_de_stijl.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_de_stijl.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_gyotaku.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_gyotaku.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_herbarium.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_herbarium.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_pollock.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_pollock.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_scrimshaw.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_scrimshaw.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_shibori.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_shibori.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_ukiyo_e.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_ukiyo_e.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_van_gogh.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_van_gogh.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/neo_baroque.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/neo_baroque.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/neo_futurism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/neo_futurism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/neo_rococo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/neo_rococo.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/pebble_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/pebble_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_alien.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_alien.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/random_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/random_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_3d_model.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_3d_model.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_enhance.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_enhance.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_line_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_line_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_lowpoly.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_lowpoly.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_neonpunk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_neonpunk.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_origami.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_origami.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_texture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_texture.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/steampunk_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/steampunk_2.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/suprematism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/suprematism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/surrealism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/surrealism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/watercolor_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/watercolor_2.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/action_figure.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/action_figure.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_automotive.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_automotive.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_corporate.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_corporate.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/cel_shaded_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/cel_shaded_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/cinematic_diva.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/cinematic_diva.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/classicism_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/classicism_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/conceptual_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/conceptual_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/constructivism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/constructivism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/dmt_art_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/dmt_art_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/expressionism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/expressionism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_sharp.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_sharp.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_minecraft.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_minecraft.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/impressionism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/impressionism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/marker_drawing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/marker_drawing.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_dystopian.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_dystopian.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_nautical.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_nautical.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_zentangle.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_zentangle.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_andy_warhol.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_andy_warhol.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_chicano_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_chicano_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_embroidery.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_embroidery.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_luminogram.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_luminogram.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_mokume_gane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_mokume_gane.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_suminagashi.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_suminagashi.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_tlingit_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_tlingit_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_bad_dream.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_bad_dream.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_brave_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_brave_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_dark_dream.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_dark_dream.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_gloomy_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_gloomy_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_space_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_space_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_undead_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_undead_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/neo_byzantine.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/neo_byzantine.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/neoclassicism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/neoclassicism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_glamour.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_glamour.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_cinematic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_cinematic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_comic_book.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_comic_book.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_craft_clay.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_craft_clay.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_isometric.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_isometric.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_pixel_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_pixel_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/silhouette_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/silhouette_art.jpg
--------------------------------------------------------------------------------
/language/example.json:
--------------------------------------------------------------------------------
1 | {
2 | "Generate": "生成",
3 | "Input Image": "入力画像",
4 | "Advanced": "고급",
5 | "SAI 3D Model": "SAI 3D Modèle"
6 | }
7 |
--------------------------------------------------------------------------------
/sdxl_styles/samples/adorable_kawaii.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/adorable_kawaii.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_advertising.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_advertising.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_real_estate.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_real_estate.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_abstract.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_abstract.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_art_deco.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_art_deco.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_cubist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_cubist.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_graffiti.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_graffiti.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_pop_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_pop_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/double_exposure.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/double_exposure.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_cinematic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_cinematic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_enhance.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_enhance.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_negative.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_negative.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_sci_fi.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_sci_fi.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_retro_arcade.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_retro_arcade.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_retro_game.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_retro_game.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/googie_art_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/googie_art_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/macro_photography.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/macro_photography.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_dreamscape.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_dreamscape.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_fairy_tale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_fairy_tale.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_lovecraftian.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_lovecraftian.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_metropolis.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_metropolis.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_minimalist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_minimalist.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_monochrome.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_monochrome.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_adnate_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_adnate_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_afrofuturism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_afrofuturism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_albumen_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_albumen_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_aquatint_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_aquatint_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_bauhaus_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_bauhaus_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_bromoil_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_bromoil_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_calotype_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_calotype_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_carnival_glass.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_carnival_glass.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_coloring_book.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_coloring_book.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_constructivism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_constructivism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_fayum_portrait.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_fayum_portrait.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_gond_painting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_gond_painting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_halftone_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_halftone_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_inuit_carving.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_inuit_carving.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_lite_brite_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_lite_brite_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_one_line_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_one_line_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_pictorialism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_pictorialism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_punk_collage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_punk_collage.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_singer_sargent.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_singer_sargent.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_elemental_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_elemental_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_underground.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_underground.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/neo_impressionism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/neo_impressionism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_film_noir.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_film_noir.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_neon_noir.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_neon_noir.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_silhouette.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_silhouette.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_tilt_shift.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_tilt_shift.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_analog_film.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_analog_film.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_digital_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_digital_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_fantasy_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_fantasy_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sai_photographic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sai_photographic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/simple_vector_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/simple_vector_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/sticker_designs.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/sticker_designs.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/vibrant_rim_light.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/vibrant_rim_light.jpg
--------------------------------------------------------------------------------
/auth-example.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "user": "sitting-duck-1",
4 | "pass": "very-bad-publicly-known-password-change-it"
5 | }
6 | ]
7 |
--------------------------------------------------------------------------------
/presets/.gitignore:
--------------------------------------------------------------------------------
1 | *.json
2 | !anime.json
3 | !default.json
4 | !lcm.json
5 | !playground_v2.5.json
6 | !pony_v6.json
7 | !realistic.json
8 | !sai.json
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_steampunk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_steampunk.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_surrealist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_surrealist.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_typography.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_typography.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_watercolor.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_watercolor.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/colored_pencil_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/colored_pencil_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_masterpiece.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_masterpiece.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_photograph.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_photograph.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fortnite_art_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fortnite_art_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_bubble_bobble.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_bubble_bobble.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_cyberpunk_game.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_cyberpunk_game.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_fighting_game.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_fighting_game.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_strategy_game.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_strategy_game.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_streetfighter.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_streetfighter.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/infographic_drawing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/infographic_drawing.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/luxurious_elegance.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/luxurious_elegance.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_architectural.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_architectural.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_stained_glass.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_stained_glass.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_alcohol_ink_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_alcohol_ink_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_anthotype_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_anthotype_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_blacklight_paint.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_blacklight_paint.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_color_sketchnote.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_color_sketchnote.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_cross_stitching.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_cross_stitching.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_cyanotype_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_cyanotype_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_encaustic_paint.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_encaustic_paint.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_pichwai_painting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_pichwai_painting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_vitreous_enamel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_vitreous_enamel.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_artistic_vision.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_artistic_vision.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_dark_cyberpunk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_dark_cyberpunk.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_heroic_fantasy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_heroic_fantasy.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_sumi_e_detailed.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_sumi_e_detailed.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_sumi_e_symbolic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_sumi_e_symbolic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_collage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_collage.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_kirigami.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_kirigami.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_long_exposure.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_long_exposure.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/volumetric_lighting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/volumetric_lighting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/abstract_expressionism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/abstract_expressionism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/adorable_3d_character.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/adorable_3d_character.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_fashion_editorial.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_fashion_editorial.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_food_photography.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_food_photography.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_art_nouveau.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_art_nouveau.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_expressionist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_expressionist.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_hyperrealism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_hyperrealism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_impressionist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_impressionist.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_pointillism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_pointillism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_psychedelic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_psychedelic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_renaissance.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_renaissance.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/bauhaus_style_poster.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/bauhaus_style_poster.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/character_design_sheet.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/character_design_sheet.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/color_field_painting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/color_field_painting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/dark_moody_atmosphere.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/dark_moody_atmosphere.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/faded_polaroid_photo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/faded_polaroid_photo.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/fooocus_semi_realistic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/fooocus_semi_realistic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_cybernetic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_cybernetic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_futuristic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_futuristic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_vaporwave.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_vaporwave.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/game_rpg_fantasy_game.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/game_rpg_fantasy_game.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/harlem_renaissance_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/harlem_renaissance_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ink_dripping_drawing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ink_dripping_drawing.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/japanese_ink_drawing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/japanese_ink_drawing.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/knolling_photography.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/knolling_photography.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/misc_techwear_fashion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/misc_techwear_fashion.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_chromolithography.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_chromolithography.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_cibulak_porcelain.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_cibulak_porcelain.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_kalighat_painting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_kalighat_painting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_madhubani_painting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_madhubani_painting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_patachitra_painting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_patachitra_painting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_ron_english_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_ron_english_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_samoan_art_inspired.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_samoan_art_inspired.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_cinematic_dynamic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_cinematic_dynamic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_lyrical_geometry.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_lyrical_geometry.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_surreal_painting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_surreal_painting.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ornate_and_intricate.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ornate_and_intricate.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_paper_mache.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_paper_mache.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/pencil_sketch_drawing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/pencil_sketch_drawing.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/whimsical_and_playful.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/whimsical_and_playful.jpg
--------------------------------------------------------------------------------
/wildcards/.gitignore:
--------------------------------------------------------------------------------
1 | *.txt
2 | !animal.txt
3 | !artist.txt
4 | !color.txt
5 | !color_flower.txt
6 | !extended-color.txt
7 | !flower.txt
8 | !nationality.txt
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | schedule:
6 | interval: "monthly"
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_constructivist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_constructivist.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_biomechanical.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_biomechanical.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/light_cheery_atmosphere.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/light_cheery_atmosphere.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_dufaycolor_photograph.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_dufaycolor_photograph.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_shepard_fairey_style.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_shepard_fairey_style.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_vintage_travel_poster.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_vintage_travel_poster.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_ancient_illustration.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_ancient_illustration.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_dynamic_illustration.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_dynamic_illustration.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mre_spontaneous_picture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mre_spontaneous_picture.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_flat_papercut.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_flat_papercut.jpg
--------------------------------------------------------------------------------
/ldm_patched/modules/options.py:
--------------------------------------------------------------------------------
1 |
2 | args_parsing = False
3 |
4 | def enable_args_parsing(enable=True):
5 | global args_parsing
6 | args_parsing = enable
7 |
--------------------------------------------------------------------------------
/sdxl_styles/samples/blueprint_schematic_drawing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/blueprint_schematic_drawing.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/dripping_paint_splatter_art.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/dripping_paint_splatter_art.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_cybernetic_robot.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_cybernetic_robot.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_retro_cyberpunk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_retro_cyberpunk.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_retro_futurism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_retro_futurism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_cross_processing_print.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_cross_processing_print.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_illuminated_manuscript.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_illuminated_manuscript.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/mk_vintage_airline_poster.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/mk_vintage_airline_poster.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_paper_quilling.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_paper_quilling.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_papercut_collage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_papercut_collage.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_stacked_papercut.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_stacked_papercut.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/photo_iphone_photographic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/photo_iphone_photographic.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/ads_gourmet_food_photography.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/ads_gourmet_food_photography.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/tranquil_relaxing_atmosphere.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/tranquil_relaxing_atmosphere.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/artstyle_abstract_expressionism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/artstyle_abstract_expressionism.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_cyberpunk_cityscape.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_cyberpunk_cityscape.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_papercut_shadow_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_papercut_shadow_box.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/futuristic_biomechanical_cyberpunk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/futuristic_biomechanical_cyberpunk.jpg
--------------------------------------------------------------------------------
/sdxl_styles/samples/papercraft_thick_layered_papercut.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nass-works/Fooocus-fixed-crash/HEAD/sdxl_styles/samples/papercraft_thick_layered_papercut.jpg
--------------------------------------------------------------------------------
/models/prompt_expansion/fooocus_expansion/special_tokens_map.json:
--------------------------------------------------------------------------------
1 | {
2 | "bos_token": "<|endoftext|>",
3 | "eos_token": "<|endoftext|>",
4 | "unk_token": "<|endoftext|>"
5 | }
6 |
--------------------------------------------------------------------------------
/experiments_face.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import extras.face_crop as cropper
3 |
4 |
5 | img = cv2.imread('lena.png')
6 | result = cropper.crop_image(img)
7 | cv2.imwrite('lena_result.png', result)
8 |
--------------------------------------------------------------------------------
/wildcards/color.txt:
--------------------------------------------------------------------------------
1 | aqua
2 | black
3 | blue
4 | fuchsia
5 | gray
6 | green
7 | lime
8 | maroon
9 | navy
10 | olive
11 | orange
12 | purple
13 | red
14 | silver
15 | teal
16 | white
17 | yellow
18 |
--------------------------------------------------------------------------------
/experiments_expansion.py:
--------------------------------------------------------------------------------
1 | from modules.expansion import FooocusExpansion
2 |
3 | expansion = FooocusExpansion()
4 |
5 | text = 'a handsome man'
6 |
7 | for i in range(64):
8 | print(expansion(text, seed=i))
9 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Ask a question
4 | url: https://github.com/lllyasviel/Fooocus/discussions/new?category=q-a
5 | about: Ask the community for help
--------------------------------------------------------------------------------
/development.md:
--------------------------------------------------------------------------------
1 | ## Running unit tests
2 |
3 | Native python:
4 | ```
 5 | python -m unittest discover tests
6 | ```
7 |
8 | Embedded python (Windows zip file installation method):
9 | ```
10 | ..\python_embeded\python.exe -m unittest
11 | ```
12 |
--------------------------------------------------------------------------------
/models/prompt_expansion/fooocus_expansion/tokenizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "add_prefix_space": false,
3 | "bos_token": "<|endoftext|>",
4 | "eos_token": "<|endoftext|>",
5 | "model_max_length": 1024,
6 | "name_or_path": "gpt2",
7 | "special_tokens_map_file": null,
8 | "tokenizer_class": "GPT2Tokenizer",
9 | "unk_token": "<|endoftext|>"
10 | }
11 |
--------------------------------------------------------------------------------
/extras/facexlib/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .face_utils import align_crop_face_landmarks, compute_increased_bbox, get_valid_bboxes, paste_face_back
2 | from .misc import img2tensor, load_file_from_url, scandir
3 |
4 | __all__ = [
5 | 'align_crop_face_landmarks', 'compute_increased_bbox', 'get_valid_bboxes', 'load_file_from_url', 'paste_face_back',
6 | 'img2tensor', 'scandir'
7 | ]
8 |
--------------------------------------------------------------------------------
/ldm_patched/modules/checkpoint_pickle.py:
--------------------------------------------------------------------------------
1 | import pickle
2 |
3 | load = pickle.load
4 |
5 | class Empty:
6 | pass
7 |
8 | class Unpickler(pickle.Unpickler):
9 | def find_class(self, module, name):
10 | #TODO: safe unpickle
11 | if module.startswith("pytorch_lightning"):
12 | return Empty
13 | return super().find_class(module, name)
14 |
--------------------------------------------------------------------------------
/modules/html.py:
--------------------------------------------------------------------------------
1 | progress_html = '''
2 |
9 | '''
10 |
11 |
12 | def make_progress_html(number, text):
13 | return progress_html.replace('*number*', str(number)).replace('*text*', text)
14 |
--------------------------------------------------------------------------------
/experiments_interrogate.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from extras.interrogate import default_interrogator as default_interrogator_photo
3 | from extras.wd14tagger import default_interrogator as default_interrogator_anime
4 |
5 | img = cv2.imread('./test_imgs/red_box.jpg')[:, :, ::-1].copy()
6 | print(default_interrogator_photo(img))
7 | img = cv2.imread('./test_imgs/miku.jpg')[:, :, ::-1].copy()
8 | print(default_interrogator_anime(img))
9 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/retrieval_msrvtt.yaml:
--------------------------------------------------------------------------------
1 | video_root: '/export/share/dongxuli/data/msrvtt_retrieval/videos'
2 | ann_root: 'annotation'
3 |
4 | # set pretrained as a file path or an url
5 | pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
6 |
7 | # size of vit model; base or large
8 | vit: 'base'
9 | batch_size: 64
10 | k_test: 128
11 | image_size: 384
12 | num_frm_test: 8
--------------------------------------------------------------------------------
/extras/BLIP/configs/nocaps.yaml:
--------------------------------------------------------------------------------
1 | image_root: '/export/share/datasets/vision/nocaps/'
2 | ann_root: 'annotation'
3 |
4 | # set pretrained as a file path or an url
5 | pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
6 |
7 | vit: 'base'
8 | batch_size: 32
9 |
10 | image_size: 384
11 |
12 | max_length: 20
13 | min_length: 5
14 | num_beams: 3
15 | prompt: 'a picture of '
--------------------------------------------------------------------------------
/extras/safety_checker/configs/preprocessor_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "crop_size": 224,
3 | "do_center_crop": true,
4 | "do_convert_rgb": true,
5 | "do_normalize": true,
6 | "do_resize": true,
7 | "feature_extractor_type": "CLIPFeatureExtractor",
8 | "image_mean": [
9 | 0.48145466,
10 | 0.4578275,
11 | 0.40821073
12 | ],
13 | "image_std": [
14 | 0.26862954,
15 | 0.26130258,
16 | 0.27577711
17 | ],
18 | "resample": 3,
19 | "size": 224
20 | }
21 |
--------------------------------------------------------------------------------
/ldm_patched/modules/clip_vision_config_g.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "gelu",
5 | "hidden_size": 1664,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 8192,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 48,
15 | "patch_size": 14,
16 | "projection_dim": 1280,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/ldm_patched/modules/clip_vision_config_h.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "gelu",
5 | "hidden_size": 1280,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 5120,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 32,
15 | "patch_size": 14,
16 | "projection_dim": 1024,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/ldm_patched/modules/clip_vision_config_vitl.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "quick_gelu",
5 | "hidden_size": 1024,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 4096,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 24,
15 | "patch_size": 14,
16 | "projection_dim": 768,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/requirements_versions.txt:
--------------------------------------------------------------------------------
1 | torchsde==0.2.6
2 | einops==0.8.0
3 | transformers==4.42.4
4 | safetensors==0.4.3
5 | accelerate==0.32.1
6 | pyyaml==6.0.1
7 | pillow==10.4.0
8 | scipy==1.14.0
9 | tqdm==4.66.4
10 | psutil==6.0.0
11 | pytorch_lightning==2.3.3
12 | omegaconf==2.3.0
13 | gradio==3.41.2
14 | pygit2==1.15.1
15 | opencv-contrib-python-headless==4.10.0.84
16 | httpx==0.27.0
17 | onnxruntime==1.18.1
18 | timm==1.0.7
19 | numpy==1.26.4
20 | tokenizers==0.19.1
21 | packaging==24.1
22 | rembg==2.0.57
23 | groundingdino-py==0.4.0
24 | segment_anything==1.0
--------------------------------------------------------------------------------
/extras/BLIP/configs/nlvr.yaml:
--------------------------------------------------------------------------------
1 | image_root: '/export/share/datasets/vision/NLVR2/'
2 | ann_root: 'annotation'
3 |
4 | # set pretrained as a file path or an url
5 | pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_nlvr.pth'
6 |
7 | #size of vit model; base or large
8 | vit: 'base'
9 | batch_size_train: 16
10 | batch_size_test: 64
11 | vit_grad_ckpt: False
12 | vit_ckpt_layer: 0
13 | max_epoch: 15
14 |
15 | image_size: 384
16 |
17 | # optimizer
18 | weight_decay: 0.05
19 | init_lr: 3e-5
20 | min_lr: 0
21 |
22 |
--------------------------------------------------------------------------------
/modules/ops.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import contextlib
3 |
4 |
5 | @contextlib.contextmanager
6 | def use_patched_ops(operations):
7 | op_names = ['Linear', 'Conv2d', 'Conv3d', 'GroupNorm', 'LayerNorm']
8 | backups = {op_name: getattr(torch.nn, op_name) for op_name in op_names}
9 |
10 | try:
11 | for op_name in op_names:
12 | setattr(torch.nn, op_name, getattr(operations, op_name))
13 |
14 | yield
15 |
16 | finally:
17 | for op_name in op_names:
18 | setattr(torch.nn, op_name, backups[op_name])
19 | return
20 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/bert_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "BertModel"
4 | ],
5 | "attention_probs_dropout_prob": 0.1,
6 | "hidden_act": "gelu",
7 | "hidden_dropout_prob": 0.1,
8 | "hidden_size": 768,
9 | "initializer_range": 0.02,
10 | "intermediate_size": 3072,
11 | "layer_norm_eps": 1e-12,
12 | "max_position_embeddings": 512,
13 | "model_type": "bert",
14 | "num_attention_heads": 12,
15 | "num_hidden_layers": 12,
16 | "pad_token_id": 0,
17 | "type_vocab_size": 2,
18 | "vocab_size": 30522,
19 | "encoder_width": 768,
20 | "add_cross_attention": true
21 | }
22 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/med_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "BertModel"
4 | ],
5 | "attention_probs_dropout_prob": 0.1,
6 | "hidden_act": "gelu",
7 | "hidden_dropout_prob": 0.1,
8 | "hidden_size": 768,
9 | "initializer_range": 0.02,
10 | "intermediate_size": 3072,
11 | "layer_norm_eps": 1e-12,
12 | "max_position_embeddings": 512,
13 | "model_type": "bert",
14 | "num_attention_heads": 12,
15 | "num_hidden_layers": 12,
16 | "pad_token_id": 0,
17 | "type_vocab_size": 2,
18 | "vocab_size": 30524,
19 | "encoder_width": 768,
20 | "add_cross_attention": true
21 | }
22 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/pretrain.yaml:
--------------------------------------------------------------------------------
1 | train_file: ['/export/share/junnan-li/VL_pretrain/annotation/coco_karpathy_train.json',
2 | '/export/share/junnan-li/VL_pretrain/annotation/vg_caption.json',
3 | ]
4 | laion_path: ''
5 |
6 | # size of vit model; base or large
7 | vit: 'base'
8 | vit_grad_ckpt: False
9 | vit_ckpt_layer: 0
10 |
11 | image_size: 224
12 | batch_size: 75
13 |
14 | queue_size: 57600
15 | alpha: 0.4
16 |
17 | # optimizer
18 | weight_decay: 0.05
19 | init_lr: 3e-4
20 | min_lr: 1e-6
21 | warmup_lr: 1e-6
22 | lr_decay_rate: 0.9
23 | max_epoch: 20
24 | warmup_steps: 3000
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/ldm_patched/modules/sd1_tokenizer/special_tokens_map.json:
--------------------------------------------------------------------------------
1 | {
2 | "bos_token": {
3 | "content": "<|startoftext|>",
4 | "lstrip": false,
5 | "normalized": true,
6 | "rstrip": false,
7 | "single_word": false
8 | },
9 | "eos_token": {
10 | "content": "<|endoftext|>",
11 | "lstrip": false,
12 | "normalized": true,
13 | "rstrip": false,
14 | "single_word": false
15 | },
16 | "pad_token": "<|endoftext|>",
17 | "unk_token": {
18 | "content": "<|endoftext|>",
19 | "lstrip": false,
20 | "normalized": true,
21 | "rstrip": false,
22 | "single_word": false
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/ldm_patched/modules/clip_config_bigg.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "CLIPTextModel"
4 | ],
5 | "attention_dropout": 0.0,
6 | "bos_token_id": 0,
7 | "dropout": 0.0,
8 | "eos_token_id": 2,
9 | "hidden_act": "gelu",
10 | "hidden_size": 1280,
11 | "initializer_factor": 1.0,
12 | "initializer_range": 0.02,
13 | "intermediate_size": 5120,
14 | "layer_norm_eps": 1e-05,
15 | "max_position_embeddings": 77,
16 | "model_type": "clip_text_model",
17 | "num_attention_heads": 20,
18 | "num_hidden_layers": 32,
19 | "pad_token_id": 1,
20 | "projection_dim": 1280,
21 | "torch_dtype": "float32",
22 | "vocab_size": 49408
23 | }
24 |
--------------------------------------------------------------------------------
/ldm_patched/modules/sd2_clip_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "CLIPTextModel"
4 | ],
5 | "attention_dropout": 0.0,
6 | "bos_token_id": 0,
7 | "dropout": 0.0,
8 | "eos_token_id": 2,
9 | "hidden_act": "gelu",
10 | "hidden_size": 1024,
11 | "initializer_factor": 1.0,
12 | "initializer_range": 0.02,
13 | "intermediate_size": 4096,
14 | "layer_norm_eps": 1e-05,
15 | "max_position_embeddings": 77,
16 | "model_type": "clip_text_model",
17 | "num_attention_heads": 16,
18 | "num_hidden_layers": 24,
19 | "pad_token_id": 1,
20 | "projection_dim": 1024,
21 | "torch_dtype": "float32",
22 | "vocab_size": 49408
23 | }
24 |
--------------------------------------------------------------------------------
/extras/BLIP/models/bert_tokenizer/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "BertForMaskedLM"
4 | ],
5 | "attention_probs_dropout_prob": 0.1,
6 | "gradient_checkpointing": false,
7 | "hidden_act": "gelu",
8 | "hidden_dropout_prob": 0.1,
9 | "hidden_size": 768,
10 | "initializer_range": 0.02,
11 | "intermediate_size": 3072,
12 | "layer_norm_eps": 1e-12,
13 | "max_position_embeddings": 512,
14 | "model_type": "bert",
15 | "num_attention_heads": 12,
16 | "num_hidden_layers": 12,
17 | "pad_token_id": 0,
18 | "position_embedding_type": "absolute",
19 | "transformers_version": "4.6.0.dev0",
20 | "type_vocab_size": 2,
21 | "use_cache": true,
22 | "vocab_size": 30522
23 | }
24 |
--------------------------------------------------------------------------------
/ldm_patched/modules/sd1_clip_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "_name_or_path": "openai/clip-vit-large-patch14",
3 | "architectures": [
4 | "CLIPTextModel"
5 | ],
6 | "attention_dropout": 0.0,
7 | "bos_token_id": 0,
8 | "dropout": 0.0,
9 | "eos_token_id": 2,
10 | "hidden_act": "quick_gelu",
11 | "hidden_size": 768,
12 | "initializer_factor": 1.0,
13 | "initializer_range": 0.02,
14 | "intermediate_size": 3072,
15 | "layer_norm_eps": 1e-05,
16 | "max_position_embeddings": 77,
17 | "model_type": "clip_text_model",
18 | "num_attention_heads": 12,
19 | "num_hidden_layers": 12,
20 | "pad_token_id": 1,
21 | "projection_dim": 768,
22 | "torch_dtype": "float32",
23 | "transformers_version": "4.24.0",
24 | "vocab_size": 49408
25 | }
26 |
--------------------------------------------------------------------------------
/experiments_mask_generation.py:
--------------------------------------------------------------------------------
1 | # https://github.com/sail-sg/EditAnything/blob/main/sam2groundingdino_edit.py
2 |
3 | import numpy as np
4 | from PIL import Image
5 |
6 | from extras.inpaint_mask import SAMOptions, generate_mask_from_image
7 |
8 | original_image = Image.open('cat.webp')
9 | image = np.array(original_image, dtype=np.uint8)
10 |
11 | sam_options = SAMOptions(
12 | dino_prompt='eye',
13 | dino_box_threshold=0.3,
14 | dino_text_threshold=0.25,
15 | dino_erode_or_dilate=0,
16 | dino_debug=False,
17 | max_detections=2,
18 | model_type='vit_b'
19 | )
20 |
21 | mask_image, _, _, _ = generate_mask_from_image(image, sam_options=sam_options)
22 |
23 | merged_masks_img = Image.fromarray(mask_image)
24 | merged_masks_img.show()
25 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/vqa.yaml:
--------------------------------------------------------------------------------
1 | vqa_root: '/export/share/datasets/vision/VQA/Images/mscoco/' #followed by train2014/
2 | vg_root: '/export/share/datasets/vision/visual-genome/' #followed by image/
3 | train_files: ['vqa_train','vqa_val','vg_qa']
4 | ann_root: 'annotation'
5 |
6 | # set pretrained as a file path or an url
7 | pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
8 |
9 | # size of vit model; base or large
10 | vit: 'base'
11 | batch_size_train: 16
12 | batch_size_test: 32
13 | vit_grad_ckpt: False
14 | vit_ckpt_layer: 0
15 | init_lr: 2e-5
16 |
17 | image_size: 480
18 |
19 | k_test: 128
20 | inference: 'rank'
21 |
22 | # optimizer
23 | weight_decay: 0.05
24 | min_lr: 0
25 | max_epoch: 10
--------------------------------------------------------------------------------
/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash

ORIGINALDIR=/content/app
# Use predefined DATADIR if it is defined
[[ -z "${DATADIR}" ]] && DATADIR=/content/data

# Make a persistent dir under DATADIR and link it into the app dir.
# Quoting prevents word splitting if the paths ever contain spaces.
function mklink () {
	mkdir -p "${DATADIR}/$1"
	ln -s "${DATADIR}/$1" "${ORIGINALDIR}"
}

# Copy old files from the import dir (-n: never overwrite existing files)
function import () {
	(test -d "/import/$1" && cd "/import/$1" && cp -Rpn . "${DATADIR}/$1/")
}

cd "${ORIGINALDIR}"

# models
mklink models
# Copy original files
(cd "${ORIGINALDIR}/models.org" && cp -Rpn . "${ORIGINALDIR}/models/")
# Import old files
import models

# outputs
mklink outputs
# Import old files
import outputs

# Start application.
# "$@" preserves each argument's word boundaries; $* would re-split them.
python launch.py "$@"
34 |
--------------------------------------------------------------------------------
/fooocus_colab.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "id": "VjYy0F2gZIPR"
8 | },
9 | "outputs": [],
10 | "source": [
11 | "!pip install pygit2==1.15.1\n",
12 | "%cd /content\n",
13 | "!git clone https://github.com/lllyasviel/Fooocus.git\n",
14 | "%cd /content/Fooocus\n",
15 | "!python entry_with_update.py --share --always-high-vram\n"
16 | ]
17 | }
18 | ],
19 | "metadata": {
20 | "accelerator": "GPU",
21 | "colab": {
22 | "gpuType": "T4",
23 | "provenance": []
24 | },
25 | "kernelspec": {
26 | "display_name": "Python 3",
27 | "name": "python3"
28 | },
29 | "language_info": {
30 | "name": "python"
31 | }
32 | },
33 | "nbformat": 4,
34 | "nbformat_minor": 0
35 | }
36 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/caption_coco.yaml:
--------------------------------------------------------------------------------
1 | image_root: '/export/share/datasets/vision/coco/images/'
2 | ann_root: 'annotation'
3 | coco_gt_root: 'annotation/coco_gt'
4 |
5 | # set pretrained as a file path or a URL
6 | pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
7 |
8 | # size of vit model; base or large
9 | vit: 'base'
10 | vit_grad_ckpt: False
11 | vit_ckpt_layer: 0
12 | batch_size: 32
13 | init_lr: 1e-5
14 |
15 | # vit: 'large'
16 | # vit_grad_ckpt: True
17 | # vit_ckpt_layer: 5
18 | # batch_size: 16
19 | # init_lr: 2e-6
20 |
21 | image_size: 384
22 |
23 | # generation configs
24 | max_length: 20
25 | min_length: 5
26 | num_beams: 3
27 | prompt: 'a picture of '
28 |
29 | # optimizer
30 | weight_decay: 0.05
31 | min_lr: 0
32 | max_epoch: 5
33 |
34 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/retrieval_coco.yaml:
--------------------------------------------------------------------------------
1 | image_root: '/export/share/datasets/vision/coco/images/'
2 | ann_root: 'annotation'
3 | dataset: 'coco'
4 |
5 | # set pretrained as a file path or a URL
6 | pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
7 |
8 | # size of vit model; base or large
9 |
10 | vit: 'base'
11 | batch_size_train: 32
12 | batch_size_test: 64
13 | vit_grad_ckpt: True
14 | vit_ckpt_layer: 4
15 | init_lr: 1e-5
16 |
17 | # vit: 'large'
18 | # batch_size_train: 16
19 | # batch_size_test: 32
20 | # vit_grad_ckpt: True
21 | # vit_ckpt_layer: 12
22 | # init_lr: 5e-6
23 |
24 | image_size: 384
25 | queue_size: 57600
26 | alpha: 0.4
27 | k_test: 256
28 | negative_all_rank: True
29 |
30 | # optimizer
31 | weight_decay: 0.05
32 | min_lr: 0
33 | max_epoch: 6
34 |
35 |
--------------------------------------------------------------------------------
/extras/BLIP/configs/retrieval_flickr.yaml:
--------------------------------------------------------------------------------
1 | image_root: '/export/share/datasets/vision/flickr30k/'
2 | ann_root: 'annotation'
3 | dataset: 'flickr'
4 |
5 | # set pretrained as a file path or a URL
6 | pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_flickr.pth'
7 |
8 | # size of vit model; base or large
9 |
10 | vit: 'base'
11 | batch_size_train: 32
12 | batch_size_test: 64
13 | vit_grad_ckpt: True
14 | vit_ckpt_layer: 4
15 | init_lr: 1e-5
16 |
17 | # vit: 'large'
18 | # batch_size_train: 16
19 | # batch_size_test: 32
20 | # vit_grad_ckpt: True
21 | # vit_ckpt_layer: 10
22 | # init_lr: 5e-6
23 |
24 | image_size: 384
25 | queue_size: 57600
26 | alpha: 0.4
27 | k_test: 128
28 | negative_all_rank: False
29 |
30 | # optimizer
31 | weight_decay: 0.05
32 | min_lr: 0
33 | max_epoch: 6
34 |
35 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/timm/helpers.py:
--------------------------------------------------------------------------------
1 | """ Layer/Module Helpers
2 | Hacked together by / Copyright 2020 Ross Wightman
3 | """
4 | import collections.abc
5 | from itertools import repeat
6 |
7 |
8 | # From PyTorch internals
9 | def _ntuple(n):
10 | def parse(x):
11 | if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
12 | return x
13 | return tuple(repeat(x, n))
14 |
15 | return parse
16 |
17 |
# Pre-built converters for the common arities (e.g. kernel/stride/padding pairs).
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
# Arbitrary arity: to_ntuple(n) returns a converter like the ones above.
to_ntuple = _ntuple
23 |
24 |
def make_divisible(v, divisor=8, min_value=None, round_limit=0.9):
    """Round *v* to the nearest multiple of *divisor*, not going below *min_value*.

    The final guard bumps the result up one divisor step whenever rounding
    would drop it below ``round_limit * v`` (i.e. by more than ~10% by default).
    """
    floor = min_value or divisor
    candidate = max(floor, int(v + divisor / 2) // divisor * divisor)
    # Bump up if rounding down lost too much of the original value.
    return candidate + divisor if candidate < round_limit * v else candidate
32 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.ckpt
3 | *.safetensors
4 | *.pth
5 | *.pt
6 | *.bin
7 | *.patch
8 | *.backup
9 | *.corrupted
10 | *.partial
11 | *.onnx
12 | sorted_styles.json
13 | /input
14 | /cache
15 | /language/default.json
16 | /test_imgs
17 | config.txt
18 | config_modification_tutorial.txt
19 | user_path_config.txt
20 | user_path_config-deprecated.txt
21 | /modules/*.png
22 | /repositories
23 | /fooocus_env
24 | /venv
25 | /tmp
26 | /ui-config.json
27 | /outputs
28 | /config.json
29 | /log
30 | /webui.settings.bat
31 | /embeddings
32 | /styles.csv
33 | /params.txt
34 | /styles.csv.bak
35 | /webui-user.bat
36 | /webui-user.sh
37 | /interrogate
38 | /user.css
39 | /.idea
40 | /notification.ogg
41 | /notification.mp3
42 | /SwinIR
43 | /textual_inversion
44 | .vscode
45 | /extensions
46 | /test/stdout.txt
47 | /test/stderr.txt
48 | /cache.json*
49 | /config_states/
50 | /node_modules
51 | /package-lock.json
52 | /.coverage*
53 | /auth.json
54 | .DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.ckpt
3 | *.safetensors
4 | *.pth
5 | *.pt
6 | *.bin
7 | *.patch
8 | *.backup
9 | *.corrupted
10 | *.partial
11 | *.onnx
12 | sorted_styles.json
13 | hash_cache.txt
14 | /input
15 | /cache
16 | /language/default.json
17 | /test_imgs
18 | config.txt
19 | config_modification_tutorial.txt
20 | user_path_config.txt
21 | user_path_config-deprecated.txt
22 | /modules/*.png
23 | /repositories
24 | /fooocus_env
25 | /venv
26 | /tmp
27 | /ui-config.json
28 | /outputs
29 | /config.json
30 | /log
31 | /webui.settings.bat
32 | /embeddings
33 | /styles.csv
34 | /params.txt
35 | /styles.csv.bak
36 | /webui-user.bat
37 | /webui-user.sh
38 | /interrogate
39 | /user.css
40 | /.idea
41 | /notification.ogg
42 | /notification.mp3
43 | /SwinIR
44 | /textual_inversion
45 | .vscode
46 | /extensions
47 | /test/stdout.txt
48 | /test/stderr.txt
49 | /cache.json*
50 | /config_states/
51 | /node_modules
52 | /package-lock.json
53 | /.coverage*
54 | /auth.json
55 | .DS_Store
56 |
--------------------------------------------------------------------------------
/ldm_patched/modules/sd1_tokenizer/tokenizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "add_prefix_space": false,
3 | "bos_token": {
4 | "__type": "AddedToken",
5 | "content": "<|startoftext|>",
6 | "lstrip": false,
7 | "normalized": true,
8 | "rstrip": false,
9 | "single_word": false
10 | },
11 | "do_lower_case": true,
12 | "eos_token": {
13 | "__type": "AddedToken",
14 | "content": "<|endoftext|>",
15 | "lstrip": false,
16 | "normalized": true,
17 | "rstrip": false,
18 | "single_word": false
19 | },
20 | "errors": "replace",
21 | "model_max_length": 77,
22 | "name_or_path": "openai/clip-vit-large-patch14",
23 | "pad_token": "<|endoftext|>",
24 | "special_tokens_map_file": "./special_tokens_map.json",
25 | "tokenizer_class": "CLIPTokenizer",
26 | "unk_token": {
27 | "__type": "AddedToken",
28 | "content": "<|endoftext|>",
29 | "lstrip": false,
30 | "normalized": true,
31 | "rstrip": false,
32 | "single_word": false
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/OmniSR/pixelshuffle.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | #############################################################
4 | # File: pixelshuffle.py
5 | # Created Date: Friday July 1st 2022
6 | # Author: Chen Xuanhong
7 | # Email: chenxuanhongzju@outlook.com
8 | # Last Modified: Friday, 1st July 2022 10:18:39 am
9 | # Modified By: Chen Xuanhong
10 | # Copyright (c) 2022 Shanghai Jiao Tong University
11 | #############################################################
12 |
13 | import torch.nn as nn
14 |
15 |
def pixelshuffle_block(
    in_channels, out_channels, upscale_factor=2, kernel_size=3, bias=False
):
    """
    Upsample features according to `upscale_factor`.

    A conv expands the channel count by `upscale_factor**2`, then
    nn.PixelShuffle rearranges those channels into spatial resolution.

    Args:
        in_channels: channels of the input feature map.
        out_channels: channels of the upsampled output.
        upscale_factor: spatial upscaling ratio.
        kernel_size: conv kernel size; 'same' padding is derived from it.
        bias: whether the conv layer uses a bias term.

    Returns:
        nn.Sequential of (Conv2d, PixelShuffle).
    """
    # Fix: use the computed 'same' padding instead of a hard-coded 1 so that
    # kernel sizes other than 3 preserve spatial dimensions. For the default
    # kernel_size=3 this is unchanged (3 // 2 == 1).
    padding = kernel_size // 2
    conv = nn.Conv2d(
        in_channels,
        out_channels * (upscale_factor**2),
        kernel_size,
        padding=padding,
        bias=bias,
    )
    pixel_shuffle = nn.PixelShuffle(upscale_factor)
    return nn.Sequential(conv, pixel_shuffle)
32 |
--------------------------------------------------------------------------------
/modules/upscaler.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 |
3 | import modules.core as core
4 | import torch
5 | from ldm_patched.contrib.external_upscale_model import ImageUpscaleWithModel
6 | from ldm_patched.pfn.architecture.RRDB import RRDBNet as ESRGAN
7 | from modules.config import downloading_upscale_model
8 |
9 | opImageUpscaleWithModel = ImageUpscaleWithModel()
10 | model = None
11 |
12 |
def perform_upscale(img):
    """Upscale a numpy image with ESRGAN, lazily loading weights on first call.

    The loaded model is cached in the module-level `model` global, so the
    checkpoint download and state-dict rewrite happen only once per process.
    """
    global model

    print(f'Upscaling image with shape {str(img.shape)} ...')

    if model is None:
        state_dict = torch.load(downloading_upscale_model(), weights_only=True)
        # Rename checkpoint keys to the layout the RRDB implementation expects.
        renamed = OrderedDict(
            (key.replace('residual_block_', 'RDB'), value)
            for key, value in state_dict.items()
        )
        del state_dict
        model = ESRGAN(renamed)
        model.cpu()
        model.eval()

    tensor = core.numpy_to_pytorch(img)
    tensor = opImageUpscaleWithModel.upscale(model, tensor)[0]
    return core.pytorch_to_numpy(tensor)[0]
34 |
--------------------------------------------------------------------------------
/modules/model_loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | from urllib.parse import urlparse
3 | from typing import Optional
4 |
5 |
def load_file_from_url(
    url: str,
    *,
    model_dir: str,
    progress: bool = True,
    file_name: Optional[str] = None,
) -> str:
    """Download a file from `url` into `model_dir`, using the file present if possible.

    The huggingface.co host is rewritten to the `HF_MIRROR` environment
    variable (when set) so downloads can go through a mirror.

    Args:
        url: source URL; only the first occurrence of the HF host is rewritten.
        model_dir: destination directory, created if missing.
        progress: show torch's download progress bar.
        file_name: target file name; derived from the URL path when omitted.

    Returns:
        Absolute path to the cached (or freshly downloaded) file.
    """
    domain = os.environ.get("HF_MIRROR", "https://huggingface.co").rstrip('/')
    # Idiom fix: call replace on the instance rather than via str.replace(url, ...).
    url = url.replace("https://huggingface.co", domain, 1)
    os.makedirs(model_dir, exist_ok=True)
    if not file_name:
        parts = urlparse(url)
        file_name = os.path.basename(parts.path)
    cached_file = os.path.abspath(os.path.join(model_dir, file_name))
    if not os.path.exists(cached_file):
        print(f'Downloading: "{url}" to {cached_file}\n')
        # Imported lazily so importing this module does not pull in torch.
        from torch.hub import download_url_to_file
        download_url_to_file(url, cached_file, progress=progress)
    return cached_file
29 |
--------------------------------------------------------------------------------
/ldm_patched/licenses-3rd/kdiffusion:
--------------------------------------------------------------------------------
1 | Copyright (c) 2022 Katherine Crowson
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
--------------------------------------------------------------------------------
/models/prompt_expansion/fooocus_expansion/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "_name_or_path": "gpt2",
3 | "activation_function": "gelu_new",
4 | "architectures": [
5 | "GPT2LMHeadModel"
6 | ],
7 | "attn_pdrop": 0.1,
8 | "bos_token_id": 50256,
9 | "embd_pdrop": 0.1,
10 | "eos_token_id": 50256,
11 | "pad_token_id": 50256,
12 | "initializer_range": 0.02,
13 | "layer_norm_epsilon": 1e-05,
14 | "model_type": "gpt2",
15 | "n_ctx": 1024,
16 | "n_embd": 768,
17 | "n_head": 12,
18 | "n_inner": null,
19 | "n_layer": 12,
20 | "n_positions": 1024,
21 | "reorder_and_upcast_attn": false,
22 | "resid_pdrop": 0.1,
23 | "scale_attn_by_inverse_layer_idx": false,
24 | "scale_attn_weights": true,
25 | "summary_activation": null,
26 | "summary_first_dropout": 0.1,
27 | "summary_proj_to_labels": true,
28 | "summary_type": "cls_index",
29 | "summary_use_proj": true,
30 | "task_specific_params": {
31 | "text-generation": {
32 | "do_sample": true,
33 | "max_length": 50
34 | }
35 | },
36 | "torch_dtype": "float32",
37 | "transformers_version": "4.23.0.dev0",
38 | "use_cache": true,
39 | "vocab_size": 50257
40 | }
41 |
--------------------------------------------------------------------------------
/extras/facexlib/parsing/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from extras.facexlib.utils import load_file_from_url
4 | from .bisenet import BiSeNet
5 | from .parsenet import ParseNet
6 |
7 |
def init_parsing_model(model_name='bisenet', half=False, device='cuda', model_rootpath=None):
    """Construct a face-parsing network and load its pretrained weights.

    Supported names are 'bisenet' and 'parsenet'; any other name raises
    NotImplementedError. Weights are fetched (or reused from cache) via
    load_file_from_url and the model is returned in eval mode on *device*.
    """
    # Builders are lazy lambdas so only the requested network is instantiated.
    known_models = {
        'bisenet': (
            lambda: BiSeNet(num_class=19),
            'https://github.com/xinntao/facexlib/releases/download/v0.2.0/parsing_bisenet.pth',
        ),
        'parsenet': (
            lambda: ParseNet(in_size=512, out_size=512, parsing_ch=19),
            'https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth',
        ),
    }
    if model_name not in known_models:
        raise NotImplementedError(f'{model_name} is not implemented.')
    build, model_url = known_models[model_name]
    model = build()

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
    state = torch.load(model_path, map_location=lambda storage, loc: storage, weights_only=True)
    model.load_state_dict(state, strict=True)
    model.eval()
    return model.to(device)
25 |
--------------------------------------------------------------------------------
/ldm_patched/licenses-3rd/taesd:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Ollin Boer Bohan
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/LICENSE-HAT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Xiangyu Chen
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/ldm_patched/licenses-3rd/ldm:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/ldm_patched/licenses-3rd/chainer:
--------------------------------------------------------------------------------
1 | Copyright (c) 2015 Preferred Infrastructure, Inc.
2 | Copyright (c) 2015 Preferred Networks, Inc.
3 |
4 | Permission is hereby granted, free of charge, to any person obtaining a copy
5 | of this software and associated documentation files (the "Software"), to deal
6 | in the Software without restriction, including without limitation the rights
7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | copies of the Software, and to permit persons to whom the Software is
9 | furnished to do so, subject to the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be included in
12 | all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 | THE SOFTWARE.
--------------------------------------------------------------------------------
/extras/GroundingDINO/config/GroundingDINO_SwinT_OGC.py:
--------------------------------------------------------------------------------
# GroundingDINO SwinT-OGC hyper-parameters, consumed by the model builder.
# Values mirror the upstream GroundingDINO release; do not edit casually.

batch_size = 1
modelname = "groundingdino"

# Backbone (Swin-T) and positional encoding
backbone = "swin_T_224_1k"
position_embedding = "sine"
pe_temperatureH = 20
pe_temperatureW = 20
return_interm_indices = [1, 2, 3]
backbone_freeze_keywords = None

# Transformer encoder/decoder
enc_layers = 6
dec_layers = 6
pre_norm = False
dim_feedforward = 2048
hidden_dim = 256
dropout = 0.0
nheads = 8
num_queries = 900
query_dim = 4
num_patterns = 0

# Deformable-attention sampling
num_feature_levels = 4
enc_n_points = 4
dec_n_points = 4

# Two-stage query selection
two_stage_type = "standard"
two_stage_bbox_embed_share = False
two_stage_class_embed_share = False
transformer_activation = "relu"
dec_pred_bbox_embed_share = True

# Denoising (DN) training settings — presumably unused at pure inference time;
# kept for config compatibility with upstream.
dn_box_noise_scale = 1.0
dn_label_noise_ratio = 0.5
dn_label_coef = 1.0
dn_bbox_coef = 1.0
embed_init_tgt = True
dn_labelbook_size = 2000

# Text branch (BERT) and cross-modality fusion
max_text_len = 256
text_encoder_type = "bert-base-uncased"
use_text_enhancer = True
use_fusion_layer = True
use_checkpoint = True
use_transformer_ckpt = True
use_text_cross_attention = True
text_dropout = 0.0
fusion_dropout = 0.0
fusion_droppath = 0.1
sub_sentence_present = True
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM nvidia/cuda:12.4.1-base-ubuntu22.04

# key=value form; the legacy space-separated ENV syntax is deprecated.
ENV DEBIAN_FRONTEND=noninteractive
ENV CMDARGS=--listen

RUN apt-get update -y && \
    apt-get install -y curl libgl1 libglib2.0-0 python3-pip python-is-python3 git && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

COPY requirements_docker.txt requirements_versions.txt /tmp/
RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \
    rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt
RUN pip install --no-cache-dir xformers==0.0.23 --no-dependencies
# Pre-fetch the gradio share-tunnel binary so --share works without a runtime download.
RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \
    chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2

RUN adduser --disabled-password --gecos '' user && \
    mkdir -p /content/app /content/data

COPY entrypoint.sh /content/
RUN chown -R user:user /content

WORKDIR /content
USER user

COPY --chown=user:user . /content/app
# Keep the pristine model tree aside; entrypoint.sh copies it into the data volume.
RUN mv /content/app/models /content/app/models.org

CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ]
30 |
--------------------------------------------------------------------------------
/wildcards/animal.txt:
--------------------------------------------------------------------------------
1 | Alligator
2 | Ant
3 | Antelope
4 | Armadillo
5 | Badger
6 | Bat
7 | Bear
8 | Beaver
9 | Bison
10 | Boar
11 | Bobcat
12 | Bull
13 | Camel
14 | Chameleon
15 | Cheetah
16 | Chicken
17 | Chihuahua
18 | Chimpanzee
19 | Chinchilla
20 | Chipmunk
21 | Komodo Dragon
22 | Cow
23 | Coyote
24 | Crocodile
25 | Crow
26 | Deer
27 | Dinosaur
28 | Dolphin
29 | Donkey
30 | Duck
31 | Eagle
32 | Eel
33 | Elephant
34 | Elk
35 | Emu
36 | Falcon
37 | Ferret
38 | Flamingo
39 | Flying Squirrel
40 | Giraffe
41 | Goose
42 | Guinea pig
43 | Hawk
44 | Hedgehog
45 | Hippopotamus
46 | Horse
47 | Hummingbird
48 | Hyena
49 | Jackal
50 | Jaguar
51 | Jellyfish
52 | Kangaroo
53 | King Cobra
54 | Koala bear
55 | Leopard
56 | Lion
57 | Lizard
58 | Magpie
59 | Marten
60 | Meerkat
61 | Mole
62 | Monkey
63 | Moose
64 | Mouse
65 | Octopus
66 | Okapi
67 | Orangutan
68 | Ostrich
69 | Otter
70 | Owl
71 | Panda
72 | Pangolin
73 | Panther
74 | Penguin
75 | Pig
76 | Porcupine
77 | Possum
78 | Puma
79 | Quokka
80 | Rabbit
81 | Raccoon
82 | Raven
83 | Reindeer
84 | Rhinoceros
85 | Seal
86 | Shark
87 | Sheep
88 | Snail
89 | Snake
90 | Sparrow
91 | Spider
92 | Squirrel
93 | Swallow
94 | Tiger
95 | Walrus
96 | Whale
97 | Wolf
98 | Wombat
99 | Yak
100 | Zebra
101 |
--------------------------------------------------------------------------------
/ldm_patched/modules/sd2_clip.py:
--------------------------------------------------------------------------------
1 | from ldm_patched.modules import sd1_clip
2 | import torch
3 | import os
4 |
class SD2ClipHModel(sd1_clip.SDClipModel):
    """SD2.x text encoder (arch defaults to ViT-H-14).

    The 'penultimate' layer alias is resolved to hidden layer -2 before the
    base class is configured with the sd2_clip_config.json next to this file.
    """
    def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, dtype=None):
        if layer == "penultimate":
            layer, layer_idx = "hidden", -2

        config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json")
        super().__init__(
            device=device,
            freeze=freeze,
            layer=layer,
            layer_idx=layer_idx,
            textmodel_json_config=config_path,
            dtype=dtype,
            special_tokens={"start": 49406, "end": 49407, "pad": 0},
        )
13 |
class SD2ClipHTokenizer(sd1_clip.SDTokenizer):
    """Tokenizer for the SD2 'h' text model: 1024-dim embeddings, no end-token padding."""
    def __init__(self, tokenizer_path=None, embedding_directory=None):
        super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024)
17 |
class SD2Tokenizer(sd1_clip.SD1Tokenizer):
    """SD1Tokenizer wrapper that routes tokenization through SD2ClipHTokenizer under the 'h' clip name."""
    def __init__(self, embedding_directory=None):
        super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer)
21 |
class SD2ClipModel(sd1_clip.SD1ClipModel):
    """SD1ClipModel wrapper that plugs SD2ClipHModel in as the 'h' clip submodel."""
    def __init__(self, device="cpu", dtype=None, **kwargs):
        super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel, **kwargs)
25 |
--------------------------------------------------------------------------------
/.github/workflows/build_container.yml:
--------------------------------------------------------------------------------
1 | name: Docker image build
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | tags:
8 | - v*
9 |
10 | jobs:
11 | build-and-push-image:
12 | runs-on: ubuntu-latest
13 |
14 | permissions:
15 | contents: read
16 | packages: write
17 |
18 | steps:
19 | - name: Checkout repository
20 | uses: actions/checkout@v4
21 |
22 | - name: Log in to the Container registry
23 | uses: docker/login-action@v3
24 | with:
25 | registry: ghcr.io
26 | username: ${{ github.repository_owner }}
27 | password: ${{ secrets.GITHUB_TOKEN }}
28 |
29 | - name: Extract metadata (tags, labels) for Docker
30 | id: meta
31 | uses: docker/metadata-action@v5
32 | with:
33 | images: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
34 | tags: |
35 | type=semver,pattern={{version}}
36 | type=semver,pattern={{major}}.{{minor}}
37 | type=semver,pattern={{major}}
38 | type=edge,branch=main
39 |
40 | - name: Build and push Docker image
41 | uses: docker/build-push-action@v6
42 | with:
43 | context: .
44 | file: ./Dockerfile
45 | push: true
46 | tags: ${{ steps.meta.outputs.tags }}
47 | labels: ${{ steps.meta.outputs.labels }}
--------------------------------------------------------------------------------
/modules/auth.py:
--------------------------------------------------------------------------------
1 | import json
2 | import hashlib
3 | import modules.constants as constants
4 |
5 | from os.path import exists
6 |
7 |
def auth_list_to_dict(auth_list):
    """Convert a list of credential entries into a {user: sha256-hex} dict.

    Each entry is a dict with a 'user' key plus either a precomputed 'hash'
    (stored as-is) or a clear-text 'pass' (hashed here; 'hash' wins when both
    are present). Entries without 'user', or with neither field, are skipped.
    """
    auth_dict = {}
    for auth_data in auth_list:
        if 'user' not in auth_data:
            continue
        if 'hash' in auth_data:
            auth_dict[auth_data['user']] = auth_data['hash']
        elif 'pass' in auth_data:
            auth_dict[auth_data['user']] = hashlib.sha256(bytes(auth_data['pass'], encoding='utf-8')).hexdigest()
    return auth_dict
16 | return auth_dict
17 |
18 |
def load_auth_data(filename=None):
    """Load auth entries from a JSON file; return a {user: hash} dict or None.

    Returns None (auth stays disabled) when the filename is None, the file
    does not exist, the JSON is invalid, or it is not a non-empty list.
    Parse errors are logged rather than raised, matching best-effort startup.
    """
    auth_dict = None
    # Idiom fix: identity comparison with None instead of '!='.
    if filename is not None and exists(filename):
        with open(filename, encoding='utf-8') as auth_file:
            try:
                auth_obj = json.load(auth_file)
                if isinstance(auth_obj, list) and len(auth_obj) > 0:
                    auth_dict = auth_list_to_dict(auth_obj)
            except Exception as e:
                print('load_auth_data, e: ' + str(e))
    return auth_dict
29 | return auth_dict
30 |
31 |
# Load credentials once at import time from the configured auth file.
auth_dict = load_auth_data(constants.AUTH_FILENAME)

# Idiom fix: identity comparison with None. Auth is enabled only when a
# usable auth file was loaded.
auth_enabled = auth_dict is not None
35 |
36 |
def check_auth(user, password):
    """Return True when *user* exists and *password*'s sha256 matches the stored hash."""
    if user in auth_dict:
        digest = hashlib.sha256(bytes(password, encoding='utf-8')).hexdigest()
        return digest == auth_dict[user]
    return False
42 |
--------------------------------------------------------------------------------
/extras/facexlib/detection/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from copy import deepcopy
3 |
4 | from extras.facexlib.utils import load_file_from_url
5 | from .retinaface import RetinaFace
6 |
7 |
def init_detection_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Build a RetinaFace detector and load its pretrained weights.

    Supported names: 'retinaface_resnet50' and 'retinaface_mobile0.25'; any
    other name raises NotImplementedError. Weights are downloaded (or reused
    from the local cache) and the model is returned in eval mode on *device*.
    """
    if model_name == 'retinaface_resnet50':
        model = RetinaFace(network_name='resnet50', half=half, device=device)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth'
    elif model_name == 'retinaface_mobile0.25':
        model = RetinaFace(network_name='mobile0.25', half=half, device=device)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)

    # TODO: clean pretrained model
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage, weights_only=True)
    # remove unnecessary 'module.'
    # Iterating a deepcopy lets load_net be mutated in place; checkpoints saved
    # from a wrapped model (presumably DataParallel — confirm) carry a
    # 'module.' key prefix that the bare model's state_dict does not expect.
    for k, v in deepcopy(load_net).items():
        if k.startswith('module.'):
            load_net[k[7:]] = v
            load_net.pop(k)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)
    return model
32 |
--------------------------------------------------------------------------------
/presets/playground_v2.5.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "playground-v2.5-1024px-aesthetic.fp16.safetensors",
3 | "default_refiner": "None",
4 | "default_refiner_switch": 0.5,
5 | "default_loras": [
6 | [
7 | true,
8 | "None",
9 | 1.0
10 | ],
11 | [
12 | true,
13 | "None",
14 | 1.0
15 | ],
16 | [
17 | true,
18 | "None",
19 | 1.0
20 | ],
21 | [
22 | true,
23 | "None",
24 | 1.0
25 | ],
26 | [
27 | true,
28 | "None",
29 | 1.0
30 | ]
31 | ],
32 | "default_cfg_scale": 2.0,
33 | "default_sample_sharpness": 2.0,
34 | "default_sampler": "dpmpp_2m",
35 | "default_scheduler": "edm_playground_v2.5",
36 | "default_performance": "Speed",
37 | "default_prompt": "",
38 | "default_prompt_negative": "",
39 | "default_styles": [
40 | "Fooocus V2"
41 | ],
42 | "default_aspect_ratio": "1024*1024",
43 | "default_overwrite_step": -1,
44 | "default_inpaint_engine_version": "None",
45 | "checkpoint_downloads": {
46 | "playground-v2.5-1024px-aesthetic.fp16.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/playground-v2.5-1024px-aesthetic.fp16.safetensors"
47 | },
48 | "embeddings_downloads": {},
49 | "lora_downloads": {},
50 | "previous_default_models": []
51 | }
--------------------------------------------------------------------------------
/entry_with_update.py:
--------------------------------------------------------------------------------
import os
import sys


root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root)
os.chdir(root)


# Best-effort self-update: fast-forward the local branch to the remote before
# launching. Any failure is reported and then ignored so the app still starts
# with the current code.
try:
    import pygit2
    pygit2.option(pygit2.GIT_OPT_SET_OWNER_VALIDATION, 0)

    repo = pygit2.Repository(os.path.abspath(os.path.dirname(__file__)))

    branch_name = repo.head.shorthand

    remote_name = 'origin'
    remote = repo.remotes[remote_name]

    remote.fetch()

    local_branch_ref = f'refs/heads/{branch_name}'
    local_branch = repo.lookup_reference(local_branch_ref)

    remote_reference = f'refs/remotes/{remote_name}/{branch_name}'
    remote_commit = repo.revparse_single(remote_reference)

    merge_result, _ = repo.merge_analysis(remote_commit.id)

    # BUGFIX: 'Update succeeded.' used to be printed unconditionally after the
    # try/except, even when the update had just failed. It now only prints on
    # the two genuinely successful outcomes.
    if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
        print("Already up-to-date")
        print('Update succeeded.')
    elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
        local_branch.set_target(remote_commit.id)
        repo.head.set_target(remote_commit.id)
        repo.checkout_tree(repo.get(remote_commit.id))
        repo.reset(local_branch.target, pygit2.GIT_RESET_HARD)
        print("Fast-forward merge")
        print('Update succeeded.')
    elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
        # A normal merge would require resolving local changes; do not attempt it.
        print("Update failed - Did you modify any file?")
except Exception as e:
    print('Update failed.')
    print(str(e))

from launch import *
47 |
--------------------------------------------------------------------------------
/modules/extra_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from ast import literal_eval
3 |
4 |
def makedirs_with_log(path):
    """Create directory *path* (including parents); log instead of raising on failure."""
    if not os.path.isdir(path):
        try:
            os.makedirs(path, exist_ok=True)
        except OSError as error:
            print(f'Directory {path} could not be created, reason: {error}')
10 |
11 |
def get_files_from_folder(folder_path, extensions=None, name_filter=None):
    """Recursively collect file paths under *folder_path*, relative to it.

    Files are sorted case-insensitively within each directory; directories
    are traversed bottom-up. *extensions* optionally restricts results to a
    set of lowercase extensions (with leading dot); *name_filter* optionally
    requires a substring in the file name without its extension.
    """
    if not os.path.isdir(folder_path):
        raise ValueError("Folder path is not a valid directory.")

    collected = []
    for root, _, files in os.walk(folder_path, topdown=False):
        rel_dir = os.path.relpath(root, folder_path)
        if rel_dir == ".":
            rel_dir = ""
        for file_name in sorted(files, key=str.casefold):
            stem, extension = os.path.splitext(file_name)
            if extensions is not None and extension.lower() not in extensions:
                continue
            if name_filter is not None and name_filter not in stem:
                continue
            collected.append(os.path.join(rel_dir, file_name))
    return collected
29 |
30 |
def try_eval_env_var(value: str, expected_type=None):
    """Parse an environment-variable string into a Python literal.

    Args:
        value: The raw string value from the environment.
        expected_type: If given, the parsed result is only returned when it
            is an instance of this type; otherwise the raw string is returned.

    Returns:
        The parsed literal when parsing succeeds (and matches
        ``expected_type``, if provided); the original string otherwise.
    """
    try:
        value_eval = value
        if expected_type is bool:
            # literal_eval only accepts 'True'/'False', so normalize case first.
            value_eval = value.title()
        value_eval = literal_eval(value_eval)
        if expected_type is not None and not isinstance(value_eval, expected_type):
            return value
        return value_eval
    except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
        # The exceptions literal_eval is documented to raise for bad input.
        # A bare 'except:' here previously swallowed even KeyboardInterrupt.
        return value
42 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | name: Feature request
2 | description: Suggest an idea for this project
3 | title: "[Feature Request]: "
4 | labels: ["enhancement", "triage"]
5 |
6 | body:
7 | - type: checkboxes
8 | attributes:
9 | label: Is there an existing issue for this?
10 | description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit.
11 | options:
12 | - label: I have searched the existing issues and checked the recent builds/commits
13 | required: true
14 | - type: markdown
15 | attributes:
16 | value: |
17 | *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible*
18 | - type: textarea
19 | id: feature
20 | attributes:
21 | label: What would your feature do?
22 | description: Tell us about your feature in a very clear and simple way, and what problem it would solve
23 | validations:
24 | required: true
25 | - type: textarea
26 | id: workflow
27 | attributes:
28 | label: Proposed workflow
29 | description: Please provide us with step by step information on how you'd like the feature to be accessed and used
30 | value: |
31 | 1. Go to ....
32 | 2. Press ....
33 | 3. ...
34 | validations:
35 | required: true
36 | - type: textarea
37 | id: misc
38 | attributes:
39 | label: Additional information
40 | description: Add any other context or screenshots about the feature request here.
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | volumes:
2 | fooocus-data:
3 |
4 | services:
5 | app:
6 | build: .
7 | image: ghcr.io/lllyasviel/fooocus
8 | ports:
9 | - "7865:7865"
10 | environment:
11 | - CMDARGS=--listen # Arguments for launch.py.
      - DATADIR=/content/data # Directory which stores models and outputs
13 | - config_path=/content/data/config.txt
14 | - config_example_path=/content/data/config_modification_tutorial.txt
15 | - path_checkpoints=/content/data/models/checkpoints/
16 | - path_loras=/content/data/models/loras/
17 | - path_embeddings=/content/data/models/embeddings/
18 | - path_vae_approx=/content/data/models/vae_approx/
19 | - path_upscale_models=/content/data/models/upscale_models/
20 | - path_inpaint=/content/data/models/inpaint/
21 | - path_controlnet=/content/data/models/controlnet/
22 | - path_clip_vision=/content/data/models/clip_vision/
23 | - path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/
24 | - path_outputs=/content/app/outputs/ # Warning: If it is not located under '/content/app', you can't see history log!
25 | volumes:
26 | - fooocus-data:/content/data
27 | #- ./models:/import/models # Once you import files, you don't need to mount again.
28 | #- ./outputs:/import/outputs # Once you import files, you don't need to mount again.
29 | tty: true
30 | deploy:
31 | resources:
32 | reservations:
33 | devices:
34 | - driver: nvidia
35 | device_ids: ['0']
36 | capabilities: [compute, utility]
37 |
--------------------------------------------------------------------------------
/presets/pony_v6.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "ponyDiffusionV6XL.safetensors",
3 | "default_refiner": "None",
4 | "default_refiner_switch": 0.5,
5 | "default_vae": "ponyDiffusionV6XL_vae.safetensors",
6 | "default_loras": [
7 | [
8 | true,
9 | "None",
10 | 1.0
11 | ],
12 | [
13 | true,
14 | "None",
15 | 1.0
16 | ],
17 | [
18 | true,
19 | "None",
20 | 1.0
21 | ],
22 | [
23 | true,
24 | "None",
25 | 1.0
26 | ],
27 | [
28 | true,
29 | "None",
30 | 1.0
31 | ]
32 | ],
33 | "default_cfg_scale": 7.0,
34 | "default_sample_sharpness": 2.0,
35 | "default_sampler": "dpmpp_2m_sde_gpu",
36 | "default_scheduler": "karras",
37 | "default_performance": "Speed",
38 | "default_prompt": "",
39 | "default_prompt_negative": "",
40 | "default_styles": [
41 | "Fooocus Pony"
42 | ],
43 | "default_aspect_ratio": "896*1152",
44 | "default_overwrite_step": -1,
45 | "default_inpaint_engine_version": "None",
46 | "checkpoint_downloads": {
47 | "ponyDiffusionV6XL.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL.safetensors"
48 | },
49 | "embeddings_downloads": {},
50 | "lora_downloads": {},
51 | "vae_downloads": {
52 | "ponyDiffusionV6XL_vae.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL_vae.safetensors"
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/LICENSE-RealESRGAN:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021, Xintao Wang
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/ldm_patched/modules/diffusers_load.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import ldm_patched.modules.sd
4 |
def first_file(path, filenames):
    """Return the full path of the first of *filenames* that exists under *path*, or None."""
    candidates = (os.path.join(path, name) for name in filenames)
    return next((candidate for candidate in candidates if os.path.exists(candidate)), None)
11 |
def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_directory=None):
    """Load a diffusers-format model directory into (unet, clip, vae).

    Args:
        model_path: Root of a diffusers model layout (unet/, vae/, text_encoder/, ...).
        output_vae: Whether to load and return the VAE.
        output_clip: Whether to load and return the text encoder(s).
        embedding_directory: Optional directory with textual-inversion embeddings.

    Returns:
        Tuple ``(unet, clip, vae)``; ``clip`` and/or ``vae`` are None when not requested.
    """
    # BUGFIX: this function uses ldm_patched.modules.utils but the module only
    # imports ldm_patched.modules.sd at the top; relying on sd to import utils
    # transitively is fragile, so import it explicitly here.
    import ldm_patched.modules.utils

    diffusion_model_names = ["diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.bin"]
    unet_path = first_file(os.path.join(model_path, "unet"), diffusion_model_names)
    vae_path = first_file(os.path.join(model_path, "vae"), diffusion_model_names)

    text_encoder_model_names = ["model.fp16.safetensors", "model.safetensors", "pytorch_model.fp16.bin", "pytorch_model.bin"]
    text_encoder1_path = first_file(os.path.join(model_path, "text_encoder"), text_encoder_model_names)
    text_encoder2_path = first_file(os.path.join(model_path, "text_encoder_2"), text_encoder_model_names)

    # SDXL-style models ship a second text encoder; SD1.x models do not.
    text_encoder_paths = [text_encoder1_path]
    if text_encoder2_path is not None:
        text_encoder_paths.append(text_encoder2_path)

    unet = ldm_patched.modules.sd.load_unet(unet_path)

    clip = None
    if output_clip:
        clip = ldm_patched.modules.sd.load_clip(text_encoder_paths, embedding_directory=embedding_directory)

    vae = None
    if output_vae:
        sd = ldm_patched.modules.utils.load_torch_file(vae_path)
        vae = ldm_patched.modules.sd.VAE(sd=sd)

    return (unet, clip, vae)
37 |
--------------------------------------------------------------------------------
/presets/lightning.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "juggernautXL_v8Rundiffusion.safetensors",
3 | "default_refiner": "None",
4 | "default_refiner_switch": 0.5,
5 | "default_loras": [
6 | [
7 | true,
8 | "None",
9 | 1.0
10 | ],
11 | [
12 | true,
13 | "None",
14 | 1.0
15 | ],
16 | [
17 | true,
18 | "None",
19 | 1.0
20 | ],
21 | [
22 | true,
23 | "None",
24 | 1.0
25 | ],
26 | [
27 | true,
28 | "None",
29 | 1.0
30 | ]
31 | ],
32 | "default_cfg_scale": 4.0,
33 | "default_sample_sharpness": 2.0,
34 | "default_sampler": "dpmpp_2m_sde_gpu",
35 | "default_scheduler": "karras",
36 | "default_performance": "Lightning",
37 | "default_prompt": "",
38 | "default_prompt_negative": "",
39 | "default_styles": [
40 | "Fooocus V2",
41 | "Fooocus Enhance",
42 | "Fooocus Sharp"
43 | ],
44 | "default_aspect_ratio": "1152*896",
45 | "checkpoint_downloads": {
46 | "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
47 | },
48 | "embeddings_downloads": {},
49 | "lora_downloads": {},
50 | "previous_default_models": [
51 | "juggernautXL_version8Rundiffusion.safetensors",
52 | "juggernautXL_version7Rundiffusion.safetensors",
53 | "juggernautXL_v7Rundiffusion.safetensors",
54 | "juggernautXL_version6Rundiffusion.safetensors",
55 | "juggernautXL_v6Rundiffusion.safetensors"
56 | ]
57 | }
--------------------------------------------------------------------------------
/ldm_patched/ldm/modules/encoders/noise_aug_modules.py:
--------------------------------------------------------------------------------
1 | from ..diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation
2 | from ..diffusionmodules.openaimodel import Timestep
3 | import torch
4 |
class CLIPEmbeddingNoiseAugmentation(ImageConcatWithNoiseAugmentation):
    """Noise augmentation over CLIP embeddings, normalized by dataset statistics."""

    def __init__(self, *args, clip_stats_path=None, timestep_dim=256, **kwargs):
        super().__init__(*args, **kwargs)
        if clip_stats_path is None:
            # No stats file: fall back to identity normalization (mean 0, std 1).
            clip_mean = torch.zeros(timestep_dim)
            clip_std = torch.ones(timestep_dim)
        else:
            clip_mean, clip_std = torch.load(clip_stats_path, map_location="cpu", weights_only=True)
        # Non-persistent buffers: stats follow the module's device but are not saved.
        self.register_buffer("data_mean", clip_mean[None, :], persistent=False)
        self.register_buffer("data_std", clip_std[None, :], persistent=False)
        self.time_embed = Timestep(timestep_dim)

    def scale(self, x):
        """Normalize x to centered mean and unit variance using the stored stats."""
        mean = self.data_mean.to(x.device)
        std = self.data_std.to(x.device)
        return (x - mean) * 1. / std

    def unscale(self, x):
        """Invert scale(): map normalized x back to the original data statistics."""
        mean = self.data_mean.to(x.device)
        std = self.data_std.to(x.device)
        return (x * std) + mean

    def forward(self, x, noise_level=None, seed=None):
        if noise_level is None:
            # Sample one random noise level per batch element.
            noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
        else:
            assert isinstance(noise_level, torch.Tensor)
        normalized = self.scale(x)
        noised = self.q_sample(normalized, noise_level, seed=seed)
        z = self.unscale(noised)
        return z, self.time_embed(noise_level)
36 |
--------------------------------------------------------------------------------
/presets/lcm.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "juggernautXL_v8Rundiffusion.safetensors",
3 | "default_refiner": "None",
4 | "default_refiner_switch": 0.5,
5 | "default_loras": [
6 | [
7 | true,
8 | "None",
9 | 1.0
10 | ],
11 | [
12 | true,
13 | "None",
14 | 1.0
15 | ],
16 | [
17 | true,
18 | "None",
19 | 1.0
20 | ],
21 | [
22 | true,
23 | "None",
24 | 1.0
25 | ],
26 | [
27 | true,
28 | "None",
29 | 1.0
30 | ]
31 | ],
32 | "default_cfg_scale": 4.0,
33 | "default_sample_sharpness": 2.0,
34 | "default_sampler": "dpmpp_2m_sde_gpu",
35 | "default_scheduler": "karras",
36 | "default_performance": "Extreme Speed",
37 | "default_prompt": "",
38 | "default_prompt_negative": "",
39 | "default_styles": [
40 | "Fooocus V2",
41 | "Fooocus Enhance",
42 | "Fooocus Sharp"
43 | ],
44 | "default_aspect_ratio": "1152*896",
45 | "default_overwrite_step": -1,
46 | "checkpoint_downloads": {
47 | "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
48 | },
49 | "embeddings_downloads": {},
50 | "lora_downloads": {},
51 | "previous_default_models": [
52 | "juggernautXL_version8Rundiffusion.safetensors",
53 | "juggernautXL_version7Rundiffusion.safetensors",
54 | "juggernautXL_v7Rundiffusion.safetensors",
55 | "juggernautXL_version6Rundiffusion.safetensors",
56 | "juggernautXL_v6Rundiffusion.safetensors"
57 | ]
58 | }
--------------------------------------------------------------------------------
/presets/anime.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "animaPencilXL_v500.safetensors",
3 | "default_refiner": "None",
4 | "default_refiner_switch": 0.5,
5 | "default_loras": [
6 | [
7 | true,
8 | "None",
9 | 1.0
10 | ],
11 | [
12 | true,
13 | "None",
14 | 1.0
15 | ],
16 | [
17 | true,
18 | "None",
19 | 1.0
20 | ],
21 | [
22 | true,
23 | "None",
24 | 1.0
25 | ],
26 | [
27 | true,
28 | "None",
29 | 1.0
30 | ]
31 | ],
32 | "default_cfg_scale": 6.0,
33 | "default_sample_sharpness": 2.0,
34 | "default_sampler": "dpmpp_2m_sde_gpu",
35 | "default_scheduler": "karras",
36 | "default_performance": "Speed",
37 | "default_prompt": "",
38 | "default_prompt_negative": "",
39 | "default_styles": [
40 | "Fooocus V2",
41 | "Fooocus Semi Realistic",
42 | "Fooocus Masterpiece"
43 | ],
44 | "default_aspect_ratio": "896*1152",
45 | "default_overwrite_step": -1,
46 | "checkpoint_downloads": {
47 | "animaPencilXL_v500.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v500.safetensors"
48 | },
49 | "embeddings_downloads": {},
50 | "lora_downloads": {},
51 | "previous_default_models": [
52 | "animaPencilXL_v400.safetensors",
53 | "animaPencilXL_v310.safetensors",
54 | "animaPencilXL_v300.safetensors",
55 | "animaPencilXL_v260.safetensors",
56 | "animaPencilXL_v210.safetensors",
57 | "animaPencilXL_v200.safetensors",
58 | "animaPencilXL_v100.safetensors"
59 | ]
60 | }
61 |
--------------------------------------------------------------------------------
/presets/realistic.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "realisticStockPhoto_v20.safetensors",
3 | "default_refiner": "None",
4 | "default_refiner_switch": 0.5,
5 | "default_loras": [
6 | [
7 | true,
8 | "SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors",
9 | 0.25
10 | ],
11 | [
12 | true,
13 | "None",
14 | 1.0
15 | ],
16 | [
17 | true,
18 | "None",
19 | 1.0
20 | ],
21 | [
22 | true,
23 | "None",
24 | 1.0
25 | ],
26 | [
27 | true,
28 | "None",
29 | 1.0
30 | ]
31 | ],
32 | "default_cfg_scale": 3.0,
33 | "default_sample_sharpness": 2.0,
34 | "default_sampler": "dpmpp_2m_sde_gpu",
35 | "default_scheduler": "karras",
36 | "default_performance": "Speed",
37 | "default_prompt": "",
38 | "default_prompt_negative": "unrealistic, saturated, high contrast, big nose, painting, drawing, sketch, cartoon, anime, manga, render, CG, 3d, watermark, signature, label",
39 | "default_styles": [
40 | "Fooocus V2",
41 | "Fooocus Photograph",
42 | "Fooocus Negative"
43 | ],
44 | "default_aspect_ratio": "896*1152",
45 | "default_overwrite_step": -1,
46 | "checkpoint_downloads": {
47 | "realisticStockPhoto_v20.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v20.safetensors"
48 | },
49 | "embeddings_downloads": {},
50 | "lora_downloads": {
51 | "SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors"
52 | },
53 | "previous_default_models": ["realisticStockPhoto_v10.safetensors"]
54 | }
--------------------------------------------------------------------------------
/presets/sai.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "sd_xl_base_1.0_0.9vae.safetensors",
3 | "default_refiner": "sd_xl_refiner_1.0_0.9vae.safetensors",
4 | "default_refiner_switch": 0.75,
5 | "default_loras": [
6 | [
7 | true,
8 | "sd_xl_offset_example-lora_1.0.safetensors",
9 | 0.5
10 | ],
11 | [
12 | true,
13 | "None",
14 | 1.0
15 | ],
16 | [
17 | true,
18 | "None",
19 | 1.0
20 | ],
21 | [
22 | true,
23 | "None",
24 | 1.0
25 | ],
26 | [
27 | true,
28 | "None",
29 | 1.0
30 | ]
31 | ],
32 | "default_cfg_scale": 7.0,
33 | "default_sample_sharpness": 2.0,
34 | "default_sampler": "dpmpp_2m_sde_gpu",
35 | "default_scheduler": "karras",
36 | "default_performance": "Speed",
37 | "default_prompt": "",
38 | "default_prompt_negative": "",
39 | "default_styles": [
40 | "Fooocus V2",
41 | "Fooocus Cinematic"
42 | ],
43 | "default_aspect_ratio": "1152*896",
44 | "default_overwrite_step": -1,
45 | "checkpoint_downloads": {
46 | "sd_xl_base_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors",
47 | "sd_xl_refiner_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors"
48 | },
49 | "embeddings_downloads": {},
50 | "lora_downloads": {
51 | "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
52 | },
53 | "previous_default_models": []
54 | }
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/face/LICENSE-codeformer:
--------------------------------------------------------------------------------
1 | S-Lab License 1.0
2 |
3 | Copyright 2022 S-Lab
4 |
5 | Redistribution and use for non-commercial purpose in source and
6 | binary forms, with or without modification, are permitted provided
7 | that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright
10 | notice, this list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright
13 | notice, this list of conditions and the following disclaimer in
14 | the documentation and/or other materials provided with the
15 | distribution.
16 |
17 | 3. Neither the name of the copyright holder nor the names of its
18 | contributors may be used to endorse or promote products derived
19 | from this software without specific prior written permission.
20 |
21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
33 | In the event that redistribution and/or use for commercial purpose in
34 | source or binary forms, with or without modification is required,
35 | please contact the contributor(s) of the work.
36 |
--------------------------------------------------------------------------------
/extras/face_crop.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import modules.config
4 |
5 |
6 | faceRestoreHelper = None
7 |
8 |
def align_warp_face(self, landmark, border_mode='constant'):
    """Warp the helper's input image so *landmark* aligns with its face template."""
    affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
    self.affine_matrices.append(affine_matrix)
    border_modes = {
        'constant': cv2.BORDER_CONSTANT,
        'reflect101': cv2.BORDER_REFLECT101,
        'reflect': cv2.BORDER_REFLECT,
    }
    # Unknown names fall through unchanged, matching the original if/elif chain.
    border_mode = border_modes.get(border_mode, border_mode)
    cropped_face = cv2.warpAffine(self.input_img, affine_matrix, self.face_size,
                                  borderMode=border_mode, borderValue=(135, 133, 132))
    return cropped_face
22 |
23 |
def crop_image(img_rgb):
    """Detect the most confident face in an RGB image and return it cropped and aligned (RGB)."""
    global faceRestoreHelper

    if faceRestoreHelper is None:
        # Lazily build the helper on first use; the import pulls in heavy dependencies.
        from extras.facexlib.utils.face_restoration_helper import FaceRestoreHelper
        faceRestoreHelper = FaceRestoreHelper(
            upscale_factor=1,
            model_rootpath=modules.config.path_controlnet,
            device='cpu'  # use cpu is safer since we are out of memory management
        )

    helper = faceRestoreHelper
    helper.clean_all()
    # The helper works in BGR, so flip channels on the way in and out.
    helper.read_image(np.ascontiguousarray(img_rgb[:, :, ::-1].copy()))
    helper.get_face_landmarks_5()

    # landmarks are already sorted with confidence.
    landmarks = helper.all_landmarks_5

    if len(landmarks) == 0:
        print('No face detected')
        return img_rgb

    print(f'Detected {len(landmarks)} faces')
    result = align_warp_face(helper, landmarks[0])
    return np.ascontiguousarray(result[:, :, ::-1].copy())
51 |
--------------------------------------------------------------------------------
/presets/default.json:
--------------------------------------------------------------------------------
1 | {
2 | "default_model": "juggernautXL_v8Rundiffusion.safetensors",
3 | "default_refiner": "None",
4 | "default_refiner_switch": 0.5,
5 | "default_loras": [
6 | [
7 | true,
8 | "sd_xl_offset_example-lora_1.0.safetensors",
9 | 0.1
10 | ],
11 | [
12 | true,
13 | "None",
14 | 1.0
15 | ],
16 | [
17 | true,
18 | "None",
19 | 1.0
20 | ],
21 | [
22 | true,
23 | "None",
24 | 1.0
25 | ],
26 | [
27 | true,
28 | "None",
29 | 1.0
30 | ]
31 | ],
32 | "default_cfg_scale": 4.0,
33 | "default_sample_sharpness": 2.0,
34 | "default_sampler": "dpmpp_2m_sde_gpu",
35 | "default_scheduler": "karras",
36 | "default_performance": "Speed",
37 | "default_prompt": "",
38 | "default_prompt_negative": "",
39 | "default_styles": [
40 | "Fooocus V2",
41 | "Fooocus Enhance",
42 | "Fooocus Sharp"
43 | ],
44 | "default_aspect_ratio": "1152*896",
45 | "default_overwrite_step": -1,
46 | "checkpoint_downloads": {
47 | "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
48 | },
49 | "embeddings_downloads": {},
50 | "lora_downloads": {
51 | "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
52 | },
53 | "previous_default_models": [
54 | "juggernautXL_version8Rundiffusion.safetensors",
55 | "juggernautXL_version7Rundiffusion.safetensors",
56 | "juggernautXL_v7Rundiffusion.safetensors",
57 | "juggernautXL_version6Rundiffusion.safetensors",
58 | "juggernautXL_v6Rundiffusion.safetensors"
59 | ]
60 | }
--------------------------------------------------------------------------------
/ldm_patched/pfn/types.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | from .architecture.DAT import DAT
4 | from .architecture.face.codeformer import CodeFormer
5 | from .architecture.face.gfpganv1_clean_arch import GFPGANv1Clean
6 | from .architecture.face.restoreformer_arch import RestoreFormer
7 | from .architecture.HAT import HAT
8 | from .architecture.LaMa import LaMa
9 | from .architecture.OmniSR.OmniSR import OmniSR
10 | from .architecture.RRDB import RRDBNet as ESRGAN
11 | from .architecture.SCUNet import SCUNet
12 | from .architecture.SPSR import SPSRNet as SPSR
13 | from .architecture.SRVGG import SRVGGNetCompact as RealESRGANv2
14 | from .architecture.SwiftSRGAN import Generator as SwiftSRGAN
15 | from .architecture.Swin2SR import Swin2SR
16 | from .architecture.SwinIR import SwinIR
17 |
# Runtime tuple of all supported super-resolution architectures (for isinstance checks).
PyTorchSRModels = (
    RealESRGANv2,
    SPSR,
    SwiftSRGAN,
    ESRGAN,
    SwinIR,
    Swin2SR,
    HAT,
    OmniSR,
    SCUNet,
    DAT,
)
# Static-typing union of the same architectures, for annotations.
PyTorchSRModel = Union[
    RealESRGANv2,
    SPSR,
    SwiftSRGAN,
    ESRGAN,
    SwinIR,
    Swin2SR,
    HAT,
    OmniSR,
    SCUNet,
    DAT,
]


def is_pytorch_sr_model(model: object) -> bool:
    """Return True when *model* is an instance of a supported super-resolution architecture."""
    return isinstance(model, PyTorchSRModels)
46 |
47 |
# Face-restoration architectures: runtime tuple and matching typing union.
PyTorchFaceModels = (GFPGANv1Clean, RestoreFormer, CodeFormer)
PyTorchFaceModel = Union[GFPGANv1Clean, RestoreFormer, CodeFormer]


def is_pytorch_face_model(model: object) -> bool:
    """Return True when *model* is an instance of a supported face-restoration architecture."""
    return isinstance(model, PyTorchFaceModels)
54 |
55 |
# Inpainting architectures: runtime tuple and matching typing union.
PyTorchInpaintModels = (LaMa,)
PyTorchInpaintModel = Union[LaMa]


def is_pytorch_inpaint_model(model: object) -> bool:
    """Return True when *model* is an instance of a supported inpainting architecture."""
    return isinstance(model, PyTorchInpaintModels)
62 |
63 |
# Union of every architecture this package can load (SR + face + inpaint).
PyTorchModels = (*PyTorchSRModels, *PyTorchFaceModels, *PyTorchInpaintModels)
PyTorchModel = Union[PyTorchSRModel, PyTorchFaceModel, PyTorchInpaintModel]


def is_pytorch_model(model: object) -> bool:
    """Return True when *model* is an instance of any supported architecture."""
    return isinstance(model, PyTorchModels)
70 |
--------------------------------------------------------------------------------
/ldm_patched/contrib/external_sdupscale.py:
--------------------------------------------------------------------------------
1 | # https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
2 |
3 | import torch
4 | import ldm_patched.contrib.external
5 | import ldm_patched.modules.utils
6 |
class SD_4XUpscale_Conditioning:
    """Prepare conditioning and an empty latent for the SD 4x upscale diffusion model."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"images": ("IMAGE",),
                             "positive": ("CONDITIONING",),
                             "negative": ("CONDITIONING",),
                             "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             }}
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")

    FUNCTION = "encode"

    CATEGORY = "conditioning/upscale_diffusion"

    def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
        # Target size in pixels; the model's latent space is 1/4 of that.
        width = max(1, round(images.shape[-2] * scale_ratio))
        height = max(1, round(images.shape[-3] * scale_ratio))

        # Rescale pixel values from [0, 1] to [-1, 1] and resize to latent resolution.
        pixels = ldm_patched.modules.utils.common_upscale(
            (images.movedim(-1, 1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center")

        def attach_upscale_inputs(conditioning):
            # Copy each (cond, extras) entry and add the concat image + noise level.
            attached = []
            for t in conditioning:
                entry = [t[0], t[1].copy()]
                entry[1]['concat_image'] = pixels
                entry[1]['noise_augmentation'] = noise_augmentation
                attached.append(entry)
            return attached

        latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
        return (attach_upscale_inputs(positive), attach_upscale_inputs(negative), {"samples": latent})
46 |
# Registry consumed by the node loader: maps node class name -> implementation.
NODE_CLASS_MAPPINGS = {
    "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
}
50 |
--------------------------------------------------------------------------------
/models/configs/v2-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False # we set this to false because this is an inference only config
19 |
20 | unet_config:
21 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
22 | params:
23 | use_checkpoint: True
24 | use_fp16: True
25 | image_size: 32 # unused
26 | in_channels: 4
27 | out_channels: 4
28 | model_channels: 320
29 | attention_resolutions: [ 4, 2, 1 ]
30 | num_res_blocks: 2
31 | channel_mult: [ 1, 2, 4, 4 ]
32 | num_head_channels: 64 # need to fix for flash-attn
33 | use_spatial_transformer: True
34 | use_linear_in_transformer: True
35 | transformer_depth: 1
36 | context_dim: 1024
37 | legacy: False
38 |
39 | first_stage_config:
40 | target: ldm.models.autoencoder.AutoencoderKL
41 | params:
42 | embed_dim: 4
43 | monitor: val/rec_loss
44 | ddconfig:
45 | #attn_type: "vanilla-xformers"
46 | double_z: true
47 | z_channels: 4
48 | resolution: 256
49 | in_channels: 3
50 | out_ch: 3
51 | ch: 128
52 | ch_mult:
53 | - 1
54 | - 2
55 | - 4
56 | - 4
57 | num_res_blocks: 2
58 | attn_resolutions: []
59 | dropout: 0.0
60 | lossconfig:
61 | target: torch.nn.Identity
62 |
63 | cond_stage_config:
64 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65 | params:
66 | freeze: True
67 | layer: "penultimate"
68 |
--------------------------------------------------------------------------------
/models/configs/v2-inference_fp32.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False # we set this to false because this is an inference only config
19 |
20 | unet_config:
21 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
22 | params:
23 | use_checkpoint: True
24 | use_fp16: False
25 | image_size: 32 # unused
26 | in_channels: 4
27 | out_channels: 4
28 | model_channels: 320
29 | attention_resolutions: [ 4, 2, 1 ]
30 | num_res_blocks: 2
31 | channel_mult: [ 1, 2, 4, 4 ]
32 | num_head_channels: 64 # need to fix for flash-attn
33 | use_spatial_transformer: True
34 | use_linear_in_transformer: True
35 | transformer_depth: 1
36 | context_dim: 1024
37 | legacy: False
38 |
39 | first_stage_config:
40 | target: ldm.models.autoencoder.AutoencoderKL
41 | params:
42 | embed_dim: 4
43 | monitor: val/rec_loss
44 | ddconfig:
45 | #attn_type: "vanilla-xformers"
46 | double_z: true
47 | z_channels: 4
48 | resolution: 256
49 | in_channels: 3
50 | out_ch: 3
51 | ch: 128
52 | ch_mult:
53 | - 1
54 | - 2
55 | - 4
56 | - 4
57 | num_res_blocks: 2
58 | attn_resolutions: []
59 | dropout: 0.0
60 | lossconfig:
61 | target: torch.nn.Identity
62 |
63 | cond_stage_config:
64 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65 | params:
66 | freeze: True
67 | layer: "penultimate"
68 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/OmniSR/OSAG.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | #############################################################
4 | # File: OSAG.py
5 | # Created Date: Tuesday April 28th 2022
6 | # Author: Chen Xuanhong
7 | # Email: chenxuanhongzju@outlook.com
8 | # Last Modified: Sunday, 23rd April 2023 3:08:49 pm
9 | # Modified By: Chen Xuanhong
10 | # Copyright (c) 2020 Shanghai Jiao Tong University
11 | #############################################################
12 |
13 |
14 | import torch.nn as nn
15 |
16 | from .esa import ESA
17 | from .OSA import OSA_Block
18 |
19 |
class OSAG(nn.Module):
    """Omni-Scale Aggregation Group: a residual stack of OSA blocks followed
    by a 1x1 convolution, with an ESA attention module applied to the
    residual-added output."""

    def __init__(
        self,
        channel_num=64,
        bias=True,
        block_num=4,
        ffn_bias=False,
        window_size=0,
        pe=False,
    ):
        super(OSAG, self).__init__()

        # block_num OSA blocks, then a 1x1 conv to fuse channels.
        layers = [
            OSA_Block(
                channel_num,
                bias,
                ffn_bias=ffn_bias,
                window_size=window_size,
                with_pe=pe,
            )
            for _ in range(block_num)
        ]
        layers.append(nn.Conv2d(channel_num, channel_num, 1, 1, 0, bias=bias))
        self.residual_layer = nn.Sequential(*layers)

        # ESA operates on a reduced channel count, but never below 16.
        esa_channel = max(channel_num // 4, 16)
        self.esa = ESA(esa_channel, channel_num)

    def forward(self, x):
        residual = self.residual_layer(x) + x
        return self.esa(residual)
61 |
--------------------------------------------------------------------------------
/modules/style_sorter.py:
--------------------------------------------------------------------------------
1 | import os
2 | import gradio as gr
3 | import modules.localization as localization
4 | import json
5 |
6 |
7 | all_styles = []
8 |
9 |
def try_load_sorted_styles(style_names, default_selected):
    """Initialize the global style ordering.

    Applies the persisted order from 'sorted_styles.json' when available
    (best-effort: failures only print a warning), then moves the default
    selection to the front.
    """
    global all_styles

    all_styles = style_names

    try:
        if os.path.exists('sorted_styles.json'):
            with open('sorted_styles.json', 'rt', encoding='utf-8') as fp:
                # Keep only names that still exist.
                saved_order = [s for s in json.load(fp) if s in all_styles]
            remainder = [s for s in all_styles if s not in saved_order]
            all_styles = saved_order + remainder
    except Exception as e:
        print('Load style sorting failed.')
        print(e)

    # Selected styles always come first.
    unselected = [s for s in all_styles if s not in default_selected]
    all_styles = default_selected + unselected

    return
34 |
35 |
def sort_styles(selected):
    """Move *selected* styles to the front, persist the order to
    'sorted_styles.json' (best-effort), and refresh the checkbox choices."""
    global all_styles

    remainder = [s for s in all_styles if s not in selected]
    new_order = selected + remainder

    try:
        with open('sorted_styles.json', 'wt', encoding='utf-8') as fp:
            json.dump(new_order, fp, indent=4)
    except Exception as e:
        print('Write style sorting failed.')
        print(e)

    all_styles = new_order
    return gr.CheckboxGroup.update(choices=new_order)
48 |
49 |
def localization_key(x):
    """Concatenate a style name with its translation (if any) so that a
    search can match either the English name or the localized one."""
    translated = localization.current_translation.get(x, '')
    return x + translated
52 |
53 |
def search_styles(selected, query):
    """Reorder checkbox choices so styles matching *query* appear first,
    then the current selection, then everything else."""
    unselected = [s for s in all_styles if s not in selected]

    has_query = len(query.replace(' ', '')) > 0
    if has_query:
        needle = query.lower()
        matched = [s for s in unselected if needle in localization_key(s).lower()]
    else:
        matched = []

    rest = [s for s in unselected if s not in matched]
    return gr.CheckboxGroup.update(choices=matched + selected + rest)
60 |
--------------------------------------------------------------------------------
/models/configs/v2-inference-v.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | parameterization: "v"
6 | linear_start: 0.00085
7 | linear_end: 0.0120
8 | num_timesteps_cond: 1
9 | log_every_t: 200
10 | timesteps: 1000
11 | first_stage_key: "jpg"
12 | cond_stage_key: "txt"
13 | image_size: 64
14 | channels: 4
15 | cond_stage_trainable: false
16 | conditioning_key: crossattn
17 | monitor: val/loss_simple_ema
18 | scale_factor: 0.18215
19 | use_ema: False # we set this to false because this is an inference only config
20 |
21 | unet_config:
22 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23 | params:
24 | use_checkpoint: True
25 | use_fp16: True
26 | image_size: 32 # unused
27 | in_channels: 4
28 | out_channels: 4
29 | model_channels: 320
30 | attention_resolutions: [ 4, 2, 1 ]
31 | num_res_blocks: 2
32 | channel_mult: [ 1, 2, 4, 4 ]
33 | num_head_channels: 64 # need to fix for flash-attn
34 | use_spatial_transformer: True
35 | use_linear_in_transformer: True
36 | transformer_depth: 1
37 | context_dim: 1024
38 | legacy: False
39 |
40 | first_stage_config:
41 | target: ldm.models.autoencoder.AutoencoderKL
42 | params:
43 | embed_dim: 4
44 | monitor: val/rec_loss
45 | ddconfig:
46 | #attn_type: "vanilla-xformers"
47 | double_z: true
48 | z_channels: 4
49 | resolution: 256
50 | in_channels: 3
51 | out_ch: 3
52 | ch: 128
53 | ch_mult:
54 | - 1
55 | - 2
56 | - 4
57 | - 4
58 | num_res_blocks: 2
59 | attn_resolutions: []
60 | dropout: 0.0
61 | lossconfig:
62 | target: torch.nn.Identity
63 |
64 | cond_stage_config:
65 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
66 | params:
67 | freeze: True
68 | layer: "penultimate"
69 |
--------------------------------------------------------------------------------
/models/configs/v2-inference-v_fp32.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | parameterization: "v"
6 | linear_start: 0.00085
7 | linear_end: 0.0120
8 | num_timesteps_cond: 1
9 | log_every_t: 200
10 | timesteps: 1000
11 | first_stage_key: "jpg"
12 | cond_stage_key: "txt"
13 | image_size: 64
14 | channels: 4
15 | cond_stage_trainable: false
16 | conditioning_key: crossattn
17 | monitor: val/loss_simple_ema
18 | scale_factor: 0.18215
19 | use_ema: False # we set this to false because this is an inference only config
20 |
21 | unet_config:
22 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23 | params:
24 | use_checkpoint: True
25 | use_fp16: False
26 | image_size: 32 # unused
27 | in_channels: 4
28 | out_channels: 4
29 | model_channels: 320
30 | attention_resolutions: [ 4, 2, 1 ]
31 | num_res_blocks: 2
32 | channel_mult: [ 1, 2, 4, 4 ]
33 | num_head_channels: 64 # need to fix for flash-attn
34 | use_spatial_transformer: True
35 | use_linear_in_transformer: True
36 | transformer_depth: 1
37 | context_dim: 1024
38 | legacy: False
39 |
40 | first_stage_config:
41 | target: ldm.models.autoencoder.AutoencoderKL
42 | params:
43 | embed_dim: 4
44 | monitor: val/rec_loss
45 | ddconfig:
46 | #attn_type: "vanilla-xformers"
47 | double_z: true
48 | z_channels: 4
49 | resolution: 256
50 | in_channels: 3
51 | out_ch: 3
52 | ch: 128
53 | ch_mult:
54 | - 1
55 | - 2
56 | - 4
57 | - 4
58 | num_res_blocks: 2
59 | attn_resolutions: []
60 | dropout: 0.0
61 | lossconfig:
62 | target: torch.nn.Identity
63 |
64 | cond_stage_config:
65 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
66 | params:
67 | freeze: True
68 | layer: "penultimate"
69 |
--------------------------------------------------------------------------------
/models/configs/v1-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
--------------------------------------------------------------------------------
/ldm_patched/contrib/external_perpneg.py:
--------------------------------------------------------------------------------
1 | # https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
2 |
3 | import torch
4 | import ldm_patched.modules.model_management
5 | import ldm_patched.modules.sample
6 | import ldm_patched.modules.samplers
7 | import ldm_patched.modules.utils
8 |
9 |
class PerpNeg:
    """Node that patches a model's sampler CFG function with Perp-Neg
    guidance: only the component of the negative direction perpendicular to
    the positive direction is subtracted, using an empty ("no prompt")
    conditioning as the common reference point."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                            "empty_conditioning": ("CONDITIONING", ),
                            "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
                            }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "_for_testing"

    def patch(self, model, empty_conditioning, neg_scale):
        """Return a clone of *model* whose CFG function applies Perp-Neg.

        The original model object is not modified; `neg_scale` scales how
        strongly the projected negative component is removed.
        """
        m = model.clone()
        # Unconditional conditioning used as the origin for both directions.
        nocond = ldm_patched.modules.sample.convert_cond(empty_conditioning)

        def cfg_function(args):
            model = args["model"]
            noise_pred_pos = args["cond_denoised"]
            noise_pred_neg = args["uncond_denoised"]
            cond_scale = args["cond_scale"]
            x = args["input"]
            sigma = args["sigma"]
            model_options = args["model_options"]
            # Evaluate the model once more on the empty conditioning to get
            # the unconditional prediction at this step.
            nocond_processed = ldm_patched.modules.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, "negative")

            (noise_pred_nocond, _) = ldm_patched.modules.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)

            # Positive/negative prediction directions relative to the
            # unconditional prediction.
            pos = noise_pred_pos - noise_pred_nocond
            neg = noise_pred_neg - noise_pred_nocond
            # Projection of pos onto neg: (pos . neg / |neg|^2) * neg.
            perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
            perp_neg = perp * neg_scale
            cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
            # NOTE(review): `x - cfg_result` appears to convert the combined
            # denoised prediction back into the residual form expected by
            # set_model_sampler_cfg_function -- confirm against that contract.
            cfg_result = x - cfg_result
            return cfg_result

        m.set_model_sampler_cfg_function(cfg_function)

        return (m, )
48 | return (m, )
49 |
50 |
# Node registration: internal class name -> implementation, plus the
# human-readable display name shown in the UI.
NODE_CLASS_MAPPINGS = {
    "PerpNeg": PerpNeg,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "PerpNeg": "Perp-Neg",
}
58 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_fp16.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | use_fp16: True
33 | image_size: 32 # unused
34 | in_channels: 4
35 | out_channels: 4
36 | model_channels: 320
37 | attention_resolutions: [ 4, 2, 1 ]
38 | num_res_blocks: 2
39 | channel_mult: [ 1, 2, 4, 4 ]
40 | num_heads: 8
41 | use_spatial_transformer: True
42 | transformer_depth: 1
43 | context_dim: 768
44 | use_checkpoint: True
45 | legacy: False
46 |
47 | first_stage_config:
48 | target: ldm.models.autoencoder.AutoencoderKL
49 | params:
50 | embed_dim: 4
51 | monitor: val/rec_loss
52 | ddconfig:
53 | double_z: true
54 | z_channels: 4
55 | resolution: 256
56 | in_channels: 3
57 | out_ch: 3
58 | ch: 128
59 | ch_mult:
60 | - 1
61 | - 2
62 | - 4
63 | - 4
64 | num_res_blocks: 2
65 | attn_resolutions: []
66 | dropout: 0.0
67 | lossconfig:
68 | target: torch.nn.Identity
69 |
70 | cond_stage_config:
71 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
72 |
--------------------------------------------------------------------------------
/tests/test_extra_utils.py:
--------------------------------------------------------------------------------
1 | import numbers
2 | import os
3 | import unittest
4 |
5 | import modules.flags
6 | from modules import extra_utils
7 |
8 |
class TestUtils(unittest.TestCase):
    """Unit tests for modules.extra_utils."""

    def test_try_eval_env_var(self):
        """try_eval_env_var coerces env-var strings to the expected type."""
        # (raw value, expected type, expected result)
        cases = [
            ("foo", str, "foo"),
            ("1", int, 1),
            ("1.0", float, 1.0),
            ("1", numbers.Number, 1),
            ("1.0", numbers.Number, 1.0),
            ("true", bool, True),
            ("True", bool, True),
            ("false", bool, False),
            ("False", bool, False),
            ("True", str, "True"),
            ("False", str, "False"),
            ("['a', 'b', 'c']", list, ['a', 'b', 'c']),
            ("{'a':1}", dict, {'a': 1}),
            ("('foo', 1)", tuple, ('foo', 1)),
        ]

        for value, expected_type, expected in cases:
            actual = extra_utils.try_eval_env_var(value, expected_type)
            self.assertEqual(expected, actual)
75 |
--------------------------------------------------------------------------------
/models/configs/anything_v3.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 | params:
72 | layer: "hidden"
73 | layer_idx: -2
74 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_clip_skip_2.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 | params:
72 | layer: "hidden"
73 | layer_idx: -2
74 |
--------------------------------------------------------------------------------
/wildcards/extended-color.txt:
--------------------------------------------------------------------------------
1 | aliceblue
2 | antiquewhite
3 | aqua
4 | aquamarine
5 | azure
6 | beige
7 | bisque
8 | black
9 | blanchedalmond
10 | blue
11 | blueviolet
12 | brown
13 | burlywood
14 | cadetblue
15 | chartreuse
16 | chocolate
17 | coral
18 | cornflowerblue
19 | cornsilk
20 | crimson
21 | cyan
22 | darkblue
23 | darkcyan
24 | darkgoldenrod
25 | darkgray
26 | darkgreen
27 | darkgrey
28 | darkkhaki
29 | darkmagenta
30 | darkolivegreen
31 | darkorange
32 | darkorchid
33 | darkred
34 | darksalmon
35 | darkseagreen
36 | darkslateblue
37 | darkslategray
38 | darkslategrey
39 | darkturquoise
40 | darkviolet
41 | deeppink
42 | deepskyblue
43 | dimgray
44 | dimgrey
45 | dodgerblue
46 | firebrick
47 | floralwhite
48 | forestgreen
49 | fuchsia
50 | gainsboro
51 | ghostwhite
52 | gold
53 | goldenrod
54 | gray
55 | green
56 | greenyellow
57 | grey
58 | honeydew
59 | hotpink
60 | indianred
61 | indigo
62 | ivory
63 | khaki
64 | lavender
65 | lavenderblush
66 | lawngreen
67 | lemonchiffon
68 | lightblue
69 | lightcoral
70 | lightcyan
71 | lightgoldenrodyellow
72 | lightgray
73 | lightgreen
74 | lightgrey
75 | lightpink
76 | lightsalmon
77 | lightseagreen
78 | lightskyblue
79 | lightslategray
80 | lightslategrey
81 | lightsteelblue
82 | lightyellow
83 | lime
84 | limegreen
85 | linen
86 | magenta
87 | maroon
88 | mediumaquamarine
89 | mediumblue
90 | mediumorchid
91 | mediumpurple
92 | mediumseagreen
93 | mediumslateblue
94 | mediumspringgreen
95 | mediumturquoise
96 | mediumvioletred
97 | midnightblue
98 | mintcream
99 | mistyrose
100 | moccasin
101 | navajowhite
102 | navy
103 | oldlace
104 | olive
105 | olivedrab
106 | orange
107 | orangered
108 | orchid
109 | palegoldenrod
110 | palegreen
111 | paleturquoise
112 | palevioletred
113 | papayawhip
114 | peachpuff
115 | peru
116 | pink
117 | plum
118 | powderblue
119 | purple
120 | red
121 | rosybrown
122 | royalblue
123 | saddlebrown
124 | salmon
125 | sandybrown
126 | seagreen
127 | seashell
128 | sienna
129 | silver
130 | skyblue
131 | slateblue
132 | slategray
133 | slategrey
134 | snow
135 | springgreen
136 | steelblue
137 | tan
138 | teal
139 | thistle
140 | tomato
141 | turquoise
142 | violet
143 | wheat
144 | white
145 | whitesmoke
146 | yellow
147 | yellowgreen
148 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_clip_skip_2_fp16.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | use_fp16: True
33 | image_size: 32 # unused
34 | in_channels: 4
35 | out_channels: 4
36 | model_channels: 320
37 | attention_resolutions: [ 4, 2, 1 ]
38 | num_res_blocks: 2
39 | channel_mult: [ 1, 2, 4, 4 ]
40 | num_heads: 8
41 | use_spatial_transformer: True
42 | transformer_depth: 1
43 | context_dim: 768
44 | use_checkpoint: True
45 | legacy: False
46 |
47 | first_stage_config:
48 | target: ldm.models.autoencoder.AutoencoderKL
49 | params:
50 | embed_dim: 4
51 | monitor: val/rec_loss
52 | ddconfig:
53 | double_z: true
54 | z_channels: 4
55 | resolution: 256
56 | in_channels: 3
57 | out_ch: 3
58 | ch: 128
59 | ch_mult:
60 | - 1
61 | - 2
62 | - 4
63 | - 4
64 | num_res_blocks: 2
65 | attn_resolutions: []
66 | dropout: 0.0
67 | lossconfig:
68 | target: torch.nn.Identity
69 |
70 | cond_stage_config:
71 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
72 | params:
73 | layer: "hidden"
74 | layer_idx: -2
75 |
--------------------------------------------------------------------------------
/models/configs/v1-inpainting-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 7.5e-05
3 | target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: hybrid # important
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | finetune_keys: null
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 9 # 4 data + 4 downscaled image + 1 mask
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
72 |
--------------------------------------------------------------------------------
/ldm_patched/contrib/external_align_your_steps.py:
--------------------------------------------------------------------------------
1 | # https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
2 |
3 | #from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
4 | import numpy as np
5 | import torch
6 |
def loglinear_interp(t_steps, num_steps):
    """Resample a decreasing sequence to *num_steps* values by interpolating
    linearly in log space (so ratios between neighbors stay smooth)."""
    # Work on the increasing (reversed) sequence in log space.
    log_vals = np.log(t_steps[::-1])
    orig_positions = np.linspace(0, 1, len(t_steps))

    target_positions = np.linspace(0, 1, num_steps)
    resampled_logs = np.interp(target_positions, orig_positions, log_vals)

    # Back to linear space and restore the decreasing order.
    return np.exp(resampled_logs)[::-1].copy()
19 |
20 | NOISE_LEVELS = {"SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177, 1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582],
21 | "SDXL":[14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582],
22 | "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002]}
23 |
class AlignYourStepsScheduler:
    """Node producing an "Align Your Steps" sigma schedule, log-linearly
    resampled from the tabulated noise levels of the chosen model family."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required":
                    {"model_type": (["SD1", "SDXL", "SVD"], ),
                     "steps": ("INT", {"default": 10, "min": 10, "max": 10000}),
                     "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("SIGMAS",)
    CATEGORY = "sampling/custom_sampling/schedulers"
    FUNCTION = "get_sigmas"

    def get_sigmas(self, model_type, steps, denoise):
        """Return the AYS sigma schedule, truncated when denoise < 1."""
        if denoise < 1.0:
            if denoise <= 0.0:
                # Nothing to denoise: empty schedule.
                return (torch.FloatTensor([]),)
            total_steps = round(steps * denoise)
        else:
            total_steps = steps

        sigmas = list(NOISE_LEVELS[model_type])
        if len(sigmas) != steps + 1:
            # Resample the 11 tabulated levels to steps + 1 points.
            sigmas = loglinear_interp(sigmas, steps + 1)

        # Keep only the tail of the schedule for partial denoise; end at 0.
        sigmas = sigmas[-(total_steps + 1):]
        sigmas[-1] = 0
        return (torch.FloatTensor(sigmas), )
52 |
# Node registry consumed by the ComfyUI-style node loader.
NODE_CLASS_MAPPINGS = {
    "AlignYourStepsScheduler": AlignYourStepsScheduler,
}
--------------------------------------------------------------------------------
/modules/localization.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 |
# Currently-active translation table (flat str -> str mapping).
current_translation = {}
# Translation files live in <repo root>/language/*.json.
localization_root = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'language')
7 |
8 |
def localization_js(filename):
    """Load language/<filename>.json into the global translation table and
    return a JS statement publishing it as window.localization.

    Load/validation failures are printed and otherwise ignored (best-effort);
    a non-string filename leaves the current table untouched.
    """
    global current_translation

    if isinstance(filename, str):
        full_name = os.path.abspath(os.path.join(localization_root, filename + '.json'))
        if os.path.exists(full_name):
            try:
                with open(full_name, encoding='utf-8') as f:
                    current_translation = json.load(f)
                # The table must be a flat str -> str mapping.
                assert isinstance(current_translation, dict)
                assert all(isinstance(k, str) for k in current_translation)
                assert all(isinstance(v, str) for v in current_translation.values())
            except Exception as e:
                print(str(e))
                print(f'Failed to load localization file {full_name}')

    # current_translation = {k: 'XXX' for k in current_translation.keys()} # use this to see if all texts are covered

    return f"window.localization = {json.dumps(current_translation)}"
29 |
30 |
def dump_english_config(components):
    """Collect every user-visible string from the given UI components and
    write them as an identity-mapped template to language/en.json."""
    all_texts = []
    for component in components:
        # Plain string attributes, in the same order as before.
        for attr in ('label', 'value', 'info'):
            text = getattr(component, attr, None)
            if isinstance(text, str):
                all_texts.append(text)

        choices = getattr(component, 'choices', None)
        if isinstance(choices, list):
            for choice in choices:
                if isinstance(choice, str):
                    all_texts.append(choice)
                if isinstance(choice, tuple):
                    # (display, value)-style tuples: keep every string member.
                    all_texts.extend(item for item in choice if isinstance(item, str))

    # Identity mapping; duplicates collapse, empty/internal strings skipped.
    config_dict = {text: text for text in all_texts if text != "" and 'progress-container' not in text}
    full_name = os.path.abspath(os.path.join(localization_root, 'en.json'))

    with open(full_name, "w", encoding="utf-8") as json_file:
        json.dump(config_dict, json_file, indent=4)

    return
61 |
--------------------------------------------------------------------------------
/modules/patch_precision.py:
--------------------------------------------------------------------------------
1 | # Consistent with Kohya to reduce differences between model training and inference.
2 |
3 | import torch
4 | import math
5 | import einops
6 | import numpy as np
7 |
8 | import ldm_patched.ldm.modules.diffusionmodules.openaimodel
9 | import ldm_patched.modules.model_sampling
10 | import ldm_patched.modules.sd1_clip
11 |
12 | from ldm_patched.ldm.modules.diffusionmodules.util import make_beta_schedule
13 |
14 |
def patched_timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """Sinusoidal timestep embedding computed in float32.

    Kept numerically consistent with Kohya to reduce differences between
    model training and inference. `timesteps` is a 1-D tensor of N indices
    (possibly fractional); returns an (N, dim) tensor with cosine terms
    first, then sine terms.
    """
    if repeat_only:
        # Degenerate mode: tile the raw timestep value across `dim`.
        return einops.repeat(timesteps, 'b -> b d', d=dim)

    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
    ).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dim: pad one zero column so the width matches exactly.
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
30 |
31 |
def patched_register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                              linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    """Recompute the sampler's schedule tensors from a beta schedule.

    Kept numerically consistent with Kohya to reduce differences between
    model training and inference. Mutates `self`: sets num_timesteps,
    linear_start and linear_end, and pushes float32 sigmas and
    alphas_cumprod through self.set_sigmas / self.set_alphas_cumprod.
    """
    if given_betas is None:
        betas = make_beta_schedule(
            beta_schedule,
            timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s)
    else:
        betas = given_betas

    alphas_cumprod = np.cumprod(1. - betas, axis=0)

    # betas must be 1-D; its length is the authoritative step count.
    (schedule_len,) = betas.shape
    self.num_timesteps = int(schedule_len)
    self.linear_start = linear_start
    self.linear_end = linear_end

    # sigma_t = sqrt((1 - abar_t) / abar_t), stored in float32.
    self.set_sigmas(torch.tensor(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, dtype=torch.float32))
    self.set_alphas_cumprod(torch.tensor(alphas_cumprod, dtype=torch.float32))
    return
57 |
58 |
def patch_all_precision():
    """Install the Kohya-consistent replacements into the patched modules."""
    # Replace the UNet's sinusoidal timestep embedding.
    ldm_patched.ldm.modules.diffusionmodules.openaimodel.timestep_embedding = patched_timestep_embedding
    # Replace the discrete model-sampling schedule registration.
    ldm_patched.modules.model_sampling.ModelSamplingDiscrete._register_schedule = patched_register_schedule
63 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/OmniSR/layernorm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | #############################################################
4 | # File: layernorm.py
5 | # Created Date: Tuesday April 28th 2022
6 | # Author: Chen Xuanhong
7 | # Email: chenxuanhongzju@outlook.com
8 | # Last Modified: Thursday, 20th April 2023 9:28:20 am
9 | # Modified By: Chen Xuanhong
10 | # Copyright (c) 2020 Shanghai Jiao Tong University
11 | #############################################################
12 |
13 | import torch
14 | import torch.nn as nn
15 |
16 |
class LayerNormFunction(torch.autograd.Function):
    """Channel-wise LayerNorm over NCHW tensors with a hand-written backward.

    Normalizes over dim 1 (channels), then applies a per-channel affine
    transform with `weight` and `bias`.
    """

    @staticmethod
    def forward(ctx, x, weight, bias, eps):
        """Return weight * (x - mu) / sqrt(var + eps) + bias, stats over dim 1.

        x: (N, C, H, W) tensor; weight, bias: (C,) tensors; eps: float.
        """
        ctx.eps = eps
        C = x.size(1)
        mu = x.mean(1, keepdim=True)
        # Hoist (x - mu): it was computed twice in the original.
        centered = x - mu
        var = centered.pow(2).mean(1, keepdim=True)
        y = centered / (var + eps).sqrt()
        # Save the *normalized* (pre-affine) activations for backward.
        ctx.save_for_backward(y, var, weight)
        y = weight.view(1, C, 1, 1) * y + bias.view(1, C, 1, 1)
        return y

    @staticmethod
    def backward(ctx, grad_output):
        """Gradients w.r.t. (x, weight, bias); None for the eps argument."""
        eps = ctx.eps
        C = grad_output.size(1)
        # `saved_tensors` replaces the long-deprecated `ctx.saved_variables`.
        y, var, weight = ctx.saved_tensors
        g = grad_output * weight.view(1, C, 1, 1)
        mean_g = g.mean(dim=1, keepdim=True)
        mean_gy = (g * y).mean(dim=1, keepdim=True)
        # Standard LayerNorm input gradient: (g - mean(g) - y * mean(g*y)) / std.
        gx = 1.0 / torch.sqrt(var + eps) * (g - y * mean_gy - mean_g)
        return (
            gx,
            (grad_output * y).sum(dim=3).sum(dim=2).sum(dim=0),
            grad_output.sum(dim=3).sum(dim=2).sum(dim=0),
            None,
        )
46 |
47 |
class LayerNorm2d(nn.Module):
    """Module wrapper around LayerNormFunction: channel-wise LayerNorm for
    NCHW inputs with a learned per-channel affine transform."""

    def __init__(self, channels, eps=1e-6):
        super(LayerNorm2d, self).__init__()
        # Plain attribute assignment registers nn.Parameter just like
        # register_parameter did; state_dict keys stay "weight"/"bias".
        self.weight = nn.Parameter(torch.ones(channels))
        self.bias = nn.Parameter(torch.zeros(channels))
        self.eps = eps

    def forward(self, x):
        return LayerNormFunction.apply(x, self.weight, self.bias, self.eps)
57 |
58 |
class GRN(nn.Module):
    """GRN (Global Response Normalization) layer"""

    def __init__(self, dim):
        super().__init__()
        # Both scale and shift start at zero, so the layer begins as identity.
        self.gamma = nn.Parameter(torch.zeros(1, dim, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        # Per-channel spatial L2 energy, normalized by its mean over channels.
        spatial_l2 = torch.norm(x, p=2, dim=(2, 3), keepdim=True)
        response = spatial_l2 / (spatial_l2.mean(dim=1, keepdim=True) + 1e-6)
        return self.gamma * (x * response) + self.beta + x
71 |
--------------------------------------------------------------------------------
/extras/facexlib/parsing/resnet.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 |
4 |
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
8 |
9 |
class BasicBlock(nn.Module):
    """ResNet basic residual block: conv3x3-BN-ReLU-conv3x3-BN plus identity,
    with a 1x1 projection shortcut whenever channels or stride change."""

    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        # Projection shortcut only when the residual sum would shape-mismatch.
        needs_projection = in_chan != out_chan or stride != 1
        self.downsample = nn.Sequential(
            nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_chan),
        ) if needs_projection else None

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        identity = x if self.downsample is None else self.downsample(x)
        return self.relu(identity + branch)
39 |
40 |
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
    """Stack `bnum` BasicBlocks; only the first may change channels/stride."""
    blocks = [BasicBlock(in_chan, out_chan, stride=stride)]
    blocks.extend(BasicBlock(out_chan, out_chan, stride=1) for _ in range(bnum - 1))
    return nn.Sequential(*blocks)
46 |
47 |
class ResNet18(nn.Module):
    """ResNet-18 backbone trunk returning the 1/8, 1/16 and 1/32 resolution
    feature maps (classifier head omitted)."""

    def __init__(self):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)

    def forward(self, x):
        stem = self.maxpool(F.relu(self.bn1(self.conv1(x))))
        out = self.layer1(stem)
        feat8 = self.layer2(out)      # 1/8
        feat16 = self.layer3(feat8)   # 1/16
        feat32 = self.layer4(feat16)  # 1/32
        return feat8, feat16, feat32
70 |
--------------------------------------------------------------------------------
/ldm_patched/modules/conds.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import math
3 | import ldm_patched.modules.utils
4 |
5 |
6 |
class CONDRegular:
    """Base conditioning wrapper: holds a tensor that is broadcast to the
    batch size and can be concatenated with identically-shaped peers."""

    def __init__(self, cond):
        self.cond = cond

    def _copy_with(self, cond):
        # Preserve the subclass when wrapping a processed tensor.
        return self.__class__(cond)

    def process_cond(self, batch_size, device, **kwargs):
        return self._copy_with(ldm_patched.modules.utils.repeat_to_batch_size(self.cond, batch_size).to(device))

    def can_concat(self, other):
        # Only identically-shaped tensors can share a batch.
        return self.cond.shape == other.cond.shape

    def concat(self, others):
        return torch.cat([self.cond] + [o.cond for o in others])
27 |
class CONDNoiseShape(CONDRegular):
    """Conditioning tied to latent coordinates: cropped to the sampled area
    before being broadcast to the batch."""

    def process_cond(self, batch_size, device, area, **kwargs):
        # From the slice: area[0]/area[1] are the extents and area[2]/area[3]
        # the offsets along the last two (spatial) dims.
        extent_h, extent_w, off_h, off_w = area[0], area[1], area[2], area[3]
        data = self.cond[:, :, off_h:off_h + extent_h, off_w:off_w + extent_w]
        return self._copy_with(ldm_patched.modules.utils.repeat_to_batch_size(data, batch_size).to(device))
32 |
33 |
class CONDCrossAttn(CONDRegular):
    """Cross-attention conditioning: tensors whose token counts differ can
    still be concatenated by repeating each up to the LCM of the lengths."""

    def can_concat(self, other):
        mine = self.cond.shape
        theirs = other.cond.shape
        if mine == theirs:
            return True
        if mine[0] != theirs[0] or mine[2] != theirs[2]:  # these 2 cases should not happen
            return False
        # arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
        repeat_factor = math.lcm(mine[1], theirs[1]) // min(mine[1], theirs[1])
        return repeat_factor <= 4

    def concat(self, others):
        tensors = [self.cond] + [o.cond for o in others]
        target_len = tensors[0].shape[1]
        for t in tensors[1:]:
            target_len = math.lcm(target_len, t.shape[1])

        padded = []
        for t in tensors:
            if t.shape[1] < target_len:
                t = t.repeat(1, target_len // t.shape[1], 1)  # padding with repeat doesn't change result
            padded.append(t)
        return torch.cat(padded)
62 |
class CONDConstant(CONDRegular):
    """Conditioning carrying an arbitrary constant value that is passed
    through unchanged regardless of batch size or device."""

    def __init__(self, cond):
        self.cond = cond

    def process_cond(self, batch_size, device, **kwargs):
        # Constants need no batching or device transfer.
        return self._copy_with(self.cond)

    def can_concat(self, other):
        # Only identical constants may share a batch.
        return not (self.cond != other.cond)

    def concat(self, others):
        return self.cond
77 |
--------------------------------------------------------------------------------
/ldm_patched/pfn/architecture/face/fused_act.py:
--------------------------------------------------------------------------------
1 | # pylint: skip-file
2 | # type: ignore
3 | # modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501
4 |
5 | import torch
6 | from torch import nn
7 | from torch.autograd import Function
8 |
# Handle for the compiled `fused` CUDA extension. It is left as None in this
# copy, so any call into fused_act_ext.fused_bias_act below will raise
# AttributeError unless the extension is injected elsewhere.
fused_act_ext = None
10 |
11 |
class FusedLeakyReLUFunctionBackward(Function):
    """Backward pass of FusedLeakyReLU expressed as its own autograd Function
    so that double backward is supported.

    NOTE(review): `fused_act_ext` is None in this file, so these paths raise
    AttributeError if executed — presumably the compiled CUDA extension is
    meant to be provided elsewhere; confirm before relying on them.
    """

    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        # `out` is the saved activation output of the original forward pass.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        # Empty tensor stands in for the unused bias tensor argument.
        empty = grad_output.new_empty(0)

        # The integer arguments (3, 1) select the extension's operating mode;
        # their semantics live inside the CUDA op.
        grad_input = fused_act_ext.fused_bias_act(
            grad_output, empty, out, 3, 1, negative_slope, scale
        )

        # Sum over every dim except channel (dim 1) to get the bias gradient.
        dim = [0]

        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))

        grad_bias = grad_input.sum(dim).detach()

        return grad_input, grad_bias

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        # Second-order gradient: reuse the fused op on the incoming grads.
        (out,) = ctx.saved_tensors
        gradgrad_out = fused_act_ext.fused_bias_act(
            gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
        )

        # Only grad_output receives a gradient; out/negative_slope/scale do not.
        return gradgrad_out, None, None, None
42 |
43 |
class FusedLeakyReLUFunction(Function):
    """Autograd Function computing the fused bias + scaled leaky-ReLU through
    the `fused_act_ext` CUDA extension.

    NOTE(review): `fused_act_ext` is None in this file; invoking this Function
    raises AttributeError unless the extension is injected elsewhere — confirm.
    """

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        # Empty tensor is a placeholder for the extension's third tensor arg.
        empty = input.new_empty(0)
        out = fused_act_ext.fused_bias_act(
            input, bias, empty, 3, 0, negative_slope, scale
        )
        # Save the activation output; backward re-derives its mask from it.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        return out

    @staticmethod
    def backward(ctx, grad_output):
        (out,) = ctx.saved_tensors

        # Delegate to a dedicated Function so double backward works.
        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale
        )

        # No gradients for negative_slope / scale.
        return grad_input, grad_bias, None, None
66 |
67 |
class FusedLeakyReLU(nn.Module):
    """Module form of the fused bias + scaled leaky-ReLU activation.

    NOTE(review): depends on `fused_act_ext`, which is None in this copy.
    """

    def __init__(self, channel, negative_slope=0.2, scale=2**0.5):
        super().__init__()

        # One learned bias per channel, applied before the activation.
        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
79 |
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2**0.5):
    """Functional wrapper around FusedLeakyReLUFunction; the fused bias +
    scaled leaky-ReLU math itself lives in the CUDA extension."""
    return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
82 |
--------------------------------------------------------------------------------