├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── install.py ├── pyproject.toml └── src └── inference_core_nodes ├── __init__.py ├── comfyui_experiments ├── LICENSE ├── README.md ├── __init__.py ├── advanced_model_merging.py ├── reference_only.py ├── sampler_rescalecfg.py ├── sampler_tonemap.py ├── sampler_tonemap_rescalecfg.py └── sdxl_model_merging.py ├── controlnet_preprocessors ├── .gitignore ├── LICENSE ├── LICENSE.txt ├── NotoSans-Regular.ttf ├── README.md ├── UPDATES.md ├── __init__.py ├── config.example.yaml ├── dev_interface.py ├── examples │ ├── example_animal_pose.png │ ├── example_anime_face_segmentor.png │ ├── example_densepose.png │ ├── example_depth_anything.png │ ├── example_marigold.png │ ├── example_marigold_flat.jpg │ ├── example_mesh_graphormer.png │ ├── example_onnx.png │ ├── example_recolor.png │ ├── example_save_kps.png │ ├── example_teed.png │ ├── example_torchscript.png │ └── example_unimatch.png ├── hint_image_enchance.py ├── log.py ├── lvminthin.py ├── node_wrappers │ ├── anime_face_segment.py │ ├── anyline.py │ ├── binary.py │ ├── canny.py │ ├── color.py │ ├── densepose.py │ ├── depth_anything.py │ ├── depth_anything_v2.py │ ├── diffusion_edge.py │ ├── dsine.py │ ├── dwpose.py │ ├── hed.py │ ├── inpaint.py │ ├── leres.py │ ├── lineart.py │ ├── lineart_anime.py │ ├── lineart_standard.py │ ├── manga_line.py │ ├── mediapipe_face.py │ ├── mesh_graphormer.py │ ├── metric3d.py │ ├── midas.py │ ├── mlsd.py │ ├── normalbae.py │ ├── oneformer.py │ ├── openpose.py │ ├── pidinet.py │ ├── pose_keypoint_postprocess.py │ ├── pyracanny.py │ ├── recolor.py │ ├── scribble.py │ ├── segment_anything.py │ ├── shuffle.py │ ├── teed.py │ ├── tile.py │ ├── uniformer.py │ ├── unimatch.py │ └── zoe.py ├── pyproject.toml ├── requirements.txt ├── search_hf_assets.py ├── src │ ├── __init__.py │ ├── controlnet_aux │ │ ├── __init__.py │ │ ├── anime_face_segment │ │ │ ├── __init__.py │ │ │ ├── anime_segmentation.py │ │ │ ├── isnet.py │ │ │ ├── network.py │ │ │ └── util.py │ │ ├── binary │ │ │ └── __init__.py │ │ ├── canny │ │ │ └── __init__.py │ │ ├── color │ │ │ └── __init__.py │ │ ├── densepose │ │ │ ├── __init__.py │ │ │ └── densepose.py │ │ ├── depth_anything │ │ │ ├── __init__.py │ │ │ ├── depth_anything │ │ │ │ ├── blocks.py │ │ │ │ ├── dpt.py │ │ │ │ └── util │ │ │ │ │ └── transform.py │ │ │ └── torchhub │ │ │ │ ├── README.md │ │ │ │ └── facebookresearch_dinov2_main │ │ │ │ ├── CODE_OF_CONDUCT.md │ │ │ │ ├── CONTRIBUTING.md │ │ │ │ ├── LICENSE │ │ │ │ ├── MODEL_CARD.md │ │ │ │ ├── README.md │ │ │ │ ├── conda.yaml │ │ │ │ ├── dinov2 │ │ │ │ ├── __init__.py │ │ │ │ ├── configs │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── eval │ │ │ │ │ │ ├── vitb14_pretrain.yaml │ │ │ │ │ │ ├── vitg14_pretrain.yaml │ │ │ │ │ │ ├── vitl14_pretrain.yaml │ │ │ │ │ │ └── vits14_pretrain.yaml │ │ │ │ │ ├── ssl_default_config.yaml │ │ │ │ │ └── train │ │ │ │ │ │ ├── vitg14.yaml │ │ │ │ │ │ ├── vitl14.yaml │ │ │ │ │ │ └── vitl16_short.yaml │ │ │ │ ├── data │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── adapters.py │ │ │ │ │ ├── augmentations.py │ │ │ │ │ ├── collate.py │ │ │ │ │ ├── datasets │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── decoders.py │ │ │ │ │ │ ├── extended.py │ │ │ │ │ │ ├── image_net.py │ │ │ │ │ │ └── image_net_22k.py │ │ │ │ │ ├── loaders.py │ │ │ │ │ ├── masking.py │ │ │ │ │ ├── samplers.py │ │ │ │ │ └── transforms.py │ │ │ │ ├── distributed │ │ │ │ │ └── __init__.py │ │ │ │ ├── eval │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── knn.py │ │ │ │ │ ├── linear.py │ │ │ │ │ ├── 
log_regression.py │ │ │ │ │ ├── metrics.py │ │ │ │ │ ├── setup.py │ │ │ │ │ └── utils.py │ │ │ │ ├── fsdp │ │ │ │ │ └── __init__.py │ │ │ │ ├── layers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── attention.py │ │ │ │ │ ├── block.py │ │ │ │ │ ├── dino_head.py │ │ │ │ │ ├── drop_path.py │ │ │ │ │ ├── layer_scale.py │ │ │ │ │ ├── mlp.py │ │ │ │ │ ├── patch_embed.py │ │ │ │ │ └── swiglu_ffn.py │ │ │ │ ├── logging │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── helpers.py │ │ │ │ ├── loss │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dino_clstoken_loss.py │ │ │ │ │ ├── ibot_patch_loss.py │ │ │ │ │ └── koleo_loss.py │ │ │ │ ├── models │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── vision_transformer.py │ │ │ │ ├── run │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── eval │ │ │ │ │ │ ├── knn.py │ │ │ │ │ │ ├── linear.py │ │ │ │ │ │ └── log_regression.py │ │ │ │ │ ├── submit.py │ │ │ │ │ └── train │ │ │ │ │ │ └── train.py │ │ │ │ ├── train │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── ssl_meta_arch.py │ │ │ │ │ └── train.py │ │ │ │ └── utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── cluster.py │ │ │ │ │ ├── config.py │ │ │ │ │ ├── dtype.py │ │ │ │ │ ├── param_groups.py │ │ │ │ │ └── utils.py │ │ │ │ ├── hubconf.py │ │ │ │ ├── pyproject.toml │ │ │ │ ├── requirements-dev.txt │ │ │ │ ├── requirements.txt │ │ │ │ ├── scripts │ │ │ │ └── lint.sh │ │ │ │ ├── setup.cfg │ │ │ │ ├── setup.py │ │ │ │ ├── utils.py │ │ │ │ └── vision_transformer.py │ │ ├── diffusion_edge │ │ │ ├── __init__.py │ │ │ ├── default.yaml │ │ │ ├── denoising_diffusion_pytorch │ │ │ │ ├── __init__.py │ │ │ │ ├── data.py │ │ │ │ ├── ddm_const_sde.py │ │ │ │ ├── efficientnet.py │ │ │ │ ├── ema.py │ │ │ │ ├── encoder_decoder.py │ │ │ │ ├── imagenet.py │ │ │ │ ├── loss.py │ │ │ │ ├── mask_cond_unet.py │ │ │ │ ├── quantization.py │ │ │ │ ├── resnet.py │ │ │ │ ├── swin_transformer.py │ │ │ │ ├── uncond_unet.py │ │ │ │ ├── utils.py │ │ │ │ ├── vgg.py │ │ │ │ ├── wavelet.py │ │ │ │ └── wcc.py │ │ │ ├── model.py │ │ │ ├── requirement.txt │ │ │ └── taming │ │ │ │ ├── __init__.py │ │ │ │ ├── data │ │ │ │ ├── ade20k.py │ │ │ │ ├── annotated_objects_coco.py │ │ │ │ ├── annotated_objects_dataset.py │ │ │ │ ├── annotated_objects_open_images.py │ │ │ │ ├── base.py │ │ │ │ ├── coco.py │ │ │ │ ├── conditional_builder │ │ │ │ │ ├── objects_bbox.py │ │ │ │ │ ├── objects_center_points.py │ │ │ │ │ └── utils.py │ │ │ │ ├── custom.py │ │ │ │ ├── faceshq.py │ │ │ │ ├── helper_types.py │ │ │ │ ├── image_transforms.py │ │ │ │ ├── imagenet.py │ │ │ │ ├── open_images_helper.py │ │ │ │ ├── sflckr.py │ │ │ │ └── utils.py │ │ │ │ ├── modules │ │ │ │ ├── autoencoder │ │ │ │ │ └── lpips │ │ │ │ │ │ └── vgg.pth │ │ │ │ ├── diffusionmodules │ │ │ │ │ └── model.py │ │ │ │ ├── discriminator │ │ │ │ │ └── model.py │ │ │ │ ├── losses │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── lpips.py │ │ │ │ │ ├── segmentation.py │ │ │ │ │ ├── util.py │ │ │ │ │ └── vqperceptual.py │ │ │ │ ├── misc │ │ │ │ │ └── coord.py │ │ │ │ ├── util.py │ │ │ │ └── vqvae │ │ │ │ │ └── quantize.py │ │ │ │ └── util.py │ │ ├── dwpose │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── animalpose.py │ │ │ ├── body.py │ │ │ ├── dw_onnx │ │ │ │ ├── __init__.py │ │ │ │ ├── cv_ox_det.py │ │ │ │ ├── cv_ox_pose.py │ │ │ │ └── cv_ox_yolo_nas.py │ │ │ ├── dw_torchscript │ │ │ │ ├── __init__.py │ │ │ │ ├── jit_det.py │ │ │ │ └── jit_pose.py │ │ │ ├── face.py │ │ │ ├── hand.py │ │ │ ├── model.py │ │ │ ├── types.py │ │ │ ├── util.py │ │ │ └── wholebody.py │ │ ├── hed │ │ │ └── __init__.py │ │ ├── leres │ │ │ ├── __init__.py │ │ │ ├── leres │ │ │ │ ├── LICENSE │ │ 
│ │ ├── Resnet.py │ │ │ │ ├── Resnext_torch.py │ │ │ │ ├── __init__.py │ │ │ │ ├── depthmap.py │ │ │ │ ├── multi_depth_model_woauxi.py │ │ │ │ ├── net_tools.py │ │ │ │ └── network_auxi.py │ │ │ └── pix2pix │ │ │ │ ├── LICENSE │ │ │ │ ├── __init__.py │ │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── base_model.py │ │ │ │ ├── base_model_hg.py │ │ │ │ ├── networks.py │ │ │ │ └── pix2pix4depth_model.py │ │ │ │ ├── options │ │ │ │ ├── __init__.py │ │ │ │ ├── base_options.py │ │ │ │ └── test_options.py │ │ │ │ └── util │ │ │ │ ├── __init__.py │ │ │ │ └── util.py │ │ ├── lineart │ │ │ ├── LICENSE │ │ │ └── __init__.py │ │ ├── lineart_anime │ │ │ ├── LICENSE │ │ │ └── __init__.py │ │ ├── lineart_standard │ │ │ └── __init__.py │ │ ├── manga_line │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── model_torch.py │ │ ├── mediapipe_face │ │ │ ├── __init__.py │ │ │ └── mediapipe_face_common.py │ │ ├── mesh_graphormer │ │ │ ├── __init__.py │ │ │ ├── cls_hrnet_w64_sgd_lr5e-2_wd1e-4_bs32_x100.yaml │ │ │ ├── depth_preprocessor.py │ │ │ ├── hand_landmarker.task │ │ │ └── pipeline.py │ │ ├── midas │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── api.py │ │ │ └── utils.py │ │ ├── mlsd │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── mbv2_mlsd_large.py │ │ │ │ └── mbv2_mlsd_tiny.py │ │ │ └── utils.py │ │ ├── normalbae │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── nets │ │ │ │ ├── NNET.py │ │ │ │ ├── __init__.py │ │ │ │ ├── baseline.py │ │ │ │ └── submodules │ │ │ │ ├── __init__.py │ │ │ │ ├── decoder.py │ │ │ │ ├── efficientnet_repo │ │ │ │ ├── .gitignore │ │ │ │ ├── BENCHMARK.md │ │ │ │ ├── LICENSE │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── caffe2_benchmark.py │ │ │ │ ├── caffe2_validate.py │ │ │ │ ├── geffnet │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── activations │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── activations.py │ │ │ │ │ │ ├── activations_jit.py │ │ │ │ │ │ └── activations_me.py │ │ │ │ │ ├── config.py │ │ │ │ │ ├── conv2d_layers.py │ │ │ │ │ ├── efficientnet_builder.py │ │ │ │ │ ├── gen_efficientnet.py │ │ │ │ │ ├── helpers.py │ │ │ │ │ ├── mobilenetv3.py │ │ │ │ │ ├── model_factory.py │ │ │ │ │ └── version.py │ │ │ │ ├── hubconf.py │ │ │ │ ├── onnx_export.py │ │ │ │ ├── onnx_optimize.py │ │ │ │ ├── onnx_to_caffe.py │ │ │ │ ├── onnx_validate.py │ │ │ │ ├── requirements.txt │ │ │ │ ├── setup.py │ │ │ │ ├── utils.py │ │ │ │ └── validate.py │ │ │ │ ├── encoder.py │ │ │ │ └── submodules.py │ │ ├── oneformer │ │ │ ├── __init__.py │ │ │ ├── api.py │ │ │ └── configs │ │ │ │ ├── ade20k │ │ │ │ ├── Base-ADE20K-UnifiedSegmentation.yaml │ │ │ │ ├── oneformer_R50_bs16_160k.yaml │ │ │ │ └── oneformer_swin_large_IN21k_384_bs16_160k.yaml │ │ │ │ └── coco │ │ │ │ ├── Base-COCO-UnifiedSegmentation.yaml │ │ │ │ ├── oneformer_R50_bs16_50ep.yaml │ │ │ │ └── oneformer_swin_large_IN21k_384_bs16_100ep.yaml │ │ ├── open_pose │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── body.py │ │ │ ├── face.py │ │ │ ├── hand.py │ │ │ ├── model.py │ │ │ └── util.py │ │ ├── pidi │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── model.py │ │ ├── processor.py │ │ ├── recolor │ │ │ └── __init__.py │ │ ├── sam │ │ │ ├── __init__.py │ │ │ ├── automatic_mask_generator.py │ │ │ ├── build_sam.py │ │ │ ├── modeling │ │ │ │ ├── __init__.py │ │ │ │ ├── common.py │ │ │ │ ├── image_encoder.py │ │ │ │ ├── mask_decoder.py │ │ │ │ ├── prompt_encoder.py │ │ │ │ ├── sam.py │ │ │ │ ├── tiny_vit_sam.py │ │ │ │ └── transformer.py │ │ │ ├── predictor.py │ │ │ └── utils │ │ │ │ ├── __init__.py 
│ │ │ │ ├── amg.py │ │ │ │ ├── onnx.py │ │ │ │ └── transforms.py │ │ ├── scribble │ │ │ └── __init__.py │ │ ├── shuffle │ │ │ └── __init__.py │ │ ├── teed │ │ │ ├── Fmish.py │ │ │ ├── Fsmish.py │ │ │ ├── LICENSE.txt │ │ │ ├── Xmish.py │ │ │ ├── Xsmish.py │ │ │ ├── __init__.py │ │ │ └── ted.py │ │ ├── tests │ │ │ ├── requirements.txt │ │ │ ├── test_image.png │ │ │ ├── test_processor.py │ │ │ └── test_processor_pytest.py │ │ ├── tile │ │ │ └── __init__.py │ │ ├── uniformer │ │ │ ├── __init__.py │ │ │ ├── configs │ │ │ │ └── _base_ │ │ │ │ │ ├── datasets │ │ │ │ │ ├── ade20k.py │ │ │ │ │ ├── chase_db1.py │ │ │ │ │ ├── cityscapes.py │ │ │ │ │ ├── cityscapes_769x769.py │ │ │ │ │ ├── drive.py │ │ │ │ │ ├── hrf.py │ │ │ │ │ ├── pascal_context.py │ │ │ │ │ ├── pascal_context_59.py │ │ │ │ │ ├── pascal_voc12.py │ │ │ │ │ ├── pascal_voc12_aug.py │ │ │ │ │ └── stare.py │ │ │ │ │ ├── default_runtime.py │ │ │ │ │ ├── models │ │ │ │ │ ├── ann_r50-d8.py │ │ │ │ │ ├── apcnet_r50-d8.py │ │ │ │ │ ├── ccnet_r50-d8.py │ │ │ │ │ ├── cgnet.py │ │ │ │ │ ├── danet_r50-d8.py │ │ │ │ │ ├── deeplabv3_r50-d8.py │ │ │ │ │ ├── deeplabv3_unet_s5-d16.py │ │ │ │ │ ├── deeplabv3plus_r50-d8.py │ │ │ │ │ ├── dmnet_r50-d8.py │ │ │ │ │ ├── dnl_r50-d8.py │ │ │ │ │ ├── emanet_r50-d8.py │ │ │ │ │ ├── encnet_r50-d8.py │ │ │ │ │ ├── fast_scnn.py │ │ │ │ │ ├── fcn_hr18.py │ │ │ │ │ ├── fcn_r50-d8.py │ │ │ │ │ ├── fcn_unet_s5-d16.py │ │ │ │ │ ├── fpn_r50.py │ │ │ │ │ ├── fpn_uniformer.py │ │ │ │ │ ├── gcnet_r50-d8.py │ │ │ │ │ ├── lraspp_m-v3-d8.py │ │ │ │ │ ├── nonlocal_r50-d8.py │ │ │ │ │ ├── ocrnet_hr18.py │ │ │ │ │ ├── ocrnet_r50-d8.py │ │ │ │ │ ├── pointrend_r50.py │ │ │ │ │ ├── psanet_r50-d8.py │ │ │ │ │ ├── pspnet_r50-d8.py │ │ │ │ │ ├── pspnet_unet_s5-d16.py │ │ │ │ │ ├── upernet_r50.py │ │ │ │ │ └── upernet_uniformer.py │ │ │ │ │ └── schedules │ │ │ │ │ ├── schedule_160k.py │ │ │ │ │ ├── schedule_20k.py │ │ │ │ │ ├── schedule_40k.py │ │ │ │ │ └── schedule_80k.py │ │ │ ├── inference.py │ │ │ ├── mmcv_custom │ │ │ │ ├── __init__.py │ │ │ │ └── checkpoint.py │ │ │ ├── uniformer.py │ │ │ └── upernet_global_small.py │ │ ├── unimatch │ │ │ ├── __init__.py │ │ │ ├── unimatch │ │ │ │ ├── __init__.py │ │ │ │ ├── attention.py │ │ │ │ ├── backbone.py │ │ │ │ ├── geometry.py │ │ │ │ ├── matching.py │ │ │ │ ├── position.py │ │ │ │ ├── reg_refine.py │ │ │ │ ├── transformer.py │ │ │ │ ├── trident_conv.py │ │ │ │ ├── unimatch.py │ │ │ │ └── utils.py │ │ │ └── utils │ │ │ │ ├── dist_utils.py │ │ │ │ ├── file_io.py │ │ │ │ ├── flow_viz.py │ │ │ │ ├── frame_utils.py │ │ │ │ ├── logger.py │ │ │ │ ├── misc.py │ │ │ │ ├── utils.py │ │ │ │ └── visualization.py │ │ ├── util.py │ │ └── zoe │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── zoedepth │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── base_models │ │ │ │ ├── __init__.py │ │ │ │ ├── depth_anything.py │ │ │ │ ├── dpt_dinov2 │ │ │ │ │ ├── blocks.py │ │ │ │ │ └── dpt.py │ │ │ │ └── midas.py │ │ │ ├── builder.py │ │ │ ├── depth_model.py │ │ │ ├── layers │ │ │ │ ├── __init__.py │ │ │ │ ├── attractor.py │ │ │ │ ├── dist_layers.py │ │ │ │ ├── localbins_layers.py │ │ │ │ └── patch_transformer.py │ │ │ ├── model_io.py │ │ │ ├── zoedepth │ │ │ │ ├── __init__.py │ │ │ │ ├── config_zoedepth.json │ │ │ │ ├── config_zoedepth_kitti.json │ │ │ │ └── zoedepth_v1.py │ │ │ ├── zoedepth_anything │ │ │ │ ├── __init__.py │ │ │ │ └── zoedepth_v1.py │ │ │ └── zoedepth_nk │ │ │ │ ├── __init__.py │ │ │ │ ├── config_zoedepth_nk.json │ │ │ │ └── zoedepth_nk_v1.py │ │ │ └── utils │ │ │ ├── 
__init__.py │ │ │ ├── arg_utils.py │ │ │ ├── config.py │ │ │ └── easydict │ │ │ └── __init__.py │ ├── custom_albumentations │ │ ├── LICENSE │ │ ├── __init__.py │ │ ├── augmentations │ │ │ ├── __init__.py │ │ │ ├── blur │ │ │ │ ├── __init__.py │ │ │ │ ├── functional.py │ │ │ │ └── transforms.py │ │ │ ├── crops │ │ │ │ ├── __init__.py │ │ │ │ ├── functional.py │ │ │ │ └── transforms.py │ │ │ ├── domain_adaptation.py │ │ │ ├── dropout │ │ │ │ ├── __init__.py │ │ │ │ ├── channel_dropout.py │ │ │ │ ├── coarse_dropout.py │ │ │ │ ├── cutout.py │ │ │ │ ├── functional.py │ │ │ │ ├── grid_dropout.py │ │ │ │ └── mask_dropout.py │ │ │ ├── functional.py │ │ │ ├── geometric │ │ │ │ ├── __init__.py │ │ │ │ ├── functional.py │ │ │ │ ├── resize.py │ │ │ │ ├── rotate.py │ │ │ │ └── transforms.py │ │ │ ├── transforms.py │ │ │ └── utils.py │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── bbox_utils.py │ │ │ ├── composition.py │ │ │ ├── keypoints_utils.py │ │ │ ├── serialization.py │ │ │ ├── transforms_interface.py │ │ │ └── utils.py │ │ ├── imgaug │ │ │ ├── __init__.py │ │ │ ├── stubs.py │ │ │ └── transforms.py │ │ ├── pytorch │ │ │ ├── __init__.py │ │ │ ├── functional.py │ │ │ └── transforms.py │ │ └── random_utils.py │ ├── custom_controlnet_aux │ │ ├── __init__.py │ │ ├── anime_face_segment │ │ │ ├── __init__.py │ │ │ ├── anime_segmentation.py │ │ │ ├── isnet.py │ │ │ ├── network.py │ │ │ └── util.py │ │ ├── binary │ │ │ └── __init__.py │ │ ├── canny │ │ │ └── __init__.py │ │ ├── color │ │ │ └── __init__.py │ │ ├── densepose │ │ │ ├── __init__.py │ │ │ └── densepose.py │ │ ├── depth_anything │ │ │ ├── __init__.py │ │ │ ├── depth_anything │ │ │ │ ├── blocks.py │ │ │ │ ├── dpt.py │ │ │ │ └── util │ │ │ │ │ └── transform.py │ │ │ └── torchhub │ │ │ │ ├── README.md │ │ │ │ └── facebookresearch_dinov2_main │ │ │ │ ├── CODE_OF_CONDUCT.md │ │ │ │ ├── CONTRIBUTING.md │ │ │ │ ├── LICENSE │ │ │ │ ├── MODEL_CARD.md │ │ │ │ ├── README.md │ │ │ │ ├── conda.yaml │ │ │ │ ├── dinov2 │ │ │ │ ├── __init__.py │ │ │ │ ├── configs │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── eval │ │ │ │ │ │ ├── vitb14_pretrain.yaml │ │ │ │ │ │ ├── vitg14_pretrain.yaml │ │ │ │ │ │ ├── vitl14_pretrain.yaml │ │ │ │ │ │ └── vits14_pretrain.yaml │ │ │ │ │ ├── ssl_default_config.yaml │ │ │ │ │ └── train │ │ │ │ │ │ ├── vitg14.yaml │ │ │ │ │ │ ├── vitl14.yaml │ │ │ │ │ │ └── vitl16_short.yaml │ │ │ │ ├── data │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── adapters.py │ │ │ │ │ ├── augmentations.py │ │ │ │ │ ├── collate.py │ │ │ │ │ ├── datasets │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── decoders.py │ │ │ │ │ │ ├── extended.py │ │ │ │ │ │ ├── image_net.py │ │ │ │ │ │ └── image_net_22k.py │ │ │ │ │ ├── loaders.py │ │ │ │ │ ├── masking.py │ │ │ │ │ ├── samplers.py │ │ │ │ │ └── transforms.py │ │ │ │ ├── distributed │ │ │ │ │ └── __init__.py │ │ │ │ ├── eval │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── knn.py │ │ │ │ │ ├── linear.py │ │ │ │ │ ├── log_regression.py │ │ │ │ │ ├── metrics.py │ │ │ │ │ ├── setup.py │ │ │ │ │ └── utils.py │ │ │ │ ├── fsdp │ │ │ │ │ └── __init__.py │ │ │ │ ├── layers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── attention.py │ │ │ │ │ ├── block.py │ │ │ │ │ ├── dino_head.py │ │ │ │ │ ├── drop_path.py │ │ │ │ │ ├── layer_scale.py │ │ │ │ │ ├── mlp.py │ │ │ │ │ ├── patch_embed.py │ │ │ │ │ └── swiglu_ffn.py │ │ │ │ ├── logging │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── helpers.py │ │ │ │ ├── loss │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dino_clstoken_loss.py │ │ │ │ │ ├── ibot_patch_loss.py │ │ │ │ │ └── koleo_loss.py │ │ │ │ ├── models │ │ │ │ │ ├── 
__init__.py │ │ │ │ │ └── vision_transformer.py │ │ │ │ ├── run │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── eval │ │ │ │ │ │ ├── knn.py │ │ │ │ │ │ ├── linear.py │ │ │ │ │ │ └── log_regression.py │ │ │ │ │ ├── submit.py │ │ │ │ │ └── train │ │ │ │ │ │ └── train.py │ │ │ │ ├── train │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── ssl_meta_arch.py │ │ │ │ │ └── train.py │ │ │ │ └── utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── cluster.py │ │ │ │ │ ├── config.py │ │ │ │ │ ├── dtype.py │ │ │ │ │ ├── param_groups.py │ │ │ │ │ └── utils.py │ │ │ │ ├── hubconf.py │ │ │ │ ├── pyproject.toml │ │ │ │ ├── requirements-dev.txt │ │ │ │ ├── requirements.txt │ │ │ │ ├── scripts │ │ │ │ └── lint.sh │ │ │ │ ├── setup.cfg │ │ │ │ ├── setup.py │ │ │ │ ├── utils.py │ │ │ │ └── vision_transformer.py │ │ ├── depth_anything_v2 │ │ │ ├── __init__.py │ │ │ ├── dinov2.py │ │ │ ├── dinov2_layers │ │ │ │ ├── __init__.py │ │ │ │ ├── attention.py │ │ │ │ ├── block.py │ │ │ │ ├── drop_path.py │ │ │ │ ├── layer_scale.py │ │ │ │ ├── mlp.py │ │ │ │ ├── patch_embed.py │ │ │ │ └── swiglu_ffn.py │ │ │ ├── dpt.py │ │ │ └── util │ │ │ │ ├── blocks.py │ │ │ │ └── transform.py │ │ ├── diffusion_edge │ │ │ ├── __init__.py │ │ │ ├── default.yaml │ │ │ ├── denoising_diffusion_pytorch │ │ │ │ ├── __init__.py │ │ │ │ ├── data.py │ │ │ │ ├── ddm_const_sde.py │ │ │ │ ├── efficientnet.py │ │ │ │ ├── ema.py │ │ │ │ ├── encoder_decoder.py │ │ │ │ ├── imagenet.py │ │ │ │ ├── loss.py │ │ │ │ ├── mask_cond_unet.py │ │ │ │ ├── quantization.py │ │ │ │ ├── resnet.py │ │ │ │ ├── swin_transformer.py │ │ │ │ ├── uncond_unet.py │ │ │ │ ├── utils.py │ │ │ │ ├── vgg.py │ │ │ │ ├── wavelet.py │ │ │ │ └── wcc.py │ │ │ ├── model.py │ │ │ ├── requirement.txt │ │ │ └── taming │ │ │ │ ├── __init__.py │ │ │ │ ├── data │ │ │ │ ├── ade20k.py │ │ │ │ ├── annotated_objects_coco.py │ │ │ │ ├── annotated_objects_dataset.py │ │ │ │ ├── annotated_objects_open_images.py │ │ │ │ ├── base.py │ │ │ │ ├── coco.py │ │ │ │ ├── conditional_builder │ │ │ │ │ ├── objects_bbox.py │ │ │ │ │ ├── objects_center_points.py │ │ │ │ │ └── utils.py │ │ │ │ ├── custom.py │ │ │ │ ├── faceshq.py │ │ │ │ ├── helper_types.py │ │ │ │ ├── image_transforms.py │ │ │ │ ├── imagenet.py │ │ │ │ ├── open_images_helper.py │ │ │ │ ├── sflckr.py │ │ │ │ └── utils.py │ │ │ │ ├── modules │ │ │ │ ├── autoencoder │ │ │ │ │ └── lpips │ │ │ │ │ │ └── vgg.pth │ │ │ │ ├── diffusionmodules │ │ │ │ │ └── model.py │ │ │ │ ├── discriminator │ │ │ │ │ └── model.py │ │ │ │ ├── losses │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── lpips.py │ │ │ │ │ ├── segmentation.py │ │ │ │ │ ├── util.py │ │ │ │ │ └── vqperceptual.py │ │ │ │ ├── misc │ │ │ │ │ └── coord.py │ │ │ │ ├── util.py │ │ │ │ └── vqvae │ │ │ │ │ └── quantize.py │ │ │ │ └── util.py │ │ ├── dsine │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ │ ├── dsine_arch.py │ │ │ │ └── submodules │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── efficientnet_repo │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── BENCHMARK.md │ │ │ │ │ ├── LICENSE │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── caffe2_benchmark.py │ │ │ │ │ ├── caffe2_validate.py │ │ │ │ │ ├── geffnet │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── activations │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── activations.py │ │ │ │ │ │ ├── activations_jit.py │ │ │ │ │ │ └── activations_me.py │ │ │ │ │ ├── config.py │ │ │ │ │ ├── conv2d_layers.py │ │ │ │ │ ├── efficientnet_builder.py │ │ │ │ │ ├── gen_efficientnet.py │ │ │ │ │ ├── helpers.py │ │ │ │ │ ├── mobilenetv3.py │ │ │ │ │ ├── model_factory.py │ │ │ │ │ └── version.py 
│ │ │ │ │ ├── hubconf.py │ │ │ │ │ ├── onnx_export.py │ │ │ │ │ ├── onnx_optimize.py │ │ │ │ │ ├── onnx_to_caffe.py │ │ │ │ │ ├── onnx_validate.py │ │ │ │ │ ├── requirements.txt │ │ │ │ │ ├── setup.py │ │ │ │ │ ├── utils.py │ │ │ │ │ └── validate.py │ │ │ └── utils │ │ │ │ ├── rotation.py │ │ │ │ └── utils.py │ │ ├── dwpose │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── animalpose.py │ │ │ ├── body.py │ │ │ ├── dw_onnx │ │ │ │ ├── __init__.py │ │ │ │ ├── cv_ox_det.py │ │ │ │ ├── cv_ox_pose.py │ │ │ │ └── cv_ox_yolo_nas.py │ │ │ ├── dw_torchscript │ │ │ │ ├── __init__.py │ │ │ │ ├── jit_det.py │ │ │ │ └── jit_pose.py │ │ │ ├── face.py │ │ │ ├── hand.py │ │ │ ├── model.py │ │ │ ├── types.py │ │ │ ├── util.py │ │ │ └── wholebody.py │ │ ├── hed │ │ │ └── __init__.py │ │ ├── leres │ │ │ ├── __init__.py │ │ │ ├── leres │ │ │ │ ├── LICENSE │ │ │ │ ├── Resnet.py │ │ │ │ ├── Resnext_torch.py │ │ │ │ ├── __init__.py │ │ │ │ ├── depthmap.py │ │ │ │ ├── multi_depth_model_woauxi.py │ │ │ │ ├── net_tools.py │ │ │ │ └── network_auxi.py │ │ │ └── pix2pix │ │ │ │ ├── LICENSE │ │ │ │ ├── __init__.py │ │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── base_model.py │ │ │ │ ├── base_model_hg.py │ │ │ │ ├── networks.py │ │ │ │ └── pix2pix4depth_model.py │ │ │ │ ├── options │ │ │ │ ├── __init__.py │ │ │ │ ├── base_options.py │ │ │ │ └── test_options.py │ │ │ │ └── util │ │ │ │ ├── __init__.py │ │ │ │ └── util.py │ │ ├── lineart │ │ │ ├── LICENSE │ │ │ └── __init__.py │ │ ├── lineart_anime │ │ │ ├── LICENSE │ │ │ └── __init__.py │ │ ├── lineart_standard │ │ │ └── __init__.py │ │ ├── manga_line │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── model_torch.py │ │ ├── mediapipe_face │ │ │ ├── __init__.py │ │ │ └── mediapipe_face_common.py │ │ ├── mesh_graphormer │ │ │ ├── __init__.py │ │ │ ├── cls_hrnet_w64_sgd_lr5e-2_wd1e-4_bs32_x100.yaml │ │ │ ├── depth_preprocessor.py │ │ │ ├── hand_landmarker.task │ │ │ └── pipeline.py │ │ ├── metric3d │ │ │ ├── __init__.py │ │ │ └── mono │ │ │ │ ├── configs │ │ │ │ ├── HourglassDecoder │ │ │ │ │ ├── convlarge.0.3_150.py │ │ │ │ │ ├── test_kitti_convlarge.0.3_150.py │ │ │ │ │ ├── test_nyu_convlarge.0.3_150.py │ │ │ │ │ ├── vit.raft5.giant2.py │ │ │ │ │ ├── vit.raft5.large.py │ │ │ │ │ └── vit.raft5.small.py │ │ │ │ ├── __init__.py │ │ │ │ └── _base_ │ │ │ │ │ ├── _data_base_.py │ │ │ │ │ ├── datasets │ │ │ │ │ └── _data_base_.py │ │ │ │ │ ├── default_runtime.py │ │ │ │ │ └── models │ │ │ │ │ ├── backbones │ │ │ │ │ ├── convnext_large.py │ │ │ │ │ ├── dino_vit_giant2_reg.py │ │ │ │ │ ├── dino_vit_large.py │ │ │ │ │ ├── dino_vit_large_reg.py │ │ │ │ │ └── dino_vit_small_reg.py │ │ │ │ │ └── encoder_decoder │ │ │ │ │ ├── convnext_large.hourglassdecoder.py │ │ │ │ │ ├── dino_vit_giant2_reg.dpt_raft.py │ │ │ │ │ ├── dino_vit_large.dpt_raft.py │ │ │ │ │ ├── dino_vit_large_reg.dpt_raft.py │ │ │ │ │ └── dino_vit_small_reg.dpt_raft.py │ │ │ │ ├── model │ │ │ │ ├── __init__.py │ │ │ │ ├── backbones │ │ │ │ │ ├── ConvNeXt.py │ │ │ │ │ ├── ViT_DINO.py │ │ │ │ │ ├── ViT_DINO_reg.py │ │ │ │ │ └── __init__.py │ │ │ │ ├── decode_heads │ │ │ │ │ ├── HourGlassDecoder.py │ │ │ │ │ ├── RAFTDepthNormalDPTDecoder5.py │ │ │ │ │ └── __init__.py │ │ │ │ ├── model_pipelines │ │ │ │ │ ├── __base_model__.py │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── dense_pipeline.py │ │ │ │ └── monodepth_model.py │ │ │ │ ├── tools │ │ │ │ └── test_scale_cano.py │ │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── avg_meter.py │ │ │ │ ├── comm.py │ │ │ │ ├── custom_data.py │ │ │ │ ├── do_test.py │ │ │ │ ├── logger.py 
│ │ │ │ ├── mldb.py │ │ │ │ ├── pcd_filter.py │ │ │ │ ├── running.py │ │ │ │ ├── transform.py │ │ │ │ ├── unproj_pcd.py │ │ │ │ └── visualization.py │ │ ├── midas │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── api.py │ │ │ └── utils.py │ │ ├── mlsd │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── mbv2_mlsd_large.py │ │ │ │ └── mbv2_mlsd_tiny.py │ │ │ └── utils.py │ │ ├── normalbae │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── nets │ │ │ │ ├── NNET.py │ │ │ │ ├── __init__.py │ │ │ │ ├── baseline.py │ │ │ │ └── submodules │ │ │ │ ├── __init__.py │ │ │ │ ├── decoder.py │ │ │ │ ├── efficientnet_repo │ │ │ │ ├── .gitignore │ │ │ │ ├── BENCHMARK.md │ │ │ │ ├── LICENSE │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── caffe2_benchmark.py │ │ │ │ ├── caffe2_validate.py │ │ │ │ ├── geffnet │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── activations │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── activations.py │ │ │ │ │ │ ├── activations_jit.py │ │ │ │ │ │ └── activations_me.py │ │ │ │ │ ├── config.py │ │ │ │ │ ├── conv2d_layers.py │ │ │ │ │ ├── efficientnet_builder.py │ │ │ │ │ ├── gen_efficientnet.py │ │ │ │ │ ├── helpers.py │ │ │ │ │ ├── mobilenetv3.py │ │ │ │ │ ├── model_factory.py │ │ │ │ │ └── version.py │ │ │ │ ├── hubconf.py │ │ │ │ ├── onnx_export.py │ │ │ │ ├── onnx_optimize.py │ │ │ │ ├── onnx_to_caffe.py │ │ │ │ ├── onnx_validate.py │ │ │ │ ├── requirements.txt │ │ │ │ ├── setup.py │ │ │ │ ├── utils.py │ │ │ │ └── validate.py │ │ │ │ ├── encoder.py │ │ │ │ └── submodules.py │ │ ├── oneformer │ │ │ ├── __init__.py │ │ │ ├── api.py │ │ │ └── configs │ │ │ │ ├── ade20k │ │ │ │ ├── Base-ADE20K-UnifiedSegmentation.yaml │ │ │ │ ├── oneformer_R50_bs16_160k.yaml │ │ │ │ └── oneformer_swin_large_IN21k_384_bs16_160k.yaml │ │ │ │ └── coco │ │ │ │ ├── Base-COCO-UnifiedSegmentation.yaml │ │ │ │ ├── oneformer_R50_bs16_50ep.yaml │ │ │ │ └── oneformer_swin_large_IN21k_384_bs16_100ep.yaml │ │ ├── open_pose │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ ├── body.py │ │ │ ├── face.py │ │ │ ├── hand.py │ │ │ ├── model.py │ │ │ └── util.py │ │ ├── pidi │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── model.py │ │ ├── processor.py │ │ ├── pyracanny │ │ │ └── __init__.py │ │ ├── recolor │ │ │ └── __init__.py │ │ ├── sam │ │ │ ├── __init__.py │ │ │ ├── automatic_mask_generator.py │ │ │ ├── build_sam.py │ │ │ ├── modeling │ │ │ │ ├── __init__.py │ │ │ │ ├── common.py │ │ │ │ ├── image_encoder.py │ │ │ │ ├── mask_decoder.py │ │ │ │ ├── prompt_encoder.py │ │ │ │ ├── sam.py │ │ │ │ ├── tiny_vit_sam.py │ │ │ │ └── transformer.py │ │ │ ├── predictor.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── amg.py │ │ │ │ ├── onnx.py │ │ │ │ └── transforms.py │ │ ├── scribble │ │ │ └── __init__.py │ │ ├── shuffle │ │ │ └── __init__.py │ │ ├── teed │ │ │ ├── Fmish.py │ │ │ ├── Fsmish.py │ │ │ ├── LICENSE.txt │ │ │ ├── Xmish.py │ │ │ ├── Xsmish.py │ │ │ ├── __init__.py │ │ │ └── ted.py │ │ ├── tests │ │ │ ├── requirements.txt │ │ │ ├── test_processor.py │ │ │ └── test_processor_pytest.py │ │ ├── tile │ │ │ ├── __init__.py │ │ │ └── guided_filter.py │ │ ├── uniformer │ │ │ ├── __init__.py │ │ │ ├── configs │ │ │ │ └── _base_ │ │ │ │ │ ├── datasets │ │ │ │ │ ├── ade20k.py │ │ │ │ │ ├── chase_db1.py │ │ │ │ │ ├── cityscapes.py │ │ │ │ │ ├── cityscapes_769x769.py │ │ │ │ │ ├── drive.py │ │ │ │ │ ├── hrf.py │ │ │ │ │ ├── pascal_context.py │ │ │ │ │ ├── pascal_context_59.py │ │ │ │ │ ├── pascal_voc12.py │ │ │ │ │ ├── pascal_voc12_aug.py │ │ │ │ │ └── stare.py │ │ │ │ │ ├── 
default_runtime.py │ │ │ │ │ ├── models │ │ │ │ │ ├── ann_r50-d8.py │ │ │ │ │ ├── apcnet_r50-d8.py │ │ │ │ │ ├── ccnet_r50-d8.py │ │ │ │ │ ├── cgnet.py │ │ │ │ │ ├── danet_r50-d8.py │ │ │ │ │ ├── deeplabv3_r50-d8.py │ │ │ │ │ ├── deeplabv3_unet_s5-d16.py │ │ │ │ │ ├── deeplabv3plus_r50-d8.py │ │ │ │ │ ├── dmnet_r50-d8.py │ │ │ │ │ ├── dnl_r50-d8.py │ │ │ │ │ ├── emanet_r50-d8.py │ │ │ │ │ ├── encnet_r50-d8.py │ │ │ │ │ ├── fast_scnn.py │ │ │ │ │ ├── fcn_hr18.py │ │ │ │ │ ├── fcn_r50-d8.py │ │ │ │ │ ├── fcn_unet_s5-d16.py │ │ │ │ │ ├── fpn_r50.py │ │ │ │ │ ├── fpn_uniformer.py │ │ │ │ │ ├── gcnet_r50-d8.py │ │ │ │ │ ├── lraspp_m-v3-d8.py │ │ │ │ │ ├── nonlocal_r50-d8.py │ │ │ │ │ ├── ocrnet_hr18.py │ │ │ │ │ ├── ocrnet_r50-d8.py │ │ │ │ │ ├── pointrend_r50.py │ │ │ │ │ ├── psanet_r50-d8.py │ │ │ │ │ ├── pspnet_r50-d8.py │ │ │ │ │ ├── pspnet_unet_s5-d16.py │ │ │ │ │ ├── upernet_r50.py │ │ │ │ │ └── upernet_uniformer.py │ │ │ │ │ └── schedules │ │ │ │ │ ├── schedule_160k.py │ │ │ │ │ ├── schedule_20k.py │ │ │ │ │ ├── schedule_40k.py │ │ │ │ │ └── schedule_80k.py │ │ │ ├── inference.py │ │ │ ├── mmcv_custom │ │ │ │ ├── __init__.py │ │ │ │ └── checkpoint.py │ │ │ ├── uniformer.py │ │ │ └── upernet_global_small.py │ │ ├── unimatch │ │ │ ├── __init__.py │ │ │ ├── unimatch │ │ │ │ ├── __init__.py │ │ │ │ ├── attention.py │ │ │ │ ├── backbone.py │ │ │ │ ├── geometry.py │ │ │ │ ├── matching.py │ │ │ │ ├── position.py │ │ │ │ ├── reg_refine.py │ │ │ │ ├── transformer.py │ │ │ │ ├── trident_conv.py │ │ │ │ ├── unimatch.py │ │ │ │ └── utils.py │ │ │ └── utils │ │ │ │ ├── dist_utils.py │ │ │ │ ├── file_io.py │ │ │ │ ├── flow_viz.py │ │ │ │ ├── frame_utils.py │ │ │ │ ├── logger.py │ │ │ │ ├── misc.py │ │ │ │ ├── utils.py │ │ │ │ └── visualization.py │ │ ├── util.py │ │ └── zoe │ │ │ ├── LICENSE │ │ │ ├── __init__.py │ │ │ └── zoedepth │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── base_models │ │ │ │ ├── __init__.py │ │ │ │ ├── depth_anything.py │ │ │ │ ├── dpt_dinov2 │ │ │ │ │ ├── blocks.py │ │ │ │ │ └── dpt.py │ │ │ │ └── midas.py │ │ │ ├── builder.py │ │ │ ├── depth_model.py │ │ │ ├── layers │ │ │ │ ├── __init__.py │ │ │ │ ├── attractor.py │ │ │ │ ├── dist_layers.py │ │ │ │ ├── localbins_layers.py │ │ │ │ └── patch_transformer.py │ │ │ ├── model_io.py │ │ │ ├── zoedepth │ │ │ │ ├── __init__.py │ │ │ │ ├── config_zoedepth.json │ │ │ │ ├── config_zoedepth_kitti.json │ │ │ │ └── zoedepth_v1.py │ │ │ ├── zoedepth_anything │ │ │ │ ├── __init__.py │ │ │ │ └── zoedepth_v1.py │ │ │ └── zoedepth_nk │ │ │ │ ├── __init__.py │ │ │ │ ├── config_zoedepth_nk.json │ │ │ │ └── zoedepth_nk_v1.py │ │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── arg_utils.py │ │ │ ├── config.py │ │ │ └── easydict │ │ │ └── __init__.py │ ├── custom_detectron2 │ │ ├── __init__.py │ │ ├── checkpoint │ │ │ ├── __init__.py │ │ │ ├── c2_model_loading.py │ │ │ ├── catalog.py │ │ │ └── detection_checkpoint.py │ │ ├── config │ │ │ ├── __init__.py │ │ │ ├── compat.py │ │ │ ├── config.py │ │ │ ├── defaults.py │ │ │ ├── instantiate.py │ │ │ └── lazy.py │ │ ├── data │ │ │ ├── __init__.py │ │ │ ├── benchmark.py │ │ │ ├── build.py │ │ │ ├── catalog.py │ │ │ ├── common.py │ │ │ ├── dataset_mapper.py │ │ │ ├── datasets │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── builtin.py │ │ │ │ ├── builtin_meta.py │ │ │ │ ├── cityscapes.py │ │ │ │ ├── cityscapes_panoptic.py │ │ │ │ ├── coco.py │ │ │ │ ├── coco_panoptic.py │ │ │ │ ├── lvis.py │ │ │ │ ├── lvis_v0_5_categories.py │ │ │ │ ├── lvis_v1_categories.py │ │ │ │ ├── 
lvis_v1_category_image_count.py │ │ │ │ ├── pascal_voc.py │ │ │ │ └── register_coco.py │ │ │ ├── detection_utils.py │ │ │ ├── samplers │ │ │ │ ├── __init__.py │ │ │ │ ├── distributed_sampler.py │ │ │ │ └── grouped_batch_sampler.py │ │ │ └── transforms │ │ │ │ ├── __init__.py │ │ │ │ ├── augmentation.py │ │ │ │ ├── augmentation_impl.py │ │ │ │ └── transform.py │ │ ├── engine │ │ │ ├── __init__.py │ │ │ ├── defaults.py │ │ │ ├── hooks.py │ │ │ ├── launch.py │ │ │ └── train_loop.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── cityscapes_evaluation.py │ │ │ ├── coco_evaluation.py │ │ │ ├── evaluator.py │ │ │ ├── fast_eval_api.py │ │ │ ├── lvis_evaluation.py │ │ │ ├── panoptic_evaluation.py │ │ │ ├── pascal_voc_evaluation.py │ │ │ ├── rotated_coco_evaluation.py │ │ │ ├── sem_seg_evaluation.py │ │ │ └── testing.py │ │ ├── export │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── api.py │ │ │ ├── c10.py │ │ │ ├── caffe2_export.py │ │ │ ├── caffe2_inference.py │ │ │ ├── caffe2_modeling.py │ │ │ ├── caffe2_patch.py │ │ │ ├── flatten.py │ │ │ ├── shared.py │ │ │ ├── torchscript.py │ │ │ └── torchscript_patch.py │ │ ├── layers │ │ │ ├── __init__.py │ │ │ ├── aspp.py │ │ │ ├── batch_norm.py │ │ │ ├── blocks.py │ │ │ ├── csrc │ │ │ │ ├── README.md │ │ │ │ ├── ROIAlignRotated │ │ │ │ │ ├── ROIAlignRotated.h │ │ │ │ │ ├── ROIAlignRotated_cpu.cpp │ │ │ │ │ └── ROIAlignRotated_cuda.cu │ │ │ │ ├── box_iou_rotated │ │ │ │ │ ├── box_iou_rotated.h │ │ │ │ │ ├── box_iou_rotated_cpu.cpp │ │ │ │ │ ├── box_iou_rotated_cuda.cu │ │ │ │ │ └── box_iou_rotated_utils.h │ │ │ │ ├── cocoeval │ │ │ │ │ ├── cocoeval.cpp │ │ │ │ │ └── cocoeval.h │ │ │ │ ├── cuda_version.cu │ │ │ │ ├── deformable │ │ │ │ │ ├── deform_conv.h │ │ │ │ │ ├── deform_conv_cuda.cu │ │ │ │ │ └── deform_conv_cuda_kernel.cu │ │ │ │ ├── nms_rotated │ │ │ │ │ ├── nms_rotated.h │ │ │ │ │ ├── nms_rotated_cpu.cpp │ │ │ │ │ └── nms_rotated_cuda.cu │ │ │ │ └── vision.cpp │ │ │ ├── deform_conv.py │ │ │ ├── losses.py │ │ │ ├── mask_ops.py │ │ │ ├── nms.py │ │ │ ├── roi_align.py │ │ │ ├── roi_align_rotated.py │ │ │ ├── rotated_boxes.py │ │ │ ├── shape_spec.py │ │ │ └── wrappers.py │ │ ├── model_zoo │ │ │ ├── __init__.py │ │ │ └── model_zoo.py │ │ ├── modeling │ │ │ ├── __init__.py │ │ │ ├── anchor_generator.py │ │ │ ├── backbone │ │ │ │ ├── __init__.py │ │ │ │ ├── backbone.py │ │ │ │ ├── build.py │ │ │ │ ├── fpn.py │ │ │ │ ├── mvit.py │ │ │ │ ├── regnet.py │ │ │ │ ├── resnet.py │ │ │ │ ├── swin.py │ │ │ │ ├── utils.py │ │ │ │ └── vit.py │ │ │ ├── box_regression.py │ │ │ ├── matcher.py │ │ │ ├── meta_arch │ │ │ │ ├── __init__.py │ │ │ │ ├── build.py │ │ │ │ ├── dense_detector.py │ │ │ │ ├── fcos.py │ │ │ │ ├── panoptic_fpn.py │ │ │ │ ├── rcnn.py │ │ │ │ ├── retinanet.py │ │ │ │ └── semantic_seg.py │ │ │ ├── mmdet_wrapper.py │ │ │ ├── poolers.py │ │ │ ├── postprocessing.py │ │ │ ├── proposal_generator │ │ │ │ ├── __init__.py │ │ │ │ ├── build.py │ │ │ │ ├── proposal_utils.py │ │ │ │ ├── rpn.py │ │ │ │ └── rrpn.py │ │ │ ├── roi_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── box_head.py │ │ │ │ ├── cascade_rcnn.py │ │ │ │ ├── fast_rcnn.py │ │ │ │ ├── keypoint_head.py │ │ │ │ ├── mask_head.py │ │ │ │ ├── roi_heads.py │ │ │ │ └── rotated_fast_rcnn.py │ │ │ ├── sampling.py │ │ │ └── test_time_augmentation.py │ │ ├── projects │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ └── deeplab │ │ │ │ ├── __init__.py │ │ │ │ ├── build_solver.py │ │ │ │ ├── config.py │ │ │ │ ├── loss.py │ │ │ │ ├── lr_scheduler.py │ │ │ │ ├── resnet.py │ │ │ │ └── semantic_seg.py │ │ ├── solver 
│ │ │ ├── __init__.py │ │ │ ├── build.py │ │ │ └── lr_scheduler.py │ │ ├── structures │ │ │ ├── __init__.py │ │ │ ├── boxes.py │ │ │ ├── image_list.py │ │ │ ├── instances.py │ │ │ ├── keypoints.py │ │ │ ├── masks.py │ │ │ └── rotated_boxes.py │ │ ├── tracking │ │ │ ├── __init__.py │ │ │ ├── base_tracker.py │ │ │ ├── bbox_iou_tracker.py │ │ │ ├── hungarian_tracker.py │ │ │ ├── iou_weighted_hungarian_bbox_iou_tracker.py │ │ │ ├── utils.py │ │ │ └── vanilla_hungarian_bbox_iou_tracker.py │ │ └── utils │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── analysis.py │ │ │ ├── collect_env.py │ │ │ ├── colormap.py │ │ │ ├── comm.py │ │ │ ├── develop.py │ │ │ ├── env.py │ │ │ ├── events.py │ │ │ ├── file_io.py │ │ │ ├── logger.py │ │ │ ├── memory.py │ │ │ ├── registry.py │ │ │ ├── serialize.py │ │ │ ├── testing.py │ │ │ ├── tracing.py │ │ │ ├── video_visualizer.py │ │ │ └── visualizer.py │ ├── custom_manopth │ │ ├── CHANGES.md │ │ ├── LICENSE │ │ ├── __init__.py │ │ ├── argutils.py │ │ ├── demo.py │ │ ├── manolayer.py │ │ ├── posemapper.py │ │ ├── rodrigues_layer.py │ │ ├── rot6d.py │ │ ├── rotproj.py │ │ ├── smpl_handpca_wrapper_HAND_only.py │ │ ├── tensutils.py │ │ └── verts.py │ ├── custom_mesh_graphormer │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── build.py │ │ │ ├── hand_mesh_tsv.py │ │ │ └── human_mesh_tsv.py │ │ ├── modeling │ │ │ ├── __init__.py │ │ │ ├── _gcnn.py │ │ │ ├── _mano.py │ │ │ ├── _smpl.py │ │ │ ├── bert │ │ │ │ ├── __init__.py │ │ │ │ ├── bert-base-uncased │ │ │ │ │ └── config.json │ │ │ │ ├── e2e_body_network.py │ │ │ │ ├── e2e_hand_network.py │ │ │ │ ├── file_utils.py │ │ │ │ ├── modeling_bert.py │ │ │ │ ├── modeling_graphormer.py │ │ │ │ └── modeling_utils.py │ │ │ ├── data │ │ │ │ ├── J_regressor_extra.npy │ │ │ │ ├── J_regressor_h36m_correct.npy │ │ │ │ ├── MANO_LEFT.pkl │ │ │ │ ├── MANO_RIGHT.pkl │ │ │ │ ├── README.md │ │ │ │ ├── config.py │ │ │ │ ├── mano_195_adjmat_indices.pt │ │ │ │ ├── mano_195_adjmat_size.pt │ │ │ │ ├── mano_195_adjmat_values.pt │ │ │ │ ├── mano_downsampling.npz │ │ │ │ ├── mesh_downsampling.npz │ │ │ │ ├── smpl_431_adjmat_indices.pt │ │ │ │ ├── smpl_431_adjmat_size.pt │ │ │ │ ├── smpl_431_adjmat_values.pt │ │ │ │ └── smpl_431_faces.npy │ │ │ └── hrnet │ │ │ │ ├── config │ │ │ │ ├── __init__.py │ │ │ │ ├── default.py │ │ │ │ └── models.py │ │ │ │ ├── hrnet_cls_net.py │ │ │ │ └── hrnet_cls_net_gridfeat.py │ │ ├── tools │ │ │ ├── run_gphmer_bodymesh.py │ │ │ ├── run_gphmer_bodymesh_inference.py │ │ │ ├── run_gphmer_handmesh.py │ │ │ ├── run_gphmer_handmesh_inference.py │ │ │ └── run_hand_multiscale.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── comm.py │ │ │ ├── dataset_utils.py │ │ │ ├── geometric_layers.py │ │ │ ├── image_ops.py │ │ │ ├── logger.py │ │ │ ├── metric_logger.py │ │ │ ├── metric_pampjpe.py │ │ │ ├── miscellaneous.py │ │ │ ├── renderer.py │ │ │ ├── tsv_file.py │ │ │ └── tsv_file_ops.py │ ├── custom_midas_repo │ │ ├── LICENSE │ │ ├── README.md │ │ ├── __init__.py │ │ ├── hubconf.py │ │ └── midas │ │ │ ├── __init__.py │ │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ ├── beit.py │ │ │ ├── levit.py │ │ │ ├── next_vit.py │ │ │ ├── swin.py │ │ │ ├── swin2.py │ │ │ ├── swin_common.py │ │ │ ├── utils.py │ │ │ └── vit.py │ │ │ ├── base_model.py │ │ │ ├── blocks.py │ │ │ ├── dpt_depth.py │ │ │ ├── midas_net.py │ │ │ ├── midas_net_custom.py │ │ │ ├── model_loader.py │ │ │ └── transforms.py │ ├── custom_mmpkg │ │ ├── __init__.py │ │ ├── custom_mmcv │ │ │ ├── __init__.py │ │ │ ├── arraymisc │ │ │ │ ├── __init__.py │ │ │ │ └── 
quantization.py │ │ │ ├── cnn │ │ │ │ ├── __init__.py │ │ │ │ ├── alexnet.py │ │ │ │ ├── bricks │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── activation.py │ │ │ │ │ ├── context_block.py │ │ │ │ │ ├── conv.py │ │ │ │ │ ├── conv2d_adaptive_padding.py │ │ │ │ │ ├── conv_module.py │ │ │ │ │ ├── conv_ws.py │ │ │ │ │ ├── depthwise_separable_conv_module.py │ │ │ │ │ ├── drop.py │ │ │ │ │ ├── generalized_attention.py │ │ │ │ │ ├── hsigmoid.py │ │ │ │ │ ├── hswish.py │ │ │ │ │ ├── non_local.py │ │ │ │ │ ├── norm.py │ │ │ │ │ ├── padding.py │ │ │ │ │ ├── plugin.py │ │ │ │ │ ├── registry.py │ │ │ │ │ ├── scale.py │ │ │ │ │ ├── swish.py │ │ │ │ │ ├── transformer.py │ │ │ │ │ ├── upsample.py │ │ │ │ │ └── wrappers.py │ │ │ │ ├── builder.py │ │ │ │ ├── resnet.py │ │ │ │ ├── utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── flops_counter.py │ │ │ │ │ ├── fuse_conv_bn.py │ │ │ │ │ ├── sync_bn.py │ │ │ │ │ └── weight_init.py │ │ │ │ └── vgg.py │ │ │ ├── engine │ │ │ │ ├── __init__.py │ │ │ │ └── test.py │ │ │ ├── fileio │ │ │ │ ├── __init__.py │ │ │ │ ├── file_client.py │ │ │ │ ├── handlers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base.py │ │ │ │ │ ├── json_handler.py │ │ │ │ │ ├── pickle_handler.py │ │ │ │ │ └── yaml_handler.py │ │ │ │ ├── io.py │ │ │ │ └── parse.py │ │ │ ├── image │ │ │ │ ├── __init__.py │ │ │ │ ├── colorspace.py │ │ │ │ ├── geometric.py │ │ │ │ ├── io.py │ │ │ │ ├── misc.py │ │ │ │ └── photometric.py │ │ │ ├── model_zoo │ │ │ │ ├── deprecated.json │ │ │ │ ├── mmcls.json │ │ │ │ └── open_mmlab.json │ │ │ ├── ops │ │ │ │ ├── __init__.py │ │ │ │ ├── assign_score_withk.py │ │ │ │ ├── ball_query.py │ │ │ │ ├── bbox.py │ │ │ │ ├── border_align.py │ │ │ │ ├── box_iou_rotated.py │ │ │ │ ├── carafe.py │ │ │ │ ├── cc_attention.py │ │ │ │ ├── contour_expand.py │ │ │ │ ├── corner_pool.py │ │ │ │ ├── correlation.py │ │ │ │ ├── deform_conv.py │ │ │ │ ├── deform_roi_pool.py │ │ │ │ ├── deprecated_wrappers.py │ │ │ │ ├── focal_loss.py │ │ │ │ ├── furthest_point_sample.py │ │ │ │ ├── fused_bias_leakyrelu.py │ │ │ │ ├── gather_points.py │ │ │ │ ├── group_points.py │ │ │ │ ├── info.py │ │ │ │ ├── iou3d.py │ │ │ │ ├── knn.py │ │ │ │ ├── masked_conv.py │ │ │ │ ├── merge_cells.py │ │ │ │ ├── modulated_deform_conv.py │ │ │ │ ├── multi_scale_deform_attn.py │ │ │ │ ├── nms.py │ │ │ │ ├── pixel_group.py │ │ │ │ ├── point_sample.py │ │ │ │ ├── points_in_boxes.py │ │ │ │ ├── points_sampler.py │ │ │ │ ├── psa_mask.py │ │ │ │ ├── roi_align.py │ │ │ │ ├── roi_align_rotated.py │ │ │ │ ├── roi_pool.py │ │ │ │ ├── roiaware_pool3d.py │ │ │ │ ├── roipoint_pool3d.py │ │ │ │ ├── saconv.py │ │ │ │ ├── scatter_points.py │ │ │ │ ├── sync_bn.py │ │ │ │ ├── three_interpolate.py │ │ │ │ ├── three_nn.py │ │ │ │ ├── tin_shift.py │ │ │ │ ├── upfirdn2d.py │ │ │ │ └── voxelize.py │ │ │ ├── parallel │ │ │ │ ├── __init__.py │ │ │ │ ├── _functions.py │ │ │ │ ├── collate.py │ │ │ │ ├── data_container.py │ │ │ │ ├── data_parallel.py │ │ │ │ ├── distributed.py │ │ │ │ ├── distributed_deprecated.py │ │ │ │ ├── registry.py │ │ │ │ ├── scatter_gather.py │ │ │ │ └── utils.py │ │ │ ├── runner │ │ │ │ ├── __init__.py │ │ │ │ ├── base_module.py │ │ │ │ ├── base_runner.py │ │ │ │ ├── builder.py │ │ │ │ ├── checkpoint.py │ │ │ │ ├── default_constructor.py │ │ │ │ ├── dist_utils.py │ │ │ │ ├── epoch_based_runner.py │ │ │ │ ├── fp16_utils.py │ │ │ │ ├── hooks │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── checkpoint.py │ │ │ │ │ ├── closure.py │ │ │ │ │ ├── ema.py │ │ │ │ │ ├── evaluation.py │ │ │ │ │ ├── hook.py │ │ │ │ │ ├── iter_timer.py │ │ │ │ │ ├── logger │ │ │ 
│ │ │ ├── __init__.py │ │ │ │ │ │ ├── base.py │ │ │ │ │ │ ├── dvclive.py │ │ │ │ │ │ ├── mlflow.py │ │ │ │ │ │ ├── neptune.py │ │ │ │ │ │ ├── pavi.py │ │ │ │ │ │ ├── tensorboard.py │ │ │ │ │ │ ├── text.py │ │ │ │ │ │ └── wandb.py │ │ │ │ │ ├── lr_updater.py │ │ │ │ │ ├── memory.py │ │ │ │ │ ├── momentum_updater.py │ │ │ │ │ ├── optimizer.py │ │ │ │ │ ├── profiler.py │ │ │ │ │ ├── sampler_seed.py │ │ │ │ │ └── sync_buffer.py │ │ │ │ ├── iter_based_runner.py │ │ │ │ ├── log_buffer.py │ │ │ │ ├── optimizer │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── builder.py │ │ │ │ │ └── default_constructor.py │ │ │ │ ├── priority.py │ │ │ │ └── utils.py │ │ │ ├── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── config.py │ │ │ │ ├── env.py │ │ │ │ ├── ext_loader.py │ │ │ │ ├── logging.py │ │ │ │ ├── misc.py │ │ │ │ ├── parrots_jit.py │ │ │ │ ├── parrots_wrapper.py │ │ │ │ ├── path.py │ │ │ │ ├── progressbar.py │ │ │ │ ├── registry.py │ │ │ │ ├── testing.py │ │ │ │ ├── timer.py │ │ │ │ ├── trace.py │ │ │ │ └── version_utils.py │ │ │ ├── version.py │ │ │ ├── video │ │ │ │ ├── __init__.py │ │ │ │ ├── io.py │ │ │ │ ├── optflow.py │ │ │ │ └── processing.py │ │ │ └── visualization │ │ │ │ ├── __init__.py │ │ │ │ ├── color.py │ │ │ │ ├── image.py │ │ │ │ └── optflow.py │ │ └── custom_mmseg │ │ │ ├── apis │ │ │ ├── __init__.py │ │ │ ├── inference.py │ │ │ ├── test.py │ │ │ └── train.py │ │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── evaluation │ │ │ │ ├── __init__.py │ │ │ │ ├── class_names.py │ │ │ │ ├── eval_hooks.py │ │ │ │ └── metrics.py │ │ │ ├── seg │ │ │ │ ├── __init__.py │ │ │ │ ├── builder.py │ │ │ │ └── sampler │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_pixel_sampler.py │ │ │ │ │ └── ohem_pixel_sampler.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ └── misc.py │ │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── ade.py │ │ │ ├── builder.py │ │ │ ├── chase_db1.py │ │ │ ├── cityscapes.py │ │ │ ├── custom.py │ │ │ ├── dataset_wrappers.py │ │ │ ├── drive.py │ │ │ ├── hrf.py │ │ │ ├── pascal_context.py │ │ │ ├── pipelines │ │ │ │ ├── __init__.py │ │ │ │ ├── compose.py │ │ │ │ ├── formating.py │ │ │ │ ├── loading.py │ │ │ │ ├── test_time_aug.py │ │ │ │ └── transforms.py │ │ │ ├── stare.py │ │ │ └── voc.py │ │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── cgnet.py │ │ │ │ ├── fast_scnn.py │ │ │ │ ├── hrnet.py │ │ │ │ ├── mobilenet_v2.py │ │ │ │ ├── mobilenet_v3.py │ │ │ │ ├── resnest.py │ │ │ │ ├── resnet.py │ │ │ │ ├── resnext.py │ │ │ │ ├── unet.py │ │ │ │ └── vit.py │ │ │ ├── builder.py │ │ │ ├── decode_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── ann_head.py │ │ │ │ ├── apc_head.py │ │ │ │ ├── aspp_head.py │ │ │ │ ├── cascade_decode_head.py │ │ │ │ ├── cc_head.py │ │ │ │ ├── da_head.py │ │ │ │ ├── decode_head.py │ │ │ │ ├── dm_head.py │ │ │ │ ├── dnl_head.py │ │ │ │ ├── ema_head.py │ │ │ │ ├── enc_head.py │ │ │ │ ├── fcn_head.py │ │ │ │ ├── fpn_head.py │ │ │ │ ├── gc_head.py │ │ │ │ ├── lraspp_head.py │ │ │ │ ├── nl_head.py │ │ │ │ ├── ocr_head.py │ │ │ │ ├── point_head.py │ │ │ │ ├── psa_head.py │ │ │ │ ├── psp_head.py │ │ │ │ ├── sep_aspp_head.py │ │ │ │ ├── sep_fcn_head.py │ │ │ │ └── uper_head.py │ │ │ ├── losses │ │ │ │ ├── __init__.py │ │ │ │ ├── accuracy.py │ │ │ │ ├── cross_entropy_loss.py │ │ │ │ ├── dice_loss.py │ │ │ │ ├── lovasz_loss.py │ │ │ │ └── utils.py │ │ │ ├── necks │ │ │ │ ├── __init__.py │ │ │ │ ├── fpn.py │ │ │ │ └── multilevel_neck.py │ │ │ ├── segmentors │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── cascade_encoder_decoder.py │ │ │ │ └── 
encoder_decoder.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── drop.py │ │ │ │ ├── inverted_residual.py │ │ │ │ ├── make_divisible.py │ │ │ │ ├── res_layer.py │ │ │ │ ├── se_layer.py │ │ │ │ ├── self_attention_block.py │ │ │ │ ├── up_conv_block.py │ │ │ │ └── weight_init.py │ │ │ ├── ops │ │ │ ├── __init__.py │ │ │ ├── encoding.py │ │ │ └── wrappers.py │ │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── collect_env.py │ │ │ └── logger.py │ ├── custom_oneformer │ │ ├── __init__.py │ │ ├── config.py │ │ ├── data │ │ │ ├── __init__.py │ │ │ ├── bpe_simple_vocab_16e6.txt.gz │ │ │ ├── build.py │ │ │ ├── dataset_mappers │ │ │ │ ├── __init__.py │ │ │ │ ├── coco_unified_new_baseline_dataset_mapper.py │ │ │ │ ├── dataset_mapper.py │ │ │ │ └── oneformer_unified_dataset_mapper.py │ │ │ ├── datasets │ │ │ │ ├── __init__.py │ │ │ │ ├── register_ade20k_instance.py │ │ │ │ ├── register_ade20k_panoptic.py │ │ │ │ ├── register_cityscapes_panoptic.py │ │ │ │ ├── register_coco_panoptic2instance.py │ │ │ │ └── register_coco_panoptic_annos_semseg.py │ │ │ └── tokenizer.py │ │ ├── demo │ │ │ ├── colormap.py │ │ │ ├── defaults.py │ │ │ ├── predictor.py │ │ │ └── visualizer.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── cityscapes_evaluation.py │ │ │ ├── coco_evaluator.py │ │ │ ├── detection_coco_evaluator.py │ │ │ ├── evaluator.py │ │ │ └── instance_evaluation.py │ │ ├── modeling │ │ │ ├── __init__.py │ │ │ ├── backbone │ │ │ │ ├── __init__.py │ │ │ │ ├── dinat.py │ │ │ │ └── swin.py │ │ │ ├── matcher.py │ │ │ ├── meta_arch │ │ │ │ ├── __init__.py │ │ │ │ └── oneformer_head.py │ │ │ ├── pixel_decoder │ │ │ │ ├── __init__.py │ │ │ │ ├── fpn.py │ │ │ │ ├── msdeformattn.py │ │ │ │ └── ops │ │ │ │ │ ├── functions │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── ms_deform_attn_func.py │ │ │ │ │ ├── make.sh │ │ │ │ │ ├── modules │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── ms_deform_attn.py │ │ │ │ │ ├── setup.py │ │ │ │ │ ├── src │ │ │ │ │ ├── cpu │ │ │ │ │ │ ├── ms_deform_attn_cpu.cpp │ │ │ │ │ │ └── ms_deform_attn_cpu.h │ │ │ │ │ ├── cuda │ │ │ │ │ │ ├── ms_deform_attn_cuda.cu │ │ │ │ │ │ ├── ms_deform_attn_cuda.h │ │ │ │ │ │ └── ms_deform_im2col_cuda.cuh │ │ │ │ │ ├── ms_deform_attn.h │ │ │ │ │ └── vision.cpp │ │ │ │ │ └── test.py │ │ │ └── transformer_decoder │ │ │ │ ├── __init__.py │ │ │ │ ├── oneformer_transformer_decoder.py │ │ │ │ ├── position_encoding.py │ │ │ │ ├── text_transformer.py │ │ │ │ └── transformer.py │ │ ├── oneformer_model.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── box_ops.py │ │ │ ├── events.py │ │ │ ├── misc.py │ │ │ └── pos_embed.py │ ├── custom_pycocotools │ │ ├── __init__.py │ │ ├── coco.py │ │ ├── cocoeval.py │ │ └── mask.py │ ├── custom_qudida │ │ ├── LICENSE │ │ ├── __init__.py │ │ └── __version__.py │ ├── custom_timm │ │ ├── __init__.py │ │ ├── data │ │ │ ├── __init__.py │ │ │ ├── auto_augment.py │ │ │ ├── config.py │ │ │ ├── constants.py │ │ │ ├── dataset.py │ │ │ ├── dataset_factory.py │ │ │ ├── distributed_sampler.py │ │ │ ├── loader.py │ │ │ ├── mixup.py │ │ │ ├── parsers │ │ │ │ ├── __init__.py │ │ │ │ ├── class_map.py │ │ │ │ ├── img_extensions.py │ │ │ │ ├── parser.py │ │ │ │ ├── parser_factory.py │ │ │ │ ├── parser_image_folder.py │ │ │ │ ├── parser_image_in_tar.py │ │ │ │ ├── parser_image_tar.py │ │ │ │ └── parser_tfds.py │ │ │ ├── random_erasing.py │ │ │ ├── real_labels.py │ │ │ ├── tf_preprocessing.py │ │ │ ├── transforms.py │ │ │ └── transforms_factory.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ ├── asymmetric_loss.py │ │ │ ├── binary_cross_entropy.py │ │ │ ├── 
cross_entropy.py │ │ │ └── jsd.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── beit.py │ │ │ ├── byoanet.py │ │ │ ├── byobnet.py │ │ │ ├── cait.py │ │ │ ├── coat.py │ │ │ ├── convit.py │ │ │ ├── convmixer.py │ │ │ ├── convnext.py │ │ │ ├── crossvit.py │ │ │ ├── cspnet.py │ │ │ ├── deit.py │ │ │ ├── densenet.py │ │ │ ├── dla.py │ │ │ ├── dpn.py │ │ │ ├── edgenext.py │ │ │ ├── efficientformer.py │ │ │ ├── efficientnet.py │ │ │ ├── efficientnet_blocks.py │ │ │ ├── efficientnet_builder.py │ │ │ ├── factory.py │ │ │ ├── features.py │ │ │ ├── fx_features.py │ │ │ ├── gcvit.py │ │ │ ├── ghostnet.py │ │ │ ├── gluon_resnet.py │ │ │ ├── gluon_xception.py │ │ │ ├── hardcorenas.py │ │ │ ├── helpers.py │ │ │ ├── hrnet.py │ │ │ ├── hub.py │ │ │ ├── inception_resnet_v2.py │ │ │ ├── inception_v3.py │ │ │ ├── inception_v4.py │ │ │ ├── layers │ │ │ │ ├── __init__.py │ │ │ │ ├── activations.py │ │ │ │ ├── activations_jit.py │ │ │ │ ├── activations_me.py │ │ │ │ ├── adaptive_avgmax_pool.py │ │ │ │ ├── attention_pool2d.py │ │ │ │ ├── blur_pool.py │ │ │ │ ├── bottleneck_attn.py │ │ │ │ ├── cbam.py │ │ │ │ ├── classifier.py │ │ │ │ ├── cond_conv2d.py │ │ │ │ ├── config.py │ │ │ │ ├── conv2d_same.py │ │ │ │ ├── conv_bn_act.py │ │ │ │ ├── create_act.py │ │ │ │ ├── create_attn.py │ │ │ │ ├── create_conv2d.py │ │ │ │ ├── create_norm.py │ │ │ │ ├── create_norm_act.py │ │ │ │ ├── drop.py │ │ │ │ ├── eca.py │ │ │ │ ├── evo_norm.py │ │ │ │ ├── fast_norm.py │ │ │ │ ├── filter_response_norm.py │ │ │ │ ├── gather_excite.py │ │ │ │ ├── global_context.py │ │ │ │ ├── halo_attn.py │ │ │ │ ├── helpers.py │ │ │ │ ├── inplace_abn.py │ │ │ │ ├── lambda_layer.py │ │ │ │ ├── linear.py │ │ │ │ ├── median_pool.py │ │ │ │ ├── mixed_conv2d.py │ │ │ │ ├── ml_decoder.py │ │ │ │ ├── mlp.py │ │ │ │ ├── non_local_attn.py │ │ │ │ ├── norm.py │ │ │ │ ├── norm_act.py │ │ │ │ ├── padding.py │ │ │ │ ├── patch_embed.py │ │ │ │ ├── pool2d_same.py │ │ │ │ ├── pos_embed.py │ │ │ │ ├── selective_kernel.py │ │ │ │ ├── separable_conv.py │ │ │ │ ├── space_to_depth.py │ │ │ │ ├── split_attn.py │ │ │ │ ├── split_batchnorm.py │ │ │ │ ├── squeeze_excite.py │ │ │ │ ├── std_conv.py │ │ │ │ ├── test_time_pool.py │ │ │ │ ├── trace_utils.py │ │ │ │ └── weight_init.py │ │ │ ├── levit.py │ │ │ ├── maxxvit.py │ │ │ ├── mlp_mixer.py │ │ │ ├── mobilenetv3.py │ │ │ ├── mobilevit.py │ │ │ ├── mvitv2.py │ │ │ ├── nasnet.py │ │ │ ├── nest.py │ │ │ ├── nfnet.py │ │ │ ├── pit.py │ │ │ ├── pnasnet.py │ │ │ ├── poolformer.py │ │ │ ├── pruned │ │ │ │ ├── ecaresnet101d_pruned.txt │ │ │ │ ├── ecaresnet50d_pruned.txt │ │ │ │ ├── efficientnet_b1_pruned.txt │ │ │ │ ├── efficientnet_b2_pruned.txt │ │ │ │ └── efficientnet_b3_pruned.txt │ │ │ ├── pvt_v2.py │ │ │ ├── registry.py │ │ │ ├── regnet.py │ │ │ ├── res2net.py │ │ │ ├── resnest.py │ │ │ ├── resnet.py │ │ │ ├── resnetv2.py │ │ │ ├── rexnet.py │ │ │ ├── selecsls.py │ │ │ ├── senet.py │ │ │ ├── sequencer.py │ │ │ ├── sknet.py │ │ │ ├── swin_transformer.py │ │ │ ├── swin_transformer_v2.py │ │ │ ├── swin_transformer_v2_cr.py │ │ │ ├── tnt.py │ │ │ ├── tresnet.py │ │ │ ├── twins.py │ │ │ ├── vgg.py │ │ │ ├── visformer.py │ │ │ ├── vision_transformer.py │ │ │ ├── vision_transformer_hybrid.py │ │ │ ├── vision_transformer_relpos.py │ │ │ ├── volo.py │ │ │ ├── vovnet.py │ │ │ ├── xception.py │ │ │ ├── xception_aligned.py │ │ │ └── xcit.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── adabelief.py │ │ │ ├── adafactor.py │ │ │ ├── adahessian.py │ │ │ ├── adamp.py │ │ │ ├── adamw.py │ │ │ ├── lamb.py │ │ │ ├── lars.py │ │ │ ├── 
lookahead.py │ │ │ ├── madgrad.py │ │ │ ├── nadam.py │ │ │ ├── nvnovograd.py │ │ │ ├── optim_factory.py │ │ │ ├── radam.py │ │ │ ├── rmsprop_tf.py │ │ │ └── sgdp.py │ │ ├── scheduler │ │ │ ├── __init__.py │ │ │ ├── cosine_lr.py │ │ │ ├── multistep_lr.py │ │ │ ├── plateau_lr.py │ │ │ ├── poly_lr.py │ │ │ ├── scheduler.py │ │ │ ├── scheduler_factory.py │ │ │ ├── step_lr.py │ │ │ └── tanh_lr.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── agc.py │ │ │ ├── checkpoint_saver.py │ │ │ ├── clip_grad.py │ │ │ ├── cuda.py │ │ │ ├── decay_batch.py │ │ │ ├── distributed.py │ │ │ ├── jit.py │ │ │ ├── log.py │ │ │ ├── metrics.py │ │ │ ├── misc.py │ │ │ ├── model.py │ │ │ ├── model_ema.py │ │ │ ├── random.py │ │ │ └── summary.py │ │ └── version.py │ └── wrapper_for_mps │ │ └── __init__.py ├── tests │ ├── pose.png │ ├── test_cn_aux_full.json │ └── test_controlnet_aux.py └── utils.py ├── layer_diffuse ├── LICENSE ├── README.md ├── __init__.py ├── layered_diffusion.py └── lib_layerdiffusion │ ├── __init__.py │ ├── attention_sharing.py │ ├── enums.py │ ├── models.py │ └── utils.py └── prompt_expansion ├── LICENSE ├── README.md ├── __init__.py ├── prompt_expansion.py └── util.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI-Inference-Core-Nodes 2 | 3 | ## Installation 4 | 1. [Stability Matrix](https://github.com/LykosAI/StabilityMatrix) Extensions Manager 5 | 2. [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) 6 | 7 | ## Manual Installation 8 | 1. Clone this repository to `ComfyUI/custom_nodes/` 9 | 10 | 2. Either: 11 | - Run `install.py` using the venv or preferred python environment. 
12 | 
13 | Or
14 | 
15 | (Installs required dependencies and appropriate onnxruntime acceleration via compiled wheels)
16 | - (CUDA 11 or latest stable) Run `pip install -e .[cuda]`
17 | - (CUDA 12) Run `pip install -e .[cuda12]`
18 | - (ROCm) Run `pip install -e .[rocm]`
19 | - (DirectML) Run `pip install -e .[directml]`
20 | - (CPU Only) Run `pip install -e .[cpu]`
21 | 
22 | Or
23 | 
24 | (Installs only required dependencies without onnxruntime acceleration)
25 | - Run `pip install -e .`
26 | 
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from inference_core_nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
2 | 
3 | __all__ = ("NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS")
4 | 
--------------------------------------------------------------------------------
/install.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import subprocess
3 | 
4 | from pathlib import Path
5 | from typing import List
6 | 
7 | PARENT_DIR = Path(__file__).parent
8 | 
9 | 
10 | def pip_install(args: List[str]) -> None:
11 |     subprocess.check_call(
12 |         [sys.executable, "-m", "pip", "install"] + args, cwd=PARENT_DIR
13 |     )
14 | 
15 | 
16 | def try_get_cuda_version() -> str | None:
17 |     try:
18 |         import torch
19 |         return torch.version.cuda
20 |     except (ImportError, AttributeError):
21 |         return None
22 | 
23 | 
24 | def main() -> None:
25 | 
26 |     cuda_version = try_get_cuda_version()
27 |     if cuda_version is not None:
28 |         if cuda_version.startswith("12."):
29 |             pip_install(["-e", ".[cuda-12]"])
30 |         else:
31 |             pip_install(["-e", ".[cuda]"])
32 |     else:
33 |         # Default install
34 |         pip_install(["-e", ".[cpu]"])
35 | 
36 | 
37 | if __name__ == "__main__":
38 |     main()
39 | 
--------------------------------------------------------------------------------
/src/inference_core_nodes/comfyui_experiments/README.md:
--------------------------------------------------------------------------------
1 | ## ComfyUI Experiments
2 | 
3 | Based on or modified from: [comfyanonymous/ComfyUI_experiments](https://github.com/comfyanonymous/ComfyUI_experiments) @ 934dba9d206e4738e0dac26a09b51f1dffcb4e44
4 | 
5 | License: GPL-3.0
6 | 
7 | 
8 | 
--------------------------------------------------------------------------------
/src/inference_core_nodes/comfyui_experiments/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 | 
4 | node_list = [  # Add list of .py files containing nodes here
5 |     "advanced_model_merging",
6 |     "reference_only",
7 |     "sampler_rescalecfg",
8 |     "sampler_tonemap",
9 |     "sampler_tonemap_rescalecfg",
10 |     "sdxl_model_merging"
11 | ]
12 | 
13 | NODE_CLASS_MAPPINGS = {}
14 | NODE_DISPLAY_NAME_MAPPINGS = {}
15 | 
16 | for module_name in node_list:
17 |     imported_module = importlib.import_module(".{}".format(module_name), __name__)
18 | 
19 |     NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS}
20 |     if hasattr(imported_module, "NODE_DISPLAY_NAME_MAPPINGS"):
21 |         NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS}
22 | 
23 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
24 | 
--------------------------------------------------------------------------------
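The `__init__.py` above only requires that each module named in `node_list` define a `NODE_CLASS_MAPPINGS` dict, plus an optional `NODE_DISPLAY_NAME_MAPPINGS`; the loop then merges those dicts into the package-level mappings that ComfyUI ultimately reads. A minimal sketch of what such a module looks like, following the usual ComfyUI custom-node conventions; the file name `example_invert.py` and the `ExampleInvertLatent` node are hypothetical and not part of this repository:

```python
# Hypothetical node module (e.g. example_invert.py); it would only be loaded
# if its name were added to node_list in comfyui_experiments/__init__.py.

class ExampleInvertLatent:
    """Toy node that negates a latent, just to show the expected module shape."""

    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI builds the node's input sockets and widgets from this dict.
        return {"required": {"samples": ("LATENT",)}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "invert"
    CATEGORY = "latent"

    def invert(self, samples):
        out = samples.copy()
        out["samples"] = -samples["samples"]  # negate the latent tensor
        return (out,)


# The loader above merges these into the package-level mappings.
NODE_CLASS_MAPPINGS = {"ExampleInvertLatent": ExampleInvertLatent}
NODE_DISPLAY_NAME_MAPPINGS = {"ExampleInvertLatent": "Example Invert Latent"}
```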
-------------------------------------------------------------------------------- 1 | import comfy_extras.nodes_model_merging 2 | 3 | class ModelMergeBlockNumber(comfy_extras.nodes_model_merging.ModelMergeBlocks): 4 | @classmethod 5 | def INPUT_TYPES(s): 6 | arg_dict = { "model1": ("MODEL",), 7 | "model2": ("MODEL",)} 8 | 9 | argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) 10 | 11 | arg_dict["time_embed."] = argument 12 | arg_dict["label_emb."] = argument 13 | 14 | for i in range(12): 15 | arg_dict["input_blocks.{}.".format(i)] = argument 16 | 17 | for i in range(3): 18 | arg_dict["middle_block.{}.".format(i)] = argument 19 | 20 | for i in range(12): 21 | arg_dict["output_blocks.{}.".format(i)] = argument 22 | 23 | arg_dict["out."] = argument 24 | 25 | return {"required": arg_dict} 26 | 27 | 28 | NODE_CLASS_MAPPINGS = { 29 | "ModelMergeBlockNumber": ModelMergeBlockNumber, 30 | } 31 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/NotoSans-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/NotoSans-Regular.ttf -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/dev_interface.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from utils import here 3 | import sys 4 | sys.path.append(str(Path(here, "src"))) 5 | 6 | from custom_controlnet_aux import * -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_animal_pose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_animal_pose.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_anime_face_segmentor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_anime_face_segmentor.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_densepose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_densepose.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_depth_anything.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_depth_anything.png 
-------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_marigold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_marigold.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_marigold_flat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_marigold_flat.jpg -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_mesh_graphormer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_mesh_graphormer.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_onnx.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_onnx.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_recolor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_recolor.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_save_kps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_save_kps.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_teed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_teed.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_torchscript.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_torchscript.png 
-------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/examples/example_unimatch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/examples/example_unimatch.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/node_wrappers/binary.py: -------------------------------------------------------------------------------- 1 | from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs 2 | import comfy.model_management as model_management 3 | 4 | class Binary_Preprocessor: 5 | @classmethod 6 | def INPUT_TYPES(s): 7 | return define_preprocessor_inputs( 8 | bin_threshold=INPUT.INT(default=100, max=255), 9 | resolution=INPUT.RESOLUTION() 10 | ) 11 | 12 | RETURN_TYPES = ("IMAGE",) 13 | FUNCTION = "execute" 14 | 15 | CATEGORY = "ControlNet Preprocessors/Line Extractors" 16 | 17 | def execute(self, image, bin_threshold=100, resolution=512, **kwargs): 18 | from custom_controlnet_aux.binary import BinaryDetector 19 | 20 | return (common_annotator_call(BinaryDetector(), image, bin_threshold=bin_threshold, resolution=resolution), ) 21 | 22 | 23 | 24 | NODE_CLASS_MAPPINGS = { 25 | "BinaryPreprocessor": Binary_Preprocessor 26 | } 27 | NODE_DISPLAY_NAME_MAPPINGS = { 28 | "BinaryPreprocessor": "Binary Lines" 29 | } -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/node_wrappers/color.py: -------------------------------------------------------------------------------- 1 | from ..utils import common_annotator_call, INPUT, define_preprocessor_inputs 2 | import comfy.model_management as model_management 3 | 4 | class Color_Preprocessor: 5 | @classmethod 6 | def INPUT_TYPES(s): 7 | return define_preprocessor_inputs(resolution=INPUT.RESOLUTION()) 8 | 9 | RETURN_TYPES = ("IMAGE",) 10 | FUNCTION = "execute" 11 | 12 | CATEGORY = "ControlNet Preprocessors/T2IAdapter-only" 13 | 14 | def execute(self, image, resolution=512, **kwargs): 15 | from custom_controlnet_aux.color import ColorDetector 16 | 17 | return (common_annotator_call(ColorDetector(), image, resolution=resolution), ) 18 | 19 | 20 | 21 | NODE_CLASS_MAPPINGS = { 22 | "ColorPreprocessor": Color_Preprocessor 23 | } 24 | NODE_DISPLAY_NAME_MAPPINGS = { 25 | "ColorPreprocessor": "Color Palette" 26 | } -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/node_wrappers/segment_anything.py: -------------------------------------------------------------------------------- 1 | from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT 2 | import comfy.model_management as model_management 3 | 4 | class SAM_Preprocessor: 5 | @classmethod 6 | def INPUT_TYPES(s): 7 | return define_preprocessor_inputs(resolution=INPUT.RESOLUTION()) 8 | 9 | RETURN_TYPES = ("IMAGE",) 10 | FUNCTION = "execute" 11 | 12 | CATEGORY = "ControlNet Preprocessors/others" 13 | 14 | def execute(self, image, resolution=512, **kwargs): 15 | from custom_controlnet_aux.sam import SamDetector 16 | 17 | mobile_sam = SamDetector.from_pretrained().to(model_management.get_torch_device()) 18 | out = common_annotator_call(mobile_sam, image, resolution=resolution) 19 | del
mobile_sam 20 | return (out, ) 21 | 22 | NODE_CLASS_MAPPINGS = { 23 | "SAMPreprocessor": SAM_Preprocessor 24 | } 25 | NODE_DISPLAY_NAME_MAPPINGS = { 26 | "SAMPreprocessor": "SAM Segmentor" 27 | } -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/node_wrappers/shuffle.py: -------------------------------------------------------------------------------- 1 | from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT, MAX_RESOLUTION 2 | import comfy.model_management as model_management 3 | 4 | class Shuffle_Preprocessor: 5 | @classmethod 6 | def INPUT_TYPES(s): 7 | return define_preprocessor_inputs( 8 | resolution=INPUT.RESOLUTION(), 9 | seed=INPUT.SEED() 10 | ) 11 | RETURN_TYPES = ("IMAGE",) 12 | FUNCTION = "preprocess" 13 | 14 | CATEGORY = "ControlNet Preprocessors/T2IAdapter-only" 15 | 16 | def preprocess(self, image, resolution=512, seed=0): 17 | from custom_controlnet_aux.shuffle import ContentShuffleDetector 18 | 19 | return (common_annotator_call(ContentShuffleDetector(), image, resolution=resolution, seed=seed), ) 20 | 21 | NODE_CLASS_MAPPINGS = { 22 | "ShufflePreprocessor": Shuffle_Preprocessor 23 | } 24 | 25 | NODE_DISPLAY_NAME_MAPPINGS = { 26 | "ShufflePreprocessor": "Content Shuffle" 27 | } -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/node_wrappers/zoe.py: -------------------------------------------------------------------------------- 1 | from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT 2 | import comfy.model_management as model_management 3 | 4 | class Zoe_Depth_Map_Preprocessor: 5 | @classmethod 6 | def INPUT_TYPES(s): 7 | return define_preprocessor_inputs(resolution=INPUT.RESOLUTION()) 8 | 9 | RETURN_TYPES = ("IMAGE",) 10 | FUNCTION = "execute" 11 | 12 | CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators" 13 | 14 | def execute(self, image, resolution=512, **kwargs): 15 | from custom_controlnet_aux.zoe import ZoeDetector 16 | 17 | model = ZoeDetector.from_pretrained().to(model_management.get_torch_device()) 18 | out = common_annotator_call(model, image, resolution=resolution) 19 | del model 20 | return (out, ) 21 | 22 | NODE_CLASS_MAPPINGS = { 23 | "Zoe-DepthMapPreprocessor": Zoe_Depth_Map_Preprocessor 24 | } 25 | NODE_DISPLAY_NAME_MAPPINGS = { 26 | "Zoe-DepthMapPreprocessor": "Zoe Depth Map" 27 | } -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui_controlnet_aux" 3 | description = "Plug-and-play ComfyUI node sets for making ControlNet hint images" 4 | 5 | version = "1.0.7" 6 | dependencies = ["torch", "importlib_metadata", "huggingface_hub", "scipy", "opencv-python>=4.7.0.72", "filelock", "numpy", "Pillow", "einops", "torchvision", "pyyaml", "scikit-image", "python-dateutil", "mediapipe", "svglib", "fvcore", "yapf", "omegaconf", "ftfy", "addict", "yacs", "trimesh[easy]", "albumentations", "scikit-learn", "matplotlib"] 7 | 8 | [project.urls] 9 | Repository = "https://github.com/Fannovel16/comfyui_controlnet_aux" 10 | 11 | [tool.comfy] 12 | PublisherId = "fannovel16" 13 | DisplayName = "comfyui_controlnet_aux" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- 
/src/inference_core_nodes/controlnet_preprocessors/requirements.txt: -------------------------------------------------------------------------------- 1 | torch 2 | importlib_metadata 3 | huggingface_hub 4 | scipy 5 | opencv-python>=4.7.0.72 6 | filelock 7 | numpy 8 | Pillow 9 | einops 10 | torchvision 11 | pyyaml 12 | scikit-image 13 | python-dateutil 14 | mediapipe 15 | svglib 16 | fvcore 17 | yapf 18 | omegaconf 19 | ftfy 20 | addict 21 | yacs 22 | trimesh[easy] 23 | albumentations 24 | scikit-learn 25 | matplotlib 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/canny/__init__.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import cv2 3 | import numpy as np 4 | from PIL import Image 5 | from controlnet_aux.util import resize_image_with_pad, common_input_validate, HWC3 6 | 7 | class CannyDetector: 8 | def __call__(self, input_image=None, low_threshold=100, high_threshold=200, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): 9 | input_image, output_type = common_input_validate(input_image, output_type, **kwargs) 10 | detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) 11 | detected_map = cv2.Canny(detected_map, low_threshold, high_threshold) 12 | detected_map = HWC3(remove_pad(detected_map)) 13 | 14 | if output_type == "pil": 15 | detected_map = Image.fromarray(detected_map) 16 | 17 | return detected_map 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/README.md: -------------------------------------------------------------------------------- 1 | # Local PyTorch Hub 2 | 3 | This directory is for loading the DINOv2 encoder locally in case of no Internet connection. 
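A minimal offline-loading sketch (not part of this repository) of how a local torch.hub directory like this one can be used to load the DINOv2 encoder without network access; the directory path and the "dinov2_vits14" entry-point name below are illustrative assumptions.

```python
# Illustrative sketch only: load DINOv2 from a local torch.hub checkout.
# The path and entry-point name are assumptions; adjust them to the actual layout.
import torch

LOCAL_HUB_DIR = "controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main"  # hypothetical path

encoder = torch.hub.load(
    LOCAL_HUB_DIR,      # local directory containing hubconf.py
    "dinov2_vits14",    # hub entry point assumed to be defined in hubconf.py
    source="local",     # never reach out to GitHub
    pretrained=False,   # weights are supplied separately when fully offline
)
encoder.eval()
```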
4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/conda.yaml: -------------------------------------------------------------------------------- 1 | name: dinov2 2 | channels: 3 | - defaults 4 | - pytorch 5 | - nvidia 6 | - xformers 7 | - conda-forge 8 | dependencies: 9 | - python=3.9 10 | - pytorch::pytorch=2.0.0 11 | - pytorch::pytorch-cuda=11.7.0 12 | - pytorch::torchvision=0.15.0 13 | - omegaconf 14 | - torchmetrics=0.10.3 15 | - fvcore 16 | - iopath 17 | - xformers::xformers=0.0.18 18 | - pip 19 | - pip: 20 | - git+https://github.com/facebookincubator/submitit 21 | - --extra-index-url https://pypi.nvidia.com 22 | - cuml-cu11 23 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | __version__ = "0.0.1" 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | import pathlib 8 | 9 | from omegaconf import OmegaConf 10 | 11 | 12 | def load_config(config_name: str): 13 | config_filename = config_name + ".yaml" 14 | return OmegaConf.load(pathlib.Path(__file__).parent.resolve() / config_filename) 15 | 16 | 17 | dinov2_default_config = load_config("ssl_default_config") 18 | 19 | 20 | def load_and_merge_config(config_name: str): 21 | default_config = OmegaConf.create(dinov2_default_config) 22 | loaded_config = load_config(config_name) 23 | return OmegaConf.merge(default_config, loaded_config) 24 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_base 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 518 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_giant2 3 | patch_size: 14 4 | ffn_layer: swiglufused 5 | crops: 6 | global_crops_size: 518 # this is to set up the position embeddings properly 7 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_large 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 518 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_small 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 518 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitg14.yaml: -------------------------------------------------------------------------------- 1 | dino: 2 | head_n_prototypes: 131072 3 | head_bottleneck_dim: 384 4 | ibot: 5 | separate_head: true 6 | head_n_prototypes: 131072 7 | train: 8 | batch_size_per_gpu: 12 9 | dataset_path: ImageNet22k 10 | centering: sinkhorn_knopp 11 | student: 12 | arch: vit_giant2 13 | patch_size: 14 14 | drop_path_rate: 0.4 15 | ffn_layer: swiglufused 16 | block_chunks: 4 17 | teacher: 18 | momentum_teacher: 0.994 19 | optim: 20 | epochs: 500 21 | weight_decay_end: 0.2 22 | base_lr: 2.0e-04 # learning rate for a batch size of 1024 23 | warmup_epochs: 80 24 | layerwise_decay: 1.0 25 | crops: 26 | local_crops_size: 98 
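A short usage sketch (not part of the repository) for the OmegaConf config helpers defined in dinov2/configs/__init__.py above; the relative preset name and the import path are assumptions based on the directory layout shown in this dump.

```python
# Hypothetical usage of load_and_merge_config() from dinov2/configs/__init__.py.
# Assumes the vendored "dinov2" package is importable and that eval presets sit
# under configs/eval/ as in the tree above.
from dinov2.configs import load_and_merge_config

# Merges ssl_default_config.yaml with the vitb14 eval preset shown above.
cfg = load_and_merge_config("eval/vitb14_pretrain")
print(cfg.student.arch, cfg.crops.global_crops_size)  # expected: vit_base 518
```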
-------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl14.yaml: -------------------------------------------------------------------------------- 1 | dino: 2 | head_n_prototypes: 131072 3 | head_bottleneck_dim: 384 4 | ibot: 5 | separate_head: true 6 | head_n_prototypes: 131072 7 | train: 8 | batch_size_per_gpu: 32 9 | dataset_path: ImageNet22k 10 | centering: sinkhorn_knopp 11 | student: 12 | arch: vit_large 13 | patch_size: 14 14 | drop_path_rate: 0.4 15 | ffn_layer: swiglufused 16 | block_chunks: 4 17 | teacher: 18 | momentum_teacher: 0.994 19 | optim: 20 | epochs: 500 21 | weight_decay_end: 0.2 22 | base_lr: 2.0e-04 # learning rate for a batch size of 1024 23 | warmup_epochs: 80 24 | layerwise_decay: 1.0 25 | crops: 26 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl16_short.yaml: -------------------------------------------------------------------------------- 1 | # this corresponds to the default config 2 | train: 3 | dataset_path: ImageNet:split=TRAIN 4 | batch_size_per_gpu: 64 5 | student: 6 | block_chunks: 4 7 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .adapters import DatasetWithEnumeratedTargets 8 | from .loaders import make_data_loader, make_dataset, SamplerType 9 | from .collate import collate_data_and_cast 10 | from .masking import MaskingGenerator 11 | from .augmentations import DataAugmentationDINO 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .image_net import ImageNet 8 | from .image_net_22k import ImageNet22k 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | from io import BytesIO 8 | from typing import Any 9 | 10 | from PIL import Image 11 | 12 | 13 | class Decoder: 14 | def decode(self) -> Any: 15 | raise NotImplementedError 16 | 17 | 18 | class ImageDataDecoder(Decoder): 19 | def __init__(self, image_data: bytes) -> None: 20 | self._image_data = image_data 21 | 22 | def decode(self) -> Image: 23 | f = BytesIO(self._image_data) 24 | return Image.open(f).convert(mode="RGB") 25 | 26 | 27 | class TargetDecoder(Decoder): 28 | def __init__(self, target: Any): 29 | self._target = target 30 | 31 | def decode(self) -> Any: 32 | return self._target 33 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/eval/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .dino_head import DINOHead 8 | from .mlp import Mlp 9 | from .patch_embed import PatchEmbed 10 | from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused 11 | from .block import NestedTensorBlock 12 | from .attention import MemEffAttention 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/loss/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .dino_clstoken_loss import DINOLoss 8 | from .ibot_patch_loss import iBOTPatchLoss 9 | from .koleo_loss import KoLeoLoss 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/run/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/train/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 
3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .train import get_args_parser, main 8 | from .ssl_meta_arch import SSLMetaArch 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 120 3 | 4 | [tool.pylint.master] 5 | persistent = false 6 | score = false 7 | 8 | [tool.pylint.messages_control] 9 | disable = "all" 10 | enable = [ 11 | "miscellaneous", 12 | "similarities", 13 | ] 14 | 15 | [tool.pylint.similarities] 16 | ignore-comments = true 17 | ignore-docstrings = true 18 | ignore-imports = true 19 | min-similarity-lines = 8 20 | 21 | [tool.pylint.reports] 22 | reports = false 23 | 24 | [tool.pylint.miscellaneous] 25 | notes = [ 26 | "FIXME", 27 | "XXX", 28 | "TODO", 29 | ] 30 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | black==22.6.0 2 | flake8==5.0.4 3 | pylint==2.15.0 4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/requirements.txt: -------------------------------------------------------------------------------- 1 | --extra-index-url https://download.pytorch.org/whl/cu117 2 | torch==2.0.0 3 | torchvision==0.15.0 4 | omegaconf 5 | torchmetrics==0.10.3 6 | fvcore 7 | iopath 8 | xformers==0.0.18 9 | submitit 10 | --extra-index-url https://pypi.nvidia.com 11 | cuml-cu11 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/scripts/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -n "$1" ]; then 4 | echo "linting \"$1\"" 5 | fi 6 | 7 | echo "running black" 8 | if [ -n "$1" ]; then 9 | black "$1" 10 | else 11 | black dinov2 12 | fi 13 | 14 | echo "running flake8" 15 | if [ -n "$1" ]; then 16 | flake8 "$1" 17 | else 18 | flake8 19 | fi 20 | 21 | echo "running pylint" 22 | if [ -n "$1" ]; then 23 | pylint "$1" 24 | else 25 | pylint dinov2 26 | fi 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/setup.cfg: 
-------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | ignore = E203,E501,W503 4 | per-file-ignores = 5 | __init__.py:F401 6 | exclude = 7 | venv 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/denoising_diffusion_pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | # from controlnet_aux.diffusion_edge.denoising_diffusion_pytorch.denoising_diffusion_pytorch import GaussianDiffusion, Unet, Trainer 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/requirement.txt: -------------------------------------------------------------------------------- 1 | #torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113 2 | einops 3 | scikit-learn 4 | scipy 5 | tensorboard 6 | fvcore 7 | albumentations 8 | omegaconf 9 | numpy==1.23.5 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/taming/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/taming/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/taming/modules/autoencoder/lpips/vgg.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/taming/modules/autoencoder/lpips/vgg.pth -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/taming/modules/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from controlnet_aux.diffusion_edge.taming.modules.losses.vqperceptual import DummyLoss 2 | 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/diffusion_edge/taming/modules/losses/segmentation.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | class BCELoss(nn.Module): 6 | def forward(self, prediction, target): 7 | loss = F.binary_cross_entropy_with_logits(prediction,target) 8 | return loss, {} 9 | 10 | 11 | class BCELossWithQuant(nn.Module): 12 | def __init__(self, codebook_weight=1.): 13 | super().__init__() 14 | self.codebook_weight = codebook_weight 15 | 16 | def forward(self, qloss, target, prediction, split): 17 | bce_loss = F.binary_cross_entropy_with_logits(prediction,target) 18 | loss = bce_loss + self.codebook_weight*qloss 19 | return loss, {"{}/total_loss".format(split): loss.clone().detach().mean(), 20 | "{}/bce_loss".format(split): bce_loss.detach().mean(), 21 | 
"{}/quant_loss".format(split): qloss.detach().mean() 22 | } 23 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/dwpose/dw_onnx/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/dwpose/dw_torchscript/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/dwpose/types.py: -------------------------------------------------------------------------------- 1 | from typing import NamedTuple, List, Optional 2 | 3 | class Keypoint(NamedTuple): 4 | x: float 5 | y: float 6 | score: float = 1.0 7 | id: int = -1 8 | 9 | 10 | class BodyResult(NamedTuple): 11 | # Note: Using `Optional` instead of `|` operator as the ladder is a Python 12 | # 3.10 feature. 13 | # Annotator code should be Python 3.8 Compatible, as controlnet repo uses 14 | # Python 3.8 environment. 15 | # https://github.com/lllyasviel/ControlNet/blob/d3284fcd0972c510635a4f5abe2eeb71dc0de524/environment.yaml#L6 16 | keypoints: List[Optional[Keypoint]] 17 | total_score: float = 0.0 18 | total_parts: int = 0 19 | 20 | 21 | HandResult = List[Keypoint] 22 | FaceResult = List[Keypoint] 23 | 24 | 25 | class PoseResult(NamedTuple): 26 | body: BodyResult 27 | left_hand: Optional[HandResult] 28 | right_hand: Optional[HandResult] 29 | face: Optional[FaceResult] 30 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/leres/leres/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/leres/leres/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/leres/pix2pix/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/leres/pix2pix/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/leres/pix2pix/options/__init__.py: -------------------------------------------------------------------------------- 1 | """This package options includes option modules: training options, test options, and basic options (used in both training and test).""" 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/leres/pix2pix/util/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes a miscellaneous collection of useful helper functions.""" 2 | 
-------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/mesh_graphormer/depth_preprocessor.py: -------------------------------------------------------------------------------- 1 | class Preprocessor: 2 | def __init__(self) -> None: 3 | pass 4 | 5 | def get_depth(self, input_dir, file_name): 6 | return -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/mesh_graphormer/hand_landmarker.task: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/mesh_graphormer/hand_landmarker.task -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/mlsd/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/mlsd/models/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/NNET.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from .submodules.encoder import Encoder 6 | from .submodules.decoder import Decoder 7 | 8 | 9 | class NNET(nn.Module): 10 | def __init__(self, args): 11 | super(NNET, self).__init__() 12 | self.encoder = Encoder() 13 | self.decoder = Decoder(args) 14 | 15 | def get_1x_lr_params(self): # lr/10 learning rate 16 | return self.encoder.parameters() 17 | 18 | def get_10x_lr_params(self): # lr learning rate 19 | return self.decoder.parameters() 20 | 21 | def forward(self, img, **kwargs): 22 | return self.decoder(self.encoder(img), **kwargs) -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/__init__.py: -------------------------------------------------------------------------------- 1 | from .gen_efficientnet import * 2 | from .mobilenetv3 import * 3 | from .model_factory import create_model 4 | from .config import is_exportable, is_scriptable, set_exportable, set_scriptable 5 | from .activations import * -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/model_factory.py: -------------------------------------------------------------------------------- 1 | from .config import set_layer_config 2 | from .helpers import load_checkpoint 3 | 4 | from .gen_efficientnet import * 5 | from .mobilenetv3 import * 6 | 7 | 8 | def create_model( 9 | model_name='mnasnet_100', 10 | pretrained=None, 11 | num_classes=1000, 12 | in_chans=3, 13 | checkpoint_path='', 14 | **kwargs): 15 | 16 | model_kwargs = dict(num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs) 17 | 18 | if model_name in globals(): 19 | create_fn = globals()[model_name] 20 | model = create_fn(**model_kwargs) 21 | else: 22 | raise RuntimeError('Unknown model (%s)' % model_name) 23 | 24 | if checkpoint_path and not pretrained: 25 | load_checkpoint(model, checkpoint_path) 26 | 27 | return model 28 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '1.0.2' 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.2.0 2 | torchvision>=0.4.0 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: oneformer_R50_bs16_50ep.yaml 2 | MODEL: 3 | BACKBONE: 4 | NAME: "D2SwinTransformer" 5 | SWIN: 6 | EMBED_DIM: 192 7 | DEPTHS: [2, 2, 18, 2] 8 | NUM_HEADS: [6, 12, 24, 48] 9 | WINDOW_SIZE: 12 10 | APE: False 11 | DROP_PATH_RATE: 0.3 12 | PATCH_NORM: True 13 | PRETRAIN_IMG_SIZE: 384 14 | WEIGHTS: "swin_large_patch4_window12_384_22k.pkl" 15 | PIXEL_MEAN: [123.675, 116.280, 103.530] 16 | PIXEL_STD: [58.395, 57.120, 57.375] 17 | ONE_FORMER: 18 | NUM_OBJECT_QUERIES: 150 19 | SOLVER: 20 | STEPS: (655556, 735184) 21 | MAX_ITER: 737500 22 | AMP: 23 | ENABLED: False 24 | TEST: 25 | DETECTIONS_PER_IMAGE: 150 26 | -------------------------------------------------------------------------------- 
/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/sam/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .sam import Sam 8 | from .image_encoder import ImageEncoderViT 9 | from .mask_decoder import MaskDecoder 10 | from .prompt_encoder import PromptEncoder 11 | from .transformer import TwoWayTransformer 12 | from .tiny_vit_sam import TinyViT 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/sam/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/teed/Fmish.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script provides functional interface for Mish activation function. 3 | """ 4 | 5 | # import pytorch 6 | import torch 7 | import torch.nn.functional as F 8 | 9 | 10 | @torch.jit.script 11 | def mish(input): 12 | """ 13 | Applies the mish function element-wise: 14 | mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x))) 15 | See additional documentation for mish class. 16 | """ 17 | return input * torch.tanh(F.softplus(input)) -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/teed/Fsmish.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script based on: 3 | Wang, Xueliang, Honge Ren, and Achuan Wang. 4 | "Smish: A Novel Activation Function for Deep Learning Methods. 5 | " Electronics 11.4 (2022): 540. 6 | """ 7 | 8 | # import pytorch 9 | import torch 10 | import torch.nn.functional as F 11 | 12 | 13 | @torch.jit.script 14 | def smish(input): 15 | """ 16 | Applies the smish function element-wise: 17 | smish(x) = x * tanh(ln(1 + sigmoid(x))) 18 | See additional documentation for smish class.
19 | """ 20 | return input * torch.tanh(torch.log(1+torch.sigmoid(input))) -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/tests/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/tests/requirements.txt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/tests/test_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/tests/test_image.png -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12_aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './pascal_voc12.py' 2 | # dataset settings 3 | data = dict( 4 | train=dict( 5 | ann_dir=['SegmentationClass', 'SegmentationClassAug'], 6 | split=[ 7 | 'ImageSets/Segmentation/train.txt', 8 | 'ImageSets/Segmentation/aug.txt' 9 | ])) 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # yapf:disable 2 | log_config = dict( 3 | interval=50, 4 | hooks=[ 5 | dict(type='TextLoggerHook', by_epoch=False), 6 | # dict(type='TensorboardLoggerHook') 7 | ]) 8 | # yapf:enable 9 | dist_params = dict(backend='nccl') 10 | log_level = 'INFO' 11 | load_from = None 12 | resume_from = None 13 | workflow = [('train', 1)] 14 | cudnn_benchmark = True 15 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/configs/_base_/models/lraspp_m-v3-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='MobileNetV3', 7 | arch='large', 8 | out_indices=(1, 3, 16), 9 | norm_cfg=norm_cfg), 10 | decode_head=dict( 11 | type='LRASPPHead', 12 | in_channels=(16, 24, 960), 13 | in_index=(0, 1, 2), 14 | channels=128, 15 | input_transform='multiple_select', 16 | dropout_ratio=0.1, 17 | num_classes=19, 18 | norm_cfg=norm_cfg, 19 | act_cfg=dict(type='ReLU'), 20 | align_corners=False, 21 | loss_decode=dict( 22 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 23 | # model training and testing settings 24 | train_cfg=dict(), 25 | test_cfg=dict(mode='whole')) 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, 
weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=160000) 8 | checkpoint_config = dict(by_epoch=False, interval=16000) 9 | evaluation = dict(interval=16000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=20000) 8 | checkpoint_config = dict(by_epoch=False, interval=2000) 9 | evaluation = dict(interval=2000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=40000) 8 | checkpoint_config = dict(by_epoch=False, interval=4000) 9 | evaluation = dict(interval=4000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_80k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=80000) 8 | checkpoint_config = dict(by_epoch=False, interval=8000) 9 | evaluation = dict(interval=8000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/uniformer/mmcv_custom/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .checkpoint import load_checkpoint 4 | 5 | __all__ = ['load_checkpoint'] -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/unimatch/unimatch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/unimatch/unimatch/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/zoe/zoedepth/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/zoe/zoedepth/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "bin_centers_type": "normed", 4 | "img_size": [384, 768] 5 | }, 6 | 7 | "train": { 8 | }, 9 | 10 | "infer":{ 11 | "train_midas": false, 12 | "use_pretrained_midas": false, 13 | "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt", 14 | "force_keep_ar": true 15 | }, 16 | 17 | "eval":{ 18 | "train_midas": false, 19 | "use_pretrained_midas": false, 20 | "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt" 21 | } 22 | } -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/controlnet_aux/zoe/zoedepth/utils/arg_utils.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | def infer_type(x): # hacky way to infer type from string args 4 | if not isinstance(x, str): 5 | return x 6 | 7 | try: 8 | x = int(x) 9 | return x 10 | except ValueError: 11 | pass 12 | 13 | try: 14 | x = float(x) 15 | return x 16 | except ValueError: 17 | pass 18 | 19 | return x 20 | 21 | 22 | def parse_unknown(unknown_args): 23 | clean = [] 24 | for a in unknown_args: 25 | if "=" in a: 26 | k, v = a.split("=") 27 | clean.extend([k, v]) 28 | else: 29 | clean.append(a) 30 | 31 | keys = clean[::2] 32 | values = clean[1::2] 33 | return {k.replace("--", ""): infer_type(v) for k, v in zip(keys, values)} 34 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | __version__ = "1.3.1" 4 | 5 | from .augmentations import * 6 | from .core.composition import * 7 | from .core.serialization import * 8 | from .core.transforms_interface import * 9 | 10 | try: 11 | from .imgaug.transforms import * # type: ignore 12 | except ImportError: 13 | # imgaug is not installed by default, so we import stubs. 
14 | # Run `pip install -U albumentations[imgaug] if you need augmentations from imgaug.` 15 | from .imgaug.stubs import * # type: ignore 16 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/augmentations/__init__.py: -------------------------------------------------------------------------------- 1 | # Common classes 2 | from .blur.functional import * 3 | from .blur.transforms import * 4 | from .crops.functional import * 5 | from .crops.transforms import * 6 | 7 | # New transformations goes to individual files listed below 8 | from .domain_adaptation import * 9 | from .dropout.channel_dropout import * 10 | from .dropout.coarse_dropout import * 11 | from .dropout.cutout import * 12 | from .dropout.functional import * 13 | from .dropout.grid_dropout import * 14 | from .dropout.mask_dropout import * 15 | from .functional import * 16 | from .geometric.functional import * 17 | from .geometric.resize import * 18 | from .geometric.rotate import * 19 | from .geometric.transforms import * 20 | from .transforms import * 21 | from .utils import * 22 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/augmentations/blur/__init__.py: -------------------------------------------------------------------------------- 1 | from .functional import * 2 | from .transforms import * 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/augmentations/crops/__init__.py: -------------------------------------------------------------------------------- 1 | from .functional import * 2 | from .transforms import * 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/augmentations/dropout/__init__.py: -------------------------------------------------------------------------------- 1 | from .channel_dropout import * 2 | from .coarse_dropout import * 3 | from .cutout import * 4 | from .grid_dropout import * 5 | from .mask_dropout import * 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/augmentations/geometric/__init__.py: -------------------------------------------------------------------------------- 1 | from .functional import * 2 | from .resize import * 3 | from .rotate import * 4 | from .transforms import * 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/core/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/imgaug/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/imgaug/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_albumentations/pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .transforms import * 4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/canny/__init__.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import cv2 3 | import numpy as np 4 | from PIL import Image 5 | from custom_controlnet_aux.util import resize_image_with_pad, common_input_validate, HWC3 6 | 7 | class CannyDetector: 8 | def __call__(self, input_image=None, low_threshold=100, high_threshold=200, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): 9 | input_image, output_type = common_input_validate(input_image, output_type, **kwargs) 10 | detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) 11 | detected_map = cv2.Canny(detected_map, low_threshold, high_threshold) 12 | detected_map = HWC3(remove_pad(detected_map)) 13 | 14 | if output_type == "pil": 15 | detected_map = Image.fromarray(detected_map) 16 | 17 | return detected_map 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/README.md: -------------------------------------------------------------------------------- 1 | # Local PyTorch Hub 2 | 3 | This directory is for loading the DINOv2 encoder locally in case of no Internet connection. 4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/conda.yaml: -------------------------------------------------------------------------------- 1 | name: dinov2 2 | channels: 3 | - defaults 4 | - pytorch 5 | - nvidia 6 | - xformers 7 | - conda-forge 8 | dependencies: 9 | - python=3.9 10 | - pytorch::pytorch=2.0.0 11 | - pytorch::pytorch-cuda=11.7.0 12 | - pytorch::torchvision=0.15.0 13 | - omegaconf 14 | - torchmetrics=0.10.3 15 | - fvcore 16 | - iopath 17 | - xformers::xformers=0.0.18 18 | - pip 19 | - pip: 20 | - git+https://github.com/facebookincubator/submitit 21 | - --extra-index-url https://pypi.nvidia.com 22 | - cuml-cu11 23 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 
3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | __version__ = "0.0.1" 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import pathlib 8 | 9 | from omegaconf import OmegaConf 10 | 11 | 12 | def load_config(config_name: str): 13 | config_filename = config_name + ".yaml" 14 | return OmegaConf.load(pathlib.Path(__file__).parent.resolve() / config_filename) 15 | 16 | 17 | dinov2_default_config = load_config("ssl_default_config") 18 | 19 | 20 | def load_and_merge_config(config_name: str): 21 | default_config = OmegaConf.create(dinov2_default_config) 22 | loaded_config = load_config(config_name) 23 | return OmegaConf.merge(default_config, loaded_config) 24 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_base 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 518 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_giant2 3 | patch_size: 14 4 | ffn_layer: swiglufused 5 | crops: 6 | global_crops_size: 518 # this is to set up the position embeddings properly 7 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_large 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 518 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_small 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 518 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitg14.yaml: 
-------------------------------------------------------------------------------- 1 | dino: 2 | head_n_prototypes: 131072 3 | head_bottleneck_dim: 384 4 | ibot: 5 | separate_head: true 6 | head_n_prototypes: 131072 7 | train: 8 | batch_size_per_gpu: 12 9 | dataset_path: ImageNet22k 10 | centering: sinkhorn_knopp 11 | student: 12 | arch: vit_giant2 13 | patch_size: 14 14 | drop_path_rate: 0.4 15 | ffn_layer: swiglufused 16 | block_chunks: 4 17 | teacher: 18 | momentum_teacher: 0.994 19 | optim: 20 | epochs: 500 21 | weight_decay_end: 0.2 22 | base_lr: 2.0e-04 # learning rate for a batch size of 1024 23 | warmup_epochs: 80 24 | layerwise_decay: 1.0 25 | crops: 26 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl14.yaml: -------------------------------------------------------------------------------- 1 | dino: 2 | head_n_prototypes: 131072 3 | head_bottleneck_dim: 384 4 | ibot: 5 | separate_head: true 6 | head_n_prototypes: 131072 7 | train: 8 | batch_size_per_gpu: 32 9 | dataset_path: ImageNet22k 10 | centering: sinkhorn_knopp 11 | student: 12 | arch: vit_large 13 | patch_size: 14 14 | drop_path_rate: 0.4 15 | ffn_layer: swiglufused 16 | block_chunks: 4 17 | teacher: 18 | momentum_teacher: 0.994 19 | optim: 20 | epochs: 500 21 | weight_decay_end: 0.2 22 | base_lr: 2.0e-04 # learning rate for a batch size of 1024 23 | warmup_epochs: 80 24 | layerwise_decay: 1.0 25 | crops: 26 | local_crops_size: 98 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/configs/train/vitl16_short.yaml: -------------------------------------------------------------------------------- 1 | # this corresponds to the default config 2 | train: 3 | dataset_path: ImageNet:split=TRAIN 4 | batch_size_per_gpu: 64 5 | student: 6 | block_chunks: 4 7 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .adapters import DatasetWithEnumeratedTargets 8 | from .loaders import make_data_loader, make_dataset, SamplerType 9 | from .collate import collate_data_and_cast 10 | from .masking import MaskingGenerator 11 | from .augmentations import DataAugmentationDINO 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | from .image_net import ImageNet 8 | from .image_net_22k import ImageNet22k 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from io import BytesIO 8 | from typing import Any 9 | 10 | from PIL import Image 11 | 12 | 13 | class Decoder: 14 | def decode(self) -> Any: 15 | raise NotImplementedError 16 | 17 | 18 | class ImageDataDecoder(Decoder): 19 | def __init__(self, image_data: bytes) -> None: 20 | self._image_data = image_data 21 | 22 | def decode(self) -> Image: 23 | f = BytesIO(self._image_data) 24 | return Image.open(f).convert(mode="RGB") 25 | 26 | 27 | class TargetDecoder(Decoder): 28 | def __init__(self, target: Any): 29 | self._target = target 30 | 31 | def decode(self) -> Any: 32 | return self._target 33 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/eval/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .dino_head import DINOHead 8 | from .mlp import Mlp 9 | from .patch_embed import PatchEmbed 10 | from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused 11 | from .block import NestedTensorBlock 12 | from .attention import MemEffAttention 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/loss/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | from .dino_clstoken_loss import DINOLoss 8 | from .ibot_patch_loss import iBOTPatchLoss 9 | from .koleo_loss import KoLeoLoss 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/run/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/train/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .train import get_args_parser, main 8 | from .ssl_meta_arch import SSLMetaArch 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/dinov2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 120 3 | 4 | [tool.pylint.master] 5 | persistent = false 6 | score = false 7 | 8 | [tool.pylint.messages_control] 9 | disable = "all" 10 | enable = [ 11 | "miscellaneous", 12 | "similarities", 13 | ] 14 | 15 | [tool.pylint.similarities] 16 | ignore-comments = true 17 | ignore-docstrings = true 18 | ignore-imports = true 19 | min-similarity-lines = 8 20 | 21 | [tool.pylint.reports] 22 | reports = false 23 | 24 | [tool.pylint.miscellaneous] 25 | notes = [ 26 | "FIXME", 27 | "XXX", 28 | "TODO", 29 | ] 30 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | black==22.6.0 2 | flake8==5.0.4 3 | pylint==2.15.0 4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/requirements.txt: -------------------------------------------------------------------------------- 1 | --extra-index-url https://download.pytorch.org/whl/cu117 2 | torch==2.0.0 3 | torchvision==0.15.0 4 | omegaconf 5 | torchmetrics==0.10.3 6 | fvcore 7 | iopath 8 | xformers==0.0.18 9 | submitit 10 | --extra-index-url https://pypi.nvidia.com 11 | cuml-cu11 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/scripts/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -n "$1" ]; then 4 | echo "linting \"$1\"" 5 | fi 6 | 7 | echo "running black" 8 | if [ -n "$1" ]; then 9 | black "$1" 10 | else 11 | black dinov2 12 | fi 13 | 14 | echo "running flake8" 15 | if [ -n "$1" ]; then 16 | flake8 "$1" 17 | else 18 | flake8 19 | fi 20 | 21 | echo "running pylint" 22 | if [ -n "$1" ]; then 23 | pylint "$1" 24 | else 25 | pylint dinov2 26 | fi 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything/torchhub/facebookresearch_dinov2_main/setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | ignore = E203,E501,W503 4 | per-file-ignores = 5 | __init__.py:F401 6 | exclude = 7 | venv 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything_v2/dinov2_layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 
6 | 7 | from .mlp import Mlp 8 | from .patch_embed import PatchEmbed 9 | from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused 10 | from .block import NestedTensorBlock 11 | from .attention import MemEffAttention 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/depth_anything_v2/dinov2_layers/layer_scale.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | # 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | # Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110 8 | 9 | from typing import Union 10 | 11 | import torch 12 | from torch import Tensor 13 | from torch import nn 14 | 15 | 16 | class LayerScale(nn.Module): 17 | def __init__( 18 | self, 19 | dim: int, 20 | init_values: Union[float, Tensor] = 1e-5, 21 | inplace: bool = False, 22 | ) -> None: 23 | super().__init__() 24 | self.inplace = inplace 25 | self.gamma = nn.Parameter(init_values * torch.ones(dim)) 26 | 27 | def forward(self, x: Tensor) -> Tensor: 28 | return x.mul_(self.gamma) if self.inplace else x * self.gamma 29 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/denoising_diffusion_pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | # from custom_controlnet_aux.diffusion_edge.denoising_diffusion_pytorch.denoising_diffusion_pytorch import GaussianDiffusion, Unet, Trainer 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/requirement.txt: -------------------------------------------------------------------------------- 1 | #torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113 2 | einops 3 | scikit-learn 4 | scipy 5 | tensorboard 6 | fvcore 7 | albumentations 8 | omegaconf 9 | numpy==1.23.5 -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/taming/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/taming/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/taming/modules/autoencoder/lpips/vgg.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/taming/modules/autoencoder/lpips/vgg.pth -------------------------------------------------------------------------------- 
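The LayerScale module in custom_controlnet_aux/depth_anything_v2/dinov2_layers/layer_scale.py (a few files above) simply multiplies its input by a learnable per-channel gamma vector. A minimal sketch of exercising it in isolation follows; the dotted import path and the tensor shapes are illustrative assumptions, not values taken from this repository.

import torch
from custom_controlnet_aux.depth_anything_v2.dinov2_layers.layer_scale import LayerScale

# Hypothetical shapes: a batch of 2 sequences of 197 tokens with dim 384 (ViT-S-like).
tokens = torch.randn(2, 197, 384)
scale = LayerScale(dim=384, init_values=1e-5)  # gamma initialised to 1e-5 per channel
out = scale(tokens)                            # gamma broadcasts over batch and token axes
assert out.shape == tokens.shape

With inplace=True the forward pass uses mul_ and modifies the input tensor in place; that is the only behavioural difference in the module shown above.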
/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/taming/modules/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from custom_controlnet_aux.diffusion_edge.taming.modules.losses.vqperceptual import DummyLoss 2 | 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/diffusion_edge/taming/modules/losses/segmentation.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | class BCELoss(nn.Module): 6 | def forward(self, prediction, target): 7 | loss = F.binary_cross_entropy_with_logits(prediction,target) 8 | return loss, {} 9 | 10 | 11 | class BCELossWithQuant(nn.Module): 12 | def __init__(self, codebook_weight=1.): 13 | super().__init__() 14 | self.codebook_weight = codebook_weight 15 | 16 | def forward(self, qloss, target, prediction, split): 17 | bce_loss = F.binary_cross_entropy_with_logits(prediction,target) 18 | loss = bce_loss + self.codebook_weight*qloss 19 | return loss, {"{}/total_loss".format(split): loss.clone().detach().mean(), 20 | "{}/bce_loss".format(split): bce_loss.detach().mean(), 21 | "{}/quant_loss".format(split): qloss.detach().mean() 22 | } 23 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dsine/models/submodules/efficientnet_repo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dsine/models/submodules/efficientnet_repo/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dsine/models/submodules/efficientnet_repo/geffnet/__init__.py: -------------------------------------------------------------------------------- 1 | from .gen_efficientnet import * 2 | from .mobilenetv3 import * 3 | from .model_factory import create_model 4 | from .config import is_exportable, is_scriptable, set_exportable, set_scriptable 5 | from .activations import * -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dsine/models/submodules/efficientnet_repo/geffnet/model_factory.py: -------------------------------------------------------------------------------- 1 | from .config import set_layer_config 2 | from .helpers import load_checkpoint 3 | 4 | from .gen_efficientnet import * 5 | from .mobilenetv3 import * 6 | 7 | 8 | def create_model( 9 | model_name='mnasnet_100', 10 | pretrained=None, 11 | num_classes=1000, 12 | in_chans=3, 13 | checkpoint_path='', 14 | **kwargs): 15 | 16 | model_kwargs = dict(num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs) 17 | 18 | if model_name in globals(): 19 | create_fn = globals()[model_name] 20 | model = create_fn(**model_kwargs) 21 | else: 22 | raise RuntimeError('Unknown model (%s)' % model_name) 23 | 24 | if checkpoint_path and not pretrained: 25 | load_checkpoint(model, checkpoint_path) 26 | 27 | return model 28 | 
-------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dsine/models/submodules/efficientnet_repo/geffnet/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '1.0.2' 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dsine/models/submodules/efficientnet_repo/requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.2.0 2 | torchvision>=0.4.0 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dwpose/dw_onnx/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dwpose/dw_torchscript/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/dwpose/types.py: -------------------------------------------------------------------------------- 1 | from typing import NamedTuple, List, Optional 2 | 3 | class Keypoint(NamedTuple): 4 | x: float 5 | y: float 6 | score: float = 1.0 7 | id: int = -1 8 | 9 | 10 | class BodyResult(NamedTuple): 11 | # Note: Using `Optional` instead of `|` operator as the ladder is a Python 12 | # 3.10 feature. 13 | # Annotator code should be Python 3.8 Compatible, as controlnet repo uses 14 | # Python 3.8 environment. 
15 | # https://github.com/lllyasviel/ControlNet/blob/d3284fcd0972c510635a4f5abe2eeb71dc0de524/environment.yaml#L6 16 | keypoints: List[Optional[Keypoint]] 17 | total_score: float = 0.0 18 | total_parts: int = 0 19 | 20 | 21 | HandResult = List[Keypoint] 22 | FaceResult = List[Keypoint] 23 | AnimalPoseResult = List[Keypoint] 24 | 25 | 26 | class PoseResult(NamedTuple): 27 | body: BodyResult 28 | left_hand: Optional[HandResult] 29 | right_hand: Optional[HandResult] 30 | face: Optional[FaceResult] 31 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/leres/leres/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/leres/leres/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/leres/pix2pix/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/leres/pix2pix/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/leres/pix2pix/options/__init__.py: -------------------------------------------------------------------------------- 1 | """This package options includes option modules: training options, test options, and basic options (used in both training and test).""" 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/leres/pix2pix/util/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes a miscellaneous collection of useful helper functions.""" 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/mesh_graphormer/depth_preprocessor.py: -------------------------------------------------------------------------------- 1 | class Preprocessor: 2 | def __init__(self) -> None: 3 | pass 4 | 5 | def get_depth(self, input_dir, file_name): 6 | return -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/mesh_graphormer/hand_landmarker.task: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/mesh_graphormer/hand_landmarker.task -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/HourglassDecoder/convlarge.0.3_150.py: -------------------------------------------------------------------------------- 1 | _base_=[ 2 | '../_base_/models/encoder_decoder/convnext_large.hourglassdecoder.py', 3 | 
'../_base_/datasets/_data_base_.py', 4 | '../_base_/default_runtime.py', 5 | ] 6 | 7 | model = dict( 8 | backbone=dict( 9 | pretrained=False, 10 | ) 11 | ) 12 | 13 | # configs of the canonical space 14 | data_basic=dict( 15 | canonical_space = dict( 16 | img_size=(512, 960), 17 | focal_length=1000.0, 18 | ), 19 | depth_range=(0, 1), 20 | depth_normalize=(0.3, 150), 21 | crop_size = (544, 1216), 22 | ) 23 | 24 | batchsize_per_gpu = 2 25 | thread_per_gpu = 4 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/HourglassDecoder/test_kitti_convlarge.0.3_150.py: -------------------------------------------------------------------------------- 1 | _base_=[ 2 | '../_base_/models/encoder_decoder/convnext_large.hourglassdecoder.py', 3 | '../_base_/datasets/_data_base_.py', 4 | '../_base_/default_runtime.py', 5 | ] 6 | 7 | model = dict( 8 | backbone=dict( 9 | pretrained=False, 10 | ) 11 | ) 12 | 13 | # configs of the canonical space 14 | data_basic=dict( 15 | canonical_space = dict( 16 | img_size=(512, 960), 17 | focal_length=1000.0, 18 | ), 19 | depth_range=(0, 1), 20 | depth_normalize=(0.3, 150), 21 | crop_size = (512, 1088), 22 | ) 23 | 24 | batchsize_per_gpu = 2 25 | thread_per_gpu = 4 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/HourglassDecoder/test_nyu_convlarge.0.3_150.py: -------------------------------------------------------------------------------- 1 | _base_=[ 2 | '../_base_/models/encoder_decoder/convnext_large.hourglassdecoder.py', 3 | '../_base_/datasets/_data_base_.py', 4 | '../_base_/default_runtime.py', 5 | ] 6 | 7 | model = dict( 8 | backbone=dict( 9 | pretrained=False, 10 | ) 11 | ) 12 | 13 | # configs of the canonical space 14 | data_basic=dict( 15 | canonical_space = dict( 16 | img_size=(512, 960), 17 | focal_length=1000.0, 18 | ), 19 | depth_range=(0, 1), 20 | depth_normalize=(0.3, 150), 21 | crop_size = (480, 1216), 22 | ) 23 | 24 | batchsize_per_gpu = 2 25 | thread_per_gpu = 4 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/HourglassDecoder/vit.raft5.giant2.py: -------------------------------------------------------------------------------- 1 | _base_=[ 2 | '../_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py', 3 | '../_base_/datasets/_data_base_.py', 4 | '../_base_/default_runtime.py', 5 | ] 6 | 7 | model=dict( 8 | decode_head=dict( 9 | type='RAFTDepthNormalDPT5', 10 | iters=8, 11 | n_downsample=2, 12 | detach=False, 13 | ) 14 | ) 15 | 16 | 17 | max_value = 200 18 | # configs of the canonical space 19 | data_basic=dict( 20 | canonical_space = dict( 21 | # img_size=(540, 960), 22 | focal_length=1000.0, 23 | ), 24 | depth_range=(0, 1), 25 | depth_normalize=(0.1, max_value), 26 | crop_size = (616, 1064), # %28 = 0 27 | clip_depth_range=(0.1, 200), 28 | vit_size=(616,1064) 29 | ) 30 | 31 | batchsize_per_gpu = 1 32 | thread_per_gpu = 1 33 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/HourglassDecoder/vit.raft5.large.py: -------------------------------------------------------------------------------- 1 | _base_=[ 2 | 
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py', 3 | '../_base_/datasets/_data_base_.py', 4 | '../_base_/default_runtime.py', 5 | ] 6 | 7 | model=dict( 8 | decode_head=dict( 9 | type='RAFTDepthNormalDPT5', 10 | iters=8, 11 | n_downsample=2, 12 | detach=False, 13 | ) 14 | ) 15 | 16 | 17 | max_value = 200 18 | # configs of the canonical space 19 | data_basic=dict( 20 | canonical_space = dict( 21 | # img_size=(540, 960), 22 | focal_length=1000.0, 23 | ), 24 | depth_range=(0, 1), 25 | depth_normalize=(0.1, max_value), 26 | crop_size = (616, 1064), # %28 = 0 27 | clip_depth_range=(0.1, 200), 28 | vit_size=(616,1064) 29 | ) 30 | 31 | batchsize_per_gpu = 1 32 | thread_per_gpu = 1 33 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/HourglassDecoder/vit.raft5.small.py: -------------------------------------------------------------------------------- 1 | _base_=[ 2 | '../_base_/models/encoder_decoder/dino_vit_small_reg.dpt_raft.py', 3 | '../_base_/datasets/_data_base_.py', 4 | '../_base_/default_runtime.py', 5 | ] 6 | 7 | model=dict( 8 | decode_head=dict( 9 | type='RAFTDepthNormalDPT5', 10 | iters=4, 11 | n_downsample=2, 12 | detach=False, 13 | ) 14 | ) 15 | 16 | 17 | max_value = 200 18 | # configs of the canonical space 19 | data_basic=dict( 20 | canonical_space = dict( 21 | # img_size=(540, 960), 22 | focal_length=1000.0, 23 | ), 24 | depth_range=(0, 1), 25 | depth_normalize=(0.1, max_value), 26 | crop_size = (616, 1064), # %28 = 0 27 | clip_depth_range=(0.1, 200), 28 | vit_size=(616,1064) 29 | ) 30 | 31 | batchsize_per_gpu = 1 32 | thread_per_gpu = 1 33 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/_data_base_.py: -------------------------------------------------------------------------------- 1 | # canonical camera setting and basic data setting 2 | # we set it same as the E300 camera (crop version) 3 | # 4 | data_basic=dict( 5 | canonical_space = dict( 6 | img_size=(540, 960), 7 | focal_length=1196.0, 8 | ), 9 | depth_range=(0.9, 150), 10 | depth_normalize=(0.006, 1.001), 11 | crop_size = (512, 960), 12 | clip_depth_range=(0.9, 150), 13 | ) 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/datasets/_data_base_.py: -------------------------------------------------------------------------------- 1 | # canonical camera setting and basic data setting 2 | # 3 | data_basic=dict( 4 | canonical_space = dict( 5 | img_size=(540, 960), 6 | focal_length=1196.0, 7 | ), 8 | depth_range=(0.9, 150), 9 | depth_normalize=(0.006, 1.001), 10 | crop_size = (512, 960), 11 | clip_depth_range=(0.9, 150), 12 | ) 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | 2 | 
load_from = None 3 | cudnn_benchmark = True 4 | test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3','rmse_log', 'log10', 'sq_rel'] 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/backbones/convnext_large.py: -------------------------------------------------------------------------------- 1 | #_base_ = ['./_model_base_.py',] 2 | 3 | #'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-large_3rdparty_in21k_20220301-e6e0ea0a.pth' 4 | model = dict( 5 | #type='EncoderDecoderAuxi', 6 | backbone=dict( 7 | type='convnext_large', 8 | pretrained=True, 9 | in_22k=True, 10 | out_indices=[0, 1, 2, 3], 11 | drop_path_rate=0.4, 12 | layer_scale_init_value=1.0, 13 | checkpoint='data/pretrained_weight_repo/convnext/convnext_large_22k_1k_384.pth', 14 | prefix='backbones.', 15 | out_channels=[192, 384, 768, 1536]), 16 | ) 17 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/backbones/dino_vit_giant2_reg.py: -------------------------------------------------------------------------------- 1 | model = dict( 2 | backbone=dict( 3 | type='vit_giant2_reg', 4 | prefix='backbones.', 5 | out_channels=[1536, 1536, 1536, 1536], 6 | drop_path_rate = 0.0), 7 | ) 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/backbones/dino_vit_large.py: -------------------------------------------------------------------------------- 1 | model = dict( 2 | backbone=dict( 3 | type='vit_large', 4 | prefix='backbones.', 5 | out_channels=[1024, 1024, 1024, 1024], 6 | drop_path_rate = 0.0), 7 | ) 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/backbones/dino_vit_large_reg.py: -------------------------------------------------------------------------------- 1 | model = dict( 2 | backbone=dict( 3 | type='vit_large_reg', 4 | prefix='backbones.', 5 | out_channels=[1024, 1024, 1024, 1024], 6 | drop_path_rate = 0.0), 7 | ) 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/backbones/dino_vit_small_reg.py: -------------------------------------------------------------------------------- 1 | model = dict( 2 | backbone=dict( 3 | type='vit_small_reg', 4 | prefix='backbones.', 5 | out_channels=[384, 384, 384, 384], 6 | drop_path_rate = 0.0), 7 | ) 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/encoder_decoder/convnext_large.hourglassdecoder.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | _base_ = ['../backbones/convnext_large.py',] 3 | model = dict( 4 | type='DensePredModel', 5 | decode_head=dict( 6 | type='HourglassDecoder', 7 | in_channels=[192, 384, 768, 1536], 8 | decoder_channel=[128, 128, 256, 512], 9 | prefix='decode_heads.'), 10 | ) 11 | 
-------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | _base_ = ['../backbones/dino_vit_giant2_reg.py'] 3 | model = dict( 4 | type='DensePredModel', 5 | decode_head=dict( 6 | type='RAFTDepthDPT', 7 | in_channels=[1536, 1536, 1536, 1536], 8 | use_cls_token=True, 9 | feature_channels = [384, 768, 1536, 1536], # [2/7, 1/7, 1/14, 1/14] 10 | decoder_channels = [192, 384, 768, 1536, 1536], # [4/7, 2/7, 1/7, 1/14, 1/14] 11 | up_scale = 7, 12 | hidden_channels=[192, 192, 192, 192], # [x_4, x_8, x_16, x_32] [192, 384, 768, 1536] 13 | n_gru_layers=3, 14 | n_downsample=2, 15 | iters=3, 16 | slow_fast_gru=True, 17 | num_register_tokens=4, 18 | prefix='decode_heads.'), 19 | ) 20 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/encoder_decoder/dino_vit_large.dpt_raft.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | _base_ = ['../backbones/dino_vit_large.py'] 3 | model = dict( 4 | type='DensePredModel', 5 | decode_head=dict( 6 | type='RAFTDepthDPT', 7 | in_channels=[1024, 1024, 1024, 1024], 8 | use_cls_token=True, 9 | feature_channels = [256, 512, 1024, 1024], # [2/7, 1/7, 1/14, 1/14] 10 | decoder_channels = [128, 256, 512, 1024, 1024], # [4/7, 2/7, 1/7, 1/14, 1/14] 11 | up_scale = 7, 12 | hidden_channels=[128, 128, 128, 128], # [x_4, x_8, x_16, x_32] [192, 384, 768, 1536] 13 | n_gru_layers=3, 14 | n_downsample=2, 15 | iters=12, 16 | slow_fast_gru=True, 17 | corr_radius=4, 18 | corr_levels=4, 19 | prefix='decode_heads.'), 20 | ) 21 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | _base_ = ['../backbones/dino_vit_large_reg.py'] 3 | model = dict( 4 | type='DensePredModel', 5 | decode_head=dict( 6 | type='RAFTDepthDPT', 7 | in_channels=[1024, 1024, 1024, 1024], 8 | use_cls_token=True, 9 | feature_channels = [256, 512, 1024, 1024], # [2/7, 1/7, 1/14, 1/14] 10 | decoder_channels = [128, 256, 512, 1024, 1024], # [4/7, 2/7, 1/7, 1/14, 1/14] 11 | up_scale = 7, 12 | hidden_channels=[128, 128, 128, 128], # [x_4, x_8, x_16, x_32] [192, 384, 768, 1536] 13 | n_gru_layers=3, 14 | n_downsample=2, 15 | iters=3, 16 | slow_fast_gru=True, 17 | num_register_tokens=4, 18 | prefix='decode_heads.'), 19 | ) 20 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/configs/_base_/models/encoder_decoder/dino_vit_small_reg.dpt_raft.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | _base_ = ['../backbones/dino_vit_small_reg.py'] 3 | model = dict( 4 | type='DensePredModel', 5 | decode_head=dict( 6 | type='RAFTDepthDPT', 7 | in_channels=[384, 384, 384, 384], 8 | use_cls_token=True, 9 | feature_channels = [96, 192, 384, 768], # [2/7, 1/7, 1/14, 1/14] 10 | decoder_channels 
= [48, 96, 192, 384, 384], # [-, 1/4, 1/7, 1/14, 1/14] 11 | up_scale = 7, 12 | hidden_channels=[48, 48, 48, 48], # [x_4, x_8, x_16, x_32] [1/4, 1/7, 1/14, -] 13 | n_gru_layers=3, 14 | n_downsample=2, 15 | iters=3, 16 | slow_fast_gru=True, 17 | num_register_tokens=4, 18 | prefix='decode_heads.'), 19 | ) 20 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .monodepth_model import DepthModel 2 | # from .__base_model__ import BaseDepthModel 3 | 4 | 5 | __all__ = ['DepthModel', 'BaseDepthModel'] 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .ConvNeXt import convnext_xlarge 2 | from .ConvNeXt import convnext_small 3 | from .ConvNeXt import convnext_base 4 | from .ConvNeXt import convnext_large 5 | from .ConvNeXt import convnext_tiny 6 | from .ViT_DINO import vit_large 7 | from .ViT_DINO_reg import vit_small_reg, vit_large_reg, vit_giant2_reg 8 | 9 | __all__ = [ 10 | 'convnext_xlarge', 'convnext_small', 'convnext_base', 'convnext_large', 'convnext_tiny', 'vit_small_reg', 'vit_large_reg', 'vit_giant2_reg' 11 | ] 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/model/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .HourGlassDecoder import HourglassDecoder 2 | from .RAFTDepthNormalDPTDecoder5 import RAFTDepthNormalDPT5 3 | 4 | __all__=['HourglassDecoder', 'RAFTDepthNormalDPT5'] 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/model/model_pipelines/__base_model__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from custom_controlnet_aux.metric3d.mono.utils.comm import get_func 4 | 5 | 6 | class BaseDepthModel(nn.Module): 7 | def __init__(self, cfg, **kwargs) -> None: 8 | super(BaseDepthModel, self).__init__() 9 | model_type = cfg.model.type 10 | self.depth_model = get_func('custom_controlnet_aux.metric3d.mono.model.model_pipelines.' 
+ model_type)(cfg) 11 | 12 | def forward(self, data): 13 | output = self.depth_model(**data) 14 | 15 | return output['prediction'], output['confidence'], output 16 | 17 | def inference(self, data): 18 | with torch.no_grad(): 19 | pred_depth, confidence, _ = self.forward(data) 20 | return pred_depth, confidence -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/model/model_pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .dense_pipeline import DensePredModel 3 | from .__base_model__ import BaseDepthModel 4 | __all__ = [ 5 | 'DensePredModel', 'BaseDepthModel', 6 | ] -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/model/model_pipelines/dense_pipeline.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from custom_controlnet_aux.metric3d.mono.utils.comm import get_func 4 | 5 | class DensePredModel(nn.Module): 6 | def __init__(self, cfg) -> None: 7 | super(DensePredModel, self).__init__() 8 | 9 | self.encoder = get_func('custom_controlnet_aux.metric3d.mono.model.' + cfg.model.backbone.prefix + cfg.model.backbone.type)(**cfg.model.backbone) 10 | self.decoder = get_func('custom_controlnet_aux.metric3d.mono.model.' + cfg.model.decode_head.prefix + cfg.model.decode_head.type)(cfg) 11 | 12 | def forward(self, input, **kwargs): 13 | # [f_32, f_16, f_8, f_4] 14 | features = self.encoder(input) 15 | out = self.decoder(features, **kwargs) 16 | return out -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/metric3d/mono/utils/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/mlsd/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/mlsd/models/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/NNET.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from .submodules.encoder import Encoder 6 | from .submodules.decoder import Decoder 7 | 8 | 9 | class NNET(nn.Module): 10 | def __init__(self, args): 11 | super(NNET, self).__init__() 12 | self.encoder = Encoder() 13 | self.decoder = Decoder(args) 14 | 15 | def get_1x_lr_params(self): # lr/10 learning rate 16 | return self.encoder.parameters() 17 | 18 | def get_10x_lr_params(self): # lr learning rate 19 | return self.decoder.parameters() 20 | 21 | def forward(self, img, **kwargs): 22 | return self.decoder(self.encoder(img), **kwargs) -------------------------------------------------------------------------------- 
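The metric3d pipeline a few files above (model_pipelines/dense_pipeline.py and __base_model__.py) builds its encoder and decoder from dotted-path strings assembled out of the config's prefix and type fields and resolved through get_func from mono/utils/comm.py. That helper is not included in this dump; the sketch below is an assumption about how such a resolver typically behaves, not the upstream implementation.

import importlib

def resolve_callable(dotted_path: str):
    """Resolve 'package.module.Name' to the attribute 'Name' on 'package.module'."""
    module_path, _, attr_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)

# DensePredModel effectively does:
#   encoder_cls = get_func('custom_controlnet_aux.metric3d.mono.model.'
#                          + cfg.model.backbone.prefix + cfg.model.backbone.type)
# so with prefix='backbones.' and type='vit_large_reg' the resolved target is
# custom_controlnet_aux.metric3d.mono.model.backbones.vit_large_reg, which is then
# instantiated with the merged backbone config.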
/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/efficientnet_repo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/efficientnet_repo/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/__init__.py: -------------------------------------------------------------------------------- 1 | from .gen_efficientnet import * 2 | from .mobilenetv3 import * 3 | from .model_factory import create_model 4 | from .config import is_exportable, is_scriptable, set_exportable, set_scriptable 5 | from .activations import * -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/model_factory.py: -------------------------------------------------------------------------------- 1 | from .config import set_layer_config 2 | from .helpers import load_checkpoint 3 | 4 | from .gen_efficientnet import * 5 | from .mobilenetv3 import * 6 | 7 | 8 | def create_model( 9 | model_name='mnasnet_100', 10 | pretrained=None, 11 | num_classes=1000, 12 | in_chans=3, 13 | checkpoint_path='', 14 | **kwargs): 15 | 16 | model_kwargs = dict(num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs) 17 | 18 | if model_name in globals(): 19 | create_fn = globals()[model_name] 20 | model = create_fn(**model_kwargs) 21 | else: 22 | raise RuntimeError('Unknown model (%s)' % model_name) 23 | 24 | if checkpoint_path and not pretrained: 25 | load_checkpoint(model, checkpoint_path) 26 | 27 | return model 28 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '1.0.2' 2 | -------------------------------------------------------------------------------- 
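The vendored geffnet factory just above (normalbae/nets/submodules/efficientnet_repo/geffnet/model_factory.py) looks the requested constructor up in the module globals populated by the star imports from gen_efficientnet and mobilenetv3. A minimal usage sketch, assuming the nested package resolves at the dotted path below and using the factory's own default architecture name:

import torch
from custom_controlnet_aux.normalbae.nets.submodules.efficientnet_repo.geffnet import create_model

# 'mnasnet_100' is create_model's default; pretrained weights are left off here
# because their availability in this vendored copy is an assumption.
backbone = create_model('mnasnet_100', pretrained=False, num_classes=1000, in_chans=3)
backbone.eval()
with torch.no_grad():
    logits = backbone(torch.randn(1, 3, 224, 224))  # ImageNet-sized dummy input
print(logits.shape)  # torch.Size([1, 1000])

Per the factory code above, an unknown name raises RuntimeError('Unknown model (...)'), and checkpoint_path is only loaded when pretrained is falsy.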
/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/normalbae/nets/submodules/efficientnet_repo/requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.2.0 2 | torchvision>=0.4.0 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml: -------------------------------------------------------------------------------- 1 | _BASE_: oneformer_R50_bs16_50ep.yaml 2 | MODEL: 3 | BACKBONE: 4 | NAME: "D2SwinTransformer" 5 | SWIN: 6 | EMBED_DIM: 192 7 | DEPTHS: [2, 2, 18, 2] 8 | NUM_HEADS: [6, 12, 24, 48] 9 | WINDOW_SIZE: 12 10 | APE: False 11 | DROP_PATH_RATE: 0.3 12 | PATCH_NORM: True 13 | PRETRAIN_IMG_SIZE: 384 14 | WEIGHTS: "swin_large_patch4_window12_384_22k.pkl" 15 | PIXEL_MEAN: [123.675, 116.280, 103.530] 16 | PIXEL_STD: [58.395, 57.120, 57.375] 17 | ONE_FORMER: 18 | NUM_OBJECT_QUERIES: 150 19 | SOLVER: 20 | STEPS: (655556, 735184) 21 | MAX_ITER: 737500 22 | AMP: 23 | ENABLED: False 24 | TEST: 25 | DETECTIONS_PER_IMAGE: 150 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/sam/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from .sam import Sam 8 | from .image_encoder import ImageEncoderViT 9 | from .mask_decoder import MaskDecoder 10 | from .prompt_encoder import PromptEncoder 11 | from .transformer import TwoWayTransformer 12 | from .tiny_vit_sam import TinyViT 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/sam/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # All rights reserved. 3 | 4 | # This source code is licensed under the license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/teed/Fmish.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script provides functional interface for Mish activation function. 3 | """ 4 | 5 | # import pytorch 6 | import torch 7 | import torch.nn.functional as F 8 | 9 | 10 | @torch.jit.script 11 | def mish(input): 12 | """ 13 | Applies the mish function element-wise: 14 | mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x))) 15 | See additional documentation for mish class. 16 | """ 17 | return input * torch.tanh(F.softplus(input)) -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/teed/Fsmish.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script based on: 3 | Wang, Xueliang, Honge Ren, and Achuan Wang. 4 | "Smish: A Novel Activation Function for Deep Learning Methods. 5 | " Electronics 11.4 (2022): 540. 
6 | """ 7 | 8 | # import pytorch 9 | import torch 10 | import torch.nn.functional as F 11 | 12 | 13 | @torch.jit.script 14 | def smish(input): 15 | """ 16 | Applies the mish function element-wise: 17 | mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(sigmoid(x)))) 18 | See additional documentation for mish class. 19 | """ 20 | return input * torch.tanh(torch.log(1+torch.sigmoid(input))) -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/tests/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/tests/requirements.txt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12_aug.py: -------------------------------------------------------------------------------- 1 | _base_ = './pascal_voc12.py' 2 | # dataset settings 3 | data = dict( 4 | train=dict( 5 | ann_dir=['SegmentationClass', 'SegmentationClassAug'], 6 | split=[ 7 | 'ImageSets/Segmentation/train.txt', 8 | 'ImageSets/Segmentation/aug.txt' 9 | ])) 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # yapf:disable 2 | log_config = dict( 3 | interval=50, 4 | hooks=[ 5 | dict(type='TextLoggerHook', by_epoch=False), 6 | # dict(type='TensorboardLoggerHook') 7 | ]) 8 | # yapf:enable 9 | dist_params = dict(backend='nccl') 10 | log_level = 'INFO' 11 | load_from = None 12 | resume_from = None 13 | workflow = [('train', 1)] 14 | cudnn_benchmark = True 15 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/configs/_base_/models/lraspp_m-v3-d8.py: -------------------------------------------------------------------------------- 1 | # model settings 2 | norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) 3 | model = dict( 4 | type='EncoderDecoder', 5 | backbone=dict( 6 | type='MobileNetV3', 7 | arch='large', 8 | out_indices=(1, 3, 16), 9 | norm_cfg=norm_cfg), 10 | decode_head=dict( 11 | type='LRASPPHead', 12 | in_channels=(16, 24, 960), 13 | in_index=(0, 1, 2), 14 | channels=128, 15 | input_transform='multiple_select', 16 | dropout_ratio=0.1, 17 | num_classes=19, 18 | norm_cfg=norm_cfg, 19 | act_cfg=dict(type='ReLU'), 20 | align_corners=False, 21 | loss_decode=dict( 22 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 23 | # model training and testing settings 24 | train_cfg=dict(), 25 | test_cfg=dict(mode='whole')) 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', 
power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=160000) 8 | checkpoint_config = dict(by_epoch=False, interval=16000) 9 | evaluation = dict(interval=16000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/configs/_base_/schedules/schedule_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=20000) 8 | checkpoint_config = dict(by_epoch=False, interval=2000) 9 | evaluation = dict(interval=2000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/configs/_base_/schedules/schedule_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=40000) 8 | checkpoint_config = dict(by_epoch=False, interval=4000) 9 | evaluation = dict(interval=4000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/configs/_base_/schedules/schedule_80k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=80000) 8 | checkpoint_config = dict(by_epoch=False, interval=8000) 9 | evaluation = dict(interval=8000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/uniformer/mmcv_custom/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .checkpoint import load_checkpoint 4 | 5 | __all__ = ['load_checkpoint'] -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/unimatch/unimatch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/unimatch/unimatch/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/zoe/zoedepth/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/zoe/zoedepth/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "bin_centers_type": "normed", 4 | "img_size": [384, 768] 5 | }, 6 | 7 | "train": { 8 | }, 9 | 10 | "infer":{ 11 | "train_midas": false, 12 | "use_pretrained_midas": false, 13 | "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt", 14 | "force_keep_ar": true 15 | }, 16 | 17 | "eval":{ 18 | "train_midas": false, 19 | "use_pretrained_midas": false, 20 | "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt" 21 | } 22 | } -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_controlnet_aux/zoe/zoedepth/utils/arg_utils.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | def infer_type(x): # hacky way to infer type from string args 4 | if not isinstance(x, str): 5 | return x 6 | 7 | try: 8 | x = int(x) 9 | return x 10 | except ValueError: 11 | pass 12 | 13 | try: 14 | x = float(x) 15 | return x 16 | except ValueError: 17 | pass 18 | 19 | return x 20 | 21 | 22 | def parse_unknown(unknown_args): 23 | clean = [] 24 | for a in unknown_args: 25 | if "=" in a: 26 | k, v = a.split("=") 27 | clean.extend([k, v]) 28 | else: 29 | clean.append(a) 30 | 31 | keys = clean[::2] 32 | values = clean[1::2] 33 | return {k.replace("--", ""): infer_type(v) for k, v in zip(keys, values)} 34 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | from .utils.env import setup_environment 4 | 5 | setup_environment() 6 | 7 | 8 | # This line will be programatically read/write by setup.py. 9 | # Leave them at the bottom of this file and don't touch them. 10 | __version__ = "0.6" 11 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/checkpoint/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # File: 4 | 5 | 6 | from . import catalog as _UNUSED # register the handler 7 | from .detection_checkpoint import DetectionCheckpointer 8 | from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer 9 | 10 | __all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"] 11 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/config/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .compat import downgrade_config, upgrade_config 3 | from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable 4 | from .instantiate import instantiate 5 | from .lazy import LazyCall, LazyConfig 6 | 7 | __all__ = [ 8 | "CfgNode", 9 | "get_cfg", 10 | "global_cfg", 11 | "set_global_cfg", 12 | "downgrade_config", 13 | "upgrade_config", 14 | "configurable", 15 | "instantiate", 16 | "LazyCall", 17 | "LazyConfig", 18 | ] 19 | 20 | 21 | from custom_detectron2.utils.env import fixup_module_metadata 22 | 23 | fixup_module_metadata(__name__, globals(), __all__) 24 | del fixup_module_metadata 25 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from . import transforms # isort:skip 3 | 4 | from .build import ( 5 | build_batch_data_loader, 6 | build_detection_test_loader, 7 | build_detection_train_loader, 8 | get_detection_dataset_dicts, 9 | load_proposals_into_dataset, 10 | print_instances_class_histogram, 11 | ) 12 | from .catalog import DatasetCatalog, MetadataCatalog, Metadata 13 | from .common import DatasetFromList, MapDataset, ToIterableDataset 14 | from .dataset_mapper import DatasetMapper 15 | 16 | # ensure the builtin datasets are registered 17 | from . import datasets, samplers # isort:skip 18 | 19 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 20 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/data/datasets/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Common Datasets 4 | 5 | The datasets implemented here do not need to load the data into the final format. 6 | They should provide the minimal data structure needed to use the dataset, so it can be very efficient. 7 | 8 | For example, for an image dataset, just provide the file names and labels, but don't read the images. 9 | Let the downstream decide how to read. 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json 3 | from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated 4 | from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta 5 | from .pascal_voc import load_voc_instances, register_pascal_voc 6 | from . import builtin as _builtin # ensure the builtin datasets are registered 7 | 8 | 9 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/data/datasets/register_coco.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from .coco import register_coco_instances # noqa 3 | from .coco_panoptic import register_coco_panoptic_separated # noqa 4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .distributed_sampler import ( 3 | InferenceSampler, 4 | RandomSubsetTrainingSampler, 5 | RepeatFactorTrainingSampler, 6 | TrainingSampler, 7 | ) 8 | 9 | from .grouped_batch_sampler import GroupedBatchSampler 10 | 11 | __all__ = [ 12 | "GroupedBatchSampler", 13 | "TrainingSampler", 14 | "RandomSubsetTrainingSampler", 15 | "InferenceSampler", 16 | "RepeatFactorTrainingSampler", 17 | ] 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from fvcore.transforms.transform import Transform, TransformList # order them first 3 | from fvcore.transforms.transform import * 4 | from .transform import * 5 | from .augmentation import * 6 | from .augmentation_impl import * 7 | 8 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 9 | 10 | 11 | from custom_detectron2.utils.env import fixup_module_metadata 12 | 13 | fixup_module_metadata(__name__, globals(), __all__) 14 | del fixup_module_metadata 15 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | from .launch import * 4 | from .train_loop import * 5 | 6 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 7 | 8 | 9 | # prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) 10 | # but still make them available here 11 | from .hooks import * 12 | from .defaults import * 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator 3 | from .coco_evaluation import COCOEvaluator 4 | from .rotated_coco_evaluation import RotatedCOCOEvaluator 5 | from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset 6 | from .lvis_evaluation import LVISEvaluator 7 | from .panoptic_evaluation import COCOPanopticEvaluator 8 | from .pascal_voc_evaluation import PascalVOCDetectionEvaluator 9 | from .sem_seg_evaluation import SemSegEvaluator 10 | from .testing import print_csv_format, verify_results 11 | 12 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/export/README.md: -------------------------------------------------------------------------------- 1 | 2 | This directory contains code to prepare a detectron2 model for deployment. 3 | Currently it supports exporting a detectron2 model to TorchScript, ONNX, or (deprecated) Caffe2 format. 4 | 5 | Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. 6 | 7 | 8 | ### Acknowledgements 9 | 10 | Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools. 11 | 12 | Thanks to Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who 13 | help export Detectron2 models to TorchScript. 14 | 15 | Thanks to ONNX Converter team at Microsoft who help export Detectron2 models to ONNX. 16 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/export/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import warnings 4 | 5 | from .flatten import TracingAdapter 6 | from .torchscript import dump_torchscript_IR, scripting_with_instances 7 | 8 | try: 9 | from caffe2.proto import caffe2_pb2 as _tmp 10 | from caffe2.python import core 11 | 12 | # caffe2 is optional 13 | except ImportError: 14 | pass 15 | else: 16 | from .api import * 17 | 18 | 19 | # TODO: Update ONNX Opset version and run tests when a newer PyTorch is supported 20 | STABLE_ONNX_OPSET_VERSION = 11 21 | 22 | 23 | def add_export_config(cfg): 24 | warnings.warn( 25 | "add_export_config has been deprecated and behaves as no-op function.", DeprecationWarning 26 | ) 27 | return cfg 28 | 29 | 30 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 31 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm, CycleBatchNormList 3 | from .deform_conv import DeformConv, ModulatedDeformConv 4 | from .mask_ops import paste_masks_in_image 5 | from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated 6 | from .roi_align import ROIAlign, roi_align 7 | from .roi_align_rotated import ROIAlignRotated, roi_align_rotated 8 | from .shape_spec import ShapeSpec 9 | from .wrappers import ( 10 | BatchNorm2d, 11 | Conv2d, 12 | ConvTranspose2d, 13 | cat, 14 | interpolate, 15 | Linear, 16 | nonzero_tuple, 17 | cross_entropy, 18 | empty_input_loss_func_wrapper, 19 | shapes_to_tensor, 20 | move_device_like, 21 | ) 22 | from .blocks import CNNBlockBase, DepthwiseSeparableConv2d 23 | from .aspp import ASPP 24 | from .losses import ciou_loss, diou_loss 25 | 26 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 27 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/layers/csrc/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | To add a new Op: 4 | 5 | 1. Create a new directory 6 | 2. Implement new ops there 7 | 3. Declare its Python interface in `vision.cpp`. 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/layers/csrc/cuda_version.cu: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. 2 | 3 | #include <cuda_runtime_api.h> 4 | 5 | namespace detectron2 { 6 | int get_cudart_version() { 7 | // Not a ROCM platform: Either HIP is not used, or 8 | // it is used, but platform is not ROCM (i.e. it is CUDA) 9 | #if !defined(__HIP_PLATFORM_HCC__) 10 | return CUDART_VERSION; 11 | #else 12 | int version = 0; 13 | 14 | #if HIP_VERSION_MAJOR != 0 15 | // Create a convention similar to that of CUDA, as assumed by other 16 | // parts of the code. 17 | 18 | version = HIP_VERSION_MINOR; 19 | version += (HIP_VERSION_MAJOR * 100); 20 | #else 21 | hipRuntimeGetVersion(&version); 22 | #endif 23 | return version; 24 | #endif 25 | } 26 | } // namespace detectron2 27 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/layers/rotated_boxes.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from __future__ import absolute_import, division, print_function, unicode_literals 3 | import torch 4 | 5 | 6 | def pairwise_iou_rotated(boxes1, boxes2): 7 | """ 8 | Return intersection-over-union (Jaccard index) of boxes. 9 | 10 | Both sets of boxes are expected to be in 11 | (x_center, y_center, width, height, angle) format. 12 | 13 | Arguments: 14 | boxes1 (Tensor[N, 5]) 15 | boxes2 (Tensor[M, 5]) 16 | 17 | Returns: 18 | iou (Tensor[N, M]): the NxM matrix containing the pairwise 19 | IoU values for every element in boxes1 and boxes2 20 | """ 21 | return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2) 22 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/layers/shape_spec.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc.
and its affiliates. 3 | from dataclasses import dataclass 4 | from typing import Optional 5 | 6 | 7 | @dataclass 8 | class ShapeSpec: 9 | """ 10 | A simple structure that contains basic shape specification about a tensor. 11 | It is often used as the auxiliary inputs/outputs of models, 12 | to complement the lack of shape inference ability among pytorch modules. 13 | """ 14 | 15 | channels: Optional[int] = None 16 | height: Optional[int] = None 17 | width: Optional[int] = None 18 | stride: Optional[int] = None 19 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/model_zoo/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | """ 3 | Model Zoo API for Detectron2: a collection of functions to create common model architectures 4 | listed in `MODEL_ZOO.md `_, 5 | and optionally load their pre-trained weights. 6 | """ 7 | 8 | from .model_zoo import get, get_config_file, get_checkpoint_url, get_config 9 | 10 | __all__ = ["get_checkpoint_url", "get", "get_config_file", "get_config"] 11 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/modeling/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip 3 | 4 | from .backbone import Backbone 5 | from .fpn import FPN 6 | from .regnet import RegNet 7 | from .resnet import ( 8 | BasicStem, 9 | ResNet, 10 | ResNetBlockBase, 11 | build_resnet_backbone, 12 | make_stage, 13 | BottleneckBlock, 14 | ) 15 | from .vit import ViT, SimpleFeaturePyramid, get_vit_lr_decay_rate 16 | from .mvit import MViT 17 | from .swin import SwinTransformer 18 | 19 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 20 | # TODO can expose more resnet blocks after careful consideration 21 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/modeling/meta_arch/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | from .build import META_ARCH_REGISTRY, build_model # isort:skip 5 | 6 | from .panoptic_fpn import PanopticFPN 7 | 8 | # import all the meta_arch, so they will be registered 9 | from .rcnn import GeneralizedRCNN, ProposalNetwork 10 | from .dense_detector import DenseDetector 11 | from .retinanet import RetinaNet 12 | from .fcos import FCOS 13 | from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head 14 | 15 | 16 | __all__ = list(globals().keys()) 17 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/modeling/meta_arch/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | import torch 3 | 4 | from custom_detectron2.utils.logger import _log_api_usage 5 | from custom_detectron2.utils.registry import Registry 6 | 7 | META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip 8 | META_ARCH_REGISTRY.__doc__ = """ 9 | Registry for meta-architectures, i.e. the whole model. 10 | 11 | The registered object will be called with `obj(cfg)` 12 | and expected to return a `nn.Module` object. 13 | """ 14 | 15 | 16 | def build_model(cfg): 17 | """ 18 | Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. 19 | Note that it does not load any weights from ``cfg``. 20 | """ 21 | meta_arch = cfg.MODEL.META_ARCHITECTURE 22 | model = META_ARCH_REGISTRY.get(meta_arch)(cfg) 23 | _log_api_usage("modeling.meta_arch." + meta_arch) 24 | return model 25 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/modeling/proposal_generator/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator 3 | from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead 4 | 5 | __all__ = list(globals().keys()) 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/modeling/proposal_generator/build.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from custom_detectron2.utils.registry import Registry 3 | 4 | PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") 5 | PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ 6 | Registry for proposal generator, which produces object proposals from feature maps. 7 | 8 | The registered object will be called with `obj(cfg, input_shape)`. 9 | The call should return a `nn.Module` object. 10 | """ 11 | 12 | from . import rpn, rrpn # noqa F401 isort:skip 13 | 14 | 15 | def build_proposal_generator(cfg, input_shape): 16 | """ 17 | Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. 18 | The name can be "PrecomputedProposals" to use no proposal generator. 19 | """ 20 | name = cfg.MODEL.PROPOSAL_GENERATOR.NAME 21 | if name == "PrecomputedProposals": 22 | return None 23 | 24 | return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape) 25 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/modeling/roi_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead 3 | from .keypoint_head import ( 4 | ROI_KEYPOINT_HEAD_REGISTRY, 5 | build_keypoint_head, 6 | BaseKeypointRCNNHead, 7 | KRCNNConvDeconvUpsampleHead, 8 | ) 9 | from .mask_head import ( 10 | ROI_MASK_HEAD_REGISTRY, 11 | build_mask_head, 12 | BaseMaskRCNNHead, 13 | MaskRCNNConvUpsampleHead, 14 | ) 15 | from .roi_heads import ( 16 | ROI_HEADS_REGISTRY, 17 | ROIHeads, 18 | Res5ROIHeads, 19 | StandardROIHeads, 20 | build_roi_heads, 21 | select_foreground_proposals, 22 | ) 23 | from .cascade_rcnn import CascadeROIHeads 24 | from .rotated_fast_rcnn import RROIHeads 25 | from .fast_rcnn import FastRCNNOutputLayers 26 | 27 | from . import cascade_rcnn # isort:skip 28 | 29 | __all__ = list(globals().keys()) 30 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/projects/README.md: -------------------------------------------------------------------------------- 1 | 2 | Projects live in the [`projects` directory](../../projects) under the root of this repository, but not here. 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/projects/deeplab/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build_solver import build_lr_scheduler 3 | from .config import add_deeplab_config 4 | from .resnet import build_resnet_deeplab_backbone 5 | from .semantic_seg import DeepLabV3Head, DeepLabV3PlusHead 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/solver/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params 3 | from .lr_scheduler import ( 4 | LRMultiplier, 5 | LRScheduler, 6 | WarmupCosineLR, 7 | WarmupMultiStepLR, 8 | WarmupParamScheduler, 9 | ) 10 | 11 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/structures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa, pairwise_point_box_distance 3 | from .image_list import ImageList 4 | 5 | from .instances import Instances 6 | from .keypoints import Keypoints, heatmaps_to_keypoints 7 | from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks 8 | from .rotated_boxes import RotatedBoxes 9 | from .rotated_boxes import pairwise_iou as pairwise_iou_rotated 10 | 11 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 12 | 13 | 14 | from custom_detectron2.utils.env import fixup_module_metadata 15 | 16 | fixup_module_metadata(__name__, globals(), __all__) 17 | del fixup_module_metadata 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/tracking/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .base_tracker import ( # noqa 3 | BaseTracker, 4 | build_tracker_head, 5 | TRACKER_HEADS_REGISTRY, 6 | ) 7 | from .bbox_iou_tracker import BBoxIOUTracker # noqa 8 | from .hungarian_tracker import BaseHungarianTracker # noqa 9 | from .iou_weighted_hungarian_bbox_iou_tracker import ( # noqa 10 | IOUWeightedHungarianBBoxIOUTracker, 11 | ) 12 | from .utils import create_prediction_pairs # noqa 13 | from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker # noqa 14 | 15 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 16 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/utils/README.md: -------------------------------------------------------------------------------- 1 | # Utility functions 2 | 3 | This folder contains utility functions that are not used in the 4 | core library, but are useful for building models or training 5 | code using the config system. 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_detectron2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_manopth/CHANGES.md: -------------------------------------------------------------------------------- 1 | * Chumpy is removed -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_manopth/__init__.py: -------------------------------------------------------------------------------- 1 | name = 'manopth' 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_manopth/rotproj.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def batch_rotprojs(batches_rotmats): 5 | proj_rotmats = [] 6 | for batch_idx, batch_rotmats in enumerate(batches_rotmats): 7 | proj_batch_rotmats = [] 8 | for rot_idx, rotmat in enumerate(batch_rotmats): 9 | # GPU implementation of svd is VERY slow 10 | # ~ 2 10^-3 per hit vs 5 10^-5 on cpu 11 | U, S, V = rotmat.cpu().svd() 12 | rotmat = torch.matmul(U, V.transpose(0, 1)) 13 | orth_det = rotmat.det() 14 | # Remove reflection 15 | if orth_det < 0: 16 | rotmat[:, 2] = -1 * rotmat[:, 2] 17 | 18 | rotmat = rotmat.cuda() 19 | proj_batch_rotmats.append(rotmat) 20 | proj_rotmats.append(torch.stack(proj_batch_rotmats)) 21 | return torch.stack(proj_rotmats) 22 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.1.0" 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/bert/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.0.0" 2 | 3 | from .modeling_bert import (BertConfig, BertModel, 4 | load_tf_weights_in_bert) 5 | 6 | from .modeling_graphormer import Graphormer 7 | 8 | from .e2e_body_network import Graphormer_Body_Network 9 | 10 | from .e2e_hand_network import Graphormer_Hand_Network 11 | 12 | CONFIG_NAME = "config.json" 13 | 14 | from .modeling_utils import (WEIGHTS_NAME, TF_WEIGHTS_NAME, 15 | PretrainedConfig, PreTrainedModel, prune_layer, Conv1D) 16 | 17 | from .file_utils import (PYTORCH_PRETRAINED_BERT_CACHE) 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/bert/bert-base-uncased/config.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "architectures": [ 3 | "BertForMaskedLM" 4 | ], 5 | "attention_probs_dropout_prob": 0.1, 6 | "hidden_act": "gelu", 7 | "hidden_dropout_prob": 0.1, 8 | "hidden_size": 768, 9 | "initializer_range": 0.02, 10 | "intermediate_size": 3072, 11 | "max_position_embeddings": 512, 12 | "num_attention_heads": 12, 13 | "num_hidden_layers": 12, 14 | "type_vocab_size": 2, 15 | "vocab_size": 30522 16 | } 17 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/bert/file_utils.py: -------------------------------------------------------------------------------- 1 | from transformers.file_utils import * -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/bert/modeling_bert.py: -------------------------------------------------------------------------------- 1 | from transformers.models.bert import modeling_bert 2 | 3 | for symbol in dir(modeling_bert): 4 | if not symbol.startswith("_"): 5 | globals()[symbol] = getattr(modeling_bert, symbol) 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/bert/modeling_utils.py: -------------------------------------------------------------------------------- 1 | from transformers.modeling_utils import * -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/J_regressor_extra.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/J_regressor_extra.npy -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/J_regressor_h36m_correct.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/J_regressor_h36m_correct.npy -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/MANO_LEFT.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/MANO_LEFT.pkl -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/MANO_RIGHT.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/MANO_RIGHT.pkl -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_195_adjmat_indices.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_195_adjmat_indices.pt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_195_adjmat_size.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_195_adjmat_size.pt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_195_adjmat_values.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_195_adjmat_values.pt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_downsampling.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mano_downsampling.npz -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mesh_downsampling.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/mesh_downsampling.npz -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_adjmat_indices.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_adjmat_indices.pt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_adjmat_size.pt: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_adjmat_size.pt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_adjmat_values.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_adjmat_values.pt -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_faces.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/data/smpl_431_faces.npy -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/modeling/hrnet/config/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from .default import _C as config 8 | from .default import update_config 9 | from .models import MODEL_EXTRAS 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_mesh_graphormer/utils/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/midas/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/midas/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/midas/backbones/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/midas/backbones/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/midas/backbones/swin.py: -------------------------------------------------------------------------------- 1 | import custom_timm as timm 2 | 3 | from .swin_common import _make_swin_backbone 4 | 5 | 6 | def _make_pretrained_swinl12_384(pretrained, hooks=None): 7 | model = timm.create_model("swin_large_patch4_window12_384", pretrained=pretrained) 8 | 9 | hooks = [1, 1, 17, 1] if hooks == None else hooks 10 | return _make_swin_backbone( 11 | model, 12 | hooks=hooks 13 | ) 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_midas_repo/midas/base_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class BaseModel(torch.nn.Module): 5 | def load(self, path): 6 | """Load model from file. 7 | 8 | Args: 9 | path (str): file path 10 | """ 11 | parameters = torch.load(path, map_location=torch.device('cpu')) 12 | 13 | if "optimizer" in parameters: 14 | parameters = parameters["model"] 15 | 16 | self.load_state_dict(parameters) 17 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/__init__.py: -------------------------------------------------------------------------------- 1 | #Dummy file ensuring this package will be recognized -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | # flake8: noqa 3 | from .arraymisc import * 4 | from .fileio import * 5 | from .image import * 6 | from .utils import * 7 | from .version import * 8 | from .video import * 9 | from .visualization import * 10 | 11 | # The following modules are not imported to this level, so mmcv may be used 12 | # without PyTorch. 13 | # - runner 14 | # - parallel 15 | # - op 16 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/arraymisc/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .quantization import dequantize, quantize 3 | 4 | __all__ = ['quantize', 'dequantize'] 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/cnn/bricks/hswish.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch.nn as nn 3 | 4 | from .registry import ACTIVATION_LAYERS 5 | 6 | 7 | @ACTIVATION_LAYERS.register_module() 8 | class HSwish(nn.Module): 9 | """Hard Swish Module. 10 | 11 | This module applies the hard swish function: 12 | 13 | .. math:: 14 | Hswish(x) = x * ReLU6(x + 3) / 6 15 | 16 | Args: 17 | inplace (bool): can optionally do the operation in-place. 
18 | Default: False. 19 | 20 | Returns: 21 | Tensor: The output tensor. 22 | """ 23 | 24 | def __init__(self, inplace=False): 25 | super(HSwish, self).__init__() 26 | self.act = nn.ReLU6(inplace) 27 | 28 | def forward(self, x): 29 | return x * self.act(x + 3) / 6 30 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/cnn/bricks/registry.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from custom_mmpkg.custom_mmcv.utils import Registry 3 | 4 | CONV_LAYERS = Registry('conv layer') 5 | NORM_LAYERS = Registry('norm layer') 6 | ACTIVATION_LAYERS = Registry('activation layer') 7 | PADDING_LAYERS = Registry('padding layer') 8 | UPSAMPLE_LAYERS = Registry('upsample layer') 9 | PLUGIN_LAYERS = Registry('plugin layer') 10 | 11 | DROPOUT_LAYERS = Registry('drop out layers') 12 | POSITIONAL_ENCODING = Registry('position encoding') 13 | ATTENTION = Registry('attention') 14 | FEEDFORWARD_NETWORK = Registry('feed-forward Network') 15 | TRANSFORMER_LAYER = Registry('transformerLayer') 16 | TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence') 17 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/cnn/bricks/scale.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | class Scale(nn.Module): 7 | """A learnable scale parameter. 8 | 9 | This layer scales the input by a learnable factor. It multiplies a 10 | learnable scale parameter of shape (1,) with input of any shape. 11 | 12 | Args: 13 | scale (float): Initial value of scale factor. Default: 1.0 14 | """ 15 | 16 | def __init__(self, scale=1.0): 17 | super(Scale, self).__init__() 18 | self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) 19 | 20 | def forward(self, x): 21 | return x * self.scale 22 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/cnn/bricks/swish.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | import torch.nn as nn 4 | 5 | from .registry import ACTIVATION_LAYERS 6 | 7 | 8 | @ACTIVATION_LAYERS.register_module() 9 | class Swish(nn.Module): 10 | """Swish Module. 11 | 12 | This module applies the swish function: 13 | 14 | .. math:: 15 | Swish(x) = x * Sigmoid(x) 16 | 17 | Returns: 18 | Tensor: The output tensor. 19 | """ 20 | 21 | def __init__(self): 22 | super(Swish, self).__init__() 23 | 24 | def forward(self, x): 25 | return x * torch.sigmoid(x) 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .test import (collect_results_cpu, collect_results_gpu, multi_gpu_test, 3 | single_gpu_test) 4 | 5 | __all__ = [ 6 | 'collect_results_cpu', 'collect_results_gpu', 'multi_gpu_test', 7 | 'single_gpu_test' 8 | ] 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/fileio/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .file_client import BaseStorageBackend, FileClient 3 | from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler 4 | from .io import dump, load, register_handler 5 | from .parse import dict_from_file, list_from_file 6 | 7 | __all__ = [ 8 | 'BaseStorageBackend', 'FileClient', 'load', 'dump', 'register_handler', 9 | 'BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler', 10 | 'list_from_file', 'dict_from_file' 11 | ] 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/fileio/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base import BaseFileHandler 3 | from .json_handler import JsonHandler 4 | from .pickle_handler import PickleHandler 5 | from .yaml_handler import YamlHandler 6 | 7 | __all__ = ['BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler'] 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/fileio/handlers/pickle_handler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import pickle 3 | 4 | from .base import BaseFileHandler 5 | 6 | 7 | class PickleHandler(BaseFileHandler): 8 | 9 | str_like = False 10 | 11 | def load_from_fileobj(self, file, **kwargs): 12 | return pickle.load(file, **kwargs) 13 | 14 | def load_from_path(self, filepath, **kwargs): 15 | return super(PickleHandler, self).load_from_path( 16 | filepath, mode='rb', **kwargs) 17 | 18 | def dump_to_str(self, obj, **kwargs): 19 | kwargs.setdefault('protocol', 2) 20 | return pickle.dumps(obj, **kwargs) 21 | 22 | def dump_to_fileobj(self, obj, file, **kwargs): 23 | kwargs.setdefault('protocol', 2) 24 | pickle.dump(obj, file, **kwargs) 25 | 26 | def dump_to_path(self, obj, filepath, **kwargs): 27 | super(PickleHandler, self).dump_to_path( 28 | obj, filepath, mode='wb', **kwargs) 29 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/fileio/handlers/yaml_handler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import yaml 3 | 4 | try: 5 | from yaml import CLoader as Loader, CDumper as Dumper 6 | except ImportError: 7 | from yaml import Loader, Dumper 8 | 9 | from .base import BaseFileHandler # isort:skip 10 | 11 | 12 | class YamlHandler(BaseFileHandler): 13 | 14 | def load_from_fileobj(self, file, **kwargs): 15 | kwargs.setdefault('Loader', Loader) 16 | return yaml.load(file, **kwargs) 17 | 18 | def dump_to_fileobj(self, obj, file, **kwargs): 19 | kwargs.setdefault('Dumper', Dumper) 20 | yaml.dump(obj, file, **kwargs) 21 | 22 | def dump_to_str(self, obj, **kwargs): 23 | kwargs.setdefault('Dumper', Dumper) 24 | return yaml.dump(obj, **kwargs) 25 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/model_zoo/deprecated.json: -------------------------------------------------------------------------------- 1 | { 2 | "resnet50_caffe": "detectron/resnet50_caffe", 3 | "resnet50_caffe_bgr": "detectron2/resnet50_caffe_bgr", 4 | "resnet101_caffe": "detectron/resnet101_caffe", 5 | "resnet101_caffe_bgr": "detectron2/resnet101_caffe_bgr" 6 | } 7 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/parallel/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .collate import collate 3 | from .data_container import DataContainer 4 | from .data_parallel import MMDataParallel 5 | from .distributed import MMDistributedDataParallel 6 | from .registry import MODULE_WRAPPERS 7 | from .scatter_gather import scatter, scatter_kwargs 8 | from .utils import is_module_wrapper 9 | 10 | __all__ = [ 11 | 'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel', 12 | 'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS' 13 | ] 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/parallel/registry.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from torch.nn.parallel import DataParallel, DistributedDataParallel 3 | 4 | from custom_mmpkg.custom_mmcv.utils import Registry 5 | 6 | MODULE_WRAPPERS = Registry('module wrapper') 7 | MODULE_WRAPPERS.register_module(module=DataParallel) 8 | MODULE_WRAPPERS.register_module(module=DistributedDataParallel) 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/parallel/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .registry import MODULE_WRAPPERS 3 | 4 | 5 | def is_module_wrapper(module): 6 | """Check if a module is a module wrapper. 7 | 8 | The following 3 modules in MMCV (and their subclasses) are regarded as 9 | module wrappers: DataParallel, DistributedDataParallel, 10 | MMDistributedDataParallel (the deprecated version). You may add your own 11 | module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS. 12 | 13 | Args: 14 | module (nn.Module): The module to be checked. 15 | 16 | Returns: 17 | bool: True if the input module is a module wrapper.
18 | """ 19 | module_wrappers = tuple(MODULE_WRAPPERS.module_dict.values()) 20 | return isinstance(module, module_wrappers) 21 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import copy 3 | 4 | from ..utils import Registry 5 | 6 | RUNNERS = Registry('runner') 7 | RUNNER_BUILDERS = Registry('runner builder') 8 | 9 | 10 | def build_runner_constructor(cfg): 11 | return RUNNER_BUILDERS.build(cfg) 12 | 13 | 14 | def build_runner(cfg, default_args=None): 15 | runner_cfg = copy.deepcopy(cfg) 16 | constructor_type = runner_cfg.pop('constructor', 17 | 'DefaultRunnerConstructor') 18 | runner_constructor = build_runner_constructor( 19 | dict( 20 | type=constructor_type, 21 | runner_cfg=runner_cfg, 22 | default_args=default_args)) 23 | runner = runner_constructor() 24 | return runner 25 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/hooks/closure.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .hook import HOOKS, Hook 3 | 4 | 5 | @HOOKS.register_module() 6 | class ClosureHook(Hook): 7 | 8 | def __init__(self, fn_name, fn): 9 | assert hasattr(self, fn_name) 10 | assert callable(fn) 11 | setattr(self, fn_name, fn) 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/hooks/iter_timer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import time 3 | 4 | from .hook import HOOKS, Hook 5 | 6 | 7 | @HOOKS.register_module() 8 | class IterTimerHook(Hook): 9 | 10 | def before_epoch(self, runner): 11 | self.t = time.time() 12 | 13 | def before_iter(self, runner): 14 | runner.log_buffer.update({'data_time': time.time() - self.t}) 15 | 16 | def after_iter(self, runner): 17 | runner.log_buffer.update({'time': time.time() - self.t}) 18 | self.t = time.time() 19 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .base import LoggerHook 3 | from .dvclive import DvcliveLoggerHook 4 | from .mlflow import MlflowLoggerHook 5 | from .neptune import NeptuneLoggerHook 6 | from .pavi import PaviLoggerHook 7 | from .tensorboard import TensorboardLoggerHook 8 | from .text import TextLoggerHook 9 | from .wandb import WandbLoggerHook 10 | 11 | __all__ = [ 12 | 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook', 13 | 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook', 14 | 'NeptuneLoggerHook', 'DvcliveLoggerHook' 15 | ] 16 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/hooks/memory.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import torch 3 | 4 | from .hook import HOOKS, Hook 5 | 6 | 7 | @HOOKS.register_module() 8 | class EmptyCacheHook(Hook): 9 | 10 | def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): 11 | self._before_epoch = before_epoch 12 | self._after_epoch = after_epoch 13 | self._after_iter = after_iter 14 | 15 | def after_iter(self, runner): 16 | if self._after_iter: 17 | torch.cuda.empty_cache() 18 | 19 | def before_epoch(self, runner): 20 | if self._before_epoch: 21 | torch.cuda.empty_cache() 22 | 23 | def after_epoch(self, runner): 24 | if self._after_epoch: 25 | torch.cuda.empty_cache() 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/hooks/sampler_seed.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .hook import HOOKS, Hook 3 | 4 | 5 | @HOOKS.register_module() 6 | class DistSamplerSeedHook(Hook): 7 | """Data-loading sampler for distributed training. 8 | 9 | During distributed training, it is only useful in conjunction with 10 | :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same 11 | purpose with :obj:`IterLoader`. 12 | """ 13 | 14 | def before_epoch(self, runner): 15 | if hasattr(runner.data_loader.sampler, 'set_epoch'): 16 | # in case the data loader uses `SequentialSampler` in PyTorch 17 | runner.data_loader.sampler.set_epoch(runner.epoch) 18 | elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'): 19 | # the batch sampler in PyTorch wraps the sampler as its attribute. 20 | runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch) 21 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/hooks/sync_buffer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from ..dist_utils import allreduce_params 3 | from .hook import HOOKS, Hook 4 | 5 | 6 | @HOOKS.register_module() 7 | class SyncBuffersHook(Hook): 8 | """Synchronize model buffers such as running_mean and running_var in BN at 9 | the end of each epoch. 10 | 11 | Args: 12 | distributed (bool): Whether distributed training is used. It is 13 | effective only for distributed training. Defaults to True. 14 | """ 15 | 16 | def __init__(self, distributed=True): 17 | self.distributed = distributed 18 | 19 | def after_epoch(self, runner): 20 | """All-reduce model buffers at the end of each epoch.""" 21 | if self.distributed: 22 | allreduce_params(runner.model.buffers()) 23 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/runner/optimizer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .builder import (OPTIMIZER_BUILDERS, OPTIMIZERS, build_optimizer, 3 | build_optimizer_constructor) 4 | from .default_constructor import DefaultOptimizerConstructor 5 | 6 | __all__ = [ 7 | 'OPTIMIZER_BUILDERS', 'OPTIMIZERS', 'DefaultOptimizerConstructor', 8 | 'build_optimizer', 'build_optimizer_constructor' 9 | ] 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/utils/trace.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import torch 4 | 5 | from custom_mmpkg.custom_mmcv.utils import digit_version 6 | 7 | 8 | def is_jit_tracing() -> bool: 9 | if (torch.__version__ != 'parrots' 10 | and digit_version(torch.__version__) >= digit_version('1.6.0')): 11 | on_trace = torch.jit.is_tracing() 12 | # In PyTorch 1.6, torch.jit.is_tracing has a bug. 13 | # Refers to https://github.com/pytorch/pytorch/issues/42448 14 | if isinstance(on_trace, bool): 15 | return on_trace 16 | else: 17 | return torch._C._is_tracing() 18 | else: 19 | warnings.warn( 20 | 'torch.jit.is_tracing is only supported after v1.6.0. ' 21 | 'Therefore is_tracing returns False automatically. Please ' 22 | 'set on_trace manually if you are using trace.', UserWarning) 23 | return False 24 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/video/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .io import Cache, VideoReader, frames2video 3 | from .optflow import (dequantize_flow, flow_from_bytes, flow_warp, flowread, 4 | flowwrite, quantize_flow, sparse_flow_from_bytes) 5 | from .processing import concat_video, convert_video, cut_video, resize_video 6 | 7 | __all__ = [ 8 | 'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video', 9 | 'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow', 10 | 'dequantize_flow', 'flow_warp', 'flow_from_bytes', 'sparse_flow_from_bytes' 11 | ] 12 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmcv/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .color import Color, color_val 3 | from .image import imshow, imshow_bboxes, imshow_det_bboxes 4 | from .optflow import flow2rgb, flowshow, make_color_wheel 5 | 6 | __all__ = [ 7 | 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes', 8 | 'flowshow', 'flow2rgb', 'make_color_wheel' 9 | ] 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import inference_segmentor, init_segmentor, show_result_pyplot 2 | from .test import multi_gpu_test, single_gpu_test 3 | from .train import get_root_logger, set_random_seed, train_segmentor 4 | 5 | __all__ = [ 6 | 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', 7 | 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', 8 | 'show_result_pyplot' 9 | ] 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluation import * # noqa: F401, F403 2 | from .seg import * # noqa: F401, F403 3 | from .utils import * # noqa: F401, F403 4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import get_classes, get_palette 2 | from .eval_hooks import DistEvalHook, EvalHook 3 | from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou 4 | 5 | __all__ = [ 6 | 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore', 7 | 'eval_metrics', 'get_classes', 'get_palette' 8 | ] 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/seg/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_pixel_sampler 2 | from .sampler import BasePixelSampler, OHEMPixelSampler 3 | 4 | __all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/seg/builder.py: -------------------------------------------------------------------------------- 1 | from custom_mmpkg.custom_mmcv.utils import Registry, build_from_cfg 2 | 3 | PIXEL_SAMPLERS = Registry('pixel sampler') 4 | 5 | 6 | def build_pixel_sampler(cfg, **default_args): 7 | """Build pixel sampler for segmentation map.""" 8 | return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/seg/sampler/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_pixel_sampler import BasePixelSampler 2 | from .ohem_pixel_sampler import OHEMPixelSampler 3 | 4 | __all__ = ['BasePixelSampler', 'OHEMPixelSampler'] 5 | -------------------------------------------------------------------------------- 
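Editor's example (not part of the repository listing): the vendored mmcv/mmseg code above builds objects from string-keyed registries. A class is registered once (e.g. @HOOKS.register_module(), @DATASETS.register_module()), and build_from_cfg later instantiates it from a config dict whose 'type' key names the class. Below is a minimal sketch of that pattern using the PIXEL_SAMPLERS registry and build_pixel_sampler shown above; DemoSampler and its thresh argument are illustrative assumptions, not classes shipped in this repository.

from custom_mmpkg.custom_mmseg.core.seg.builder import PIXEL_SAMPLERS, build_pixel_sampler
from custom_mmpkg.custom_mmseg.core.seg.sampler import BasePixelSampler


@PIXEL_SAMPLERS.register_module()
class DemoSampler(BasePixelSampler):
    """Hypothetical sampler, used only to illustrate the registry pattern."""

    def __init__(self, thresh=0.5, **kwargs):
        super().__init__(**kwargs)
        self.thresh = thresh

    def sample(self, seg_logit, seg_label):
        # Placeholder logic: keep every labelled pixel.
        return seg_label >= 0


# build_from_cfg pops 'type', looks the class up in the registry, and forwards
# the remaining keys to the constructor as keyword arguments.
sampler = build_pixel_sampler(dict(type='DemoSampler', thresh=0.7))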
/src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/seg/sampler/base_pixel_sampler.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BasePixelSampler(metaclass=ABCMeta): 5 | """Base class of pixel sampler.""" 6 | 7 | def __init__(self, **kwargs): 8 | pass 9 | 10 | @abstractmethod 11 | def sample(self, seg_logit, seg_label): 12 | """Placeholder for sample function.""" 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .misc import add_prefix 2 | 3 | __all__ = ['add_prefix'] 4 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/core/utils/misc.py: -------------------------------------------------------------------------------- 1 | def add_prefix(inputs, prefix): 2 | """Add prefix for dict. 3 | 4 | Args: 5 | inputs (dict): The input dict with str keys. 6 | prefix (str): The prefix to add. 7 | 8 | Returns: 9 | 10 | dict: The dict with keys updated with ``prefix``. 11 | """ 12 | 13 | outputs = dict() 14 | for name, value in inputs.items(): 15 | outputs[f'{prefix}.{name}'] = value 16 | 17 | return outputs 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .ade import ADE20KDataset 2 | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset 3 | from .chase_db1 import ChaseDB1Dataset 4 | from .cityscapes import CityscapesDataset 5 | from .custom import CustomDataset 6 | from .dataset_wrappers import ConcatDataset, RepeatDataset 7 | from .drive import DRIVEDataset 8 | from .hrf import HRFDataset 9 | from .pascal_context import PascalContextDataset, PascalContextDataset59 10 | from .stare import STAREDataset 11 | from .voc import PascalVOCDataset 12 | 13 | __all__ = [ 14 | 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 15 | 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', 16 | 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', 17 | 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', 18 | 'STAREDataset' 19 | ] 20 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/datasets/chase_db1.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class ChaseDB1Dataset(CustomDataset): 9 | """Chase_db1 dataset. 10 | 11 | In segmentation map annotation for Chase_db1, 0 stands for background, 12 | which is included in 2 categories. ``reduce_zero_label`` is fixed to False. 13 | The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '_1stHO.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(ChaseDB1Dataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='_1stHO.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/datasets/drive.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class DRIVEDataset(CustomDataset): 9 | """DRIVE dataset. 10 | 11 | In segmentation map annotation for DRIVE, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '_manual1.png'. 15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(DRIVEDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='_manual1.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/datasets/hrf.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class HRFDataset(CustomDataset): 9 | """HRF dataset. 10 | 11 | In segmentation map annotation for HRF, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(HRFDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from .compose import Compose 2 | from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, 3 | Transpose, to_tensor) 4 | from .loading import LoadAnnotations, LoadImageFromFile 5 | from .test_time_aug import MultiScaleFlipAug 6 | from .transforms import (CLAHE, AdjustGamma, Normalize, Pad, 7 | PhotoMetricDistortion, RandomCrop, RandomFlip, 8 | RandomRotate, Rerange, Resize, RGB2Gray, SegRescale) 9 | 10 | __all__ = [ 11 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 12 | 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', 13 | 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 14 | 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', 15 | 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray' 16 | ] 17 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/datasets/stare.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class STAREDataset(CustomDataset): 9 | """STARE dataset. 10 | 11 | In segmentation map annotation for STARE, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '.ah.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(STAREDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='.ah.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, 3 | build_head, build_loss, build_segmentor) 4 | from .decode_heads import * # noqa: F401,F403 5 | from .losses import * # noqa: F401,F403 6 | from .necks import * # noqa: F401,F403 7 | from .segmentors import * # noqa: F401,F403 8 | 9 | __all__ = [ 10 | 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', 11 | 'build_head', 'build_loss', 'build_segmentor' 12 | ] 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .cgnet import CGNet 2 | # from .fast_scnn import FastSCNN 3 | from .hrnet import HRNet 4 | from .mobilenet_v2 import MobileNetV2 5 | from .mobilenet_v3 import MobileNetV3 6 | from .resnest import ResNeSt 7 | from .resnet import ResNet, ResNetV1c, ResNetV1d 8 | from .resnext import ResNeXt 9 | from .unet import UNet 10 | from .vit import VisionTransformer 11 | 12 | __all__ = [ 13 | 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 14 | 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', 15 | 'VisionTransformer' 16 | ] 17 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from .accuracy import Accuracy, accuracy 2 | from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, 3 | cross_entropy, mask_cross_entropy) 4 | from .dice_loss import DiceLoss 5 | from .lovasz_loss import LovaszLoss 6 | from .utils import reduce_loss, weight_reduce_loss, weighted_loss 7 | 8 | __all__ = [ 9 | 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', 10 | 'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', 11 | 'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss' 12 | ] 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .fpn import FPN 2 | from .multilevel_neck import MultiLevelNeck 3 | 4 | __all__ = ['FPN', 'MultiLevelNeck'] 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/models/segmentors/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseSegmentor 2 | from .cascade_encoder_decoder import CascadeEncoderDecoder 3 | from .encoder_decoder import EncoderDecoder 4 | 5 | 
__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder'] 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .drop import DropPath 2 | from .inverted_residual import InvertedResidual, InvertedResidualV3 3 | from .make_divisible import make_divisible 4 | from .res_layer import ResLayer 5 | from .se_layer import SELayer 6 | from .self_attention_block import SelfAttentionBlock 7 | from .up_conv_block import UpConvBlock 8 | from .weight_init import trunc_normal_ 9 | 10 | __all__ = [ 11 | 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual', 12 | 'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'DropPath', 'trunc_normal_' 13 | ] 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .encoding import Encoding 2 | from .wrappers import Upsample, resize 3 | 4 | __all__ = ['Upsample', 'resize', 'Encoding'] 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .logger import get_root_logger 3 | 4 | __all__ = ['get_root_logger', 'collect_env'] 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_mmpkg/custom_mmseg/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | from custom_mmpkg.custom_mmcv.utils import collect_env as collect_base_env 2 | from custom_mmpkg.custom_mmcv.utils import get_git_hash 3 | 4 | import custom_mmpkg.custom_mmseg as mmseg 5 | 6 | 7 | def collect_env(): 8 | """Collect the information of the running environments.""" 9 | env_info = collect_base_env() 10 | env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' 11 | 12 | return env_info 13 | 14 | 15 | if __name__ == '__main__': 16 | for name, val in collect_env().items(): 17 | print('{}: {}'.format(name, val)) 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from . import data # register all new datasets 3 | from . import modeling 4 | 5 | # config 6 | from .config import * 7 | 8 | # models 9 | from .oneformer_model import OneFormer -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from . 
import datasets 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/data/bpe_simple_vocab_16e6.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/data/bpe_simple_vocab_16e6.txt.gz -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/data/dataset_mappers/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from . import ( 2 | register_ade20k_panoptic, 3 | register_cityscapes_panoptic, 4 | register_coco_panoptic_annos_semseg, 5 | register_ade20k_instance, 6 | register_coco_panoptic2instance, 7 | ) 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .detection_coco_evaluator import * 2 | from .coco_evaluator import * 3 | from .cityscapes_evaluation import CityscapesInstanceEvaluator -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbone.swin import D2SwinTransformer 2 | from .backbone.dinat import D2DiNAT 3 | from .pixel_decoder.fpn import BasePixelDecoder 4 | from .pixel_decoder.msdeformattn import MSDeformAttnPixelDecoder 5 | from .meta_arch.oneformer_head import OneFormerHead 6 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/backbone/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/meta_arch/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/pixel_decoder/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/pixel_decoder/ops/functions/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------------------------ 2 | # Deformable DETR 3 | # Copyright (c) 2020 SenseTime. 
All Rights Reserved. 4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 5 | # ------------------------------------------------------------------------------------------------ 6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 7 | # ------------------------------------------------------------------------------------------------ 8 | 9 | # Copyright (c) Facebook, Inc. and its affiliates. 10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR 11 | 12 | from .ms_deform_attn_func import MSDeformAttnFunction 13 | 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/pixel_decoder/ops/make.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ------------------------------------------------------------------------------------------------ 3 | # Deformable DETR 4 | # Copyright (c) 2020 SenseTime. All Rights Reserved. 5 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 6 | # ------------------------------------------------------------------------------------------------ 7 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 8 | # ------------------------------------------------------------------------------------------------ 9 | 10 | # Copyright (c) Facebook, Inc. and its affiliates. 11 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR 12 | 13 | FORCE_CUDA=1 python setup.py build install 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/pixel_decoder/ops/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------------------------ 2 | # Deformable DETR 3 | # Copyright (c) 2020 SenseTime. All Rights Reserved. 4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details] 5 | # ------------------------------------------------------------------------------------------------ 6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 7 | # ------------------------------------------------------------------------------------------------ 8 | 9 | # Copyright (c) Facebook, Inc. and its affiliates. 10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR 11 | 12 | from .ms_deform_attn import MSDeformAttn 13 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/modeling/transformer_decoder/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | from .oneformer_transformer_decoder import ContrastiveMultiScaleMaskedTransformerDecoder -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_oneformer/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | from .events import setup_wandb, WandbWriter -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_pycocotools/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'tylin' 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_qudida/__version__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.0.4" 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ 3 | is_scriptable, is_exportable, set_scriptable, set_exportable, has_pretrained_cfg_key, is_pretrained_cfg_key, \ 4 | get_pretrained_cfg_value, is_model_pretrained 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ 2 | rand_augment_transform, auto_augment_transform 3 | from .config import resolve_data_config 4 | from .constants import * 5 | from .dataset import ImageDataset, IterableImageDataset, AugMixDataset 6 | from .dataset_factory import create_dataset 7 | from .loader import create_loader 8 | from .mixup import Mixup, FastCollateMixup 9 | from .parsers import create_parser,\ 10 | get_img_extensions, is_img_extension, set_img_extensions, add_img_extensions, del_img_extensions 11 | from .real_labels import RealLabelsImagenet 12 | from .transforms import * 13 | from .transforms_factory import create_transform 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/data/constants.py: -------------------------------------------------------------------------------- 1 | DEFAULT_CROP_PCT = 0.875 2 | IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) 3 | IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) 4 | IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) 5 | IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) 6 | IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) 7 | IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) 8 | OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073) 9 | OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711) 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/data/parsers/__init__.py: -------------------------------------------------------------------------------- 1 | from .parser_factory import create_parser 2 | from .img_extensions import * 3 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/data/parsers/parser.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | 3 | 4 | class Parser: 5 | def __init__(self): 6 | pass 7 | 8 | @abstractmethod 9 | def _filename(self, index, 
basename=False, absolute=False): 10 | pass 11 | 12 | def filename(self, index, basename=False, absolute=False): 13 | return self._filename(index, basename=basename, absolute=absolute) 14 | 15 | def filenames(self, basename=False, absolute=False): 16 | return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))] 17 | 18 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel 2 | from .binary_cross_entropy import BinaryCrossEntropy 3 | from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy 4 | from .jsd import JsdCrossEntropy 5 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/models/layers/linear.py: -------------------------------------------------------------------------------- 1 | """ Linear layer (alternate definition) 2 | """ 3 | import torch 4 | import torch.nn.functional as F 5 | from torch import nn as nn 6 | 7 | 8 | class Linear(nn.Linear): 9 | r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` 10 | 11 | Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting 12 | weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. 13 | """ 14 | def forward(self, input: torch.Tensor) -> torch.Tensor: 15 | if torch.jit.is_scripting(): 16 | bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None 17 | return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) 18 | else: 19 | return F.linear(input, self.weight, self.bias) 20 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/models/layers/trace_utils.py: -------------------------------------------------------------------------------- 1 | try: 2 | from torch import _assert 3 | except ImportError: 4 | def _assert(condition: bool, message: str): 5 | assert condition, message 6 | 7 | 8 | def _float_to_int(x: float) -> int: 9 | """ 10 | Symbolic tracing helper to substitute for inbuilt `int`. 
11 | Hint: Inbuilt `int` can't accept an argument of type `Proxy` 12 | """ 13 | return int(x) 14 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/optim/__init__.py: -------------------------------------------------------------------------------- 1 | from .adabelief import AdaBelief 2 | from .adafactor import Adafactor 3 | from .adahessian import Adahessian 4 | from .adamp import AdamP 5 | from .adamw import AdamW 6 | from .lamb import Lamb 7 | from .lars import Lars 8 | from .lookahead import Lookahead 9 | from .madgrad import MADGRAD 10 | from .nadam import Nadam 11 | from .nvnovograd import NvNovoGrad 12 | from .radam import RAdam 13 | from .rmsprop_tf import RMSpropTF 14 | from .sgdp import SGDP 15 | from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs 16 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | from .cosine_lr import CosineLRScheduler 2 | from .multistep_lr import MultiStepLRScheduler 3 | from .plateau_lr import PlateauLRScheduler 4 | from .poly_lr import PolyLRScheduler 5 | from .step_lr import StepLRScheduler 6 | from .tanh_lr import TanhLRScheduler 7 | 8 | from .scheduler_factory import create_scheduler 9 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .agc import adaptive_clip_grad 2 | from .checkpoint_saver import CheckpointSaver 3 | from .clip_grad import dispatch_clip_grad 4 | from .cuda import ApexScaler, NativeScaler 5 | from .decay_batch import decay_batch_step, check_batch_size_retry 6 | from .distributed import distribute_bn, reduce_tensor 7 | from .jit import set_jit_legacy, set_jit_fuser 8 | from .log import setup_default_logging, FormatterNoInfo 9 | from .metrics import AverageMeter, accuracy 10 | from .misc import natural_key, add_bool_arg 11 | from .model import unwrap_model, get_state_dict, freeze, unfreeze 12 | from .model_ema import ModelEma, ModelEmaV2 13 | from .random import random_seed 14 | from .summary import update_summary, get_outdir 15 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/utils/clip_grad.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from custom_timm.utils.agc import adaptive_clip_grad 4 | 5 | 6 | def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): 7 | """ Dispatch to gradient clipping method 8 | 9 | Args: 10 | parameters (Iterable): model parameters to clip 11 | value (float): clipping value/factor/norm, mode dependant 12 | mode (str): clipping mode, one of 'norm', 'value', 'agc' 13 | norm_type (float): p-norm, default 2.0 14 | """ 15 | if mode == 'norm': 16 | torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) 17 | elif mode == 'value': 18 | torch.nn.utils.clip_grad_value_(parameters, value) 19 | elif mode == 'agc': 20 | adaptive_clip_grad(parameters, value, norm_type=norm_type) 21 | else: 22 | assert False, f"Unknown clip mode ({mode})." 
23 | 24 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/utils/misc.py: -------------------------------------------------------------------------------- 1 | """ Misc utils 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import re 6 | 7 | 8 | def natural_key(string_): 9 | """See http://www.codinghorror.com/blog/archives/001018.html""" 10 | return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] 11 | 12 | 13 | def add_bool_arg(parser, name, default=False, help=''): 14 | dest_name = name.replace('-', '_') 15 | group = parser.add_mutually_exclusive_group(required=False) 16 | group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) 17 | group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) 18 | parser.set_defaults(**{dest_name: default}) 19 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/utils/random.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import torch 4 | 5 | 6 | def random_seed(seed=42, rank=0): 7 | torch.manual_seed(seed + rank) 8 | np.random.seed(seed + rank) 9 | random.seed(seed + rank) 10 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/custom_timm/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.6.13' 2 | -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/src/wrapper_for_mps/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from comfy.model_management import get_torch_device 3 | 4 | device = get_torch_device() 5 | #https://github.com/microsoft/DirectML/issues/414#issuecomment-1541319479 6 | def sparse_to_dense(sparse_tensor): 7 | return sparse_tensor.to_dense() -------------------------------------------------------------------------------- /src/inference_core_nodes/controlnet_preprocessors/tests/pose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/controlnet_preprocessors/tests/pose.png -------------------------------------------------------------------------------- /src/inference_core_nodes/layer_diffuse/README.md: -------------------------------------------------------------------------------- 1 | ## Layer Diffuse 2 | 3 | Based on or modified from: [huchenlei/ComfyUI-layerdiffuse](https://github.com/huchenlei/ComfyUI-layerdiffuse) @ 151f7460bbc9d7437d4f0010f21f80178f7a84a6 4 | 5 | License: Apache-2.0 6 | 7 | 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/layer_diffuse/__init__.py: -------------------------------------------------------------------------------- 1 | from .layered_diffusion import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS 2 | 3 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] -------------------------------------------------------------------------------- /src/inference_core_nodes/layer_diffuse/lib_layerdiffusion/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LykosAI/ComfyUI-Inference-Core-Nodes/d6f7fbc8b1a2419faf26cf5f0321d60a4c001bb6/src/inference_core_nodes/layer_diffuse/lib_layerdiffusion/__init__.py -------------------------------------------------------------------------------- /src/inference_core_nodes/layer_diffuse/lib_layerdiffusion/enums.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class ResizeMode(Enum): 5 | RESIZE = "Just Resize" 6 | CROP_AND_RESIZE = "Crop and Resize" 7 | RESIZE_AND_FILL = "Resize and Fill" 8 | 9 | def int_value(self): 10 | if self == ResizeMode.RESIZE: 11 | return 0 12 | elif self == ResizeMode.CROP_AND_RESIZE: 13 | return 1 14 | elif self == ResizeMode.RESIZE_AND_FILL: 15 | return 2 16 | return 0 17 | 18 | 19 | class StableDiffusionVersion(Enum): 20 | """The version family of stable diffusion model.""" 21 | 22 | SD1x = "SD15" 23 | SDXL = "SDXL" 24 | -------------------------------------------------------------------------------- /src/inference_core_nodes/prompt_expansion/README.md: -------------------------------------------------------------------------------- 1 | ## Prompt Expansion 2 | 3 | Based on or modified from: [meap158/ComfyUI-Prompt-Expansion](https://github.com/meap158/ComfyUI-Prompt-Expansion) @ 19f487c5ae8f207fd9d5f6983ab46b63ef2f4bc3 4 | 5 | License: AGPLv3 6 | 7 | 8 | -------------------------------------------------------------------------------- /src/inference_core_nodes/prompt_expansion/__init__.py: -------------------------------------------------------------------------------- 1 | from .prompt_expansion import ( 2 | NODE_CLASS_MAPPINGS, 3 | NODE_DISPLAY_NAME_MAPPINGS, 4 | MODEL_FOLDER_NAME, 5 | ) 6 | 7 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] 8 | 9 | 10 | def _add_folder_paths(): 11 | from pathlib import Path 12 | import folder_paths 13 | 14 | folder_names_and_paths = folder_paths.folder_names_and_paths 15 | 16 | model_folder_path = str(Path(folder_paths.models_dir).joinpath(MODEL_FOLDER_NAME)) 17 | supported_extensions = {'.ckpt', '.pt', '.bin', '.pth', '.safetensors'} 18 | 19 | if MODEL_FOLDER_NAME in folder_names_and_paths: 20 | folder_names_and_paths[MODEL_FOLDER_NAME][0].append(model_folder_path) 21 | else: 22 | folder_names_and_paths[MODEL_FOLDER_NAME] = ([model_folder_path], supported_extensions) 23 | 24 | 25 | _add_folder_paths() 26 | -------------------------------------------------------------------------------- /src/inference_core_nodes/prompt_expansion/util.py: -------------------------------------------------------------------------------- 1 | def remove_empty_str(items, default=None): 2 | items = [x for x in items if x != ""] 3 | if len(items) == 0 and default is not None: 4 | return [default] 5 | return items 6 | 7 | 8 | def join_prompts(*args, **kwargs): 9 | prompts = [str(x) for x in args if str(x) != ""] 10 | if len(prompts) == 0: 11 | return "" 12 | if len(prompts) == 1: 13 | return prompts[0] 14 | return ", ".join(prompts) 15 | --------------------------------------------------------------------------------
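Editor's example (not part of the repository listing): a quick usage sketch for the two prompt helpers defined in src/inference_core_nodes/prompt_expansion/util.py above. The import path is an assumption (it presumes the package is installed so that inference_core_nodes is importable); the expected values follow directly from the function bodies shown.

from inference_core_nodes.prompt_expansion.util import join_prompts, remove_empty_str

# Empty strings are dropped; the default is returned only when nothing is left.
assert remove_empty_str(["", "a photo of a cat", ""]) == ["a photo of a cat"]
assert remove_empty_str(["", ""], default="masterpiece") == ["masterpiece"]

# Non-empty fragments are joined with ", "; empty fragments are skipped.
assert join_prompts("a photo of a cat", "", "best quality") == "a photo of a cat, best quality"
assert join_prompts() == ""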