├── .dockerignore ├── .flake8 ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.yml ├── actions │ └── evaluations_environment_setup │ │ └── action.yml └── workflows │ ├── 1-scan-lint-build.yml │ ├── 2-test-stand-alone.yml │ ├── 3-test-docker.yml │ ├── ci_test.yml │ ├── release.yml │ ├── scenario-runner.yml │ ├── self-hosted.yml │ └── tests │ ├── release-dry-run.json │ └── scenario-evaluation-config.json ├── .gitignore ├── CITATION.cff ├── LICENSE ├── README.md ├── armory ├── __init__.py ├── __main__.py ├── arguments.py ├── art_experimental │ ├── __init__.py │ ├── attacks │ │ ├── __init__.py │ │ ├── carla_adversarial_texture.py │ │ ├── carla_mot_adversarial_patch.py │ │ ├── carla_mot_patch.py │ │ ├── carla_obj_det_adversarial_patch.py │ │ ├── carla_obj_det_patch.py │ │ ├── carla_obj_det_utils.py │ │ ├── cascading_attack.py │ │ ├── dapricot_patch.py │ │ ├── fgm_binary_search.py │ │ ├── frame.py │ │ ├── gradient_matching.py │ │ ├── kenansville_dft.py │ │ ├── patch.py │ │ ├── pgd_patch.py │ │ ├── poison_loader_audio.py │ │ ├── poison_loader_clbd.py │ │ ├── poison_loader_dlbd.py │ │ ├── poison_loader_obj_det.py │ │ ├── robust_dpatch.py │ │ ├── snr_pgd.py │ │ ├── sweep.py │ │ └── video_frame_border.py │ ├── defences │ │ ├── __init__.py │ │ ├── base.py │ │ ├── jpeg_compression_multichannel_image.py │ │ ├── jpeg_compression_normalized.py │ │ ├── random_affine_pytorch.py │ │ ├── transformer.py │ │ └── video_compression_normalized.py │ └── poison_detection │ │ ├── __init__.py │ │ └── random_filter.py ├── attacks │ ├── __init__.py │ └── librispeech_target_labels.py ├── baseline_models │ ├── __init__.py │ ├── keras │ │ ├── __init__.py │ │ ├── cifar.py │ │ ├── densenet121_resisc45.py │ │ ├── inception_resnet_v2.py │ │ ├── micronnet_gtsrb.py │ │ ├── mnist.py │ │ ├── resnet50.py │ │ └── so2sat.py │ ├── model_configs │ │ ├── __init__.py │ │ └── yolov3.cfg │ ├── pytorch │ │ ├── __init__.py │ │ ├── carla_goturn.py │ │ ├── carla_mot_frcnn_byte.py │ │ ├── carla_multimodality_object_detection_frcnn.py │ │ ├── carla_multimodality_object_detection_frcnn_robust_fusion.py │ │ ├── carla_single_modality_object_detection_frcnn.py │ │ ├── cifar.py │ │ ├── deep_speech.py │ │ ├── hubert_asr_large.py │ │ ├── micronnet_gtsrb.py │ │ ├── micronnet_gtsrb_bean_regularization.py │ │ ├── mnist.py │ │ ├── resnet18.py │ │ ├── resnet18_bean_regularization.py │ │ ├── resnet50.py │ │ ├── sincnet.py │ │ ├── ucf101_mars.py │ │ ├── xview_frcnn.py │ │ └── yolov3.py │ ├── tf_eager │ │ └── __init__.py │ └── tf_graph │ │ ├── __init__.py │ │ ├── audio_resnet50.py │ │ ├── mnist.py │ │ └── mscoco_frcnn.py ├── cli │ ├── __init__.py │ └── tools │ │ ├── generate_shapes.py │ │ ├── log_current_branch.py │ │ ├── plot_mAP_by_giou_with_patch_cli.py │ │ ├── rgb_depth_convert.py │ │ └── utils.py ├── configuration.py ├── data │ ├── __init__.py │ ├── adversarial │ │ ├── __init__.py │ │ ├── apricot_dev.py │ │ ├── apricot_metadata.py │ │ ├── apricot_test.py │ │ ├── carla_mot_dev.py │ │ ├── carla_mot_test.py │ │ ├── carla_obj_det_dev.py │ │ ├── carla_obj_det_test.py │ │ ├── carla_over_obj_det_dev.py │ │ ├── carla_over_obj_det_test.py │ │ ├── carla_video_tracking_dev.py │ │ ├── carla_video_tracking_test.py │ │ ├── dapricot_dev.py │ │ ├── dapricot_test.py │ │ ├── gtsrb_bh_poison_micronnet.py │ │ ├── imagenet_adversarial.py │ │ ├── librispeech_adversarial.py │ │ ├── pandas_proxy.py │ │ ├── resisc45_densenet121_univpatch_and_univperturbation_adversarial_224x224.py │ │ └── ucf101_mars_perturbation_and_patch_adversarial_112x112.py │ ├── 
adversarial_datasets.py │ ├── cached_s3_checksums │ │ ├── apricot_dev.txt │ │ ├── apricot_test.txt │ │ ├── carla_mot_dev.txt │ │ ├── carla_mot_test.txt │ │ ├── carla_obj_det_dev.txt │ │ ├── carla_obj_det_test.txt │ │ ├── carla_obj_det_train.txt │ │ ├── carla_over_obj_det_dev.txt │ │ ├── carla_over_obj_det_test.txt │ │ ├── carla_over_obj_det_train.txt │ │ ├── carla_video_tracking_dev.txt │ │ ├── carla_video_tracking_test.txt │ │ ├── cifar10.txt │ │ ├── cifar100.txt │ │ ├── coco.txt │ │ ├── dapricot_dev.txt │ │ ├── dapricot_test.txt │ │ ├── digit.txt │ │ ├── german_traffic_sign.txt │ │ ├── gtsrb_bh_poison_micronnet.txt │ │ ├── imagenet_adversarial.txt │ │ ├── imagenette.txt │ │ ├── librispeech.txt │ │ ├── librispeech_adversarial.txt │ │ ├── librispeech_dev_clean_split.txt │ │ ├── librispeech_full.txt │ │ ├── minicoco.txt │ │ ├── mnist.txt │ │ ├── resisc10_poison.txt │ │ ├── resisc45_densenet121_univpatch_and_univperturbation_adversarial224x224.txt │ │ ├── resisc45_split.txt │ │ ├── so2sat.txt │ │ ├── speech_commands.txt │ │ ├── ucf101.txt │ │ ├── ucf101_clean.txt │ │ ├── ucf101_mars_perturbation_and_patch_adversarial112x112.txt │ │ └── xview.txt │ ├── carla_object_detection │ │ ├── __init__.py │ │ └── carla_obj_det_train.py │ ├── carla_overhead_object_detection │ │ ├── __init__.py │ │ └── carla_over_obj_det_train.py │ ├── datasets.py │ ├── digit │ │ ├── __init__.py │ │ └── digit.py │ ├── german_traffic_sign │ │ ├── __init__.py │ │ └── german_traffic_sign.py │ ├── integrate_tfds.py │ ├── librispeech │ │ ├── __init__.py │ │ ├── librispeech_dev_clean_split.py │ │ └── librispeech_full.py │ ├── majority_masks │ │ ├── __init__.py │ │ └── speech_commands_majority_masks.npz │ ├── minicoco │ │ ├── __init__.py │ │ └── minicoco.py │ ├── model_weights.py │ ├── progress_percentage.py │ ├── pytorch_loader.py │ ├── resisc10 │ │ ├── __init__.py │ │ └── resisc10_poison.py │ ├── resisc45 │ │ ├── __init__.py │ │ ├── resisc45_dataset_partition.py │ │ └── resisc45_split.py │ ├── template_boilerplate.py │ ├── tfds_checksum.py │ ├── ucf101 │ │ ├── __init__.py │ │ └── ucf101_clean.py │ ├── url_checksums │ │ ├── apricot_dev.txt │ │ ├── apricot_test.txt │ │ ├── carla_mot_dev.txt │ │ ├── carla_mot_test.txt │ │ ├── carla_obj_det_dev.txt │ │ ├── carla_obj_det_test.txt │ │ ├── carla_obj_det_train.txt │ │ ├── carla_over_obj_det_dev.txt │ │ ├── carla_over_obj_det_test.txt │ │ ├── carla_over_obj_det_train.txt │ │ ├── carla_video_tracking_dev.txt │ │ ├── carla_video_tracking_test.txt │ │ ├── dapricot_dev.txt │ │ ├── dapricot_test.txt │ │ ├── digit.txt │ │ ├── german_traffic_sign.txt │ │ ├── gtsrb_bh_poison_micronnet.txt │ │ ├── imagenet_adversarial.txt │ │ ├── librispeech_adversarial.txt │ │ ├── librispeech_dev_clean_split.txt │ │ ├── librispeech_full.txt │ │ ├── resisc10_poison.txt │ │ ├── resisc45_densenet121_univpatch_and_univperturbation_adversarial_224x224.txt │ │ ├── resisc45_split.txt │ │ ├── ucf101_clean.txt │ │ ├── ucf101_mars_perturbation_and_patch_adversarial_112x112.txt │ │ └── xview.txt │ ├── utils.py │ └── xview │ │ ├── __init__.py │ │ └── xview.py ├── delayed_imports.py ├── docker │ ├── __init__.py │ ├── host_management.py │ ├── images.py │ └── management.py ├── environment.py ├── eval │ ├── __init__.py │ └── evaluator.py ├── instrument │ ├── __init__.py │ ├── config.py │ ├── export.py │ └── instrument.py ├── logs.py ├── metrics │ ├── __init__.py │ ├── common.py │ ├── compute.py │ ├── perturbation.py │ ├── poisoning.py │ ├── statistical.py │ └── task.py ├── paths.py ├── postprocessing │ ├── __init__.py │ ├── 
plot.py │ ├── plot_patch_aware_carla_metric.py │ └── plot_poisoning.py ├── scenarios │ ├── __init__.py │ ├── audio_asr.py │ ├── audio_classification.py │ ├── carla_mot.py │ ├── carla_object_detection.py │ ├── carla_video_tracking.py │ ├── dapricot_scenario.py │ ├── download_configs │ │ └── scenarios-set1.json │ ├── image_classification.py │ ├── main.py │ ├── multimodal_so2sat_scenario.py │ ├── object_detection.py │ ├── outputs.py │ ├── poison.py │ ├── poisoning_clbd.py │ ├── poisoning_obj_det.py │ ├── poisoning_sleeper_agent.py │ ├── poisoning_witches_brew.py │ ├── scenario.py │ ├── utils.py │ └── video_ucf101_scenario.py ├── utils │ ├── __init__.py │ ├── config_loading.py │ ├── config_schema.json │ ├── configuration.py │ ├── evaluation.py │ ├── external_repo.py │ ├── json_utils.py │ ├── labels.py │ ├── printing.py │ ├── shape_gen.ipynb │ ├── shape_gen.py │ ├── triggers │ │ ├── __init__.py │ │ ├── baby-on-board.png │ │ ├── bullet_holes.png │ │ ├── car_horn.wav │ │ ├── clapping.wav │ │ ├── copyright.png │ │ ├── dog_clicker.wav │ │ ├── globe.png │ │ ├── htbd.png │ │ ├── letter_A.png │ │ ├── peace.png │ │ ├── skull.png │ │ ├── student-driver.png │ │ ├── trigger_10.png │ │ ├── watermarking.png │ │ └── whistle.wav │ ├── typedef.py │ └── version.py └── validation │ ├── __init__.py │ └── test_config │ ├── __init__.py │ ├── conftest.py │ ├── pytest.ini │ └── test_model.py ├── docker ├── Dockerfile-armory ├── Dockerfile-base ├── Dockerfile-pytorch-deepspeech ├── Dockerfile-yolo ├── README.md ├── build-base.sh └── build.py ├── docs ├── CONTRIBUTING.md ├── adversarial_datasets.md ├── assets │ ├── docs-badge.svg │ └── logo.png ├── attacks.md ├── baseline_models.md ├── baseline_results │ ├── apricot_results.md │ ├── carla_mot_results.md │ ├── carla_od_results.md │ ├── carla_video_tracking_results.md │ ├── cifar10_dlbd_results.md │ ├── cifar10_sleeper_agent_results.md │ ├── cifar10_witches_brew_results.md │ ├── dapricot_results.md │ ├── gtsrb_clbd_results.md │ ├── gtsrb_dlbd_results.md │ ├── gtsrb_witches_brew_results.md │ ├── librispeech_asr_results.md │ ├── librispeech_audio_classification_results.md │ ├── object_detection_poisoning_results.md │ ├── resisc45_results.md │ ├── resisc_clbd_results.md │ ├── resisc_dlbd_results.md │ ├── so2sat_results.md │ ├── speech_commands_poison_results.md │ ├── ucf101_results.md │ └── xview_results.md ├── command_line.md ├── configuration_files.md ├── contributing │ └── self-review.md ├── dataset_licensing.md ├── datasets.md ├── developers │ ├── callchain.md │ ├── config-object.md │ ├── overall-plan.md │ └── testing.md ├── docker.md ├── exporting_data.md ├── external_repos.md ├── faqs.md ├── getting_started.md ├── index.md ├── instrumentation_examples.md ├── integrate_tensorflow_datasets.md ├── logging.md ├── metrics.md ├── no_docker_mode.md ├── poisoning.md ├── poisoning_object_detection.md ├── poisoning_witches_brew.md ├── scenarios.md ├── style.md ├── sweep_attacks.md └── utils.md ├── environment.yml ├── notebooks └── running_armory_scenarios_interactively.ipynb ├── pyproject.toml ├── scenario_configs ├── asr_librispeech_entailment.json ├── asr_librispeech_targeted.json ├── carla_multimodal_object_detection.json ├── carla_video_tracking.json ├── cifar10_baseline.json ├── eval1-4 │ ├── aprioct │ │ ├── apricot_frcnn.json │ │ └── apricot_frcnn_defended.json │ ├── asr_librispeech │ │ ├── librispeech_asr_imperceptible_defended.json │ │ ├── librispeech_asr_imperceptible_undefended.json │ │ ├── librispeech_asr_kenansville_defended.json │ │ ├── 
librispeech_asr_kenansville_undefended.json │ │ ├── librispeech_asr_pgd_defended.json │ │ ├── librispeech_asr_pgd_multipath_channel_undefended.json │ │ ├── librispeech_asr_pgd_undefended.json │ │ ├── librispeech_asr_snr_targeted.json │ │ └── librispeech_asr_snr_undefended.json │ ├── cifar │ │ └── cifar10_baseline.json │ ├── dapricot │ │ └── dapricot_frcnn_masked_pgd.json │ ├── mnist │ │ └── mnist_baseline.json │ ├── poisoning │ │ ├── gtsrb_scenario_clbd.json │ │ ├── gtsrb_scenario_clbd_bullethole.json │ │ ├── gtsrb_scenario_clbd_defended.json │ │ └── resisc10_poison_dlbd.json │ ├── resisc45 │ │ ├── resisc45_baseline_densenet121.json │ │ ├── resisc45_baseline_densenet121_cascade.json │ │ ├── resisc45_baseline_densenet121_finetune.json │ │ ├── resisc45_baseline_densenet121_sweep_eps.json │ │ └── resisc45_baseline_densenet121_targeted.json │ ├── so2sat │ │ ├── so2sat_eo_masked_pgd_defended.json │ │ ├── so2sat_eo_masked_pgd_undefended.json │ │ ├── so2sat_sar_masked_pgd_defended.json │ │ └── so2sat_sar_masked_pgd_undefended.json │ ├── speaker_id_librispeech │ │ ├── librispeech_baseline_sincnet.json │ │ ├── librispeech_baseline_sincnet_snr_pgd.json │ │ └── librispeech_baseline_sincnet_targeted.json │ ├── ucf101 │ │ ├── ucf101_baseline_finetune.json │ │ ├── ucf101_baseline_pretrained_targeted.json │ │ ├── ucf101_pretrained_flicker_defended.json │ │ ├── ucf101_pretrained_flicker_undefended.json │ │ ├── ucf101_pretrained_frame_saliency_defended.json │ │ ├── ucf101_pretrained_frame_saliency_undefended.json │ │ ├── ucf101_pretrained_masked_pgd_defended.json │ │ └── ucf101_pretrained_masked_pgd_undefended.json │ └── xview │ │ ├── xview_frcnn_masked_pgd_defended.json │ │ ├── xview_frcnn_masked_pgd_undefended.json │ │ ├── xview_frcnn_robust_dpatch_defended.json │ │ ├── xview_frcnn_robust_dpatch_undefended.json │ │ ├── xview_frcnn_sweep_patch_size.json │ │ └── xview_frcnn_targeted.json ├── eval5 │ ├── asr_librispeech │ │ ├── defended_entailment.json │ │ ├── defended_targeted_snr_pgd.json │ │ ├── defended_untargeted_snr_pgd.json │ │ ├── entailment.json │ │ ├── targeted_snr_pgd.json │ │ └── untargeted_snr_pgd.json │ ├── carla_object_detection │ │ ├── carla_obj_det_adversarialpatch_undefended.json │ │ ├── carla_obj_det_dpatch_defended.json │ │ ├── carla_obj_det_dpatch_undefended.json │ │ ├── carla_obj_det_multimodal_adversarialpatch_defended.json │ │ ├── carla_obj_det_multimodal_adversarialpatch_undefended.json │ │ ├── carla_obj_det_multimodal_dpatch_defended.json │ │ └── carla_obj_det_multimodal_dpatch_undefended.json │ ├── carla_video_tracking │ │ ├── carla_video_tracking_goturn_advtextures_defended.json │ │ └── carla_video_tracking_goturn_advtextures_undefended.json │ └── poisoning │ │ ├── baseline_defenses │ │ ├── cifar10 │ │ │ ├── dlbd │ │ │ │ ├── copyright │ │ │ │ │ ├── cifar10_dlbd_copyright_activation_defense.json │ │ │ │ │ ├── cifar10_dlbd_copyright_perfect_filter.json │ │ │ │ │ ├── cifar10_dlbd_copyright_random_filter.json │ │ │ │ │ ├── cifar10_dlbd_copyright_spectral_signature_defense.json │ │ │ │ │ └── cifar10_dlbd_copyright_undefended.json │ │ │ │ └── watermark │ │ │ │ │ ├── cifar10_dlbd_watermark_activation_defense.json │ │ │ │ │ ├── cifar10_dlbd_watermark_perfect_filter.json │ │ │ │ │ ├── cifar10_dlbd_watermark_random_filter.json │ │ │ │ │ ├── cifar10_dlbd_watermark_spectral_signature_defense.json │ │ │ │ │ └── cifar10_dlbd_watermark_undefended.json │ │ │ └── witches_brew │ │ │ │ ├── cifar10_witches_brew_activation_defense.json │ │ │ │ ├── cifar10_witches_brew_perfect_filter.json │ │ │ │ ├── 
cifar10_witches_brew_random_filter.json │ │ │ │ ├── cifar10_witches_brew_spectral_signature_defense.json │ │ │ │ └── cifar10_witches_brew_undefended.json │ │ └── gtsrb │ │ │ ├── clbd │ │ │ ├── bullet_holes │ │ │ │ ├── gtsrb_clbd_bullet_holes_activation_defense.json │ │ │ │ ├── gtsrb_clbd_bullet_holes_perfect_filter.json │ │ │ │ ├── gtsrb_clbd_bullet_holes_random_filter.json │ │ │ │ ├── gtsrb_clbd_bullet_holes_spectral_signature_defense.json │ │ │ │ └── gtsrb_clbd_bullet_holes_undefended.json │ │ │ └── peace_sign │ │ │ │ ├── gtsrb_clbd_peace_sign_activation_defense.json │ │ │ │ ├── gtsrb_clbd_peace_sign_perfect_filter.json │ │ │ │ ├── gtsrb_clbd_peace_sign_random_filter.json │ │ │ │ ├── gtsrb_clbd_peace_sign_spectral_signature_defense.json │ │ │ │ └── gtsrb_clbd_peace_sign_undefended.json │ │ │ ├── dlbd │ │ │ ├── bullet_holes │ │ │ │ ├── gtsrb_dlbd_bullet_holes_activation_defense.json │ │ │ │ ├── gtsrb_dlbd_bullet_holes_perfect_filter.json │ │ │ │ ├── gtsrb_dlbd_bullet_holes_random_filter.json │ │ │ │ ├── gtsrb_dlbd_bullet_holes_spectral_signature_defense.json │ │ │ │ └── gtsrb_dlbd_bullet_holes_undefended.json │ │ │ └── peace_sign │ │ │ │ ├── gtsrb_dlbd_peace_sign_activation_defense.json │ │ │ │ ├── gtsrb_dlbd_peace_sign_perfect_filter.json │ │ │ │ ├── gtsrb_dlbd_peace_sign_random_filter.json │ │ │ │ ├── gtsrb_dlbd_peace_sign_spectral_signature_defense.json │ │ │ │ └── gtsrb_dlbd_peace_sign_undefended.json │ │ │ └── witches_brew │ │ │ ├── gtsrb_witches_brew_activation_defense.json │ │ │ ├── gtsrb_witches_brew_perfect_filter.json │ │ │ ├── gtsrb_witches_brew_random_filter.json │ │ │ ├── gtsrb_witches_brew_spectral_signature_defense.json │ │ │ └── gtsrb_witches_brew_undefended.json │ │ ├── cifar10_poison_dlbd.json │ │ ├── cifar10_witches_brew.json │ │ ├── gtsrb_dlbd_baseline_keras.json │ │ ├── gtsrb_dlbd_baseline_pytorch.json │ │ └── gtsrb_witches_brew.json ├── eval6 │ ├── asr_librispeech │ │ └── hubert_untargeted_snr_pgd.json │ ├── carla_mot │ │ ├── carla_mot_adversarialpatch_undefended.json │ │ ├── carla_mot_dpatch_defended.json │ │ └── carla_mot_dpatch_undefended.json │ ├── carla_overhead_object_detection │ │ ├── carla_obj_det_adversarialpatch_undefended.json │ │ ├── carla_obj_det_dpatch_defended.json │ │ ├── carla_obj_det_dpatch_undefended.json │ │ ├── carla_obj_det_multimodal_adversarialpatch_defended.json │ │ ├── carla_obj_det_multimodal_adversarialpatch_undefended.json │ │ ├── carla_obj_det_multimodal_dpatch_defended.json │ │ └── carla_obj_det_multimodal_dpatch_undefended.json │ └── poisoning │ │ ├── audio_dlbd │ │ ├── audio_p00_undefended.json │ │ ├── audio_p01_undefended.json │ │ ├── audio_p05_undefended.json │ │ ├── audio_p10_undefended.json │ │ ├── audio_p20_undefended.json │ │ ├── audio_p30_undefended.json │ │ └── baseline_defenses │ │ │ ├── audio_p10_activation_defense.json │ │ │ ├── audio_p10_dpinstahide.json │ │ │ ├── audio_p10_perfect_filter.json │ │ │ ├── audio_p10_random_filter.json │ │ │ └── audio_p10_spectral_signature_defense.json │ │ └── sleeper_agent │ │ ├── baseline_defenses │ │ ├── cifar10_sleeper_agent_p10_activation_defense.json │ │ ├── cifar10_sleeper_agent_p10_dpinstahide.json │ │ ├── cifar10_sleeper_agent_p10_perfect_filter.json │ │ ├── cifar10_sleeper_agent_p10_random_filter.json │ │ └── cifar10_sleeper_agent_p10_spectral_signatures_defense.json │ │ ├── cifar10_sleeper_agent_p00_undefended.json │ │ ├── cifar10_sleeper_agent_p01_undefended.json │ │ ├── cifar10_sleeper_agent_p05_undefended.json │ │ ├── cifar10_sleeper_agent_p10_undefended.json │ │ ├── 
cifar10_sleeper_agent_p20_undefended.json │ │ ├── cifar10_sleeper_agent_p30_undefended.json │ │ └── cifar10_sleeper_agent_p50_undefended.json ├── eval7 │ ├── carla_mot │ │ ├── carla_mot_adversarialpatch_undefended.json │ │ ├── carla_mot_dpatch_defended.json │ │ └── carla_mot_dpatch_undefended.json │ ├── carla_overhead_object_detection │ │ ├── carla_obj_det_adversarialpatch_targeted_undefended.json │ │ ├── carla_obj_det_adversarialpatch_undefended.json │ │ ├── carla_obj_det_dpatch_defended.json │ │ ├── carla_obj_det_dpatch_undefended.json │ │ ├── carla_obj_det_multimodal_adversarialpatch_defended.json │ │ ├── carla_obj_det_multimodal_adversarialpatch_targeted_undefended.json │ │ ├── carla_obj_det_multimodal_adversarialpatch_undefended.json │ │ ├── carla_obj_det_multimodal_dpatch_defended.json │ │ └── carla_obj_det_multimodal_dpatch_undefended.json │ └── poisoning │ │ ├── obj_det_dlbd_GMA.json │ │ ├── obj_det_dlbd_ODA.json │ │ ├── obj_det_dlbd_OGA.json │ │ └── obj_det_dlbd_RMA.json ├── mnist_baseline.json ├── no_docker │ ├── carla_short.json │ └── cifar_short.json ├── poisoning_cifar10_witches_brew.json ├── poisoning_gtsrb_dirty_label.json ├── so2sat_eo_masked_pgd.json ├── speaker_id_librispeech.json ├── ucf101_masked_pgd.json └── xview_robust_dpatch.json ├── setup.py ├── tests ├── conftest.py ├── end_to_end │ ├── test_e2e_datasets.py │ ├── test_e2e_models.py │ ├── test_e2e_scenarios.py │ └── test_no_docker.py ├── jupyter_manual_test.ipynb ├── scenarios │ ├── broken │ │ ├── invalid_dataset_framework.json │ │ ├── invalid_module.json │ │ └── missing_scenario.json │ └── pytorch │ │ ├── image_classification.json │ │ └── image_classification_pretrained.json ├── test_data │ ├── audio_sample.mp3 │ ├── image_sample.png │ └── video_sample.mp4 ├── test_downloads.py └── unit │ ├── test_compute_metrics.py │ ├── test_configuration.py │ ├── test_cuda.py │ ├── test_datasets.py │ ├── test_docker.py │ ├── test_docker_build_script.py │ ├── test_export.py │ ├── test_instrument.py │ ├── test_json_utils.py │ ├── test_logging.py │ ├── test_models.py │ ├── test_perturbation_metrics.py │ ├── test_poisoning_metrics.py │ ├── test_statistical_metrics.py │ └── test_task_metrics.py ├── tools ├── pre-commit.sh └── trigger-release.sh └── tutorials └── adaptive_attacks ├── README.md ├── custom_attack.json ├── custom_attack.md ├── custom_attack.py ├── patch_loss_gradient.json ├── patch_loss_gradient.md ├── patch_loss_gradient.py ├── patch_loss_gradient_model.py ├── proxy_model.json ├── proxy_model.md ├── proxy_model_attack_model.py └── proxy_model_eval_model.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # ignore .conda, etc. 2 | .* 3 | 4 | # Allowing .git 5 | !.git/ 6 | 7 | # Ensure all git files are present. 
Otherwise, armory will be "dirty" inside container, which will change the version 8 | !.dockerignore 9 | !.flake8 10 | !.github/ 11 | !.gitignore 12 | !.yamllint 13 | 14 | # ignore large directories 15 | datasets/ 16 | 17 | # ignore temp directories 18 | outputs/ 19 | dist/ 20 | tmp/ 21 | 22 | # Environments 23 | .env 24 | .venv 25 | env/ 26 | venv/ 27 | ENV/ 28 | env.bak/ 29 | venv.bak/ 30 | venv* 31 | 32 | # Byte-compiled / optimized / DLL files 33 | __pycache__/ 34 | *.pyc 35 | *.py[cod] 36 | *$py.class 37 | 38 | # mypy 39 | .mypy_cache/ 40 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | # TODO: move this to pyproject.toml when supported, see https://github.com/PyCQA/flake8/issues/234 2 | 3 | 4 | [flake8] 5 | #imported but unused in __init__.py, that's ok. 6 | per-file-ignores = **/__init__.py:F401 7 | 8 | 9 | exclude = 10 | .git, 11 | datasets, 12 | venv*, 13 | __pycache__, 14 | build, 15 | dist, 16 | 17 | 18 | ignore = 19 | # defaults flake8 ignores 20 | E121,E123,E126,E226,E24,E704,W503,W504 21 | 22 | # import not at top 23 | E402 24 | 25 | # whitespace before ':' 26 | # https://black.readthedocs.io/en/stable/the_black_code_style.html#slices 27 | E203 28 | 29 | # missing whitespace after ',' 30 | # black takes care of that. Sometimes it may 31 | # add a comma at the end of lists. 32 | E231 33 | 34 | # Line too long 35 | # We use black, no need to enforce line length 36 | E501 37 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | 3 | # Tools 4 | /tools/** text eol=lf 5 | 6 | # Documentation 7 | docs/** linguist-documentation 8 | notebooks/** linguist-documentation 9 | tutorials/** linguist-documentation 10 | 11 | # Source files 12 | # ============ 13 | *.pxd text diff=python 14 | *.py text diff=python 15 | *.py3 text diff=python 16 | *.pyw text diff=python 17 | *.pyx text diff=python 18 | *.pyz text diff=python 19 | 20 | # Binary files 21 | # ============ 22 | *.db binary 23 | *.p binary 24 | *.pkl binary 25 | *.pickle binary 26 | *.pyc binary 27 | *.pyd binary 28 | *.pyo binary 29 | 30 | # Jupyter notebook 31 | *.ipynb text 32 | 33 | # Note: .db, .p, and .pkl files are associated 34 | # with the python modules ``pickle``, ``dbm.*``, 35 | # ``shelve``, ``marshal``, ``anydbm``, & ``bsddb`` 36 | # (among others). -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🐛Bug Report 3 | 4 | description: File a bug report here 5 | 6 | title: "[BUG]: " 7 | 8 | labels: ["bug"] 9 | 10 | body: 11 | - type: markdown 12 | attributes: 13 | value: | 14 | 🤗 Thank you for taking the time to fill out this bug report! 
15 | Be sure to check the [open](https://github.com/twosixlabs/armory/issues)/[closed](https://github.com/twosixlabs/armory/issues?q=is%3Aissue+is%3Aclosed) issues for possible resolutions or updates 😃 16 | 17 | - type: textarea 18 | id: bug-description 19 | attributes: 20 | label: Description of the bug 21 | description: Give us a brief description of what happened and what should have happened 22 | validations: 23 | required: true 24 | 25 | - type: textarea 26 | id: steps-to-reproduce 27 | attributes: 28 | label: Steps To Reproduce 29 | description: Steps to reproduce the behavior. 30 | placeholder: | 31 | 1. Go to '...' 32 | 2. Add configuration '...' 33 | 3. Run command '...' 34 | 4. See error 35 | validations: 36 | required: true 37 | 38 | - type: textarea 39 | id: additional-information 40 | attributes: 41 | label: Additional Information 42 | description: | 43 | Include any additional information such as logs, screenshots, or scenarios in which the bug occurs in order to facilitate resolving the issue. 44 | -------------------------------------------------------------------------------- /.github/actions/evaluations_environment_setup/action.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # yamllint disable 3 | name: "Setup Matrix and Armory" 4 | 5 | description: "Install Armory and binary dependencies" 6 | 7 | runs: 8 | using: "composite" 9 | steps: 10 | - uses: actions/setup-python@v4 11 | with: 12 | python-version: 3.9 13 | 14 | 15 | - name: 🌱 Setup Test Environment 16 | shell: bash 17 | run: | 18 | python -m pip install --upgrade pip 19 | 20 | pip install virtualenv 21 | virtualenv .venv 22 | source .venv/bin/activate 23 | 24 | 25 | - name: ⚙️ Installing Armory 26 | shell: bash 27 | run: | 28 | pip install --no-compile --editable '.[developer,engine,math,datasets,pytorch,datasets-builder]' 29 | armory configure --use-defaults 30 | 31 | 32 | # TODO: Resolve differences between dockerized conda and runner. 
33 | - name: ⚙️ Installing Dependencies 34 | shell: bash 35 | run: | 36 | sudo apt install -y protobuf-compiler 37 | pip install --no-cache-dir cython 38 | 39 | git clone https://github.com/ifzhang/ByteTrack.git 40 | cd ByteTrack 41 | pip3 install -r requirements.txt 42 | python3 setup.py develop 43 | # Requires cython for install, so will fail if run in the same pip install as cython 44 | pip install --no-cache-dir cython-bbox 45 | 46 | 47 | - name: 🐍 Load Cached Venv 48 | id: cache 49 | uses: actions/cache@v3 50 | with: 51 | path: | 52 | .venv/ 53 | key: ${{ runner.os }}-${{ hashFiles('pyproject.toml') }}-${{ hashFiles('armory/__about__.py') }} 54 | -------------------------------------------------------------------------------- /.github/workflows/ci_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🐛📦 Build & Test 3 | 4 | on: 5 | workflow_dispatch: 6 | 7 | push: 8 | branches: 9 | - master 10 | - develop 11 | - 'v[0-9]+.[0-9]+.[0-9]+' # release branches have the form v1.9.x 12 | - 'rc[0-9]+.[0-9]+.[0-9]+' # release candidate branches 13 | 14 | pull_request: 15 | branches: 16 | - master 17 | - develop 18 | - 'v[0-9]+.[0-9]+.[0-9]+' # release branches have the form v1.9.x 19 | - 'rc[0-9]+.[0-9]+.[0-9]+' # release candidate branches 20 | 21 | 22 | env: 23 | DOCKER_BUILDKIT: 1 24 | 25 | 26 | jobs: 27 | scan-lint-build: 28 | uses: ./.github/workflows/1-scan-lint-build.yml 29 | 30 | test-stand-alone-builds: 31 | needs: scan-lint-build 32 | uses: ./.github/workflows/2-test-stand-alone.yml 33 | 34 | test-docker-builds: 35 | uses: ./.github/workflows/3-test-docker.yml 36 | -------------------------------------------------------------------------------- /.github/workflows/tests/release-dry-run.json: -------------------------------------------------------------------------------- 1 | { 2 | "action": "workflow_dispatch", 3 | "inputs": { 4 | "dry_run": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /.github/workflows/tests/scenario-evaluation-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "dataset_dir": "/tmp/armory/datasets", 3 | "local_git_dir": "/tmp/armory/git", 4 | "output_dir": "/tmp/armory/outputs", 5 | "saved_model_dir": "/tmp/armory/saved_models", 6 | "tmp_dir": "/tmp/armory/tmp", 7 | "verify_ssl": true 8 | } 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # -------------- Project ------------------ 2 | */__about__.py 3 | 4 | 5 | # ------------- Template ------------------ 6 | # Ignore dataset, tmp, and output directories 7 | /datasets/ 8 | /tmp/ 9 | /outputs/ 10 | 11 | # Byte-compiled / optimized / DLL files 12 | __pycache__/ 13 | *.py[cod] 14 | *$py.class 15 | *.pyc 16 | 17 | # C extensions 18 | *.so 19 | 20 | # Distribution / packaging 21 | .Python 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | wheels/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | 38 | # PyInstaller 39 | # Usually these files are written by a python script from a template 40 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
41 | *.manifest 42 | *.spec 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit test / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # pyenv 86 | .python-version 87 | 88 | # celery beat schedule file 89 | celerybeat-schedule 90 | 91 | # SageMath parsed files 92 | *.sage.py 93 | 94 | # Environments 95 | .env 96 | .venv 97 | env/ 98 | venv/ 99 | ENV/ 100 | env.bak/ 101 | venv.bak/ 102 | venv* 103 | 104 | # Spyder project settings 105 | .spyderproject 106 | .spyproject 107 | 108 | # Rope project settings 109 | .ropeproject 110 | 111 | # mkdocs documentation 112 | /site 113 | 114 | # mypy 115 | .mypy_cache/ 116 | 117 | # editors 118 | .idea/ 119 | .vscode/ 120 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # This CITATION.cff file was generated with cffinit. 2 | # Visit https://bit.ly/cffinit to generate yours today! 3 | 4 | cff-version: 1.2.0 5 | title: armory 6 | message: >- 7 | If you use this software, please cite it using the 8 | metadata from this file. 9 | type: software 10 | authors: 11 | - given-names: David 12 | family-names: Slater 13 | email: armory@twosixtech.com 14 | - given-names: Lucas 15 | family-names: Cadalzo 16 | email: armory@twosixtech.com 17 | repository-code: 'https://github.com/twosixlabs/armory' 18 | url: 'https://www.gardproject.org/' 19 | abstract: >- 20 | Armory is a testbed for running scalable evaluations of 21 | adversarial defenses for ML systems. Configuration files 22 | are used to launch local or cloud instances of the Armory 23 | docker containers. Models, datasets, and evaluation 24 | scripts can be pulled from external repositories or from 25 | the baselines within this project. 26 | keywords: 27 | - adversarial machine learning 28 | license: MIT 29 | commit: 029b811eef05167f33d393720ad193f307b1161a 30 | version: 0.16.4 31 | doi: 10.5281/zenodo.7561755 32 | date-released: '2023-01-20' 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Two Six Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /armory/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Adversarial Robustness Evaluation Test Bed 3 | """ 4 | 5 | from pathlib import Path 6 | 7 | from armory.logs import log 8 | from armory.utils import typedef, version 9 | 10 | Config = typedef.Config 11 | 12 | 13 | SRC_ROOT = Path(__file__).parent 14 | 15 | 16 | def __getattr__(name): 17 | if name == "__version__": 18 | return version.get_version() 19 | raise AttributeError(f"module '{__name__}' has no attribute '{name}'") 20 | 21 | 22 | END_SENTINEL = "Scenario has finished running cleanly" 23 | -------------------------------------------------------------------------------- /armory/arguments.py: -------------------------------------------------------------------------------- 1 | """ 2 | Handling of command line arguments and other configuration. 3 | """ 4 | 5 | import argparse 6 | 7 | 8 | def merge_config_and_args(config, args): 9 | """ 10 | Override members of config if specified as args. The config dict is mutated. 11 | Members in config are percolated into args to act as if they were specified. 12 | Members of args that are not in config are put there so that the output 13 | accurately records what was run. Returns a modified config and a newly 14 | created args. 15 | The precedence becomes defaults < config block < command args. 16 | """ 17 | 18 | # find truthy sysconfig specifications 19 | sysconf = config["sysconfig"] 20 | new_spec = {name: sysconf[name] for name in sysconf if sysconf[name]} 21 | 22 | # find truthy args specifications, overwriting config if present 23 | cmd = vars(args) 24 | new_args = {name: cmd[name] for name in cmd if cmd[name]} 25 | new_spec.update(new_args) 26 | 27 | # sysconfig gets updated with all truthy members of the prioritized union 28 | sysconf.update(new_spec) 29 | 30 | # new_args now gets the original namespace and all truthy members of the prioritized 31 | # union 32 | cmd.update(new_spec) 33 | new_args = argparse.Namespace(**cmd) 34 | 35 | return config, new_args 36 | -------------------------------------------------------------------------------- /armory/art_experimental/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This subpackage will contain experimental ART features that are not merged into 3 | the PyPi package. 
4 | """ 5 | from armory import delayed_imports 6 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/__init__.py: -------------------------------------------------------------------------------- 1 | from armory.art_experimental.attacks.fgm_binary_search import FGMBinarySearch 2 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/cascading_attack.py: -------------------------------------------------------------------------------- 1 | from art.attacks.evasion import AutoAttack 2 | 3 | from armory.utils.config_loading import load_attack 4 | 5 | 6 | class CascadingAttack(AutoAttack): 7 | def __init__(self, estimator, **kwargs): 8 | self._check_kwargs(kwargs) 9 | self.targeted = kwargs.get("targeted", False) 10 | self.attacks = [] 11 | for inner_config in kwargs["inner_configs"]: 12 | inner_config["kwargs"]["targeted"] = self.targeted 13 | self.attacks.append(load_attack(inner_config, estimator)) 14 | kwargs.pop("inner_configs") 15 | super().__init__(estimator=estimator, attacks=self.attacks, **kwargs) 16 | 17 | def _check_kwargs(self, kwargs): 18 | if "inner_configs" not in kwargs: 19 | raise ValueError("Missing 'inner_configs' key in attack kwargs") 20 | if not isinstance(kwargs["inner_configs"], (list, tuple)): 21 | raise ValueError("`inner_configs` key must map to a tuple or list") 22 | for i, config in enumerate(kwargs["inner_configs"]): 23 | if "module" not in config: 24 | raise ValueError(f"Missing 'module' key in inner_configs[{i}]") 25 | if "name" not in config: 26 | raise ValueError(f"Missing 'name' key in inner_configs[{i}]") 27 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/frame.py: -------------------------------------------------------------------------------- 1 | def get_frame_saliency(classifier, inner_config=None, **kwargs): 2 | from art.attacks.evasion import FrameSaliencyAttack 3 | 4 | from armory.utils import config_loading 5 | 6 | attacker = config_loading.load_attack(inner_config, classifier) 7 | attack = FrameSaliencyAttack(classifier, attacker, **kwargs) 8 | return attack 9 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/patch.py: -------------------------------------------------------------------------------- 1 | """ 2 | Support for patch attacks using same interface. 
3 | """ 4 | 5 | 6 | from art.attacks.attack import EvasionAttack 7 | 8 | 9 | class AttackWrapper(EvasionAttack): 10 | def __init__(self, attack, apply_patch_args, apply_patch_kwargs): 11 | self._attack = attack 12 | self.args = apply_patch_args 13 | self.kwargs = apply_patch_kwargs 14 | 15 | def generate(self, x, y=None, **kwargs): 16 | self._attack.generate(x, y=y, **kwargs) 17 | return self._attack.apply_patch(x, *self.args, **self.kwargs) 18 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/poison_loader_audio.py: -------------------------------------------------------------------------------- 1 | from art.attacks.poisoning import PoisoningAttackBackdoor 2 | from art.attacks.poisoning.perturbations.audio_perturbations import ( 3 | CacheAudioTrigger, 4 | CacheToneTrigger, 5 | ) 6 | 7 | from armory.utils import triggers 8 | 9 | 10 | def poison_loader_audio(**kwargs): 11 | backdoor_kwargs = kwargs.pop("backdoor_kwargs") 12 | 13 | if "backdoor_path" in backdoor_kwargs: 14 | 15 | backdoor_kwargs["backdoor_path"] = triggers.get_path( 16 | backdoor_kwargs["backdoor_path"] 17 | ) 18 | trigger = CacheAudioTrigger(**backdoor_kwargs) 19 | 20 | def poison_func(x): 21 | return trigger.insert(x) 22 | 23 | elif "frequency" in backdoor_kwargs: 24 | trigger = CacheToneTrigger(**backdoor_kwargs) 25 | 26 | def poison_func(x): 27 | return trigger.insert(x) 28 | 29 | else: 30 | raise ValueError( 31 | 'backdoor_kwargs should include either "frequency" or "backdoor_path"' 32 | ) 33 | return PoisoningAttackBackdoor(poison_func, **kwargs) 34 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/poison_loader_clbd.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module enables loading of CLBD attack from a json config 3 | """ 4 | 5 | 6 | from art.attacks.poisoning import PoisoningAttackCleanLabelBackdoor 7 | from art.utils import to_categorical 8 | 9 | from armory.art_experimental.attacks.poison_loader_dlbd import poison_loader_dlbd 10 | 11 | 12 | def poison_loader_clbd(**kwargs): 13 | backdoor_kwargs = kwargs.pop("backdoor_kwargs") 14 | backdoor = poison_loader_dlbd(**backdoor_kwargs) 15 | 16 | # Targets is a one-hot numpy array -- need to map from sparse representation 17 | target = kwargs.pop("target") 18 | n_classes = kwargs.pop("n_classes") 19 | targets = to_categorical([target], n_classes)[0] 20 | 21 | return ( 22 | PoisoningAttackCleanLabelBackdoor(backdoor=backdoor, target=targets, **kwargs), 23 | backdoor, 24 | ) 25 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/poison_loader_obj_det.py: -------------------------------------------------------------------------------- 1 | from armory.art_experimental.attacks.poison_loader_dlbd import poison_loader_dlbd 2 | from armory.utils import config_loading 3 | 4 | bad_det_attacks = [ 5 | "BadDetRegionalMisclassificationAttack", 6 | "BadDetGlobalMisclassificationAttack", 7 | "BadDetObjectGenerationAttack", 8 | "BadDetObjectDisappearanceAttack", 9 | ] 10 | 11 | 12 | def poison_loader_obj_det(**kwargs): 13 | 14 | backdoor_kwargs = kwargs.pop("backdoor_kwargs") 15 | 16 | backdoor = poison_loader_dlbd( 17 | **backdoor_kwargs 18 | ) # loads the PoisoningAttackBackdoor object 19 | kwargs["backdoor"] = backdoor 20 | 21 | attack_version = kwargs.pop("attack_variant") 22 | if attack_version not in bad_det_attacks: 23 | raise 
ValueError( 24 | f"'attack_variant' is {attack_version} but should be one of {bad_det_attacks}" 25 | ) 26 | 27 | config = { 28 | "module": "art.attacks.poisoning", 29 | "name": attack_version, 30 | "kwargs": kwargs, 31 | } 32 | 33 | return config_loading.load(config) 34 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/robust_dpatch.py: -------------------------------------------------------------------------------- 1 | from art.attacks.evasion import RobustDPatch 2 | import numpy as np 3 | 4 | from armory.logs import log 5 | 6 | 7 | class RobustDPatch(RobustDPatch): 8 | """ 9 | Generate and apply patch 10 | """ 11 | 12 | def __init__(self, estimator, **kwargs): 13 | # allows for random patch location 14 | if "patch_location" not in kwargs: 15 | self.random_location = True 16 | else: 17 | self.random_location = False 18 | super().__init__(estimator=estimator, **kwargs) 19 | 20 | def generate(self, x, y=None, **generate_kwargs): 21 | if self.random_location: 22 | log.info("Selecting random coordinates for patch_location.") 23 | self.patch_location = ( 24 | np.random.randint(int(x.shape[-3] - self.patch_shape[0])), 25 | np.random.randint(int(x.shape[-2] - self.patch_shape[1])), 26 | ) 27 | super().generate(x, y=y, **generate_kwargs) 28 | return super().apply_patch(x) 29 | -------------------------------------------------------------------------------- /armory/art_experimental/attacks/video_frame_border.py: -------------------------------------------------------------------------------- 1 | from art.attacks.evasion import ProjectedGradientDescent 2 | import numpy as np 3 | 4 | 5 | class FrameBorderPatch(ProjectedGradientDescent): 6 | """ 7 | Apply Masked PGD to video inputs, where only the 8 | video frame border is allowed to be perturbed. 9 | Each video is assumed to have shape (NFHWC). 
10 | """ 11 | 12 | def __init__(self, estimator, **kwargs): 13 | super().__init__(estimator=estimator, **kwargs) 14 | 15 | def generate(self, x, y=None, patch_ratio=None, **kwargs): 16 | if patch_ratio is None: 17 | raise ValueError("generate_kwargs did not define 'patch_ratio'") 18 | if x.ndim != 5: 19 | raise ValueError("This attack is designed for videos (5-dim)") 20 | width = x.shape[3] 21 | height = x.shape[2] 22 | 23 | t1 = ( 24 | 2 * (width + height) 25 | + (4 * (width + height) ** 2 - 16 * (patch_ratio * width * height)) ** 0.5 26 | ) / 8 27 | t2 = ( 28 | 2 * (width + height) 29 | - (4 * (width + height) ** 2 - 16 * (patch_ratio * width * height)) ** 0.5 30 | ) / 8 31 | thickness = int(min(t1, t2)) 32 | 33 | if (width - 2 * thickness) * (height - 2 * thickness) < ( 34 | 1 - patch_ratio 35 | ) * width * height: 36 | raise ValueError("patch_ratio does not match height and width") 37 | 38 | mask = np.ones(shape=x.shape[1:], dtype=np.float32) 39 | 40 | mask[:, thickness : height - thickness, thickness : width - thickness, :] = 0.0 41 | 42 | return super().generate(x, y=y, mask=mask, **kwargs) 43 | -------------------------------------------------------------------------------- /armory/art_experimental/defences/__init__.py: -------------------------------------------------------------------------------- 1 | from armory.art_experimental.defences.base import Transformer 2 | -------------------------------------------------------------------------------- /armory/art_experimental/defences/base.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module implements the abstract base classes for all extended defenses. 3 | """ 4 | 5 | import abc 6 | 7 | 8 | class Transformer(abc.ABC): 9 | """ 10 | Abstract base class for model transform defense classes. 11 | """ 12 | 13 | @abc.abstractmethod 14 | def transform(self, model): 15 | """ 16 | Returns a transformed version of the target model. 
17 | """ 18 | raise NotImplementedError 19 | -------------------------------------------------------------------------------- /armory/art_experimental/defences/jpeg_compression_multichannel_image.py: -------------------------------------------------------------------------------- 1 | from art.defences.preprocessor import JpegCompression 2 | import numpy as np 3 | 4 | 5 | class JpegCompressionMultiChannelImage(JpegCompression): 6 | """ 7 | Rescale inputs that may not be in [0,1] after preprocessing, 8 | process use ART JpegCompression treating input as video 9 | (so that number of channels need not be 1,3), scale back 10 | to original preprocessing 11 | """ 12 | 13 | def __init__( 14 | self, 15 | clip_values, 16 | quality=50, 17 | channels_first=False, 18 | apply_fit=True, 19 | apply_predict=False, 20 | mins=None, 21 | ranges=None, 22 | n_channels=14, 23 | dtype=np.float32, 24 | ): 25 | super().__init__( 26 | clip_values, 27 | quality=quality, 28 | channels_first=channels_first, 29 | apply_fit=apply_fit, 30 | apply_predict=apply_predict, 31 | ) 32 | if mins is None: 33 | mins = (0.0,) * n_channels # identity operation 34 | if len(mins) != n_channels: 35 | raise ValueError(f"mins must have {n_channels} values, one per channel") 36 | self.mins = np.array(mins, dtype=dtype) 37 | 38 | if ranges is None: 39 | ranges = (1.0,) * n_channels # identity operation 40 | if len(ranges) != n_channels: 41 | raise ValueError(f"ranges must have {n_channels} values, one per channel") 42 | self.ranges = np.array(ranges, dtype=dtype) 43 | 44 | def __call__(self, x, y=None): 45 | x = (x - self.mins) / self.ranges 46 | x = np.transpose(x, (0, 3, 1, 2)) # Change from nhwc to nchw 47 | x = np.expand_dims(x, axis=-1) 48 | x, _ = super().__call__(x) 49 | x = np.transpose(x[..., 0], (0, 2, 3, 1)) 50 | x = x * self.ranges + self.mins 51 | return x, y 52 | -------------------------------------------------------------------------------- /armory/art_experimental/defences/jpeg_compression_normalized.py: -------------------------------------------------------------------------------- 1 | from art.defences.preprocessor import JpegCompression 2 | import numpy as np 3 | 4 | 5 | class JpegCompressionNormalized(JpegCompression): 6 | """ 7 | Unnormalize inputs that were normalized during preprocessing, 8 | process use ART JpegCompression, and renormalize 9 | """ 10 | 11 | def __init__( 12 | self, 13 | clip_values, 14 | quality=50, 15 | apply_fit=True, 16 | apply_predict=False, 17 | means=None, 18 | stds=None, 19 | dtype=np.float32, 20 | ): 21 | super().__init__( 22 | clip_values, 23 | quality=quality, 24 | apply_fit=apply_fit, 25 | apply_predict=apply_predict, 26 | ) 27 | if means is None: 28 | means = (0.0, 0.0, 0.0) # identity operation 29 | if len(means) != 3: 30 | raise ValueError("means must have 3 values, one per channel") 31 | self.means = np.array(means, dtype=dtype) 32 | 33 | if stds is None: 34 | stds = (1.0, 1.0, 1.0) # identity operation 35 | if len(stds) != 3: 36 | raise ValueError("stds must have 3 values, one per channel") 37 | self.stds = np.array(stds, dtype=dtype) 38 | 39 | def __call__(self, x, y=None): 40 | x = x * self.stds + self.means 41 | np.clip(x, self.clip_values[0], self.clip_values[1], x) 42 | x, _ = super().__call__(x) 43 | x = (x - self.means) / self.stds 44 | return x, y 45 | -------------------------------------------------------------------------------- /armory/art_experimental/poison_detection/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/art_experimental/poison_detection/__init__.py -------------------------------------------------------------------------------- /armory/art_experimental/poison_detection/random_filter.py: -------------------------------------------------------------------------------- 1 | from art.defences.detector.poison import PoisonFilteringDefence 2 | import numpy as np 3 | 4 | from armory.logs import log 5 | 6 | 7 | class RandomFilterBaselineDefense(PoisonFilteringDefence): 8 | def __init__(self, classifier, x_train, y_train, **kwargs): 9 | """ 10 | Create a :class:`.RandomFilterBaselineDefense` object with the provided classifier. 11 | :param classifier: Model evaluated for poison. 12 | :param x_train: dataset used to train the classifier. 13 | :param y_train: labels used to train the classifier. 14 | """ 15 | super().__init__(classifier, x_train, y_train) 16 | self.n_data = len(y_train) 17 | 18 | def evaluate_defence(self, is_clean, **kwargs): 19 | raise NotImplementedError( 20 | "evaluate_defence() not implemented for RandomFilterBaselineDefense" 21 | ) 22 | 23 | def detect_poison(self, expected_pp_poison=None): 24 | """ 25 | Selects data at random to label as poison. 26 | :return: (report, is_clean_lst): 27 | where report is None (for future ART compatibility) 28 | where is_clean is a list, where a 1 at index i indicates that x_train[i] is clean, 29 | and a 0 indicates that x_train[i] is detected as poison. 30 | """ 31 | 32 | if expected_pp_poison is None: 33 | expected_pp_poison = 0.3 34 | log.info( 35 | "Setting expected_pp_poison to 0.3. This can be set under defense/kwargs in the config" 36 | ) 37 | 38 | if expected_pp_poison < 0 or expected_pp_poison > 1: 39 | raise ValueError( 40 | f"defense/kwargs/expected_pp_poison must be set between 0 and 1 in the config. Got {expected_pp_poison}" 41 | ) 42 | 43 | is_clean = np.random.choice( 44 | [0, 1], self.n_data, p=[expected_pp_poison, 1 - expected_pp_poison] 45 | ) 46 | 47 | return None, is_clean 48 | -------------------------------------------------------------------------------- /armory/attacks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/attacks/__init__.py -------------------------------------------------------------------------------- /armory/baseline_models/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Baseline models for use in the ARMORY system. 
3 | """ 4 | from armory import delayed_imports 5 | -------------------------------------------------------------------------------- /armory/baseline_models/keras/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/baseline_models/keras/__init__.py -------------------------------------------------------------------------------- /armory/baseline_models/keras/cifar.py: -------------------------------------------------------------------------------- 1 | """ 2 | CNN model for 32x32x3 image classification 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.classification import KerasClassifier 7 | import tensorflow as tf 8 | from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D 9 | from tensorflow.keras.models import Sequential 10 | 11 | tf.compat.v1.disable_eager_execution() 12 | 13 | 14 | def make_cifar_model(**kwargs) -> tf.keras.Model: 15 | """ 16 | This is a simple CNN for CIFAR-10 and does not achieve SotA performance 17 | """ 18 | model = Sequential() 19 | model.add( 20 | Conv2D( 21 | filters=4, 22 | kernel_size=(5, 5), 23 | strides=1, 24 | activation="relu", 25 | input_shape=(32, 32, 3), 26 | ) 27 | ) 28 | model.add(MaxPooling2D(pool_size=(2, 2))) 29 | model.add( 30 | Conv2D( 31 | filters=10, 32 | kernel_size=(5, 5), 33 | strides=1, 34 | activation="relu", 35 | input_shape=(23, 23, 4), 36 | ) 37 | ) 38 | model.add(MaxPooling2D(pool_size=(2, 2))) 39 | model.add(Flatten()) 40 | model.add(Dense(100, activation="relu")) 41 | model.add(Dense(10, activation="softmax")) 42 | 43 | model.compile( 44 | loss=tf.keras.losses.sparse_categorical_crossentropy, 45 | optimizer=tf.keras.optimizers.Adam(lr=0.003), 46 | metrics=["accuracy"], 47 | ) 48 | return model 49 | 50 | 51 | def get_art_model( 52 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 53 | ) -> KerasClassifier: 54 | model = make_cifar_model(**model_kwargs) 55 | if weights_path: 56 | model.load_weights(weights_path) 57 | 58 | wrapped_model = KerasClassifier(model, clip_values=(0.0, 1.0), **wrapper_kwargs) 59 | return wrapped_model 60 | -------------------------------------------------------------------------------- /armory/baseline_models/keras/inception_resnet_v2.py: -------------------------------------------------------------------------------- 1 | """ 2 | Inception_ResNet_v2 CNN model for 299x299x3 image classification 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.classification import KerasClassifier 7 | import tensorflow as tf 8 | from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 9 | from tensorflow.keras.layers import Lambda 10 | from tensorflow.keras.models import Model 11 | 12 | tf.compat.v1.disable_eager_execution() 13 | 14 | 15 | def get_art_model( 16 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 17 | ): 18 | input = tf.keras.Input(shape=(224, 224, 3)) 19 | 20 | # Preprocessing layers 21 | img_scaled_to_255 = Lambda(lambda image: image * 255)(input) 22 | img_resized = Lambda(lambda image: tf.image.resize(image, (299, 299)))( 23 | img_scaled_to_255 24 | ) 25 | # Model was trained with inputs normalized from -1 to 1 26 | img_normalized = Lambda(lambda image: (image / 127.5) - 1.0)(img_resized) 27 | 28 | inception_resnet_v2 = InceptionResNetV2( 29 | weights=None, input_tensor=img_normalized, **model_kwargs 30 | ) 31 | model = Model(inputs=input, 
outputs=inception_resnet_v2.output) 32 | if weights_path: 33 | model.load_weights(weights_path) 34 | 35 | wrapped_model = KerasClassifier(model, clip_values=(0.0, 1.0), **wrapper_kwargs) 36 | return wrapped_model 37 | -------------------------------------------------------------------------------- /armory/baseline_models/keras/mnist.py: -------------------------------------------------------------------------------- 1 | """ 2 | CNN model for 28x28x1 image classification 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.classification import KerasClassifier 7 | import tensorflow as tf 8 | from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D 9 | from tensorflow.keras.models import Sequential 10 | 11 | tf.compat.v1.disable_eager_execution() 12 | 13 | 14 | def make_mnist_model(**kwargs) -> tf.keras.Model: 15 | """ 16 | This is a simple CNN for MNIST and does not achieve SotA performance 17 | """ 18 | model = Sequential() 19 | model.add( 20 | Conv2D( 21 | filters=4, 22 | kernel_size=(5, 5), 23 | strides=1, 24 | activation="relu", 25 | input_shape=(28, 28, 1), 26 | ) 27 | ) 28 | model.add(MaxPooling2D(pool_size=(2, 2))) 29 | model.add( 30 | Conv2D( 31 | filters=10, 32 | kernel_size=(5, 5), 33 | strides=1, 34 | activation="relu", 35 | input_shape=(23, 23, 4), 36 | ) 37 | ) 38 | model.add(MaxPooling2D(pool_size=(2, 2))) 39 | model.add(Flatten()) 40 | model.add(Dense(100, activation="relu")) 41 | model.add(Dense(10, activation="softmax")) 42 | 43 | model.compile( 44 | loss=tf.keras.losses.sparse_categorical_crossentropy, 45 | optimizer=tf.keras.optimizers.Adam(lr=0.003), 46 | metrics=["accuracy"], 47 | ) 48 | return model 49 | 50 | 51 | def get_art_model( 52 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 53 | ): 54 | model = make_mnist_model(**model_kwargs) 55 | if weights_path: 56 | model.load_weights(weights_path) 57 | wrapped_model = KerasClassifier(model, clip_values=(0.0, 1.0), **wrapper_kwargs) 58 | return wrapped_model 59 | -------------------------------------------------------------------------------- /armory/baseline_models/keras/resnet50.py: -------------------------------------------------------------------------------- 1 | """ 2 | ResNet50 CNN model for 224x224x3 image classification 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.classification import KerasClassifier 7 | import tensorflow as tf 8 | from tensorflow.keras.applications.resnet50 import ResNet50 9 | from tensorflow.keras.layers import Lambda 10 | from tensorflow.keras.models import Model 11 | 12 | tf.compat.v1.disable_eager_execution() 13 | 14 | 15 | IMAGENET_MEANS = [103.939, 116.779, 123.68] 16 | 17 | 18 | def get_art_model( 19 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 20 | ) -> KerasClassifier: 21 | """ 22 | ResNet50 pretrained on ImageNet. The initial layers transform the input from canonical form to the expected input 23 | format for ResNet50. 24 | """ 25 | input = tf.keras.Input(shape=(224, 224, 3)) 26 | 27 | # Preprocessing layers 28 | img_scaled_to_255 = Lambda(lambda image: image * 255)(input) 29 | # Reorder image channels i.e. 
img = img[..., ::-1] 30 | img_channel_reorder = Lambda(lambda image: tf.reverse(image, axis=[-1]))( 31 | img_scaled_to_255 32 | ) 33 | # Model was trained with inputs zero-centered on ImageNet mean 34 | img_normalized = Lambda(lambda image: image - IMAGENET_MEANS)(img_channel_reorder) 35 | 36 | resnet50 = ResNet50(weights=None, input_tensor=img_normalized, **model_kwargs) 37 | model = Model(inputs=input, outputs=resnet50.output) 38 | 39 | if weights_path: 40 | model.load_weights(weights_path) 41 | 42 | wrapped_model = KerasClassifier( 43 | model, 44 | clip_values=(0.0, 1.0), 45 | **wrapper_kwargs, 46 | ) 47 | return wrapped_model 48 | -------------------------------------------------------------------------------- /armory/baseline_models/model_configs/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from armory.data.utils import maybe_download_weights_from_s3 4 | from armory.logs import log 5 | 6 | CONFIGS_DIR = Path(__file__).parent 7 | 8 | 9 | def get_path(filename) -> str: 10 | """ 11 | Get the absolute path of the provided config. Ordering priority is: 12 | 1) Check directly for provided filepath 13 | 2) Load from `model_configs` directory 14 | 3) Attempt to download from s3 as a weights file 15 | """ 16 | filename = Path(filename) 17 | if filename.is_file(): 18 | return str(filename) 19 | cfgs_path = CONFIGS_DIR / filename 20 | if cfgs_path.is_file(): 21 | return str(cfgs_path) 22 | 23 | return maybe_download_weights_from_s3(filename) 24 | -------------------------------------------------------------------------------- /armory/baseline_models/pytorch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/baseline_models/pytorch/__init__.py -------------------------------------------------------------------------------- /armory/baseline_models/pytorch/carla_goturn.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from art.estimators.object_tracking import PyTorchGoturn 4 | import numpy as np 5 | import torch 6 | 7 | from armory.utils.external_repo import ExternalRepoImport 8 | 9 | # load amoudgl model and instantiate ART PyTorchGoTurn model 10 | with ExternalRepoImport( 11 | repo="amoudgl/pygoturn", 12 | experiment="carla_video_tracking_goturn_advtextures_defended.json", 13 | ): 14 | from pygoturn.src.model import GoNet 15 | 16 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 17 | 18 | 19 | # NOTE: PyTorchGoturn expects numpy input, not torch.Tensor input 20 | def get_art_model( 21 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 22 | ) -> PyTorchGoturn: 23 | 24 | model = GoNet() 25 | 26 | if weights_path: 27 | checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage) 28 | model.load_state_dict(checkpoint["state_dict"]) 29 | model = model.to(DEVICE) 30 | 31 | wrapped_model = PyTorchGoturn( 32 | model=model, 33 | input_shape=( 34 | 3, 35 | 224, 36 | 224, 37 | ), # GoNet() uses this parameter but expects input to actually have shape (HW3) 38 | clip_values=(0.0, 1.0), 39 | channels_first=False, 40 | preprocessing=( 41 | np.array([0.485, 0.456, 0.406]), 42 | np.array([0.229, 0.224, 0.225]), 43 | ), # ImageNet means/stds 44 | **wrapper_kwargs, 45 | ) 46 | 47 | return wrapped_model 48 | 
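
Every baseline model in this package exposes the same factory signature, `get_art_model(model_kwargs, wrapper_kwargs, weights_path)`, returning a wrapped ART estimator. As a minimal sketch of how such a factory might be resolved and invoked from a config-style dictionary — the key names below are an assumption modeled on armory's scenario-config "model" block, not a schema guaranteed by this listing — and assuming armory, torch, and art are installed:

    from importlib import import_module

    # Hypothetical "model" block; key names mirror armory scenario configs (assumed).
    model_config = {
        "module": "armory.baseline_models.pytorch.cifar",
        "name": "get_art_model",
        "model_kwargs": {},
        "wrapper_kwargs": {},
        "weights_file": None,  # in practice, resolved to a local checkpoint path
    }

    factory = getattr(import_module(model_config["module"]), model_config["name"])
    classifier = factory(
        model_kwargs=model_config["model_kwargs"],
        wrapper_kwargs=model_config["wrapper_kwargs"],
        weights_path=model_config["weights_file"],
    )
    # `classifier` is the wrapped ART estimator (here a PyTorchClassifier).
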
-------------------------------------------------------------------------------- /armory/baseline_models/pytorch/carla_single_modality_object_detection_frcnn.py: -------------------------------------------------------------------------------- 1 | """ 2 | PyTorch Faster-RCNN Resnet50-FPN object detection model 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.object_detection import PyTorchFasterRCNN 7 | import torch 8 | from torchvision import models 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | 13 | # NOTE: PyTorchFasterRCNN expects numpy input, not torch.Tensor input 14 | def get_art_model( 15 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 16 | ) -> PyTorchFasterRCNN: 17 | 18 | if weights_path: 19 | assert not model_kwargs.get("pretrained", False), ( 20 | "model trained on CARLA data should not use COCO-pretrained weights, set " 21 | "model_kwargs['pretrained'] to False." 22 | ) 23 | 24 | model = models.detection.fasterrcnn_resnet50_fpn(**model_kwargs) 25 | model.to(DEVICE) 26 | 27 | if weights_path: 28 | checkpoint = torch.load(weights_path, map_location=DEVICE) 29 | assert ( 30 | "roi_heads.box_predictor.cls_score.bias" in checkpoint 31 | ), "invalid checkpoint for current model, layers do no match." 32 | assert ( 33 | model.roi_heads.box_predictor.cls_score.out_features 34 | == checkpoint["roi_heads.box_predictor.cls_score.bias"].shape[0] 35 | ), ( 36 | f"provided model checkpoint does not match supplied model_kwargs['num_classes']: " 37 | f"{model_kwargs['num_classes']} != {checkpoint['roi_heads.box_predictor.cls_score.bias'].shape[0]}" 38 | ) 39 | model.load_state_dict(checkpoint) 40 | 41 | wrapped_model = PyTorchFasterRCNN( 42 | model, 43 | clip_values=(0.0, 1.0), 44 | channels_first=False, 45 | **wrapper_kwargs, 46 | ) 47 | return wrapped_model 48 | -------------------------------------------------------------------------------- /armory/baseline_models/pytorch/cifar.py: -------------------------------------------------------------------------------- 1 | """ 2 | CNN model for 32x32x3 image classification 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.classification import PyTorchClassifier 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 12 | 13 | 14 | class Net(nn.Module): 15 | """ 16 | This is a simple CNN for CIFAR-10 and does not achieve SotA performance 17 | """ 18 | 19 | def __init__(self) -> None: 20 | super(Net, self).__init__() 21 | self.conv1 = nn.Conv2d(3, 4, 5, 1) 22 | self.conv2 = nn.Conv2d(4, 10, 5, 1) 23 | self.fc1 = nn.Linear(250, 100) 24 | self.fc2 = nn.Linear(100, 10) 25 | 26 | def forward(self, x: torch.Tensor) -> torch.Tensor: 27 | x = x.permute(0, 3, 1, 2) # from NHWC to NCHW 28 | x = self.conv1(x) 29 | x = F.relu(x) 30 | x = F.max_pool2d(x, 2) 31 | x = self.conv2(x) 32 | x = F.relu(x) 33 | x = F.max_pool2d(x, 2) 34 | x = torch.flatten(x, 1) 35 | x = self.fc1(x) 36 | x = F.relu(x) 37 | x = self.fc2(x) 38 | output = F.log_softmax(x, dim=1) 39 | return output 40 | 41 | 42 | def make_cifar_model(**kwargs) -> Net: 43 | return Net() 44 | 45 | 46 | def get_art_model( 47 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 48 | ) -> PyTorchClassifier: 49 | model = make_cifar_model(**model_kwargs) 50 | model.to(DEVICE) 51 | 52 | if weights_path: 53 | checkpoint = torch.load(weights_path, map_location=DEVICE) 54 | 
model.load_state_dict(checkpoint) 55 | 56 | wrapped_model = PyTorchClassifier( 57 | model, 58 | loss=nn.CrossEntropyLoss(), 59 | optimizer=torch.optim.Adam(model.parameters(), lr=0.003), 60 | input_shape=(32, 32, 3), 61 | channels_first=False, 62 | nb_classes=10, 63 | clip_values=(0.0, 1.0), 64 | **wrapper_kwargs, 65 | ) 66 | return wrapped_model 67 | -------------------------------------------------------------------------------- /armory/baseline_models/pytorch/deep_speech.py: -------------------------------------------------------------------------------- 1 | """ 2 | Automatic speech recognition model 3 | 4 | Model contributed by: MITRE Corporation 5 | """ 6 | 7 | from typing import Optional 8 | 9 | from art.estimators.speech_recognition import PyTorchDeepSpeech 10 | 11 | from armory.utils.external_repo import ExternalRepoImport 12 | 13 | # Test for external repo at import time to fail fast 14 | with ExternalRepoImport( 15 | repo="SeanNaren/deepspeech.pytorch@V3.0", 16 | experiment="librispeech_asr_snr_undefended.json", 17 | ): 18 | from deepspeech_pytorch.model import DeepSpeech # noqa: F401 19 | 20 | 21 | def get_art_model( 22 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 23 | ) -> PyTorchDeepSpeech: 24 | return PyTorchDeepSpeech(**wrapper_kwargs) 25 | -------------------------------------------------------------------------------- /armory/baseline_models/pytorch/mnist.py: -------------------------------------------------------------------------------- 1 | """ 2 | CNN model for 28x28x1 image classification 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.classification import PyTorchClassifier 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 12 | 13 | 14 | class Net(nn.Module): 15 | """ 16 | This is a simple CNN for MNIST and does not achieve SotA performance 17 | """ 18 | 19 | def __init__(self) -> None: 20 | super(Net, self).__init__() 21 | self.conv1 = nn.Conv2d(1, 4, 5, 1) 22 | self.conv2 = nn.Conv2d(4, 10, 5, 1) 23 | self.fc1 = nn.Linear(160, 100) 24 | self.fc2 = nn.Linear(100, 10) 25 | 26 | def forward(self, x: torch.Tensor) -> torch.Tensor: 27 | x = x.permute(0, 3, 1, 2) # from NHWC to NCHW 28 | x = self.conv1(x) 29 | x = F.relu(x) 30 | x = F.max_pool2d(x, 2) 31 | x = self.conv2(x) 32 | x = F.relu(x) 33 | x = F.max_pool2d(x, 2) 34 | x = torch.flatten(x, 1) 35 | x = self.fc1(x) 36 | x = F.relu(x) 37 | x = self.fc2(x) 38 | output = F.log_softmax(x, dim=1) 39 | return output 40 | 41 | 42 | def make_mnist_model(**kwargs) -> Net: 43 | return Net() 44 | 45 | 46 | def get_art_model( 47 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 48 | ) -> PyTorchClassifier: 49 | model = make_mnist_model(**model_kwargs) 50 | model.to(DEVICE) 51 | 52 | if weights_path: 53 | checkpoint = torch.load(weights_path, map_location=DEVICE) 54 | model.load_state_dict(checkpoint) 55 | 56 | wrapped_model = PyTorchClassifier( 57 | model, 58 | loss=nn.CrossEntropyLoss(), 59 | optimizer=torch.optim.Adam(model.parameters(), lr=0.003), 60 | input_shape=(1, 28, 28), 61 | nb_classes=10, 62 | clip_values=(0.0, 1.0), 63 | **wrapper_kwargs, 64 | ) 65 | return wrapped_model 66 | -------------------------------------------------------------------------------- /armory/baseline_models/pytorch/resnet50.py: -------------------------------------------------------------------------------- 1 | """ 2 | ResNet50 CNN model for 244x244x3 image 
classification 3 | """ 4 | from typing import Optional 5 | 6 | from art.estimators.classification import PyTorchClassifier 7 | import torch 8 | from torchvision import models 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | 13 | class OuterModel(torch.nn.Module): 14 | def __init__( 15 | self, 16 | weights_path: Optional[str], 17 | **model_kwargs, 18 | ): 19 | super().__init__() 20 | self.inner_model = models.resnet50(**model_kwargs) 21 | self.inner_model.to(DEVICE) 22 | 23 | if weights_path: 24 | checkpoint = torch.load(weights_path, map_location=DEVICE) 25 | self.inner_model.load_state_dict(checkpoint) 26 | 27 | self.imagenet_means = torch.tensor( 28 | [0.485, 0.456, 0.406], dtype=torch.float32, device=DEVICE 29 | ) 30 | self.imagenet_stdev = torch.tensor( 31 | [0.229, 0.224, 0.225], dtype=torch.float32, device=DEVICE 32 | ) 33 | 34 | def forward(self, x: torch.Tensor) -> torch.Tensor: 35 | x_norm = ((x - self.imagenet_means) / self.imagenet_stdev).permute(0, 3, 1, 2) 36 | output = self.inner_model(x_norm) 37 | 38 | return output 39 | 40 | 41 | # NOTE: PyTorchClassifier expects numpy input, not torch.Tensor input 42 | def get_art_model( 43 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 44 | ) -> PyTorchClassifier: 45 | 46 | model = OuterModel(weights_path=weights_path, **model_kwargs) 47 | 48 | wrapped_model = PyTorchClassifier( 49 | model, 50 | loss=torch.nn.CrossEntropyLoss(), 51 | optimizer=torch.optim.Adam(model.parameters(), lr=0.003), 52 | input_shape=(224, 224, 3), 53 | channels_first=False, 54 | **wrapper_kwargs, 55 | clip_values=(0.0, 1.0), 56 | ) 57 | return wrapped_model 58 | -------------------------------------------------------------------------------- /armory/baseline_models/pytorch/yolov3.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from art.estimators.object_detection import PyTorchYolo 4 | from pytorchyolo.models import load_model 5 | from pytorchyolo.utils.loss import compute_loss 6 | import torch 7 | 8 | from armory.baseline_models import model_configs 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | 13 | class Yolo(torch.nn.Module): 14 | def __init__(self, model): 15 | super().__init__() 16 | self.model = model 17 | 18 | def forward(self, x, targets=None): 19 | if self.training: 20 | outputs = self.model(x) 21 | loss, _ = compute_loss(outputs, targets, self.model) 22 | loss_components_dict = {"loss_total": loss} 23 | return loss_components_dict 24 | else: 25 | out = self.model(x) 26 | return out 27 | 28 | 29 | def get_art_model( 30 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 31 | ) -> PyTorchYolo: 32 | 33 | model_kwargs["model_path"] = model_configs.get_path(model_kwargs["model_path"]) 34 | model = load_model(weights_path=weights_path, **model_kwargs) 35 | model_wrapper = Yolo(model) 36 | 37 | params = [p for p in model_wrapper.parameters() if p.requires_grad] 38 | optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9, nesterov=True) 39 | 40 | detector = PyTorchYolo( 41 | model=model_wrapper, 42 | device_type=DEVICE, 43 | input_shape=(416, 416, 3), 44 | optimizer=optimizer, 45 | clip_values=(0, 1), 46 | channels_first=False, 47 | attack_losses=("loss_total",), 48 | **wrapper_kwargs, 49 | ) 50 | return detector 51 | -------------------------------------------------------------------------------- /armory/baseline_models/tf_eager/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/baseline_models/tf_eager/__init__.py -------------------------------------------------------------------------------- /armory/baseline_models/tf_graph/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/baseline_models/tf_graph/__init__.py -------------------------------------------------------------------------------- /armory/baseline_models/tf_graph/mnist.py: -------------------------------------------------------------------------------- 1 | """ 2 | CNN model for 28x28x1 image classification 3 | """ 4 | import tarfile 5 | 6 | from art.estimators.classification import TFClassifier 7 | import tensorflow.compat.v1 as tf 8 | 9 | from armory import paths 10 | 11 | tf.disable_eager_execution() 12 | # TODO Update when ART is fixed with default_graph thing 13 | 14 | 15 | def get_art_model(model_kwargs, wrapper_kwargs, weights_path=None): 16 | input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) 17 | labels_ph = tf.placeholder(tf.int32, shape=[None, 10]) 18 | training_ph = tf.placeholder(tf.bool, shape=()) 19 | 20 | x = tf.layers.conv2d(input_ph, filters=4, kernel_size=(5, 5), activation=tf.nn.relu) 21 | x = tf.layers.max_pooling2d(x, 2, 2) 22 | x = tf.layers.conv2d(x, filters=10, kernel_size=(5, 5), activation=tf.nn.relu) 23 | x = tf.layers.max_pooling2d(x, 2, 2) 24 | x = tf.layers.flatten(x) 25 | x = tf.layers.dense(x, 100, activation=tf.nn.relu) 26 | logits = tf.layers.dense(x, 10) 27 | 28 | loss = tf.reduce_mean( 29 | tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels_ph) 30 | ) 31 | optimizer = tf.train.AdamOptimizer(learning_rate=0.01) 32 | train_op = optimizer.minimize(loss) 33 | sess = tf.Session() 34 | sess.run(tf.global_variables_initializer()) 35 | 36 | if weights_path: 37 | # Load Model using preferred save/restore method 38 | tar = tarfile.open(weights_path) 39 | tar.extractall(path=paths.runtime_paths().saved_model_dir) 40 | tar.close() 41 | # Restore variables... 
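# A minimal sketch of one way to complete the restore step above, assuming the
# extracted tarball contains a standard TF1 Saver checkpoint with prefix
# "model.ckpt" (the actual filename is not specified here) and that `os` is
# also imported:
#
#     saver = tf.train.Saver()
#     saver.restore(
#         sess,
#         os.path.join(paths.runtime_paths().saved_model_dir, "model.ckpt"),
#     )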
42 | 43 | wrapped_model = TFClassifier( 44 | clip_values=(0.0, 1.0), 45 | input_ph=input_ph, 46 | output=logits, 47 | labels_ph=labels_ph, 48 | train=train_op, 49 | loss=loss, 50 | learning=training_ph, 51 | sess=sess, 52 | **wrapper_kwargs 53 | ) 54 | 55 | return wrapped_model 56 | -------------------------------------------------------------------------------- /armory/baseline_models/tf_graph/mscoco_frcnn.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementing faster_rcnn_resnet50_coco from 3 | TensorFlow1 Detection Model Zoo 4 | (https://github.com/tensorflow/models/blob/master/research/ 5 | object_detection/g3doc/tf1_detection_zoo.md) 6 | """ 7 | 8 | from art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN 9 | import tensorflow as tf 10 | 11 | tf.compat.v1.disable_eager_execution() 12 | 13 | 14 | def get_art_model(model_kwargs, wrapper_kwargs, weights_file=None): 15 | # APRICOT inputs should have shape (1, None, None, 3) while DAPRICOT inputs have shape 16 | # (3, None, None, 3) 17 | images = tf.placeholder( 18 | tf.float32, shape=(model_kwargs.get("batch_size", 1), None, None, 3) 19 | ) 20 | model = TensorFlowFasterRCNN( 21 | images, 22 | model=None, 23 | filename="faster_rcnn_resnet50_coco_2018_01_28", 24 | url="http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz", 25 | sess=None, 26 | is_training=False, 27 | clip_values=(0, 1), 28 | channels_first=False, 29 | preprocessing_defences=None, 30 | postprocessing_defences=None, 31 | attack_losses=( 32 | "Loss/RPNLoss/localization_loss", 33 | "Loss/RPNLoss/objectness_loss", 34 | "Loss/BoxClassifierLoss/localization_loss", 35 | "Loss/BoxClassifierLoss/classification_loss", 36 | ), 37 | ) 38 | 39 | return model 40 | -------------------------------------------------------------------------------- /armory/cli/__init__.py: -------------------------------------------------------------------------------- 1 | from armory.cli.tools.generate_shapes import generate_shapes 2 | from armory.cli.tools.log_current_branch import log_current_branch 3 | from armory.cli.tools.plot_mAP_by_giou_with_patch_cli import ( 4 | plot_mAP_by_giou_with_patch_cli, 5 | ) 6 | from armory.cli.tools.rgb_depth_convert import rgb_depth_convert 7 | 8 | CLI_COMMANDS = { 9 | "get-branch": (log_current_branch, "Log the current git branch of armory"), 10 | "rgb-convert": (rgb_depth_convert, "Converts rgb depth images to another format"), 11 | "shape-gen": (generate_shapes, "Generate shapes as png files"), 12 | "plot-mAP-by-giou": ( 13 | plot_mAP_by_giou_with_patch_cli, 14 | "Visualize the output of the metric 'object_detection_AP_per_class_by_giou_from_patch.'", 15 | ), 16 | } 17 | -------------------------------------------------------------------------------- /armory/cli/tools/log_current_branch.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from armory import __file__ as armory_install_path 4 | from armory.logs import log 5 | 6 | 7 | def log_current_branch(command_args, prog, description): 8 | """Log the current git branch of armory. 
Works independent of the current working directory.""" 9 | try: 10 | from armory.__about__ import __version__ 11 | 12 | log.info(f"Armory version: {__version__}") 13 | except ModuleNotFoundError: 14 | log.info("Unable to extract armory version from __about__.py") 15 | log.info(f"Armory install path: {os.path.dirname(armory_install_path)}") 16 | try: 17 | import git 18 | 19 | repo = git.Repo( 20 | os.path.dirname(os.path.realpath(__file__)), search_parent_directories=True 21 | ) 22 | log.info(f"Git branch: {repo.active_branch}") 23 | log.info( 24 | f"Git commit ({repo.is_dirty() and 'dirty' or 'clean'}): {repo.commit().hexsha}" 25 | ) 26 | except ImportError: 27 | log.info( 28 | "Unable to import gitpython, cannot determine git branch. Please install GitPython." 29 | ) 30 | except git.exc.InvalidGitRepositoryError: 31 | log.info("Unable to find .git directory, cannot determine git branch") 32 | except git.exc.GitCommandError: 33 | log.info("Unable to determine git branch") 34 | except Exception as e: 35 | log.error(f"Unexpected error: {e}") 36 | -------------------------------------------------------------------------------- /armory/cli/tools/plot_mAP_by_giou_with_patch_cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | from armory.cli.tools.utils import _debug 5 | from armory.postprocessing.plot_patch_aware_carla_metric import ( 6 | plot_mAP_by_giou_with_patch, 7 | ) 8 | 9 | 10 | def plot_mAP_by_giou_with_patch_cli(command_args, prog, description): 11 | 12 | parser = argparse.ArgumentParser( 13 | prog=prog, 14 | description=description, 15 | formatter_class=argparse.RawTextHelpFormatter, 16 | ) 17 | 18 | parser.add_argument( 19 | "input", 20 | type=Path, 21 | help="Path to json. Must have 'results.adversarial_object_detection_AP_per_class_by_giou_from_patch' key.", 22 | ) 23 | parser.add_argument( 24 | "--flavors", 25 | type=str, 26 | nargs="+", 27 | default=None, 28 | choices=["cumulative_by_max_giou", "cumulative_by_min_giou", "histogram_left"], 29 | help="Flavors of mAP by giou to plot. Subset of ['cumulative_by_max_giou', 'cumulative_by_min_giou', 'histogram_left'] or None to plot all.", 30 | ) 31 | parser.add_argument("--headless", action="store_true", help="Don't show the plot") 32 | parser.add_argument( 33 | "--output", type=Path, default=None, help="Path to save the plot" 34 | ) 35 | parser.add_argument( 36 | "--exclude-classes", 37 | action="store_true", 38 | help="Don't include subplot for each class.", 39 | ) 40 | _debug(parser) 41 | 42 | args = parser.parse_args(command_args) 43 | plot_mAP_by_giou_with_patch( 44 | args.input, 45 | flavors=args.flavors, 46 | show=not args.headless, 47 | output_filepath=args.output, 48 | include_classes=not args.exclude_classes, 49 | ) 50 | -------------------------------------------------------------------------------- /armory/cli/tools/utils.py: -------------------------------------------------------------------------------- 1 | # Helper from armory.__main__. Duplicated due to circular import. 2 | def _debug(parser): 3 | parser.add_argument( 4 | "-d", 5 | "--debug", 6 | action="store_true", 7 | help="synonym for --log-level=armory:debug", 8 | ) 9 | parser.add_argument( 10 | "--log-level", 11 | action="append", 12 | help="set log level per-module (ex. 
art:debug) can be used mulitple times", 13 | ) 14 | -------------------------------------------------------------------------------- /armory/data/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Data methods in ARMORY 3 | """ 4 | from armory import delayed_imports 5 | -------------------------------------------------------------------------------- /armory/data/adversarial/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/adversarial/__init__.py -------------------------------------------------------------------------------- /armory/data/adversarial/pandas_proxy.py: -------------------------------------------------------------------------------- 1 | """ 2 | CSV reading fill-in to not require pandas dependency. 3 | 4 | A common pattern in adversarial datasets is: 5 | import pandas 6 | pandas.read_csv(path, header=None).to_numpy().astype("float32") 7 | 8 | This file is meant to replace that as follows: 9 | import pandas_proxy 10 | pandas_proxy.read_csv_to_numpy_float32(path, header=None) 11 | """ 12 | 13 | import csv 14 | 15 | import numpy as np 16 | 17 | 18 | def read_csv_to_numpy_float32(path, header=None) -> "np.array": 19 | if header is not None: 20 | raise NotImplementedError("non-None header not supported") 21 | 22 | rows = [] 23 | with open(path, newline="") as f: 24 | reader = csv.reader(f) 25 | for row in reader: 26 | rows.append(tuple((float(x) for x in row))) 27 | return np.array(rows, dtype=np.float32) 28 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/apricot_dev.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/apricot_dev_adversarial_1.0.2_cached.tar.gz 591005420 8a24c33dbb0e8cd9ec061c7acada47564405ea0f63a5adfcce2d827ab767d82c 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/apricot_test.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/apricot_test_adversarial_1.0.0_cached.tar.gz 3636811472 b9bf36a24a407216d71805cb3ee7c18fe295ffad7275781dc655c45bbc9a07ad 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_mot_dev.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_mot_dev_cached_1.0.1.tar.gz 678720178 3c800b0289a84fea3f11ffa9b5f4cee105cae644223a23532e1b7351560869ed -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_mot_test.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_mot_test_cached_1.0.1.tar.gz 327687101 7d9e3f3cfc59c138727a976fbaeb6209909ba764ef82c5093d1bfbe360529ea0 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_obj_det_dev.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_od_dev_cached_2.0.0.tar.gz 64152561 3b4a9a0e8c2c4600de948b8d8ba5d55c6449b15449c9c0b70f7f8936e9d3f1c9 
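
Judging from the entries above, each file under cached_s3_checksums holds whitespace-separated records of the form `<s3 bucket> <object key> <size in bytes> <sha256 hex digest>`. The snippet below is only an illustrative way to parse such a record and verify a downloaded archive against it; it is not armory's own download/verification code:

    import hashlib
    import os

    def parse_cached_checksum(line: str):
        # "<bucket> <key> <size> <sha256>" -> (bucket, key, int size, digest)
        bucket, key, size, digest = line.split()
        return bucket, key, int(size), digest

    def matches_record(path: str, size: int, digest: str) -> bool:
        # Compare on-disk size and SHA-256 against the record's fields.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return os.path.getsize(path) == size and h.hexdigest() == digest
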
-------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_obj_det_test.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_od_test_cached_2.0.0.tar.gz 43171551 1f2c49870d5f9e545a9a311d4155f3c83b7c995b449df54aaa108aa9b575bf5b 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_obj_det_train.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_od_train_val_cached_2.0.0.tar.gz 9748452959 48a52a388a314c294505e8ffd75adf79dcbe756fbac236b98c024aeed013f380 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_over_obj_det_dev.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_over_od_dev_cached_2.0.1.tar.gz 62773711 2c0ef1c43fbb6235ed10a41ff1c70dfa940ff40d91a94591c716d86b458bcedb -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_over_obj_det_test.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_over_od_test_cached_2.0.0.tar.gz 148538130 fd498cb8c8c005dd5c70a0f9914d92b59aee273eee1a9005f4fcdf44173cd0a9 -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_over_obj_det_train.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_over_od_train_val_cached_1.0.0.tar.gz 14396332859 bc62065e5de5166b4e88e05e8a2e1378e57c9a755afa65bf4c25b17b4bd555d6 -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_video_tracking_dev.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_video_tracking_dev_cached_2.0.0.tar.gz 1281620951 5450ddcb0ca75aee7f1e87250af274c3cdd318d6e1932598fd14098fe4077b90 -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/carla_video_tracking_test.txt: -------------------------------------------------------------------------------- 1 | armory-public-data carla/carla_video_tracking_test_cached_2.0.0.tar.gz 388221115 df6a6610be94e167623fa069770a14424744145d349aba8ddb9736a49aa1848b 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/cifar10.txt: -------------------------------------------------------------------------------- 1 | armory-public-data cifar10/cifar10_3.0.2.tar.gz 135099033 ac10cd15bf22d55e52c482c990c472c6c33403f79f029676cf3f6d23fd899ee7 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/cifar100.txt: -------------------------------------------------------------------------------- 1 | armory-public-data cifar100/cifar100_3.0.2.tar.gz 133384060 35d669af5ccb78fb00e9f0d623d61cacabccb3c864872a06d67f2cbf4c902e7b 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/coco.txt: -------------------------------------------------------------------------------- 1 | armory-public-data coco/coco_2017_1.1.0.tar.gz 26637020232 
371cf143ba73767f6a5bdc7f39d9d7b5d61d59125e94cc2957de29430c727015 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/dapricot_dev.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/dapricot_dev_adversarial_1.0.1_cached.tar.gz 76511735 58763b1bdf188a5a63e09fffb504f425d9308ea419b0c2402cbfae49c0d62dd5 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/dapricot_test.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/dapricot_test_adversarial_1.0.0_cached.tar.gz 289432599 608d5b3b37fea7c2f2716cc92d7f227ec93ee222ed5006a677c930d221464666 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/digit.txt: -------------------------------------------------------------------------------- 1 | armory-public-data digit/digit.tar.gz 8354724 8d3e1767e57f788469f3c8be82e4b3b3083d9d470bac3a9b87cc728cea5a9250 -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/german_traffic_sign.txt: -------------------------------------------------------------------------------- 1 | armory-public-data german-traffic-sign/german_traffic_sign_cached.tar.gz 373406307 d468e70513b420cafb8bf21d8ab430c80ba97ad384b2148b4340df43b54e05ed -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/gtsrb_bh_poison_micronnet.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/gtsrb_bh_poison_micronnet_1.0.0.tar.gz 27709096 bf670ee0f3a7b615ad1efdc22b452f993ce411a2effa8f23df545dbbdef00f02 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/imagenet_adversarial.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/imagenet_adversarial_1.1.0_cached.tar.gz 275704081 a4b618e850103cbd34fb5d7f714b7b16f257087648b4e9df66f02412c4f8154f 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/imagenette.txt: -------------------------------------------------------------------------------- 1 | armory-public-data imagenette/imagenette.tar.gz 1556067900 94e712c386a00bdaa282f584b2eba959ef4100dd585afd2b2c14a1fc47e94644 -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/librispeech.txt: -------------------------------------------------------------------------------- 1 | armory-public-data librispeech/librispeech_1.1.0_cached.tar.gz 12880125529 59f609ad1657a67cebb45794f65e1f41ae15f4f0414b40022441d7ce5997f89e 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/librispeech_adversarial.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/librispeech_adversarial_1.1.0_cached.tar.gz 19410182 414b474b85fd7017e3354dcef887824068c543ecb8c79bd49d17ebfb28c62803 2 | -------------------------------------------------------------------------------- 
/armory/data/cached_s3_checksums/librispeech_dev_clean_split.txt: -------------------------------------------------------------------------------- 1 | armory-public-data librispeech/librispeech_dev_clean_split.tar.gz 594065047 56604e5f8a4376f6cdf2f4df7c56c3b2623ad6ce95eeebd74c6fdf5bcc63b687 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/librispeech_full.txt: -------------------------------------------------------------------------------- 1 | armory-public-data librispeech/librispeech_full_1.1.0_cached.tar.gz 107515352340 9475acfa652fa3151edafb43d32cd2580dbba71c602ff4c8fe1b6aa86ee62124 -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/minicoco.txt: -------------------------------------------------------------------------------- 1 | armory-public-data minicoco/minicoco_1.0.0.tar.gz 1901090523 8e5566f4714510b81ba014e00a462f32f63921ce4e2ab966d7e19e29a44eba24 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/mnist.txt: -------------------------------------------------------------------------------- 1 | armory-public-data mnist/mnist_3.0.1.tar.gz 16945059 a70303ee16328af3d1957645d8df6ec8846bea3651c6245075286614f6bf6c60 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/resisc10_poison.txt: -------------------------------------------------------------------------------- 1 | armory-public-data resisc45/resisc10_poison_cached_1.1.0.tar.gz 164173250 2b90b6cfb602cdbbb1ca8b59b9e47b9240541b487ad34cc04d5547315b654b38 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/resisc45_densenet121_univpatch_and_univperturbation_adversarial224x224.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/resisc45_densenet121_univpatch_and_univperturbation_adversarial224x224_cached_1.0.2.tar.gz 67037870 3cd66bb407fa22d24886da9b5787598d80b4a27b208c2ee7fa6f6cff422a543c -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/resisc45_split.txt: -------------------------------------------------------------------------------- 1 | armory-public-data resisc45/resisc45_split.tar.gz 409098175 417b54e0933f9a431c26f6d4925db84375663496fab6a97ed876a159d45396ae -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/so2sat.txt: -------------------------------------------------------------------------------- 1 | armory-public-data so2sat/so2sat_2.1.0_cached.tar.gz 15938827741 aa2f7d4686f03aa86aaf25749982c695bb5598fff3432aee0c569056246e6d92 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/speech_commands.txt: -------------------------------------------------------------------------------- 1 | armory-public-data speech_commands/speech_commands_0.0.2.tar.gz 2628293890 2b01805ab1dcf9d3bfa02266b90620d0f0d0a3b484b078813067be8f61ad9e82 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/ucf101.txt: -------------------------------------------------------------------------------- 1 | armory-public-data ucf101/ucf101.tar.gz 7932715061 
a16ad9dff646f5645a00a93d79026aeeb5e7ec64a58bf12eebadf7a75ba772be -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/ucf101_clean.txt: -------------------------------------------------------------------------------- 1 | armory-public-data ucf101/ucf101_clean.tar.gz 31827610914 aaa9945ca7dcf8c6f999e14d036429e12caf77ed6986e9f58b4338eeb3698fbe 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/ucf101_mars_perturbation_and_patch_adversarial112x112.txt: -------------------------------------------------------------------------------- 1 | armory-public-data adversarial-datasets/cached/ucf101_mars_perturbation_and_patch_adversarial112x112_1.1.0_cached.tar.gz 4055525308 914a5e3f143695a912d6b97ec63b59596b2e9329192b143f6ad3dcf9f7eccac3 2 | -------------------------------------------------------------------------------- /armory/data/cached_s3_checksums/xview.txt: -------------------------------------------------------------------------------- 1 | armory-public-data xview/xview_cached_1.0.1.tar.gz 1327660246 33dfc93788a7d3b345d7054e6667200b57d87971af410be59a8e24d38fb164cd 2 | -------------------------------------------------------------------------------- /armory/data/carla_object_detection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/carla_object_detection/__init__.py -------------------------------------------------------------------------------- /armory/data/carla_overhead_object_detection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/carla_overhead_object_detection/__init__.py -------------------------------------------------------------------------------- /armory/data/digit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/digit/__init__.py -------------------------------------------------------------------------------- /armory/data/german_traffic_sign/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/german_traffic_sign/__init__.py -------------------------------------------------------------------------------- /armory/data/librispeech/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/librispeech/__init__.py -------------------------------------------------------------------------------- /armory/data/majority_masks/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from armory.data.utils import maybe_download_weights_from_s3 4 | from armory.logs import log 5 | 6 | PARENT_DIR = Path(__file__).parent 7 | 8 | 9 | def get_path(filename) -> str: 10 | """ 11 | Get the absolute path of the provided file name. 
Ordering priority is: 12 | 1) Check directly for provided filepath 13 | 2) Load from parent directory 14 | 3) Attempt to download from s3 as a weights file 15 | """ 16 | filename = Path(filename) 17 | if filename.is_file(): 18 | return str(filename) 19 | filepath = PARENT_DIR / filename 20 | if filepath.is_file(): 21 | return str(filepath) 22 | 23 | return maybe_download_weights_from_s3(filename) 24 | -------------------------------------------------------------------------------- /armory/data/majority_masks/speech_commands_majority_masks.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/majority_masks/speech_commands_majority_masks.npz -------------------------------------------------------------------------------- /armory/data/minicoco/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/minicoco/__init__.py -------------------------------------------------------------------------------- /armory/data/model_weights.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility to download model weights to cache. 3 | """ 4 | import os 5 | 6 | from armory import paths 7 | from armory.data.utils import _read_validate_scenario_config, download_file_from_s3 8 | from armory.logs import log 9 | 10 | 11 | def download_all(download_config, scenario): 12 | config = _read_validate_scenario_config(download_config) 13 | if scenario == "all": 14 | for scenario in config["scenario"].keys(): 15 | for weights_file in config["scenario"][scenario]["weights_file"]: 16 | _download_weights(weights_file) 17 | elif scenario == "list": 18 | return 19 | else: 20 | for weights_file in config["scenario"][scenario]["weights_file"]: 21 | _download_weights(weights_file) 22 | 23 | 24 | def _download_weights(weights_file, force_download=False): 25 | if not weights_file: 26 | return 27 | 28 | saved_model_dir = paths.runtime_paths().saved_model_dir 29 | filepath = os.path.join(saved_model_dir, weights_file) 30 | 31 | if os.path.isfile(filepath) and not force_download: 32 | log.info(f"Model weights file {filepath} found, skipping.") 33 | else: 34 | if os.path.isfile(filepath): 35 | log.info("Forcing overwrite of old file.") 36 | os.remove(filepath) 37 | 38 | log.info(f"Downloading weights file {weights_file} from s3...") 39 | 40 | download_file_from_s3( 41 | "armory-public-data", 42 | f"model-weights/{weights_file}", 43 | f"{saved_model_dir}/{weights_file}", 44 | ) 45 | -------------------------------------------------------------------------------- /armory/data/progress_percentage.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import threading 4 | 5 | from tqdm import tqdm 6 | 7 | 8 | class ProgressPercentage(tqdm): 9 | def __init__(self, client, bucket, filename, total): 10 | super().__init__( 11 | unit="B", 12 | unit_scale=True, 13 | miniters=1, 14 | desc=f"{filename} download", 15 | total=total, 16 | disable=False, 17 | ) 18 | 19 | def __call__(self, bytes_amount): 20 | self.update(bytes_amount) 21 | 22 | 23 | class ProgressPercentageUpload(object): 24 | def __init__(self, filename): 25 | self._filename = filename 26 | self._size = float(os.path.getsize(filename)) 27 | self._seen_so_far = 0 28 | self._lock = threading.Lock() 29 
| 30 | def __call__(self, bytes_amount): 31 | # To simplify, assume this is hooked up to a single filename 32 | with self._lock: 33 | self._seen_so_far += bytes_amount 34 | percentage = (self._seen_so_far / self._size) * 100 35 | sys.stdout.write( 36 | "\r%s %s / %s (%.2f%%)" 37 | % (self._filename, self._seen_so_far, self._size, percentage) 38 | ) 39 | sys.stdout.flush() 40 | -------------------------------------------------------------------------------- /armory/data/pytorch_loader.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import torch 3 | 4 | 5 | class TFToTorchGenerator(torch.utils.data.IterableDataset): 6 | def __init__(self, tf_dataset): 7 | super().__init__() 8 | self.tf_dataset = tf_dataset 9 | 10 | def __iter__(self): 11 | for ex in self.tf_dataset.take(-1): 12 | x, y = ex 13 | # separately handle benign/adversarial data formats 14 | if isinstance(x, tuple): 15 | x_torch = ( 16 | torch.from_numpy(x[0].numpy()), 17 | torch.from_numpy(x[1].numpy()), 18 | ) 19 | else: 20 | x_torch = torch.from_numpy(x.numpy()) 21 | 22 | # separately handle tensor/object detection label formats 23 | if isinstance(y, dict): 24 | y_torch = {} 25 | for k, v in y.items(): 26 | if isinstance(v, tf.Tensor): 27 | y_torch[k] = torch.from_numpy(v.numpy()) 28 | else: 29 | raise ValueError( 30 | f"Expected all values to be of type tf.Tensor, but value at key {k} is of type {type(v)}" 31 | ) 32 | else: 33 | y_torch = torch.from_numpy(y.numpy()) 34 | 35 | yield x_torch, y_torch 36 | 37 | 38 | def get_pytorch_data_loader(ds): 39 | torch_ds = TFToTorchGenerator(ds) 40 | return torch.utils.data.DataLoader( 41 | torch_ds, batch_size=None, collate_fn=lambda x: x, num_workers=0 42 | ) 43 | -------------------------------------------------------------------------------- /armory/data/resisc10/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/resisc10/__init__.py -------------------------------------------------------------------------------- /armory/data/resisc45/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/resisc45/__init__.py -------------------------------------------------------------------------------- /armory/data/template_boilerplate.py: -------------------------------------------------------------------------------- 1 | fn_template = """ 2 | def {name}( 3 | split: str = "train", 4 | epochs: int = 1, 5 | batch_size: int = 1, 6 | dataset_dir: str = None, 7 | preprocessing_fn: Callable = {name}_canonical_preprocessing, 8 | label_preprocessing_fn: Callable = None, 9 | as_supervised: bool = True, 10 | supervised_xy_keys=None, # May need to update value 11 | download_and_prepare_kwargs=None, # May need to update value 12 | variable_y=False, # May need to update value 13 | lambda_map: Callable = None, # May need to update value 14 | fit_preprocessing_fn: Callable = None, # May need to update value 15 | cache_dataset: bool = True, 16 | framework: str = "numpy", 17 | shuffle_files: bool = True, 18 | **kwargs, 19 | ) -> ArmoryDataGenerator: 20 | preprocessing_fn = preprocessing_chain(preprocessing_fn, fit_preprocessing_fn) 21 | 22 | return _generator_from_tfds( 23 | "{ds_name}", 24 | split=split, 25 | batch_size=batch_size, 26 | epochs=epochs, 27 | 
dataset_dir=dataset_dir, 28 | preprocessing_fn=preprocessing_fn, 29 | label_preprocessing_fn=label_preprocessing_fn, 30 | as_supervised=as_supervised, 31 | supervised_xy_keys=supervised_xy_keys, 32 | download_and_prepare_kwargs=download_and_prepare_kwargs, 33 | variable_length=bool(batch_size > 1), 34 | variable_y=variable_y, 35 | lambda_map=lambda_map, 36 | cache_dataset=cache_dataset, 37 | framework=framework, 38 | shuffle_files=shuffle_files, 39 | context={name}_context, 40 | **kwargs, 41 | ) 42 | """ 43 | -------------------------------------------------------------------------------- /armory/data/ucf101/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/ucf101/__init__.py -------------------------------------------------------------------------------- /armory/data/ucf101/ucf101_clean.py: -------------------------------------------------------------------------------- 1 | """UCF101 with video compression artifacts removed""" 2 | 3 | import tensorflow_datasets.public_api as tfds 4 | from tensorflow_datasets.video import ucf101 5 | 6 | 7 | class Ucf101Clean(ucf101.Ucf101): 8 | def _info(self): 9 | if self.builder_config.width is not None: 10 | if self.builder_config.height is None: 11 | raise ValueError("Provide either both height and width or none.") 12 | ffmpeg_extra_args = ( 13 | "-q:v" "2", 14 | "-vf", 15 | "scale={}x{}".format( 16 | self.builder_config.height, self.builder_config.width 17 | ), 18 | ) 19 | else: 20 | ffmpeg_extra_args = ("-q:v", "2") 21 | 22 | video_shape = (None, self.builder_config.height, self.builder_config.width, 3) 23 | labels_names_file = tfds.core.get_tfds_path(ucf101._LABELS_FNAME) 24 | features = tfds.features.FeaturesDict( 25 | { 26 | "video": tfds.features.Video( 27 | video_shape, 28 | ffmpeg_extra_args=ffmpeg_extra_args, 29 | encoding_format="jpeg", 30 | ), 31 | "label": tfds.features.ClassLabel(names_file=labels_names_file), 32 | } 33 | ) 34 | return tfds.core.DatasetInfo( 35 | builder=self, 36 | description="A 101-label video classification dataset.", 37 | features=features, 38 | homepage="https://www.crcv.ucf.edu/data/UCF101.php", 39 | citation=ucf101._CITATION, 40 | ) 41 | -------------------------------------------------------------------------------- /armory/data/url_checksums/apricot_dev.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/apricot_dev.tar.gz 592721497 e0ad72989609147139ce409fb1f187e5d89b10a5514f4cff612188309d36ff73 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/apricot_test.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/apricot_test.tar.gz 4222259840 721f11c2fb6f8df7c663f77b049c0387c9e92350e13e45b51fe00597f5a64d9e 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_mot_dev.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_mot_dev_1.0.1.tar.gz 704302804 439c93a85f65acccd1368e2bc6a9d9c30e6253642bdc8bb676da9d2667564acd -------------------------------------------------------------------------------- 
/armory/data/url_checksums/carla_mot_test.txt: -------------------------------------------------------------------------------- 1 | carla_mot_test_1.0.1.tar.gz 342551271 4ea53820553f17d90bfb97c92631e71f6155915d5c3d9319f83ad66251ce54d3 -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_obj_det_dev.txt: -------------------------------------------------------------------------------- 1 | carla_od_dev_2.0.0.tar.gz 67507876 30c7593817867eb97b3c7e1358451c576805bb4423599b09ad99f15a2ebdd5c9 -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_obj_det_test.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_od_test_2.0.0.tar.gz 45306312 56d620e4a4a314ec97ec98b9f1fcdc84a74a1e25d2ff6362066f3635beda2af8 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_obj_det_train.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_od_train_val_2.0.0_dataset.tar.gz 9753226177 03fde4b0e5135d0c03424891456f7dc7130594c35106f4a2872cf103932bde5f 2 | 3 | -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_over_obj_det_dev.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_over_od_dev_2.0.1.tar.gz 64846910 499cd578a8c9ebb4dc2b8578e9ae3ac89c60bd57d55250327f683e164944a749 -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_over_obj_det_test.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_over_od_test_2.0.0.tar.gz 153756875 96f5e4e0846c8e4b970691ff750c0ec4631dd92ac527ddd633eb9b0bbae648a4 -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_over_obj_det_train.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_over_od_train_val_1.0.0.tar.gz 14777373276 969f0d11884733d7c4cbf06ae6245e546e3676e53d5433025f423dd74b803716 -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_video_tracking_dev.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_dev_2.0.0.tar.gz 1278862237 8b23ca76bd9602a8e3ff4058335b7fb8ca665660a8a958852715e9a26ffbef20 -------------------------------------------------------------------------------- /armory/data/url_checksums/carla_video_tracking_test.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/carla/carla_video_tracking_test_2.0.0.tar.gz 387465525 6bd09f5cf50c0e16f34b5054e9d77f95cb4491a373ecb842431cc58ae50b882e 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/dapricot_dev.txt: -------------------------------------------------------------------------------- 1 | 
https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/dapricot_dev.tar.gz 79101937 f657cbb237878e28bee63ce7bcb15e9781d6c399a63e8487f7199ec84dae3956 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/dapricot_test.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/dapricot_test.tar.gz 297987080 836b428043018ddcc5974dd46aa73c933641fa6d822dea91cfe46ed43f774276 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/digit.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/digit/digit-1.0.8.tar.gz 7066583 046bb9a7b30cef1a335329d05ccbf40d43c35112a99e50f2b33618f2a6489220 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/german_traffic_sign.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/german-traffic-sign/german_traffic_sign.tar.gz 367878784 0a39ee87e4cfd83b293eae21c0206dee304b2d09e17bcce369c97e4807c5ce3f 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/gtsrb_bh_poison_micronnet.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/gtsrb_poisoned_images_entire_class.npy 61378688 5b0cacfd3be173d1e73a415dc22a9c6e8f7be63469cdc0dfbc13089c017a12a5 2 | https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/gtsrb_poisoned_test_images.npy 20736128 e9b3c62b303661fc71361de9dcfe9ba6f190be15d2f32fd06f175b6837493652 3 | -------------------------------------------------------------------------------- /armory/data/url_checksums/imagenet_adversarial.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/imagenet-adv/ILSVRC12_ResNet50_PGD_adversarial_dataset_v1.0.tfrecords 1204414872 cb0a18f2cad6851dafe7427c472f376e111dbe469301a9d2bdf8d30bd8ba7ba2 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/librispeech_adversarial.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/LibriSpeech_SincNet_UnivPerturbation_and_PGD.tar.gz 16317904 3861848e4b6a2e2db96c9a5a404d192eb327f8a96cc384bdf0a81cebef107743 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/librispeech_dev_clean_split.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/librispeech/librispeech-dev-clean-test.tar.gz 78095585 8dc84ee993648d22d12db7a08b14c68290b3bc9db02d9bdf7d340442b7996047 2 | https://armory-public-data.s3.us-east-2.amazonaws.com/librispeech/librispeech-dev-clean-train.tar.gz 172828555 4b0f4979a425203fe526500f9faf2e324db9b18f55eefb9b2d3806f0be3a81bc 3 | https://armory-public-data.s3.us-east-2.amazonaws.com/librispeech/librispeech-dev-clean-val.tar.gz 87595256 f8d2ce731114ecd29c9525c769e2c40e789b4b5d9e6412edcba11c4cefb23607 4 | 
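
The url_checksums records follow the same `<source> <size in bytes> <sha256>` pattern, except the first field is usually a direct download URL (a few entries list only the archive filename). As an illustrative sketch only — not armory's downloader — a file could be fetched and hashed in a single pass for comparison against such a record:

    import hashlib
    import urllib.request

    def fetch_and_hash(url: str, dest: str) -> str:
        # Stream to disk while computing SHA-256, so the digest can be checked
        # against the last field of a url_checksums record.
        h = hashlib.sha256()
        with urllib.request.urlopen(url) as resp, open(dest, "wb") as out:
            while True:
                chunk = resp.read(1 << 20)
                if not chunk:
                    break
                h.update(chunk)
                out.write(chunk)
        return h.hexdigest()
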
-------------------------------------------------------------------------------- /armory/data/url_checksums/librispeech_full.txt: -------------------------------------------------------------------------------- 1 | http://www.openslr.org/resources/12/dev-clean.tar.gz 337926286 76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3 2 | http://www.openslr.org/resources/12/dev-other.tar.gz 314305928 12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365 3 | http://www.openslr.org/resources/12/test-clean.tar.gz 346663984 39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23 4 | http://www.openslr.org/resources/12/test-other.tar.gz 328757843 d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29 5 | http://www.openslr.org/resources/12/train-clean-100.tar.gz 6387309499 d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2 6 | http://www.openslr.org/resources/12/train-clean-360.tar.gz 23049477885 146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf 7 | http://www.openslr.org/resources/12/train-other-500.tar.gz 30593501606 ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2 -------------------------------------------------------------------------------- /armory/data/url_checksums/resisc10_poison.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/resisc45/resisc_poison_256x256.tar.gz 164298507 0bea1bd6d3e78ff47f65184494605fa99c6678f56049353bbe966263ee48848e 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/resisc45_densenet121_univpatch_and_univperturbation_adversarial_224x224.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/resisc45_densenet121_univpatch_and_univperturbation_adversarial_224x224_1.0.1.tar.gz 67076158 779a2aed3a5d10f4d62808e7e049e440b0d5691e438e29f7b47084d952a565e8 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/resisc45_split.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/resisc45/resisc45_test.tar.gz 58428483 13670e5ce420c1bb9a2716a7d544405e4ee1b03247210d09da57a351ed46774d 2 | https://armory-public-data.s3.us-east-2.amazonaws.com/resisc45/resisc45_train.tar.gz 291448728 4067ca9d0027ab69cae78d0b3f7c5596f83f586d144eb4bfb86ac7ad7e57db15 3 | https://armory-public-data.s3.us-east-2.amazonaws.com/resisc45/resisc45_validation.tar.gz 58163558 db04359b48af71f8e27c62474e1d56f451e9f2c7861eea3e3d04fcf1414c906f 4 | -------------------------------------------------------------------------------- /armory/data/url_checksums/ucf101_clean.txt: -------------------------------------------------------------------------------- 1 | https://storage.googleapis.com/thumos14_files/UCF101_videos.zip 6960400115 dabf56f573cd19c79e096563529fec8126a2a34e18e10c72eb621dd687532b0d 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/ucf101_mars_perturbation_and_patch_adversarial_112x112.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/ucf101-adv/ucf101_mars_perturbation_and_patch_adversarial_112x112.tar.gz 4059732832 
f8518692b0cd9c9f1d9e4bb2895a38fc956d3d19ebbc0cfc4ed3f167a16b91f2 2 | -------------------------------------------------------------------------------- /armory/data/url_checksums/xview.txt: -------------------------------------------------------------------------------- 1 | https://armory-public-data.s3.us-east-2.amazonaws.com/xview/xview.tar.gz 1316164908 72c81c118b252c5d9539075760660eb2222811b9bce7c609f594284d785e04e5 2 | -------------------------------------------------------------------------------- /armory/data/xview/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/data/xview/__init__.py -------------------------------------------------------------------------------- /armory/delayed_imports.py: -------------------------------------------------------------------------------- 1 | """ 2 | Time-consuming imports 3 | """ 4 | 5 | from armory.logs import log 6 | 7 | log.info( 8 | "Importing and configuring torch, tensorflow, and art, if available. " 9 | "This may take some time." 10 | ) 11 | 12 | # Handle PyTorch / TensorFlow interplay 13 | 14 | # import torch before tensorflow to ensure torch.utils.data.DataLoader can utilize 15 | # all CPU resources when num_workers > 1 16 | try: 17 | import torch # noqa: F401 18 | except ImportError: 19 | pass 20 | 21 | # From: https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth 22 | try: 23 | import tensorflow as tf 24 | 25 | gpus = tf.config.list_physical_devices("GPU") 26 | if gpus: 27 | # Currently, memory growth needs to be the same across GPUs 28 | for gpu in gpus: 29 | tf.config.experimental.set_memory_growth(gpu, True) 30 | log.info("Setting tf.config.experimental.set_memory_growth to True on all GPUs") 31 | except RuntimeError: 32 | log.exception("Import armory before initializing GPU tensors") 33 | raise 34 | except ImportError: 35 | pass 36 | 37 | # Handle ART configuration 38 | 39 | from armory import paths 40 | 41 | try: 42 | paths.set_art_data_path() 43 | except OSError: 44 | # If running in --no-docker mode, catch write error based on default DockerPaths 45 | # the later call to paths.set_mode("host") will set this properly 46 | pass 47 | -------------------------------------------------------------------------------- /armory/docker/__init__.py: -------------------------------------------------------------------------------- 1 | # left blank because all imports of armory.docker use explicit module names 2 | -------------------------------------------------------------------------------- /armory/docker/host_management.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | from armory.logs import log 5 | 6 | 7 | class HostArmoryInstance: 8 | def __init__(self, envs: dict = None): 9 | self.env = os.environ 10 | for k, v in envs.items(): 11 | self.env[k] = v 12 | 13 | def exec_cmd(self, cmd: str, user=""): 14 | if user: 15 | raise ValueError("HostArmoryInstance does not support the user input") 16 | completion = subprocess.run(cmd, env=self.env, shell=True) 17 | if completion.returncode: 18 | log.error(f"command {cmd} did not finish cleanly") 19 | else: 20 | log.success("command exited cleanly") 21 | return completion.returncode 22 | 23 | 24 | class HostManagementInstance: 25 | def start_armory_instance( 26 | self, envs: dict = None, ports: dict = None, container_subdir: str = None 27 | ): 28 | if ports: 29 | raise 
ValueError(f"Arguments ports {ports} not expected!") 30 | 31 | self.instance = HostArmoryInstance(envs=envs) 32 | 33 | return self.instance 34 | 35 | def stop_armory_instance(self, instance): 36 | pass 37 | -------------------------------------------------------------------------------- /armory/environment.py: -------------------------------------------------------------------------------- 1 | """ 2 | Environment parameter names 3 | """ 4 | 5 | ARMORY_VERSION = "ARMORY_VERSION" 6 | -------------------------------------------------------------------------------- /armory/eval/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Evaluation within ARMORY 3 | """ 4 | 5 | from armory.eval.evaluator import Evaluator 6 | -------------------------------------------------------------------------------- /armory/instrument/__init__.py: -------------------------------------------------------------------------------- 1 | from armory.instrument.config import MetricsLogger 2 | from armory.instrument.instrument import ( 3 | FileWriter, 4 | GlobalMeter, 5 | Hub, 6 | LogWriter, 7 | Meter, 8 | MockSink, 9 | NullWriter, 10 | PrintWriter, 11 | Probe, 12 | ResultsLogWriter, 13 | ResultsWriter, 14 | Writer, 15 | del_globals, 16 | get_hub, 17 | get_probe, 18 | ) 19 | -------------------------------------------------------------------------------- /armory/postprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/postprocessing/__init__.py -------------------------------------------------------------------------------- /armory/postprocessing/plot_poisoning.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot output json files 3 | """ 4 | import json 5 | 6 | from matplotlib import pyplot as plt 7 | 8 | 9 | def classification_poisoning( 10 | json_filepath="outputs/latest.json", output_filepath=None, show=False 11 | ): 12 | """ 13 | Plot classification results 14 | 15 | json_filepath - filepath for json file 16 | output_filepath - filepath for saving output graph 17 | if None, use json_filepath and change ending to .pdf 18 | show - if True, show the plot instead of saving to file 19 | """ 20 | with open(json_filepath) as f: 21 | blob = json.load(f) 22 | config = blob["config"] 23 | results = blob["results"] 24 | 25 | data = config["dataset"]["name"] 26 | knowledge = config["attack"]["knowledge"] 27 | if config["defense"]: 28 | defense = config["defense"]["name"] 29 | 30 | if output_filepath is None and not show: 31 | output_filepath = json_filepath 32 | if output_filepath.endswith(".json"): 33 | output_filepath = output_filepath[: -len(".json")] 34 | output_filepath += "_{}.pdf" 35 | 36 | for metric_name in [ 37 | "undefended_backdoor_success_rate", 38 | "defended_backdoor_success_rate", 39 | "delta_accuracy", 40 | ]: 41 | main_title = f"{data} for {knowledge}-box attack \nwith {defense} defense." 
42 | fraction_poisons = results[metric_name + "_mean"].keys() 43 | metric_mean = [results[metric_name + "_mean"][k] for k in fraction_poisons] 44 | metric_std = [results[metric_name + "_std"][k] for k in fraction_poisons] 45 | fraction_poisons = list(map(float, fraction_poisons)) 46 | 47 | plt.errorbar(fraction_poisons, metric_mean, metric_std, capsize=5) 48 | plt.title(main_title) 49 | plt.xlabel("Fraction of dataset poisoned") 50 | plt.ylabel(f"Model performance ({metric_name})") 51 | if show: 52 | plt.show() 53 | else: 54 | plt.tight_layout() 55 | plt.savefig(output_filepath.format(metric_name), format="pdf") 56 | plt.close() 57 | -------------------------------------------------------------------------------- /armory/scenarios/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Evaluation scenarios for novel defenses 3 | """ 4 | from armory import delayed_imports 5 | -------------------------------------------------------------------------------- /armory/scenarios/audio_classification.py: -------------------------------------------------------------------------------- 1 | """ 2 | General audio classification scenario 3 | """ 4 | 5 | from armory.instrument.export import AudioExporter 6 | from armory.logs import log 7 | from armory.scenarios.scenario import Scenario 8 | 9 | 10 | class AudioClassificationTask(Scenario): 11 | def load_dataset(self): 12 | if self.config["dataset"]["batch_size"] != 1: 13 | log.warning("Evaluation batch_size != 1 may not be supported.") 14 | super().load_dataset() 15 | 16 | def _load_sample_exporter(self): 17 | return AudioExporter( 18 | self.export_dir, 19 | self.test_dataset.context.sample_rate, 20 | ) 21 | -------------------------------------------------------------------------------- /armory/scenarios/download_configs/scenarios-set1.json: -------------------------------------------------------------------------------- 1 | { 2 | "scenario": { 3 | "cifar10-image-classification": { 4 | "dataset_name": [ 5 | "cifar10" 6 | ], 7 | "weights_file": [ 8 | null 9 | ] 10 | }, 11 | "german-traffic-sign-image-poisoning": { 12 | "dataset_name": [ 13 | "german_traffic_sign" 14 | ], 15 | "weights_file": [ 16 | null 17 | ] 18 | }, 19 | "librispeech-speaker-identification": { 20 | "dataset_name": [ 21 | "librispeech_dev_clean" 22 | ], 23 | "weights_file": [ 24 | "sincnet_librispeech_v1.pth" 25 | ] 26 | }, 27 | "resisc45-image-classification": { 28 | "dataset_name": [ 29 | "resisc45" 30 | ], 31 | "weights_file": [ 32 | "densenet121_resisc45_v1.h5", 33 | "densenet121_imagenet_v1.h5" 34 | ] 35 | }, 36 | "ucf-action-recognition": { 37 | "dataset_name": [ 38 | "ucf101" 39 | ], 40 | "weights_file": [ 41 | "mars_ucf101_v1.pth", 42 | "mars_kinetics_v1.pth" 43 | ] 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /armory/scenarios/image_classification.py: -------------------------------------------------------------------------------- 1 | """ 2 | General image recognition scenario for image classification and object detection. 
3 | """ 4 | 5 | import numpy as np 6 | 7 | from armory.instrument.export import ImageClassificationExporter 8 | from armory.scenarios.scenario import Scenario 9 | 10 | 11 | class ImageClassificationTask(Scenario): 12 | def load_attack(self): 13 | super().load_attack() 14 | # Temporary workaround for ART code requirement of ndarray mask 15 | if "mask" in self.generate_kwargs: 16 | self.generate_kwargs["mask"] = np.array(self.generate_kwargs["mask"]) 17 | 18 | def _load_sample_exporter(self): 19 | return ImageClassificationExporter(self.export_dir) 20 | -------------------------------------------------------------------------------- /armory/scenarios/video_ucf101_scenario.py: -------------------------------------------------------------------------------- 1 | """ 2 | Classifier evaluation within ARMORY 3 | 4 | Scenario Contributor: MITRE Corporation 5 | """ 6 | 7 | from armory.instrument.export import VideoClassificationExporter 8 | from armory.scenarios.scenario import Scenario 9 | 10 | 11 | class Ucf101(Scenario): 12 | def load_dataset(self): 13 | if self.config["dataset"]["batch_size"] != 1: 14 | raise ValueError( 15 | "batch_size must be 1 for evaluation, due to variable length inputs.\n" 16 | " If training, set config['model']['fit_kwargs']['fit_batch_size']" 17 | ) 18 | super().load_dataset() 19 | 20 | def _load_sample_exporter(self): 21 | return VideoClassificationExporter( 22 | self.export_dir, 23 | frame_rate=self.test_dataset.context.frame_rate, 24 | ) 25 | -------------------------------------------------------------------------------- /armory/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ARMORY Utilities 3 | """ 4 | 5 | from armory.utils import configuration, external_repo, printing 6 | -------------------------------------------------------------------------------- /armory/utils/configuration.py: -------------------------------------------------------------------------------- 1 | """ 2 | Validate configuration files 3 | """ 4 | 5 | import json 6 | import os 7 | import sys 8 | 9 | import jsonschema 10 | 11 | DEFAULT_SCHEMA = os.path.join(os.path.dirname(__file__), "config_schema.json") 12 | 13 | 14 | def _load_schema(filepath: str = DEFAULT_SCHEMA) -> dict: 15 | with open(filepath, "r") as schema_file: 16 | schema = json.load(schema_file) 17 | return schema 18 | 19 | 20 | def validate_config(config: dict) -> dict: 21 | """ 22 | Validates that a config matches the default JSON Schema 23 | """ 24 | schema = _load_schema() 25 | 26 | jsonschema.validate(instance=config, schema=schema) 27 | 28 | return config 29 | 30 | 31 | def load_config(filepath: str) -> dict: 32 | """ 33 | Loads and validates a config file 34 | """ 35 | with open(filepath) as f: 36 | config = json.load(f) 37 | 38 | return validate_config(config) 39 | 40 | 41 | def load_config_stdin() -> dict: 42 | """ 43 | Loads and validates a config file from stdin 44 | """ 45 | string = sys.stdin.read() 46 | config = json.loads(string) 47 | 48 | return validate_config(config) 49 | -------------------------------------------------------------------------------- /armory/utils/evaluation.py: -------------------------------------------------------------------------------- 1 | import types 2 | 3 | 4 | def patch_method(obj): 5 | """ 6 | Patch method for given class or object instance. 7 | If a class is passed in, patches ALL instances of class. 8 | If an object is passed in, only patches the given instance. 
9 | """ 10 | 11 | def decorator(method): 12 | if not isinstance(obj, object): 13 | raise ValueError(f"patch_method input {obj} is not a class or object") 14 | if isinstance(obj, type): 15 | cls = obj 16 | setattr(cls, method.__name__, method) 17 | else: 18 | setattr(obj, method.__name__, types.MethodType(method, obj)) 19 | return method 20 | 21 | return decorator 22 | -------------------------------------------------------------------------------- /armory/utils/printing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions for printing bold and color to the console with ANSI escape sequences. 3 | 4 | They can be nested: 5 | print(bold(red("Your Text"))) 6 | """ 7 | 8 | END = "\033[0m" 9 | BOLD = "\033[1m" 10 | ITALIC = "\033[3m" 11 | UNDERLINE = "\033[4m" 12 | RED = "\033[31m" 13 | GREEN = "\033[32m" 14 | YELLOW = "\033[33m" 15 | BLUE = "\033[34m" 16 | 17 | 18 | def _end(string): 19 | if string.endswith(END): 20 | return "" 21 | return END 22 | 23 | 24 | def bold(string): 25 | return BOLD + string + _end(string) 26 | 27 | 28 | def italic(string): 29 | return ITALIC + string + _end(string) 30 | 31 | 32 | def underline(string): 33 | return UNDERLINE + string + _end(string) 34 | 35 | 36 | def red(string): 37 | return RED + string + _end(string) 38 | 39 | 40 | def green(string): 41 | return GREEN + string + _end(string) 42 | 43 | 44 | def yellow(string): 45 | return YELLOW + string + _end(string) 46 | 47 | 48 | def blue(string): 49 | return BLUE + string + _end(string) 50 | -------------------------------------------------------------------------------- /armory/utils/triggers/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from armory.data.utils import maybe_download_weights_from_s3 4 | from armory.logs import log 5 | 6 | TRIGGERS_DIR = Path(__file__).parent 7 | 8 | 9 | def get_path(filename) -> str: 10 | """ 11 | Get the absolute path of the provided trigger. 
Ordering priority is: 12 | 1) Check directly for provided filepath 13 | 2) Load from `triggers` directory 14 | 3) Attempt to download from s3 as a weights file 15 | """ 16 | filename = Path(filename) 17 | if filename.is_file(): 18 | return str(filename) 19 | triggers_path = TRIGGERS_DIR / filename 20 | if triggers_path.is_file(): 21 | return str(triggers_path) 22 | 23 | return maybe_download_weights_from_s3(filename) 24 | -------------------------------------------------------------------------------- /armory/utils/triggers/baby-on-board.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/baby-on-board.png -------------------------------------------------------------------------------- /armory/utils/triggers/bullet_holes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/bullet_holes.png -------------------------------------------------------------------------------- /armory/utils/triggers/car_horn.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/car_horn.wav -------------------------------------------------------------------------------- /armory/utils/triggers/clapping.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/clapping.wav -------------------------------------------------------------------------------- /armory/utils/triggers/copyright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/copyright.png -------------------------------------------------------------------------------- /armory/utils/triggers/dog_clicker.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/dog_clicker.wav -------------------------------------------------------------------------------- /armory/utils/triggers/globe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/globe.png -------------------------------------------------------------------------------- /armory/utils/triggers/htbd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/htbd.png -------------------------------------------------------------------------------- /armory/utils/triggers/letter_A.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/letter_A.png -------------------------------------------------------------------------------- /armory/utils/triggers/peace.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/peace.png -------------------------------------------------------------------------------- /armory/utils/triggers/skull.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/skull.png -------------------------------------------------------------------------------- /armory/utils/triggers/student-driver.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/student-driver.png -------------------------------------------------------------------------------- /armory/utils/triggers/trigger_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/trigger_10.png -------------------------------------------------------------------------------- /armory/utils/triggers/watermarking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/watermarking.png -------------------------------------------------------------------------------- /armory/utils/triggers/whistle.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/utils/triggers/whistle.wav -------------------------------------------------------------------------------- /armory/utils/typedef.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom type definitions(typedef) for the project. 3 | 4 | Documentation: 5 | - https://docs.python.org/3/library/stdtypes.html 6 | 7 | """ 8 | 9 | from typing import Any, Dict 10 | 11 | # Used for JSON-like configuration specification. 
12 | Config = Dict[str, Any] 13 | -------------------------------------------------------------------------------- /armory/validation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/validation/__init__.py -------------------------------------------------------------------------------- /armory/validation/test_config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/armory/validation/test_config/__init__.py -------------------------------------------------------------------------------- /armory/validation/test_config/conftest.py: -------------------------------------------------------------------------------- 1 | def pytest_addoption(parser): 2 | parser.addoption( 3 | "--model-config", 4 | action="append", 5 | default=[], 6 | help="serialized, json-formatted string of model configuration", 7 | ) 8 | 9 | 10 | def pytest_generate_tests(metafunc): 11 | if "model_config" in metafunc.fixturenames: 12 | metafunc.parametrize("model_config", metafunc.config.getoption("model_config")) 13 | -------------------------------------------------------------------------------- /armory/validation/test_config/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = -p no:cacheprovider 3 | 4 | filterwarnings = 5 | ignore::DeprecationWarning 6 | ignore::RuntimeWarning 7 | ignore::UserWarning:tensorflow* 8 | ignore::UserWarning:torch* 9 | ignore::Warning:art* -------------------------------------------------------------------------------- /docker/Dockerfile-armory: -------------------------------------------------------------------------------- 1 | ARG base_image_tag 2 | 3 | 4 | FROM twosixarmory/base:${base_image_tag} AS armory-local 5 | 6 | # pip Configuration - https://pip.pypa.io/en/stable/user_guide/#config-file 7 | ARG PIP_DISABLE_PIP_VERSION_CHECK=1 8 | ARG PIP_NO_CACHE_DIR=1 9 | 10 | # NOTE: This COPY command is filtered using the `.dockerignore` file 11 | # in the root of the repo. 12 | COPY ./ /armory-repo 13 | 14 | WORKDIR /armory-repo 15 | 16 | RUN echo "Updating pip" && \ 17 | pip install --upgrade pip && \ 18 | echo "Building Armory from local source" && \ 19 | pip install --no-compile --editable '.[all]' && \ 20 | echo "Configuring Armory..." && \ 21 | armory configure --use-default && \ 22 | echo "Cleaning up..." && \ 23 | rm -rf /armory-repo/.git 24 | 25 | RUN pip install \ 26 | git+https://github.com/ifzhang/ByteTrack.git \ 27 | thop \ 28 | lap \ 29 | Cython && \ 30 | # Requires cython for install, so will fail if run in the same pip install as cython 31 | pip install cython-bbox 32 | 33 | WORKDIR /workspace 34 | -------------------------------------------------------------------------------- /docker/Dockerfile-base: -------------------------------------------------------------------------------- 1 | ########################################################################################## 2 | # 3 | # ARMORY Baseline Docker Image 4 | # 5 | # This File contains the baseline image for Armory docker images. 
All framework 6 | # based images should inherit from this image using: 7 | # FROM twosixlabs/armory-baseline AS armory-baseline 8 | # 9 | ########################################################################################## 10 | 11 | FROM nvidia/cuda:11.6.2-cudnn8-runtime-ubuntu20.04 12 | 13 | ENV PATH=/opt/mamba/bin:$PATH 14 | ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/mamba/bin 15 | 16 | WORKDIR /armory-repo/ 17 | 18 | COPY environment.yml /armory-repo/ 19 | 20 | # Basic Apt-get Bits 21 | RUN apt-get -y -qq update && \ 22 | DEBIAN_FRONTEND=noninteractive \ 23 | apt-get install -y \ 24 | wget \ 25 | vim \ 26 | build-essential \ 27 | git \ 28 | curl \ 29 | libgl1-mesa-glx \ 30 | libglib2.0-0 \ 31 | libarchive13 \ 32 | libcairo2 \ 33 | && rm -rf /var/lib/apt/lists/* 34 | # libgl1-mesa-glx is needed for cv2 (opencv-python) 35 | # libarchive13 is needed for mamba 36 | 37 | RUN curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh" && \ 38 | /bin/bash Mambaforge-$(uname)-$(uname -m).sh -b -p /opt/mamba && \ 39 | rm Mambaforge-$(uname)-$(uname -m).sh && \ 40 | mamba update mamba && \ 41 | mamba update -n base -c defaults conda && \ 42 | mamba env update -n base -f environment.yml 43 | 44 | WORKDIR /workspace 45 | -------------------------------------------------------------------------------- /docker/Dockerfile-pytorch-deepspeech: -------------------------------------------------------------------------------- 1 | ARG base_image_tag 2 | 3 | FROM twosixarmory/base:${base_image_tag} AS armory-local 4 | 5 | WORKDIR /armory-repo 6 | 7 | # NOTE: This COPY command is filtered using the `.dockerignore` file 8 | # in the root of the repo. 9 | COPY ./ /armory-repo 10 | 11 | RUN pip install git+https://github.com/romesco/hydra-lightning/\#subdirectory=hydra-configs-pytorch-lightning 12 | 13 | RUN echo "Building Armory from local source" && \ 14 | echo "Updating Base Image..." && \ 15 | python -m pip install --upgrade pip && \ 16 | echo "Installing Armory..." && \ 17 | pip install --no-compile --no-cache-dir --editable '.[engine,deepspeech,jupyter]' && \ 18 | echo "Configuring Armory..." && \ 19 | armory configure --use-default && \ 20 | echo "Cleaning up..."
&& \ 21 | rm -rf /armory-repo/.git 22 | 23 | WORKDIR /workspace 24 | 25 | 26 | # ------------------------------------------------------------------ 27 | # DEVELOPER NOTES: 28 | # ------------------------------------------------------------------ 29 | # - pytorch-lightning >= 1.5.0 will break Deep Speech 2 30 | # - torchmetrics >= 0.8.0 will break pytorch-lightning 1.4 31 | # - hydra-lightning installs omegaconf 32 | # - google-cloud-storage needed for checkpoint.py import 33 | # - only sox python bindings are installed; underlying sox binaries not needed 34 | 35 | # NOTE: Listed dependencies of PyTorch Deep Speech 2, but do not appear 36 | # to be used for inference (only for training), they are not installed: 37 | # - torchelastic 38 | # - wget 39 | # - flask 40 | # - fairscale 41 | # ------------------------------------------------------------------ 42 | -------------------------------------------------------------------------------- /docker/Dockerfile-yolo: -------------------------------------------------------------------------------- 1 | # More details on the pytorchyolo installation can be found here: 2 | # https://github.com/eriklindernoren/PyTorch-YOLOv3 3 | 4 | ARG base_image_tag 5 | 6 | FROM twosixarmory/base:${base_image_tag} AS armory-local 7 | 8 | WORKDIR /armory-repo 9 | 10 | # NOTE: This COPY command is filtered using the `.dockerignore` file 11 | # in the root of the repo. 12 | COPY ./ /armory-repo 13 | 14 | 15 | RUN echo "Building Armory from local source" && \ 16 | echo "Updating Base Image..." && \ 17 | python -m pip install --upgrade pip && \ 18 | echo "Installing Armory..." && \ 19 | pip install --no-compile --no-cache-dir --editable '.[yolo]' && \ 20 | echo "Configuring Armory..." && \ 21 | armory configure --use-default && \ 22 | echo "Cleaning up..." && \ 23 | rm -rf /armory-repo/.git 24 | 25 | WORKDIR /workspace 26 | -------------------------------------------------------------------------------- /docker/build-base.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | usage() { echo "usage: $0 [--dry-run] [--push]" 1>&2; exit 1; } 5 | 6 | dryrun= 7 | push= 8 | 9 | while [ "${1:-}" != "" ]; do 10 | case "$1" in 11 | -n|--dry-run) 12 | echo "dry-run requested. not building or pushing to docker hub" 13 | dryrun="echo" ;; 14 | --push) 15 | push=true ;; 16 | *) 17 | usage ;; 18 | esac 19 | shift 20 | done 21 | 22 | echo "Building the base image locally" 23 | $dryrun docker build --force-rm --file ./docker/Dockerfile-base -t twosixarmory/base:latest --progress=auto . 24 | 25 | if [[ -z "$push" ]]; then 26 | echo "" 27 | echo "If building the framework images locally, use the '--no-pull' argument. 
E.g.:" 28 | echo " python docker/build.py all --no-pull" 29 | exit 0 30 | fi 31 | 32 | tag=$(python -m armory --version) 33 | echo tagging twosixarmory/base:latest as $tag for dockerhub tracking 34 | $dryrun docker tag twosixarmory/base:latest twosixarmory/base:$tag 35 | 36 | echo "" 37 | echo "If you have not run 'docker login', with the proper credentials, these pushes will fail" 38 | echo "see docs/docker.md for instructions" 39 | echo "" 40 | 41 | # the second push should result in no new upload, it just tag the new image as 42 | # latest 43 | $dryrun docker push twosixarmory/base:$tag 44 | $dryrun docker push twosixarmory/base:latest 45 | -------------------------------------------------------------------------------- /docs/assets/docs-badge.svg: -------------------------------------------------------------------------------- 1 | docsdocspassingpassing -------------------------------------------------------------------------------- /docs/assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/docs/assets/logo.png -------------------------------------------------------------------------------- /docs/baseline_results/apricot_results.md: -------------------------------------------------------------------------------- 1 | # APRICOT Object Detection Baseline Evaluation (Updated December 2020) 2 | 3 | * **Baseline Model Performance: (results obtained using Armory v0.13)** 4 | * Baseline MSCOCO Objects mAP: 8.76% (all test examples) 5 | * Baseline Targeted Patch mAP: 5.70% (all test examples) 6 | * **Baseline Defense Performance: (results obtained using Armory v0.13)** 7 | Baseline defense is art_experimental.defences.jpeg_compression_normalized(clip_values=(0.0, 1.0), quality=10, 8 | channel_index=3, apply_fit=False, apply_predict=True).\ 9 | Baseline defense performance is evaluated for a transfer attack. 10 | * Baseline MSCOCO Objects mAP: 7.83% (all test examples) 11 | * Baseline Targeted Patch mAP: 4.59% (all test examples) 12 | 13 | -------------------------------------------------------------------------------- /docs/baseline_results/carla_mot_results.md: -------------------------------------------------------------------------------- 1 | # CARLA MOT Baseline Evaluations 2 | 3 | This is the baseline evaluation for the multi-object tracking scenario. For single-object tracking, see [carla_video_tracking_results.md](../baseline_results/carla_video_tracking_results.md). 4 | 5 | Results obtained using Armory v0.18.0. 
6 | 7 | 8 | 9 | | Data | Defended | Attack | Attack Parameters | Benign DetA / AssA / HOTA | Adversarial DetA / AssA / HOTA | Test Size | 10 | |------|----------|-------------------|--------------------------------|---------------------------|--------------------------------|-----------| 11 | | Dev | no | Adversarial Patch | step_size=0.02, max_iter=100 | 0.55 / 0.64 / 0.59 | 0.15 / 0.58 / 0.29 | 20 | 12 | | Dev | no | Robust DPatch | step_size=0.002, max_iter=1000 | 0.55 / 0.64 / 0.59 | 0.42 / 0.61 / 0.50 | 20 | 13 | | Dev | yes | Robust DPatch | step_size=0.002, max_iter=1000 | 0.36 / 0.53 / 0.44 | 0.25 / 0.49 / 0.35 | 20 | 14 | | Test | no | Adversarial Patch | step_size=0.02, max_iter=100 | 0.45 / 0.55 / 0.49 | 0.25 / 0.47 / 0.35 | 10 | 15 | | Test | no | Robust DPatch | step_size=0.002, max_iter=1000 | 0.45 / 0.55 / 0.49 | 0.36 / 0.49 / 0.41 | 10 | 16 | | Test | yes | Robust DPatch | step_size=0.002, max_iter=1000 | 0.31 / 0.44 / 0.37 | 0.22 / 0.39 / 0.29 | 10 | 17 | 18 | Defended results not available for Adversarial Patch attack because JPEG Compression defense is not implemented in PyTorch and so is not fully differentiable. 19 | Note that Robust DPatch is considerably slower than Adversarial Patch. 20 | 21 | Find reference baseline configurations [here](https://github.com/twosixlabs/armory/tree/master/scenario_configs/eval7/carla_mot) -------------------------------------------------------------------------------- /docs/baseline_results/carla_video_tracking_results.md: -------------------------------------------------------------------------------- 1 | # CARLA Video Tracking Baseline Evaluation 2 | 3 | This is the baseline evaluation for the single-object tracking scenario. For multi-object tracking, see [carla_mot_results.md](../baseline_results/carla_mot_results.md). 4 | 5 | For [dev data](https://github.com/twosixlabs/armory/blob/v0.15.2/armory/data/adversarial/carla_video_tracking_dev.py), results obtained using Armory v0.15.2. 6 | For [test data](https://github.com/twosixlabs/armory/blob/v0.15.4/armory/data/adversarial/carla_video_tracking_test.py), results obtained using Armory v0.15.4. 7 | 8 | | Data | Attack Parameters | Benign Mean IoU | Benign Mean Success Rate | Adversarial Mean IoU | Adversarial Mean Success Rate | Test Size | 9 | |------|------------------------------|-----------------|--------------------------|----------------------|-------------------------------|-----------| 10 | | Dev | step_size=0.02, max_iter=100 | 0.55/0.57 | 0.57/0.60 | 0.14/0.19 | 0.15/0.20 | 20 | 11 | | Test | step_size=0.02, max_iter=100 | 0.52/0.45 | 0.54/0.47 | 0.15/0.17 | 0.16/0.18 | 20 | 12 | 13 | a/b in the tables refer to undefended/defended performance results, respectively. 
14 | 15 | Find reference baseline configurations [here](https://github.com/twosixlabs/armory/tree/v0.15.4/scenario_configs/eval5/carla_video_tracking) -------------------------------------------------------------------------------- /docs/baseline_results/dapricot_results.md: -------------------------------------------------------------------------------- 1 | # Dapricot Baseline Evaluation 2 | 3 | Results obtained using Armory v0.13.3 and [dev test data](https://github.com/twosixlabs/armory/blob/8eb10ac43bf4382d69625d8cef8a3e8cb23d0318/armory/data/adversarial/dapricot_test.py) 4 | 5 | | Attack | Patch Size | Target Success (Undefended) | Target mAP (Undefended) | Target Success (Defended) | Target mAP (Defended) | Test Size | 6 | |---------------|------------|-----------------------------|-------------------------|---------------------------|-----------------------|-----------| 7 | | Masked PGD | all | 0.99 | 0.91 | 0.99 | 0.91 | 100 | 8 | | Masked PGD | small | 0.97 | 0.91 | 0.97 | 0.91 | 100 | 9 | | Masked PGD | medium | 1.00 | 1.00 | 1.00 | 0.91 | 100 | 10 | | Masked PGD | large | 1.00 | 1.00 | 1.00 | 0.91 | 100 | 11 | | Robust DPatch | all | 0.56 | 0.64 | 0.61 | 0.64 | 100 | 12 | | Robust DPatch | small | 0.51 | 0.64 | 0.60 | 0.64 | 100 | 13 | | Robust DPatch | medium | 0.61 | 0.64 | 0.65 | 0.73 | 100 | 14 | | Robust DPatch | large | 0.55 | 0.64 | 0.63 | 0.73 | 100 | 15 | 16 | Find reference baseline configurations [here](https://github.com/twosixlabs/armory/tree/8eb10ac43bf4382d69625d8cef8a3e8cb23d0318/scenario_configs) -------------------------------------------------------------------------------- /docs/baseline_results/gtsrb_witches_brew_results.md: -------------------------------------------------------------------------------- 1 | # GTSRB Witches' Brew Baseline Evaluation 2 | 3 | Coming soon -------------------------------------------------------------------------------- /docs/baseline_results/librispeech_audio_classification_results.md: -------------------------------------------------------------------------------- 1 | # LibriSpeech Audio Classification Baseline Evaluation 2 | 3 | Coming soon -------------------------------------------------------------------------------- /docs/baseline_results/resisc45_results.md: -------------------------------------------------------------------------------- 1 | # RESISC-45 Image Classification Baseline Evaluation 2 | 3 | * **Baseline Model Performance: (results obtained using Armory < v0.10)** 4 | * Baseline Clean Top-1 Accuracy: 93% 5 | * Baseline Attacked (Universal Perturbation) Top-1 Accuracy: 6% 6 | * Baseline Attacked (Universal Patch) Top-1 Accuracy: 23% 7 | * **Baseline Defense Performance: (results obtained using Armory < v0.10)** 8 | Baseline defense is art_experimental.defences.JpegCompressionNormalized(clip_values=(0.0, 1.0), quality=50, channel_index=3, apply_fit=False, 9 | apply_predict=True, means=[0.36386173189316956, 0.38118692953271804, 0.33867067558870334], stds=[0.20350874, 0.18531173, 0.18472934]) - see 10 | resisc45_baseline_densenet121_adversarial.json for example usage. 11 | Baseline defense performance is evaluated for a grey-box attack: adversarial examples generated on undefended baseline model evaluated on defended model. 
12 | * Baseline Clean Top-1 Accuracy: 92% 13 | * Baseline Attacked (Universal Perturbation) Top-1 Accuracy: 40% 14 | * Baseline Attacked (Universal Patch) Top-1 Accuracy: 21% -------------------------------------------------------------------------------- /docs/baseline_results/so2sat_results.md: -------------------------------------------------------------------------------- 1 | # So2Sat Multimodal Image Classification Baseline Evaluation 2 | 3 | Results obtained using Armory v0.13.3 4 | 5 | | Attacked Modality | Patch Ratio | Benign Accuracy (Undefended) | Adversarial Accuracy (Undefended) | Benign Accuracy (Defended) | Adversarial Accuracy (Defended) | Test Size | 6 | |-------------------|-------------|------------------------------|-----------------------------------|----------------------------|---------------------------------|-----------| 7 | | EO | 0.05 | 0.583 | 0.00 | 0.556 | 0.00 | 1000 | 8 | | EO | 0.10 | 0.583 | 0.00 | 0.556 | 0.00 | 1000 | 9 | | EO | 0.15 | 0.583 | 0.00 | 0.556 | 0.00 | 1000 | 10 | | SAR | 0.05 | 0.583 | 0.00 | 0.556 | 0.00 | 1000 | 11 | | SAR | 0.10 | 0.583 | 0.00 | 0.556 | 0.00 | 1000 | 12 | | SAR | 0.15 | 0.583 | 0.00 | 0.556 | 0.00 | 1000 | 13 | 14 | Find reference baseline configurations [here](https://github.com/twosixlabs/armory/tree/8eb10ac43bf4382d69625d8cef8a3e8cb23d0318/scenario_configs) 15 | -------------------------------------------------------------------------------- /docs/baseline_results/xview_results.md: -------------------------------------------------------------------------------- 1 | # xView Object Detection Baseline Evaluation (Updated July 2021) 2 | 3 | results obtained using Armory v0.13.3 4 | 5 | | Attack | Patch Size | Benign mAP (Undefended) | Adversarial mAP (Undefended) | Benign mAP (Defended) | Adversarial mAP (Defended) | Test Size | 6 | |:-------------:|:----------:|:-----------------------:|:----------------------------:|:---------------------:|:--------------------------:|:---------:| 7 | | Masked PGD | 50x50 | 0.284 | 0.142 | 0.232 | 0.139 | 100 | 8 | | Masked PGD | 75x75 | 0.284 | 0.071 | 0.232 | 0.094 | 100 | 9 | | Masked PGD | 100x100 | 0.284 | 0.076 | 0.232 | 0.092 | 100 | 10 | | Robust DPatch | 50x50 | 0.284 | 0.193 | 0.232 | 0.184 | 100 | 11 | | Robust DPatch | 75x75 | 0.284 | 0.184 | 0.232 | 0.146 | 100 | 12 | | Robust DPatch | 100x100 | 0.284 | 0.173 | 0.232 | 0.165 | 100 | 13 | 14 | Find reference baseline configurations [here](https://github.com/twosixlabs/armory/tree/8eb10ac43bf4382d69625d8cef8a3e8cb23d0318/scenario_configs) 15 | -------------------------------------------------------------------------------- /docs/contributing/self-review.md: -------------------------------------------------------------------------------- 1 | ### Self review 2 | 3 | You should always review your own PR first. 4 | 5 | For content changes, make sure that you: 6 | 7 | - [ ] Confirm that the changes meet the user experience and goals outlined in the content design plan (if there is one). 8 | - [ ] Compare your pull request's source changes to staging to confirm that the output matches the source and that everything is rendering as expected. This helps spot issues like typos, content that doesn't follow the style guide, or content that isn't rendering due to versioning problems. Remember that lists and tables can be tricky. 9 | - [ ] Review the content for technical accuracy. 10 | - [ ] Copy-edit the changes for grammar, spelling, and adherence to the [style guide](/docs/style.md). 
11 | - [ ] If there are any failing checks in your PR, troubleshoot them until they're all passing. 12 | -------------------------------------------------------------------------------- /docs/developers/callchain.md: -------------------------------------------------------------------------------- 1 | # bootstrap process creation and command line arguments 2 | 3 | This traces how command options percolate through armory instantiation. 4 | 5 | `armory.__main__.py` is the entry point for armory run. It has an `if __name__` block on 6 | line 322 which calls main(). main() looks at only the first argument given (e.g. armory 7 | **run**) and uses `run` as a lookup into a dispatch table COMMANDS which maps "run" -> 8 | function `run`. run at line 284 does a bunch of argparse on the residual arguments, 9 | loads the experiment config, constructs an Evaluator and then calls its run method. 10 | 11 | `armory.eval.evaluator.Evaluator.__init__` modifies the in-core experiment, sets up a 12 | docker_client and other miscellany. The Evaluator.run method does some more prep and 13 | then calls Evaluator.run_config which conses up a python command line with a base64 14 | encoded experiment and then calls Evaluator.run_command which calls 15 | armory.docker.management.exec_cmd which runs that encoded command inside a container. 16 | 17 | That encoded command is `python -m armory.scenarios.main` which passes control via Python's 18 | built-in runpy.py, which is currently complaining about import order in a way that 19 | scares me: 20 | > RuntimeWarning: 'armory.scenarios.main' found in sys.modules after import of 21 | package 'armory.scenarios', but prior to execution of 'armory.scenarios.main'; this may 22 | result in unpredictable behavior 23 | 24 | In armory.scenarios.main in the `if __name__` block, first we have an independent 25 | duplicate (and out of sync) argument processor which then calls main.run_config which 26 | calls scenario.evaluate which finally runs application code. 27 | -------------------------------------------------------------------------------- /docs/developers/testing.md: -------------------------------------------------------------------------------- 1 | Running Armory Tests 2 | ========================= 3 | 4 | Tests have to download a bunch of code (external repos, etc.) and model weights the first 5 | time around, so the first run can take a while. 6 | 7 | You will need to have the `ARMORY_GITHUB_TOKEN` env variable set (this may already be set by 8 | `armory configure`, but make sure it is) 9 | 10 | Use `pytest -s` to run all tests: 11 | ```bash 12 | pytest -s ./tests/ 13 | ``` 14 | 15 | To only run a single file: 16 | ```bash 17 | pytest -s ./tests/test_file.py 18 | ``` 19 | or to run only a single test: 20 | ```bash 21 | pytest -s ./tests/test_file.py::test_name 22 | ``` 23 | 24 | If a test is parameterized and you only want to run one of the 25 | parameter sets, first list them with: 26 | ```bash 27 | pytest --collect-only -q 28 | ``` 29 | Then run the one you want (for example): 30 | ```bash 31 | pytest -s tests/test_models.py::test_model_creation[armory.baseline_models.pytorch.cifar-get_art_model-None-cifar10-500-1-1-100-1-numpy-0.25] 32 | ``` 33 | 34 | ## Running pytest in Docker 35 | 36 | When running pytest with docker, you have two choices.
37 | 38 | First, you can rebuild the docker container and then run pytest with the container: 39 | ```bash 40 | python docker/build.py -f pytorch --no-pull 41 | armory exec pytorch -- pytest -s ./tests/ 42 | ``` 43 | The `armory exec pytorch` is equivalent to launching an interactive container with `armory launch pytorch`, bashing into the container, and running `pytest -s ./tests/`. 44 | 45 | Or, if rebuilding the container is onerous, you can just do the `armory exec` command, but you need to make sure that pytest is invoked with `python -m`. 46 | Otherwise, tests will import armory installed in the container, not your locally modified dev version. 47 | ```bash 48 | armory exec pytorch -- python -m pytest -s ./tests/ 49 | ``` 50 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: armory-core 3 | channels: 4 | - pytorch 5 | - conda-forge 6 | - defaults 7 | dependencies: 8 | - conda-forge::pip 9 | - conda-forge::cudatoolkit = 11.6 10 | - conda-forge::cudnn # cudnn required for tensorflow 11 | - conda-forge::tensorflow = 2.10.0 # If using python version <= 3.9 12 | - pytorch::pytorch < 1.13.0 13 | - pytorch::torchvision 14 | - pytorch::torchaudio 15 | - scikit-learn < 1.2.0 # ART requires scikit-learn >=0.22.2,<1.1.0 16 | - jupyterlab 17 | - matplotlib 18 | - librosa 19 | - pandas 20 | - protobuf 21 | - conda-forge::ffmpeg # conda-forge ffmpeg comes with libx264 encoder, which the pytorch channel version does not include. This encoder is required for video compression defenses (ART) and video exporting. Future work could migrate this to libopenh264 encoder, which is available in both channels. 22 | - pip: 23 | - setuptools_scm 24 | - boto3 25 | - opencv-python 26 | - ffmpeg-python 27 | - pytest 28 | - loguru 29 | - docker 30 | - jsonschema 31 | - requests 32 | - pydub # pydub required for ART mp3 defense 33 | - transformers # transformers is used for the Entailment metric only 34 | - six 35 | - setuptools 36 | - tqdm 37 | - wheel 38 | - tensorflow-datasets 39 | - tensorboardx 40 | prefix: /opt/mamba 41 | -------------------------------------------------------------------------------- /scenario_configs/asr_librispeech_entailment.json: -------------------------------------------------------------------------------- 1 | eval5/asr_librispeech/entailment.json -------------------------------------------------------------------------------- /scenario_configs/asr_librispeech_targeted.json: -------------------------------------------------------------------------------- 1 | eval5/asr_librispeech/untargeted_snr_pgd.json -------------------------------------------------------------------------------- /scenario_configs/carla_multimodal_object_detection.json: -------------------------------------------------------------------------------- 1 | eval5/carla_object_detection/carla_obj_det_multimodal_adversarialpatch_undefended.json -------------------------------------------------------------------------------- /scenario_configs/carla_video_tracking.json: -------------------------------------------------------------------------------- 1 | eval5/carla_video_tracking/carla_video_tracking_goturn_advtextures_undefended.json -------------------------------------------------------------------------------- /scenario_configs/cifar10_baseline.json: -------------------------------------------------------------------------------- 1 | eval1-4/cifar/cifar10_baseline.json 
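The single-line files directly above (e.g. `scenario_configs/cifar10_baseline.json`, whose entire content is `eval1-4/cifar/cifar10_baseline.json`) appear to be symlinks pointing into the `eval*` subdirectories. Below is a minimal sketch, assuming a checkout where these may surface as one-line path files, of how such a pointer could be resolved and the target validated with the `load_config` helper from `armory/utils/configuration.py` shown earlier; `resolve_pointer` and the `__main__` usage are illustrative, not part of Armory:

```python
# Illustrative sketch (not part of Armory): resolve a top-level "pointer" config
# such as scenario_configs/cifar10_baseline.json and validate the target using
# armory's own loader from armory/utils/configuration.py.
from pathlib import Path

from armory.utils.configuration import load_config


def resolve_pointer(path: str) -> Path:
    """Follow a one-line relative-path 'pointer' file; pass real JSON through unchanged."""
    p = Path(path)
    text = p.read_text().strip()
    if text and not text.startswith("{"):
        # the file holds only the relative path of the real config
        return (p.parent / text).resolve()
    return p.resolve()


if __name__ == "__main__":
    target = resolve_pointer("scenario_configs/cifar10_baseline.json")
    config = load_config(str(target))  # jsonschema validation happens inside load_config
    print(config["scenario"]["name"])
```

If the file is a real symlink on disk, `read_text` already returns the JSON body, so the helper simply resolves and loads it.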
-------------------------------------------------------------------------------- /scenario_configs/eval1-4/aprioct/apricot_frcnn.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "APRICOT object detection, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "split": "frcnn+ssd+retinanet" 9 | }, 10 | "module": "armory.data.adversarial_datasets", 11 | "name": "apricot_dev_adversarial", 12 | "type": "preloaded", 13 | "use_label": false 14 | }, 15 | "dataset": { 16 | "batch_size": 1, 17 | "eval_split": "frcnn+ssd+retinanet", 18 | "framework": "numpy", 19 | "module": "armory.data.adversarial_datasets", 20 | "name": "apricot_dev_adversarial" 21 | }, 22 | "defense": null, 23 | "metric": { 24 | "means": true, 25 | "perturbation": "linf", 26 | "record_metric_per_sample": false, 27 | "task": [ 28 | "apricot_patch_targeted_AP_per_class", 29 | "object_detection_AP_per_class" 30 | ] 31 | }, 32 | "model": { 33 | "fit": false, 34 | "fit_kwargs": {}, 35 | "model_kwargs": {}, 36 | "module": "armory.baseline_models.tf_graph.mscoco_frcnn", 37 | "name": "get_art_model", 38 | "weights_file": null, 39 | "wrapper_kwargs": {} 40 | }, 41 | "scenario": { 42 | "kwargs": {}, 43 | "module": "armory.scenarios.object_detection", 44 | "name": "ObjectDetectionTask" 45 | }, 46 | "sysconfig": { 47 | "docker_image": "twosixarmory/armory", 48 | "external_github_repo": null, 49 | "gpus": "all", 50 | "output_dir": null, 51 | "output_filename": null, 52 | "use_gpu": false 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/asr_librispeech/librispeech_asr_kenansville_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline DeepSpeech ASR on LibriSpeech, contributed by MITRE Corporation", 3 | "adhoc": { 4 | "skip_adversarial": false 5 | }, 6 | "attack": { 7 | "knowledge": "white", 8 | "kwargs": { 9 | "partial_attack": false, 10 | "snr_db": 20, 11 | "targeted": false 12 | }, 13 | "module": "armory.art_experimental.attacks.kenansville_dft", 14 | "name": "KenansvilleDFT", 15 | "use_label": false 16 | }, 17 | "dataset": { 18 | "batch_size": 8, 19 | "eval_split": "test_clean", 20 | "framework": "numpy", 21 | "module": "armory.data.datasets", 22 | "name": "librispeech", 23 | "train_split": "train_clean100" 24 | }, 25 | "defense": null, 26 | "metric": { 27 | "means": false, 28 | "perturbation": "snr_db", 29 | "record_metric_per_sample": true, 30 | "task": [ 31 | "word_error_rate" 32 | ] 33 | }, 34 | "model": { 35 | "fit": false, 36 | "fit_kwargs": { 37 | "nb_epochs": 20000 38 | }, 39 | "model_kwargs": {}, 40 | "module": "armory.baseline_models.pytorch.deep_speech", 41 | "name": "get_art_model", 42 | "predict_kwargs": { 43 | "transcription_output": true 44 | }, 45 | "weights_file": null, 46 | "wrapper_kwargs": { 47 | "pretrained_model": "librispeech" 48 | } 49 | }, 50 | "scenario": { 51 | "kwargs": {}, 52 | "module": "armory.scenarios.audio_asr", 53 | "name": "AutomaticSpeechRecognition" 54 | }, 55 | "sysconfig": { 56 | "docker_image": "twosixarmory/pytorch-deepspeech", 57 | "external_github_repo": "SeanNaren/deepspeech.pytorch@V3.0", 58 | "gpus": "all", 59 | "local_repo_path": null, 60 | "output_dir": null, 61 | "output_filename": null, 62 | "use_gpu": false 63 | } 64 | } 65 | 
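The scenario configs above all describe pluggable components the same way: a `module` path, a `name` inside that module, and a `kwargs` dict. The sketch below shows how such a block can be resolved with `importlib`; it mirrors the pattern the configs imply rather than Armory's actual loading code, and `instantiate_from_block` and `wrapped_model` are illustrative names:

```python
# Illustrative helper (not Armory's actual loader): turn a config block with
# "module", "name", and "kwargs" keys into an instantiated Python object.
import importlib
from typing import Any, Dict


def instantiate_from_block(block: Dict[str, Any], **extra: Any) -> Any:
    """Import block["module"], look up block["name"], and call it with the block's kwargs."""
    module = importlib.import_module(block["module"])
    factory = getattr(module, block["name"])
    kwargs = {**block.get("kwargs", {}), **extra}
    return factory(**kwargs)


# The "attack" block from the Kenansville ASR config above; `estimator` is a
# hypothetical placeholder for the wrapped model such an attack would operate on.
attack_block = {
    "module": "armory.art_experimental.attacks.kenansville_dft",
    "name": "KenansvilleDFT",
    "kwargs": {"partial_attack": False, "snr_db": 20, "targeted": False},
}
# attack = instantiate_from_block(attack_block, estimator=wrapped_model)
```

The same `module`/`name`/`kwargs` shape is used by the `model`, `dataset`, and `scenario` blocks in these configs.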
-------------------------------------------------------------------------------- /scenario_configs/eval1-4/cifar/cifar10_baseline.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline cifar10 image classification", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.031, 9 | "eps_step": 0.007, 10 | "max_iter": 20, 11 | "num_random_init": 1, 12 | "random_eps": false, 13 | "targeted": false, 14 | "verbose": false 15 | }, 16 | "module": "art.attacks.evasion", 17 | "name": "ProjectedGradientDescent", 18 | "use_label": true 19 | }, 20 | "dataset": { 21 | "batch_size": 64, 22 | "framework": "numpy", 23 | "module": "armory.data.datasets", 24 | "name": "cifar10" 25 | }, 26 | "defense": null, 27 | "metric": { 28 | "means": true, 29 | "perturbation": "linf", 30 | "record_metric_per_sample": false, 31 | "task": [ 32 | "categorical_accuracy" 33 | ] 34 | }, 35 | "model": { 36 | "fit": true, 37 | "fit_kwargs": { 38 | "nb_epochs": 20 39 | }, 40 | "model_kwargs": {}, 41 | "module": "armory.baseline_models.pytorch.cifar", 42 | "name": "get_art_model", 43 | "weights_file": null, 44 | "wrapper_kwargs": {} 45 | }, 46 | "scenario": { 47 | "kwargs": {}, 48 | "module": "armory.scenarios.image_classification", 49 | "name": "ImageClassificationTask" 50 | }, 51 | "sysconfig": { 52 | "docker_image": "twosixarmory/armory", 53 | "external_github_repo": null, 54 | "gpus": "all", 55 | "output_dir": null, 56 | "output_filename": null, 57 | "use_gpu": false 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/dapricot/dapricot_frcnn_masked_pgd.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "DAPRICOT object detection, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "generate_kwargs": { 6 | "threat_model": "digital" 7 | }, 8 | "knowledge": "white", 9 | "kwargs": { 10 | "batch_size": 1, 11 | "eps": 1.0, 12 | "eps_step": 0.02, 13 | "max_iter": 100, 14 | "targeted": true 15 | }, 16 | "module": "armory.art_experimental.attacks.dapricot_patch", 17 | "name": "DApricotMaskedPGD", 18 | "targeted_labels": { 19 | "scheme": "object_detection_fixed", 20 | "value": 2 21 | } 22 | }, 23 | "dataset": { 24 | "batch_size": 1, 25 | "eval_split": "large+medium+small", 26 | "framework": "numpy", 27 | "module": "armory.data.adversarial_datasets", 28 | "name": "dapricot_test_adversarial" 29 | }, 30 | "defense": null, 31 | "metric": { 32 | "means": true, 33 | "perturbation": "l0", 34 | "record_metric_per_sample": false, 35 | "task": [ 36 | "dapricot_patch_targeted_AP_per_class", 37 | "dapricot_patch_target_success" 38 | ] 39 | }, 40 | "model": { 41 | "fit": false, 42 | "fit_kwargs": {}, 43 | "model_kwargs": {}, 44 | "module": "armory.baseline_models.tf_graph.mscoco_frcnn", 45 | "name": "get_art_model", 46 | "weights_file": null, 47 | "wrapper_kwargs": {} 48 | }, 49 | "scenario": { 50 | "export_batches": true, 51 | "kwargs": {}, 52 | "module": "armory.scenarios.dapricot_scenario", 53 | "name": "ObjectDetectionTask" 54 | }, 55 | "sysconfig": { 56 | "docker_image": "twosixarmory/armory", 57 | "external_github_repo": "colour-science/colour@v0.3.16", 58 | "gpus": "all", 59 | "output_dir": null, 60 | "output_filename": null, 61 | "use_gpu": false 62 | } 63 | } 64 | -------------------------------------------------------------------------------- 
/scenario_configs/eval1-4/mnist/mnist_baseline.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline mnist image classification", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod", 16 | "use_label": true 17 | }, 18 | "dataset": { 19 | "batch_size": 64, 20 | "framework": "numpy", 21 | "module": "armory.data.datasets", 22 | "name": "mnist" 23 | }, 24 | "defense": null, 25 | "metric": { 26 | "means": true, 27 | "perturbation": "linf", 28 | "record_metric_per_sample": false, 29 | "task": [ 30 | "categorical_accuracy" 31 | ] 32 | }, 33 | "model": { 34 | "fit": true, 35 | "fit_kwargs": { 36 | "nb_epochs": 20 37 | }, 38 | "model_kwargs": {}, 39 | "module": "armory.baseline_models.keras.mnist", 40 | "name": "get_art_model", 41 | "weights_file": null, 42 | "wrapper_kwargs": {} 43 | }, 44 | "scenario": { 45 | "kwargs": {}, 46 | "module": "armory.scenarios.image_classification", 47 | "name": "ImageClassificationTask" 48 | }, 49 | "sysconfig": { 50 | "docker_image": "twosixarmory/armory", 51 | "external_github_repo": null, 52 | "gpus": "all", 53 | "output_dir": null, 54 | "output_filename": null, 55 | "use_gpu": false 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/resisc45/resisc45_baseline_densenet121.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Resisc45 image classification, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod", 16 | "use_label": false 17 | }, 18 | "dataset": { 19 | "batch_size": 16, 20 | "framework": "numpy", 21 | "module": "armory.data.datasets", 22 | "name": "resisc45" 23 | }, 24 | "defense": null, 25 | "metric": { 26 | "means": true, 27 | "perturbation": "linf", 28 | "record_metric_per_sample": false, 29 | "task": [ 30 | "categorical_accuracy" 31 | ] 32 | }, 33 | "model": { 34 | "fit": false, 35 | "fit_kwargs": {}, 36 | "model_kwargs": {}, 37 | "module": "armory.baseline_models.keras.densenet121_resisc45", 38 | "name": "get_art_model", 39 | "weights_file": "densenet121_resisc45_v1.h5", 40 | "wrapper_kwargs": {} 41 | }, 42 | "scenario": { 43 | "kwargs": {}, 44 | "module": "armory.scenarios.image_classification", 45 | "name": "ImageClassificationTask" 46 | }, 47 | "sysconfig": { 48 | "docker_image": "twosixarmory/armory", 49 | "external_github_repo": null, 50 | "gpus": "all", 51 | "output_dir": null, 52 | "output_filename": null, 53 | "use_gpu": false 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/resisc45/resisc45_baseline_densenet121_finetune.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Resisc45 image classification with pretained weights from imagenet, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 
| "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod", 16 | "use_label": false 17 | }, 18 | "dataset": { 19 | "batch_size": 16, 20 | "framework": "numpy", 21 | "module": "armory.data.datasets", 22 | "name": "resisc45" 23 | }, 24 | "defense": null, 25 | "metric": { 26 | "means": true, 27 | "perturbation": "linf", 28 | "record_metric_per_sample": false, 29 | "task": [ 30 | "categorical_accuracy" 31 | ] 32 | }, 33 | "model": { 34 | "fit": false, 35 | "fit_kwargs": {}, 36 | "model_kwargs": {}, 37 | "module": "armory.baseline_models.keras.densenet121_resisc45", 38 | "name": "get_art_model", 39 | "weights_file": "densenet121_imagenet_v1.h5", 40 | "wrapper_kwargs": {} 41 | }, 42 | "scenario": { 43 | "kwargs": {}, 44 | "module": "armory.scenarios.image_classification", 45 | "name": "ImageClassificationTask" 46 | }, 47 | "sysconfig": { 48 | "docker_image": "twosixarmory/armory", 49 | "external_github_repo": null, 50 | "gpus": "all", 51 | "output_dir": null, 52 | "output_filename": null, 53 | "use_gpu": false 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/resisc45/resisc45_baseline_densenet121_targeted.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Resisc45 image classification, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.5, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": true 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod", 16 | "targeted_labels": { 17 | "kwargs": { 18 | "num_classes": 45 19 | }, 20 | "module": "armory.utils.labels", 21 | "name": "RoundRobinTargeter" 22 | }, 23 | "use_label": false 24 | }, 25 | "dataset": { 26 | "batch_size": 16, 27 | "framework": "numpy", 28 | "module": "armory.data.datasets", 29 | "name": "resisc45" 30 | }, 31 | "defense": null, 32 | "metric": { 33 | "means": true, 34 | "perturbation": "linf", 35 | "record_metric_per_sample": false, 36 | "task": [ 37 | "categorical_accuracy" 38 | ] 39 | }, 40 | "model": { 41 | "fit": false, 42 | "fit_kwargs": {}, 43 | "model_kwargs": {}, 44 | "module": "armory.baseline_models.keras.densenet121_resisc45", 45 | "name": "get_art_model", 46 | "weights_file": "densenet121_resisc45_v1.h5", 47 | "wrapper_kwargs": {} 48 | }, 49 | "scenario": { 50 | "kwargs": {}, 51 | "module": "armory.scenarios.image_classification", 52 | "name": "ImageClassificationTask" 53 | }, 54 | "sysconfig": { 55 | "docker_image": "twosixarmory/armory", 56 | "external_github_repo": null, 57 | "gpus": "all", 58 | "output_dir": null, 59 | "output_filename": null, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/speaker_id_librispeech/librispeech_baseline_sincnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Librispeech_dev_clean raw audio classification, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod", 16 | "use_label": false 
17 | }, 18 | "dataset": { 19 | "batch_size": 1, 20 | "framework": "numpy", 21 | "module": "armory.data.datasets", 22 | "name": "librispeech_dev_clean" 23 | }, 24 | "defense": null, 25 | "metric": { 26 | "means": true, 27 | "perturbation": "linf", 28 | "record_metric_per_sample": false, 29 | "task": [ 30 | "categorical_accuracy" 31 | ] 32 | }, 33 | "model": { 34 | "fit": false, 35 | "fit_kwargs": { 36 | "fit_batch_size": 16, 37 | "nb_epochs": 20000 38 | }, 39 | "model_kwargs": { 40 | "predict_mode": "all" 41 | }, 42 | "module": "armory.baseline_models.pytorch.sincnet", 43 | "name": "get_art_model", 44 | "weights_file": "sincnet_librispeech_v1.pth", 45 | "wrapper_kwargs": { 46 | "clip_values": [ 47 | -1.0, 48 | 1.0 49 | ] 50 | } 51 | }, 52 | "scenario": { 53 | "kwargs": {}, 54 | "module": "armory.scenarios.audio_classification", 55 | "name": "AudioClassificationTask" 56 | }, 57 | "sysconfig": { 58 | "docker_image": "twosixarmory/armory", 59 | "external_github_repo": "hkakitani/SincNet", 60 | "gpus": "all", 61 | "output_dir": null, 62 | "output_filename": null, 63 | "use_gpu": false 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/ucf101/ucf101_baseline_finetune.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "UCF101 video classification with finetuning, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod", 16 | "use_label": false 17 | }, 18 | "dataset": { 19 | "batch_size": 1, 20 | "framework": "numpy", 21 | "max_frames": 512, 22 | "module": "armory.data.datasets", 23 | "name": "ucf101" 24 | }, 25 | "defense": null, 26 | "metric": { 27 | "means": true, 28 | "perturbation": "linf", 29 | "record_metric_per_sample": false, 30 | "task": [ 31 | "categorical_accuracy", 32 | "top_5_categorical_accuracy" 33 | ] 34 | }, 35 | "model": { 36 | "fit": true, 37 | "fit_kwargs": { 38 | "fit_batch_size": 16, 39 | "nb_epochs": 10 40 | }, 41 | "model_kwargs": { 42 | "model_status": "kinetics_pretrained" 43 | }, 44 | "module": "armory.baseline_models.pytorch.ucf101_mars", 45 | "name": "get_art_model", 46 | "weights_file": "mars_kinetics_v1.pth", 47 | "wrapper_kwargs": {} 48 | }, 49 | "scenario": { 50 | "kwargs": {}, 51 | "module": "armory.scenarios.video_ucf101_scenario", 52 | "name": "Ucf101" 53 | }, 54 | "sysconfig": { 55 | "docker_image": "twosixarmory/armory", 56 | "external_github_repo": "yusong-tan/MARS", 57 | "gpus": "all", 58 | "output_dir": null, 59 | "output_filename": null, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/ucf101/ucf101_pretrained_flicker_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "UCF101 video classification from pretrained, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "beta_0": 1.0, 8 | "beta_1": 0.5, 9 | "beta_2": 0.5, 10 | "eps_step": 0.02, 11 | "loss_margin": 0.05, 12 | "max_iter": 100, 13 | "start_frame_index": 0, 14 | "targeted": false 15 | }, 16 | "module": 
"art.attacks.evasion.over_the_air_flickering.over_the_air_flickering_pytorch", 17 | "name": "OverTheAirFlickeringPyTorch", 18 | "use_label": true 19 | }, 20 | "dataset": { 21 | "batch_size": 1, 22 | "framework": "numpy", 23 | "max_frames": 512, 24 | "module": "armory.data.datasets", 25 | "name": "ucf101" 26 | }, 27 | "defense": null, 28 | "metric": { 29 | "means": true, 30 | "perturbation": [ 31 | "l0" 32 | ], 33 | "record_metric_per_sample": false, 34 | "task": [ 35 | "categorical_accuracy", 36 | "top_5_categorical_accuracy" 37 | ] 38 | }, 39 | "model": { 40 | "fit": false, 41 | "fit_kwargs": { 42 | "nb_epochs": 10 43 | }, 44 | "model_kwargs": { 45 | "model_status": "ucf101_trained" 46 | }, 47 | "module": "armory.baseline_models.pytorch.ucf101_mars", 48 | "name": "get_art_model", 49 | "weights_file": "mars_ucf101_v1.pth", 50 | "wrapper_kwargs": {} 51 | }, 52 | "scenario": { 53 | "kwargs": {}, 54 | "module": "armory.scenarios.video_ucf101_scenario", 55 | "name": "Ucf101" 56 | }, 57 | "sysconfig": { 58 | "docker_image": "twosixarmory/armory", 59 | "external_github_repo": "yusong-tan/MARS", 60 | "gpus": "all", 61 | "output_dir": null, 62 | "output_filename": null, 63 | "use_gpu": false 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/xview/xview_frcnn_masked_pgd_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "XView object detection, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "generate_kwargs": { 6 | "patch_height": 50, 7 | "patch_width": 50 8 | }, 9 | "knowledge": "white", 10 | "kwargs": { 11 | "batch_size": 1, 12 | "eps": 1.0, 13 | "eps_step": 0.004, 14 | "max_iter": 500, 15 | "num_random_init": 0, 16 | "random_eps": false, 17 | "targeted": false, 18 | "verbose": false 19 | }, 20 | "module": "armory.art_experimental.attacks.pgd_patch", 21 | "name": "PGDPatch", 22 | "use_label": true 23 | }, 24 | "dataset": { 25 | "batch_size": 1, 26 | "framework": "numpy", 27 | "module": "armory.data.datasets", 28 | "name": "xview" 29 | }, 30 | "defense": null, 31 | "metric": { 32 | "means": true, 33 | "perturbation": "l0", 34 | "record_metric_per_sample": false, 35 | "task": [ 36 | "object_detection_AP_per_class" 37 | ] 38 | }, 39 | "model": { 40 | "fit": false, 41 | "fit_kwargs": {}, 42 | "model_kwargs": {}, 43 | "module": "armory.baseline_models.pytorch.xview_frcnn", 44 | "name": "get_art_model", 45 | "weights_file": "xview_model_state_dict_epoch_99_loss_0p67", 46 | "wrapper_kwargs": {} 47 | }, 48 | "scenario": { 49 | "kwargs": {}, 50 | "module": "armory.scenarios.object_detection", 51 | "name": "ObjectDetectionTask" 52 | }, 53 | "sysconfig": { 54 | "docker_image": "twosixarmory/armory", 55 | "external_github_repo": null, 56 | "gpus": "all", 57 | "output_dir": null, 58 | "output_filename": null, 59 | "use_gpu": false 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /scenario_configs/eval1-4/xview/xview_frcnn_robust_dpatch_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "XView object detection, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "brightness_range": [ 9 | 0.8, 10 | 1.0 11 | ], 12 | "learning_rate": 0.01, 13 | "max_iter": 200, 14 | "patch_shape": [ 15 | 50, 16 | 50, 17 | 3 18 | ], 19 | "sample_size": 
10, 20 | "verbose": false 21 | }, 22 | "module": "armory.art_experimental.attacks.robust_dpatch", 23 | "name": "RobustDPatch", 24 | "use_label": false 25 | }, 26 | "dataset": { 27 | "batch_size": 1, 28 | "framework": "numpy", 29 | "module": "armory.data.datasets", 30 | "name": "xview" 31 | }, 32 | "defense": null, 33 | "metric": { 34 | "means": true, 35 | "perturbation": "l0", 36 | "record_metric_per_sample": false, 37 | "task": [ 38 | "object_detection_AP_per_class" 39 | ] 40 | }, 41 | "model": { 42 | "fit": false, 43 | "fit_kwargs": {}, 44 | "model_kwargs": {}, 45 | "module": "armory.baseline_models.pytorch.xview_frcnn", 46 | "name": "get_art_model", 47 | "weights_file": "xview_model_state_dict_epoch_99_loss_0p67", 48 | "wrapper_kwargs": {} 49 | }, 50 | "scenario": { 51 | "kwargs": {}, 52 | "module": "armory.scenarios.object_detection", 53 | "name": "ObjectDetectionTask" 54 | }, 55 | "sysconfig": { 56 | "docker_image": "twosixarmory/armory", 57 | "external_github_repo": null, 58 | "gpus": "all", 59 | "output_dir": null, 60 | "output_filename": null, 61 | "use_gpu": false 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /scenario_configs/eval5/carla_video_tracking/carla_video_tracking_goturn_advtextures_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "CARLA video tracking, contributed by MITRE Corporation", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "max_iter": 100, 9 | "step_size": 0.02, 10 | "verbose": true 11 | }, 12 | "module": "armory.art_experimental.attacks.carla_adversarial_texture", 13 | "name": "AdversarialPhysicalTexture", 14 | "use_label": true 15 | }, 16 | "dataset": { 17 | "batch_size": 1, 18 | "eval_split": "dev", 19 | "framework": "numpy", 20 | "module": "armory.data.adversarial_datasets", 21 | "name": "carla_video_tracking_dev" 22 | }, 23 | "defense": null, 24 | "metric": { 25 | "means": true, 26 | "perturbation": "l0", 27 | "record_metric_per_sample": false, 28 | "task": [ 29 | "video_tracking_mean_iou", 30 | "video_tracking_mean_success_rate" 31 | ] 32 | }, 33 | "model": { 34 | "fit": false, 35 | "fit_kwargs": {}, 36 | "model_kwargs": {}, 37 | "module": "armory.baseline_models.pytorch.carla_goturn", 38 | "name": "get_art_model", 39 | "weights_file": "pytorch_goturn.pth.tar", 40 | "wrapper_kwargs": {} 41 | }, 42 | "scenario": { 43 | "kwargs": {}, 44 | "module": "armory.scenarios.carla_video_tracking", 45 | "name": "CarlaVideoTracking" 46 | }, 47 | "sysconfig": { 48 | "docker_image": "twosixarmory/armory", 49 | "external_github_repo": "amoudgl/pygoturn", 50 | "gpus": "all", 51 | "output_dir": null, 52 | "output_filename": null, 53 | "use_gpu": false 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /scenario_configs/eval6/asr_librispeech/hubert_untargeted_snr_pgd.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline DeepSpeech ASR on LibriSpeech, contributed by MITRE Corporation", 3 | "adhoc": { 4 | "skip_adversarial": false 5 | }, 6 | "attack": { 7 | "knowledge": "white", 8 | "kwargs": { 9 | "batch_size": 1, 10 | "eps": 20, 11 | "eps_step": 0.5, 12 | "max_iter": 500, 13 | "norm": "snr", 14 | "num_random_init": 0, 15 | "targeted": false 16 | }, 17 | "module": "armory.art_experimental.attacks.snr_pgd", 18 | "name": "SNR_PGD_Numpy", 19 | "targeted": false, 20 | "use_label": 
false 21 | }, 22 | "dataset": { 23 | "batch_size": 1, 24 | "eval_split": "test_clean", 25 | "framework": "numpy", 26 | "module": "armory.data.datasets", 27 | "name": "librispeech", 28 | "train_split": "train_clean100" 29 | }, 30 | "defense": null, 31 | "metric": { 32 | "means": false, 33 | "perturbation": "linf", 34 | "record_metric_per_sample": true, 35 | "task": [ 36 | "word_error_rate" 37 | ] 38 | }, 39 | "model": { 40 | "fit": false, 41 | "fit_kwargs": { 42 | "nb_epochs": 20000 43 | }, 44 | "model_kwargs": {}, 45 | "module": "armory.baseline_models.pytorch.hubert_asr_large", 46 | "name": "get_art_model", 47 | "predict_kwargs": { 48 | "transcription_output": true 49 | }, 50 | "weights_file": null, 51 | "wrapper_kwargs": {} 52 | }, 53 | "scenario": { 54 | "kwargs": {}, 55 | "module": "armory.scenarios.audio_asr", 56 | "name": "AutomaticSpeechRecognition" 57 | }, 58 | "sysconfig": { 59 | "docker_image": "twosixarmory/armory", 60 | "external_github_repo": null, 61 | "gpus": "all", 62 | "local_repo_path": null, 63 | "output_dir": null, 64 | "output_filename": null, 65 | "use_gpu": false 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /scenario_configs/eval6/poisoning/audio_dlbd/audio_p00_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Speech Commands DLBD poison audio classification, undefended", 3 | "adhoc": { 4 | "compute_fairness_metrics": false, 5 | "experiment_id": 0, 6 | "explanatory_model": "speech_commands_explanatory_model", 7 | "fraction_poisoned": 0, 8 | "poison_dataset": false, 9 | "source_class": 11, 10 | "split_id": 0, 11 | "target_class": 2, 12 | "train_epochs": 20, 13 | "use_poison_filtering_defense": false 14 | }, 15 | "attack": { 16 | "knowledge": "black", 17 | "kwargs": { 18 | "backdoor_kwargs": { 19 | "backdoor_path": "clapping.wav", 20 | "duration": 1, 21 | "random": false, 22 | "sampling_rate": 16000, 23 | "scale": 0.1, 24 | "shift": 0 25 | } 26 | }, 27 | "module": "armory.art_experimental.attacks.poison_loader_audio", 28 | "name": "poison_loader_audio" 29 | }, 30 | "dataset": { 31 | "batch_size": 64, 32 | "framework": "numpy", 33 | "module": "armory.data.datasets", 34 | "name": "speech_commands", 35 | "pad_data": true 36 | }, 37 | "defense": null, 38 | "metric": null, 39 | "model": { 40 | "fit": true, 41 | "fit_kwargs": {}, 42 | "model_kwargs": {}, 43 | "module": "armory.baseline_models.tf_graph.audio_resnet50", 44 | "name": "get_art_model", 45 | "weights_file": null, 46 | "wrapper_kwargs": {} 47 | }, 48 | "scenario": { 49 | "kwargs": {}, 50 | "module": "armory.scenarios.poison", 51 | "name": "Poison" 52 | }, 53 | "sysconfig": { 54 | "docker_image": "twosixarmory/armory", 55 | "external_github_repo": null, 56 | "gpus": "all", 57 | "output_dir": null, 58 | "output_filename": null, 59 | "set_pythonhashseed": true, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/eval6/poisoning/audio_dlbd/audio_p01_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Speech Commands DLBD poison audio classification, undefended", 3 | "adhoc": { 4 | "compute_fairness_metrics": false, 5 | "experiment_id": 0, 6 | "explanatory_model": "speech_commands_explanatory_model", 7 | "fraction_poisoned": 0.01, 8 | "poison_dataset": true, 9 | "source_class": 11, 10 | "split_id": 0, 11 | "target_class": 2, 12 | 
"train_epochs": 20, 13 | "use_poison_filtering_defense": false 14 | }, 15 | "attack": { 16 | "knowledge": "black", 17 | "kwargs": { 18 | "backdoor_kwargs": { 19 | "backdoor_path": "clapping.wav", 20 | "duration": 1, 21 | "random": false, 22 | "sampling_rate": 16000, 23 | "scale": 0.1, 24 | "shift": 0 25 | } 26 | }, 27 | "module": "armory.art_experimental.attacks.poison_loader_audio", 28 | "name": "poison_loader_audio" 29 | }, 30 | "dataset": { 31 | "batch_size": 64, 32 | "framework": "numpy", 33 | "module": "armory.data.datasets", 34 | "name": "speech_commands", 35 | "pad_data": true 36 | }, 37 | "defense": null, 38 | "metric": null, 39 | "model": { 40 | "fit": true, 41 | "fit_kwargs": {}, 42 | "model_kwargs": {}, 43 | "module": "armory.baseline_models.tf_graph.audio_resnet50", 44 | "name": "get_art_model", 45 | "weights_file": null, 46 | "wrapper_kwargs": {} 47 | }, 48 | "scenario": { 49 | "kwargs": {}, 50 | "module": "armory.scenarios.poison", 51 | "name": "Poison" 52 | }, 53 | "sysconfig": { 54 | "docker_image": "twosixarmory/armory", 55 | "external_github_repo": null, 56 | "gpus": "all", 57 | "output_dir": null, 58 | "output_filename": null, 59 | "set_pythonhashseed": true, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/eval6/poisoning/audio_dlbd/audio_p05_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Speech Commands DLBD poison audio classification, undefended", 3 | "adhoc": { 4 | "compute_fairness_metrics": false, 5 | "experiment_id": 0, 6 | "explanatory_model": "speech_commands_explanatory_model", 7 | "fraction_poisoned": 0.05, 8 | "poison_dataset": true, 9 | "source_class": 11, 10 | "split_id": 0, 11 | "target_class": 2, 12 | "train_epochs": 20, 13 | "use_poison_filtering_defense": false 14 | }, 15 | "attack": { 16 | "knowledge": "black", 17 | "kwargs": { 18 | "backdoor_kwargs": { 19 | "backdoor_path": "clapping.wav", 20 | "duration": 1, 21 | "random": false, 22 | "sampling_rate": 16000, 23 | "scale": 0.1, 24 | "shift": 0 25 | } 26 | }, 27 | "module": "armory.art_experimental.attacks.poison_loader_audio", 28 | "name": "poison_loader_audio" 29 | }, 30 | "dataset": { 31 | "batch_size": 64, 32 | "framework": "numpy", 33 | "module": "armory.data.datasets", 34 | "name": "speech_commands", 35 | "pad_data": true 36 | }, 37 | "defense": null, 38 | "metric": null, 39 | "model": { 40 | "fit": true, 41 | "fit_kwargs": {}, 42 | "model_kwargs": {}, 43 | "module": "armory.baseline_models.tf_graph.audio_resnet50", 44 | "name": "get_art_model", 45 | "weights_file": null, 46 | "wrapper_kwargs": {} 47 | }, 48 | "scenario": { 49 | "kwargs": {}, 50 | "module": "armory.scenarios.poison", 51 | "name": "Poison" 52 | }, 53 | "sysconfig": { 54 | "docker_image": "twosixarmory/armory", 55 | "external_github_repo": null, 56 | "gpus": "all", 57 | "output_dir": null, 58 | "output_filename": null, 59 | "set_pythonhashseed": true, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/eval6/poisoning/audio_dlbd/audio_p10_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Speech Commands DLBD poison audio classification, undefended", 3 | "adhoc": { 4 | "compute_fairness_metrics": false, 5 | "experiment_id": 0, 6 | "explanatory_model": "speech_commands_explanatory_model", 7 | 
"fraction_poisoned": 0.1, 8 | "poison_dataset": true, 9 | "source_class": 11, 10 | "split_id": 0, 11 | "target_class": 2, 12 | "train_epochs": 20, 13 | "use_poison_filtering_defense": false 14 | }, 15 | "attack": { 16 | "knowledge": "black", 17 | "kwargs": { 18 | "backdoor_kwargs": { 19 | "backdoor_path": "clapping.wav", 20 | "duration": 1, 21 | "random": false, 22 | "sampling_rate": 16000, 23 | "scale": 0.1, 24 | "shift": 0 25 | } 26 | }, 27 | "module": "armory.art_experimental.attacks.poison_loader_audio", 28 | "name": "poison_loader_audio" 29 | }, 30 | "dataset": { 31 | "batch_size": 64, 32 | "framework": "numpy", 33 | "module": "armory.data.datasets", 34 | "name": "speech_commands", 35 | "pad_data": true 36 | }, 37 | "defense": null, 38 | "metric": null, 39 | "model": { 40 | "fit": true, 41 | "fit_kwargs": {}, 42 | "model_kwargs": {}, 43 | "module": "armory.baseline_models.tf_graph.audio_resnet50", 44 | "name": "get_art_model", 45 | "weights_file": null, 46 | "wrapper_kwargs": {} 47 | }, 48 | "scenario": { 49 | "kwargs": {}, 50 | "module": "armory.scenarios.poison", 51 | "name": "Poison" 52 | }, 53 | "sysconfig": { 54 | "docker_image": "twosixarmory/armory", 55 | "external_github_repo": null, 56 | "gpus": "all", 57 | "output_dir": null, 58 | "output_filename": null, 59 | "set_pythonhashseed": true, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/eval6/poisoning/audio_dlbd/audio_p20_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Speech Commands DLBD poison audio classification, undefended", 3 | "adhoc": { 4 | "compute_fairness_metrics": false, 5 | "experiment_id": 0, 6 | "explanatory_model": "speech_commands_explanatory_model", 7 | "fraction_poisoned": 0.2, 8 | "poison_dataset": true, 9 | "source_class": 11, 10 | "split_id": 0, 11 | "target_class": 2, 12 | "train_epochs": 20, 13 | "use_poison_filtering_defense": false 14 | }, 15 | "attack": { 16 | "knowledge": "black", 17 | "kwargs": { 18 | "backdoor_kwargs": { 19 | "backdoor_path": "clapping.wav", 20 | "duration": 1, 21 | "random": false, 22 | "sampling_rate": 16000, 23 | "scale": 0.1, 24 | "shift": 0 25 | } 26 | }, 27 | "module": "armory.art_experimental.attacks.poison_loader_audio", 28 | "name": "poison_loader_audio" 29 | }, 30 | "dataset": { 31 | "batch_size": 64, 32 | "framework": "numpy", 33 | "module": "armory.data.datasets", 34 | "name": "speech_commands", 35 | "pad_data": true 36 | }, 37 | "defense": null, 38 | "metric": null, 39 | "model": { 40 | "fit": true, 41 | "fit_kwargs": {}, 42 | "model_kwargs": {}, 43 | "module": "armory.baseline_models.tf_graph.audio_resnet50", 44 | "name": "get_art_model", 45 | "weights_file": null, 46 | "wrapper_kwargs": {} 47 | }, 48 | "scenario": { 49 | "kwargs": {}, 50 | "module": "armory.scenarios.poison", 51 | "name": "Poison" 52 | }, 53 | "sysconfig": { 54 | "docker_image": "twosixarmory/armory", 55 | "external_github_repo": null, 56 | "gpus": "all", 57 | "output_dir": null, 58 | "output_filename": null, 59 | "set_pythonhashseed": true, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/eval6/poisoning/audio_dlbd/audio_p30_undefended.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Speech Commands DLBD poison audio classification, undefended", 3 | "adhoc": { 4 | 
"compute_fairness_metrics": false, 5 | "experiment_id": 0, 6 | "explanatory_model": "speech_commands_explanatory_model", 7 | "fraction_poisoned": 0.3, 8 | "poison_dataset": true, 9 | "source_class": 11, 10 | "split_id": 0, 11 | "target_class": 2, 12 | "train_epochs": 20, 13 | "use_poison_filtering_defense": false 14 | }, 15 | "attack": { 16 | "knowledge": "black", 17 | "kwargs": { 18 | "backdoor_kwargs": { 19 | "backdoor_path": "clapping.wav", 20 | "duration": 1, 21 | "random": false, 22 | "sampling_rate": 16000, 23 | "scale": 0.1, 24 | "shift": 0 25 | } 26 | }, 27 | "module": "armory.art_experimental.attacks.poison_loader_audio", 28 | "name": "poison_loader_audio" 29 | }, 30 | "dataset": { 31 | "batch_size": 64, 32 | "framework": "numpy", 33 | "module": "armory.data.datasets", 34 | "name": "speech_commands", 35 | "pad_data": true 36 | }, 37 | "defense": null, 38 | "metric": null, 39 | "model": { 40 | "fit": true, 41 | "fit_kwargs": {}, 42 | "model_kwargs": {}, 43 | "module": "armory.baseline_models.tf_graph.audio_resnet50", 44 | "name": "get_art_model", 45 | "weights_file": null, 46 | "wrapper_kwargs": {} 47 | }, 48 | "scenario": { 49 | "kwargs": {}, 50 | "module": "armory.scenarios.poison", 51 | "name": "Poison" 52 | }, 53 | "sysconfig": { 54 | "docker_image": "twosixarmory/armory", 55 | "external_github_repo": null, 56 | "gpus": "all", 57 | "output_dir": null, 58 | "output_filename": null, 59 | "set_pythonhashseed": true, 60 | "use_gpu": false 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /scenario_configs/mnist_baseline.json: -------------------------------------------------------------------------------- 1 | eval1-4/mnist/mnist_baseline.json -------------------------------------------------------------------------------- /scenario_configs/no_docker/cifar_short.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline cifar10 image classification", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.031, 9 | "eps_step": 0.007, 10 | "max_iter": 2, 11 | "num_random_init": 1, 12 | "random_eps": false, 13 | "targeted": false, 14 | "verbose": false 15 | }, 16 | "module": "art.attacks.evasion", 17 | "name": "ProjectedGradientDescent", 18 | "use_label": true 19 | }, 20 | "dataset": { 21 | "batch_size": 5, 22 | "framework": "numpy", 23 | "index": "[:10]", 24 | "module": "armory.data.datasets", 25 | "name": "cifar10" 26 | }, 27 | "defense": null, 28 | "metric": { 29 | "means": true, 30 | "perturbation": "linf", 31 | "record_metric_per_sample": false, 32 | "task": [ 33 | "categorical_accuracy" 34 | ] 35 | }, 36 | "model": { 37 | "fit": true, 38 | "fit_kwargs": { 39 | "nb_epochs": 1 40 | }, 41 | "model_kwargs": {}, 42 | "module": "armory.baseline_models.pytorch.cifar", 43 | "name": "get_art_model", 44 | "weights_file": null, 45 | "wrapper_kwargs": {} 46 | }, 47 | "scenario": { 48 | "kwargs": {}, 49 | "module": "armory.scenarios.image_classification", 50 | "name": "ImageClassificationTask" 51 | }, 52 | "sysconfig": { 53 | "docker_image": "twosixarmory/armory", 54 | "external_github_repo": null, 55 | "gpus": "all", 56 | "output_dir": null, 57 | "output_filename": null, 58 | "use_gpu": false 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /scenario_configs/poisoning_cifar10_witches_brew.json: -------------------------------------------------------------------------------- 1 
| eval5/poisoning/cifar10_witches_brew.json -------------------------------------------------------------------------------- /scenario_configs/poisoning_gtsrb_dirty_label.json: -------------------------------------------------------------------------------- 1 | eval5/poisoning/gtsrb_dlbd_baseline_pytorch.json -------------------------------------------------------------------------------- /scenario_configs/so2sat_eo_masked_pgd.json: -------------------------------------------------------------------------------- 1 | eval1-4/so2sat/so2sat_eo_masked_pgd_undefended.json -------------------------------------------------------------------------------- /scenario_configs/speaker_id_librispeech.json: -------------------------------------------------------------------------------- 1 | eval1-4/speaker_id_librispeech/librispeech_baseline_sincnet_snr_pgd.json -------------------------------------------------------------------------------- /scenario_configs/ucf101_masked_pgd.json: -------------------------------------------------------------------------------- 1 | eval1-4/ucf101/ucf101_pretrained_masked_pgd_undefended.json -------------------------------------------------------------------------------- /scenario_configs/xview_robust_dpatch.json: -------------------------------------------------------------------------------- 1 | eval1-4/xview/xview_frcnn_robust_dpatch_undefended.json -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import setuptools 3 | 4 | if __name__ == "__main__": 5 | # Needed for GitHubs dependency graph. 6 | setuptools.setup() 7 | -------------------------------------------------------------------------------- /tests/end_to_end/test_no_docker.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | import pytest 5 | 6 | from armory.logs import log 7 | 8 | # Marks all tests in this file as `end_to_end` 9 | pytestmark = pytest.mark.end_to_end 10 | 11 | 12 | @pytest.mark.parametrize( 13 | "config, args", 14 | [ 15 | ("cifar_short.json", ["--check", "--no-docker"]), 16 | ("cifar_short.json", ["--no-docker"]), 17 | ("carla_short.json", ["--check", "--no-docker"]), 18 | ("carla_short.json", ["--no-docker"]), 19 | ], 20 | ) 21 | def test_run(scenario_configs, config, args): 22 | log.info("Running Armory scenarios from cmd line") 23 | cf = os.path.join(scenario_configs, "no_docker", config) 24 | cmd = ["armory", "run", cf] + args 25 | log.info("Executing: {}".format(cmd)) 26 | result = subprocess.run(cmd) 27 | log.info("Resulting Return Code: {}".format(result.returncode)) 28 | assert result.returncode == 0 29 | 30 | 31 | @pytest.mark.parametrize("config", ["cifar_short.json", "carla_short.json"]) 32 | def test_interactive(scenario_configs, config): 33 | # log.info("Executing Config Dir: {}".format(scenario_configs)) 34 | log.info("Running Armory Scenarios interactive in `--no-docker` mode") 35 | from armory import paths 36 | from armory.scenarios.main import get as get_scenario 37 | 38 | log.info("Setting Paths to `host`") 39 | paths.set_mode("host") 40 | 41 | config = os.path.join(scenario_configs, "no_docker", config) 42 | log.info("Loading Config: {}".format(config)) 43 | s = get_scenario(config).load() 44 | log.info("Evaluating Config") 45 | s.evaluate() 46 | -------------------------------------------------------------------------------- 
/tests/scenarios/broken/invalid_dataset_framework.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks", 15 | "name": "FastGradientMethod" 16 | }, 17 | "dataset": { 18 | "batch_size": 64, 19 | "framework": "chainer", 20 | "module": "armory.data.datasets", 21 | "name": "mnist" 22 | }, 23 | "defense": null, 24 | "metric": { 25 | "means": true, 26 | "perturbation": "linf", 27 | "record_metric_per_sample": false, 28 | "task": [ 29 | "categorical_accuracy" 30 | ] 31 | }, 32 | "model": { 33 | "fit": true, 34 | "fit_kwargs": { 35 | "nb_epochs": 3 36 | }, 37 | "model_kwargs": {}, 38 | "module": "armory.baseline_models.keras.mnist", 39 | "name": "get_art_model", 40 | "weights_file": null, 41 | "wrapper_kwargs": {} 42 | }, 43 | "scenario": { 44 | "kwargs": {}, 45 | "module": "armory.scenarios.image_classification", 46 | "name": "ImageClassificationTask" 47 | }, 48 | "sysconfig": { 49 | "docker_image": "twosixarmory/armory", 50 | "external_github_repo": null, 51 | "gpus": "all", 52 | "output_dir": null, 53 | "output_filename": null, 54 | "use_gpu": false 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /tests/scenarios/broken/invalid_module.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Test schema with an invalid `attack.module`", 3 | "adhoc": { 4 | "batch_size": 64, 5 | "epochs": 3 6 | }, 7 | "attack": { 8 | "knowledge": "white", 9 | "kwargs": { 10 | "batch_size": 1, 11 | "eps": 0.2, 12 | "eps_step": 0.1, 13 | "minimal": false, 14 | "num_random_init": 0, 15 | "targeted": false 16 | }, 17 | "module": "art::attacks", 18 | "name": "FastGradientMethod" 19 | }, 20 | "dataset": { 21 | "batch_size": 64, 22 | "module": "armory.data.datasets", 23 | "name": "mnist" 24 | }, 25 | "defense": null, 26 | "metric": null, 27 | "model": { 28 | "fit": true, 29 | "fit_kwargs": {}, 30 | "model_kwargs": {}, 31 | "module": "armory.baseline_models.keras.keras_mnist", 32 | "name": "get_art_model", 33 | "weights_file": "", 34 | "wrapper_kwargs": {} 35 | }, 36 | "scenario": { 37 | "kwargs": {}, 38 | "module": "tests.evals.fgm_attack", 39 | "name": "fgm_attack" 40 | }, 41 | "sysconfig": { 42 | "docker_image": "twosixarmory/armory", 43 | "external_github_repo": null, 44 | "gpus": "", 45 | "output_dir": null, 46 | "output_filename": null, 47 | "use_gpu": false 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /tests/scenarios/broken/missing_scenario.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks", 15 | "name": "FastGradientMethod" 16 | }, 17 | "dataset": { 18 | "batch_size": 64, 19 | "module": "armory.data.datasets", 20 | "name": "mnist" 21 | }, 22 | "defense": null, 23 | "metric": { 24 | "means": true, 25 | "perturbation": "linf", 26 | "record_metric_per_sample": false, 27 | "task": [ 28 | "categorical_accuracy" 29 | ] 30 | }, 31 | "model": { 32 | 
"fit": true, 33 | "fit_kwargs": { 34 | "nb_epochs": 3 35 | }, 36 | "model_kwargs": {}, 37 | "module": "armory.baseline_models.keras.mnist", 38 | "name": "get_art_model", 39 | "weights_file": null, 40 | "wrapper_kwargs": {} 41 | }, 42 | "sysconfig": { 43 | "docker_image": "twosixarmory/armory", 44 | "external_github_repo": null, 45 | "gpus": "all", 46 | "output_dir": null, 47 | "output_filename": null, 48 | "use_gpu": false 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /tests/scenarios/pytorch/image_classification.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod" 16 | }, 17 | "dataset": { 18 | "batch_size": 64, 19 | "framework": "numpy", 20 | "module": "armory.data.datasets", 21 | "name": "mnist" 22 | }, 23 | "defense": null, 24 | "metric": { 25 | "means": true, 26 | "perturbation": "linf", 27 | "record_metric_per_sample": false, 28 | "task": [ 29 | "categorical_accuracy" 30 | ] 31 | }, 32 | "model": { 33 | "fit": true, 34 | "fit_kwargs": { 35 | "nb_epochs": 3 36 | }, 37 | "model_kwargs": {}, 38 | "module": "armory.baseline_models.pytorch.mnist", 39 | "name": "get_art_model", 40 | "weights_file": null, 41 | "wrapper_kwargs": {} 42 | }, 43 | "scenario": { 44 | "kwargs": {}, 45 | "module": "armory.scenarios.image_classification", 46 | "name": "ImageClassificationTask" 47 | }, 48 | "sysconfig": { 49 | "docker_image": "twosixarmory/armory", 50 | "external_github_repo": null, 51 | "gpus": "all", 52 | "output_dir": null, 53 | "output_filename": null, 54 | "use_gpu": false 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /tests/scenarios/pytorch/image_classification_pretrained.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.2, 9 | "eps_step": 0.1, 10 | "minimal": false, 11 | "num_random_init": 0, 12 | "targeted": false 13 | }, 14 | "module": "art.attacks.evasion", 15 | "name": "FastGradientMethod" 16 | }, 17 | "dataset": { 18 | "batch_size": 64, 19 | "framework": "numpy", 20 | "module": "armory.data.datasets", 21 | "name": "mnist" 22 | }, 23 | "defense": null, 24 | "metric": { 25 | "means": true, 26 | "perturbation": "linf", 27 | "record_metric_per_sample": false, 28 | "task": [ 29 | "categorical_accuracy" 30 | ] 31 | }, 32 | "model": { 33 | "fit": false, 34 | "fit_kwargs": {}, 35 | "model_kwargs": {}, 36 | "module": "armory.baseline_models.pytorch.mnist", 37 | "name": "get_art_model", 38 | "weights_file": "undefended_mnist_5epochs.pth", 39 | "wrapper_kwargs": {} 40 | }, 41 | "scenario": { 42 | "kwargs": {}, 43 | "module": "armory.scenarios.image_classification", 44 | "name": "ImageClassificationTask" 45 | }, 46 | "sysconfig": { 47 | "docker_image": "twosixarmory/armory", 48 | "external_github_repo": null, 49 | "gpus": "all", 50 | "output_dir": null, 51 | "output_filename": null, 52 | "use_gpu": false 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /tests/test_data/audio_sample.mp3: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/tests/test_data/audio_sample.mp3 -------------------------------------------------------------------------------- /tests/test_data/image_sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/tests/test_data/image_sample.png -------------------------------------------------------------------------------- /tests/test_data/video_sample.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twosixlabs/armory/1131eadc2e3863677fd3e82b1ebb9350ff661a41/tests/test_data/video_sample.mp4 -------------------------------------------------------------------------------- /tests/unit/test_cuda.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | 4 | # Mark all tests in this file as `unit` 5 | pytestmark = pytest.mark.unit 6 | 7 | 8 | # this test is marked xfail because I don't care if it is present or not, I just want to know 9 | # in the pytest summary this appears as X when present and lower-case x when not 10 | @pytest.mark.xfail 11 | def test_cuda_present(): 12 | assert torch.cuda.is_available() 13 | -------------------------------------------------------------------------------- /tests/unit/test_docker.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | # Mark all tests in this file as `unit` 4 | pytestmark = pytest.mark.unit 5 | 6 | 7 | @pytest.mark.docker_required 8 | def test_one(docker_client): 9 | docker_client.images.list() 10 | -------------------------------------------------------------------------------- /tests/unit/test_json_utils.py: -------------------------------------------------------------------------------- 1 | import io 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from armory.utils import json_utils 7 | 8 | pytestmark = pytest.mark.unit 9 | 10 | JSON_INPUT = {"hi": np.array([1, 2, 3, 4]), "there": np.float32(5.0), "you": [1, 2, 3]} 11 | JSON_OUTPUT = '{\n "hi": [\n 1,\n 2,\n 3,\n 4\n ],\n "there": 5.0,\n "you": [\n 1,\n 2,\n 3\n ]\n}\n' 12 | 13 | 14 | def test_dumps(inp=JSON_INPUT, out=JSON_OUTPUT): 15 | assert json_utils.dumps(inp) == out 16 | 17 | 18 | def test_dump(inp=JSON_INPUT, out=JSON_OUTPUT): 19 | f = io.StringIO() 20 | json_utils.dump(inp, f) 21 | f.seek(0) 22 | assert f.read() == out 23 | 24 | 25 | def test_size(inp=JSON_INPUT, out=JSON_OUTPUT): 26 | assert json_utils.size(inp) == len(bytes(JSON_OUTPUT, encoding="utf-8")) 27 | 28 | 29 | def test_check_size(inp=JSON_INPUT, out=JSON_OUTPUT): 30 | json_utils.check_size(inp, len(out)) 31 | with pytest.raises(ValueError): 32 | json_utils.check_size(inp, len(out) - 1) 33 | -------------------------------------------------------------------------------- /tests/unit/test_logging.py: -------------------------------------------------------------------------------- 1 | from armory.logs import log 2 | 3 | # test the loguru based logs module for proper pytest caplog behavior 4 | # there is a modified caplog fixture in tests/conftest.py 5 | 6 | 7 | def function_that_warns(): 8 | log.warning("this is a wally warning") 9 | 10 | 11 | def function_that_errors(): 12 | log.error("this is a wally error") 13 | 14 | 15 | def 
test_warns_function(caplog): 16 | function_that_warns() 17 | # because we known function that warns only makes one log message we know 18 | # that all records should be WARNING 19 | for record in caplog.records: 20 | assert record.levelname == "WARNING" 21 | assert "wally" in caplog.text 22 | assert "this is a wally warning" in caplog.text 23 | 24 | 25 | def test_error_function(caplog): 26 | function_that_errors() 27 | for record in caplog.records: 28 | assert record.levelname == "ERROR" 29 | assert "wally" in caplog.text 30 | -------------------------------------------------------------------------------- /tests/unit/test_models.py: -------------------------------------------------------------------------------- 1 | from importlib import import_module 2 | 3 | import pytest 4 | 5 | from armory.data.utils import maybe_download_weights_from_s3 6 | 7 | # Mark all tests in this file as `unit` 8 | pytestmark = pytest.mark.unit 9 | 10 | 11 | @pytest.mark.usefixtures("ensure_armory_dirs") 12 | @pytest.mark.online 13 | def get_armory_module_and_fn( 14 | module_name, fn_attr_name, weights_path, model_kwargs={}, wrapper_kwargs={} 15 | ): 16 | module = import_module(module_name) 17 | fn = getattr(module, fn_attr_name) 18 | if weights_path is not None: 19 | weights_path = maybe_download_weights_from_s3(weights_path) 20 | classifier = fn( 21 | model_kwargs=model_kwargs, 22 | wrapper_kwargs=wrapper_kwargs, 23 | weights_path=weights_path, 24 | ) 25 | return module, fn, classifier 26 | 27 | 28 | @pytest.mark.parametrize( 29 | "module_name, fn_attr_name, weights_path", 30 | [ 31 | ( 32 | "armory.baseline_models.pytorch.mnist", 33 | "get_art_model", 34 | None, 35 | ), 36 | ( 37 | "armory.baseline_models.pytorch.mnist", 38 | "get_art_model", 39 | "undefended_mnist_5epochs.pth", 40 | ), 41 | ( 42 | "armory.baseline_models.pytorch.cifar", 43 | "get_art_model", 44 | None, 45 | ), 46 | ( 47 | "armory.baseline_models.pytorch.micronnet_gtsrb", 48 | "get_art_model", 49 | None, 50 | ), 51 | ], 52 | ) 53 | def test_model_creation(module_name, fn_attr_name, weights_path): 54 | module, fn, classifier = get_armory_module_and_fn( 55 | module_name, fn_attr_name, weights_path 56 | ) 57 | -------------------------------------------------------------------------------- /tools/trigger-release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # conveience script to trigger a build and release of Armory 4 | # requires the environment variable ARMORY_GITHUB_TOKEN to be set 5 | 6 | 7 | if [ -z "$ARMORY_GITHUB_TOKEN" ] ; then 8 | echo "Environment variable ARMORY_GITHUB_TOKEN is not set" 9 | exit 1 10 | fi 11 | 12 | 13 | read -e -p "Are you sure you want to trigger a Armory build and release (y/n) " choice 14 | if [[ "$choice" != [Yy]* ]] ; then 15 | echo "Operation Aborted." 16 | exit 2 17 | fi 18 | 19 | echo "Sending request to trigger build and release..." 20 | 21 | curl \ 22 | -H "Accept: application/vnd.github.everest-preview+json" \ 23 | -H "Authorization: token $ARMORY_GITHUB_TOKEN" \ 24 | --request POST \ 25 | --data '{"event_type": "build-and-release"}' \ 26 | https://api.github.com/repos/twosixlabs/armory/dispatches 27 | 28 | echo "Request sent. Check PYPI and Docker Hub for new releases." 
29 | -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | This tutorial is designed to serve as a quick introduction to performing adaptive attacks in ARMORY. As a result, we will present simple examples to demonstrate each concept, focusing on *how* an attack would be implemented in ARMORY, rather than focusing on attack strength or sophistication. 4 | 5 | For a more comprehensive introduction to adaptive attack techniques, we recommend [On Adaptive Attacks to Adversarial Example Defenses](https://arxiv.org/pdf/2002.08347.pdf). 6 | 7 | # Tutorials 8 | 1. [Developing a Custom Attack](custom_attack.md) 9 | 2. [Patch a Gradient Function](patch_loss_gradient.md) 10 | 3. [Develop a Proxy Model](proxy_model.md) -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/custom_attack.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline cifar10 image classification", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.031, 9 | "eps_step": 0.007, 10 | "max_iter": 20, 11 | "num_random_init": 1, 12 | "random_eps": false, 13 | "targeted": true, 14 | "verbose": false 15 | }, 16 | "module": "custom_attack", 17 | "name": "CustomAttack", 18 | "targeted_labels": { 19 | "kwargs": {}, 20 | "module": "armory.utils.labels", 21 | "name": "IdentityTargeter" 22 | }, 23 | "use_label": false 24 | }, 25 | "dataset": { 26 | "batch_size": 64, 27 | "framework": "numpy", 28 | "module": "armory.data.datasets", 29 | "name": "cifar10" 30 | }, 31 | "defense": null, 32 | "metric": { 33 | "means": true, 34 | "perturbation": "linf", 35 | "record_metric_per_sample": false, 36 | "task": [ 37 | "categorical_accuracy" 38 | ] 39 | }, 40 | "model": { 41 | "fit": true, 42 | "fit_kwargs": { 43 | "nb_epochs": 20 44 | }, 45 | "model_kwargs": {}, 46 | "module": "armory.baseline_models.pytorch.cifar", 47 | "name": "get_art_model", 48 | "weights_file": null, 49 | "wrapper_kwargs": {} 50 | }, 51 | "scenario": { 52 | "kwargs": {}, 53 | "module": "armory.scenarios.image_classification", 54 | "name": "ImageClassificationTask" 55 | }, 56 | "sysconfig": { 57 | "docker_image": "twosixarmory/armory", 58 | "external_github_repo": null, 59 | "gpus": "all", 60 | "local_repo_path": "twosixlabs/armory/tutorials/adaptive_attacks", 61 | "output_dir": null, 62 | "output_filename": null, 63 | "use_gpu": false 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/custom_attack.py: -------------------------------------------------------------------------------- 1 | from art.attacks.evasion import ProjectedGradientDescent 2 | import numpy as np 3 | 4 | 5 | class CustomAttack(ProjectedGradientDescent): 6 | def generate(self, x, y): 7 | 8 | x_adv = [] 9 | for x_sample, y_sample in zip(x, y): 10 | for target in range(10): 11 | 12 | # Do not target correct class 13 | if target == y_sample: 14 | continue 15 | 16 | # Generate sample targeting `target` class 17 | y_target = np.zeros((1, 10), dtype=np.int64) 18 | y_target[0, target] = 1 19 | x_adv_sample = super().generate( 20 | np.expand_dims(x_sample, axis=0), y_target 21 | ) 22 | 23 | # Check - does this example fool the classifier? 
24 | x_adv_pred = np.argmax(self.estimator.predict(x_adv_sample)) 25 | if x_adv_pred != y_sample: 26 | break 27 | x_adv.append(x_adv_sample) 28 | 29 | x_adv = np.concatenate(x_adv, axis=0) 30 | return x_adv 31 | -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/patch_loss_gradient.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline cifar10 image classification", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.031, 9 | "eps_step": 0.007, 10 | "max_iter": 20, 11 | "num_random_init": 1, 12 | "random_eps": false, 13 | "targeted": false, 14 | "verbose": false 15 | }, 16 | "module": "patch_loss_gradient", 17 | "name": "CustomAttack", 18 | "use_label": true 19 | }, 20 | "dataset": { 21 | "batch_size": 64, 22 | "framework": "numpy", 23 | "module": "armory.data.datasets", 24 | "name": "cifar10" 25 | }, 26 | "defense": null, 27 | "metric": { 28 | "means": true, 29 | "perturbation": "linf", 30 | "record_metric_per_sample": false, 31 | "task": [ 32 | "categorical_accuracy" 33 | ] 34 | }, 35 | "model": { 36 | "fit": true, 37 | "fit_kwargs": { 38 | "nb_epochs": 20 39 | }, 40 | "model_kwargs": {}, 41 | "module": "patch_loss_gradient_model", 42 | "name": "get_art_model", 43 | "weights_file": null, 44 | "wrapper_kwargs": {} 45 | }, 46 | "scenario": { 47 | "kwargs": {}, 48 | "module": "armory.scenarios.image_classification", 49 | "name": "ImageClassificationTask" 50 | }, 51 | "sysconfig": { 52 | "docker_image": "twosixarmory/armory", 53 | "external_github_repo": null, 54 | "gpus": "all", 55 | "local_repo_path": "twosixlabs/armory/tutorials/adaptive_attacks", 56 | "output_dir": null, 57 | "output_filename": null, 58 | "use_gpu": false 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/patch_loss_gradient.py: -------------------------------------------------------------------------------- 1 | from art.attacks.evasion import ProjectedGradientDescent 2 | from patch_loss_gradient_model import get_art_model 3 | import torch 4 | from torch.autograd import Variable 5 | from torchvision.transforms import RandomErasing 6 | 7 | from armory.utils.evaluation import patch_method 8 | 9 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 10 | 11 | 12 | class CustomAttack(ProjectedGradientDescent): 13 | def __init__(self, estimator, **kwargs): 14 | 15 | # Create copy of the model (to avoid overwriting loss_gradient of original model) 16 | new_estimator = get_art_model(model_kwargs={}, wrapper_kwargs={}) 17 | new_estimator.model.load_state_dict(estimator.model.state_dict()) 18 | # OR: 19 | # import copy 20 | # new_estimator = copy.deepcopy(estimator) 21 | 22 | # Point attack to copy of model 23 | super().__init__(new_estimator, **kwargs) 24 | 25 | @patch_method(new_estimator) 26 | def loss_gradient( 27 | self, x: "torch.Tensor", y: "torch.Tensor", **kwargs 28 | ) -> "torch.Tensor": 29 | x_var = Variable(x, requires_grad=True) 30 | y_cat = torch.argmax(y) 31 | 32 | transform = RandomErasing(p=1.0, scale=(0.5, 0.5)) 33 | x_mod = torch.stack([transform(x_var[0]) for i in range(100)], dim=0) 34 | logits = self.model.net.forward(x_mod) 35 | loss = self._loss(logits, y_cat.repeat(100)) 36 | 37 | self._model.zero_grad() 38 | loss.backward() 39 | grads = x_var.grad 40 | return grads 41 | 
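The patched loss_gradient above is an expectation-over-transformation (EOT) style estimate: the loss is computed over many RandomErasing-transformed copies of the same input (averaged by the loss reduction) before backpropagating to the original, untransformed input. A standalone sketch of that idea in plain PyTorch is shown below; the `model` and `loss_fn` arguments are hypothetical placeholders and this does not use Armory's `patch_method` helper.

```python
import torch
from torchvision.transforms import RandomErasing


def eot_loss_gradient(model, loss_fn, x, y_class, n_samples=100):
    # x: a single input of shape (1, C, H, W); y_class: scalar class-index tensor
    x_var = x.clone().detach().requires_grad_(True)
    transform = RandomErasing(p=1.0, scale=(0.5, 0.5))
    # Batch of randomly erased copies of the same input; gradients still flow to x_var
    x_mod = torch.stack([transform(x_var[0]) for _ in range(n_samples)], dim=0)
    loss = loss_fn(model(x_mod), y_class.repeat(n_samples))
    loss.backward()
    # Gradient w.r.t. the original (untransformed) input
    return x_var.grad
```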
-------------------------------------------------------------------------------- /tutorials/adaptive_attacks/patch_loss_gradient_model.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from art.estimators.classification import PyTorchClassifier 4 | import torch 5 | import torch.nn as nn 6 | from torchvision.transforms import RandomErasing 7 | 8 | from armory.baseline_models.pytorch.cifar import Net 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | 13 | class ModifiedNet(nn.Module): 14 | def __init__(self): 15 | super().__init__() 16 | self.net = Net() 17 | self.transform = RandomErasing(p=1.0, scale=(0.5, 0.5)) 18 | 19 | def forward(self, x: torch.Tensor) -> torch.Tensor: 20 | x_mod = torch.stack([self.transform(x_i) for x_i in x], dim=0) 21 | return self.net.forward(x_mod) 22 | 23 | 24 | def make_modified_model(**kwargs) -> ModifiedNet: 25 | return ModifiedNet() 26 | 27 | 28 | def get_art_model( 29 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 30 | ) -> PyTorchClassifier: 31 | model = make_modified_model(**model_kwargs) 32 | model.to(DEVICE) 33 | 34 | if weights_path: 35 | checkpoint = torch.load(weights_path, map_location=DEVICE) 36 | model.load_state_dict(checkpoint) 37 | 38 | wrapped_model = PyTorchClassifier( 39 | model, 40 | loss=nn.CrossEntropyLoss(), 41 | optimizer=torch.optim.Adam(model.parameters(), lr=0.003), 42 | input_shape=(32, 32, 3), 43 | nb_classes=10, 44 | clip_values=(0.0, 1.0), 45 | **wrapper_kwargs, 46 | ) 47 | return wrapped_model 48 | -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/proxy_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "_description": "Baseline cifar10 image classification", 3 | "adhoc": null, 4 | "attack": { 5 | "knowledge": "white", 6 | "kwargs": { 7 | "batch_size": 1, 8 | "eps": 0.031, 9 | "eps_step": 0.002, 10 | "max_iter": 20, 11 | "num_random_init": 1, 12 | "random_eps": false, 13 | "targeted": false, 14 | "verbose": false 15 | }, 16 | "module": "proxy_model_attack_model", 17 | "name": "CustomAttack", 18 | "use_label": true 19 | }, 20 | "dataset": { 21 | "batch_size": 64, 22 | "framework": "numpy", 23 | "module": "armory.data.datasets", 24 | "name": "cifar10" 25 | }, 26 | "defense": null, 27 | "metric": { 28 | "means": true, 29 | "perturbation": "linf", 30 | "record_metric_per_sample": false, 31 | "task": [ 32 | "categorical_accuracy" 33 | ] 34 | }, 35 | "model": { 36 | "fit": true, 37 | "fit_kwargs": { 38 | "nb_epochs": 20 39 | }, 40 | "model_kwargs": {}, 41 | "module": "proxy_model_eval_model", 42 | "name": "get_art_model", 43 | "weights_file": null, 44 | "wrapper_kwargs": {} 45 | }, 46 | "scenario": { 47 | "kwargs": {}, 48 | "module": "armory.scenarios.image_classification", 49 | "name": "ImageClassificationTask" 50 | }, 51 | "sysconfig": { 52 | "docker_image": "twosixarmory/armory", 53 | "external_github_repo": null, 54 | "gpus": "all", 55 | "local_repo_path": "twosixlabs/armory/tutorials/adaptive_attacks", 56 | "output_dir": null, 57 | "output_filename": null, 58 | "use_gpu": false 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/proxy_model_attack_model.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from art.attacks.evasion 
import ProjectedGradientDescent 4 | from art.estimators.classification import PyTorchClassifier 5 | import torch 6 | import torch.nn as nn 7 | 8 | from armory.baseline_models.pytorch.cifar import Net 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | 13 | class ModifiedNet(nn.Module): 14 | def __init__(self): 15 | super().__init__() 16 | self.net = Net() 17 | 18 | def forward(self, x: torch.Tensor) -> torch.Tensor: 19 | return self.net.forward(x) 20 | 21 | 22 | def make_modified_model(**kwargs) -> ModifiedNet: 23 | return ModifiedNet() 24 | 25 | 26 | def get_art_model( 27 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 28 | ) -> PyTorchClassifier: 29 | model = make_modified_model(**model_kwargs) 30 | model.to(DEVICE) 31 | 32 | if weights_path: 33 | checkpoint = torch.load(weights_path, map_location=DEVICE) 34 | model.load_state_dict(checkpoint) 35 | 36 | wrapped_model = PyTorchClassifier( 37 | model, 38 | loss=nn.CrossEntropyLoss(), 39 | optimizer=torch.optim.Adam(model.parameters(), lr=0.003), 40 | input_shape=(32, 32, 3), 41 | nb_classes=10, 42 | clip_values=(0.0, 1.0), 43 | **wrapper_kwargs, 44 | ) 45 | return wrapped_model 46 | 47 | 48 | class CustomAttack(ProjectedGradientDescent): 49 | def __init__(self, estimator, **kwargs): 50 | 51 | # Create copy of the model (to avoid overwriting loss_gradient_framework of original model) 52 | new_estimator = get_art_model(model_kwargs={}, wrapper_kwargs={}) 53 | new_estimator.model.load_state_dict(estimator.model.state_dict()) 54 | 55 | # Point attack to copy of model 56 | super().__init__(new_estimator, **kwargs) 57 | -------------------------------------------------------------------------------- /tutorials/adaptive_attacks/proxy_model_eval_model.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from art.estimators.classification import PyTorchClassifier 4 | import torch 5 | import torch.nn as nn 6 | 7 | from armory.baseline_models.pytorch.cifar import Net 8 | 9 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 10 | 11 | 12 | class ModifiedNet(nn.Module): 13 | def __init__(self): 14 | super().__init__() 15 | self.net = Net() 16 | 17 | def forward(self, x: torch.Tensor) -> torch.Tensor: 18 | if self.training: 19 | return self.net.forward(x) 20 | else: 21 | x_mod = torch.round(x * 256.0) / 256.0 22 | return self.net.forward(x_mod) 23 | 24 | 25 | def make_modified_model(**kwargs) -> ModifiedNet: 26 | return ModifiedNet() 27 | 28 | 29 | def get_art_model( 30 | model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None 31 | ) -> PyTorchClassifier: 32 | model = make_modified_model(**model_kwargs) 33 | model.to(DEVICE) 34 | 35 | if weights_path: 36 | checkpoint = torch.load(weights_path, map_location=DEVICE) 37 | model.load_state_dict(checkpoint) 38 | 39 | wrapped_model = PyTorchClassifier( 40 | model, 41 | loss=nn.CrossEntropyLoss(), 42 | optimizer=torch.optim.Adam(model.parameters(), lr=0.003), 43 | input_shape=(32, 32, 3), 44 | nb_classes=10, 45 | clip_values=(0.0, 1.0), 46 | **wrapper_kwargs, 47 | ) 48 | return wrapped_model 49 | --------------------------------------------------------------------------------
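
Taken together with proxy_model.json, the two proxy-model tutorial files are wired so that the evaluated model rounds its inputs at inference time while the attack backpropagates through an identical copy of the network without that rounding step. The snippet below is a rough usage sketch outside the Armory runner: it assumes tutorials/adaptive_attacks is on the Python path, uses untrained weights, and substitutes random data for CIFAR-10 just to show the wiring.

```python
import numpy as np

from proxy_model_attack_model import CustomAttack
from proxy_model_eval_model import get_art_model

# Defended (input-rounding) classifier that the scenario would evaluate
classifier = get_art_model(model_kwargs={}, wrapper_kwargs={})

# Attack built on a proxy copy of the same weights, without the rounding step
attack = CustomAttack(classifier, eps=0.031, eps_step=0.002, max_iter=20)

x = np.random.rand(8, 32, 32, 3).astype(np.float32)  # stand-in NHWC batch in [0, 1]
x_adv = attack.generate(x=x)  # untargeted PGD against the proxy model
print(x_adv.shape)
```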