├── .Rbuildignore ├── .azure-pipelines ├── code-quality.yml ├── docker │ └── Dockerfile ├── function-tests.yml ├── scripts │ ├── check_code_style.R │ ├── validate_copyright_header.R │ └── validate_samples.R ├── update-docs.yml └── validate-samples.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── stale.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── DESCRIPTION ├── LICENSE ├── LICENSE.md ├── NAMESPACE ├── NEWS.md ├── R ├── compute.R ├── datasets.R ├── datastore.R ├── do_azureml_parallel.R ├── environment.R ├── estimator.R ├── experiment.R ├── hyperdrive.R ├── install.R ├── keyvault.R ├── model.R ├── modules.R ├── package.R ├── run.R ├── webservice-aci.R ├── webservice-aks.R ├── webservice-local.R ├── webservice.R └── workspace.R ├── README.md ├── _pkgdown.yml ├── azureml-sdk-for-r.Rproj ├── codecov.yml ├── cran-comments.md ├── dev_instruction.md ├── docs ├── 404.html ├── CODE_OF_CONDUCT.html ├── CONTRIBUTING.html ├── LICENSE-text.html ├── LICENSE.html ├── articles │ ├── building-custom-docker-images.html │ ├── configuration.html │ ├── deploy-to-aks.html │ ├── deploying-models.html │ ├── experiments-deep-dive.html │ ├── hyperparameter-tune-with-keras.html │ ├── index.html │ ├── installation.html │ ├── train-and-deploy-first-model.html │ ├── train-with-tensorflow.html │ └── troubleshooting.html ├── authors.html ├── dev_instruction.html ├── docsearch.css ├── docsearch.js ├── index.html ├── link.svg ├── news │ └── index.html ├── pkgdown.css ├── pkgdown.js ├── pkgdown.yml └── reference │ ├── aci_webservice_deployment_config.html │ ├── aks_webservice_deployment_config.html │ ├── attach_aks_compute.html │ ├── azureml.html │ ├── bandit_policy.html │ ├── bayesian_parameter_sampling.html │ ├── cancel_run.html │ ├── choice.html │ ├── complete_run.html │ ├── container_registry.html │ ├── convert_to_dataset_with_csv_files.html │ ├── convert_to_dataset_with_parquet_files.html │ ├── cran_package.html │ ├── create_aks_compute.html │ ├── create_aml_compute.html │ ├── create_child_run.html │ ├── create_child_runs.html │ ├── create_file_dataset_from_files.html │ ├── create_tabular_dataset_from_delimited_files.html │ ├── create_tabular_dataset_from_json_lines_files.html │ ├── create_tabular_dataset_from_parquet_files.html │ ├── create_tabular_dataset_from_sql_query.html │ ├── create_workspace.html │ ├── data_path.html │ ├── data_type_bool.html │ ├── data_type_datetime.html │ ├── data_type_double.html │ ├── data_type_long.html │ ├── data_type_string.html │ ├── dataset_consumption_config.html │ ├── define_timestamp_columns_for_dataset.html │ ├── delete_compute.html │ ├── delete_local_webservice.html │ ├── delete_model.html │ ├── delete_secrets.html │ ├── delete_webservice.html │ ├── delete_workspace.html │ ├── deploy_model.html │ ├── detach_aks_compute.html │ ├── download_file_from_run.html │ ├── download_files_from_run.html │ ├── download_from_datastore.html │ ├── download_from_file_dataset.html │ ├── download_model.html │ ├── drop_columns_from_dataset.html │ ├── estimator.html │ ├── experiment.html │ ├── filter_dataset_after_time.html │ ├── filter_dataset_before_time.html │ ├── filter_dataset_between_time.html │ ├── filter_dataset_from_recent_time.html │ ├── generate_entry_script.html │ ├── generate_new_webservice_key.html │ ├── get_aks_compute_credentials.html │ ├── get_best_run_by_primary_metric.html │ ├── get_child_run_hyperparameters.html │ ├── get_child_run_metrics.html │ ├── get_child_runs.html │ ├── 
get_child_runs_sorted_by_primary_metric.html │ ├── get_compute.html │ ├── get_current_run.html │ ├── get_dataset_by_id.html │ ├── get_dataset_by_name.html │ ├── get_datastore.html │ ├── get_default_datastore.html │ ├── get_default_keyvault.html │ ├── get_environment.html │ ├── get_file_dataset_paths.html │ ├── get_input_dataset_from_run.html │ ├── get_model.html │ ├── get_model_package_container_registry.html │ ├── get_model_package_creation_logs.html │ ├── get_run.html │ ├── get_run_details.html │ ├── get_run_details_with_logs.html │ ├── get_run_file_names.html │ ├── get_run_metrics.html │ ├── get_runs_in_experiment.html │ ├── get_secrets.html │ ├── get_secrets_from_run.html │ ├── get_webservice.html │ ├── get_webservice_keys.html │ ├── get_webservice_logs.html │ ├── get_webservice_token.html │ ├── get_workspace.html │ ├── get_workspace_details.html │ ├── github_package.html │ ├── grid_parameter_sampling.html │ ├── hyperdrive_config.html │ ├── index.html │ ├── inference_config.html │ ├── install_azureml.html │ ├── interactive_login_authentication.html │ ├── invoke_webservice.html │ ├── keep_columns_from_dataset.html │ ├── list_nodes_in_aml_compute.html │ ├── list_secrets.html │ ├── list_supported_vm_sizes.html │ ├── list_workspaces.html │ ├── load_dataset_into_data_frame.html │ ├── load_workspace_from_config.html │ ├── local_webservice_deployment_config.html │ ├── log_accuracy_table_to_run.html │ ├── log_confusion_matrix_to_run.html │ ├── log_image_to_run.html │ ├── log_list_to_run.html │ ├── log_metric_to_run.html │ ├── log_predictions_to_run.html │ ├── log_residuals_to_run.html │ ├── log_row_to_run.html │ ├── log_table_to_run.html │ ├── lognormal.html │ ├── loguniform.html │ ├── median_stopping_policy.html │ ├── merge_results.html │ ├── mount_file_dataset.html │ ├── normal.html │ ├── package_model.html │ ├── plot_run_details.html │ ├── primary_metric_goal.html │ ├── promote_headers_behavior.html │ ├── pull_model_package_image.html │ ├── qlognormal.html │ ├── qloguniform.html │ ├── qnormal.html │ ├── quniform.html │ ├── r_environment.html │ ├── randint.html │ ├── random_parameter_sampling.html │ ├── random_split_dataset.html │ ├── register_azure_blob_container_datastore.html │ ├── register_azure_data_lake_gen2_datastore.html │ ├── register_azure_file_share_datastore.html │ ├── register_azure_postgre_sql_datastore.html │ ├── register_azure_sql_database_datastore.html │ ├── register_dataset.html │ ├── register_do_azureml_parallel.html │ ├── register_environment.html │ ├── register_model.html │ ├── register_model_from_run.html │ ├── reload_local_webservice_assets.html │ ├── resource_configuration.html │ ├── save_model_package_files.html │ ├── service_principal_authentication.html │ ├── set_default_datastore.html │ ├── set_secrets.html │ ├── skip_from_dataset.html │ ├── split_tasks.html │ ├── start_logging_run.html │ ├── submit_child_run.html │ ├── submit_experiment.html │ ├── take_from_dataset.html │ ├── take_sample_from_dataset.html │ ├── truncation_selection_policy.html │ ├── uniform.html │ ├── unregister_all_dataset_versions.html │ ├── unregister_datastore.html │ ├── update_aci_webservice.html │ ├── update_aks_webservice.html │ ├── update_aml_compute.html │ ├── update_local_webservice.html │ ├── upload_files_to_datastore.html │ ├── upload_files_to_run.html │ ├── upload_folder_to_run.html │ ├── upload_to_datastore.html │ ├── wait_for_deployment.html │ ├── wait_for_model_package_creation.html │ ├── wait_for_provisioning_completion.html │ ├── wait_for_run_completion.html │ └── 
write_workspace_config.html ├── inst └── widget │ └── app.R ├── man ├── aci_webservice_deployment_config.Rd ├── aks_webservice_deployment_config.Rd ├── attach_aks_compute.Rd ├── azureml.Rd ├── bandit_policy.Rd ├── bayesian_parameter_sampling.Rd ├── cancel_run.Rd ├── choice.Rd ├── complete_run.Rd ├── container_registry.Rd ├── convert_to_dataset_with_csv_files.Rd ├── convert_to_dataset_with_parquet_files.Rd ├── cran_package.Rd ├── create_aks_compute.Rd ├── create_aml_compute.Rd ├── create_child_run.Rd ├── create_child_runs.Rd ├── create_file_dataset_from_files.Rd ├── create_tabular_dataset_from_delimited_files.Rd ├── create_tabular_dataset_from_json_lines_files.Rd ├── create_tabular_dataset_from_parquet_files.Rd ├── create_tabular_dataset_from_sql_query.Rd ├── create_workspace.Rd ├── data_path.Rd ├── data_type_bool.Rd ├── data_type_datetime.Rd ├── data_type_double.Rd ├── data_type_long.Rd ├── data_type_string.Rd ├── dataset_consumption_config.Rd ├── define_timestamp_columns_for_dataset.Rd ├── delete_compute.Rd ├── delete_local_webservice.Rd ├── delete_model.Rd ├── delete_secrets.Rd ├── delete_webservice.Rd ├── delete_workspace.Rd ├── deploy_model.Rd ├── detach_aks_compute.Rd ├── download_file_from_run.Rd ├── download_files_from_run.Rd ├── download_from_datastore.Rd ├── download_from_file_dataset.Rd ├── download_model.Rd ├── drop_columns_from_dataset.Rd ├── estimator.Rd ├── experiment.Rd ├── filter_dataset_after_time.Rd ├── filter_dataset_before_time.Rd ├── filter_dataset_between_time.Rd ├── filter_dataset_from_recent_time.Rd ├── generate_entry_script.Rd ├── generate_new_webservice_key.Rd ├── get_aks_compute_credentials.Rd ├── get_best_run_by_primary_metric.Rd ├── get_child_run_hyperparameters.Rd ├── get_child_run_metrics.Rd ├── get_child_runs.Rd ├── get_child_runs_sorted_by_primary_metric.Rd ├── get_compute.Rd ├── get_current_run.Rd ├── get_dataset_by_id.Rd ├── get_dataset_by_name.Rd ├── get_datastore.Rd ├── get_default_datastore.Rd ├── get_default_keyvault.Rd ├── get_environment.Rd ├── get_file_dataset_paths.Rd ├── get_input_dataset_from_run.Rd ├── get_model.Rd ├── get_model_package_container_registry.Rd ├── get_model_package_creation_logs.Rd ├── get_run.Rd ├── get_run_details.Rd ├── get_run_details_with_logs.Rd ├── get_run_file_names.Rd ├── get_run_metrics.Rd ├── get_runs_in_experiment.Rd ├── get_secrets.Rd ├── get_secrets_from_run.Rd ├── get_webservice.Rd ├── get_webservice_keys.Rd ├── get_webservice_logs.Rd ├── get_webservice_token.Rd ├── get_workspace.Rd ├── get_workspace_details.Rd ├── github_package.Rd ├── grid_parameter_sampling.Rd ├── hyperdrive_config.Rd ├── inference_config.Rd ├── install_azureml.Rd ├── interactive_login_authentication.Rd ├── invoke_webservice.Rd ├── keep_columns_from_dataset.Rd ├── list_nodes_in_aml_compute.Rd ├── list_secrets.Rd ├── list_supported_vm_sizes.Rd ├── list_workspaces.Rd ├── load_dataset_into_data_frame.Rd ├── load_workspace_from_config.Rd ├── local_webservice_deployment_config.Rd ├── log_accuracy_table_to_run.Rd ├── log_confusion_matrix_to_run.Rd ├── log_image_to_run.Rd ├── log_list_to_run.Rd ├── log_metric_to_run.Rd ├── log_predictions_to_run.Rd ├── log_residuals_to_run.Rd ├── log_row_to_run.Rd ├── log_table_to_run.Rd ├── lognormal.Rd ├── loguniform.Rd ├── median_stopping_policy.Rd ├── merge_results.Rd ├── mount_file_dataset.Rd ├── normal.Rd ├── package_model.Rd ├── plot_run_details.Rd ├── primary_metric_goal.Rd ├── promote_headers_behavior.Rd ├── pull_model_package_image.Rd ├── qlognormal.Rd ├── qloguniform.Rd ├── qnormal.Rd ├── quniform.Rd ├── 
r_environment.Rd ├── randint.Rd ├── random_parameter_sampling.Rd ├── random_split_dataset.Rd ├── register_azure_blob_container_datastore.Rd ├── register_azure_data_lake_gen2_datastore.Rd ├── register_azure_file_share_datastore.Rd ├── register_azure_postgre_sql_datastore.Rd ├── register_azure_sql_database_datastore.Rd ├── register_dataset.Rd ├── register_do_azureml_parallel.Rd ├── register_environment.Rd ├── register_model.Rd ├── register_model_from_run.Rd ├── reload_local_webservice_assets.Rd ├── resource_configuration.Rd ├── save_model_package_files.Rd ├── service_principal_authentication.Rd ├── set_default_datastore.Rd ├── set_secrets.Rd ├── skip_from_dataset.Rd ├── split_tasks.Rd ├── start_logging_run.Rd ├── submit_child_run.Rd ├── submit_experiment.Rd ├── take_from_dataset.Rd ├── take_sample_from_dataset.Rd ├── truncation_selection_policy.Rd ├── uniform.Rd ├── unregister_all_dataset_versions.Rd ├── unregister_datastore.Rd ├── update_aci_webservice.Rd ├── update_aks_webservice.Rd ├── update_aml_compute.Rd ├── update_local_webservice.Rd ├── upload_files_to_datastore.Rd ├── upload_files_to_run.Rd ├── upload_folder_to_run.Rd ├── upload_to_datastore.Rd ├── wait_for_deployment.Rd ├── wait_for_model_package_creation.Rd ├── wait_for_provisioning_completion.Rd ├── wait_for_run_completion.Rd └── write_workspace_config.Rd ├── misc └── r-packages-docker.md ├── samples ├── README.md ├── deployment │ ├── deploy-to-aci │ │ ├── deploy-to-aci.R │ │ ├── model.rds │ │ └── score.R │ └── deploy-to-local │ │ ├── deploy-to-local.R │ │ ├── model.rds │ │ ├── score.R │ │ └── score_new.R ├── foreach │ ├── README.md │ └── batch_inferencing │ │ ├── batch_inferencing.R │ │ ├── iris.csv │ │ └── model.rds └── training │ ├── train-on-amlcompute │ ├── iris.csv │ ├── train-on-amlcompute.R │ └── train.R │ └── train-on-local │ ├── iris.csv │ ├── train-on-local.R │ └── train.R ├── tests ├── testthat.R └── testthat │ ├── dummy_data.txt │ ├── dummy_score.R │ ├── helper-resources.R │ ├── iris.csv │ ├── teardown-resources.R │ ├── test_compute.R │ ├── test_datasets.R │ ├── test_datastore.R │ ├── test_environment.R │ ├── test_estimator.R │ ├── test_experiment.R │ ├── test_hyperdrive.R │ ├── test_keyvault.R │ ├── test_model.R │ ├── test_webservice.R │ ├── test_workspace.R │ ├── train_dummy.R │ ├── train_hyperdrive_dummy.R │ └── utils.R └── vignettes ├── README.md ├── building-custom-docker-images.Rmd ├── configuration.Rmd ├── deploy-to-aks.Rmd ├── deploy-to-aks ├── model.rds └── score.R ├── deploying-models.Rmd ├── experiments-deep-dive.Rmd ├── experiments-deep-dive ├── accident-app │ └── app.R ├── accident-glm.R ├── accident-glmnet.R ├── accident-knn.R └── accident_predict_caret.R ├── hyperparameter-tune-with-keras.Rmd ├── hyperparameter-tune-with-keras ├── Dockerfile └── cifar10_cnn.R ├── installation.Rmd ├── train-and-deploy-first-model.Rmd ├── train-and-deploy-first-model ├── accident_predict.R ├── accidents.R └── nassCDS.csv ├── train-with-tensorflow.Rmd ├── train-with-tensorflow ├── Dockerfile └── tf_mnist.R └── troubleshooting.Rmd /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^\.azure-pipelines$ 4 | ^\.github$ 5 | ^cran-comments\.md$ 6 | ^CRAN-RELEASE$ 7 | ^\.vs$ 8 | ^\.azureml$ 9 | ^LICENSE\.md$ 10 | ^NEWS\.md$ 11 | ^CONTRIBUTING\.md$ 12 | ^dev_instruction\.md$ 13 | ^docs$ 14 | ^samples$ 15 | ^CODE_OF_CONDUCT\.md$ 16 | ^_pkgdown\.yml 17 | ^misc$ 18 | ^_pkgdown\.yml$ 19 | ^pkgdown$ 20 | 
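Each entry in the `.Rbuildignore` above is a Perl-compatible regular expression matched against file paths relative to the package root; `R CMD build` excludes every matching path from the built tarball. A minimal sketch of how entries like these are usually added and verified, assuming the working directory is the package root (`usethis` is a common development helper, not a dependency of this package):

```r
# Appends an escaped, anchored pattern (here "^misc$") to .Rbuildignore.
usethis::use_build_ignore("misc")

# Check whether a given relative path would be excluded by the current rules.
patterns <- readLines(".Rbuildignore")
patterns <- patterns[nzchar(patterns)]  # ignore blank lines
any(vapply(patterns, grepl, logical(1), x = "cran-comments.md", perl = TRUE))
```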
-------------------------------------------------------------------------------- /.azure-pipelines/code-quality.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | - master 3 | 4 | pool: 5 | vmImage: 'ubuntu-18.04' 6 | 7 | container: ninhu/r-sdk-build:latest 8 | 9 | steps: 10 | - task: Bash@3 11 | inputs: 12 | targetType: 'inline' 13 | script: | 14 | Rscript './.azure-pipelines/scripts/validate_copyright_header.R' 'R' 15 | displayName: 'Validate copyright header' 16 | 17 | - task: Bash@3 18 | inputs: 19 | targetType: 'inline' 20 | script: | 21 | pip install --user azureml-sdk 22 | R -e 'dir.create(Sys.getenv("R_LIBS_USER"), recursive = TRUE); 23 | .libPaths(Sys.getenv("R_LIBS_USER")); 24 | # build and install R sdk 25 | devtools::install_deps(upgrade = FALSE); 26 | package_location <- devtools::build(); 27 | install.packages(package_location, repos = NULL)' 28 | displayName: 'Build and Install SDK' 29 | 30 | - task: Bash@3 31 | inputs: 32 | targetType: 'inline' 33 | script: | 34 | Rscript './.azure-pipelines/scripts/check_code_style.R' 'R' 35 | displayName: 'Check code style' 36 | 37 | - task: Bash@3 38 | inputs: 39 | targetType: 'inline' 40 | script: | 41 | R -e 'library("rcmdcheck"); 42 | check_results <- rcmdcheck(".", args = c("--no-manual", "--no-tests")); 43 | stopifnot(length(check_results$errors) == 0); 44 | stopifnot(length(check_results$warnings) == 0); 45 | stopifnot(length(check_results$notes) == 0)' 46 | displayName: 'Check package build' 47 | 48 | -------------------------------------------------------------------------------- /.azure-pipelines/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | USER root:root 4 | ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 5 | ENV DEBIAN_FRONTEND noninteractive 6 | 7 | # Miniconda 8 | ENV MINICONDA_VERSION 4.5.11 9 | ENV PATH /opt/miniconda/bin:$PATH 10 | RUN apt-get update && apt-get install -y bzip2 wget git libxrender1 && \ 11 | wget -qO /tmp/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh && \ 12 | bash /tmp/miniconda.sh -bf -p /opt/miniconda && \ 13 | conda clean -ay && \ 14 | rm -rf /opt/miniconda/pkgs && \ 15 | rm /tmp/miniconda.sh && \ 16 | find / -type d -name __pycache__ | xargs rm -rf 17 | 18 | RUN conda install -c r -y r-essentials r-devtools r-testthat r-reticulate && conda clean -ay && \ 19 | pip install azureml-defaults azureml-dataprep[pandas,fuse] 20 | 21 | # Azure CLI 22 | RUN curl -sL https://aka.ms/InstallAzureCLIDeb | bash 23 | 24 | # Install R packages from CRAN 25 | RUN R -e 'install.packages(c("roxygen2", "pkgdown", "rcmdcheck", "fs", "lintr"), repos = "https://cloud.r-project.org/")' -------------------------------------------------------------------------------- /.azure-pipelines/function-tests.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | - master 3 | 4 | pool: 5 | vmImage: 'ubuntu-18.04' 6 | 7 | container: ninhu/r-sdk-build:latest 8 | 9 | variables: 10 | ${{ if in( variables['Build.Reason'], 'Manual', 'IndividualCI') }}: 11 | TEST_SUBSCRIPTION_ID: $(CI_SUBSCRIPTION_ID) 12 | 13 | steps: 14 | - task: Bash@3 15 | inputs: 16 | targetType: 'inline' 17 | script: | 18 | pip install --user azureml-sdk 19 | R -e 'dir.create(Sys.getenv("R_LIBS_USER"), recursive = TRUE); 20 | .libPaths(Sys.getenv("R_LIBS_USER")); 21 | # build and install R sdk 22 | devtools::install_deps(upgrade = FALSE); 23 | 
package_location <- devtools::build(); 24 | install.packages(package_location, repos = NULL)' 25 | displayName: 'Build and Install SDK' 26 | - task: AzureCLI@1 27 | inputs: 28 | azureSubscription: 'Project Vienna Build Tests (4faaaf21-663f-4391-96fd-47197c630979)' 29 | scriptLocation: inlineScript 30 | inlineScript: | 31 | python -c "from azureml._base_sdk_common.common import perform_interactive_login; perform_interactive_login(username='$servicePrincipalId', password='$servicePrincipalKey', service_principal=True, tenant='$tenantId')" 32 | R -e '# needed to load all non-exported packages for testing 33 | devtools::load_all(); 34 | options(testthat.output_file = "TEST-ALL.xml"); 35 | testthat::test_dir("tests/testthat", reporter = "junit")' 36 | addSpnToEnvironment: true 37 | displayName: 'Run R SDK Tests' 38 | 39 | - task: PublishTestResults@2 40 | inputs: 41 | testResultsFormat: 'JUnit' 42 | testResultsFiles: '**/TEST-*.xml' 43 | failTaskOnFailedTests: true 44 | -------------------------------------------------------------------------------- /.azure-pipelines/scripts/check_code_style.R: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env Rscript 2 | args <- commandArgs(trailingOnly = TRUE) 3 | if (length(args) == 0) { 4 | stop("Please provide the directory path", call. = FALSE) 5 | } 6 | 7 | library("lintr") 8 | 9 | check_code_style <- function(args) { 10 | skip_tests <- c("package.R") 11 | if (length(args) > 1) { 12 | skip_tests <- append(skip_tests, unlist(strsplit(args[2], ";"))) 13 | } 14 | 15 | directory <- args[1] 16 | files <- list.files(directory) 17 | 18 | for (filename in files) { 19 | if (filename %in% skip_tests) { 20 | next 21 | } 22 | 23 | file <- file.path(directory, filename) 24 | 25 | style_issues <- lintr::lint(file, linters = with_defaults( 26 | line_length_linter = line_length_linter(240L), 27 | object_length_linter = object_length_linter(50L) 28 | ) 29 | ) 30 | 31 | if (length(style_issues) != 0) { 32 | print(file) 33 | print(style_issues) 34 | stop("Code quality failed.") 35 | } 36 | } 37 | } 38 | 39 | check_code_style(args) 40 | -------------------------------------------------------------------------------- /.azure-pipelines/scripts/validate_copyright_header.R: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env Rscript 2 | args <- commandArgs(trailingOnly = TRUE) 3 | if (length(args) == 0) { 4 | stop("Please provide the directory path", call. = FALSE) 5 | } 6 | 7 | validate_copyright_header <- function(directory) { 8 | copyright_header <- c("# Copyright(c) Microsoft Corporation.", 9 | "# Licensed under the MIT license.") 10 | files <- list.files(directory) 11 | for (filename in files) { 12 | file <- file.path(directory, filename) 13 | file_handle <- file(file, open = "r") 14 | lines <- readLines(file_handle) 15 | close(file_handle) 16 | assertthat::assert_that(length(lines) >= length(copyright_header)) 17 | 18 | for (i in seq_along(copyright_header)) { 19 | assertthat::assert_that(lines[[i]] == copyright_header[[i]]) 20 | } 21 | } 22 | } 23 | 24 | 25 | validate_copyright_header(directory = args[1]) 26 | -------------------------------------------------------------------------------- /.azure-pipelines/scripts/validate_samples.R: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env Rscript 2 | args <- commandArgs(trailingOnly = TRUE) 3 | if (length(args) == 0) { 4 | stop("Please provide the Samples directory path", call. = FALSE) 5 | } 6 | 7 | library(azuremlsdk) 8 | 9 | 
subscription_id <- Sys.getenv("TEST_SUBSCRIPTION_ID", unset = NA) 10 | resource_group <- Sys.getenv("TEST_RESOURCE_GROUP") 11 | workspace_name <- Sys.getenv("TEST_WORKSPACE_NAME") 12 | cluster_name <- Sys.getenv("TEST_CLUSTER_NAME") 13 | 14 | root_dir <- getwd() 15 | 16 | getPathLeaves <- function(path) { 17 | children <- list.dirs(path, recursive = FALSE) 18 | if (length(children) == 0) 19 | return(path) 20 | ret <- list() 21 | for (child in children) { 22 | ret[[length(ret) + 1]] <- getPathLeaves(child) 23 | } 24 | return(unlist(ret)) 25 | } 26 | 27 | validate_samples <- function(args) { 28 | directory <- args[1] 29 | sample_dirs <- getPathLeaves(directory) 30 | 31 | skip_tests <- c() 32 | 33 | if (length(args) > 1) { 34 | skip_tests <- unlist(strsplit(args[2], ";")) 35 | } 36 | 37 | for (sub_dir in sample_dirs) { 38 | if (basename(sub_dir) %in% skip_tests) { 39 | next 40 | } 41 | 42 | entry_script <- paste0(basename(sub_dir), ".R") 43 | setwd(sub_dir) 44 | 45 | tryCatch({ 46 | source(entry_script) 47 | }, 48 | error = function(e) { 49 | stop(entry_script, "\n", conditionMessage(e)) 50 | }, 51 | finally = setwd(root_dir)) 52 | } 53 | } 54 | 55 | if (!is.na(subscription_id)) { 56 | ws <- get_workspace(name = workspace_name, 57 | subscription_id = subscription_id, 58 | resource_group = resource_group) 59 | write_workspace_config(ws, path = root_dir) 60 | validate_samples(args) 61 | } 62 | -------------------------------------------------------------------------------- /.azure-pipelines/update-docs.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | batch: true 3 | branches: 4 | include: 5 | - master 6 | 7 | # no PR builds 8 | pr: none 9 | 10 | pool: 11 | vmImage: 'ubuntu-18.04' 12 | 13 | container: ninhu/r-sdk-build:latest 14 | 15 | steps: 16 | - task: Bash@3 17 | inputs: 18 | targetType: 'inline' 19 | script: | 20 | git config --global user.email "$(GITHUB_USER_EMAIL)" 21 | git config --global user.name "$(GITHUB_USER_NAME)" 22 | branch_ref=$(Build.SourceBranch) 23 | echo BranchRef=$branch_ref 24 | branch_name="${branch_ref/"refs/heads/"}" 25 | echo BranchName=$branch_name 26 | echo GIT CHECKOUT 27 | git checkout $branch_name 28 | echo GIT STATUS 29 | git status 30 | echo UPDATE DOCS 31 | R -e 'dir.create(Sys.getenv("R_LIBS_USER"), recursive = TRUE); 32 | .libPaths(Sys.getenv("R_LIBS_USER")); 33 | devtools::install_deps(upgrade = FALSE); 34 | unlink("docs", recursive = TRUE, force = TRUE); 35 | pkgdown::build_site();' 36 | retVal=$? 37 | if [ $retVal -ne 0 ]; then 38 | echo "Failed to generate pkgdown docs!! Exiting..."
39 | exit $retVal 40 | fi 41 | echo GIT ADD 42 | git add docs/* 43 | echo GIT COMMIT 44 | git commit -m "Update R SDK docs via Build $(Build.BuildNumber) [skip ci]" 45 | echo GIT STATUS 46 | git status 47 | echo GIT PUSH 48 | git push https://$(GITHUB_AUTH_TOKEN)@github.com/Azure/azureml-sdk-for-r.git 49 | echo GIT STATUS 50 | git status 51 | displayName: 'Update Docs' 52 | -------------------------------------------------------------------------------- /.azure-pipelines/validate-samples.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | - master 3 | 4 | pool: 5 | vmImage: 'ubuntu-18.04' 6 | 7 | container: ninhu/r-sdk-build:latest 8 | 9 | variables: 10 | ${{ if in( variables['Build.Reason'], 'Schedule', 'Manual') }}: 11 | TEST_SUBSCRIPTION_ID: $(CI_SUBSCRIPTION_ID) 12 | 13 | steps: 14 | - task: Bash@3 15 | inputs: 16 | targetType: 'inline' 17 | script: | 18 | pip install --user azureml-sdk 19 | R -e 'dir.create(Sys.getenv("R_LIBS_USER"), recursive = TRUE); 20 | .libPaths(Sys.getenv("R_LIBS_USER")); 21 | # build and install R sdk 22 | devtools::install_deps(upgrade = FALSE); 23 | package_location <- devtools::build(); 24 | install.packages(package_location, repos = NULL)' 25 | displayName: 'Build and Install SDK' 26 | 27 | - task: AzureCLI@1 28 | inputs: 29 | azureSubscription: 'Project Vienna Build Tests (4faaaf21-663f-4391-96fd-47197c630979)' 30 | scriptLocation: inlineScript 31 | inlineScript: | 32 | Rscript './.azure-pipelines/scripts/validate_samples.R' 'samples' 'deploy-to-local;train-on-local' 33 | addSpnToEnvironment: true 34 | displayName: 'Validate Samples' 35 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Screenshots** 20 | If applicable, add screenshots to help explain your problem. 21 | 22 | **Additional context** 23 | Add any other context about the problem here. 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | .Ruserdata 5 | config.json 6 | accidents.Rd 7 | model.rds 8 | _generated_score.py 9 | docs 10 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This project welcomes contributions and suggestions. Most contributions require you to 4 | agree to a Contributor License Agreement (CLA) declaring that you have the right to, 5 | and actually do, grant us the rights to use your contribution. For details, visit 6 | https://cla.microsoft.com. 7 | 8 | When you submit a pull request, a CLA-bot will automatically determine whether you need 9 | to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the 10 | instructions provided by the bot. You will only need to do this once across all repositories using our CLA. 11 | 12 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 13 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 14 | or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: azuremlsdk 2 | Type: Package 3 | Title: Interface to the 'Azure Machine Learning' 'SDK' 4 | Version: 1.10.0 5 | Authors@R: c( 6 | person("Diondra", "Peck", email = "Diondra.Peck@microsoft.com", role = c("cre", "aut")), 7 | person("Minna", "Xiao", email = "Minna.Xiao@microsoft.com", role = c("aut")), 8 | person("AzureML R SDK Team", email = "amlrsdk@microsoft.com", role = c("ctb")), 9 | person("Microsoft", role = c("cph", "fnd")), 10 | person(family = "Google Inc.", role = c("cph"), comment = "Examples and Tutorials"), 11 | person(family = "The TensorFlow Authors", role = c("cph"), comment = "Examples and Tutorials"), 12 | person(family = "RStudio Inc.", role = c("cph"), comment = "Examples and Tutorials") 13 | ) 14 | URL: https://github.com/azure/azureml-sdk-for-r 15 | BugReports: https://github.com/azure/azureml-sdk-for-r/issues 16 | Description: Interface to the 'Azure Machine Learning' Software Development Kit 17 | ('SDK'). Data scientists can use the 'SDK' to train, deploy, automate, and 18 | manage machine learning models on the 'Azure Machine Learning' service. To 19 | learn more about 'Azure Machine Learning' visit the website: 20 | . 
21 | Encoding: UTF-8 22 | License: MIT + file LICENSE 23 | RoxygenNote: 7.1.1 24 | Depends: 25 | R (>= 3.5.0) 26 | Imports: 27 | ggplot2, 28 | reticulate (>= 1.12), 29 | plyr (>= 1.8), 30 | DT, 31 | rstudioapi (>= 0.7), 32 | htmltools, 33 | servr, 34 | shiny, 35 | shinycssloaders 36 | Suggests: rmarkdown, 37 | knitr, 38 | testthat, 39 | dplyr, 40 | jsonlite, 41 | foreach, 42 | iterators, 43 | utils 44 | VignetteBuilder: knitr 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2019 2 | COPYRIGHT HOLDER: Microsoft -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | Copyright (c) 2019 Microsoft 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /R/modules.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | #' azureml module 5 | #' User can access functions/modules in azureml that are not exposed through the 6 | #' exported R functions. 7 | #' @export 8 | azureml <- NULL 9 | -------------------------------------------------------------------------------- /R/package.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 
3 | 4 | #' @importFrom reticulate import py_str 5 | 6 | .onLoad <- function(libname, pkgname) { 7 | # delay load azureml 8 | azureml <<- import("azureml", delay_load = list( 9 | environment = "r-azureml", 10 | 11 | on_load = function() { 12 | # This function will be called on successful load 13 | 14 | # set user agent 15 | ver <- toString(utils::packageVersion("azuremlsdk")) 16 | azureml$"_base_sdk_common"$user_agent$append("azureml-r-sdk", ver) 17 | 18 | # override workspace __repr__ from python 19 | azureml$core$Workspace$"__repr__" <- function(self) { 20 | sprintf("create_workspace(name=\"%s\", subscription_id=\"%s\", resource_group=\"%s\")", 21 | self$"_workspace_name", 22 | self$"_subscription_id", 23 | self$"_resource_group") 24 | } 25 | }, 26 | 27 | on_error = function(e) { 28 | if (grepl("No module named azureml", e$message)) { 29 | stop("Use azuremlsdk::install_azureml() to install azureml python ", 30 | call. = FALSE) 31 | } else { 32 | stop(e$message, call. = FALSE) 33 | } 34 | } 35 | )) 36 | 37 | # for solving login hang issue on rstudio server 38 | if (grepl("rstudio-server", Sys.getenv("RS_RPOSTBACK_PATH"))) { 39 | webbrowser <- reticulate::import("webbrowser") 40 | # this will force to use device code login 41 | webbrowser$"_tryorder" <- list() 42 | } 43 | 44 | invisible(NULL) 45 | } 46 | -------------------------------------------------------------------------------- /azureml-sdk-for-r.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | BuildType: Package 16 | PackageUseDevtools: Yes 17 | PackageInstallArgs: --no-multiarch --with-keep.source 18 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: 2 | require_changes: true 3 | 4 | coverage: 5 | status: 6 | project: 7 | default: 8 | target: auto 9 | threshold: 1% 10 | patch: 11 | default: 12 | target: auto 13 | threshold: 1% 14 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | ## Release summary 2 | This is a minor release. 3 | 4 | ## Test environments 5 | * local: windows-x86_64-devel 6 | * azure devops: Ubuntu:16.04 LTS 7 | * win-builder: windows-x86_64-devel 8 | 9 | ## R CMD check results 10 | 0 errors √ | 0 warnings √ | 1 note x 11 | 12 | N checking for future file timestamps (590ms) 13 | unable to verify current time 14 | 15 | note explanation: http://worldclockapi.com/ is not currently available. 16 | -------------------------------------------------------------------------------- /dev_instruction.md: -------------------------------------------------------------------------------- 1 | # Developer instructions on building `azureml` package 2 | 1. Make sure below packages are installed. 3 | ``` 4 | install.packages('devtools') 5 | ``` 6 | 2. Run the following to build the code. The R package file will be created at `package_location`. We can now either upload it to a blob store, publish it to CRAN or install directly from the file. 
7 | ``` 8 | setwd('') 9 | 10 | # Generate .Rd files in man/ and NAMESPACE 11 | roxygen2::roxygenise() 12 | 13 | # Build the R package 14 | package_location <- devtools::build() 15 | ``` 16 | 3. To install the package from the `.tar.gz` file in the filesystem, do: 17 | ``` 18 | install.packages(package_location, repos = NULL) 19 | ``` 20 | To install from a URL: 21 | ``` 22 | install.packages(package_url, repos = NULL) 23 | ``` 24 | 25 | If you already have the package loaded in your R session, you may want to 26 | remove it from the session to use the new one. This can be done by the 27 | following: 28 | ``` 29 | detach("package:azuremlsdk", unload = TRUE) 30 | ``` 31 | -------------------------------------------------------------------------------- /docs/link.svg: -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /docs/pkgdown.yml: -------------------------------------------------------------------------------- 1 | pandoc: '2.6' 2 | pkgdown: 1.6.1 3 | pkgdown_sha: ~ 4 | articles: 5 | building-custom-docker-images: building-custom-docker-images.html 6 | configuration: configuration.html 7 | deploy-to-aks: deploy-to-aks.html 8 | deploying-models: deploying-models.html 9 | experiments-deep-dive: experiments-deep-dive.html 10 | hyperparameter-tune-with-keras: hyperparameter-tune-with-keras.html 11 | installation: installation.html 12 | train-and-deploy-first-model: train-and-deploy-first-model.html 13 | train-with-tensorflow: train-with-tensorflow.html 14 | troubleshooting: troubleshooting.html 15 | last_built: 2021-08-24T17:30Z 16 | 17 | -------------------------------------------------------------------------------- /inst/widget/app.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | suppressMessages(library(shiny)) 5 | suppressMessages(library(DT)) 6 | 7 | #nolint start 8 | server <- function(input, output, session) { 9 | 10 | values <- reactiveValues(widget_state = "initializing", 11 | current_run = NULL) 12 | 13 | # retrieve run object once widget_state is streaming 14 | observeEvent(values$widget_state == "streaming", { 15 | ws <- azuremlsdk::get_workspace(ws_name, subscription_id, rg) 16 | exp <- azuremlsdk::experiment(ws, exp_name) 17 | values$current_run <- azuremlsdk::get_run(exp, run_id) 18 | }, 19 | ignoreInit = TRUE, 20 | once = TRUE) 21 | 22 | # stop app if user closes session 23 | session$onSessionEnded(function() { 24 | shiny::stopApp() 25 | }) 26 | 27 | plot <- function() { 28 | 29 | if (isolate(values$widget_state) %in% c("initializing", "streaming")) { 30 | invalidateLater(10000, session) 31 | } 32 | 33 | if (!is.null(values$current_run)) { 34 | run_details_plot <- azuremlsdk::view_run_details(values$current_run, 35 | auto_refresh = FALSE) 36 | if (isolate(values$widget_state == "streaming") && 37 | values$current_run$status %in% c("Canceled", "Completed", "Failed")) { 38 | values$widget_state <- "finalizing" 39 | print("The run has reached a terminal state.
You may close the widget.") 40 | } 41 | } else { 42 | # initialize auto-refresh 10 seconds after job submitted 43 | if (isolate(values$widget_state == "initializing") && 44 | difftime(Sys.time(), start_time, units = "secs") > 10) { 45 | values$widget_state <- "streaming" 46 | } 47 | } 48 | 49 | return(run_details_plot) 50 | } 51 | 52 | output$runDetailsPlot <- DT::renderDataTable({ 53 | plot() 54 | }) 55 | } 56 | #nolint end 57 | 58 | ui <- fluidPage( 59 | shinycssloaders::withSpinner(dataTableOutput("runDetailsPlot"), 60 | 5, 61 | size = 0.5) 62 | ) 63 | 64 | print(paste0("Listening on 127.0.0.1:", port)) 65 | suppressMessages(shiny::runApp(shinyApp(ui, server), port = port)) 66 | -------------------------------------------------------------------------------- /man/azureml.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/modules.R 3 | \docType{data} 4 | \name{azureml} 5 | \alias{azureml} 6 | \title{azureml module 7 | User can access functions/modules in azureml that are not exposed through the 8 | exported R functions.} 9 | \format{ 10 | An object of class \code{python.builtin.module} (inherits from \code{python.builtin.object}) of length 5. 11 | } 12 | \usage{ 13 | azureml 14 | } 15 | \description{ 16 | azureml module 17 | User can access functions/modules in azureml that are not exposed through the 18 | exported R functions. 19 | } 20 | \keyword{datasets} 21 | -------------------------------------------------------------------------------- /man/bayesian_parameter_sampling.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{bayesian_parameter_sampling} 4 | \alias{bayesian_parameter_sampling} 5 | \title{Define Bayesian sampling over a hyperparameter search space} 6 | \usage{ 7 | bayesian_parameter_sampling(parameter_space) 8 | } 9 | \arguments{ 10 | \item{parameter_space}{A named list containing each parameter and its 11 | distribution, e.g. \code{list("parameter" = distribution)}.} 12 | } 13 | \value{ 14 | The \code{BayesianParameterSampling} object. 15 | } 16 | \description{ 17 | Bayesian sampling is based on the Bayesian optimization algorithm and makes 18 | intelligent choices on the hyperparameter values to sample next. It picks 19 | the sample based on how the previous samples performed, such that the new 20 | sample improves the reported primary metric. 21 | } 22 | \section{Details}{ 23 | 24 | When you use Bayesian sampling, the number of concurrent runs has an impact 25 | on the effectiveness of the tuning process. Typically, a smaller number of 26 | concurrent runs can lead to better sampling convergence, since the smaller 27 | degree of parallelism increases the number of runs that benefit from 28 | previously completed runs. 29 | 30 | Bayesian sampling only supports \code{choice()}, \code{uniform()}, and \code{quniform()} 31 | distributions over the search space. 32 | 33 | Bayesian sampling does not support any early termination policy. When 34 | using Bayesian parameter sampling, \code{early_termination_policy} must be 35 | \code{NULL}. 
36 | } 37 | 38 | \examples{ 39 | \dontrun{ 40 | param_sampling <- bayesian_parameter_sampling(list("learning_rate" = uniform(0.05, 0.1), 41 | "batch_size" = choice(c(16, 32, 64, 128)))) 42 | } 43 | } 44 | \seealso{ 45 | \code{choice()}, \code{uniform()}, \code{quniform()} 46 | } 47 | -------------------------------------------------------------------------------- /man/cancel_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{cancel_run} 4 | \alias{cancel_run} 5 | \title{Cancel a run} 6 | \usage{ 7 | cancel_run(run) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | } 12 | \value{ 13 | \code{TRUE} if cancellation was successful, else \code{FALSE}. 14 | } 15 | \description{ 16 | Cancel an ongoing run. 17 | } 18 | -------------------------------------------------------------------------------- /man/choice.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{choice} 4 | \alias{choice} 5 | \title{Specify a discrete set of options to sample from} 6 | \usage{ 7 | choice(options) 8 | } 9 | \arguments{ 10 | \item{options}{A vector of values to choose from.} 11 | } 12 | \value{ 13 | A list of the stochastic expression. 14 | } 15 | \description{ 16 | Specify a discrete set of options to sample the hyperparameters 17 | from. 18 | } 19 | \seealso{ 20 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 21 | \code{bayesian_parameter_sampling()} 22 | } 23 | -------------------------------------------------------------------------------- /man/complete_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{complete_run} 4 | \alias{complete_run} 5 | \title{Mark a run as completed.} 6 | \usage{ 7 | complete_run(run) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | } 12 | \value{ 13 | None 14 | } 15 | \description{ 16 | Mark the run as completed. Use for an interactive logging run. 17 | } 18 | \seealso{ 19 | \code{\link[=start_logging_run]{start_logging_run()}} 20 | } 21 | -------------------------------------------------------------------------------- /man/container_registry.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/environment.R 3 | \name{container_registry} 4 | \alias{container_registry} 5 | \title{Specify Azure Container Registry details} 6 | \usage{ 7 | container_registry(address = NULL, username = NULL, password = NULL) 8 | } 9 | \arguments{ 10 | \item{address}{A string of the DNS name or IP address of the 11 | Azure Container Registry (ACR).} 12 | 13 | \item{username}{A string of the username for ACR.} 14 | 15 | \item{password}{A string of the password for ACR.} 16 | } 17 | \value{ 18 | The \code{ContainerRegistry} object. 19 | } 20 | \description{ 21 | Returns a \code{ContainerRegistry} object with the details for an 22 | Azure Container Registry (ACR). This is needed when a custom 23 | Docker image used for training or deployment is located in 24 | a private image registry. 
Provide a \code{ContainerRegistry} object 25 | to the \code{image_registry_details} parameter of either \code{r_environment()} 26 | or \code{estimator()}. 27 | } 28 | \seealso{ 29 | \code{r_environment()}, \code{estimator()} 30 | } 31 | -------------------------------------------------------------------------------- /man/convert_to_dataset_with_csv_files.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{convert_to_dataset_with_csv_files} 4 | \alias{convert_to_dataset_with_csv_files} 5 | \title{Convert the current dataset into a FileDataset containing CSV files.} 6 | \usage{ 7 | convert_to_dataset_with_csv_files(dataset, separator = ",") 8 | } 9 | \arguments{ 10 | \item{dataset}{The Tabular Dataset object.} 11 | 12 | \item{separator}{The separator to use to separate values in the resulting file.} 13 | } 14 | \value{ 15 | A new FileDataset object with a set of CSV files containing the data 16 | in this dataset. 17 | } 18 | \description{ 19 | Convert the current dataset into a FileDataset containing CSV files. 20 | } 21 | -------------------------------------------------------------------------------- /man/convert_to_dataset_with_parquet_files.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{convert_to_dataset_with_parquet_files} 4 | \alias{convert_to_dataset_with_parquet_files} 5 | \title{Convert the current dataset into a FileDataset containing Parquet files.} 6 | \usage{ 7 | convert_to_dataset_with_parquet_files(dataset) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Tabular Dataset object.} 11 | } 12 | \value{ 13 | A new FileDataset object with a set of Parquet files containing the 14 | data in this dataset. 15 | } 16 | \description{ 17 | Convert the current dataset into a FileDataset containing Parquet files. 18 | The resulting dataset will contain one or more Parquet files, each corresponding 19 | to a partition of data from the current dataset. These files are not materialized 20 | until they are downloaded or read from. 21 | } 22 | -------------------------------------------------------------------------------- /man/cran_package.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/environment.R 3 | \name{cran_package} 4 | \alias{cran_package} 5 | \title{Specifies a CRAN package to install in environment} 6 | \usage{ 7 | cran_package(name, version = NULL, repo = "https://cloud.r-project.org") 8 | } 9 | \arguments{ 10 | \item{name}{The package name} 11 | 12 | \item{version}{A string of the package version. If not provided, version 13 | will default to latest} 14 | 15 | \item{repo}{The base URL of the repository to use, e.g., the URL of a 16 | CRAN mirror. 
If not provided, the package will be pulled from 17 | "https://cloud.r-project.org".} 18 | } 19 | \value{ 20 | A named list containing the package specifications 21 | } 22 | \description{ 23 | Specifies a CRAN package to install in run environment 24 | } 25 | \section{Examples}{ 26 | \preformatted{pkg1 <- cran_package("ggplot2", version = "3.3.0") 27 | pkg2 <- cran_package("stringr") 28 | pkg3 <- cran_package("ggplot2", version = "0.9.1", 29 | repo = "http://cran.us.r-project.org") 30 | 31 | env <- r_environment(name = "r_env", 32 | cran_packages = list(pkg1, pkg2, pkg3)) 33 | } 34 | } 35 | 36 | \seealso{ 37 | \code{\link[=r_environment]{r_environment()}} 38 | } 39 | -------------------------------------------------------------------------------- /man/create_child_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{create_child_run} 4 | \alias{create_child_run} 5 | \title{Create a child run} 6 | \usage{ 7 | create_child_run(parent_run, name = NULL, run_id = NULL, outputs = NULL) 8 | } 9 | \arguments{ 10 | \item{parent_run}{The parent \code{Run} object.} 11 | 12 | \item{name}{An optional name for the child run, typically specified for a "part"} 13 | 14 | \item{run_id}{An optional run ID for the child, otherwise it is auto-generated. 15 | Typically this parameter is not set.} 16 | 17 | \item{outputs}{Optional outputs directory to track for the child.} 18 | } 19 | \value{ 20 | The child run, a \code{Run} object. 21 | } 22 | \description{ 23 | Create a child run. This is used to isolate part of a run into a subsection. 24 | } 25 | -------------------------------------------------------------------------------- /man/create_child_runs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{create_child_runs} 4 | \alias{create_child_runs} 5 | \title{Create one or many child runs} 6 | \usage{ 7 | create_child_runs(parent_run, count = NULL, tag_key = NULL, tag_values = NULL) 8 | } 9 | \arguments{ 10 | \item{parent_run}{The parent \code{Run} object.} 11 | 12 | \item{count}{An optional number of children to create.} 13 | 14 | \item{tag_key}{An optional key to populate the Tags entry in all created children.} 15 | 16 | \item{tag_values}{An optional list of values that will map onto Tags for the list of runs created.} 17 | } 18 | \value{ 19 | The list of child runs, \code{Run} objects. 20 | } 21 | \description{ 22 | Create one or many child runs. 23 | } 24 | -------------------------------------------------------------------------------- /man/create_file_dataset_from_files.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{create_file_dataset_from_files} 4 | \alias{create_file_dataset_from_files} 5 | \title{Create a FileDataset to represent file streams.} 6 | \usage{ 7 | create_file_dataset_from_files(path, validate = TRUE) 8 | } 9 | \arguments{ 10 | \item{path}{A data path in a registered datastore or a local path.} 11 | 12 | \item{validate}{Indicates whether to validate if data can be loaded from the 13 | returned dataset. Defaults to True. 
Validation requires that the data source 14 | is accessible from the current compute.} 15 | } 16 | \value{ 17 | The FileDataset object 18 | } 19 | \description{ 20 | Create a FileDataset to represent file streams. 21 | } 22 | \seealso{ 23 | \code{\link{data_path}} 24 | } 25 | -------------------------------------------------------------------------------- /man/create_tabular_dataset_from_parquet_files.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{create_tabular_dataset_from_parquet_files} 4 | \alias{create_tabular_dataset_from_parquet_files} 5 | \title{Create an unregistered, in-memory Dataset from parquet files.} 6 | \usage{ 7 | create_tabular_dataset_from_parquet_files( 8 | path, 9 | validate = TRUE, 10 | include_path = FALSE, 11 | set_column_types = NULL, 12 | partition_format = NULL 13 | ) 14 | } 15 | \arguments{ 16 | \item{path}{A data path in a registered datastore or a local path.} 17 | 18 | \item{validate}{Boolean to validate if data can be loaded from the returned dataset. 19 | Defaults to True. Validation requires that the data source is accessible from the 20 | current compute.} 21 | 22 | \item{include_path}{Whether to include a column containing the path of the file 23 | from which the data was read. This is useful when you are reading multiple files, 24 | and want to know which file a particular record originated from, or to keep useful 25 | information in the file path.} 26 | 27 | \item{set_column_types}{A named list to set column data type, where key is 28 | column name and value is data type.} 29 | 30 | \item{partition_format}{Specify the partition format in the path and create string columns from 31 | format '{x}' and a datetime column from format '{x:yyyy/MM/dd/HH/mm/ss}', where 'yyyy', 'MM', 32 | 'dd', 'HH', 'mm' and 'ss' are used to extract year, month, day, hour, minute and second for the datetime 33 | type. The format should start from the position of the first partition key and continue to the end of the file path. 34 | For example, given a file path '../USA/2019/01/01/data.csv' where the data is partitioned by country and time, 35 | we can define '/{Country}/{PartitionDate:yyyy/MM/dd}/data.csv' to create columns 'Country' 36 | of string type and 'PartitionDate' of datetime type.} 37 | } 38 | \value{ 39 | The Tabular Dataset object. 40 | } 41 | \description{ 42 | Create an unregistered, in-memory Dataset from parquet files. 43 | } 44 | \seealso{ 45 | \code{\link{data_path}} 46 | } 47 | -------------------------------------------------------------------------------- /man/data_path.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{data_path} 4 | \alias{data_path} 5 | \title{Represents a path to data in a datastore.} 6 | \usage{ 7 | data_path(datastore, path_on_datastore = NULL, name = NULL) 8 | } 9 | \arguments{ 10 | \item{datastore}{The Datastore to reference.} 11 | 12 | \item{path_on_datastore}{The relative path in the backing storage for the data reference.} 13 | 14 | \item{name}{An optional name for the DataPath.} 15 | } 16 | \value{ 17 | The \code{DataPath} object. 18 | } 19 | \description{ 20 | The path represented by a DataPath object can point to a directory or a data artifact (blob, file). 
21 | } 22 | \section{Examples}{ 23 | \preformatted{my_data <- register_azure_blob_container_datastore( 24 | workspace = ws, 25 | datastore_name = blob_datastore_name, 26 | container_name = ws_blob_datastore$container_name, 27 | account_name = ws_blob_datastore$account_name, 28 | account_key = ws_blob_datastore$account_key, 29 | create_if_not_exists = TRUE) 30 | 31 | datapath <- data_path(my_data, ) 32 | dataset <- create_file_dataset_from_files(datapath) 33 | } 34 | } 35 | 36 | \seealso{ 37 | \code{\link{create_file_dataset_from_files}} 38 | \code{\link{create_tabular_dataset_from_parquet_files}} 39 | \code{\link{create_tabular_dataset_from_delimited_files}} 40 | \code{\link{create_tabular_dataset_from_json_lines_files}} 41 | \code{\link{create_tabular_dataset_from_sql_query}} 42 | } 43 | -------------------------------------------------------------------------------- /man/data_type_bool.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{data_type_bool} 4 | \alias{data_type_bool} 5 | \title{Configure conversion to bool.} 6 | \usage{ 7 | data_type_bool() 8 | } 9 | \value{ 10 | Converted DataType object. 11 | } 12 | \description{ 13 | Configure conversion to bool. 14 | } 15 | -------------------------------------------------------------------------------- /man/data_type_datetime.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{data_type_datetime} 4 | \alias{data_type_datetime} 5 | \title{Configure conversion to datetime.} 6 | \usage{ 7 | data_type_datetime(formats = NULL) 8 | } 9 | \arguments{ 10 | \item{formats}{Formats to try for datetime conversion. For example \verb{\%d-\%m-\%Y} for data in "day-month-year", 11 | and \verb{\%Y-\%m-\%dT\%H:\%M:\%S.\%f} for "combined date and time representation" according to ISO 8601. 12 | \itemize{ 13 | \item \%Y: Year with 4 digits 14 | \item \%y: Year with 2 digits 15 | \item \%m: Month in digits 16 | \item \%b: Month represented by its abbreviated name in 3 letters, like Aug 17 | \item \%B: Month represented by its full name, like August 18 | \item \%d: Day in digits 19 | \item \%H: Hour as represented in 24-hour clock time 20 | \item \%I: Hour as represented in 12-hour clock time 21 | \item \%M: Minute in 2 digits 22 | \item \%S: Second in 2 digits 23 | \item \%f: Microsecond 24 | \item \%p: AM/PM designator 25 | \item \%z: Timezone, for example: -0700 26 | } 27 | 28 | Format specifiers will be inferred if not specified. 29 | Inference requires that the data source is accessible from the current compute.} 30 | } 31 | \value{ 32 | Converted DataType object. 33 | } 34 | \description{ 35 | Configure conversion to datetime. 36 | } 37 | -------------------------------------------------------------------------------- /man/data_type_double.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{data_type_double} 4 | \alias{data_type_double} 5 | \title{Configure conversion to 53-bit double.} 6 | \usage{ 7 | data_type_double() 8 | } 9 | \value{ 10 | Converted DataType object. 11 | } 12 | \description{ 13 | Configure conversion to 53-bit double. 
14 | } 15 | -------------------------------------------------------------------------------- /man/data_type_long.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{data_type_long} 4 | \alias{data_type_long} 5 | \title{Configure conversion to 64-bit integer.} 6 | \usage{ 7 | data_type_long() 8 | } 9 | \value{ 10 | Converted DataType object. 11 | } 12 | \description{ 13 | Configure conversion to 64-bit integer. 14 | } 15 | -------------------------------------------------------------------------------- /man/data_type_string.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{data_type_string} 4 | \alias{data_type_string} 5 | \title{Configure conversion to string.} 6 | \usage{ 7 | data_type_string() 8 | } 9 | \value{ 10 | Converted DataType object. 11 | } 12 | \description{ 13 | Configure conversion to string. 14 | } 15 | -------------------------------------------------------------------------------- /man/dataset_consumption_config.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{dataset_consumption_config} 4 | \alias{dataset_consumption_config} 5 | \title{Represent how to deliver the dataset to a compute target.} 6 | \usage{ 7 | dataset_consumption_config( 8 | name, 9 | dataset, 10 | mode = "direct", 11 | path_on_compute = NULL 12 | ) 13 | } 14 | \arguments{ 15 | \item{name}{The name of the dataset in the run, which can be different from the 16 | registered name. The name will be registered as an environment variable and can 17 | be used in the data plane.} 18 | 19 | \item{dataset}{The dataset that will be consumed in the run.} 20 | 21 | \item{mode}{Defines how the dataset should be delivered to the compute target. There are three modes: 22 | 23 | 'direct': consume the dataset as a dataset. 24 | 'download': download the dataset and consume it as a downloaded path. 25 | 'mount': mount the dataset and consume it as a mount path.} 26 | 27 | \item{path_on_compute}{The target path on the compute at which to make the data available. 28 | The folder structure of the source data will be kept; however, prefixes might be added 29 | to this folder structure to avoid collisions.} 30 | } 31 | \value{ 32 | The \code{DatasetConsumptionConfig} object. 33 | } 34 | \description{ 35 | Represent how to deliver the dataset to a compute target.
36 | } 37 | \section{Examples}{ 38 | \preformatted{est <- estimator(source_directory = ".", 39 | entry_script = "train.R", 40 | inputs = list(dataset_consumption_config('mydataset', dataset, mode = 'download')), 41 | compute_target = compute_target) 42 | } 43 | } 44 | 45 | \seealso{ 46 | \code{\link{estimator}} 47 | } 48 | -------------------------------------------------------------------------------- /man/define_timestamp_columns_for_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{define_timestamp_columns_for_dataset} 4 | \alias{define_timestamp_columns_for_dataset} 5 | \title{Define timestamp columns for the dataset.} 6 | \usage{ 7 | define_timestamp_columns_for_dataset( 8 | dataset, 9 | fine_grain_timestamp, 10 | coarse_grain_timestamp = NULL, 11 | validate = FALSE 12 | ) 13 | } 14 | \arguments{ 15 | \item{dataset}{The Tabular Dataset object.} 16 | 17 | \item{fine_grain_timestamp}{The name of the column to use as the fine grain timestamp. Use \code{NULL} to clear it.} 18 | 19 | \item{coarse_grain_timestamp}{The name of the column to use as the coarse grain timestamp (optional). 20 | The default is \code{NULL}.} 21 | 22 | \item{validate}{Indicates whether to validate if the specified columns exist in the dataset. 23 | The default is \code{FALSE}. Validation requires that the data source is accessible 24 | from the current compute.} 25 | } 26 | \value{ 27 | The Tabular Dataset with timestamp columns defined. 28 | } 29 | \description{ 30 | Define timestamp columns for the dataset. 31 | The method defines columns to be used as timestamps. Timestamp columns on a dataset 32 | make it possible to treat the data as time-series data and enable additional capabilities. 33 | When a dataset has both \code{fine_grain_timestamp} and \code{coarse_grain_timestamp} 34 | specified, the two columns should represent the same timeline. 35 | } 36 | -------------------------------------------------------------------------------- /man/delete_compute.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{delete_compute} 4 | \alias{delete_compute} 5 | \title{Delete a cluster} 6 | \usage{ 7 | delete_compute(cluster) 8 | } 9 | \arguments{ 10 | \item{cluster}{The \code{AmlCompute} or \code{AksCompute} object.} 11 | } 12 | \value{ 13 | None 14 | } 15 | \description{ 16 | Remove the compute object from its associated workspace and delete the 17 | corresponding cloud-based resource.
18 | } 19 | \examples{ 20 | \dontrun{ 21 | ws <- load_workspace_from_config() 22 | compute_target <- get_compute(ws, cluster_name = 'mycluster') 23 | delete_compute(compute_target) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /man/delete_local_webservice.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice-local.R 3 | \name{delete_local_webservice} 4 | \alias{delete_local_webservice} 5 | \title{Delete a local web service from the local machine} 6 | \usage{ 7 | delete_local_webservice(webservice, delete_cache = TRUE, delete_image = FALSE) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{LocalWebservice} object.} 11 | 12 | \item{delete_cache}{If \code{TRUE}, delete the temporary files cached for 13 | the service.} 14 | 15 | \item{delete_image}{If \code{TRUE}, delete the service's Docker image.} 16 | } 17 | \value{ 18 | None 19 | } 20 | \description{ 21 | Delete a local web service from the local machine. This function call 22 | is not asynchronous; it runs until the service is deleted. 23 | } 24 | -------------------------------------------------------------------------------- /man/delete_model.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{delete_model} 4 | \alias{delete_model} 5 | \title{Delete a model from its associated workspace} 6 | \usage{ 7 | delete_model(model) 8 | } 9 | \arguments{ 10 | \item{model}{The \code{Model} object.} 11 | } 12 | \value{ 13 | None 14 | } 15 | \description{ 16 | Delete the registered model from its associated workspace. Note that 17 | you cannot delete a registered model that is being used by an active 18 | web service deployment. 19 | } 20 | -------------------------------------------------------------------------------- /man/delete_secrets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/keyvault.R 3 | \name{delete_secrets} 4 | \alias{delete_secrets} 5 | \title{Delete secrets from a keyvault} 6 | \usage{ 7 | delete_secrets(keyvault, secrets) 8 | } 9 | \arguments{ 10 | \item{keyvault}{The \code{Keyvault} object.} 11 | 12 | \item{secrets}{A vector of secret names.} 13 | } 14 | \value{ 15 | None 16 | } 17 | \description{ 18 | Delete secrets from the keyvault associated with the workspace for 19 | a specified set of secret names. 20 | } 21 | -------------------------------------------------------------------------------- /man/delete_webservice.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{delete_webservice} 4 | \alias{delete_webservice} 5 | \title{Delete a web service from a given workspace} 6 | \usage{ 7 | delete_webservice(webservice) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{AciWebservice} or \code{AksWebservice} object.} 11 | } 12 | \value{ 13 | None 14 | } 15 | \description{ 16 | Delete a deployed ACI or AKS web service from the given workspace. 17 | This function call is not asynchronous; it runs until the resource is 18 | deleted. 19 | 20 | To delete a \code{LocalWebservice} see \code{delete_local_webservice()}. 
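For example, a minimal sketch (the workspace config and service name are placeholders for illustration): \preformatted{ws <- load_workspace_from_config()
service <- get_webservice(ws, name = "my-service")
delete_webservice(service)
}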
21 | } 22 | -------------------------------------------------------------------------------- /man/delete_workspace.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{delete_workspace} 4 | \alias{delete_workspace} 5 | \title{Delete a workspace} 6 | \usage{ 7 | delete_workspace( 8 | workspace, 9 | delete_dependent_resources = FALSE, 10 | no_wait = FALSE 11 | ) 12 | } 13 | \arguments{ 14 | \item{workspace}{The \code{Workspace} object of the workspace to delete.} 15 | 16 | \item{delete_dependent_resources}{If \code{TRUE} the workspace's associated 17 | resources, i.e. the ACR, storage account, key vault, and application insights, 18 | will also be deleted.} 19 | 20 | \item{no_wait}{If \code{TRUE}, do not wait for the workspace deletion to complete.} 21 | } 22 | \value{ 23 | None 24 | } 25 | \description{ 26 | Delete the Azure Machine Learning workspace resource. \code{delete_workspace()} 27 | can also delete the workspace's associated resources. 28 | } 29 | -------------------------------------------------------------------------------- /man/detach_aks_compute.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{detach_aks_compute} 4 | \alias{detach_aks_compute} 5 | \title{Detach an AksCompute cluster from its associated workspace} 6 | \usage{ 7 | detach_aks_compute(cluster) 8 | } 9 | \arguments{ 10 | \item{cluster}{The \code{AksCompute} object.} 11 | } 12 | \value{ 13 | None 14 | } 15 | \description{ 16 | Detach the AksCompute cluster from its associated workspace. No 17 | underlying cloud resource will be deleted; the association will 18 | just be removed. 19 | } 20 | -------------------------------------------------------------------------------- /man/download_file_from_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{download_file_from_run} 4 | \alias{download_file_from_run} 5 | \title{Download a file from a run} 6 | \usage{ 7 | download_file_from_run(run, name, output_file_path = NULL) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | 12 | \item{name}{A string of the name of the artifact to be downloaded.} 13 | 14 | \item{output_file_path}{A string of the local path where to download 15 | the artifact to.} 16 | } 17 | \value{ 18 | None 19 | } 20 | \description{ 21 | Download a file from the run record. You can download any file that 22 | was uploaded to the run record via \code{upload_files_to_run()} or 23 | \code{upload_folder_to_run()}, or any file that was written out to 24 | the \code{./outputs} or \code{./logs} folders during a run. 25 | 26 | You can see what files are available to download from the run record 27 | by calling \code{get_run_file_names()}.
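For example, a minimal sketch (the experiment name, run ID, and artifact name are placeholders for illustration): \preformatted{ws <- load_workspace_from_config()
exp <- experiment(ws, name = "myexperiment")
run <- get_run(exp, run_id = "myrunid")
# Download a file written to ./outputs during the run
download_file_from_run(run, name = "outputs/model.rds",
                       output_file_path = "model.rds")
}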
28 | } 29 | \seealso{ 30 | \code{\link[=download_files_from_run]{download_files_from_run()}} 31 | } 32 | -------------------------------------------------------------------------------- /man/download_files_from_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{download_files_from_run} 4 | \alias{download_files_from_run} 5 | \title{Download files from a run} 6 | \usage{ 7 | download_files_from_run( 8 | run, 9 | prefix = NULL, 10 | output_directory = NULL, 11 | output_paths = NULL, 12 | batch_size = 100L 13 | ) 14 | } 15 | \arguments{ 16 | \item{run}{The \code{Run} object.} 17 | 18 | \item{prefix}{A string of the filepath prefix (folder name) from 19 | which to download all artifacts. If not specified, all the artifacts 20 | in the run record will be downloaded.} 21 | 22 | \item{output_directory}{(Optional) A string of the directory that all 23 | artifact paths use as a prefix.} 24 | 25 | \item{output_paths}{(Optional) A list of strings of the local filepaths 26 | where the artifacts will be downloaded to.} 27 | 28 | \item{batch_size}{An int of the number of files to download per batch.} 29 | } 30 | \value{ 31 | None 32 | } 33 | \description{ 34 | Download files from the run record. You can download any files that 35 | were uploaded to the run record via \code{upload_files_to_run()} or 36 | \code{upload_folder_to_run()}, or any files that were written out to 37 | the \code{./outputs} or \code{./logs} folders during a run. 38 | } 39 | \seealso{ 40 | \code{\link[=download_file_from_run]{download_file_from_run()}} 41 | } 42 | -------------------------------------------------------------------------------- /man/download_from_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datastore.R 3 | \name{download_from_datastore} 4 | \alias{download_from_datastore} 5 | \title{Download data from a datastore to the local file system} 6 | \usage{ 7 | download_from_datastore( 8 | datastore, 9 | target_path, 10 | prefix = NULL, 11 | overwrite = FALSE, 12 | show_progress = TRUE 13 | ) 14 | } 15 | \arguments{ 16 | \item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} 17 | 18 | \item{target_path}{A string of the local directory to download the file to.} 19 | 20 | \item{prefix}{A string of the path to the folder in the blob container 21 | or file store to download. If \code{NULL}, will download everything in the blob 22 | container or file share.} 23 | 24 | \item{overwrite}{If \code{TRUE}, overwrites any existing data at \code{target_path}.} 25 | 26 | \item{show_progress}{If \code{TRUE}, show progress of the download in the console.} 27 | } 28 | \value{ 29 | An integer of the number of files successfully downloaded. 30 | } 31 | \description{ 32 | Download data from the datastore to the local file system.
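For example, a minimal sketch (the prefix and target directory are placeholders for illustration): \preformatted{ws <- load_workspace_from_config()
ds <- get_default_datastore(ws)
# Download everything under the 'mydata' folder into ./data
n_files <- download_from_datastore(ds, target_path = "data",
                                   prefix = "mydata")
}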
33 | } 34 | -------------------------------------------------------------------------------- /man/download_from_file_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{download_from_file_dataset} 4 | \alias{download_from_file_dataset} 5 | \title{Download file streams defined by the dataset as local files.} 6 | \usage{ 7 | download_from_file_dataset(dataset, target_path = NULL, overwrite = FALSE) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Dataset object} 11 | 12 | \item{target_path}{The local directory to download the files to. If NULL, 13 | the data will be downloaded into a temporary directory.} 14 | 15 | \item{overwrite}{Indicates whether to overwrite existing files. The default 16 | is FALSE. Existing files will be overwritten if \code{overwrite} is set to TRUE; 17 | otherwise an exception will be raised.} 18 | } 19 | \value{ 20 | A list of file paths for each file downloaded. 21 | } 22 | \description{ 23 | Download file streams defined by the dataset as local files. If \code{target_path} starts 24 | with a /, it will be treated as an absolute path; otherwise, it will be treated as a 25 | path relative to the current working directory. 26 | } 27 | -------------------------------------------------------------------------------- /man/download_model.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{download_model} 4 | \alias{download_model} 5 | \title{Download a model to the local file system} 6 | \usage{ 7 | download_model(model, target_dir = ".", exist_ok = FALSE) 8 | } 9 | \arguments{ 10 | \item{model}{The \code{Model} object.} 11 | 12 | \item{target_dir}{A string of the path to the directory on your local 13 | file system for where to download the model to. Defaults to ".".} 14 | 15 | \item{exist_ok}{If \code{TRUE}, replace the downloaded folder/file if they 16 | already exist.} 17 | } 18 | \value{ 19 | A string of the path to the file or folder of the downloaded 20 | model. 21 | } 22 | \description{ 23 | Download a registered model to the \code{target_dir} of your local file 24 | system. 25 | } 26 | \examples{ 27 | \dontrun{ 28 | ws <- load_workspace_from_config() 29 | model <- get_model(ws, name = "my_model", version = 2) 30 | download_model(model, target_dir = tempdir(), exist_ok = TRUE) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /man/drop_columns_from_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{drop_columns_from_dataset} 4 | \alias{drop_columns_from_dataset} 5 | \title{Drop the specified columns from the dataset.} 6 | \usage{ 7 | drop_columns_from_dataset(dataset, columns) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Tabular Dataset object.} 11 | 12 | \item{columns}{A name or a list of names for the columns to drop.} 13 | } 14 | \value{ 15 | A new TabularDataset object with the specified columns dropped. 16 | } 17 | \description{ 18 | Drop the specified columns from the dataset. If a timeseries column is dropped, 19 | the corresponding capabilities will be dropped for the returned dataset as well.
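For example, a minimal sketch (assuming \code{dataset} is an existing Tabular Dataset; the column names are placeholders): \preformatted{new_dataset <- drop_columns_from_dataset(dataset,
                                         columns = list("col1", "col2"))
}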
20 | } 21 | -------------------------------------------------------------------------------- /man/experiment.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/experiment.R 3 | \name{experiment} 4 | \alias{experiment} 5 | \title{Create an Azure Machine Learning experiment} 6 | \usage{ 7 | experiment(workspace, name) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | 12 | \item{name}{A string of the experiment name. The name must be between 13 | 3 and 36 characters, start with a letter or number, and can only contain 14 | letters, numbers, underscores, and dashes.} 15 | } 16 | \value{ 17 | The \code{Experiment} object. 18 | } 19 | \description{ 20 | An experiment is a grouping of many runs from a specified script. 21 | } 22 | \examples{ 23 | \dontrun{ 24 | ws <- load_workspace_from_config() 25 | exp <- experiment(ws, name = 'myexperiment') 26 | } 27 | } 28 | \seealso{ 29 | \code{submit_experiment()} 30 | } 31 | -------------------------------------------------------------------------------- /man/filter_dataset_after_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{filter_dataset_after_time} 4 | \alias{filter_dataset_after_time} 5 | \title{Filter Tabular Dataset with time stamp columns after a specified start time.} 6 | \usage{ 7 | filter_dataset_after_time(dataset, start_time, include_boundary = TRUE) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Tabular Dataset object} 11 | 12 | \item{start_time}{The lower bound for filtering data.} 13 | 14 | \item{include_boundary}{Boolean indicating if the row associated with the 15 | boundary time (\code{start_time}) should be included.} 16 | } 17 | \value{ 18 | The filtered Tabular Dataset 19 | } 20 | \description{ 21 | Filter Tabular Dataset with time stamp columns after a specified start time. 22 | } 23 | -------------------------------------------------------------------------------- /man/filter_dataset_before_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{filter_dataset_before_time} 4 | \alias{filter_dataset_before_time} 5 | \title{Filter Tabular Dataset with time stamp columns before a specified end time.} 6 | \usage{ 7 | filter_dataset_before_time(dataset, end_time, include_boundary = TRUE) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Tabular Dataset object} 11 | 12 | \item{end_time}{The upper bound for filtering data.} 13 | 14 | \item{include_boundary}{Boolean indicating if the row associated with the 15 | boundary time (\code{end_time}) should be included.} 16 | } 17 | \value{ 18 | The filtered Tabular Dataset 19 | } 20 | \description{ 21 | Filter Tabular Dataset with time stamp columns before a specified end time.
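For example, a minimal sketch (assuming \code{dataset} has timestamp columns defined and that \code{as.POSIXct()} is an acceptable way to supply the datetime bound): \preformatted{filtered <- filter_dataset_before_time(dataset,
                                       end_time = as.POSIXct("2020-01-01"))
}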
22 | } 23 | -------------------------------------------------------------------------------- /man/filter_dataset_between_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{filter_dataset_between_time} 4 | \alias{filter_dataset_between_time} 5 | \title{Filter Tabular Dataset between a specified start and end time.} 6 | \usage{ 7 | filter_dataset_between_time( 8 | dataset, 9 | start_time, 10 | end_time, 11 | include_boundary = TRUE 12 | ) 13 | } 14 | \arguments{ 15 | \item{dataset}{The Tabular Dataset object} 16 | 17 | \item{start_time}{The lower bound for filtering data.} 18 | 19 | \item{end_time}{The upper bound for filtering data.} 20 | 21 | \item{include_boundary}{Boolean indicating if the row associated with the 22 | boundary time (\code{start_time} and \code{end_time}) should be included.} 23 | } 24 | \value{ 25 | The filtered Tabular Dataset 26 | } 27 | \description{ 28 | Filter Tabular Dataset between a specified start and end time. 29 | } 30 | -------------------------------------------------------------------------------- /man/filter_dataset_from_recent_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{filter_dataset_from_recent_time} 4 | \alias{filter_dataset_from_recent_time} 5 | \title{Filter Tabular Dataset to contain only the specified duration (amount) of recent data.} 6 | \usage{ 7 | filter_dataset_from_recent_time(dataset, time_delta, include_boundary = TRUE) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Tabular Dataset object} 11 | 12 | \item{time_delta}{The duration (amount) of recent data to retrieve.} 13 | 14 | \item{include_boundary}{Boolean indicating if the row associated with the 15 | boundary time (\code{time_delta}) should be included.} 16 | } 17 | \value{ 18 | The filtered Tabular Dataset 19 | } 20 | \description{ 21 | Filter Tabular Dataset to contain only the specified duration (amount) of recent data. 22 | } 23 | -------------------------------------------------------------------------------- /man/generate_entry_script.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/do_azureml_parallel.R 3 | \name{generate_entry_script} 4 | \alias{generate_entry_script} 5 | \title{Generates the control script for the experiment.} 6 | \usage{ 7 | generate_entry_script(source_directory) 8 | } 9 | \arguments{ 10 | \item{source_directory}{The directory which contains all the files 11 | needed for the experiment.} 12 | } 13 | \description{ 14 | Generates the control script for the experiment. 15 | } 16 | -------------------------------------------------------------------------------- /man/generate_new_webservice_key.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{generate_new_webservice_key} 4 | \alias{generate_new_webservice_key} 5 | \title{Regenerate one of a web service's keys} 6 | \usage{ 7 | generate_new_webservice_key(webservice, key_type) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{AciWebservice} or \code{AksWebservice} object.} 11 | 12 | \item{key_type}{A string of which key to regenerate. 
Options are 13 | "Primary" or "Secondary".} 14 | } 15 | \value{ 16 | None 17 | } 18 | \description{ 19 | Regenerate either the primary or secondary authentication key for 20 | an \code{AciWebservice} or \code{AksWebservice}. The web service must have 21 | been deployed with key-based authentication enabled. 22 | 23 | Not supported for \code{LocalWebservice} deployments. 24 | } 25 | -------------------------------------------------------------------------------- /man/get_aks_compute_credentials.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{get_aks_compute_credentials} 4 | \alias{get_aks_compute_credentials} 5 | \title{Get the credentials for an AksCompute cluster} 6 | \usage{ 7 | get_aks_compute_credentials(cluster) 8 | } 9 | \arguments{ 10 | \item{cluster}{The \code{AksCompute} object.} 11 | } 12 | \value{ 13 | Named list of the cluster details. 14 | } 15 | \description{ 16 | Retrieve the credentials for an AksCompute cluster. 17 | } 18 | -------------------------------------------------------------------------------- /man/get_best_run_by_primary_metric.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{get_best_run_by_primary_metric} 4 | \alias{get_best_run_by_primary_metric} 5 | \title{Return the best performing run amongst all completed runs} 6 | \usage{ 7 | get_best_run_by_primary_metric( 8 | hyperdrive_run, 9 | include_failed = TRUE, 10 | include_canceled = TRUE 11 | ) 12 | } 13 | \arguments{ 14 | \item{hyperdrive_run}{The \code{HyperDriveRun} object.} 15 | 16 | \item{include_failed}{If \code{TRUE}, include the failed runs.} 17 | 18 | \item{include_canceled}{If \code{TRUE}, include the canceled runs.} 19 | } 20 | \value{ 21 | The \code{Run} object. 22 | } 23 | \description{ 24 | Find and return the run that corresponds to the best performing run 25 | amongst all the completed runs. 26 | 27 | The best performing run is identified solely based on the primary metric 28 | parameter specified in the \code{HyperDriveConfig} (\code{primary_metric_name}). 29 | The \code{PrimaryMetricGoal} governs whether the minimum or maximum of the 30 | primary metric is used. To do a more detailed analysis of the metrics from all the 31 | runs launched by this HyperDrive run, use \code{get_child_run_metrics()}. 32 | Only one of the runs is returned from \code{get_best_run_by_primary_metric()}, 33 | even if several of the runs launched by this HyperDrive run reached 34 | the same best metric. 35 | } 36 | -------------------------------------------------------------------------------- /man/get_child_run_hyperparameters.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{get_child_run_hyperparameters} 4 | \alias{get_child_run_hyperparameters} 5 | \title{Get the hyperparameters for all child runs} 6 | \usage{ 7 | get_child_run_hyperparameters(hyperdrive_run) 8 | } 9 | \arguments{ 10 | \item{hyperdrive_run}{The \code{HyperDriveRun} object.} 11 | } 12 | \value{ 13 | The named list of hyperparameters where element name 14 | is the run_id, e.g. \code{list("run_id" = hyperparameters)}. 15 | } 16 | \description{ 17 | Return the hyperparameters for all the child runs of the 18 | HyperDrive run.
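For example, a minimal sketch (assuming \code{hyperdrive_run} is a completed HyperDrive run; "run_id" is a placeholder for a child run's ID): \preformatted{hyperparameters <- get_child_run_hyperparameters(hyperdrive_run)
# Hyperparameters for one child run, keyed by its run ID
hyperparameters[["run_id"]]
}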
19 | } 20 | -------------------------------------------------------------------------------- /man/get_child_run_metrics.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{get_child_run_metrics} 4 | \alias{get_child_run_metrics} 5 | \title{Get the metrics from all child runs} 6 | \usage{ 7 | get_child_run_metrics(hyperdrive_run) 8 | } 9 | \arguments{ 10 | \item{hyperdrive_run}{The \code{HyperDriveRun} object.} 11 | } 12 | \value{ 13 | The named list of metrics where element name is 14 | the run_id, e.g. \code{list("run_id" = metrics)}. 15 | } 16 | \description{ 17 | Return the metrics from all the child runs of the 18 | HyperDrive run. 19 | } 20 | -------------------------------------------------------------------------------- /man/get_child_runs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_child_runs} 4 | \alias{get_child_runs} 5 | \title{Get all children for the current run selected by specified filters} 6 | \usage{ 7 | get_child_runs( 8 | parent_run, 9 | recursive = FALSE, 10 | tags = NULL, 11 | properties = NULL, 12 | type = NULL, 13 | status = NULL 14 | ) 15 | } 16 | \arguments{ 17 | \item{parent_run}{The parent \code{Run} object.} 18 | 19 | \item{recursive}{Boolean indicating whether to recurse through all descendants.} 20 | 21 | \item{tags}{If specified, returns runs matching the specified "tag" or list(tag = value).} 22 | 23 | \item{properties}{If specified, returns runs matching the specified "property" or list(property = value).} 24 | 25 | \item{type}{If specified, returns runs matching this type.} 26 | 27 | \item{status}{If specified, returns runs with the specified status.} 28 | } 29 | \value{ 30 | A list of child runs, \code{Run} objects. 31 | } 32 | \description{ 33 | Get all children for the current run selected by specified filters. 34 | } 35 | -------------------------------------------------------------------------------- /man/get_child_runs_sorted_by_primary_metric.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{get_child_runs_sorted_by_primary_metric} 4 | \alias{get_child_runs_sorted_by_primary_metric} 5 | \title{Get the child runs sorted in descending order by 6 | best primary metric} 7 | \usage{ 8 | get_child_runs_sorted_by_primary_metric( 9 | hyperdrive_run, 10 | top = 0L, 11 | reverse = FALSE, 12 | discard_no_metric = FALSE 13 | ) 14 | } 15 | \arguments{ 16 | \item{hyperdrive_run}{The \code{HyperDriveRun} object.} 17 | 18 | \item{top}{An integer of the number of top child runs to be returned. If \code{0} 19 | (the default value), all child runs will be returned.} 20 | 21 | \item{reverse}{If \code{TRUE}, the order will be reversed. This sorting only 22 | impacts child runs with the primary metric.} 23 | 24 | \item{discard_no_metric}{If \code{FALSE}, child runs without the primary metric 25 | will be appended to the list returned.} 26 | } 27 | \value{ 28 | The named list of child runs. 29 | } 30 | \description{ 31 | Return a list of child runs of the HyperDrive run sorted by their best 32 | primary metric.
The sorting is done according to the primary metric and 33 | its goal: if it is maximize, then the child runs are returned in descending 34 | order of their best primary metric. If \code{reverse = TRUE}, the order is 35 | reversed. Each child in the result has run id, hyperparameters, best primary 36 | metric value, and status. 37 | 38 | Child runs without the primary metric are discarded when 39 | \code{discard_no_metric = TRUE}. Otherwise, they are appended to the list behind 40 | other child runs with the primary metric. Note that the reverse option has no 41 | impact on them. 42 | } 43 | -------------------------------------------------------------------------------- /man/get_compute.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{get_compute} 4 | \alias{get_compute} 5 | \title{Get an existing compute cluster} 6 | \usage{ 7 | get_compute(workspace, cluster_name) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | 12 | \item{cluster_name}{A string of the name of the cluster.} 13 | } 14 | \value{ 15 | The \code{AmlCompute} or \code{AksCompute} object. 16 | } 17 | \description{ 18 | Returns an \code{AmlCompute} or \code{AksCompute} object for an existing compute 19 | resource. If the compute target doesn't exist, the function will return 20 | \code{NULL}. 21 | } 22 | \examples{ 23 | \dontrun{ 24 | ws <- load_workspace_from_config() 25 | compute_target <- get_compute(ws, cluster_name = 'mycluster') 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /man/get_current_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_current_run} 4 | \alias{get_current_run} 5 | \title{Get the context object for a run} 6 | \usage{ 7 | get_current_run(allow_offline = TRUE) 8 | } 9 | \arguments{ 10 | \item{allow_offline}{If \code{TRUE}, allow the service context to 11 | fall back to offline mode so that the training script can be 12 | tested locally without submitting a job with the SDK.} 13 | } 14 | \value{ 15 | The \code{Run} object. 16 | } 17 | \description{ 18 | This function is commonly used to retrieve the authenticated 19 | run object inside of a script to be submitted for execution 20 | via \code{submit_experiment()}. Note that the logging functions 21 | (\verb{log_*} methods, \code{upload_files_to_run()}, \code{upload_folder_to_run()}) 22 | will by default log the specified metrics or files to the 23 | run returned from \code{get_current_run()}. 24 | } 25 | -------------------------------------------------------------------------------- /man/get_dataset_by_id.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{get_dataset_by_id} 4 | \alias{get_dataset_by_id} 5 | \title{Get Dataset by ID.} 6 | \usage{ 7 | get_dataset_by_id(workspace, id) 8 | } 9 | \arguments{ 10 | \item{workspace}{The existing AzureML workspace in which the Dataset is saved.} 11 | 12 | \item{id}{The ID of the dataset} 13 | } 14 | \value{ 15 | The Dataset object 16 | } 17 | \description{ 18 | Get a Dataset which is saved to the workspace using its ID. 
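For example, a minimal sketch (the ID value is a placeholder for illustration): \preformatted{ws <- load_workspace_from_config()
dataset <- get_dataset_by_id(ws, id = "my-dataset-id")
}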
19 | } 20 | -------------------------------------------------------------------------------- /man/get_dataset_by_name.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{get_dataset_by_name} 4 | \alias{get_dataset_by_name} 5 | \title{Get a registered Dataset from the workspace by its registration name.} 6 | \usage{ 7 | get_dataset_by_name(workspace, name, version = "latest") 8 | } 9 | \arguments{ 10 | \item{workspace}{The existing AzureML workspace in which the Dataset was registered.} 11 | 12 | \item{name}{The registration name.} 13 | 14 | \item{version}{The registration version. Defaults to "latest".} 15 | } 16 | \value{ 17 | The registered Dataset object. 18 | } 19 | \description{ 20 | Get a registered Dataset from the workspace by its registration name. 21 | } 22 | -------------------------------------------------------------------------------- /man/get_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datastore.R 3 | \name{get_datastore} 4 | \alias{get_datastore} 5 | \title{Get an existing datastore} 6 | \usage{ 7 | get_datastore(workspace, datastore_name) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | 12 | \item{datastore_name}{A string of the name of the datastore.} 13 | } 14 | \value{ 15 | The \code{AzureBlobDatastore}, 16 | \code{AzureFileDatastore}, 17 | \code{AzureSqlDatabaseDatastore}, 18 | \code{AzureDataLakeGen2Datastore}, or 19 | \code{AzurePostgreSqlDatastore} object. 20 | } 21 | \description{ 22 | Get the corresponding datastore object for an existing 23 | datastore by name from the given workspace. 24 | } 25 | -------------------------------------------------------------------------------- /man/get_default_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{get_default_datastore} 4 | \alias{get_default_datastore} 5 | \title{Get the default datastore for a workspace} 6 | \usage{ 7 | get_default_datastore(workspace) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | } 12 | \value{ 13 | The default \code{Datastore} object. 14 | } 15 | \description{ 16 | Returns the default datastore associated with the workspace. 17 | 18 | When you create a workspace, an Azure blob container and Azure file share 19 | are registered to the workspace with the names \code{workspaceblobstore} and 20 | \code{workspacefilestore}, respectively. They store the connection information 21 | of the blob container and the file share that is provisioned in the storage 22 | account attached to the workspace. The \code{workspaceblobstore} is set as the 23 | default datastore, and remains the default datastore unless you set a new 24 | datastore as the default with \code{set_default_datastore()}.
25 | } 26 | \section{Examples}{ 27 | 28 | Get the default datastore for the workspace:\preformatted{ws <- load_workspace_from_config() 29 | ds <- get_default_datastore(ws) 30 | } 31 | 32 | If you have not changed the default datastore for the workspace, the 33 | following code will return the same datastore object as the above 34 | example:\preformatted{ws <- load_workspace_from_config() 35 | ds <- get_datastore(ws, datastore_name = 'workspaceblobstore') 36 | } 37 | } 38 | 39 | \seealso{ 40 | \code{\link[=set_default_datastore]{set_default_datastore()}} 41 | } 42 | -------------------------------------------------------------------------------- /man/get_default_keyvault.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{get_default_keyvault} 4 | \alias{get_default_keyvault} 5 | \title{Get the default keyvault for a workspace} 6 | \usage{ 7 | get_default_keyvault(workspace) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | } 12 | \value{ 13 | The \code{Keyvault} object. 14 | } 15 | \description{ 16 | Returns a \code{Keyvault} object representing the default 17 | \href{https://docs.microsoft.com/en-us/azure/key-vault/key-vault-overview}{Azure Key Vault} 18 | associated with the workspace. 19 | } 20 | \seealso{ 21 | \code{\link[=set_secrets]{set_secrets()}} \code{\link[=get_secrets]{get_secrets()}} \code{\link[=list_secrets]{list_secrets()}} \code{\link[=delete_secrets]{delete_secrets()}} 22 | } 23 | -------------------------------------------------------------------------------- /man/get_environment.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/environment.R 3 | \name{get_environment} 4 | \alias{get_environment} 5 | \title{Get an existing environment} 6 | \usage{ 7 | get_environment(workspace, name, version = NULL) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | 12 | \item{name}{A string of the name of the environment.} 13 | 14 | \item{version}{A string of the version of the environment.} 15 | } 16 | \value{ 17 | The \code{Environment} object. 18 | } 19 | \description{ 20 | Returns an \code{Environment} object for an existing environment in 21 | the workspace. 22 | } 23 | \examples{ 24 | \dontrun{ 25 | ws <- load_workspace_from_config() 26 | env <- get_environment(ws, name = 'myenv', version = '1') 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /man/get_file_dataset_paths.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{get_file_dataset_paths} 4 | \alias{get_file_dataset_paths} 5 | \title{Get a list of file paths for each file stream defined by the dataset.} 6 | \usage{ 7 | get_file_dataset_paths(dataset) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Dataset object.} 11 | } 12 | \value{ 13 | A list of file paths. 14 | } 15 | \description{ 16 | Get a list of file paths for each file stream defined by the dataset. The file 17 | paths are relative paths for local files when the file streams are downloaded 18 | or mounted. A common prefix will be removed from the file paths based on how 19 | the data source was specified to create the dataset.
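For example, a minimal sketch (assuming \code{dataset} is an existing File Dataset): \preformatted{paths <- get_file_dataset_paths(dataset)
head(paths)
}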
20 | } 21 | -------------------------------------------------------------------------------- /man/get_input_dataset_from_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{get_input_dataset_from_run} 4 | \alias{get_input_dataset_from_run} 5 | \title{Return the named list for input datasets.} 6 | \usage{ 7 | get_input_dataset_from_run(name, run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{The name of the input dataset.} 11 | 12 | \item{run}{The run taking the dataset as input.} 13 | } 14 | \value{ 15 | A dataset object corresponding to the "name". 16 | } 17 | \description{ 18 | Return the named list for input datasets. 19 | } 20 | -------------------------------------------------------------------------------- /man/get_model.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{get_model} 4 | \alias{get_model} 5 | \title{Get a registered model} 6 | \usage{ 7 | get_model( 8 | workspace, 9 | name = NULL, 10 | id = NULL, 11 | tags = NULL, 12 | properties = NULL, 13 | version = NULL, 14 | run_id = NULL 15 | ) 16 | } 17 | \arguments{ 18 | \item{workspace}{The \code{Workspace} object.} 19 | 20 | \item{name}{Retrieve the latest model with the corresponding 21 | name (a string), if it exists.} 22 | 23 | \item{id}{Retrieve the model with the corresponding ID (a string), 24 | if it exists.} 25 | 26 | \item{tags}{(Optional) Retrieve the model filtered based on the 27 | provided tags (a list), searching by either 'key' or 28 | 'list(key, value)'.} 29 | 30 | \item{properties}{(Optional) Retrieve the model filtered based on the 31 | provided properties (a list), searching by either 'key' or 32 | 'list(key, value)'.} 33 | 34 | \item{version}{(Optional) An int of the version of a model to 35 | retrieve, when provided along with \code{name}. The specific version of 36 | the specified named model will be returned, if it exists.} 37 | 38 | \item{run_id}{(Optional) Retrieve the model filtered by the provided 39 | run ID (a string) the model was registered from, if it exists.} 40 | } 41 | \value{ 42 | The \code{Model} object. 43 | } 44 | \description{ 45 | Returns a \code{Model} object for an existing model that has been 46 | previously registered to the given workspace. 47 | } 48 | -------------------------------------------------------------------------------- /man/get_model_package_container_registry.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{get_model_package_container_registry} 4 | \alias{get_model_package_container_registry} 5 | \title{Get the Azure container registry that a packaged model uses} 6 | \usage{ 7 | get_model_package_container_registry(package) 8 | } 9 | \arguments{ 10 | \item{package}{The \code{ModelPackage} object.} 11 | } 12 | \value{ 13 | The \code{ContainerRegistry} object. 14 | } 15 | \description{ 16 | Return a \code{ContainerRegistry} object for where the image 17 | (or base image, for Dockerfile packages) is stored in an 18 | Azure container registry.
19 | } 20 | \examples{ 21 | # Given a ModelPackage object, 22 | # get the container registry information 23 | \dontrun{ 24 | container_registry <- get_model_package_container_registry(package) 25 | address <- container_registry$address 26 | username <- container_registry$username 27 | password <- container_registry$password 28 | } 29 | 30 | # To then authenticate Docker with the Azure container registry from 31 | # a shell or command-line session, use the following command, replacing 32 | #
<address>, <username>, and <password> with the values retrieved 33 | # from above: 34 | # ```bash 35 | # docker login
<address> -u <username> -p <password> 36 | # ``` 37 | } 38 | \seealso{ 39 | \code{container_registry()} 40 | } 41 | -------------------------------------------------------------------------------- /man/get_model_package_creation_logs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{get_model_package_creation_logs} 4 | \alias{get_model_package_creation_logs} 5 | \title{Get the model package creation logs} 6 | \usage{ 7 | get_model_package_creation_logs(package, decode = TRUE, offset = 0) 8 | } 9 | \arguments{ 10 | \item{package}{The \code{ModelPackage} object.} 11 | 12 | \item{decode}{If \code{TRUE}, decode the raw log bytes to a string.} 13 | 14 | \item{offset}{An int of the byte offset from which to start 15 | reading the logs.} 16 | } 17 | \value{ 18 | A string or character vector of package creation logs. 19 | } 20 | \description{ 21 | Retrieve the creation logs from packaging a model with 22 | \code{package_model()}. 23 | } 24 | -------------------------------------------------------------------------------- /man/get_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_run} 4 | \alias{get_run} 5 | \title{Get an experiment run} 6 | \usage{ 7 | get_run(experiment, run_id) 8 | } 9 | \arguments{ 10 | \item{experiment}{The \code{Experiment} object.} 11 | 12 | \item{run_id}{A string of the run ID for the run.} 13 | } 14 | \value{ 15 | The \code{Run} object. 16 | } 17 | \description{ 18 | Given the associated experiment and run ID, return the 19 | run object for a previously submitted/tracked run. 20 | } 21 | -------------------------------------------------------------------------------- /man/get_run_details.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_run_details} 4 | \alias{get_run_details} 5 | \title{Get the details of a run} 6 | \usage{ 7 | get_run_details(run) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | } 12 | \value{ 13 | A named list of the details for the run. 14 | } 15 | \description{ 16 | Get the definition, status information, current log files, and 17 | other details of the run. 18 | } 19 | \details{ 20 | The returned list contains the following named elements: 21 | \itemize{ 22 | \item \emph{runId}: ID of the run. 23 | \item \emph{target}: The compute target of the run. 24 | \item \emph{status}: The run's current status. 25 | \item \emph{startTimeUtc}: UTC time of when the run was started, in ISO8601. 26 | \item \emph{endTimeUtc}: UTC time of when the run was finished (either 27 | Completed or Failed), in ISO8601. This element does not exist if 28 | the run is still in progress. 29 | \item \emph{properties}: Immutable key-value pairs associated with the run. 30 | \item \emph{logFiles}: Log files from the run.
31 | } 32 | } 33 | \seealso{ 34 | \code{\link[=get_run_details_with_logs]{get_run_details_with_logs()}} 35 | } 36 | -------------------------------------------------------------------------------- /man/get_run_details_with_logs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_run_details_with_logs} 4 | \alias{get_run_details_with_logs} 5 | \title{Get the details of a run along with the log files' contents} 6 | \usage{ 7 | get_run_details_with_logs(run) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | } 12 | \value{ 13 | A named list of the run details and log file contents. 14 | } 15 | \description{ 16 | Get the details of a run along with the log files' contents. 17 | } 18 | \seealso{ 19 | \code{\link[=get_run_details]{get_run_details()}} 20 | } 21 | -------------------------------------------------------------------------------- /man/get_run_file_names.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_run_file_names} 4 | \alias{get_run_file_names} 5 | \title{List the files that are stored in association with a run} 6 | \usage{ 7 | get_run_file_names(run) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | } 12 | \value{ 13 | A list of strings of the paths for existing artifacts 14 | in the run record. 15 | } 16 | \description{ 17 | Get the list of files stored in a run record. 18 | } 19 | \seealso{ 20 | \code{\link[=download_file_from_run]{download_file_from_run()}} \code{\link[=download_files_from_run]{download_files_from_run()}} 21 | } 22 | -------------------------------------------------------------------------------- /man/get_run_metrics.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_run_metrics} 4 | \alias{get_run_metrics} 5 | \title{Get the metrics logged to a run} 6 | \usage{ 7 | get_run_metrics( 8 | run, 9 | name = NULL, 10 | recursive = FALSE, 11 | run_type = NULL, 12 | populate = FALSE 13 | ) 14 | } 15 | \arguments{ 16 | \item{run}{The \code{Run} object.} 17 | 18 | \item{name}{The name of the metric.} 19 | 20 | \item{recursive}{Boolean indicating whether to also include the metrics of the run's descendant runs.} 21 | 22 | \item{run_type}{If specified, filter the metrics by run type.} 23 | 24 | \item{populate}{Boolean indicating whether to fetch the contents of external data linked to the metric.} 25 | } 26 | \value{ 27 | A named list of the metrics associated with the run, 28 | e.g. \code{list("metric_name" = metric)}. 29 | } 30 | \description{ 31 | Retrieve the metrics that were logged to a run with 32 | the \verb{log_*()} methods.
33 | } 34 | \section{Examples}{ 35 | \preformatted{ws <- load_workspace_from_config() 36 | exp <- experiment(ws, name = 'myexperiment') 37 | run <- get_run(exp, run_id = "myrunid") 38 | metrics <- get_run_metrics(run) 39 | } 40 | } 41 | 42 | -------------------------------------------------------------------------------- /man/get_runs_in_experiment.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/experiment.R 3 | \name{get_runs_in_experiment} 4 | \alias{get_runs_in_experiment} 5 | \title{Return a generator of the runs for an experiment} 6 | \usage{ 7 | get_runs_in_experiment( 8 | experiment, 9 | type = NULL, 10 | tags = NULL, 11 | properties = NULL, 12 | include_children = FALSE 13 | ) 14 | } 15 | \arguments{ 16 | \item{experiment}{The \code{Experiment} object.} 17 | 18 | \item{type}{Filter the returned generator of runs by the provided type.} 19 | 20 | \item{tags}{Filter runs by tags. A named list, e.g. list("tag" = "value").} 21 | 22 | \item{properties}{Filter runs by properties. A named list, 23 | e.g. list("property" = "value").} 24 | 25 | \item{include_children}{By default, fetch only top-level runs. 26 | Set to TRUE to list all runs.} 27 | } 28 | \value{ 29 | The list of runs matching supplied filters. 30 | } 31 | \description{ 32 | Return a generator of the runs for an experiment, in reverse 33 | chronological order. 34 | } 35 | -------------------------------------------------------------------------------- /man/get_secrets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/keyvault.R 3 | \name{get_secrets} 4 | \alias{get_secrets} 5 | \title{Get secrets from a keyvault} 6 | \usage{ 7 | get_secrets(keyvault, secrets) 8 | } 9 | \arguments{ 10 | \item{keyvault}{The \code{Keyvault} object.} 11 | 12 | \item{secrets}{A vector of secret names.} 13 | } 14 | \value{ 15 | A named list of found and not found secrets, where element 16 | name corresponds to the secret name. If a secret was not found, the 17 | corresponding element will be \code{NULL}. 18 | } 19 | \description{ 20 | Returns the secret values from the keyvault associated with the 21 | workspace for a given set of secret names. For runs submitted using 22 | \code{submit_experiment()}, you can use \code{get_secrets_from_run()} instead, 23 | as that method shortcuts workspace instantiation (since a submitted 24 | run is aware of its workspace). 25 | } 26 | -------------------------------------------------------------------------------- /man/get_secrets_from_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{get_secrets_from_run} 4 | \alias{get_secrets_from_run} 5 | \title{Get secrets from the keyvault associated with a run's workspace} 6 | \usage{ 7 | get_secrets_from_run(run, secrets) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | 12 | \item{secrets}{A vector of strings of secret names to retrieve 13 | the values for.} 14 | } 15 | \value{ 16 | A named list of found and not found secrets. 17 | If a secret was not found, the corresponding element will be \code{NULL}.
18 | } 19 | \description{ 20 | From within the script of a run submitted using 21 | \code{submit_experiment()}, you can use \code{get_secrets_from_run()} 22 | to get secrets that are stored in the keyvault of the associated 23 | workspace. 24 | 25 | Note that this method is slightly different from \code{get_secrets()}, 26 | which first requires you to instantiate the workspace object. 27 | Since a submitted run is aware of its workspace, 28 | \code{get_secrets_from_run()} shortcuts workspace instantiation and 29 | returns the secret value directly. 30 | 31 | Be careful not to expose the secret values by writing or 32 | printing them out. 33 | } 34 | \seealso{ 35 | \code{\link[=set_secrets]{set_secrets()}} 36 | } 37 | -------------------------------------------------------------------------------- /man/get_webservice.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{get_webservice} 4 | \alias{get_webservice} 5 | \title{Get a deployed web service} 6 | \usage{ 7 | get_webservice(workspace, name) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | 12 | \item{name}{A string of the name of the web service to retrieve.} 13 | } 14 | \value{ 15 | The \code{LocalWebservice}, \code{AciWebservice}, or \code{AksWebservice} object. 16 | } 17 | \description{ 18 | Return the corresponding Webservice object of a deployed web service from 19 | a given workspace. 20 | } 21 | -------------------------------------------------------------------------------- /man/get_webservice_keys.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{get_webservice_keys} 4 | \alias{get_webservice_keys} 5 | \title{Retrieve auth keys for a web service} 6 | \usage{ 7 | get_webservice_keys(webservice) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{AciWebservice} or \code{AksWebservice} object.} 11 | } 12 | \value{ 13 | A list of two strings corresponding to the primary and 14 | secondary authentication keys. 15 | } 16 | \description{ 17 | Get the authentication keys for a web service that is deployed 18 | with key-based authentication enabled. In order to enable 19 | key-based authentication, set the \code{auth_enabled = TRUE} parameter 20 | when you are creating or updating a deployment (either 21 | \code{aci_webservice_deployment_config()} or 22 | \code{aks_webservice_deployment_config()} for creation and 23 | \code{update_aci_webservice()} or \code{update_aks_webservice()} for updating). 24 | Note that key-based auth is enabled by default for \code{AksWebservice} 25 | but not for \code{AciWebservice}. 26 | 27 | To check if a web service has key-based auth enabled, you can 28 | access the following boolean property from the Webservice object: 29 | \code{service$auth_enabled} 30 | 31 | Not supported for \code{LocalWebservice} deployments.
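For example, a minimal sketch (assuming \code{service} is a deployed web service with key-based auth enabled): \preformatted{keys <- get_webservice_keys(service)
primary_key <- keys[[1]]
secondary_key <- keys[[2]]
}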
32 | } 33 | \seealso{ 34 | \code{generate_new_webservice_key()} 35 | } 36 | -------------------------------------------------------------------------------- /man/get_webservice_logs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{get_webservice_logs} 4 | \alias{get_webservice_logs} 5 | \title{Retrieve the logs for a web service} 6 | \usage{ 7 | get_webservice_logs(webservice, num_lines = 5000L) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{LocalWebservice}, \code{AciWebservice}, or 11 | \code{AksWebservice} object.} 12 | 13 | \item{num_lines}{An int of the maximum number of log lines to 14 | retrieve.} 15 | } 16 | \value{ 17 | A string of the logs for the web service. 18 | } 19 | \description{ 20 | You can get the detailed Docker engine log messages from your 21 | web service deployment. You can view the logs for local, ACI, 22 | and AKS deployments. 23 | 24 | For example, if your web service deployment fails, you can 25 | inspect the logs to help troubleshoot. 26 | } 27 | -------------------------------------------------------------------------------- /man/get_webservice_token.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{get_webservice_token} 4 | \alias{get_webservice_token} 5 | \title{Retrieve the auth token for a web service} 6 | \usage{ 7 | get_webservice_token(webservice) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{AksWebservice} object.} 11 | } 12 | \value{ 13 | An \code{AksServiceAccessToken} object. 14 | } 15 | \description{ 16 | Get the authentication token, scoped to the current user, 17 | for a web service that was deployed with token-based authentication 18 | enabled. Token-based authentication requires clients to use an Azure 19 | Active Directory account to request an authentication token, which is 20 | used to make requests to the deployed service. Only available for 21 | AKS deployments. 22 | 23 | In order to enable token-based authentication, set the 24 | \code{token_auth_enabled = TRUE} parameter when you are creating or 25 | updating a deployment (\code{aks_webservice_deployment_config()} for creation 26 | or \code{update_aks_webservice()} for updating). Note that you cannot have both 27 | key-based authentication and token-based authentication enabled. 28 | Token-based authentication is not enabled by default. 29 | 30 | To check if a web service has token-based auth enabled, you can 31 | access the following boolean property from the Webservice object: 32 | \code{service$token_auth_enabled} 33 | } 34 | -------------------------------------------------------------------------------- /man/get_workspace.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{get_workspace} 4 | \alias{get_workspace} 5 | \title{Get an existing workspace} 6 | \usage{ 7 | get_workspace(name, auth = NULL, subscription_id = NULL, resource_group = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the workspace name to get.} 11 | 12 | \item{auth}{The \code{ServicePrincipalAuthentication} or \code{InteractiveLoginAuthentication} 13 | object. For more details refer to https://aka.ms/aml-notebook-auth. 
If NULL, 14 | the default Azure CLI credentials will be used or the API will prompt for credentials.} 15 | 16 | \item{subscription_id}{A string of the subscription ID to use. The parameter 17 | is required if the user has access to more than one subscription.} 18 | 19 | \item{resource_group}{A string of the resource group to use. If \code{NULL}, the 20 | method will search all resource groups in the subscription.} 21 | } 22 | \value{ 23 | The \code{Workspace} object. 24 | } 25 | \description{ 26 | Returns a \code{Workspace} object for an existing Azure Machine Learning 27 | workspace. Throws an exception if the workspace doesn't exist or the 28 | required fields don't lead to a uniquely identifiable workspace. 29 | } 30 | \seealso{ 31 | \code{\link[=create_workspace]{create_workspace()}} \code{\link[=service_principal_authentication]{service_principal_authentication()}} \code{\link[=interactive_login_authentication]{interactive_login_authentication()}} 32 | } 33 | -------------------------------------------------------------------------------- /man/get_workspace_details.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{get_workspace_details} 4 | \alias{get_workspace_details} 5 | \title{Get the details of a workspace} 6 | \usage{ 7 | get_workspace_details(workspace) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | } 12 | \value{ 13 | A named list of the workspace details. 14 | } 15 | \description{ 16 | Returns the details of the workspace. 17 | } 18 | \section{Details}{ 19 | 20 | The returned named list contains the following elements: 21 | \itemize{ 22 | \item \emph{id}: URI pointing to the workspace resource, containing subscription ID, 23 | resource group, and workspace name. 24 | \item \emph{name}: Workspace name. 25 | \item \emph{location}: Workspace region. 26 | \item \emph{type}: URI of the format \code{"{providerName}/workspaces"}. 27 | \item \emph{workspaceid}: Workspace ID. 28 | \item \emph{description}: Workspace description. 29 | \item \emph{friendlyName}: Workspace friendly name. 30 | \item \emph{creationTime}: Time the workspace was created, in ISO 8601 format. 31 | \item \emph{containerRegistry}: Workspace container registry. 32 | \item \emph{keyVault}: Workspace key vault. 33 | \item \emph{applicationInsights}: Workspace App Insights. 34 | \item \emph{identityPrincipalId}: Workspace identity principal ID. 35 | \item \emph{identityTenantId}: Workspace tenant ID. 36 | \item \emph{identityType}: Workspace identity type. 37 | \item \emph{storageAccount}: Workspace storage account.
38 | } 39 | } 40 | 41 | -------------------------------------------------------------------------------- /man/github_package.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/environment.R 3 | \name{github_package} 4 | \alias{github_package} 5 | \title{Specifies a GitHub package to install in an environment} 6 | \usage{ 7 | github_package(repository, auth_token = NULL) 8 | } 9 | \arguments{ 10 | \item{repository}{The repository address of the GitHub package.} 11 | 12 | \item{auth_token}{Personal access token to install from a private repo.} 13 | } 14 | \value{ 15 | A named list containing the package specifications. 16 | } 17 | \description{ 18 | Specifies a GitHub package to install in the run environment. 19 | } 20 | \section{Examples}{ 21 | \preformatted{pkg1 <- github_package("Azure/azureml-sdk-for-r") 22 | 23 | env <- r_environment(name = "r_env", 24 | github_packages = list(pkg1)) 25 | } 26 | } 27 | 28 | \seealso{ 29 | \code{\link[=r_environment]{r_environment()}} 30 | } 31 | -------------------------------------------------------------------------------- /man/grid_parameter_sampling.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{grid_parameter_sampling} 4 | \alias{grid_parameter_sampling} 5 | \title{Define grid sampling over a hyperparameter search space} 6 | \usage{ 7 | grid_parameter_sampling(parameter_space) 8 | } 9 | \arguments{ 10 | \item{parameter_space}{A named list containing each parameter and its 11 | distribution, e.g. \code{list("parameter" = distribution)}.} 12 | } 13 | \value{ 14 | The \code{GridParameterSampling} object. 15 | } 16 | \description{ 17 | Grid sampling performs a simple grid search over all feasible values in 18 | the defined search space. It can only be used with hyperparameters 19 | specified using \code{choice()}.
20 | } 21 | \examples{ 22 | \dontrun{ 23 | param_sampling <- grid_parameter_sampling(list("num_hidden_layers" = choice(c(1, 2, 3)), 24 | "batch_size" = choice(c(16, 32)))) 25 | } 26 | } 27 | \seealso{ 28 | \code{choice()} 29 | } 30 | -------------------------------------------------------------------------------- /man/install_azureml.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/install.R 3 | \name{install_azureml} 4 | \alias{install_azureml} 5 | \title{Install the azureml sdk package} 6 | \usage{ 7 | install_azureml( 8 | version = "1.10.0", 9 | envname = "r-reticulate", 10 | conda_python_version = "3.6", 11 | restart_session = TRUE, 12 | remove_existing_env = FALSE 13 | ) 14 | } 15 | \arguments{ 16 | \item{version}{The azureml sdk package version to install.} 17 | 18 | \item{envname}{The name of the conda environment to create, if an environment 19 | other than the default is desired.} 20 | 21 | \item{conda_python_version}{The version of Python for the conda environment.} 22 | 23 | \item{restart_session}{Whether to restart the R session after installation.} 24 | 25 | \item{remove_existing_env}{Whether to delete the conda environment if it already exists.} 26 | } 27 | \value{ 28 | None 29 | } 30 | \description{ 31 | Install the azureml sdk package. 32 | } 33 | -------------------------------------------------------------------------------- /man/interactive_login_authentication.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{interactive_login_authentication} 4 | \alias{interactive_login_authentication} 5 | \title{Manages authentication and acquires an authorization token in interactive login workflows.} 6 | \usage{ 7 | interactive_login_authentication( 8 | force = FALSE, 9 | tenant_id = NULL, 10 | cloud = "AzureCloud" 11 | ) 12 | } 13 | \arguments{ 14 | \item{force}{Indicates whether "az login" will be run even if the old "az login" is still valid.} 15 | 16 | \item{tenant_id}{The string ID of the Active Directory tenant that the service 17 | identity belongs to. This can be used to specify a specific tenant when 18 | you have access to multiple tenants. If unspecified, the default tenant will be used.} 19 | 20 | \item{cloud}{The name of the target cloud. Can be one of "AzureCloud", "AzureChinaCloud", or 21 | "AzureUSGovernment". If no cloud is specified, "AzureCloud" is used.} 22 | } 23 | \value{ 24 | The \code{InteractiveLoginAuthentication} object. 25 | } 26 | \description{ 27 | Interactive login authentication is suitable for local experimentation on your own computer, and is the 28 | default authentication model when using the Azure Machine Learning SDK. 29 | The constructor of the class will prompt you to log in. It will then save the credentials 30 | for any subsequent attempts. If you are already logged in with the Azure CLI or have logged in before, the 31 | constructor will load the existing credentials without prompting.
32 | } 33 | \section{Examples}{ 34 | \preformatted{interactive_auth <- interactive_login_authentication(tenant_id="your-tenant-id") 35 | 36 | ws <- get_workspace("", 37 | "", 38 | "", 39 | auth = interactive_auth) 40 | } 41 | } 42 | 43 | \seealso{ 44 | \code{\link[=get_workspace]{get_workspace()}} \code{\link[=service_principal_authentication]{service_principal_authentication()}} 45 | } 46 | -------------------------------------------------------------------------------- /man/invoke_webservice.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{invoke_webservice} 4 | \alias{invoke_webservice} 5 | \title{Call a web service with the provided input} 6 | \usage{ 7 | invoke_webservice(webservice, input_data) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{LocalWebservice}, \code{AciWebservice}, or 11 | \code{AksWebservice} object.} 12 | 13 | \item{input_data}{The input data to invoke the web service with. This is 14 | the data your model expects as an input to run predictions.} 15 | } 16 | \value{ 17 | A named list of the result of calling the web service. This will 18 | return the predictions run from your model. 19 | } 20 | \description{ 21 | Invoke the web service with the provided input and receive 22 | predictions from the deployed model. The structure of the 23 | provided input data needs to match what the service's scoring 24 | script and model expect. See the "Details" section of 25 | \code{inference_config()}. 26 | } 27 | \details{ 28 | Instead of invoking the web service using \code{invoke_webservice()}, you can 29 | also consume the web service using the service's REST API. If you've 30 | enabled key-based authentication for your service, you will need to provide 31 | a service key as a token in your request header 32 | (see \code{get_webservice_keys()}). If you've enabled token-based 33 | authentication, you will need to provide a JWT token as a bearer 34 | token in your request header (see \code{get_webservice_token()}). 35 | 36 | To get the REST API address for the service's scoring endpoint, you can 37 | access the following property from the Webservice object: 38 | \code{service$scoring_uri} 39 | } 40 | -------------------------------------------------------------------------------- /man/keep_columns_from_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{keep_columns_from_dataset} 4 | \alias{keep_columns_from_dataset} 5 | \title{Keep the specified columns and drop all others from the dataset.} 6 | \usage{ 7 | keep_columns_from_dataset(dataset, columns, validate = FALSE) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Tabular Dataset object.} 11 | 12 | \item{columns}{The name or a list of names for the columns to keep.} 13 | 14 | \item{validate}{Indicates whether to validate if data can be loaded from the 15 | returned dataset. The default is FALSE. Validation requires that the data 16 | source is accessible from the current compute.} 17 | } 18 | \value{ 19 | A new Tabular Dataset object with only the specified columns kept. 20 | } 21 | \description{ 22 | Keep the specified columns and drop all others from the dataset. 23 | If a timeseries column is dropped, the corresponding capabilities will be 24 | dropped for the returned dataset as well.
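A minimal sketch (assuming \code{ws} is your \code{Workspace} object and a tabular dataset named "mytabulardataset" with "age" and "fare" columns is registered in it):\preformatted{dataset <- get_dataset_by_name(ws, name = "mytabulardataset")
# keep only the two columns of interest; all other columns are dropped
dataset_subset <- keep_columns_from_dataset(dataset, columns = c("age", "fare"))
df <- load_dataset_into_data_frame(dataset_subset)
}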
25 | } 26 | -------------------------------------------------------------------------------- /man/list_nodes_in_aml_compute.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{list_nodes_in_aml_compute} 4 | \alias{list_nodes_in_aml_compute} 5 | \title{Get the details (e.g. IP address, port, etc.) of all the compute nodes in the 6 | compute target} 7 | \usage{ 8 | list_nodes_in_aml_compute(cluster) 9 | } 10 | \arguments{ 11 | \item{cluster}{The cluster object.} 12 | } 13 | \value{ 14 | A data frame of the details of all the compute nodes in the cluster. 15 | } 16 | \description{ 17 | Get the details (e.g. IP address, port, etc.) of all the compute nodes in the 18 | compute target. 19 | } 20 | -------------------------------------------------------------------------------- /man/list_secrets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/keyvault.R 3 | \name{list_secrets} 4 | \alias{list_secrets} 5 | \title{List the secrets in a keyvault} 6 | \usage{ 7 | list_secrets(keyvault) 8 | } 9 | \arguments{ 10 | \item{keyvault}{The \code{Keyvault} object.} 11 | } 12 | \value{ 13 | A list of secret names. 14 | } 15 | \description{ 16 | Returns the list of secret names for all the secrets in the keyvault 17 | associated with the workspace. 18 | } 19 | -------------------------------------------------------------------------------- /man/list_supported_vm_sizes.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{list_supported_vm_sizes} 4 | \alias{list_supported_vm_sizes} 5 | \title{List the supported VM sizes in a region} 6 | \usage{ 7 | list_supported_vm_sizes(workspace, location = NULL) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | 12 | \item{location}{A string of the location of the cluster. If not specified, 13 | will default to the workspace location.} 14 | } 15 | \value{ 16 | A data frame of the supported VM sizes in a region, including the VM name, 17 | VCPUs, and RAM. 18 | } 19 | \description{ 20 | List the supported VM sizes in a region. 21 | } 22 | -------------------------------------------------------------------------------- /man/list_workspaces.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{list_workspaces} 4 | \alias{list_workspaces} 5 | \title{List all workspaces that the user has access to in a subscription ID} 6 | \usage{ 7 | list_workspaces(subscription_id, resource_group = NULL) 8 | } 9 | \arguments{ 10 | \item{subscription_id}{A string of the specified subscription ID to 11 | list the workspaces in.} 12 | 13 | \item{resource_group}{A string of the specified resource group to list 14 | the workspaces in. If \code{NULL}, the method will list all the workspaces within 15 | the specified subscription.} 16 | } 17 | \value{ 18 | A named list of \code{Workspace} objects where the element name corresponds 19 | to the workspace name. 20 | } 21 | \description{ 22 | List all workspaces that the user has access to in the specified 23 | \code{subscription_id} parameter. The list of workspaces can be filtered 24 | based on the resource group.
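For example (the subscription ID below is a placeholder):\preformatted{all_workspaces <- list_workspaces(subscription_id = "your-subscription-id")
# element names correspond to the workspace names
names(all_workspaces)
}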
25 | } 26 | -------------------------------------------------------------------------------- /man/load_dataset_into_data_frame.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{load_dataset_into_data_frame} 4 | \alias{load_dataset_into_data_frame} 5 | \title{Load all records from the dataset into a dataframe.} 6 | \usage{ 7 | load_dataset_into_data_frame( 8 | dataset, 9 | on_error = "null", 10 | out_of_range_datetime = "null" 11 | ) 12 | } 13 | \arguments{ 14 | \item{dataset}{The Tabular Dataset object.} 15 | 16 | \item{on_error}{How to handle any error values in the dataset, such as those 17 | produced by an error while parsing values. Valid values are 'null' which replaces 18 | them with NULL; and 'fail' which will result in an exception.} 19 | 20 | \item{out_of_range_datetime}{How to handle date-time values that are outside 21 | the range supported by Pandas. Valid values are 'null' which replaces them with 22 | NULL; and 'fail' which will result in an exception.} 23 | } 24 | \value{ 25 | A data.frame. 26 | } 27 | \description{ 28 | Load all records from the dataset into a dataframe. 29 | } 30 | -------------------------------------------------------------------------------- /man/load_workspace_from_config.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{load_workspace_from_config} 4 | \alias{load_workspace_from_config} 5 | \title{Load workspace configuration details from a config file} 6 | \usage{ 7 | load_workspace_from_config(path = NULL, file_name = NULL) 8 | } 9 | \arguments{ 10 | \item{path}{A string of the path to the config file or starting directory 11 | for search. The parameter defaults to starting the search in the current 12 | directory.} 13 | 14 | \item{file_name}{A string that will override the config file name to 15 | search for when path is a directory path.} 16 | } 17 | \value{ 18 | The \code{Workspace} object. 19 | } 20 | \description{ 21 | Returns a \code{Workspace} object for an existing Azure Machine Learning 22 | workspace by reading the workspace configuration from a file. The method 23 | provides a simple way of reusing the same workspace across multiple files or 24 | projects. Users can save the workspace ARM properties using 25 | \code{write_workspace_config()}, and use this method to load the same workspace 26 | in different files or projects without retyping the workspace ARM properties. 27 | } 28 | \seealso{ 29 | \code{\link[=write_workspace_config]{write_workspace_config()}} 30 | } 31 | -------------------------------------------------------------------------------- /man/local_webservice_deployment_config.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice-local.R 3 | \name{local_webservice_deployment_config} 4 | \alias{local_webservice_deployment_config} 5 | \title{Create a deployment config for deploying a local web service} 6 | \usage{ 7 | local_webservice_deployment_config(port = NULL) 8 | } 9 | \arguments{ 10 | \item{port}{An int of the local port on which to expose the service's 11 | HTTP endpoint.} 12 | } 13 | \value{ 14 | The \code{LocalWebserviceDeploymentConfiguration} object. 
15 | } 16 | \description{ 17 | You can deploy a model locally for limited testing and troubleshooting. 18 | To do so, you will need to have Docker installed on your local machine. 19 | 20 | If you are using an Azure Machine Learning Compute Instance for 21 | development, you can also deploy locally on your compute instance. 22 | } 23 | \examples{ 24 | \dontrun{ 25 | deployment_config <- local_webservice_deployment_config(port = 8890) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /man/log_confusion_matrix_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_confusion_matrix_to_run} 4 | \alias{log_confusion_matrix_to_run} 5 | \title{Log a confusion matrix metric to a run} 6 | \usage{ 7 | log_confusion_matrix_to_run(name, value, description = "", run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{value}{A named list containing name, version, and data properties.} 13 | 14 | \item{description}{(Optional) A string of the metric description.} 15 | 16 | \item{run}{The \code{Run} object. If not specified, will default 17 | to the current run from the service context.} 18 | } 19 | \value{ 20 | None 21 | } 22 | \description{ 23 | Log a confusion matrix metric to a run 24 | } 25 | -------------------------------------------------------------------------------- /man/log_image_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_image_to_run} 4 | \alias{log_image_to_run} 5 | \title{Log an image metric to a run} 6 | \usage{ 7 | log_image_to_run(name, path = NULL, plot = NULL, description = "", run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{path}{A string of the path or stream of the image.} 13 | 14 | \item{plot}{The ggplot2 plot to log as an image.} 15 | 16 | \item{description}{(Optional) A string of the metric description.} 17 | 18 | \item{run}{The \code{Run} object. If not specified, will default 19 | to the current run from the service context.} 20 | } 21 | \value{ 22 | None 23 | } 24 | \description{ 25 | Log an image to the run with the given metric name. Use 26 | \code{log_image_to_run()} to log an image file or ggplot2 plot to the 27 | run. These images will be visible and comparable in the run 28 | record. 29 | } 30 | -------------------------------------------------------------------------------- /man/log_list_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_list_to_run} 4 | \alias{log_list_to_run} 5 | \title{Log a vector metric value to a run} 6 | \usage{ 7 | log_list_to_run(name, value, description = "", run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{value}{The vector of elements to log.} 13 | 14 | \item{description}{(Optional) A string of the metric description.} 15 | 16 | \item{run}{The \code{Run} object. If not specified, will default 17 | to the current run from the service context.} 18 | } 19 | \value{ 20 | None 21 | } 22 | \description{ 23 | Log a vector with the given metric name to the run.
24 | } 25 | \section{Examples}{ 26 | \preformatted{log_list_to_run("Accuracies", c(0.6, 0.7, 0.87)) 27 | } 28 | } 29 | 30 | -------------------------------------------------------------------------------- /man/log_metric_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_metric_to_run} 4 | \alias{log_metric_to_run} 5 | \title{Log a metric to a run} 6 | \usage{ 7 | log_metric_to_run(name, value, run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{value}{The value of the metric.} 13 | 14 | \item{run}{The \code{Run} object. If not specified, will default 15 | to the current run from the service context.} 16 | } 17 | \value{ 18 | None 19 | } 20 | \description{ 21 | Log a numerical or string value with the given metric name 22 | to the run. Logging a metric to a run causes that metric to 23 | be stored in the run record in the experiment. You can log 24 | the same metric multiple times within a run, the result being 25 | considered a vector of that metric. 26 | } 27 | \section{Examples}{ 28 | \preformatted{log_metric_to_run("Accuracy", 0.95) 29 | } 30 | } 31 | 32 | -------------------------------------------------------------------------------- /man/log_predictions_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_predictions_to_run} 4 | \alias{log_predictions_to_run} 5 | \title{Log a predictions metric to a run} 6 | \usage{ 7 | log_predictions_to_run(name, value, description = "", run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{value}{A named list containing name, version, and data properties.} 13 | 14 | \item{description}{(Optional) A string of the metric description.} 15 | 16 | \item{run}{The \code{Run} object. If not specified, will default 17 | to the current run from the service context.} 18 | } 19 | \value{ 20 | None 21 | } 22 | \description{ 23 | \code{log_predictions_to_run()} logs a metric score that can be used to 24 | compare the distributions of true target values to the distribution 25 | of predicted values for a regression task. 26 | 27 | The predictions are binned and standard deviations are calculated 28 | for error bars on a line chart. 
29 | } 30 | \section{Examples}{ 31 | \preformatted{data <- list("bin_averages" = c(0.25, 0.75), 32 | "bin_errors" = c(0.013, 0.042), 33 | "bin_counts" = c(56, 34), 34 | "bin_edges" = c(0.0, 0.5, 1.0)) 35 | predictions <- list("schema_type" = "predictions", 36 | "schema_version" = "v1", 37 | "data" = data) 38 | log_predictions_to_run("mypredictions", predictions) 39 | } 40 | } 41 | 42 | -------------------------------------------------------------------------------- /man/log_residuals_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_residuals_to_run} 4 | \alias{log_residuals_to_run} 5 | \title{Log a residuals metric to a run} 6 | \usage{ 7 | log_residuals_to_run(name, value, description = "", run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{value}{A named list containing name, version, and data properties.} 13 | 14 | \item{description}{(Optional) A string of the metric description.} 15 | 16 | \item{run}{The \code{Run} object. If not specified, will default 17 | to the current run from the service context.} 18 | } 19 | \value{ 20 | None 21 | } 22 | \description{ 23 | \code{log_residuals_to_run()} logs the data needed to display a histogram 24 | of residuals for a regression task. The residuals are \code{predicted - actual}. 25 | 26 | There should be one more edge than the number of counts. 27 | } 28 | \section{Examples}{ 29 | \preformatted{data <- list("bin_edges" = c(50, 100, 200, 300, 350), 30 | "bin_counts" = c(0.88, 20, 30, 50.99)) 31 | residuals <- list("schema_type" = "residuals", 32 | "schema_version" = "v1", 33 | "data" = data) 34 | log_residuals_to_run("myresiduals", residuals) 35 | } 36 | } 37 | 38 | -------------------------------------------------------------------------------- /man/log_row_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_row_to_run} 4 | \alias{log_row_to_run} 5 | \title{Log a row metric to a run} 6 | \usage{ 7 | log_row_to_run(name, description = "", run = NULL, ...) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{description}{(Optional) A string of the metric description.} 13 | 14 | \item{run}{The \code{Run} object. If not specified, will default 15 | to the current run from the service context.} 16 | 17 | \item{...}{Each named parameter generates a column with the value 18 | specified.} 19 | } 20 | \value{ 21 | None 22 | } 23 | \description{ 24 | Using \code{log_row_to_run()} creates a metric with multiple columns 25 | as described in \code{...}. Each named parameter generates a column 26 | with the value specified. \code{log_row_to_run()} can be called once 27 | to log an arbitrary tuple, or multiple times in a loop to generate 28 | a complete table.
29 | } 30 | \section{Examples}{ 31 | 32 | Log an arbitrary tuple:\preformatted{log_row_to_run("Y over X", x = 1, y = 0.4) 33 | } 34 | 35 | Log the complete table:\preformatted{citrus <- c("orange", "lemon", "lime") 36 | sizes <- c(10, 7, 3) 37 | for (i in seq_along(citrus)) \{ 38 | log_row_to_run("citrus", fruit = citrus[i], size = sizes[i]) 39 | \} 40 | } 41 | } 42 | 43 | -------------------------------------------------------------------------------- /man/log_table_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{log_table_to_run} 4 | \alias{log_table_to_run} 5 | \title{Log a table metric to a run} 6 | \usage{ 7 | log_table_to_run(name, value, description = "", run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the metric.} 11 | 12 | \item{value}{The table value of the metric (a named list where the 13 | element name corresponds to the column name).} 14 | 15 | \item{description}{(Optional) A string of the metric description.} 16 | 17 | \item{run}{The \code{Run} object. If not specified, will default 18 | to the current run from the service context.} 19 | } 20 | \value{ 21 | None 22 | } 23 | \description{ 24 | Log a table metric with the given metric name to the run. The 25 | table value is a named list where each element corresponds to 26 | a column of the table. 27 | } 28 | \section{Examples}{ 29 | \preformatted{log_table_to_run("Y over X", 30 | list("x" = c(1, 2, 3), "y" = c(0.6, 0.7, 0.89))) 31 | } 32 | } 33 | 34 | -------------------------------------------------------------------------------- /man/lognormal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{lognormal} 4 | \alias{lognormal} 5 | \title{Specify a normal distribution of the form \code{exp(normal(mu, sigma))}} 6 | \usage{ 7 | lognormal(mu, sigma) 8 | } 9 | \arguments{ 10 | \item{mu}{A double of the mean of the normal distribution.} 11 | 12 | \item{sigma}{A double of the standard deviation of the normal distribution.} 13 | } 14 | \value{ 15 | A list of the stochastic expression. 16 | } 17 | \description{ 18 | Specify a normal distribution of the form \code{exp(normal(mu, sigma))}. 19 | 20 | The logarithm of the return value is normally distributed. When optimizing, 21 | this variable is constrained to be positive. 22 | } 23 | \seealso{ 24 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 25 | \code{bayesian_parameter_sampling()} 26 | } 27 | -------------------------------------------------------------------------------- /man/loguniform.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{loguniform} 4 | \alias{loguniform} 5 | \title{Specify a log uniform distribution} 6 | \usage{ 7 | loguniform(min_value, max_value) 8 | } 9 | \arguments{ 10 | \item{min_value}{A double where the minimum value in the range will be 11 | \code{exp(min_value)} (inclusive).} 12 | 13 | \item{max_value}{A double where the maximum value in the range will be 14 | \code{exp(max_value)} (inclusive).} 15 | } 16 | \value{ 17 | A list of the stochastic expression. 18 | } 19 | \description{ 20 | Specify a log uniform distribution.
21 | 22 | A value is drawn according to \code{exp(uniform(min_value, max_value))} so that 23 | the logarithm of the return value is uniformly distributed. When optimizing, 24 | this variable is constrained to the interval 25 | \verb{[exp(min_value), exp(max_value)]}. 26 | } 27 | \seealso{ 28 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 29 | \code{bayesian_parameter_sampling()} 30 | } 31 | -------------------------------------------------------------------------------- /man/merge_results.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/do_azureml_parallel.R 3 | \name{merge_results} 4 | \alias{merge_results} 5 | \title{Combine the results from the parallel training.} 6 | \usage{ 7 | merge_results(node_count, process_count_per_node, run, source_directory) 8 | } 9 | \arguments{ 10 | \item{node_count}{Number of nodes in the AmlCompute cluster.} 11 | 12 | \item{process_count_per_node}{Number of processes per node.} 13 | 14 | \item{run}{The run object whose output needs to be combined.} 15 | 16 | \item{source_directory}{The directory where the output from the run 17 | would be downloaded.} 18 | } 19 | \description{ 20 | Combine the results from the parallel training. 21 | } 22 | -------------------------------------------------------------------------------- /man/mount_file_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{mount_file_dataset} 4 | \alias{mount_file_dataset} 5 | \title{Create a context manager for mounting file streams defined by the dataset as local files.} 6 | \usage{ 7 | mount_file_dataset(dataset, mount_point = NULL) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Dataset object.} 11 | 12 | \item{mount_point}{The local directory to mount the files to. If NULL, the 13 | data will be mounted into a temporary directory.} 14 | } 15 | \value{ 16 | Returns a context manager for managing the lifecycle of the mount of 17 | type \code{azureml.dataprep.fuse.daemon.MountContext}. 18 | } 19 | \description{ 20 | Create a context manager for mounting file streams defined by the dataset as local files. 21 | A context manager will be returned to manage the lifecycle of the mount. 22 | To mount, you will need to enter the context manager and to unmount, exit from 23 | the context manager. Mount is only supported on Unix or Unix-like operating systems 24 | and libfuse must be present. If you are running inside a docker container, the docker 25 | container must be started with the \code{--privileged} flag or started with 26 | \verb{--cap-add SYS_ADMIN --device /dev/fuse}. 27 | } 28 | -------------------------------------------------------------------------------- /man/normal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{normal} 4 | \alias{normal} 5 | \title{Specify a real value that is normally-distributed with mean \code{mu} and standard 6 | deviation \code{sigma}} 7 | \usage{ 8 | normal(mu, sigma) 9 | } 10 | \arguments{ 11 | \item{mu}{A double of the mean of the normal distribution.} 12 | 13 | \item{sigma}{A double of the standard deviation of the normal distribution.} 14 | } 15 | \value{ 16 | A list of the stochastic expression. 
17 | } 18 | \description{ 19 | Specify a real value that is normally-distributed with mean \code{mu} and 20 | standard deviation \code{sigma}. 21 | 22 | When optimizing, this is an unconstrained variable. 23 | } 24 | \seealso{ 25 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 26 | \code{bayesian_parameter_sampling()} 27 | } 28 | -------------------------------------------------------------------------------- /man/plot_run_details.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{plot_run_details} 4 | \alias{plot_run_details} 5 | \title{Generate table of run details} 6 | \usage{ 7 | plot_run_details(run) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | } 12 | \value{ 13 | Datatable containing run details 14 | } 15 | \description{ 16 | Plot a table of run details including 17 | \itemize{ 18 | \item ID 19 | \item Status 20 | \item Start Time 21 | \item Duration 22 | \item Script Name 23 | \item Arguments 24 | \item Link to Web Portal view 25 | \item Errors 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /man/primary_metric_goal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{primary_metric_goal} 4 | \alias{primary_metric_goal} 5 | \title{Define supported metric goals for hyperparameter tuning} 6 | \usage{ 7 | primary_metric_goal(goal) 8 | } 9 | \arguments{ 10 | \item{goal}{A string of the metric goal (either "MAXIMIZE" or "MINIMIZE").} 11 | } 12 | \value{ 13 | The \code{PrimaryMetricGoal} object. 14 | } 15 | \description{ 16 | A metric goal is used to determine whether a higher value for a metric 17 | is better or worse. Metric goals are used when comparing runs based on 18 | the primary metric. For example, you may want to maximize accuracy or 19 | minimize error. 20 | 21 | The primary metric name and goal are specified to \code{hyperdrive_config()} 22 | when you configure a HyperDrive run. 23 | } 24 | -------------------------------------------------------------------------------- /man/promote_headers_behavior.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{promote_headers_behavior} 4 | \alias{promote_headers_behavior} 5 | \title{Defines options for how column headers are processed when reading data from files to create a dataset.} 6 | \usage{ 7 | promote_headers_behavior(option) 8 | } 9 | \arguments{ 10 | \item{option}{An integer corresponding to an option for how column headers are to be processed 11 | \itemize{ 12 | \item 0: NO_HEADERS No column headers are read 13 | \item 1: ONLY_FIRST_FILE_HAS_HEADERS Read headers only from first row of first file, everything else is data. 14 | \item 2: COMBINE_ALL_FILES_HEADERS Read headers from first row of each file, combining identically named columns. 15 | \item 3: ALL_FILES_HAVE_SAME_HEADERS Read headers from first row of first file, drops first row from other files. 16 | }} 17 | } 18 | \value{ 19 | The PromoteHeadersBehavior object. 20 | } 21 | \description{ 22 | Defines options for how column headers are processed when reading data from files to create a dataset. 23 | These enumeration values are used in the Dataset class method. 
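A minimal sketch (assuming \code{ds} is a registered datastore holding the delimited files, and assuming the \code{header} parameter of \code{create_tabular_dataset_from_delimited_files()} accepts this object):\preformatted{# read headers only from the first row of the first file (option 1)
header_option <- promote_headers_behavior(1)
dataset <- create_tabular_dataset_from_delimited_files(
    path = data_path(ds, "mydata/*.csv"),
    header = header_option)
}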
24 | } 25 | -------------------------------------------------------------------------------- /man/pull_model_package_image.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{pull_model_package_image} 4 | \alias{pull_model_package_image} 5 | \title{Pull the Docker image from a \code{ModelPackage} to your local 6 | Docker environment} 7 | \usage{ 8 | pull_model_package_image(package) 9 | } 10 | \arguments{ 11 | \item{package}{The \code{ModelPackage} object.} 12 | } 13 | \value{ 14 | None 15 | } 16 | \description{ 17 | Pull the Docker image from a created \code{ModelPackage} to your 18 | local Docker environment. The output of this call will 19 | display the name of the image. For example: 20 | \verb{Status: Downloaded newer image for myworkspacef78fd10.azurecr.io/package:20190822181338}. 21 | 22 | This can only be used with a Docker image \code{ModelPackage} (where 23 | \code{package_model()} was called with \code{generate_dockerfile = FALSE}). 24 | 25 | After you've pulled the image, you can start a local container based 26 | on this image using Docker commands. 27 | } 28 | \seealso{ 29 | \code{package_model()} 30 | } 31 | -------------------------------------------------------------------------------- /man/qlognormal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{qlognormal} 4 | \alias{qlognormal} 5 | \title{Specify a normal distribution of the form 6 | \code{round(exp(normal(mu, sigma)) / q) * q}} 7 | \usage{ 8 | qlognormal(mu, sigma, q) 9 | } 10 | \arguments{ 11 | \item{mu}{A double of the mean of the normal distribution.} 12 | 13 | \item{sigma}{A double of the standard deviation of the normal distribution.} 14 | 15 | \item{q}{An integer of the smoothing factor.} 16 | } 17 | \value{ 18 | A list of the stochastic expression. 19 | } 20 | \description{ 21 | Specify a normal distribution of the form 22 | \code{round(exp(normal(mu, sigma)) / q) * q}. 23 | 24 | Suitable for a discrete variable with respect to which the objective is 25 | smooth and gets smoother with the size of the variable, which is bounded 26 | from one side. 27 | } 28 | \seealso{ 29 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 30 | \code{bayesian_parameter_sampling()} 31 | } 32 | -------------------------------------------------------------------------------- /man/qloguniform.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{qloguniform} 4 | \alias{qloguniform} 5 | \title{Specify a uniform distribution of the form 6 | \verb{round(exp(uniform(min_value, max_value)) / q) * q}} 7 | \usage{ 8 | qloguniform(min_value, max_value, q) 9 | } 10 | \arguments{ 11 | \item{min_value}{A double of the minimum value in the range (inclusive).} 12 | 13 | \item{max_value}{A double of the maximum value in the range (inclusive).} 14 | 15 | \item{q}{An integer of the smoothing factor.} 16 | } 17 | \value{ 18 | A list of the stochastic expression. 19 | } 20 | \description{ 21 | Specify a uniform distribution of the form 22 | \verb{round(exp(uniform(min_value, max_value)) / q) * q}.
23 | 24 | This is suitable for a discrete variable with respect to which the objective 25 | is "smooth", and gets smoother with the size of the value, but which should 26 | be bounded both above and below. 27 | } 28 | \seealso{ 29 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 30 | \code{bayesian_parameter_sampling()} 31 | } 32 | -------------------------------------------------------------------------------- /man/qnormal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{qnormal} 4 | \alias{qnormal} 5 | \title{Specify a normal distribution of the form \verb{round(normal(mu, sigma) / q) * q}} 6 | \usage{ 7 | qnormal(mu, sigma, q) 8 | } 9 | \arguments{ 10 | \item{mu}{A double of the mean of the normal distribution.} 11 | 12 | \item{sigma}{A double of the standard deviation of the normal distribution.} 13 | 14 | \item{q}{An integer of the smoothing factor.} 15 | } 16 | \value{ 17 | A list of the stochastic expression. 18 | } 19 | \description{ 20 | Specify a normal distribution of the form \code{round(normal(mu, sigma) / q) * q}. 21 | 22 | Suitable for a discrete variable that probably takes a value around \code{mu}, 23 | but is fundamentally unbounded. 24 | } 25 | \seealso{ 26 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 27 | \code{bayesian_parameter_sampling()} 28 | } 29 | -------------------------------------------------------------------------------- /man/quniform.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{quniform} 4 | \alias{quniform} 5 | \title{Specify a uniform distribution of the form 6 | \code{round(uniform(min_value, max_value) / q) * q}} 7 | \usage{ 8 | quniform(min_value, max_value, q) 9 | } 10 | \arguments{ 11 | \item{min_value}{A double of the minimum value in the range (inclusive).} 12 | 13 | \item{max_value}{A double of the maximum value in the range (inclusive).} 14 | 15 | \item{q}{An integer of the smoothing factor.} 16 | } 17 | \value{ 18 | A list of the stochastic expression. 19 | } 20 | \description{ 21 | Specify a uniform distribution of the form 22 | \code{round(uniform(min_value, max_value) / q) * q}. 23 | 24 | This is suitable for a discrete value with respect to which the objective 25 | is still somewhat "smooth", but which should be bounded both above and below. 26 | } 27 | \seealso{ 28 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 29 | \code{bayesian_parameter_sampling()} 30 | } 31 | -------------------------------------------------------------------------------- /man/randint.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{randint} 4 | \alias{randint} 5 | \title{Specify a set of random integers in the range \verb{[0, upper)}} 6 | \usage{ 7 | randint(upper) 8 | } 9 | \arguments{ 10 | \item{upper}{An integer of the upper bound for the range of 11 | integers (exclusive).} 12 | } 13 | \value{ 14 | A list of the stochastic expression. 15 | } 16 | \description{ 17 | Specify a set of random integers in the range \verb{[0, upper)} 18 | to sample the hyperparameters from.
19 | 20 | The semantics of this distribution are that there is no more 21 | correlation in the loss function between nearby integer values 22 | than between more distant integer values. This is an 23 | appropriate distribution for describing random seeds, for example. 24 | If the loss function is probably more correlated for nearby integer 25 | values, then you should probably use one of the "quantized" continuous 26 | distributions, such as \code{quniform()}, \code{qloguniform()}, \code{qnormal()}, 27 | or \code{qlognormal()}. 28 | } 29 | \seealso{ 30 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 31 | \code{bayesian_parameter_sampling()} 32 | } 33 | -------------------------------------------------------------------------------- /man/random_parameter_sampling.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{random_parameter_sampling} 4 | \alias{random_parameter_sampling} 5 | \title{Define random sampling over a hyperparameter search space} 6 | \usage{ 7 | random_parameter_sampling(parameter_space, properties = NULL) 8 | } 9 | \arguments{ 10 | \item{parameter_space}{A named list containing each parameter and its 11 | distribution, e.g. \code{list("parameter" = distribution)}.} 12 | 13 | \item{properties}{A named list of additional properties for the algorithm.} 14 | } 15 | \value{ 16 | The \code{RandomParameterSampling} object. 17 | } 18 | \description{ 19 | In random sampling, hyperparameter values are randomly selected from the 20 | defined search space. Random sampling allows the search space to include 21 | both discrete and continuous hyperparameters. 22 | } 23 | \section{Details}{ 24 | 25 | In this sampling algorithm, parameter values are chosen from a set of 26 | discrete values or a distribution over a continuous range. Functions you can 27 | use include: 28 | \code{choice()}, \code{randint()}, \code{uniform()}, \code{quniform()}, \code{loguniform()}, 29 | \code{qloguniform()}, \code{normal()}, \code{qnormal()}, \code{lognormal()}, and \code{qlognormal()}. 30 | } 31 | 32 | \examples{ 33 | \dontrun{ 34 | param_sampling <- random_parameter_sampling(list("learning_rate" = normal(10, 3), 35 | "keep_probability" = uniform(0.05, 0.1), 36 | "batch_size" = choice(c(16, 32, 64, 128)))) 37 | } 38 | } 39 | \seealso{ 40 | \code{choice()}, \code{randint()}, \code{uniform()}, \code{quniform()}, \code{loguniform()}, 41 | \code{qloguniform()}, \code{normal()}, \code{qnormal()}, \code{lognormal()}, \code{qlognormal()} 42 | } 43 | -------------------------------------------------------------------------------- /man/random_split_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{random_split_dataset} 4 | \alias{random_split_dataset} 5 | \title{Split file streams in the dataset into two parts randomly and approximately by the percentage specified.} 6 | \usage{ 7 | random_split_dataset(dataset, percentage, seed = NULL) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Dataset object.} 11 | 12 | \item{percentage}{The approximate percentage to split the Dataset by. This must 13 | be a number between 0.0 and 1.0.} 14 | 15 | \item{seed}{An optional seed to use for the random generator.} 16 | } 17 | \value{ 18 | A list of two Dataset objects representing the two parts of the split.
19 | } 20 | \description{ 21 | Split file streams in the dataset into two parts randomly and approximately by the percentage specified. 22 | } 23 | -------------------------------------------------------------------------------- /man/register_azure_postgre_sql_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datastore.R 3 | \name{register_azure_postgre_sql_datastore} 4 | \alias{register_azure_postgre_sql_datastore} 5 | \title{Initialize a new Azure PostgreSQL Datastore.} 6 | \usage{ 7 | register_azure_postgre_sql_datastore( 8 | workspace, 9 | datastore_name, 10 | server_name, 11 | database_name, 12 | user_id, 13 | user_password, 14 | port_number = NULL, 15 | endpoint = NULL, 16 | overwrite = FALSE 17 | ) 18 | } 19 | \arguments{ 20 | \item{workspace}{The workspace this datastore belongs to.} 21 | 22 | \item{datastore_name}{The datastore name.} 23 | 24 | \item{server_name}{The PostgreSQL server name.} 25 | 26 | \item{database_name}{The PostgreSQL database name.} 27 | 28 | \item{user_id}{The User ID of the PostgreSQL server.} 29 | 30 | \item{user_password}{The User Password of the PostgreSQL server.} 31 | 32 | \item{port_number}{The Port Number of the PostgreSQL server.} 33 | 34 | \item{endpoint}{The endpoint of the PostgreSQL server. If NULL, defaults to 35 | postgres.database.azure.com.} 36 | 37 | \item{overwrite}{Whether to overwrite an existing datastore. If the datastore 38 | does not exist, it will create one. The default is FALSE.} 39 | } 40 | \value{ 41 | The \code{azureml.data.azure_postgre_sql_datastore.AzurePostgreSqlDatastore} 42 | object. 43 | } 44 | \description{ 45 | Initialize a new Azure PostgreSQL Datastore. 46 | } 47 | -------------------------------------------------------------------------------- /man/register_azure_sql_database_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datastore.R 3 | \name{register_azure_sql_database_datastore} 4 | \alias{register_azure_sql_database_datastore} 5 | \title{Initialize a new Azure SQL database Datastore.} 6 | \usage{ 7 | register_azure_sql_database_datastore( 8 | workspace, 9 | datastore_name, 10 | server_name, 11 | database_name, 12 | tenant_id, 13 | client_id, 14 | client_secret, 15 | resource_url = NULL, 16 | authority_url = NULL, 17 | endpoint = NULL, 18 | overwrite = FALSE, 19 | username = NULL, 20 | password = NULL 21 | ) 22 | } 23 | \arguments{ 24 | \item{workspace}{The workspace this datastore belongs to.} 25 | 26 | \item{datastore_name}{The datastore name.} 27 | 28 | \item{server_name}{The SQL server name.} 29 | 30 | \item{database_name}{The SQL database name.} 31 | 32 | \item{tenant_id}{The Directory ID/Tenant ID of the service principal.} 33 | 34 | \item{client_id}{The Client ID/Application ID of the service principal.} 35 | 36 | \item{client_secret}{The secret of the service principal.} 37 | 38 | \item{resource_url}{The resource URL, which determines what operations will 39 | be performed on the SQL database store, if NULL, defaults to 40 | https://database.windows.net/.} 41 | 42 | \item{authority_url}{The authority URL used to authenticate the user, defaults 43 | to https://login.microsoftonline.com.} 44 | 45 | \item{endpoint}{The endpoint of the SQL server. 
If NULL, defaults to 46 | database.windows.net.} 47 | 48 | \item{overwrite}{Whether to overwrite an existing datastore. If the datastore does 49 | not exist, it will create one. The default is FALSE.} 50 | 51 | \item{username}{The username of the database user to access the database.} 52 | 53 | \item{password}{The password of the database user to access the database.} 54 | } 55 | \value{ 56 | The \code{azureml.data.azure_sql_database_datastore.AzureSqlDatabaseDatastore} 57 | object. 58 | } 59 | \description{ 60 | Initialize a new Azure SQL database Datastore. 61 | } 62 | -------------------------------------------------------------------------------- /man/register_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{register_dataset} 4 | \alias{register_dataset} 5 | \title{Register a Dataset in the workspace} 6 | \usage{ 7 | register_dataset( 8 | workspace, 9 | dataset, 10 | name, 11 | description = NULL, 12 | tags = NULL, 13 | create_new_version = FALSE 14 | ) 15 | } 16 | \arguments{ 17 | \item{workspace}{The AzureML workspace in which the Dataset is to be registered.} 18 | 19 | \item{dataset}{The dataset to be registered.} 20 | 21 | \item{name}{The name of the Dataset in the workspace.} 22 | 23 | \item{description}{A description of the Dataset.} 24 | 25 | \item{tags}{Named list of tags to give the Dataset. Defaults to NULL.} 26 | 27 | \item{create_new_version}{Boolean to register the dataset as a new version under the specified name.} 28 | } 29 | \value{ 30 | The registered Dataset object. 31 | } 32 | \description{ 33 | Register the Dataset in the workspace, making it available to other users of the workspace. 34 | } 35 | -------------------------------------------------------------------------------- /man/register_do_azureml_parallel.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/do_azureml_parallel.R 3 | \name{register_do_azureml_parallel} 4 | \alias{register_do_azureml_parallel} 5 | \title{Registers AMLCompute as a parallel backend with the foreach package.} 6 | \usage{ 7 | register_do_azureml_parallel(workspace, compute_target) 8 | } 9 | \arguments{ 10 | \item{workspace}{The Workspace object which has the compute_target.} 11 | 12 | \item{compute_target}{The AMLCompute target to use for parallelization.} 13 | } 14 | \description{ 15 | Registers AMLCompute as a parallel backend with the foreach package. 16 | } 17 | -------------------------------------------------------------------------------- /man/register_environment.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/environment.R 3 | \name{register_environment} 4 | \alias{register_environment} 5 | \title{Register an environment in the workspace} 6 | \usage{ 7 | register_environment(environment, workspace) 8 | } 9 | \arguments{ 10 | \item{environment}{The \code{Environment} object.} 11 | 12 | \item{workspace}{The \code{Workspace} object.} 13 | } 14 | \value{ 15 | The \code{Environment} object. 16 | } 17 | \description{ 18 | The environment is automatically registered with your workspace when you 19 | submit an experiment or deploy a web service. You can also manually register 20 | the environment with \code{register_environment()}. 
This operation makes the 21 | environment into an entity that is tracked and versioned in the cloud, and 22 | can be shared between workspace users. 23 | 24 | When used for the first time in training or deployment, the environment is 25 | registered with the workspace, built, and deployed on the compute target. 26 | The environments are cached by the service. Reusing a cached environment 27 | takes much less time than using a new environment or one that has been updated. 28 | } 29 | -------------------------------------------------------------------------------- /man/reload_local_webservice_assets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice-local.R 3 | \name{reload_local_webservice_assets} 4 | \alias{reload_local_webservice_assets} 5 | \title{Reload a local web service's entry script and dependencies} 6 | \usage{ 7 | reload_local_webservice_assets(webservice, wait = FALSE) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{LocalWebservice} object.} 11 | 12 | \item{wait}{If \code{TRUE}, wait for the service's container to reach a 13 | healthy state. Defaults to \code{FALSE}.} 14 | } 15 | \value{ 16 | None 17 | } 18 | \description{ 19 | This restarts the service's container with copies of updated assets, 20 | including the entry script and local dependencies, but it does not 21 | rebuild the underlying image. Accordingly, changes to the environment 22 | will not be reflected in the reloaded local web service. To handle those 23 | changes, call \code{update_local_webservice()} instead. 24 | } 25 | -------------------------------------------------------------------------------- /man/resource_configuration.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{resource_configuration} 4 | \alias{resource_configuration} 5 | \title{Initialize the ResourceConfiguration.} 6 | \usage{ 7 | resource_configuration(cpu = NULL, memory_in_gb = NULL, gpu = NULL) 8 | } 9 | \arguments{ 10 | \item{cpu}{The number of CPU cores to allocate for this resource. Can be a decimal.} 11 | 12 | \item{memory_in_gb}{The amount of memory (in GB) to allocate for this resource. 13 | Can be a decimal.} 14 | 15 | \item{gpu}{The number of GPUs to allocate for this resource.} 16 | } 17 | \value{ 18 | The \code{ResourceConfiguration} object. 19 | } 20 | \description{ 21 | Initialize the ResourceConfiguration.
22 | } 23 | \examples{ 24 | \dontrun{ 25 | rc <- resource_configuration(2, 2, 0) 26 | 27 | registered_model <- register_model_from_run(run, "my_model_name", 28 | "path_to_my_model", 29 | resource_configuration = rc) 30 | } 31 | } 32 | \seealso{ 33 | \code{\link{register_model_from_run}} 34 | } 35 | -------------------------------------------------------------------------------- /man/save_model_package_files.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{save_model_package_files} 4 | \alias{save_model_package_files} 5 | \title{Save a Dockerfile and dependencies from a \code{ModelPackage} to 6 | your local file system} 7 | \usage{ 8 | save_model_package_files(package, output_directory) 9 | } 10 | \arguments{ 11 | \item{package}{The \code{ModelPackage} object.} 12 | 13 | \item{output_directory}{A string of the local directory that 14 | will be created to contain the contents of the package.} 15 | } 16 | \value{ 17 | None 18 | } 19 | \description{ 20 | Download the Dockerfile, model, and other assets needed to build 21 | an image locally from a created \code{ModelPackage}. 22 | 23 | This can only be used with a Dockerfile \code{ModelPackage} (where 24 | \code{package_model()} was called with \code{generate_dockerfile = TRUE} to 25 | indicate that you wanted only the files and not a fully built image). 26 | 27 | \code{save_model_package_files()} downloads the files needed to build the 28 | image to the \code{output_directory}. The Dockerfile included in the saved 29 | files references a base image stored in an Azure container registry. 30 | When you build the image on your local Docker installation, you will 31 | need the address, username, and password to authenticate to the registry. 32 | You can get this information using \code{get_model_package_container_registry()}. 33 | } 34 | \seealso{ 35 | \code{package_model()}, \code{get_model_package_container_registry()} 36 | } 37 | -------------------------------------------------------------------------------- /man/set_default_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{set_default_datastore} 4 | \alias{set_default_datastore} 5 | \title{Set the default datastore for a workspace} 6 | \usage{ 7 | set_default_datastore(workspace, datastore_name) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object.} 11 | 12 | \item{datastore_name}{The name of the datastore to be set as default.} 13 | } 14 | \value{ 15 | None 16 | } 17 | \description{ 18 | Set the default datastore associated with the workspace.
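For example, a minimal sketch (here \code{ws} is an existing \code{Workspace} and "mydatastore" is the hypothetical name of an already-registered datastore): \preformatted{set_default_datastore(ws, datastore_name = "mydatastore")}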
19 | } 20 | \seealso{ 21 | \code{\link[=get_default_datastore]{get_default_datastore()}} 22 | } 23 | -------------------------------------------------------------------------------- /man/set_secrets.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/keyvault.R 3 | \name{set_secrets} 4 | \alias{set_secrets} 5 | \title{Add secrets to a keyvault} 6 | \usage{ 7 | set_secrets(keyvault, secrets) 8 | } 9 | \arguments{ 10 | \item{keyvault}{The \code{Keyvault} object.} 11 | 12 | \item{secrets}{The named list of secrets to be added to the keyvault, 13 | where element name corresponds to the secret name.} 14 | } 15 | \value{ 16 | None 17 | } 18 | \description{ 19 | Add a named list of secrets into the keyvault associated with the 20 | workspace. 21 | } 22 | \examples{ 23 | \dontrun{ 24 | ws <- load_workspace_from_config() 25 | my_secret <- Sys.getenv("MY_SECRET") 26 | keyvault <- get_default_keyvault(ws) 27 | set_secrets(keyvault, list("mysecret" = my_secret)) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /man/skip_from_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{skip_from_dataset} 4 | \alias{skip_from_dataset} 5 | \title{Skip file streams from the top of the dataset by the specified count.} 6 | \usage{ 7 | skip_from_dataset(dataset, count) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Dataset object.} 11 | 12 | \item{count}{The number of file streams to skip.} 13 | } 14 | \value{ 15 | A new Dataset object representing the dataset with file streams skipped. 16 | } 17 | \description{ 18 | Skip file streams from the top of the dataset by the specified count. 19 | } 20 | -------------------------------------------------------------------------------- /man/split_tasks.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/do_azureml_parallel.R 3 | \name{split_tasks} 4 | \alias{split_tasks} 5 | \title{Splits the job into parallel tasks.} 6 | \usage{ 7 | split_tasks(args_list, node_count, process_count_per_node) 8 | } 9 | \arguments{ 10 | \item{args_list}{The list of arguments which are distributed across all the 11 | processes.} 12 | 13 | \item{node_count}{Number of nodes in the AmlCompute cluster.} 14 | 15 | \item{process_count_per_node}{Number of processes per node.} 16 | } 17 | \description{ 18 | Splits the job into parallel tasks. 19 | } 20 | -------------------------------------------------------------------------------- /man/start_logging_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/experiment.R 3 | \name{start_logging_run} 4 | \alias{start_logging_run} 5 | \title{Create an interactive logging run} 6 | \usage{ 7 | start_logging_run(experiment, outputs = NULL, snapshot_directory = NULL) 8 | } 9 | \arguments{ 10 | \item{experiment}{The \code{Experiment} object.} 11 | 12 | \item{outputs}{(Optional) A string of the local path to an 13 | outputs directory to track.} 14 | 15 | \item{snapshot_directory}{(Optional) Directory to take snapshot of.
16 | Setting to \code{NULL} will take no snapshot.} 17 | } 18 | \value{ 19 | The \code{Run} object of the started run. 20 | } 21 | \description{ 22 | Create an interactive run that allows the user to log 23 | metrics and artifacts to a run locally. 24 | 25 | Any metrics that are logged during the interactive run session 26 | are added to the run record in the experiment. If an output 27 | directory is specified, the contents of that directory are 28 | uploaded as run artifacts upon run completion. 29 | 30 | This method is useful if you would like to add experiment 31 | tracking and artifact logging to the corresponding run record 32 | in Azure ML for local runs without having to submit an experiment 33 | run to a compute target with \code{submit_experiment()}. 34 | } 35 | \examples{ 36 | \dontrun{ 37 | ws <- load_workspace_from_config() 38 | exp <- experiment(ws, name = 'myexperiment') 39 | run <- start_logging_run(exp) 40 | log_metric_to_run("Accuracy", 0.9) 41 | complete_run(run) 42 | } 43 | } 44 | \seealso{ 45 | \code{complete_run()} 46 | } 47 | -------------------------------------------------------------------------------- /man/submit_child_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{submit_child_run} 4 | \alias{submit_child_run} 5 | \title{Submit an experiment and return the active child run} 6 | \usage{ 7 | submit_child_run(parent_run, config = NULL, tags = NULL) 8 | } 9 | \arguments{ 10 | \item{parent_run}{The parent \code{Run} object.} 11 | 12 | \item{config}{The \code{RunConfig} object.} 13 | 14 | \item{tags}{Tags to be added to the submitted run, e.g., \code{list("tag" = "value")}.} 15 | } 16 | \value{ 17 | A \code{Run} object. 18 | } 19 | \description{ 20 | Submit an experiment and return the active child run. 21 | } 22 | -------------------------------------------------------------------------------- /man/submit_experiment.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/experiment.R 3 | \name{submit_experiment} 4 | \alias{submit_experiment} 5 | \title{Submit an experiment and return the active created run} 6 | \usage{ 7 | submit_experiment(experiment, config, tags = NULL) 8 | } 9 | \arguments{ 10 | \item{experiment}{The \code{Experiment} object.} 11 | 12 | \item{config}{The \code{Estimator} or \code{HyperDriveConfig} object.} 13 | 14 | \item{tags}{A named list of tags for the submitted run, e.g. 15 | \code{list("tag" = "value")}.} 16 | } 17 | \value{ 18 | The \code{ScriptRun} or \code{HyperDriveRun} object. 19 | } 20 | \description{ 21 | \code{submit_experiment()} is an asynchronous call to Azure Machine Learning 22 | service to execute a trial on local or remote compute. Depending on the 23 | configuration, \code{submit_experiment()} will automatically prepare your 24 | execution environments, execute your code, and capture your source code 25 | and results in the experiment's run history. 26 | 27 | To submit an experiment you first need to create a configuration object 28 | describing how the experiment is to be run. The configuration depends on 29 | the type of trial required. For a script run, provide an \code{Estimator} object 30 | to the \code{config} parameter. For a HyperDrive run for hyperparameter tuning, 31 | provide a \code{HyperDriveConfig} to \code{config}.
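As a sketch of the HyperDrive case (assuming \code{exp} is an \code{Experiment} and \code{hd_config} is an object created with \code{hyperdrive_config()}; both names are illustrative): \preformatted{run <- submit_experiment(exp, config = hd_config)}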
32 | } 33 | \examples{ 34 | # This example submits an Estimator experiment 35 | \dontrun{ 36 | ws <- load_workspace_from_config() 37 | compute_target <- get_compute(ws, cluster_name = 'mycluster') 38 | exp <- experiment(ws, name = 'myexperiment') 39 | est <- estimator(source_directory = '.', 40 | entry_script = 'train.R', 41 | compute_target = compute_target) 42 | run <- submit_experiment(exp, est) 43 | } 44 | } 45 | \seealso{ 46 | \code{estimator()}, \code{hyperdrive_config()} 47 | } 48 | -------------------------------------------------------------------------------- /man/take_from_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{take_from_dataset} 4 | \alias{take_from_dataset} 5 | \title{Take a sample of file streams from the top of the dataset by the specified count.} 6 | \usage{ 7 | take_from_dataset(dataset, count) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Dataset object.} 11 | 12 | \item{count}{The number of file streams to take.} 13 | } 14 | \value{ 15 | A new Dataset object representing the sampled dataset. 16 | } 17 | \description{ 18 | Take a sample of file streams from the top of the dataset by the specified count. 19 | } 20 | -------------------------------------------------------------------------------- /man/take_sample_from_dataset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{take_sample_from_dataset} 4 | \alias{take_sample_from_dataset} 5 | \title{Take a random sample of file streams in the dataset approximately by the probability specified.} 6 | \usage{ 7 | take_sample_from_dataset(dataset, probability, seed = NULL) 8 | } 9 | \arguments{ 10 | \item{dataset}{The Dataset object.} 11 | 12 | \item{probability}{The probability of a file stream being included in the sample.} 13 | 14 | \item{seed}{An optional seed to use for the random generator.} 15 | } 16 | \value{ 17 | A new Dataset object representing the sampled dataset. 18 | } 19 | \description{ 20 | Take a random sample of file streams in the dataset approximately by the probability specified. 21 | } 22 | -------------------------------------------------------------------------------- /man/uniform.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hyperdrive.R 3 | \name{uniform} 4 | \alias{uniform} 5 | \title{Specify a uniform distribution of options to sample from} 6 | \usage{ 7 | uniform(min_value, max_value) 8 | } 9 | \arguments{ 10 | \item{min_value}{A double of the minimum value in the range 11 | (inclusive).} 12 | 13 | \item{max_value}{A double of the maximum value in the range 14 | (inclusive).} 15 | } 16 | \value{ 17 | A list of the stochastic expression. 18 | } 19 | \description{ 20 | Specify a uniform distribution of options to sample the 21 | hyperparameters from.
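For example, a sketch in which a hypothetical \code{learning_rate} hyperparameter is sampled uniformly between 0.001 and 0.1: \preformatted{random_parameter_sampling(list(learning_rate = uniform(0.001, 0.1)))}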
22 | } 23 | \seealso{ 24 | \code{random_parameter_sampling()}, \code{grid_parameter_sampling()}, 25 | \code{bayesian_parameter_sampling()} 26 | } 27 | -------------------------------------------------------------------------------- /man/unregister_all_dataset_versions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datasets.R 3 | \name{unregister_all_dataset_versions} 4 | \alias{unregister_all_dataset_versions} 5 | \title{Unregister all versions under the registration name of this dataset from the workspace.} 6 | \usage{ 7 | unregister_all_dataset_versions(dataset) 8 | } 9 | \arguments{ 10 | \item{dataset}{The dataset to be unregistered.} 11 | } 12 | \value{ 13 | None 14 | } 15 | \description{ 16 | Unregister all versions under the registration name of this dataset from the workspace. 17 | } 18 | -------------------------------------------------------------------------------- /man/unregister_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datastore.R 3 | \name{unregister_datastore} 4 | \alias{unregister_datastore} 5 | \title{Unregister a datastore from its associated workspace} 6 | \usage{ 7 | unregister_datastore(datastore) 8 | } 9 | \arguments{ 10 | \item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} 11 | } 12 | \value{ 13 | None 14 | } 15 | \description{ 16 | Unregister the datastore from its associated workspace. The 17 | underlying Azure storage will not be deleted. 18 | } 19 | -------------------------------------------------------------------------------- /man/update_aci_webservice.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice-aci.R 3 | \name{update_aci_webservice} 4 | \alias{update_aci_webservice} 5 | \title{Update a deployed ACI web service} 6 | \usage{ 7 | update_aci_webservice( 8 | webservice, 9 | tags = NULL, 10 | properties = NULL, 11 | description = NULL, 12 | auth_enabled = NULL, 13 | ssl_enabled = NULL, 14 | ssl_cert_pem_file = NULL, 15 | ssl_key_pem_file = NULL, 16 | ssl_cname = NULL, 17 | enable_app_insights = NULL, 18 | models = NULL, 19 | inference_config = NULL 20 | ) 21 | } 22 | \arguments{ 23 | \item{webservice}{The \code{AciWebservice} object.} 24 | 25 | \item{tags}{A named list of key-value tags for the web service, 26 | e.g. \code{list("key" = "value")}. Will replace existing tags.} 27 | 28 | \item{properties}{A named list of key-value properties to add for the web 29 | service, e.g. 
\code{list("key" = "value")}.} 30 | 31 | \item{description}{A string of the description to give the web service.} 32 | 33 | \item{auth_enabled}{If \code{TRUE}, enable key-based authentication for the 34 | web service.} 35 | 36 | \item{ssl_enabled}{Whether to enable SSL for this web service.} 37 | 38 | \item{ssl_cert_pem_file}{A string of the cert file needed if SSL is enabled.} 39 | 40 | \item{ssl_key_pem_file}{A string of the key file needed if SSL is enabled.} 41 | 42 | \item{ssl_cname}{A string of the cname if SSL is enabled.} 43 | 44 | \item{enable_app_insights}{If \code{TRUE}, enable AppInsights for the web service.} 45 | 46 | \item{models}{A list of \code{Model} objects to package into the updated service.} 47 | 48 | \item{inference_config}{An \code{InferenceConfig} object.} 49 | } 50 | \value{ 51 | None 52 | } 53 | \description{ 54 | Update an ACI web service with the provided properties. You can update the 55 | web service to use a new model, a new entry script, or new dependencies 56 | that can be specified in an inference configuration. 57 | 58 | Values left as \code{NULL} will remain unchanged in the web service. 59 | } 60 | -------------------------------------------------------------------------------- /man/update_aml_compute.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{update_aml_compute} 4 | \alias{update_aml_compute} 5 | \title{Update scale settings for an AmlCompute cluster} 6 | \usage{ 7 | update_aml_compute( 8 | cluster, 9 | min_nodes = NULL, 10 | max_nodes = NULL, 11 | idle_seconds_before_scaledown = NULL 12 | ) 13 | } 14 | \arguments{ 15 | \item{cluster}{The \code{AmlCompute} cluster.} 16 | 17 | \item{min_nodes}{An integer of the minimum number of nodes to use on 18 | the cluster.} 19 | 20 | \item{max_nodes}{An integer of the maximum number of nodes to use on 21 | the cluster.} 22 | 23 | \item{idle_seconds_before_scaledown}{An integer of the node idle time 24 | in seconds before scaling down the cluster.} 25 | } 26 | \value{ 27 | None 28 | } 29 | \description{ 30 | Update the scale settings for an existing AmlCompute cluster. 31 | } 32 | -------------------------------------------------------------------------------- /man/update_local_webservice.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice-local.R 3 | \name{update_local_webservice} 4 | \alias{update_local_webservice} 5 | \title{Update a local web service} 6 | \usage{ 7 | update_local_webservice( 8 | webservice, 9 | models = NULL, 10 | deployment_config = NULL, 11 | wait = FALSE, 12 | inference_config = NULL 13 | ) 14 | } 15 | \arguments{ 16 | \item{webservice}{The \code{LocalWebservice} object.} 17 | 18 | \item{models}{A list of \code{Model} objects to package into the updated service.} 19 | 20 | \item{deployment_config}{A \code{LocalWebserviceDeploymentConfiguration} to 21 | apply to the web service.} 22 | 23 | \item{wait}{If \code{TRUE}, wait for the service's container to reach a 24 | healthy state. Defaults to \code{FALSE}.} 25 | 26 | \item{inference_config}{An \code{InferenceConfig} object.} 27 | } 28 | \value{ 29 | None 30 | } 31 | \description{ 32 | Update a local web service with the provided properties.
You can update the 33 | web service to use a new model, a new entry script, or new dependencies 34 | that can be specified in an inference configuration. 35 | 36 | Values left as \code{NULL} will remain unchanged in the service. 37 | } 38 | -------------------------------------------------------------------------------- /man/upload_files_to_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datastore.R 3 | \name{upload_files_to_datastore} 4 | \alias{upload_files_to_datastore} 5 | \title{Upload files to the Azure storage a datastore points to} 6 | \usage{ 7 | upload_files_to_datastore( 8 | datastore, 9 | files, 10 | relative_root = NULL, 11 | target_path = NULL, 12 | overwrite = FALSE, 13 | show_progress = TRUE 14 | ) 15 | } 16 | \arguments{ 17 | \item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} 18 | 19 | \item{files}{A character vector of the absolute paths of the files to upload.} 20 | 21 | \item{relative_root}{A string of the base path used to 22 | determine the path of the files in the Azure storage. For example, if 23 | we upload \verb{/path/to/file.txt}, and we define the base path to be \verb{/path}, 24 | when \code{file.txt} is uploaded to the blob storage or file share, it will 25 | have the path of \verb{/to/file.txt}. If \code{target_path} is also given, then it 26 | will be used as the prefix for the derived path from above. The base path 27 | must be a common path of all of the files, otherwise an exception will be 28 | thrown.} 29 | 30 | \item{target_path}{A string of the location in the blob container or file 31 | share to upload the data to. Defaults to \code{NULL}, in which case the data is 32 | uploaded to the root.} 33 | 34 | \item{overwrite}{If \code{TRUE}, overwrites any existing data at \code{target_path}.} 35 | 36 | \item{show_progress}{If \code{TRUE}, show progress of upload in the console.} 37 | } 38 | \value{ 39 | The \code{DataReference} object for the target path uploaded. 40 | } 41 | \description{ 42 | Upload the data from the local file system to the Azure storage that the 43 | datastore points to. 44 | } 45 | -------------------------------------------------------------------------------- /man/upload_files_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{upload_files_to_run} 4 | \alias{upload_files_to_run} 5 | \title{Upload files to a run} 6 | \usage{ 7 | upload_files_to_run(names, paths, timeout_seconds = NULL, run = NULL) 8 | } 9 | \arguments{ 10 | \item{names}{A character vector of the names of the files to upload.} 11 | 12 | \item{paths}{A character vector of relative local paths to the files 13 | to be uploaded.} 14 | 15 | \item{timeout_seconds}{An int of the timeout in seconds for uploading 16 | the files.} 17 | 18 | \item{run}{The \code{Run} object.} 19 | } 20 | \value{ 21 | None 22 | } 23 | \description{ 24 | Upload files to the run record. 25 | 26 | Note: Runs automatically capture files in the specified output 27 | directory, which defaults to "./outputs". Use \code{upload_files_to_run()} 28 | only when additional files need to be uploaded or an output directory 29 | is not specified.
30 | } 31 | \section{Examples}{ 32 | \preformatted{ws <- load_workspace_from_config() 33 | exp <- experiment(ws, name = 'myexperiment') 34 | 35 | # Start an interactive logging run 36 | run <- start_logging_run(exp) 37 | 38 | # Upload files to the run record 39 | filename1 <- "important_file_1" 40 | filename2 <- "important_file_2" 41 | upload_files_to_run(names = c(filename1, filename2), 42 | paths = c("path/on/disk/file_1.txt", "other/path/on/disk/file_2.txt")) 43 | 44 | # Download a file from the run record 45 | download_file_from_run(filename1, "file_1.txt") 46 | } 47 | } 48 | 49 | \seealso{ 50 | \code{\link[=upload_folder_to_run]{upload_folder_to_run()}} \code{\link[=download_file_from_run]{download_file_from_run()}} \code{\link[=download_files_from_run]{download_files_from_run()}} 51 | } 52 | -------------------------------------------------------------------------------- /man/upload_folder_to_run.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{upload_folder_to_run} 4 | \alias{upload_folder_to_run} 5 | \title{Upload a folder to a run} 6 | \usage{ 7 | upload_folder_to_run(name, path, run = NULL) 8 | } 9 | \arguments{ 10 | \item{name}{A string of the name of the folder of files to upload.} 11 | 12 | \item{path}{A string of the relative local path to the folder to upload.} 13 | 14 | \item{run}{The \code{Run} object.} 15 | } 16 | \value{ 17 | None 18 | } 19 | \description{ 20 | Upload the specified folder under the given prefix name to the run 21 | record. 22 | 23 | Note: Runs automatically capture files in the specified output 24 | directory, which defaults to "./outputs". Use \code{upload_folder_to_run()} 25 | only when additional files need to be uploaded or an output directory 26 | is not specified. 27 | } 28 | \section{Examples}{ 29 | \preformatted{ws <- load_workspace_from_config() 30 | exp <- experiment(ws, name = 'myexperiment') 31 | 32 | # Start an interactive logging run 33 | run <- start_logging_run(exp) 34 | 35 | # Upload folder to the run record 36 | upload_folder_to_run(name = "important_files", 37 | path = "path/on/disk") 38 | 39 | # Download a file from the run record 40 | download_file_from_run("important_files/existing_file.txt", "local_file.txt") 41 | } 42 | } 43 | 44 | \seealso{ 45 | \code{\link[=upload_files_to_run]{upload_files_to_run()}} \code{\link[=download_file_from_run]{download_file_from_run()}} \code{\link[=download_files_from_run]{download_files_from_run()}} 46 | } 47 | -------------------------------------------------------------------------------- /man/upload_to_datastore.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/datastore.R 3 | \name{upload_to_datastore} 4 | \alias{upload_to_datastore} 5 | \title{Upload a local directory to the Azure storage a datastore points to} 6 | \usage{ 7 | upload_to_datastore( 8 | datastore, 9 | src_dir, 10 | target_path = NULL, 11 | overwrite = FALSE, 12 | show_progress = TRUE 13 | ) 14 | } 15 | \arguments{ 16 | \item{datastore}{The \code{AzureBlobDatastore} or \code{AzureFileDatastore} object.} 17 | 18 | \item{src_dir}{A string of the local directory to upload.} 19 | 20 | \item{target_path}{A string of the location in the blob container or 21 | file share to upload the data to.
Defaults to \code{NULL}, in which case the data 22 | is uploaded to the root.} 23 | 24 | \item{overwrite}{If \code{TRUE}, overwrites any existing data at \code{target_path}.} 25 | 26 | \item{show_progress}{If \code{TRUE}, show progress of upload in the console.} 27 | } 28 | \value{ 29 | The \code{DataReference} object for the target path uploaded. 30 | } 31 | \description{ 32 | Upload a local directory to the Azure storage the datastore points to. 33 | } 34 | -------------------------------------------------------------------------------- /man/wait_for_deployment.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/webservice.R 3 | \name{wait_for_deployment} 4 | \alias{wait_for_deployment} 5 | \title{Wait for a web service to finish deploying} 6 | \usage{ 7 | wait_for_deployment(webservice, show_output = FALSE) 8 | } 9 | \arguments{ 10 | \item{webservice}{The \code{LocalWebservice}, \code{AciWebservice}, or 11 | \code{AksWebservice} object.} 12 | 13 | \item{show_output}{If \code{TRUE}, print more verbose output. Defaults 14 | to \code{FALSE}.} 15 | } 16 | \value{ 17 | None 18 | } 19 | \description{ 20 | Automatically poll on the running web service deployment and 21 | wait for the web service to reach a terminal state. Will throw 22 | an exception if it reaches a non-successful terminal state. 23 | 24 | Typically called after running \code{deploy_model()}. 25 | } 26 | \seealso{ 27 | \code{deploy_model()} 28 | } 29 | -------------------------------------------------------------------------------- /man/wait_for_model_package_creation.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/model.R 3 | \name{wait_for_model_package_creation} 4 | \alias{wait_for_model_package_creation} 5 | \title{Wait for a model package to finish creating} 6 | \usage{ 7 | wait_for_model_package_creation(package, show_output = FALSE) 8 | } 9 | \arguments{ 10 | \item{package}{The \code{ModelPackage} object.} 11 | 12 | \item{show_output}{If \code{TRUE}, print more verbose output. Defaults to 13 | \code{FALSE}.} 14 | } 15 | \value{ 16 | None 17 | } 18 | \description{ 19 | Wait for a model package creation to reach a terminal state. 20 | } 21 | -------------------------------------------------------------------------------- /man/wait_for_provisioning_completion.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/compute.R 3 | \name{wait_for_provisioning_completion} 4 | \alias{wait_for_provisioning_completion} 5 | \title{Wait for a cluster to finish provisioning} 6 | \usage{ 7 | wait_for_provisioning_completion(cluster, show_output = FALSE) 8 | } 9 | \arguments{ 10 | \item{cluster}{The \code{AmlCompute} or \code{AksCompute} object.} 11 | 12 | \item{show_output}{If \code{TRUE}, more verbose output will be provided.} 13 | } 14 | \value{ 15 | None 16 | } 17 | \description{ 18 | Wait for a cluster to finish provisioning. Typically invoked after a 19 | \code{create_aml_compute()} or \code{create_aks_compute()} call. 
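For example, a minimal sketch (assuming \code{compute_target} was just returned by \code{create_aml_compute()}): \preformatted{wait_for_provisioning_completion(compute_target, show_output = TRUE)}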
20 | } 21 | \seealso{ 22 | \code{create_aml_compute()}, \code{create_aks_compute()} 23 | } 24 | -------------------------------------------------------------------------------- /man/wait_for_run_completion.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/run.R 3 | \name{wait_for_run_completion} 4 | \alias{wait_for_run_completion} 5 | \title{Wait for the completion of a run} 6 | \usage{ 7 | wait_for_run_completion(run, show_output = TRUE) 8 | } 9 | \arguments{ 10 | \item{run}{The \code{Run} object.} 11 | 12 | \item{show_output}{If \code{TRUE}, print verbose output to console.} 13 | } 14 | \value{ 15 | None 16 | } 17 | \description{ 18 | Wait for the run to reach a terminal state. Typically called 19 | after submitting an experiment run with \code{submit_experiment()}. 20 | } 21 | \seealso{ 22 | \code{\link[=submit_experiment]{submit_experiment()}} 23 | } 24 | -------------------------------------------------------------------------------- /man/write_workspace_config.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workspace.R 3 | \name{write_workspace_config} 4 | \alias{write_workspace_config} 5 | \title{Write out the workspace configuration details to a config file} 6 | \usage{ 7 | write_workspace_config(workspace, path = NULL, file_name = NULL) 8 | } 9 | \arguments{ 10 | \item{workspace}{The \code{Workspace} object whose configuration is to be written out.} 11 | 12 | \item{path}{A string of the location to write the config.json file. The config 13 | file will be located in a directory called '.azureml'. The parameter defaults to 14 | the current working directory, so by default config.json will be located at '.azureml/'.} 15 | 16 | \item{file_name}{A string of the name to use for the config file. The 17 | parameter defaults to \code{'config.json'}.} 18 | } 19 | \value{ 20 | None 21 | } 22 | \description{ 23 | Write out the workspace ARM properties to a config file. Workspace ARM 24 | properties can be loaded later using \code{load_workspace_from_config()}. 25 | The method provides a simple way of reusing the same workspace across 26 | multiple files or projects. Users can save the workspace ARM properties 27 | using this function, and use \code{load_workspace_from_config()} to load the 28 | same workspace in different files or projects without retyping the 29 | workspace ARM properties. 30 | } 31 | \seealso{ 32 | \code{\link[=load_workspace_from_config]{load_workspace_from_config()}} 33 | } 34 | -------------------------------------------------------------------------------- /samples/README.md: -------------------------------------------------------------------------------- 1 | ## Azure ML samples 2 | These samples are short code examples for using Azure Machine Learning with the R SDK. If you are new to the R SDK, we recommend that you first take a look at the more detailed end-to-end [vignettes](../vignettes). 3 | 4 | Before running a sample in RStudio, set the working directory to the folder that contains the sample script using `setwd(dirname)` or Session -> Set Working Directory -> To Source File Location. Each sample assumes that the data and scripts are in the current working directory. 5 | 6 | 1. [train-on-amlcompute](training/train-on-amlcompute): Train a model on a remote AmlCompute cluster. 7 | 2.
[train-on-local](training/train-on-local): Train a model locally with Docker. 8 | 3. [deploy-to-aci](deployment/deploy-to-aci): Deploy a model as a web service to Azure Container Instances (ACI). 9 | 4. [deploy-to-local](deployment/deploy-to-local): Deploy a model as a web service locally. 10 | 11 | > Before you run these samples, make sure you have an Azure Machine Learning workspace. You can follow the [configuration vignette](../vignettes/configuration.Rmd) to set up a workspace. (You do not need to do this if you are running these examples on an Azure Machine Learning compute instance). 12 | 13 | ### Troubleshooting 14 | 15 | - If the following error occurs when submitting an experiment using RStudio: 16 | ```R 17 | Error in py_call_impl(callable, dots$args, dots$keywords) : 18 | PermissionError: [Errno 13] Permission denied 19 | ``` 20 | Move the files for your project into a subdirectory and reset the working directory to that directory before re-submitting. 21 | 22 | In order to submit an experiment, the Azure ML SDK must create a .zip file of the project directory to send to the service. However, 23 | the SDK does not have permission to write into the .Rproj.user subdirectory that is automatically created during an RStudio 24 | session. For this reason, the recommended best practice is to isolate project files into their own directory. 25 | -------------------------------------------------------------------------------- /samples/deployment/deploy-to-aci/deploy-to-aci.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | library(azuremlsdk) 5 | library(jsonlite) 6 | 7 | ws <- load_workspace_from_config() 8 | 9 | # Register the model 10 | model <- register_model(ws, model_path = "model.rds", model_name = "model.rds") 11 | 12 | # Create environment 13 | r_env <- r_environment(name = "r_env") 14 | 15 | # Create inference config 16 | inference_config <- inference_config( 17 | entry_script = "score.R", 18 | source_directory = ".", 19 | environment = r_env) 20 | 21 | # Create ACI deployment config 22 | deployment_config <- aci_webservice_deployment_config(cpu_cores = 1, 23 | memory_gb = 1) 24 | 25 | # Deploy the web service 26 | service_name <- paste0('aciwebservice-', sample(1:100, 1, replace = TRUE)) 27 | service <- deploy_model(ws, 28 | service_name, 29 | list(model), 30 | inference_config, 31 | deployment_config) 32 | wait_for_deployment(service, show_output = TRUE) 33 | 34 | # If you encounter any issues deploying the web service, please visit 35 | # https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-troubleshoot-deployment 36 | 37 | # Inferencing: each plant assignment below overwrites the previous one, so the virginica example is the one actually scored 38 | # versicolor 39 | plant <- data.frame(Sepal.Length = 6.4, 40 | Sepal.Width = 2.8, 41 | Petal.Length = 4.6, 42 | Petal.Width = 1.8) 43 | # setosa 44 | plant <- data.frame(Sepal.Length = 5.1, 45 | Sepal.Width = 3.5, 46 | Petal.Length = 1.4, 47 | Petal.Width = 0.2) 48 | # virginica 49 | plant <- data.frame(Sepal.Length = 6.7, 50 | Sepal.Width = 3.3, 51 | Petal.Length = 5.2, 52 | Petal.Width = 2.3) 53 | 54 | # Test the web service 55 | predicted_val <- invoke_webservice(service, toJSON(plant)) 56 | predicted_val 57 | 58 | # Delete the web service 59 | delete_webservice(service) 60 | -------------------------------------------------------------------------------- /samples/deployment/deploy-to-aci/model.rds: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure/azureml-sdk-for-r/19301e106c0cec69dac0931e98af7e3f713b7bf7/samples/deployment/deploy-to-aci/model.rds -------------------------------------------------------------------------------- /samples/deployment/deploy-to-aci/score.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | library(jsonlite) 5 | 6 | init <- function() 7 | { 8 | model_path <- Sys.getenv("AZUREML_MODEL_DIR") 9 | model <- readRDS(file.path(model_path, "model.rds")) 10 | message("model is loaded") 11 | 12 | function(data) 13 | { 14 | plant <- as.data.frame(fromJSON(data)) 15 | prediction <- predict(model, plant) 16 | result <- as.character(prediction) 17 | toJSON(result) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /samples/deployment/deploy-to-local/model.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azureml-sdk-for-r/19301e106c0cec69dac0931e98af7e3f713b7bf7/samples/deployment/deploy-to-local/model.rds -------------------------------------------------------------------------------- /samples/deployment/deploy-to-local/score.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | library(jsonlite) 5 | 6 | init <- function() 7 | { 8 | model_path <- Sys.getenv("AZUREML_MODEL_DIR") 9 | model <- readRDS(file.path(model_path, "model.rds")) 10 | message("model is loaded") 11 | 12 | function(data) 13 | { 14 | plant <- as.data.frame(fromJSON(data)) 15 | prediction <- predict(model, plant) 16 | result <- as.character(prediction) 17 | message(result) 18 | toJSON(result) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /samples/deployment/deploy-to-local/score_new.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | library(jsonlite) 5 | 6 | init <- function() 7 | { 8 | model_path <- Sys.getenv("AZUREML_MODEL_DIR") 9 | model <- readRDS(file.path(model_path, "model.rds")) 10 | message("model is loaded") 11 | 12 | function(data) 13 | { 14 | plant <- as.data.frame(fromJSON(data)) 15 | prediction <- predict(model, plant) 16 | result <- as.character(prediction) 17 | message(result) 18 | message("this is a new scoring script! I was reloaded") 19 | toJSON(result) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /samples/foreach/batch_inferencing/batch_inferencing.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | library(azuremlsdk) 5 | library(foreach) 6 | 7 | # needed to load register_do_azureml_parallel() method. 8 | # this won't be required when register_do_azureml_parallel() method is public. 
9 | devtools::load_all()  # load the package from source so the unexported helper above is available 10 | 11 | ws <- load_workspace_from_config() 12 | 13 | # create AmlCompute cluster 14 | cluster_name <- "sample-cluster" 15 | vm_size <- "STANDARD_D2_V2" 16 | compute_target <- create_aml_compute(workspace = ws, 17 | cluster_name = cluster_name, 18 | vm_size = vm_size, 19 | max_nodes = 3L) 20 | 21 | wait_for_provisioning_completion(compute_target, show_output = TRUE) 22 | 23 | # Call this method to register the foreach backend with the workspace and the AmlCompute 24 | # cluster on which the parallel job will run. 25 | register_do_azureml_parallel(ws, compute_target) 26 | 27 | model <- readRDS("model.rds") 28 | 29 | data <- read.csv("iris.csv") 30 | nRows <- nrow(data) 31 | 32 | result <- foreach(i = 1:nRows, 33 | .packages = "jsonlite", 34 | node_count = 3L, 35 | process_count_per_node = 2L, 36 | experiment_name = "iris_inferencing", 37 | job_timeout = 3600) %dopar% { 38 | 39 | prediction <- predict(model, data[i, ]) 40 | result <- as.character(prediction) 41 | toJSON(result) 42 | } 43 | 44 | # Delete cluster 45 | delete_compute(compute_target) 46 | -------------------------------------------------------------------------------- /samples/foreach/batch_inferencing/model.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azureml-sdk-for-r/19301e106c0cec69dac0931e98af7e3f713b7bf7/samples/foreach/batch_inferencing/model.rds -------------------------------------------------------------------------------- /samples/training/train-on-amlcompute/train-on-amlcompute.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | # Reminder: set working directory to current file location prior to running this script 5 | 6 | library(azuremlsdk) 7 | 8 | ws <- load_workspace_from_config() 9 | 10 | ds <- get_default_datastore(ws) 11 | 12 | # Upload iris data to the datastore 13 | target_path <- "irisdata" 14 | upload_files_to_datastore(ds, 15 | list("./iris.csv"), 16 | target_path = target_path, 17 | overwrite = TRUE) 18 | 19 | # Create AmlCompute cluster 20 | cluster_name <- "cpu-cluster" 21 | compute_target <- get_compute(ws, cluster_name = cluster_name) 22 | if (is.null(compute_target)) { 23 | vm_size <- "STANDARD_D2_V2" 24 | compute_target <- create_aml_compute(workspace = ws, 25 | cluster_name = cluster_name, 26 | vm_size = vm_size, 27 | max_nodes = 1) 28 | 29 | wait_for_provisioning_completion(compute_target, show_output = TRUE) 30 | } 31 | 32 | # Define estimator 33 | est <- estimator(source_directory = ".", 34 | entry_script = "train.R", 35 | script_params = list("--data_folder" = ds$path(target_path)), 36 | compute_target = compute_target) 37 | 38 | experiment_name <- "train-r-script-on-amlcompute" 39 | exp <- experiment(ws, experiment_name) 40 | 41 | # Submit job and display the run details 42 | run <- submit_experiment(exp, est) 43 | plot_run_details(run) 44 | wait_for_run_completion(run, show_output = TRUE) 45 | 46 | # Get the run metrics 47 | metrics <- get_run_metrics(run) 48 | metrics 49 | 50 | # Delete cluster 51 | delete_compute(compute_target) 52 | -------------------------------------------------------------------------------- /samples/training/train-on-amlcompute/train.R: -------------------------------------------------------------------------------- 1 | # This script loads a dataset whose last column is assumed to be the 2 | # class label and logs the accuracy 3 | 4 | library(azuremlsdk)
5 | library(caret) 6 | library(optparse) 7 | 8 | options <- list( 9 | make_option(c("-d", "--data_folder")) 10 | ) 11 | 12 | opt_parser <- OptionParser(option_list = options) 13 | opt <- parse_args(opt_parser) 14 | 15 | paste(opt$data_folder)  # echo the data folder path 16 | 17 | all_data <- read.csv(file.path(opt$data_folder, "iris.csv")) 18 | summary(all_data) 19 | 20 | in_train <- createDataPartition(y = all_data$Species, p = .8, list = FALSE) 21 | train_data <- all_data[in_train, ] 22 | test_data <- all_data[-in_train, ] 23 | 24 | # Run algorithms using 10-fold cross validation 25 | control <- trainControl(method = "cv", number = 10) 26 | metric <- "Accuracy" 27 | 28 | set.seed(7) 29 | model <- train(Species ~ ., 30 | data = train_data, 31 | method = "lda", 32 | metric = metric, 33 | trControl = control) 34 | predictions <- predict(model, test_data) 35 | conf_matrix <- confusionMatrix(predictions, test_data$Species) 36 | message(conf_matrix) 37 | 38 | log_metric_to_run(metric, conf_matrix$overall["Accuracy"]) 39 | 40 | saveRDS(model, file = "./outputs/model.rds") 41 | message("Model saved") 42 | -------------------------------------------------------------------------------- /samples/training/train-on-local/train-on-local.R: -------------------------------------------------------------------------------- 1 | # Copyright(c) Microsoft Corporation. 2 | # Licensed under the MIT license. 3 | 4 | # Reminder: set working directory to current file location prior to running this script 5 | 6 | library(azuremlsdk) 7 | 8 | ws <- load_workspace_from_config() 9 | 10 | # Define estimator 11 | est <- estimator(source_directory = ".", 12 | entry_script = "train.R", 13 | compute_target = "local") 14 | 15 | # Initialize experiment 16 | experiment_name <- "train-r-script-on-local" 17 | exp <- experiment(ws, experiment_name) 18 | 19 | # Submit job and display the run details 20 | run <- submit_experiment(exp, est) 21 | plot_run_details(run) 22 | wait_for_run_completion(run, show_output = TRUE) 23 | 24 | # Get the run metrics 25 | metrics <- get_run_metrics(run) 26 | metrics 27 | -------------------------------------------------------------------------------- /samples/training/train-on-local/train.R: -------------------------------------------------------------------------------- 1 | # This script loads a dataset whose last column is assumed to be the 2 | # class label and logs the accuracy 3 | 4 | library(azuremlsdk) 5 | library(caret) 6 | 7 | all_data <- read.csv("iris.csv") 8 | summary(all_data) 9 | 10 | in_train <- createDataPartition(y = all_data$Species, p = .8, list = FALSE) 11 | train_data <- all_data[in_train, ] 12 | test_data <- all_data[-in_train, ] 13 | 14 | # Run algorithms using 10-fold cross validation 15 | control <- trainControl(method = "cv", number = 10) 16 | metric <- "Accuracy" 17 | 18 | set.seed(7) 19 | model <- train(Species ~ ., 20 | data = train_data, 21 | method = "lda", 22 | metric = metric, 23 | trControl = control) 24 | predictions <- predict(model, test_data) 25 | conf_matrix <- confusionMatrix(predictions, test_data$Species) 26 | message(conf_matrix) 27 | 28 | log_metric_to_run(metric, conf_matrix$overall["Accuracy"]) 29 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(azuremlsdk) 3 | 4 | if (identical(Sys.getenv("NOT_CRAN"), "true")) { 5 | test_check("azuremlsdk") 6 | }
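7 | 8 | # NOT_CRAN is typically set to "true" by devtools or by the CI pipeline; it is unset 9 | # on CRAN, so these integration tests (which need live Azure resources) do not run there.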
-------------------------------------------------------------------------------- /tests/testthat/dummy_data.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azureml-sdk-for-r/19301e106c0cec69dac0931e98af7e3f713b7bf7/tests/testthat/dummy_data.txt -------------------------------------------------------------------------------- /tests/testthat/dummy_score.R: -------------------------------------------------------------------------------- 1 | #' Copyright (c) Microsoft Corporation. All rights reserved. 2 | #' Licensed under the MIT License. 3 | 4 | library(jsonlite) 5 | 6 | init <- function() 7 | { 8 | model_path <- Sys.getenv("AZUREML_MODEL_DIR") 9 | model <- readRDS(file.path(model_path, "model.rds")) 10 | message("model is loaded") 11 | 12 | function(data) 13 | { 14 | plant <- as.data.frame(fromJSON(data)) 15 | prediction <- predict(model, plant) 16 | result <- as.character(prediction) 17 | toJSON(result) 18 | } 19 | } -------------------------------------------------------------------------------- /tests/testthat/helper-resources.R: -------------------------------------------------------------------------------- 1 | # can be used across all test files 2 | subscription_id <- Sys.getenv("TEST_SUBSCRIPTION_ID", unset = NA) 3 | resource_group <- Sys.getenv("TEST_RESOURCE_GROUP") 4 | location <- Sys.getenv("TEST_LOCATION") 5 | workspace_name <- Sys.getenv("TEST_WORKSPACE_NAME", unset = "r_sdk_workspace") 6 | cluster_name <- Sys.getenv("TEST_CLUSTER_NAME", unset = "r-cluster-cpu") 7 | test_env <- paste0('test_', as.integer(Sys.time())) 8 | build_num <- Sys.getenv('TEST_BUILD_NUMBER') 9 | build_num <- gsub('[.]', '-', build_num) 10 | 11 | 12 | library(azuremlsdk) 13 | library(ggplot2) 14 | 15 | if (!is.na(subscription_id)) { 16 | if (is.na(Sys.getenv("AZUREML_PYTHON_INSTALLED", unset = NA))) { 17 | install_azureml() 18 | } 19 | 20 | existing_ws <- create_workspace(workspace_name, 21 | subscription_id = subscription_id, 22 | resource_group = resource_group, 23 | location = location, 24 | exist_ok = TRUE) 25 | 26 | existing_compute <- get_compute(workspace = existing_ws, 27 | cluster_name = cluster_name) 28 | if (is.null(existing_compute)) { 29 | vm_size <- "STANDARD_D2_V2" 30 | existing_compute <- create_aml_compute(workspace = existing_ws, 31 | cluster_name = cluster_name, 32 | vm_size = vm_size, 33 | min_nodes = 0, 34 | max_nodes = 1) 35 | wait_for_provisioning_completion(existing_compute) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /tests/testthat/teardown-resources.R: -------------------------------------------------------------------------------- 1 | tryCatch( { 2 | reticulate::conda_remove(test_env) 3 | }, 4 | error = function(e) { 5 | NULL 6 | }) 7 | -------------------------------------------------------------------------------- /tests/testthat/test_compute.R: -------------------------------------------------------------------------------- 1 | context("compute tests") 2 | source("utils.R") 3 | 4 | test_that("create amlcompute", { 5 | skip_if_no_subscription() 6 | ws <- existing_ws 7 | 8 | vm_size <- "STANDARD_D2_V2" 9 | cluster_name <- paste("aml", build_num, sep = "") 10 | compute_target <- create_aml_compute(workspace = ws, 11 | cluster_name = cluster_name, 12 | vm_size = vm_size, 13 | max_nodes = 1) 14 | wait_for_provisioning_completion(compute_target) 15 | expect_equal(compute_target$name, cluster_name) 16 | 17 | compute_target <- get_compute(ws, 
cluster_name = cluster_name) 18 | expect_equal(compute_target$name, cluster_name) 19 | 20 | non_existent_cluster <- get_compute(ws, cluster_name = "nonexistent") 21 | expect_equal(non_existent_cluster, NULL) 22 | 23 | # tear down compute 24 | delete_compute(compute_target) 25 | }) 26 | 27 | test_that("create akscompute", { 28 | skip('skip') 29 | ws <- existing_ws 30 | 31 | # create aks compute 32 | cluster_name <- paste("aks", build_num, sep = "") 33 | compute_target <- create_aks_compute(workspace = ws, 34 | cluster_name = cluster_name) 35 | wait_for_provisioning_completion(compute_target) 36 | expect_equal(compute_target$name, cluster_name) 37 | 38 | compute_target <- get_compute(ws, cluster_name = cluster_name) 39 | expect_equal(compute_target$name, cluster_name) 40 | 41 | # tear down compute 42 | delete_compute(compute_target) 43 | }) 44 | -------------------------------------------------------------------------------- /tests/testthat/test_estimator.R: -------------------------------------------------------------------------------- 1 | context("estimator") 2 | 3 | test_that("create estimator", { 4 | skip_if_no_azureml() 5 | 6 | r_env <- r_environment("r-env", 7 | cran_packages = list(cran_package("ggplot2")), 8 | use_gpu = TRUE, 9 | environment_variables = list("var1" = "val1")) 10 | 11 | est <- estimator(".", compute_target = "local", 12 | script_params = list("param1" = 1), 13 | environment = r_env) 14 | 15 | expect_equal(est$run_config$target, "local") 16 | expect_equal(length(est$run_config$arguments), 2) 17 | expect_equal(est$run_config$arguments[[1]], "param1") 18 | expect_equal(est$run_config$arguments[[2]], 1) 19 | }) -------------------------------------------------------------------------------- /tests/testthat/test_hyperdrive.R: -------------------------------------------------------------------------------- 1 | context("hyperdrive") 2 | source("utils.R") 3 | 4 | test_that("create hyperdrive config, launch runs, get run metrics", { 5 | skip_if_no_subscription() 6 | experiment_name <- "test_experiment" 7 | 8 | ws <- existing_ws 9 | 10 | # create experiment 11 | exp <- experiment(ws, experiment_name) 12 | expect_equal(exp$name, experiment_name) 13 | 14 | # get existing experiment 15 | exp <- experiment(ws, experiment_name) 16 | expect_equal(exp$name, experiment_name) 17 | 18 | # start a remote job and get the run, wait for it to finish 19 | tmp_dir_name <- file.path(tempdir(), "tmp_dir") 20 | script_name <- "train_hyperdrive_dummy.R" 21 | 22 | dir.create(tmp_dir_name) 23 | file.copy(script_name, tmp_dir_name) 24 | 25 | script_params <- list(number_1 = 3, number_2 = 2) 26 | est <- estimator(source_directory = tmp_dir_name, 27 | entry_script = script_name, 28 | compute_target = existing_compute$name, 29 | script_params = script_params) 30 | 31 | # define sampling and policy for hyperparameter tuning 32 | sampling <- 33 | grid_parameter_sampling(list(number_1 = choice(c(3, 6)), 34 | number_2 = choice(c(2, 5)))) 35 | policy <- median_stopping_policy() 36 | hyperdrive_config <- 37 | hyperdrive_config(sampling, "Sum", 38 | primary_metric_goal("MAXIMIZE"), 39 | 4, 40 | policy = policy, 41 | estimator = est) 42 | # submit hyperdrive run 43 | hyperdrive_run <- submit_experiment(exp, hyperdrive_config) 44 | wait_for_run_completion(hyperdrive_run, show_output = TRUE) 45 | 46 | child_runs <- 47 | get_child_runs_sorted_by_primary_metric(hyperdrive_run) 48 | expected_best_run <- toString(child_runs[[1]][1]) 49 | expect_equal(length(child_runs), 5) 50 | 51 | child_run_metrics <- 
get_child_run_metrics(hyperdrive_run) 52 | expect_equal(length(child_run_metrics), 4) 53 | 54 | # find best-performing run 55 | best_run <- get_best_run_by_primary_metric(hyperdrive_run) 56 | 57 | expect_equal(expected_best_run, best_run$id) 58 | 59 | # tear down resources 60 | unlink(tmp_dir_name, recursive = TRUE) 61 | }) -------------------------------------------------------------------------------- /tests/testthat/test_keyvault.R: -------------------------------------------------------------------------------- 1 | context("keyvault") 2 | source("utils.R") 3 | 4 | test_that("keyvault tests, list/set/get/delete secrets", 5 | { 6 | skip_if_no_subscription() 7 | ws <- existing_ws 8 | kv <- get_default_keyvault(ws) 9 | expect_gte(length(list_secrets(kv)), 0) 10 | 11 | secret_name <- paste0("secret", gsub("-", "", build_num)) 12 | secret_value <- paste0("value", gsub("-", "", build_num)) 13 | secrets <- list() 14 | secrets[[ secret_name ]] <- secret_value 15 | 16 | set_secrets(kv, secrets) 17 | expect_equal(get_secrets(kv, list(secret_name))[[ secret_name ]], 18 | secret_value) 19 | 20 | delete_secrets(kv, list(secret_name)) 21 | expect_equal(get_secrets(kv, list(secret_name))[[ secret_name ]], 22 | NULL) 23 | }) -------------------------------------------------------------------------------- /tests/testthat/test_webservice.R: -------------------------------------------------------------------------------- 1 | context("webservice tests") 2 | source("utils.R") 3 | 4 | test_that("create, get, generate keys of, and delete webservice", { 5 | skip('skip') 6 | ws <- existing_ws 7 | 8 | tmp_dir_name <- file.path(tempdir(), "tmp_dir") 9 | model_name <- "dummy_model.data" 10 | dir.create(tmp_dir_name) 11 | file.create(file.path(tmp_dir_name, model_name)) 12 | 13 | # register the model 14 | model <- register_model(ws, tmp_dir_name, model_name) 15 | 16 | # Create the inference config to use for Webservice 17 | config <- inference_config(entry_script = "dummy_score.R") 18 | 19 | # Create ACI deployment config 20 | tags <- reticulate::py_dict('name', 'temp') 21 | aciconfig <- 22 | azureml$core$webservice$AciWebservice$deploy_configuration(cpu_cores = 1, 23 | memory_gb = 1, 24 | tags = tags, 25 | auth_enabled = T) 26 | # Deploy the model 27 | service_name <- paste("svc", build_num, sep="") 28 | service <- deploy_model(ws, 29 | service_name, 30 | models = c(model), 31 | inference_config = config, 32 | deployment_config = aciconfig) 33 | 34 | wait_for_deployment(service, show_output = TRUE) 35 | 36 | # Get webservice 37 | service <- get_webservice(ws, name = service_name) 38 | 39 | # Check the logs 40 | logs <- get_webservice_logs(service) 41 | expect_equal(length(logs), 1) 42 | 43 | # Get the service keys 44 | keys <- get_webservice_keys(service) 45 | expect_equal(length(keys), 2) 46 | 47 | # Try changing secondary key 48 | generate_new_webservice_key(service, key_type = 'Secondary') 49 | new_keys <- get_webservice_keys(service) 50 | expect_equal(length(new_keys), 2) 51 | 52 | # check if the new secondary key is different from the previous one 53 | expect_false(keys[[2]] == new_keys[[2]]) 54 | 55 | # delete the webservice 56 | delete_webservice(service) 57 | }) -------------------------------------------------------------------------------- /tests/testthat/test_workspace.R: -------------------------------------------------------------------------------- 1 | context("Workspace") 2 | source("utils.R") 3 | 4 | subscription_id <- Sys.getenv("TEST_SUBSCRIPTION_ID") 5 | resource_group <- 
--------------------------------------------------------------------------------
/tests/testthat/utils.R:
--------------------------------------------------------------------------------
skip_if_no_azureml <- function() {
  if (!reticulate::py_module_available("azureml"))
    skip("azureml not available for testing")
}

skip_if_no_subscription <- function() {
  subscription_id <- Sys.getenv("TEST_SUBSCRIPTION_ID", unset = NA)
  if (is.na(subscription_id))
    skip("subscription not available for testing")
}

--------------------------------------------------------------------------------
/vignettes/README.md:
--------------------------------------------------------------------------------
## Azure ML vignettes
These vignettes are end-to-end tutorials for using Azure Machine Learning with the R SDK.

Before running a vignette in RStudio, set the working directory to the folder that contains the vignette (.Rmd) file, either with `setwd()` or via Session -> Set Working Directory -> To Source File Location. Each vignette assumes that the data and scripts are relative to the vignette file location.
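For example, assuming the repository is cloned locally (the path below is illustrative):

```r
# run once per session, before executing a vignette's chunks
setwd("~/azureml-sdk-for-r/vignettes/train-and-deploy-first-model")
```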
The following vignettes are included:
1. [installation](installation.Rmd): Install the Azure ML SDK for R.
2. [configuration](configuration.Rmd): Set up an Azure ML workspace.
3. [train-and-deploy-first-model](train-and-deploy-first-model.Rmd): Train a caret model and deploy it as a web service to Azure Container Instances (ACI).
4. [train-with-tensorflow](train-with-tensorflow.Rmd): Train a deep learning TensorFlow model with Azure ML.
5. [hyperparameter-tune-with-keras](hyperparameter-tune-with-keras.Rmd): Hyperparameter tune a Keras model using HyperDrive, Azure ML's hyperparameter tuning functionality.
6. [deploy-to-aks](deploy-to-aks.Rmd): Deploy a model to production as a web service on Azure Kubernetes Service (AKS).

> If you are running these examples on an Azure Machine Learning compute instance, skip the installation and configuration vignettes (#1 and #2), as the compute instance has the Azure ML SDK pre-installed and your workspace details pre-configured.

For additional examples of using the R SDK, see the [samples](../samples) folder.

### Azure ML guides
In addition to the end-to-end vignettes, we also provide more detailed documentation for the following:
* [Deploying models](deploying-models.Rmd): Where and how to deploy models on Azure ML.
* [Troubleshooting](troubleshooting.Rmd): Known issues and troubleshooting for using R in Azure ML.

--------------------------------------------------------------------------------
/vignettes/deploy-to-aks/model.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure/azureml-sdk-for-r/19301e106c0cec69dac0931e98af7e3f713b7bf7/vignettes/deploy-to-aks/model.rds

--------------------------------------------------------------------------------
/vignettes/deploy-to-aks/score.R:
--------------------------------------------------------------------------------
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(jsonlite)

init <- function()
{
  model_path <- Sys.getenv("AZUREML_MODEL_DIR")
  model <- readRDS(file.path(model_path, "model.rds"))
  message("model is loaded")

  # init() must return the scoring function, which receives a JSON string
  # and returns a JSON-serializable result
  function(data)
  {
    plant <- as.data.frame(fromJSON(data))
    prediction <- predict(model, plant)
    result <- as.character(prediction)
    toJSON(result)
  }
}
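A minimal local smoke test of the entry-script contract above, assuming the working directory holds both score.R and a model.rds trained on the iris data (which the `plant` variable suggests); the sample row is illustrative:

library(jsonlite)

Sys.setenv(AZUREML_MODEL_DIR = ".")  # hypothetical: folder containing model.rds
source("score.R")
run <- init()  # init() returns the scoring function
sample_json <- toJSON(list(Sepal.Length = 5.1, Sepal.Width = 3.5,
                           Petal.Length = 1.4, Petal.Width = 0.2))
run(sample_json)  # JSON-encoded predicted class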
--------------------------------------------------------------------------------
/vignettes/experiments-deep-dive/accident-glm.R:
--------------------------------------------------------------------------------
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(azuremlsdk)
library(optparse)
library(caret)

options <- list(
  make_option(c("-d", "--data_folder")),
  make_option(c("-p", "--percent_train"))
)

opt_parser <- OptionParser(option_list = options)
opt <- parse_args(opt_parser)

## Echo the data folder to the run log
paste(opt$data_folder)

accidents <- readRDS(file.path(opt$data_folder, "accidents.Rd"))
summary(accidents)

## Create data partition for use with caret; fall back to a 75% training
## split if --percent_train is missing, non-numeric, or out of range
train.pct <- as.numeric(opt$percent_train)
if (length(train.pct) == 0 || is.na(train.pct) ||
    train.pct < 0 || train.pct > 1) {
  train.pct <- 0.75
}
accident_idx <- createDataPartition(accidents$dead, p = train.pct, list = FALSE)
accident_trn <- accidents[accident_idx, ]
accident_tst <- accidents[-accident_idx, ]

## utility function to calculate accuracy on the test set
calc_acc <- function(actual, predicted) {
  mean(actual == predicted)
}

## caret GLM model on the training set with 5-fold cross-validation
accident_glm_mod <- train(
  form = dead ~ .,
  data = accident_trn,
  trControl = trainControl(method = "cv", number = 5),
  method = "glm",
  family = "binomial"
)
summary(accident_glm_mod)

log_metric_to_run("Accuracy",
                  calc_acc(actual = accident_tst$dead,
                           predicted = predict(accident_glm_mod,
                                               newdata = accident_tst)))
log_metric_to_run("Method", "GLM")
log_metric_to_run("TrainPCT", train.pct)

output_dir <- "outputs"
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}
saveRDS(accident_glm_mod, file = "./outputs/model.rds")

message("Model saved")
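A sketch of how a script like accident-glm.R might be submitted as an experiment run; `ds_ref`, the compute target, and the experiment name are assumptions, not values taken from this repository:

# `ds_ref` is a hypothetical reference to the datastore path holding accidents.Rd
est <- estimator(source_directory = "vignettes/experiments-deep-dive",
                 entry_script = "accident-glm.R",
                 script_params = list("--data_folder" = ds_ref,
                                      "--percent_train" = 0.8),
                 compute_target = "cpu-cluster")  # hypothetical cluster name
run <- submit_experiment(experiment(ws, "accidents"), est)
wait_for_run_completion(run, show_output = TRUE)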
--------------------------------------------------------------------------------
/vignettes/experiments-deep-dive/accident-glmnet.R:
--------------------------------------------------------------------------------
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(azuremlsdk)
library(optparse)
library(caret)
library(glmnet)

options <- list(
  make_option(c("-d", "--data_folder")),
  make_option(c("-p", "--percent_train"))
)

opt_parser <- OptionParser(option_list = options)
opt <- parse_args(opt_parser)

## Echo the data folder to the run log
paste(opt$data_folder)

accidents <- readRDS(file.path(opt$data_folder, "accidents.Rd"))
summary(accidents)

## Create data partition for use with caret; fall back to a 75% training
## split if --percent_train is missing, non-numeric, or out of range
train.pct <- as.numeric(opt$percent_train)
if (length(train.pct) == 0 || is.na(train.pct) ||
    train.pct < 0 || train.pct > 1) {
  train.pct <- 0.75
}
accident_idx <- createDataPartition(accidents$dead, p = train.pct, list = FALSE)
accident_trn <- accidents[accident_idx, ]
accident_tst <- accidents[-accident_idx, ]

## utility function to calculate accuracy on the test set
calc_acc <- function(actual, predicted) {
  mean(actual == predicted)
}

## caret GLMNET model
accident_glmnet_mod <- train(
  dead ~ .,
  data = accident_trn,
  method = "glmnet"
)
summary(accident_glmnet_mod)

log_metric_to_run("Accuracy",
                  calc_acc(actual = accident_tst$dead,
                           predicted = predict(accident_glmnet_mod,
                                               newdata = accident_tst)))
log_metric_to_run("Method", "GLMNET")
log_metric_to_run("TrainPCT", train.pct)

output_dir <- "outputs"
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}
saveRDS(accident_glmnet_mod, file = "./outputs/model.rds")

message("Model saved")
--------------------------------------------------------------------------------
/vignettes/experiments-deep-dive/accident-knn.R:
--------------------------------------------------------------------------------
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(azuremlsdk)
library(optparse)
library(caret)

options <- list(
  make_option(c("-d", "--data_folder")),
  make_option(c("-p", "--percent_train"))
)

opt_parser <- OptionParser(option_list = options)
opt <- parse_args(opt_parser)

## Echo the data folder to the run log
paste(opt$data_folder)

accidents <- readRDS(file.path(opt$data_folder, "accidents.Rd"))
summary(accidents)

## Create data partition for use with caret; fall back to a 75% training
## split if --percent_train is missing, non-numeric, or out of range
train.pct <- as.numeric(opt$percent_train)
if (length(train.pct) == 0 || is.na(train.pct) ||
    train.pct < 0 || train.pct > 1) {
  train.pct <- 0.75
}
accident_idx <- createDataPartition(accidents$dead, p = train.pct, list = FALSE)
accident_trn <- accidents[accident_idx, ]
accident_tst <- accidents[-accident_idx, ]

## utility function to calculate accuracy on the test set
calc_acc <- function(actual, predicted) {
  mean(actual == predicted)
}

## caret KNN model with 5-fold cross-validation
accident_knn_mod <- train(
  dead ~ .,
  data = accident_trn,
  method = "knn",
  trControl = trainControl(method = "cv", number = 5)
)
summary(accident_knn_mod)

log_metric_to_run("Accuracy",
                  calc_acc(actual = accident_tst$dead,
                           predicted = predict(accident_knn_mod,
                                               newdata = accident_tst)))
log_metric_to_run("Method", "KNN")
log_metric_to_run("TrainPCT", train.pct)

output_dir <- "outputs"
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}
saveRDS(accident_knn_mod, file = "./outputs/model.rds")

message("Model saved")

--------------------------------------------------------------------------------
/vignettes/experiments-deep-dive/accident_predict_caret.R:
--------------------------------------------------------------------------------
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(jsonlite)

init <- function()
{
  model_path <- Sys.getenv("AZUREML_MODEL_DIR")
  model <- readRDS(file.path(model_path, "model.rds"))
  method <- model$method
  message(paste(method, "model loaded"))

  function(data)
  {
    vars <- as.data.frame(fromJSON(data))
    # return the predicted probability of the "dead" class
    prediction <- predict(model, newdata = vars, type = "prob")[, "dead"]
    toJSON(prediction)
  }
}

--------------------------------------------------------------------------------
/vignettes/hyperparameter-tune-with-keras/Dockerfile:
--------------------------------------------------------------------------------
FROM mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04

RUN conda install -c r -y conda=4.8.3 r-essentials openssl=1.1.1c && \
    conda clean -ay && \
    pip install --no-cache-dir azureml-defaults tensorflow-gpu keras

ENV TAR="/bin/tar"
RUN R -e "install.packages(c('remotes', 'reticulate', 'optparse', 'azuremlsdk', 'keras'), repos = 'https://cloud.r-project.org/')"
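A sketch of how a custom image built from this Dockerfile might be attached to a run via the `custom_docker_image` argument of `r_environment()`, assuming the image has been pushed to a reachable registry (the environment, image, script, and cluster names are all illustrative):

env <- r_environment("keras-gpu-env",
                     custom_docker_image = "myregistry.azurecr.io/keras-gpu:latest")
est <- estimator("hyperparameter-tune-with-keras",
                 entry_script = "hyperparameter_tune_keras.R",  # hypothetical entry script
                 compute_target = "gpu-cluster",                # hypothetical cluster name
                 environment = env)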
--------------------------------------------------------------------------------
/vignettes/train-and-deploy-first-model/accident_predict.R:
--------------------------------------------------------------------------------
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(jsonlite)

init <- function()
{
  model_path <- Sys.getenv("AZUREML_MODEL_DIR")
  model <- readRDS(file.path(model_path, "model.rds"))
  message("logistic regression model loaded")

  function(data)
  {
    vars <- as.data.frame(fromJSON(data))
    # return the predicted probability of death as a percentage
    prediction <- as.numeric(predict(model, vars, type = "response") * 100)
    toJSON(prediction)
  }
}

--------------------------------------------------------------------------------
/vignettes/train-and-deploy-first-model/accidents.R:
--------------------------------------------------------------------------------
#' Copyright(c) Microsoft Corporation.
#' Licensed under the MIT license.

library(azuremlsdk)
library(optparse)

options <- list(
  make_option(c("-d", "--data_folder"))
)

opt_parser <- OptionParser(option_list = options)
opt <- parse_args(opt_parser)

## Echo the data folder to the run log
paste(opt$data_folder)

accidents <- readRDS(file.path(opt$data_folder, "accidents.Rd"))
summary(accidents)

mod <- glm(dead ~ dvcat + seatbelt + frontal + sex + ageOFocc + yearVeh +
             airbag + occRole,
           family = binomial, data = accidents)
summary(mod)
## classify as "dead" when the linear predictor (log-odds) exceeds 0.1
predictions <- factor(ifelse(predict(mod) > 0.1, "dead", "alive"))
accuracy <- mean(predictions == accidents$dead)
log_metric_to_run("Accuracy", accuracy)

output_dir <- "outputs"
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}
saveRDS(mod, file = "./outputs/model.rds")
message("Model saved")

--------------------------------------------------------------------------------
/vignettes/train-with-tensorflow/Dockerfile:
--------------------------------------------------------------------------------
FROM mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04

RUN conda install -c r -y conda=4.8.3 r-essentials openssl=1.1.1c && \
    conda clean -ay && \
    pip install --no-cache-dir azureml-defaults tensorflow-gpu==1.14

ENV TAR="/bin/tar"
RUN R -e "install.packages(c('remotes', 'reticulate', 'optparse', 'azuremlsdk'), repos = 'https://cloud.r-project.org/')" && \
    R -e "remotes::install_version('tensorflow', version = '1.14.0', upgrade = 'never', repos = 'https://cloud.r-project.org/')"
--------------------------------------------------------------------------------