├── .deepsource.toml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── question.md └── workflows │ ├── build-test-deploy.yml │ └── codeql-analysis.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── ContributorAgreement.txt ├── LICENSE ├── MANIFEST.in ├── README.md ├── SUPPORT.md ├── doc ├── Makefile ├── _static │ ├── sas_logo.svg │ └── sasctl.css ├── _templates │ └── autosummary │ │ ├── attribute.rst │ │ ├── base.rst │ │ ├── class.rst │ │ ├── member.rst │ │ ├── method.rst │ │ ├── minimal_module.rst │ │ └── module.rst ├── api │ ├── pzmm │ │ ├── import_model.rst │ │ ├── mlflow_model.rst │ │ ├── model_parameters.rst │ │ ├── pickle_model.rst │ │ ├── write_json_files.rst │ │ ├── write_score_code.rst │ │ └── zip_model.rst │ ├── sasctl.core.rst │ ├── sasctl.current_session.rst │ ├── sasctl.pzmm.rst │ ├── sasctl.rst │ ├── sasctl.services.rst │ ├── sasctl.session.rst │ ├── sasctl.tasks.rst │ ├── sasctl.utils.pymas.rst │ ├── sasctl.utils.pyml2ds.rst │ ├── sasctl.utils.rst │ └── services │ │ ├── cas_management.rst │ │ ├── concepts.rst │ │ ├── data_sources.rst │ │ ├── files.rst │ │ ├── folders.rst │ │ ├── microanalytic_score.rst │ │ ├── model_management.rst │ │ ├── model_publish.rst │ │ ├── model_repository.rst │ │ ├── projects.rst │ │ ├── relationships.rst │ │ ├── report_images.rst │ │ ├── reports.rst │ │ ├── saslogon.rst │ │ ├── sentiment_analysis.rst │ │ ├── text_categorization.rst │ │ └── text_parsing.rst ├── conf.py ├── index.rst └── make.bat ├── examples ├── ARCHIVE │ └── v1_8 │ │ ├── pzmmModelImportExample.ipynb │ │ ├── pzmmModelImportExampleH2O.ipynb │ │ ├── pzmmModelImportExampleMLFlow.ipynb │ │ └── pzmmModelImportExampleRegression.ipynb ├── README.md ├── data │ ├── BiasMetrics │ │ ├── examModels │ │ │ ├── GradientBoost │ │ │ │ ├── GradientBoost.pickle │ │ │ │ ├── GradientBoost.zip │ │ │ │ ├── ModelProperties.json │ │ │ │ ├── fileMetadata.json │ │ │ │ ├── groupMetrics.json │ │ │ │ ├── inputVar.json │ │ │ │ ├── 
maxDifferences.json │ │ │ │ ├── outputVar.json │ │ │ │ └── score_GradientBoost.py │ │ │ ├── LinearRegression │ │ │ │ ├── LinearRegression.pickle │ │ │ │ ├── LinearRegression.zip │ │ │ │ ├── ModelProperties.json │ │ │ │ ├── fileMetadata.json │ │ │ │ ├── groupMetrics.json │ │ │ │ ├── inputVar.json │ │ │ │ ├── maxDifferences.json │ │ │ │ ├── outputVar.json │ │ │ │ └── score_LinearRegression.py │ │ │ └── RandomForest │ │ │ │ ├── ModelProperties.json │ │ │ │ ├── RandomForest.pickle │ │ │ │ ├── RandomForest.zip │ │ │ │ ├── fileMetadata.json │ │ │ │ ├── groupMetrics.json │ │ │ │ ├── inputVar.json │ │ │ │ ├── maxDifferences.json │ │ │ │ ├── outputVar.json │ │ │ │ └── score_RandomForest.py │ │ └── titanicModels │ │ │ ├── DecisionTree │ │ │ ├── DecisionTree.pickle │ │ │ ├── DecisionTree.zip │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_fitstat.json │ │ │ ├── dmcas_lift.json │ │ │ ├── dmcas_roc.json │ │ │ ├── fileMetadata.json │ │ │ ├── groupMetrics.json │ │ │ ├── inputVar.json │ │ │ ├── maxDifferences.json │ │ │ ├── outputVar.json │ │ │ └── score_DecisionTree.py │ │ │ ├── GradientBoost │ │ │ ├── GradientBoost.pickle │ │ │ ├── GradientBoost.zip │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_fitstat.json │ │ │ ├── dmcas_lift.json │ │ │ ├── dmcas_roc.json │ │ │ ├── fileMetadata.json │ │ │ ├── groupMetrics.json │ │ │ ├── inputVar.json │ │ │ ├── maxDifferences.json │ │ │ ├── outputVar.json │ │ │ └── score_GradientBoost.py │ │ │ ├── MultiRandomForest │ │ │ ├── groupMetrics.json │ │ │ └── maxDifferences.json │ │ │ └── RandomForest │ │ │ ├── ModelProperties.json │ │ │ ├── RandomForest.pickle │ │ │ ├── RandomForest.zip │ │ │ ├── dmcas_fitstat.json │ │ │ ├── dmcas_lift.json │ │ │ ├── dmcas_roc.json │ │ │ ├── fileMetadata.json │ │ │ ├── groupMetrics.json │ │ │ ├── inputVar.json │ │ │ ├── maxDifferences.json │ │ │ ├── outputVar.json │ │ │ └── score_RandomForest.py │ ├── HMEQPERF_1_Q1.csv │ ├── HMEQPERF_2_Q2.csv │ ├── HMEQPERF_3_Q3.csv │ ├── HMEQPERF_4_Q4.csv │ ├── MLFlowModels │ │ ├── 
Model1 │ │ │ ├── MLFlowModel.pickle │ │ │ ├── MLFlowModel.zip │ │ │ ├── ModelProperties.json │ │ │ ├── fileMetadata.json │ │ │ ├── inputVar.json │ │ │ ├── outputVar.json │ │ │ └── score_MLFlowModel.py │ │ ├── Test1 │ │ │ ├── MLFlowTest.pickle │ │ │ ├── MLFlowTest.zip │ │ │ ├── MLFlowTestScore.py │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_epscorecode.sas │ │ │ ├── dmcas_packagescorecode.sas │ │ │ ├── fileMetadata.json │ │ │ ├── inputVar.json │ │ │ └── outputVar.json │ │ └── Test2 │ │ │ ├── MLFlowTest2.pickle │ │ │ ├── MLFlowTest2.zip │ │ │ ├── MLFlowTest2Score.py │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_epscorecode.sas │ │ │ ├── dmcas_packagescorecode.sas │ │ │ ├── fileMetadata.json │ │ │ ├── inputVar.json │ │ │ ├── model.pkl │ │ │ └── outputVar.json │ ├── ModelMigration │ │ └── DecisionTreeClassifier.zip │ ├── README.md │ ├── USAHousingModels │ │ ├── LinearRegression.zip │ │ └── LinearRegression │ │ │ ├── LinearRegression.pickle │ │ │ ├── LinearRegression.zip │ │ │ ├── LinearRegressionScore.py │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_epscorecode.sas │ │ │ ├── dmcas_packagescorecode.sas │ │ │ ├── fileMetadata.json │ │ │ ├── inputVar.json │ │ │ ├── outputVar.json │ │ │ └── score_LinearRegression.py │ ├── USA_Housing.csv │ ├── adult.csv │ ├── airline_tweets.csv │ ├── boston_house_prices.csv │ ├── exams.csv │ ├── hmeq.csv │ ├── hmeqModels │ │ ├── DecisionTreeClassifier │ │ │ ├── DecisionTreeClassifier.pickle │ │ │ ├── DecisionTreeClassifier.zip │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_fitstat.json │ │ │ ├── dmcas_lift.json │ │ │ ├── dmcas_misc.json │ │ │ ├── dmcas_relativeimportance.json │ │ │ ├── dmcas_roc.json │ │ │ ├── fileMetadata.json │ │ │ ├── inputVar.json │ │ │ ├── outputVar.json │ │ │ └── score_DecisionTreeClassifier.py │ │ ├── GradientBoosting │ │ │ ├── GradientBoosting.pickle │ │ │ ├── GradientBoosting.zip │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_fitstat.json │ │ │ ├── dmcas_lift.json │ │ │ ├── dmcas_misc.json │ │ │ ├── 
dmcas_relativeimportance.json │ │ │ ├── dmcas_roc.json │ │ │ ├── fileMetadata.json │ │ │ ├── inputVar.json │ │ │ ├── outputVar.json │ │ │ └── score_GradientBoosting.py │ │ ├── H2OBinaryGLM │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_epscorecode.sas │ │ │ ├── dmcas_packagescorecode.sas │ │ │ ├── fileMetadata.json │ │ │ ├── glmfit_binary.pickle │ │ │ ├── glmfit_binary.zip │ │ │ ├── inputVar.json │ │ │ ├── outputVar.json │ │ │ └── score_glmfit_binary.py │ │ ├── H2OMOJOGLM │ │ │ ├── ModelProperties.json │ │ │ ├── dmcas_epscorecode.sas │ │ │ ├── dmcas_packagescorecode.sas │ │ │ ├── fileMetadata.json │ │ │ ├── glmfit_mojo.mojo │ │ │ ├── glmfit_mojo.zip │ │ │ ├── inputVar.json │ │ │ ├── outputVar.json │ │ │ └── score_glmfit_mojo.py │ │ └── RandomForest │ │ │ ├── ModelProperties.json │ │ │ ├── RandomForest.pickle │ │ │ ├── RandomForest.zip │ │ │ ├── dmcas_fitstat.json │ │ │ ├── dmcas_lift.json │ │ │ ├── dmcas_misc.json │ │ │ ├── dmcas_relativeimportance.json │ │ │ ├── dmcas_roc.json │ │ │ ├── fileMetadata.json │ │ │ ├── inputVar.json │ │ │ ├── outputVar.json │ │ │ └── score_RandomForest.py │ ├── iris.csv │ └── titanic.csv ├── data_science_pilot.ipynb ├── direct_REST_calls.py ├── full_lifecycle.py ├── pzmm_binary_classification_model_import.ipynb ├── pzmm_classification_assess_model_bias.ipynb ├── pzmm_complete_model_example_MLFlow_sklearn.ipynb ├── pzmm_custom_kpi_model_parameters.ipynb ├── pzmm_generate_complete_model_card.ipynb ├── pzmm_generate_requirements_json.ipynb ├── pzmm_git_integration_example.ipynb ├── pzmm_h2o_model_import.ipynb ├── pzmm_mlflow_model_import.ipynb ├── pzmm_regression_assess_model_bias.ipynb ├── pzmm_regression_model_import.ipynb ├── pzmm_tensorflow_keras_model_import.ipynb ├── register_Azure_OpenAI_model_using_REST_calls.ipynb ├── register_custom_model.py ├── register_pytorch_comp_vision.ipynb ├── register_sas_classification_model.py ├── register_sas_dlpy_model.py ├── register_sas_regression_model.py ├── 
register_scikit_classification_model.py ├── register_scikit_regression_model.py ├── score_testing_example.ipynb ├── upload_file.py └── viya35_to_viya4_model_migration.ipynb ├── setup.py ├── src └── sasctl │ ├── __init__.py │ ├── __main__.py │ ├── _services │ ├── __init__.py │ ├── cas_management.py │ ├── concepts.py │ ├── data_sources.py │ ├── files.py │ ├── folders.py │ ├── microanalytic_score.py │ ├── model_management.py │ ├── model_publish.py │ ├── model_repository.py │ ├── projects.py │ ├── relationships.py │ ├── report_images.py │ ├── reports.py │ ├── saslogon.py │ ├── score_definitions.py │ ├── score_execution.py │ ├── sentiment_analysis.py │ ├── service.py │ ├── text_categorization.py │ ├── text_parsing.py │ └── workflow.py │ ├── core.py │ ├── exceptions.py │ ├── pzmm │ ├── README.md │ ├── __init__.py │ ├── git_integration.py │ ├── import_model.py │ ├── mlflow_model.py │ ├── model_parameters.py │ ├── pickle_model.py │ ├── pzmmintro.jpg │ ├── template_files │ │ ├── clf_jsons │ │ │ ├── groupMetrics.json │ │ │ └── maxDifferences.json │ │ ├── dmcas_fitstat.json │ │ ├── dmcas_lift.json │ │ ├── dmcas_misc.json │ │ ├── dmcas_relativeimportance.json │ │ ├── dmcas_roc.json │ │ └── reg_jsons │ │ │ ├── groupMetrics.json │ │ │ └── maxDifferences.json │ ├── write_json_files.py │ ├── write_score_code.py │ └── zip_model.py │ ├── services.py │ ├── tasks.py │ └── utils │ ├── __init__.py │ ├── astore.py │ ├── cli.py │ ├── decorators.py │ ├── misc.py │ ├── model_info.py │ ├── model_migration.py │ ├── pymas │ ├── __init__.py │ ├── core.py │ ├── ds2.py │ └── python.py │ └── pyml2ds │ ├── __init__.py │ ├── basic │ ├── __init__.py │ └── tree.py │ ├── connectors │ ├── __init__.py │ └── ensembles │ │ ├── __init__.py │ │ ├── core.py │ │ ├── lgb.py │ │ ├── pmml.py │ │ └── xgb.py │ └── core.py ├── tests ├── __init__.py ├── betamax_utils.py ├── conftest.py ├── integration │ ├── __init__.py │ ├── cassettes │ │ ├── 
test_astore_models.test_bayesnet_binary_classification.viya_202310.swat.lzma │ │ ├── test_astore_models.test_bayesnet_binary_classification.viya_35.swat.lzma │ │ ├── test_astore_models.test_bayesnet_classification.viya_202310.swat.lzma │ │ ├── test_astore_models.test_bayesnet_classification.viya_35.swat.lzma │ │ ├── test_astore_models.test_dtree_regression.viya_202310.swat.lzma │ │ ├── test_astore_models.test_dtree_regression.viya_35.swat.lzma │ │ ├── test_astore_models.test_forest_classification.viya_202310.swat.lzma │ │ ├── test_astore_models.test_forest_classification.viya_35.swat.lzma │ │ ├── test_astore_models.test_forest_regression.viya_202310.swat.lzma │ │ ├── test_astore_models.test_forest_regression.viya_35.swat.lzma │ │ ├── test_astore_models.test_forest_regression_with_nominals.viya_202310.swat.lzma │ │ ├── test_astore_models.test_forest_regression_with_nominals.viya_35.swat.lzma │ │ ├── test_astore_models.test_glm.viya_202310.swat.lzma │ │ ├── test_astore_models.test_glm.viya_35.swat.lzma │ │ ├── test_astore_models.test_gradboost_binary_classification.viya_202310.swat.lzma │ │ ├── test_astore_models.test_gradboost_binary_classification.viya_35.swat.lzma │ │ ├── test_astore_models.test_gradboost_classification.viya_202310.swat.lzma │ │ ├── test_astore_models.test_gradboost_classification.viya_35.swat.lzma │ │ ├── test_astore_models.test_gradboost_regression.viya_202310.swat.lzma │ │ ├── test_astore_models.test_gradboost_regression.viya_35.swat.lzma │ │ ├── test_astore_models.test_gradboost_regression_with_nominals.viya_202310.swat.lzma │ │ ├── test_astore_models.test_gradboost_regression_with_nominals.viya_35.swat.lzma │ │ ├── test_astore_models.test_logistic.viya_202310.swat.lzma │ │ ├── test_astore_models.test_logistic.viya_35.swat.lzma │ │ ├── test_astore_models.test_neuralnet_regression.viya_202310.swat.lzma │ │ ├── test_astore_models.test_neuralnet_regression.viya_35.swat.lzma │ │ ├── test_astore_models.test_svm_classification.viya_202310.swat.lzma 
│ │ ├── test_astore_models.test_svm_classification.viya_35.swat.lzma │ │ ├── test_astore_models.test_svm_regression.viya_202310.swat.lzma │ │ ├── test_astore_models.test_svm_regression.viya_35.swat.lzma │ │ ├── test_astore_models.test_svm_regression_with_nominals.viya_202310.swat.lzma │ │ ├── test_astore_models.test_svm_regression_with_nominals.viya_35.swat.lzma │ │ ├── test_cas_management.test_create_session.viya_35.lzma │ │ ├── test_cas_management.test_del_table.viya_202310.lzma │ │ ├── test_cas_management.test_del_table.viya_35.lzma │ │ ├── test_cas_management.test_delete_session.viya_202310.lzma │ │ ├── test_cas_management.test_delete_session.viya_35.lzma │ │ ├── test_cas_management.test_get_caslib.viya_202310.lzma │ │ ├── test_cas_management.test_get_caslib.viya_35.lzma │ │ ├── test_cas_management.test_get_server.viya_202310.lzma │ │ ├── test_cas_management.test_get_server.viya_35.lzma │ │ ├── test_cas_management.test_get_table.viya_202310.lzma │ │ ├── test_cas_management.test_get_table.viya_35.lzma │ │ ├── test_cas_management.test_info.viya_202310.lzma │ │ ├── test_cas_management.test_info.viya_35.lzma │ │ ├── test_cas_management.test_is_available.viya_202310.lzma │ │ ├── test_cas_management.test_is_available.viya_35.lzma │ │ ├── test_cas_management.test_list_caslibs.viya_202310.lzma │ │ ├── test_cas_management.test_list_caslibs.viya_35.lzma │ │ ├── test_cas_management.test_list_servers.viya_202310.lzma │ │ ├── test_cas_management.test_list_servers.viya_35.lzma │ │ ├── test_cas_management.test_list_sessions.viya_202310.lzma │ │ ├── test_cas_management.test_list_sessions.viya_35.lzma │ │ ├── test_cas_management.test_list_tables.viya_202310.lzma │ │ ├── test_cas_management.test_list_tables.viya_35.lzma │ │ ├── test_cas_management.test_promote_table.viya_202310.lzma │ │ ├── test_cas_management.test_promote_table.viya_35.lzma │ │ ├── test_cas_management.test_save_table.viya_202310.lzma │ │ ├── test_cas_management.test_save_table.viya_35.lzma │ │ ├── 
test_cas_management.test_upload_file.viya_202310.lzma │ │ ├── test_cas_management.test_upload_file.viya_35.lzma │ │ ├── test_command_line.test_command_with_args.viya_202310.lzma │ │ ├── test_command_line.test_command_with_args.viya_35.lzma │ │ ├── test_command_line.test_command_without_args.viya_202310.lzma │ │ ├── test_command_line.test_command_without_args.viya_35.lzma │ │ ├── test_command_line.test_insecure_connection.viya_202310.lzma │ │ ├── test_command_line.test_insecure_connection.viya_35.lzma │ │ ├── test_command_line.test_secure_connection.viya_202310.lzma │ │ ├── test_command_line.test_secure_connection.viya_35.lzma │ │ ├── test_concepts.test_from_inline_docs.viya_202310.lzma │ │ ├── test_concepts.test_from_inline_docs.viya_35.lzma │ │ ├── test_concepts.test_from_table.viya_202310.lzma │ │ ├── test_concepts.test_from_table.viya_202310.swat.lzma │ │ ├── test_concepts.test_from_table.viya_35.lzma │ │ ├── test_concepts.test_from_table.viya_35.swat.lzma │ │ ├── test_data_sources.test_get_cas_source.viya_202310.lzma │ │ ├── test_data_sources.test_get_cas_source.viya_35.lzma │ │ ├── test_data_sources.test_get_caslib.viya_202310.lzma │ │ ├── test_data_sources.test_get_caslib.viya_35.lzma │ │ ├── test_data_sources.test_get_table.viya_202310.lzma │ │ ├── test_data_sources.test_get_table.viya_35.lzma │ │ ├── test_data_sources.test_info.viya_202310.lzma │ │ ├── test_data_sources.test_info.viya_35.lzma │ │ ├── test_data_sources.test_is_available.viya_202310.lzma │ │ ├── test_data_sources.test_is_available.viya_35.lzma │ │ ├── test_data_sources.test_list_cas_sources.viya_202310.lzma │ │ ├── test_data_sources.test_list_cas_sources.viya_35.lzma │ │ ├── test_data_sources.test_list_caslibs.viya_202310.lzma │ │ ├── test_data_sources.test_list_caslibs.viya_35.lzma │ │ ├── test_data_sources.test_list_providers.viya_202310.lzma │ │ ├── test_data_sources.test_list_providers.viya_35.lzma │ │ ├── test_data_sources.test_list_tables.viya_202310.lzma │ │ ├── 
test_data_sources.test_list_tables.viya_35.lzma │ │ ├── test_examples.test_astore_model.viya_202310.lzma │ │ ├── test_examples.test_astore_model.viya_202310.swat.lzma │ │ ├── test_examples.test_astore_model.viya_35.lzma │ │ ├── test_examples.test_astore_model.viya_35.swat.lzma │ │ ├── test_examples.test_direct_rest_calls.viya_202310.lzma │ │ ├── test_examples.test_direct_rest_calls.viya_35.lzma │ │ ├── test_examples.test_full_lifecycle.viya_202310.lzma │ │ ├── test_examples.test_full_lifecycle.viya_35.lzma │ │ ├── test_examples.test_register_custom_model.viya_202310.lzma │ │ ├── test_examples.test_register_custom_model.viya_35.lzma │ │ ├── test_examples.test_register_sas_regression_model.viya_202310.lzma │ │ ├── test_examples.test_register_sas_regression_model.viya_202310.swat.lzma │ │ ├── test_examples.test_register_sas_regression_model.viya_35.lzma │ │ ├── test_examples.test_register_sas_regression_model.viya_35.swat.lzma │ │ ├── test_examples.test_scikit_regression_model.viya_202310.lzma │ │ ├── test_examples.test_scikit_regression_model.viya_35.lzma │ │ ├── test_examples.test_sklearn_model.viya_202310.lzma │ │ ├── test_examples.test_sklearn_model.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_create_file_from_file_object.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_create_file_with_name.viya_202310.lzma │ │ ├── test_files.TestPickleFile.test_create_file_with_name.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_create_file_without_name.viya_202310.lzma │ │ ├── test_files.TestPickleFile.test_create_file_without_name.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_delete_file_with_name.viya_202310.lzma │ │ ├── test_files.TestPickleFile.test_delete_file_with_name.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_delete_file_without_name.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_get_file_content.viya_202310.lzma │ │ ├── test_files.TestPickleFile.test_get_file_content.viya_35.lzma │ │ ├── 
test_files.TestPickleFile.test_get_file_from_file_object.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_get_file_with_name.viya_202310.lzma │ │ ├── test_files.TestPickleFile.test_get_file_with_name.viya_35.lzma │ │ ├── test_files.TestPickleFile.test_get_file_without_name.viya_35.lzma │ │ ├── test_files.TestTextFile.test_create_file_from_file_object.viya_35.lzma │ │ ├── test_files.TestTextFile.test_create_file_with_name.viya_202310.lzma │ │ ├── test_files.TestTextFile.test_create_file_with_name.viya_35.lzma │ │ ├── test_files.TestTextFile.test_create_file_without_name.viya_202310.lzma │ │ ├── test_files.TestTextFile.test_create_file_without_name.viya_35.lzma │ │ ├── test_files.TestTextFile.test_delete_file_with_name.viya_202310.lzma │ │ ├── test_files.TestTextFile.test_delete_file_with_name.viya_35.lzma │ │ ├── test_files.TestTextFile.test_delete_file_without_name.viya_35.lzma │ │ ├── test_files.TestTextFile.test_get_file_content.viya_202310.lzma │ │ ├── test_files.TestTextFile.test_get_file_content.viya_35.lzma │ │ ├── test_files.TestTextFile.test_get_file_from_file_object.viya_35.lzma │ │ ├── test_files.TestTextFile.test_get_file_with_name.viya_202310.lzma │ │ ├── test_files.TestTextFile.test_get_file_with_name.viya_35.lzma │ │ ├── test_files.TestTextFile.test_get_file_without_name.viya_35.lzma │ │ ├── test_files.test_list_files.viya_202310.lzma │ │ ├── test_files.test_list_files.viya_35.lzma │ │ ├── test_folders.TestFolders.test_create_folder.viya_202310.lzma │ │ ├── test_folders.TestFolders.test_create_folder.viya_35.lzma │ │ ├── test_folders.TestFolders.test_delete_folder.viya_202310.lzma │ │ ├── test_folders.TestFolders.test_delete_folder.viya_35.lzma │ │ ├── test_folders.TestFolders.test_get_folder.viya_202310.lzma │ │ ├── test_folders.TestFolders.test_get_folder.viya_35.lzma │ │ ├── test_folders.TestFolders.test_list_folders.viya_202310.lzma │ │ ├── test_folders.TestFolders.test_list_folders.viya_35.lzma │ │ ├── 
test_folders.TestFolders.test_list_with_pagination.viya_202310.lzma │ │ ├── test_folders.TestFolders.test_list_with_pagination.viya_35.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_create_cas_destination.viya_202310.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_create_cas_destination.viya_35.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_create_mas_destination.viya_202310.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_create_mas_destination.viya_35.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_publish_cas.viya_202310.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_publish_cas.viya_35.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_publish_mas.viya_35.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_register_model.viya_202310.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_register_model.viya_202310.swat.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_register_model.viya_35.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_register_model.viya_35.swat.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_score_cas.viya_202310.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_score_cas.viya_202310.swat.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_score_cas.viya_35.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_score_cas.viya_35.swat.lzma │ │ ├── test_full_pipelines.TestAStoreRegressionModel.test_score_mas.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_create_cas_destination.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_create_cas_destination.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_publish_cas.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_publish_cas.viya_35.lzma │ │ ├── 
test_full_pipelines.TestSklearnClassificationModel.test_publish_mas.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_publish_mas.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_register_model.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_register_model.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_score_mas.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnClassificationModel.test_score_mas.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_create_cas_destination.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_create_cas_destination.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_publish_cas.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_publish_cas.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_publish_mas.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_publish_mas.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_register_model.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_register_model.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_score_cas.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_score_cas.viya_202310.swat.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_score_cas.viya_35.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_score_cas.viya_35.swat.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_score_mas.viya_202310.lzma │ │ ├── test_full_pipelines.TestSklearnRegressionModel.test_score_mas.viya_35.lzma │ │ ├── test_import_model.test_import_model.viya_202310.lzma │ │ ├── test_import_model.test_import_model.viya_35.lzma │ │ ├── test_import_model.test_model_exists.viya_202310.lzma │ │ ├── 
test_import_model.test_model_exists.viya_35.lzma │ │ ├── test_import_model.test_project_exists.viya_202310.lzma │ │ ├── test_import_model.test_project_exists.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_call_python_module_steps.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_call_python_module_steps.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_call_python_module_steps_numpy.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_call_python_module_steps_numpy.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_call_python_module_steps_pandas.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_call_python_module_steps_pandas.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_create_python_module.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_create_python_module.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_delete_module.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_delete_module.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_get_module.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_get_module.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_list_module_steps.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_list_module_steps.viya_35.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_list_modules.viya_202310.lzma │ │ ├── test_microanalytic_score.TestMicroAnalyticScore.test_list_modules.viya_35.lzma │ │ ├── test_model_publish.TestModelPublish.test_create_cas_destination.viya_202310.lzma │ │ ├── test_model_publish.TestModelPublish.test_create_cas_destination.viya_35.lzma │ │ ├── test_model_publish.TestModelPublish.test_create_mas_destination.viya_202310.lzma │ │ ├── 
test_model_publish.TestModelPublish.test_create_mas_destination.viya_35.lzma │ │ ├── test_model_publish.TestModelPublish.test_delete_cas_destination.viya_202310.lzma │ │ ├── test_model_publish.TestModelPublish.test_delete_cas_destination.viya_35.lzma │ │ ├── test_model_publish.TestModelPublish.test_get_publish_destination.viya_202310.lzma │ │ ├── test_model_publish.TestModelPublish.test_get_publish_destination.viya_35.lzma │ │ ├── test_model_publish.TestModelPublish.test_list_publish_destinations.viya_202310.lzma │ │ ├── test_model_publish.TestModelPublish.test_list_publish_destinations.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_copy_astore.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_copy_astore.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_create_model_version.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_create_model_version.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_delete_model.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_delete_model.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_delete_project.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_delete_project.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_get_model_contents.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_get_model_contents.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_get_model_versions.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_get_model_versions.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_model_import.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_model_import.viya_202310.swat.lzma │ │ ├── test_model_repository.TestAStoreModel.test_model_import.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_model_import.viya_35.swat.lzma │ │ ├── 
test_model_repository.TestAStoreModel.test_model_publish.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_model_publish.viya_35.lzma │ │ ├── test_model_repository.TestAStoreModel.test_module_execute.viya_202310.lzma │ │ ├── test_model_repository.TestAStoreModel.test_module_execute.viya_35.lzma │ │ ├── test_model_repository.TestBasicModel.test_create_model.viya_202310.lzma │ │ ├── test_model_repository.TestBasicModel.test_create_model.viya_35.lzma │ │ ├── test_model_repository.TestBasicModel.test_create_project.viya_202310.lzma │ │ ├── test_model_repository.TestBasicModel.test_create_project.viya_35.lzma │ │ ├── test_model_repository.TestBasicModel.test_delete_model.viya_202310.lzma │ │ ├── test_model_repository.TestBasicModel.test_delete_model.viya_35.lzma │ │ ├── test_model_repository.TestBasicModel.test_delete_project.viya_202310.lzma │ │ ├── test_model_repository.TestBasicModel.test_delete_project.viya_35.lzma │ │ ├── test_model_repository.TestBasicModel.test_project_missing.viya_202310.lzma │ │ ├── test_model_repository.TestBasicModel.test_project_missing.viya_35.lzma │ │ ├── test_projects.test_create_project.viya_202310.lzma │ │ ├── test_projects.test_create_project.viya_35.lzma │ │ ├── test_projects.test_delete_project.viya_202310.lzma │ │ ├── test_projects.test_delete_project.viya_35.lzma │ │ ├── test_projects.test_get_project.viya_202310.lzma │ │ ├── test_projects.test_get_project.viya_35.lzma │ │ ├── test_projects.test_list_projects.viya_202310.lzma │ │ ├── test_projects.test_list_projects.viya_35.lzma │ │ ├── test_relationships.test_list_relationships.viya_202310.lzma │ │ ├── test_relationships.test_list_relationships.viya_35.lzma │ │ ├── test_report_images.TestReportImages.test_get_elements.viya_202310.lzma │ │ ├── test_report_images.TestReportImages.test_get_elements.viya_35.lzma │ │ ├── test_report_images.TestReportImages.test_get_images.viya_202310.lzma │ │ ├── test_report_images.TestReportImages.test_get_images.viya_35.lzma │ │ ├── 
test_report_images.TestReportImages.test_get_report.viya_202310.lzma │ │ ├── test_report_images.TestReportImages.test_get_report.viya_35.lzma │ │ ├── test_report_images.TestReportImages.test_get_single_element.viya_202310.lzma │ │ ├── test_report_images.TestReportImages.test_get_single_element.viya_35.lzma │ │ ├── test_reports.test_get_report.viya_202310.lzma │ │ ├── test_reports.test_get_report.viya_35.lzma │ │ ├── test_reports.test_list_reports.viya_202310.lzma │ │ ├── test_reports.test_list_reports.viya_35.lzma │ │ ├── test_sentiment_analysis.test_from_inline_docs.viya_202310.lzma │ │ ├── test_sentiment_analysis.test_from_inline_docs.viya_35.lzma │ │ ├── test_sentiment_analysis.test_from_table.viya_202310.lzma │ │ ├── test_sentiment_analysis.test_from_table.viya_202310.swat.lzma │ │ ├── test_sentiment_analysis.test_from_table.viya_35.lzma │ │ ├── test_sentiment_analysis.test_from_table.viya_35.swat.lzma │ │ ├── test_sentiment_analysis.test_service_removed_error.viya_202310.lzma │ │ ├── test_sentiment_analysis.test_service_removed_error.viya_35.lzma │ │ ├── test_tasks.TestModels.test_created_project.viya_202310.lzma │ │ ├── test_tasks.TestModels.test_created_project.viya_35.lzma │ │ ├── test_tasks.TestModels.test_publish_sklearn.viya_35.lzma │ │ ├── test_tasks.TestModels.test_publish_sklearn_again.viya_35.lzma │ │ ├── test_tasks.TestModels.test_register_astore.viya_202310.lzma │ │ ├── test_tasks.TestModels.test_register_astore.viya_202310.swat.lzma │ │ ├── test_tasks.TestModels.test_register_astore.viya_35.lzma │ │ ├── test_tasks.TestModels.test_register_astore.viya_35.swat.lzma │ │ ├── test_tasks.TestModels.test_register_sklearn.viya_202310.lzma │ │ ├── test_tasks.TestModels.test_register_sklearn.viya_35.lzma │ │ ├── test_tasks.TestModels.test_score_sklearn.viya_35.lzma │ │ ├── test_tasks.TestSklearnLinearModel.test_create_performance_definition.viya_35.lzma │ │ ├── test_tasks.TestSklearnLinearModel.test_register_model.viya_202310.lzma │ │ ├── 
test_tasks.TestSklearnLinearModel.test_register_model.viya_35.lzma │ │ ├── test_tasks.TestSklearnLinearModel.test_update_model_performance.viya_35.lzma │ │ ├── test_tasks.TestSklearnLinearModel.test_update_model_performance.viya_35.swat.lzma │ │ ├── test_text_categorization.test_from_table.viya_202310.lzma │ │ ├── test_text_categorization.test_from_table.viya_35.lzma │ │ ├── test_text_categorization.test_service_removed_error.viya_202310.lzma │ │ ├── test_text_categorization.test_service_removed_error.viya_35.lzma │ │ ├── test_text_parsing.test_from_table.viya_202310.lzma │ │ ├── test_text_parsing.test_from_table.viya_202310.swat.lzma │ │ ├── test_text_parsing.test_from_table.viya_35.lzma │ │ ├── test_text_parsing.test_from_table.viya_35.swat.lzma │ │ ├── test_text_parsing.test_parsing_inline_docs.viya_202310.lzma │ │ ├── test_text_parsing.test_parsing_inline_docs.viya_35.lzma │ │ ├── test_text_parsing.test_service_removed_error.viya_202310.lzma │ │ ├── test_text_parsing.test_service_removed_error.viya_35.lzma │ │ ├── test_write_json_files.test_calculate_model_statistics.viya_202310.lzma │ │ ├── test_write_json_files.test_calculate_model_statistics.viya_202310.swat.lzma │ │ ├── test_write_json_files.test_calculate_model_statistics.viya_35.lzma │ │ ├── test_write_json_files.test_calculate_model_statistics.viya_35.swat.lzma │ │ ├── test_write_score_code.test_write_score_code.viya_202310.lzma │ │ └── test_write_score_code.test_write_score_code.viya_35.lzma │ ├── test_astore_models.py │ ├── test_cas_management.py │ ├── test_command_line.py │ ├── test_concepts.py │ ├── test_data_sources.py │ ├── test_examples.py │ ├── test_files.py │ ├── test_folders.py │ ├── test_full_pipelines.py │ ├── test_import_model.py │ ├── test_microanalytic_score.py │ ├── test_model_parameters.py │ ├── test_model_publish.py │ ├── test_model_repository.py │ ├── test_projects.py │ ├── test_pymas.py │ ├── test_relationships.py │ ├── test_report_images.py │ ├── test_reports.py │ ├── 
test_sentiment_analysis.py │ ├── test_tasks.py │ ├── test_text_categorization.py │ ├── test_text_parsing.py │ ├── test_write_json_files.py │ └── test_write_score_code.py ├── pyml2ds_data │ ├── gbm.pmml │ ├── gbm_datastep │ ├── lgb.pkl │ ├── lgb_datastep │ ├── xgb.pkl │ └── xgb_datastep ├── scenarios │ ├── cassettes │ │ ├── test_project_with_sas_and_sklearn_classification_models.test.viya_202209.lzma │ │ ├── test_project_with_sas_and_sklearn_classification_models.test.viya_202209.swat.lzma │ │ ├── test_project_with_sas_and_sklearn_classification_models.test.viya_35.lzma │ │ ├── test_project_with_sas_and_sklearn_classification_models.test.viya_35.swat.lzma │ │ ├── test_project_with_sas_and_sklearn_regression_models.test.viya_202209.lzma │ │ ├── test_project_with_sas_and_sklearn_regression_models.test.viya_202209.swat.lzma │ │ ├── test_project_with_sas_and_sklearn_regression_models.test.viya_35.lzma │ │ └── test_project_with_sas_and_sklearn_regression_models.test.viya_35.swat.lzma │ ├── test_project_with_sas_and_sklearn_classification_models.py │ └── test_project_with_sas_and_sklearn_regression_models.py └── unit │ ├── __init__.py │ ├── test_auth.py │ ├── test_command_line.py │ ├── test_core.py │ ├── test_decorators.py │ ├── test_folders.py │ ├── test_git_integration.py │ ├── test_import_model.py │ ├── test_microanalytic_score.py │ ├── test_misc_utils.py │ ├── test_mlflow_model.py │ ├── test_model_info_onnx.py │ ├── test_model_info_sklearn.py │ ├── test_model_info_torch.py │ ├── test_model_management.py │ ├── test_model_parameters.py │ ├── test_model_repository.py │ ├── test_module_publish.py │ ├── test_pageditemiterator.py │ ├── test_pagedlist.py │ ├── test_pageiterator.py │ ├── test_pickle_model.py │ ├── test_pymas.py │ ├── test_pyml2ds.py │ ├── test_restobj.py │ ├── test_saslogon.py │ ├── test_score_definitions.py │ ├── test_score_execution.py │ ├── test_session.py │ ├── test_tasks.py │ ├── test_workflow.py │ ├── test_write_json_files.py │ ├── 
test_write_score_code.py │ └── test_zip_model.py └── tox.ini /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | test_patterns = [ 4 | "tests/**" 5 | ] 6 | 7 | exclude_patterns = [ 8 | "tests/**" 9 | ] 10 | 11 | 12 | [[analyzers]] 13 | name = "python" 14 | enabled = true 15 | 16 | [analyzers.meta] 17 | runtime_version = "3.x.x" 18 | max_line_length = 90 19 | skip_doc_coverage = ["init", "magic", "nonpublic"] -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Report an issue 3 | about: Report a problem you're having or a bug you found 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the issue** 11 | A clear and concise description of the issue you're experiencing. 12 | 13 | **To Reproduce** 14 | Steps or example code to reproduce the issue. 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Stack Trace** 20 | If you're experiencing an exception, include the full stack trace and error message. 21 | 22 | **Version** 23 | What version of `sasctl` are you using? 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Request a feature 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 
15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ask a question 3 | about: Ask for help if you need information or are experiencing a problem. 4 | title: '' 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "Security Scan" 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | # The branches below must be a subset of the branches above 8 | branches: [ master ] 9 | schedule: 10 | - cron: '43 20 * * 4' 11 | 12 | jobs: 13 | analyze: 14 | name: Analyze 15 | runs-on: ubuntu-latest 16 | 17 | permissions: 18 | # Require to update security info on GH repo 19 | security-events: write 20 | 21 | strategy: 22 | fail-fast: false 23 | matrix: 24 | language: [ 'python' ] 25 | 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v3 29 | 30 | # Initializes the CodeQL tools for scanning. 
31 | - name: Initialize CodeQL 32 | uses: github/codeql-action/init@v2 33 | with: 34 | languages: python 35 | 36 | - name: Perform CodeQL Analysis 37 | uses: github/codeql-action/analyze@v2 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Assorted files that shouldn't be uploaded 2 | .private/* 3 | _private/* 4 | 5 | ## Editor temporary/working/backup files 6 | .#* 7 | *\#*\# 8 | [#]*# 9 | *~ 10 | *$ 11 | *.bak 12 | *.old 13 | *.log 14 | *.nfs* 15 | *.swo 16 | *.swp 17 | *.pdb 18 | .project 19 | .pydevproject 20 | .settings 21 | .idea 22 | .pytest_cache 23 | .vagrant 24 | .noseids 25 | .ipynb_checkpoints 26 | .tags 27 | *.iml 28 | _scratch 29 | 30 | ## Python files 31 | # setup.py working directory 32 | build 33 | # setup.py dist directory 34 | dist 35 | # Egg metadata 36 | *.egg-info 37 | .eggs 38 | .pypirc 39 | *.pyc 40 | 41 | ## tox testing tool 42 | .tox 43 | # rope 44 | .ropeproject 45 | # wheel files 46 | *.whl 47 | **/wheelhouse/* 48 | # coverage 49 | .coverage 50 | swat.egg-info/ 51 | __pycache__/ 52 | _stats.txt 53 | cover/ 54 | test-reports/ 55 | .pytest_cache 56 | 57 | ## OS generated files 58 | .directory 59 | .gdb_history 60 | .DS_Store 61 | ehthumbs.db 62 | Icon? 63 | Thumbs.db 64 | 65 | ## Documentation generated files 66 | doc/build 67 | doc/source/generated 68 | doc/_build 69 | doc/.doctrees 70 | 71 | ## VS CODE settings 72 | .vscode/settings.json 73 | -------------------------------------------------------------------------------- /ContributorAgreement.txt: -------------------------------------------------------------------------------- 1 | Contributor Agreement 2 | 3 | Version 1.1 4 | 5 | Contributions to this software are accepted only when they are 6 | properly accompanied by a Contributor Agreement. 
The Contributor 7 | Agreement for this software is the Developer's Certificate of Origin 8 | 1.1 (DCO) as provided with and required for accepting contributions 9 | to the Linux kernel. 10 | 11 | In each contribution proposed to be included in this software, the 12 | developer must include a "sign-off" that denotes consent to the 13 | terms of the Developer's Certificate of Origin. The sign-off is 14 | a line of text in the description that accompanies the change, 15 | certifying that you have the right to provide the contribution 16 | to be included. For changes provided in source code control (for 17 | example, via a Git pull request) the sign-off must be included in 18 | the commit message in source code control. For changes provided 19 | in email or issue tracking, the sign-off must be included in the 20 | email or the issue, and the sign-off will be incorporated into the 21 | permanent commit message if the contribution is accepted into the 22 | official source code. 23 | 24 | If you can certify the below: 25 | 26 | Developer's Certificate of Origin 1.1 27 | 28 | By making a contribution to this project, I certify that: 29 | 30 | (a) The contribution was created in whole or in part by me and I 31 | have the right to submit it under the open source license 32 | indicated in the file; or 33 | 34 | (b) The contribution is based upon previous work that, to the best 35 | of my knowledge, is covered under an appropriate open source 36 | license and I have the right under that license to submit that 37 | work with modifications, whether created in whole or in part 38 | by me, under the same open source license (unless I am 39 | permitted to submit under a different license), as indicated 40 | in the file; or 41 | 42 | (c) The contribution was provided directly to me by some other 43 | person who certified (a), (b) or (c) and I have not modified 44 | it. 
45 | 46 | (d) I understand and agree that this project and the contribution 47 | are public and that a record of the contribution (including all 48 | personal information I submit with it, including my sign-off) is 49 | maintained indefinitely and may be redistributed consistent with 50 | this project or the open source license(s) involved. 51 | 52 | then you just add a line saying 53 | 54 | Signed-off-by: Random J Developer 55 | 56 | using your real name (sorry, no pseudonyms or anonymous contributions.) -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | include CHANGELOG.md 4 | include CONTRIBUTING.md 5 | include ContributorAgreement.txt 6 | include SUPPORT.md 7 | include MANIFEST.in 8 | 9 | include src/sasctl/pzmm/template_files/*.json 10 | include src/sasctl/pzmm/template_files/clf_jsons/*.json 11 | include src/sasctl/pzmm/template_files/reg_jsons/*.json -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | ## Support 2 | 3 | We use GitHub for tracking bugs and feature requests. 4 | Please submit a GitHub issue for support. Consult 5 | [GitHub Help](https://help.github.com/en/articles/about-issues) for more 6 | information on submitting issues. 7 | 8 | If you encounter an issue and fix it yourself, or implement a new 9 | feature that you'd like to see included in the project, please 10 | see [CONTRIBUTING.md](CONTRIBUTING.md) for information on how to 11 | contribute. -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = sasctl 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /doc/_templates/autosummary/attribute.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | {{ fullname | escape | underline}} 4 | 5 | .. currentmodule:: {{ module }} 6 | 7 | attribute 8 | 9 | .. auto{{ objtype }}:: {{ fullname | replace("numpy.", "numpy::") }} 10 | 11 | {# In the fullname (e.g. `numpy.ma.MaskedArray.methodname`), the module name 12 | is ambiguous. Using a `::` separator (e.g. `numpy::ma.MaskedArray.methodname`) 13 | specifies `numpy` as the module name. #} 14 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/base.rst: -------------------------------------------------------------------------------- 1 | {% if objtype == 'property' %} 2 | :orphan: 3 | {% endif %} 4 | 5 | {{ fullname | escape | underline}} 6 | 7 | .. currentmodule:: {{ module }} 8 | 9 | {% if objtype == 'property' %} 10 | property 11 | {% endif %} 12 | 13 | .. auto{{ objtype }}:: {{ fullname | replace("numpy.", "numpy::") }} 14 | 15 | {# In the fullname (e.g. `numpy.ma.MaskedArray.methodname`), the module name 16 | is ambiguous. Using a `::` separator (e.g. `numpy::ma.MaskedArray.methodname`) 17 | specifies `numpy` as the module name. 
#} 18 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {% extends "!autosummary/class.rst" %} 2 | 3 | {% block methods %} 4 | {% if methods %} 5 | .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. 6 | .. autosummary:: 7 | :toctree: 8 | {% for item in all_methods %} 9 | {%- if not item.startswith('_') or item in ['__call__'] %} 10 | {{ name }}.{{ item }} 11 | {%- endif -%} 12 | {%- endfor %} 13 | {% endif %} 14 | {% endblock %} 15 | 16 | {% block attributes %} 17 | {% if attributes %} 18 | .. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. 19 | .. autosummary:: 20 | :toctree: 21 | {% for item in all_attributes %} 22 | {%- if not item.startswith('_') %} 23 | {{ name }}.{{ item }} 24 | {%- endif -%} 25 | {%- endfor %} 26 | {% endif %} 27 | {% endblock %} 28 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/member.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | {{ fullname | escape | underline}} 4 | 5 | .. currentmodule:: {{ module }} 6 | 7 | member 8 | 9 | .. auto{{ objtype }}:: {{ fullname | replace("numpy.", "numpy::") }} 10 | 11 | {# In the fullname (e.g. `numpy.ma.MaskedArray.methodname`), the module name 12 | is ambiguous. Using a `::` separator (e.g. `numpy::ma.MaskedArray.methodname`) 13 | specifies `numpy` as the module name. #} 14 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/method.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | {{ fullname | escape | underline}} 4 | 5 | .. 
currentmodule:: {{ module }} 6 | 7 | method 8 | 9 | .. auto{{ objtype }}:: {{ fullname | replace("numpy.", "numpy::") }} 10 | 11 | {# In the fullname (e.g. `numpy.ma.MaskedArray.methodname`), the module name 12 | is ambiguous. Using a `::` separator (e.g. `numpy::ma.MaskedArray.methodname`) 13 | specifies `numpy` as the module name. #} 14 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/minimal_module.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block docstring %} 6 | {% endblock %} 7 | 8 | 9 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | {% extends "!autosummary/module.rst" %} 2 | 3 | {# This file is almost the same as the default, but adds :toctree: to the autosummary directives. 4 | The original can be found at `sphinx/ext/autosummary/templates/autosummary/module.rst`. #} 5 | 6 | {% block attributes %} 7 | {% if attributes %} 8 | .. rubric:: Module Attributes 9 | 10 | .. autosummary:: 11 | :toctree: 12 | {% for item in attributes %} 13 | {{ item }} 14 | {%- endfor %} 15 | {% endif %} 16 | {% endblock %} 17 | 18 | {% block functions %} 19 | {% if functions %} 20 | .. rubric:: Functions 21 | 22 | .. autosummary:: 23 | :toctree: 24 | {% for item in functions %} 25 | {{ item }} 26 | {%- endfor %} 27 | {% endif %} 28 | {% endblock %} 29 | 30 | {% block classes %} 31 | {% if classes %} 32 | .. rubric:: Classes 33 | 34 | .. 
autosummary:: 35 | :toctree: 36 | {% for item in classes %} 37 | {{ item }} 38 | {%- endfor %} 39 | {% endif %} 40 | {% endblock %} 41 | -------------------------------------------------------------------------------- /doc/api/pzmm/import_model.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm.import\_model 2 | =============================== 3 | 4 | 5 | .. automodule:: sasctl.pzmm.import_model 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/api/pzmm/mlflow_model.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm.mlflow\_model 2 | =============================== 3 | 4 | 5 | .. automodule:: sasctl.pzmm.mlflow_model 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/api/pzmm/model_parameters.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm.model\_parameters 2 | =============================== 3 | 4 | 5 | .. automodule:: sasctl.pzmm.model_parameters 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/api/pzmm/pickle_model.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm.pickle\_model 2 | =============================== 3 | 4 | 5 | .. automodule:: sasctl.pzmm.pickle_model 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/api/pzmm/write_json_files.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm.write\_json\_files 2 | =============================== 3 | 4 | 5 | .. 
automodule:: sasctl.pzmm.write_json_files 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/api/pzmm/write_score_code.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm.write\_score\_code 2 | =============================== 3 | 4 | 5 | .. automodule:: sasctl.pzmm.write_score_code 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/api/pzmm/zip_model.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm.zip\_model 2 | =============================== 3 | 4 | 5 | .. automodule:: sasctl.pzmm.zip_model 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/api/sasctl.core.rst: -------------------------------------------------------------------------------- 1 | sasctl.core 2 | ======================= 3 | 4 | .. automodule:: sasctl.core 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | -------------------------------------------------------------------------------- /doc/api/sasctl.current_session.rst: -------------------------------------------------------------------------------- 1 | current_session 2 | =============== 3 | 4 | -------------------------------------------------------------------------------- /doc/api/sasctl.pzmm.rst: -------------------------------------------------------------------------------- 1 | sasctl.pzmm package 2 | ==================== 3 | 4 | .. toctree:: 5 | :glob: 6 | 7 | pzmm/* 8 | -------------------------------------------------------------------------------- /doc/api/sasctl.rst: -------------------------------------------------------------------------------- 1 | sasctl package 2 | ============== 3 | 4 | Module contents 5 | --------------- 6 | 7 | .. 
toctree:: 8 | 9 | sasctl.session 10 | sasctl.current_session 11 | 12 | Submodules 13 | ---------- 14 | 15 | .. toctree:: 16 | 17 | sasctl.core 18 | sasctl.tasks 19 | 20 | Subpackages 21 | ----------- 22 | 23 | .. toctree:: 24 | 25 | sasctl.pzmm 26 | sasctl.services 27 | sasctl.utils 28 | -------------------------------------------------------------------------------- /doc/api/sasctl.services.rst: -------------------------------------------------------------------------------- 1 | sasctl.services package 2 | ======================= 3 | 4 | .. toctree:: 5 | :glob: 6 | 7 | services/* -------------------------------------------------------------------------------- /doc/api/sasctl.session.rst: -------------------------------------------------------------------------------- 1 | Session 2 | ======= 3 | 4 | .. autoclass:: sasctl.core.Session 5 | :members: 6 | -------------------------------------------------------------------------------- /doc/api/sasctl.tasks.rst: -------------------------------------------------------------------------------- 1 | sasctl.tasks 2 | ======================= 3 | 4 | .. automodule:: sasctl.tasks 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/sasctl.utils.pymas.rst: -------------------------------------------------------------------------------- 1 | sasctl.utils.pymas package 2 | ========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | sasctl.utils.pymas.core module 8 | ------------------------------ 9 | 10 | .. automodule:: sasctl.utils.pymas.core 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | sasctl.utils.pymas.ds2 module 16 | ----------------------------- 17 | 18 | .. automodule:: sasctl.utils.pymas.ds2 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | sasctl.utils.pymas.python module 24 | -------------------------------- 25 | 26 | .. 
automodule:: sasctl.utils.pymas.python 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: sasctl.utils.pymas 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /doc/api/sasctl.utils.pyml2ds.rst: -------------------------------------------------------------------------------- 1 | sasctl.utils.pyml2ds package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | sasctl.utils.pyml2ds.pyml2ds module 8 | ----------------------------------- 9 | 10 | .. automodule:: sasctl.utils.pyml2ds.pyml2ds 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | -------------------------------------------------------------------------------- /doc/api/sasctl.utils.rst: -------------------------------------------------------------------------------- 1 | sasctl.utils package 2 | ==================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | sasctl.utils.pymas 10 | sasctl.utils.pyml2ds 11 | 12 | Submodules 13 | ---------- 14 | 15 | sasctl.utils.astore module 16 | -------------------------- 17 | 18 | .. automodule:: sasctl.utils.astore 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: sasctl.utils 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /doc/api/services/cas_management.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.cas\_management 2 | =============================== 3 | 4 | 5 | .. 
automodule:: sasctl._services.cas_management 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | 10 | -------------------------------------------------------------------------------- /doc/api/services/concepts.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.concepts 2 | ======================== 3 | 4 | .. automodule:: sasctl._services.concepts 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/data_sources.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.data\_sources 2 | ============================= 3 | 4 | .. automodule:: sasctl._services.data_sources 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/files.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.files 2 | ===================== 3 | 4 | .. automodule:: sasctl._services.files 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/folders.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.folders 2 | ======================= 3 | 4 | .. automodule:: sasctl._services.folders 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/microanalytic_score.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.microanalytic\_score 2 | ==================================== 3 | 4 | .. 
automodule:: sasctl._services.microanalytic_score 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/model_management.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.model\_management 2 | ================================= 3 | 4 | .. automodule:: sasctl._services.model_management 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/model_publish.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.model\_publish 2 | ============================== 3 | 4 | .. automodule:: sasctl._services.model_publish 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/model_repository.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.model\_repository 2 | ================================= 3 | 4 | .. automodule:: sasctl._services.model_repository 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/projects.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.projects 2 | ======================== 3 | 4 | .. automodule:: sasctl._services.projects 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/relationships.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.relationships 2 | ============================= 3 | 4 | .. 
automodule:: sasctl._services.relationships 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/report_images.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.report_images 2 | ============================= 3 | 4 | .. automodule:: sasctl._services.report_images 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/reports.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.reports 2 | ======================= 3 | 4 | .. automodule:: sasctl._services.reports 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/saslogon.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.saslogon 2 | ======================== 3 | 4 | .. automodule:: sasctl._services.saslogon 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/sentiment_analysis.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.sentiment\_analysis 2 | =================================== 3 | 4 | .. automodule:: sasctl._services.sentiment_analysis 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/text_categorization.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.text\_categorization 2 | ==================================== 3 | 4 | .. 
automodule:: sasctl._services.text_categorization 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/services/text_parsing.rst: -------------------------------------------------------------------------------- 1 | sasctl.services.text\_parsing 2 | ============================= 3 | 4 | .. automodule:: sasctl._services.text_parsing 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=sasctl 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 
24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/GradientBoost/GradientBoost.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/examModels/GradientBoost/GradientBoost.pickle -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/GradientBoost/GradientBoost.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/examModels/GradientBoost/GradientBoost.zip -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/GradientBoost/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "GradientBoost", 3 | "description": "Description for the GradientBoost model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Prediction", 9 | "targetVariable": "composite_score", 10 | "targetEvent": "", 11 | "targetLevel": "INTERVAL", 12 | "eventProbVar": "", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.11.3", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/GradientBoost/fileMetadata.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_GradientBoost.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "GradientBoost.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/GradientBoost/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "gender_male", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "raceethnicity_group_A", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "raceethnicity_group_B", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "raceethnicity_group_C", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "raceethnicity_group_D", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "parental_level_of_education_associates_degree", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "parental_level_of_education_bachelors_degree", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "parental_level_of_education_high_school", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "parental_level_of_education_some_college", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "parental_level_of_education_some_high_school", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "lunch_freereduced", 64 | "level": "interval", 65 | "type": 
"decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "test_preparation_course_none", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/GradientBoost/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_PREDICTION", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | } 8 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/LinearRegression/LinearRegression.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/examModels/LinearRegression/LinearRegression.pickle -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/LinearRegression/LinearRegression.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/examModels/LinearRegression/LinearRegression.zip -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/LinearRegression/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "LinearRegression", 3 | "description": "Description for the LinearRegression model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Prediction", 9 | "targetVariable": "composite_score", 10 | "targetEvent": "", 11 | "targetLevel": "INTERVAL", 12 | "eventProbVar": "", 13 | "modeler": "sasdemo", 14 | "tool": "Python 
3", 15 | "toolVersion": "3.11.3", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/LinearRegression/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_LinearRegression.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "LinearRegression.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/LinearRegression/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "gender_male", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "raceethnicity_group_A", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "raceethnicity_group_B", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "raceethnicity_group_C", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "raceethnicity_group_D", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "parental_level_of_education_associates_degree", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "parental_level_of_education_bachelors_degree", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "parental_level_of_education_high_school", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "parental_level_of_education_some_college", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | 
}, 56 | { 57 | "name": "parental_level_of_education_some_high_school", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "lunch_freereduced", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "test_preparation_course_none", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/LinearRegression/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_PREDICTION", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | } 8 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/RandomForest/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "RandomForest", 3 | "description": "Description for the RandomForest model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Prediction", 9 | "targetVariable": "composite_score", 10 | "targetEvent": "", 11 | "targetLevel": "INTERVAL", 12 | "eventProbVar": "", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.11.3", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/RandomForest/RandomForest.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/examModels/RandomForest/RandomForest.pickle -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/RandomForest/RandomForest.zip: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/examModels/RandomForest/RandomForest.zip -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/RandomForest/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_RandomForest.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "RandomForest.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/RandomForest/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "gender_male", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "raceethnicity_group_A", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "raceethnicity_group_B", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "raceethnicity_group_C", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "raceethnicity_group_D", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "parental_level_of_education_associates_degree", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "parental_level_of_education_bachelors_degree", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "parental_level_of_education_high_school", 46 | "level": "interval", 47 | "type": 
"decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "parental_level_of_education_some_college", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "parental_level_of_education_some_high_school", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "lunch_freereduced", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "test_preparation_course_none", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/examModels/RandomForest/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_PREDICTION", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | } 8 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/DecisionTree/DecisionTree.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/titanicModels/DecisionTree/DecisionTree.pickle -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/DecisionTree/DecisionTree.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/titanicModels/DecisionTree/DecisionTree.zip -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/DecisionTree/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": 
"DecisionTree", 3 | "description": "Description for the DecisionTree model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "Survived", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.11.3", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/DecisionTree/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_DecisionTree.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "DecisionTree.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/DecisionTree/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Age", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "SibSp", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "Parch", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "Fare", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "Sex_female", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "Sex_male", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "Pclass_1", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "Pclass_2", 46 | 
"level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "Pclass_3", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "Embarked_C", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "Embarked_Q", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "Embarked_S", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/DecisionTree/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": "string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/DecisionTree/score_DecisionTree.py: -------------------------------------------------------------------------------- 1 | import math 2 | import pickle 3 | import pandas as pd 4 | import numpy as np 5 | from pathlib import Path 6 | 7 | import settings 8 | 9 | with open(Path(settings.pickle_path) / "DecisionTree.pickle", "rb") as pickle_model: 10 | model = pickle.load(pickle_model) 11 | 12 | def score(Age, SibSp, Parch, Fare, Sex_female, Sex_male, Pclass_1, Pclass_2, Pclass_3, Embarked_C, Embarked_Q, Embarked_S): 13 | "Output: EM_CLASSIFICATION, EM_EVENTPROBABILITY" 14 | 15 | try: 16 | global model 17 | except NameError: 18 | with open(Path(settings.pickle_path) / "DecisionTree.pickle", "rb") as pickle_model: 19 | model = pickle.load(pickle_model) 20 | 21 | 22 | 23 | try: 24 | if math.isnan(Age): 25 | Age = 28.06264 26 | except TypeError: 27 | Age = 28.06264 28 | try: 29 | if 
math.isnan(SibSp): 30 | SibSp = 0.712 31 | except TypeError: 32 | SibSp = 0.712 33 | try: 34 | if math.isnan(Parch): 35 | Parch = 0.464 36 | except TypeError: 37 | Parch = 0.464 38 | try: 39 | if math.isnan(Fare): 40 | Fare = 30.497600000000002 41 | except TypeError: 42 | Fare = 30.497600000000002 43 | try: 44 | Sex_female = Sex_female.strip() 45 | except AttributeError: 46 | Sex_female = "" 47 | try: 48 | Sex_male = Sex_male.strip() 49 | except AttributeError: 50 | Sex_male = "" 51 | try: 52 | Pclass_1 = Pclass_1.strip() 53 | except AttributeError: 54 | Pclass_1 = "" 55 | try: 56 | Pclass_2 = Pclass_2.strip() 57 | except AttributeError: 58 | Pclass_2 = "" 59 | try: 60 | Pclass_3 = Pclass_3.strip() 61 | except AttributeError: 62 | Pclass_3 = "" 63 | try: 64 | Embarked_C = Embarked_C.strip() 65 | except AttributeError: 66 | Embarked_C = "" 67 | try: 68 | Embarked_Q = Embarked_Q.strip() 69 | except AttributeError: 70 | Embarked_Q = "" 71 | try: 72 | Embarked_S = Embarked_S.strip() 73 | except AttributeError: 74 | Embarked_S = "" 75 | 76 | input_array = pd.DataFrame([[Age, SibSp, Parch, Fare, Sex_female, Sex_male, Pclass_1, Pclass_2, Pclass_3, Embarked_C, Embarked_Q, Embarked_S]], 77 | columns=["Age", "SibSp", "Parch", "Fare", "Sex_female", "Sex_male", "Pclass_1", "Pclass_2", "Pclass_3", "Embarked_C", "Embarked_Q", "Embarked_S"], 78 | dtype=float) 79 | prediction = model.predict_proba(input_array) 80 | 81 | # Check for numpy values and convert to a CAS readable representation 82 | if isinstance(prediction, np.ndarray): 83 | prediction = prediction.tolist()[0] 84 | 85 | if prediction[0] > prediction[1]: 86 | EM_CLASSIFICATION = "1" 87 | else: 88 | EM_CLASSIFICATION = "0" 89 | 90 | return EM_CLASSIFICATION, prediction[0] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/GradientBoost/GradientBoost.pickle: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/titanicModels/GradientBoost/GradientBoost.pickle -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/GradientBoost/GradientBoost.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/titanicModels/GradientBoost/GradientBoost.zip -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/GradientBoost/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "GradientBoost", 3 | "description": "Description for the GradientBoost model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "Survived", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.11.3", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/GradientBoost/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_GradientBoost.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "GradientBoost.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/GradientBoost/inputVar.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Age", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "SibSp", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "Parch", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "Fare", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "Sex_female", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "Sex_male", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "Pclass_1", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "Pclass_2", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "Pclass_3", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "Embarked_C", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "Embarked_Q", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "Embarked_S", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/GradientBoost/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": "string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/GradientBoost/score_GradientBoost.py: 
-------------------------------------------------------------------------------- 1 | import math 2 | import pickle 3 | import pandas as pd 4 | import numpy as np 5 | from pathlib import Path 6 | 7 | import settings 8 | 9 | with open(Path(settings.pickle_path) / "GradientBoost.pickle", "rb") as pickle_model: 10 | model = pickle.load(pickle_model) 11 | 12 | def score(Age, SibSp, Parch, Fare, Sex_female, Sex_male, Pclass_1, Pclass_2, Pclass_3, Embarked_C, Embarked_Q, Embarked_S): 13 | "Output: EM_CLASSIFICATION, EM_EVENTPROBABILITY" 14 | 15 | try: 16 | global model 17 | except NameError: 18 | with open(Path(settings.pickle_path) / "GradientBoost.pickle", "rb") as pickle_model: 19 | model = pickle.load(pickle_model) 20 | 21 | 22 | 23 | try: 24 | if math.isnan(Age): 25 | Age = 28.06264 26 | except TypeError: 27 | Age = 28.06264 28 | try: 29 | if math.isnan(SibSp): 30 | SibSp = 0.712 31 | except TypeError: 32 | SibSp = 0.712 33 | try: 34 | if math.isnan(Parch): 35 | Parch = 0.464 36 | except TypeError: 37 | Parch = 0.464 38 | try: 39 | if math.isnan(Fare): 40 | Fare = 30.497600000000002 41 | except TypeError: 42 | Fare = 30.497600000000002 43 | try: 44 | Sex_female = Sex_female.strip() 45 | except AttributeError: 46 | Sex_female = "" 47 | try: 48 | Sex_male = Sex_male.strip() 49 | except AttributeError: 50 | Sex_male = "" 51 | try: 52 | Pclass_1 = Pclass_1.strip() 53 | except AttributeError: 54 | Pclass_1 = "" 55 | try: 56 | Pclass_2 = Pclass_2.strip() 57 | except AttributeError: 58 | Pclass_2 = "" 59 | try: 60 | Pclass_3 = Pclass_3.strip() 61 | except AttributeError: 62 | Pclass_3 = "" 63 | try: 64 | Embarked_C = Embarked_C.strip() 65 | except AttributeError: 66 | Embarked_C = "" 67 | try: 68 | Embarked_Q = Embarked_Q.strip() 69 | except AttributeError: 70 | Embarked_Q = "" 71 | try: 72 | Embarked_S = Embarked_S.strip() 73 | except AttributeError: 74 | Embarked_S = "" 75 | 76 | input_array = pd.DataFrame([[Age, SibSp, Parch, Fare, Sex_female, Sex_male, Pclass_1, 
Pclass_2, Pclass_3, Embarked_C, Embarked_Q, Embarked_S]], 77 | columns=["Age", "SibSp", "Parch", "Fare", "Sex_female", "Sex_male", "Pclass_1", "Pclass_2", "Pclass_3", "Embarked_C", "Embarked_Q", "Embarked_S"], 78 | dtype=float) 79 | prediction = model.predict_proba(input_array) 80 | 81 | # Check for numpy values and convert to a CAS readable representation 82 | if isinstance(prediction, np.ndarray): 83 | prediction = prediction.tolist()[0] 84 | 85 | if prediction[0] > prediction[1]: 86 | EM_CLASSIFICATION = "1" 87 | else: 88 | EM_CLASSIFICATION = "0" 89 | 90 | return EM_CLASSIFICATION, prediction[0] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/RandomForest/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "RandomForest", 3 | "description": "Description for the RandomForest model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "Survived", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.11.3", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/RandomForest/RandomForest.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/titanicModels/RandomForest/RandomForest.pickle -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/RandomForest/RandomForest.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/BiasMetrics/titanicModels/RandomForest/RandomForest.zip -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/RandomForest/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_RandomForest.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "RandomForest.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/RandomForest/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Age", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "SibSp", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "Parch", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "Fare", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "Sex_female", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "Sex_male", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "Pclass_1", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "Pclass_2", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "Pclass_3", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "Embarked_C", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 
| }, 62 | { 63 | "name": "Embarked_Q", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "Embarked_S", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/RandomForest/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": "string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/BiasMetrics/titanicModels/RandomForest/score_RandomForest.py: -------------------------------------------------------------------------------- 1 | import math 2 | import pickle 3 | import pandas as pd 4 | import numpy as np 5 | from pathlib import Path 6 | 7 | import settings 8 | 9 | with open(Path(settings.pickle_path) / "RandomForest.pickle", "rb") as pickle_model: 10 | model = pickle.load(pickle_model) 11 | 12 | def score(Age, SibSp, Parch, Fare, Sex_female, Sex_male, Pclass_1, Pclass_2, Pclass_3, Embarked_C, Embarked_Q, Embarked_S): 13 | "Output: EM_CLASSIFICATION, EM_EVENTPROBABILITY" 14 | 15 | try: 16 | global model 17 | except NameError: 18 | with open(Path(settings.pickle_path) / "RandomForest.pickle", "rb") as pickle_model: 19 | model = pickle.load(pickle_model) 20 | 21 | 22 | 23 | try: 24 | if math.isnan(Age): 25 | Age = 28.06264 26 | except TypeError: 27 | Age = 28.06264 28 | try: 29 | if math.isnan(SibSp): 30 | SibSp = 0.712 31 | except TypeError: 32 | SibSp = 0.712 33 | try: 34 | if math.isnan(Parch): 35 | Parch = 0.464 36 | except TypeError: 37 | Parch = 0.464 38 | try: 39 | if math.isnan(Fare): 40 | Fare = 30.497600000000002 41 | except TypeError: 42 | Fare = 
30.497600000000002 43 | try: 44 | Sex_female = Sex_female.strip() 45 | except AttributeError: 46 | Sex_female = "" 47 | try: 48 | Sex_male = Sex_male.strip() 49 | except AttributeError: 50 | Sex_male = "" 51 | try: 52 | Pclass_1 = Pclass_1.strip() 53 | except AttributeError: 54 | Pclass_1 = "" 55 | try: 56 | Pclass_2 = Pclass_2.strip() 57 | except AttributeError: 58 | Pclass_2 = "" 59 | try: 60 | Pclass_3 = Pclass_3.strip() 61 | except AttributeError: 62 | Pclass_3 = "" 63 | try: 64 | Embarked_C = Embarked_C.strip() 65 | except AttributeError: 66 | Embarked_C = "" 67 | try: 68 | Embarked_Q = Embarked_Q.strip() 69 | except AttributeError: 70 | Embarked_Q = "" 71 | try: 72 | Embarked_S = Embarked_S.strip() 73 | except AttributeError: 74 | Embarked_S = "" 75 | 76 | input_array = pd.DataFrame([[Age, SibSp, Parch, Fare, Sex_female, Sex_male, Pclass_1, Pclass_2, Pclass_3, Embarked_C, Embarked_Q, Embarked_S]], 77 | columns=["Age", "SibSp", "Parch", "Fare", "Sex_female", "Sex_male", "Pclass_1", "Pclass_2", "Pclass_3", "Embarked_C", "Embarked_Q", "Embarked_S"], 78 | dtype=float) 79 | prediction = model.predict_proba(input_array) 80 | 81 | # Check for numpy values and convert to a CAS readable representation 82 | if isinstance(prediction, np.ndarray): 83 | prediction = prediction.tolist()[0] 84 | 85 | if prediction[0] > prediction[1]: 86 | EM_CLASSIFICATION = "1" 87 | else: 88 | EM_CLASSIFICATION = "0" 89 | 90 | return EM_CLASSIFICATION, prediction[0] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Model1/MLFlowModel.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/MLFlowModels/Model1/MLFlowModel.pickle -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Model1/MLFlowModel.zip: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/MLFlowModels/Model1/MLFlowModel.zip -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Model1/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MLFlowModel", 3 | "description": "MLFlow Model", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "tensor", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.16", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Model1/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_MLFlowModel.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "MLFlowModel.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Model1/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "fixedacidity", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "volatileacidity", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "citricacid", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "residualsugar", 22 | 
"level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "chlorides", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "freesulfurdioxide", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "totalsulfurdioxide", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "density", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "pH", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "sulphates", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "alcohol", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | } 68 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Model1/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "tensor", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | } 8 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Model1/score_MLFlowModel.py: -------------------------------------------------------------------------------- 1 | import math 2 | import cloudpickle 3 | import pandas as pd 4 | import numpy as np 5 | from pathlib import Path 6 | 7 | import settings 8 | 9 | with open(Path(settings.pickle_path) / "MLFlowTest.pickle", "rb") as pickle_model: 10 | model = cloudpickle.load(pickle_model) 11 | 12 | def score(fixedacidity, volatileacidity, citricacid, residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, density, pH, sulphates, alcohol): 13 | "Output: tensor" 14 | 15 | try: 16 | global model 17 | except NameError: 18 | with open(Path(settings.pickle_path) / "MLFlowTest.pickle", "rb") as pickle_model: 19 | model = 
cloudpickle.load(pickle_model) 20 | 21 | input_array = pd.DataFrame([[fixedacidity, volatileacidity, citricacid, residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, density, pH, sulphates, alcohol]], 22 | columns=["fixedacidity", "volatileacidity", "citricacid", "residualsugar", "chlorides", "freesulfurdioxide", "totalsulfurdioxide", "density", "pH", "sulphates", "alcohol"], 23 | dtype=float) 24 | prediction = model.predict(input_array) 25 | 26 | # Check for numpy values and convert to a CAS readable representation 27 | if isinstance(prediction, np.ndarray): 28 | prediction = prediction.tolist()[0] 29 | 30 | tensor = prediction 31 | 32 | return tensor -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test1/MLFlowTest.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/MLFlowModels/Test1/MLFlowTest.pickle -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test1/MLFlowTest.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/MLFlowModels/Test1/MLFlowTest.zip -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test1/MLFlowTestScore.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import math 4 | import pickle 5 | import pandas as pd 6 | import numpy as np 7 | 8 | with open('/models/resources/viya/d02aadfe-618e-44e0-af6d-1bf00c6396e3/MLFlowTest.pickle', 'rb') as _pFile: 9 | _thisModelFit = pickle.load(_pFile) 10 | 11 | def scoreMLFlowTest(fixedacidity, volatileacidity, citricacid, residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, 
density, pH, sulphates, alcohol): 12 | "Output: tensor" 13 | 14 | try: 15 | global _thisModelFit 16 | except NameError: 17 | 18 | with open('/models/resources/viya/d02aadfe-618e-44e0-af6d-1bf00c6396e3/MLFlowTest.pickle', 'rb') as _pFile: 19 | _thisModelFit = pickle.load(_pFile) 20 | 21 | try: 22 | inputArray = pd.DataFrame([[fixedacidity, volatileacidity, citricacid, residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, density, pH, sulphates, alcohol]], 23 | columns=['fixedacidity', 'volatileacidity', 'citricacid', 'residualsugar', 'chlorides', 'freesulfurdioxide', 'totalsulfurdioxide', 'density', 'pH', 'sulphates', 'alcohol'], 24 | dtype=float) 25 | prediction = _thisModelFit.predict(inputArray) 26 | except ValueError: 27 | # For models requiring or including an intercept value, a 'const' column is required 28 | # For example, many statsmodels models include an intercept value that must be included for the model prediction 29 | inputArray = pd.DataFrame([[1.0, fixedacidity, volatileacidity, citricacid, residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, density, pH, sulphates, alcohol]], 30 | columns=['const', 'fixedacidity', 'volatileacidity', 'citricacid', 'residualsugar', 'chlorides', 'freesulfurdioxide', 'totalsulfurdioxide', 'density', 'pH', 'sulphates', 'alcohol'], 31 | dtype=float) 32 | prediction = _thisModelFit.predict(inputArray) 33 | 34 | tensor = prediction 35 | 36 | return(tensor) 37 | -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test1/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MLFlowTest", 3 | "description": "MLFlow Model ", 4 | "function": "classification", 5 | "scoreCodeType": "python", 6 | "trainTable": " ", 7 | "trainCodeType": "Python", 8 | "algorithm": "", 9 | "targetVariable": "", 10 | "targetEvent": 1, 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "tensor", 13 | "modeler": 
"sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.3" 16 | } -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test1/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "MLFlowTestScore.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "MLFlowTest.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test1/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "fixedacidity", 4 | "length": 8, 5 | "type": "decimal", 6 | "level": "interval" 7 | }, 8 | { 9 | "name": "volatileacidity", 10 | "length": 8, 11 | "type": "decimal", 12 | "level": "interval" 13 | }, 14 | { 15 | "name": "citricacid", 16 | "length": 8, 17 | "type": "decimal", 18 | "level": "interval" 19 | }, 20 | { 21 | "name": "residualsugar", 22 | "length": 8, 23 | "type": "decimal", 24 | "level": "interval" 25 | }, 26 | { 27 | "name": "chlorides", 28 | "length": 8, 29 | "type": "decimal", 30 | "level": "interval" 31 | }, 32 | { 33 | "name": "freesulfurdioxide", 34 | "length": 8, 35 | "type": "decimal", 36 | "level": "interval" 37 | }, 38 | { 39 | "name": "totalsulfurdioxide", 40 | "length": 8, 41 | "type": "decimal", 42 | "level": "interval" 43 | }, 44 | { 45 | "name": "density", 46 | "length": 8, 47 | "type": "decimal", 48 | "level": "interval" 49 | }, 50 | { 51 | "name": "pH", 52 | "length": 8, 53 | "type": "decimal", 54 | "level": "interval" 55 | }, 56 | { 57 | "name": "sulphates", 58 | "length": 8, 59 | "type": "decimal", 60 | "level": "interval" 61 | }, 62 | { 63 | "name": "alcohol", 64 | "length": 8, 65 | "type": "decimal", 66 | "level": "interval" 
67 | } 68 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test1/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "tensor", 4 | "length": 8, 5 | "type": "decimal", 6 | "level": "interval" 7 | } 8 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/MLFlowTest2.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/MLFlowModels/Test2/MLFlowTest2.pickle -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/MLFlowTest2.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/MLFlowModels/Test2/MLFlowTest2.zip -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/MLFlowTest2Score.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import math 4 | import pickle 5 | import pandas as pd 6 | import numpy as np 7 | 8 | import settings 9 | 10 | with open(settings.pickle_path + 'MLFlowTest2.pickle', 'rb') as _pFile: 11 | _thisModelFit = pickle.load(_pFile) 12 | 13 | def scoreMLFlowTest2(fixedacidity, volatileacidity, citricacid, residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, density, pH, sulphates, alcohol): 14 | "Output: tensor" 15 | 16 | try: 17 | global _thisModelFit 18 | except NameError: 19 | 20 | with open(settings.pickle_path + 'MLFlowTest2.pickle', 'rb') as _pFile: 21 | _thisModelFit = pickle.load(_pFile) 22 | 23 | try: 24 | inputArray = pd.DataFrame([[fixedacidity, volatileacidity, citricacid, 
residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, density, pH, sulphates, alcohol]], 25 | columns=['fixedacidity', 'volatileacidity', 'citricacid', 'residualsugar', 'chlorides', 'freesulfurdioxide', 'totalsulfurdioxide', 'density', 'pH', 'sulphates', 'alcohol'], 26 | dtype=float) 27 | prediction = _thisModelFit.predict(inputArray) 28 | except ValueError: 29 | # For models requiring or including an intercept value, a 'const' column is required 30 | # For example, many statsmodels models include an intercept value that must be included for the model prediction 31 | inputArray = pd.DataFrame([[1.0, fixedacidity, volatileacidity, citricacid, residualsugar, chlorides, freesulfurdioxide, totalsulfurdioxide, density, pH, sulphates, alcohol]], 32 | columns=['const', 'fixedacidity', 'volatileacidity', 'citricacid', 'residualsugar', 'chlorides', 'freesulfurdioxide', 'totalsulfurdioxide', 'density', 'pH', 'sulphates', 'alcohol'], 33 | dtype=float) 34 | prediction = _thisModelFit.predict(inputArray) 35 | 36 | tensor = prediction 37 | if isinstance(tensor, np.ndarray): 38 | tensor = prediction.item(0) 39 | 40 | return(tensor) 41 | -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MLFlowTest2", 3 | "description": "MLFlow Model ", 4 | "function": "classification", 5 | "scoreCodeType": "python", 6 | "trainTable": " ", 7 | "trainCodeType": "Python", 8 | "algorithm": "", 9 | "targetVariable": "", 10 | "targetEvent": 1, 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "tensor", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.3" 16 | } -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | 
{ 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "MLFlowTest2Score.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "MLFlowTest2.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "fixedacidity", 4 | "length": 8, 5 | "type": "decimal", 6 | "level": "interval" 7 | }, 8 | { 9 | "name": "volatileacidity", 10 | "length": 8, 11 | "type": "decimal", 12 | "level": "interval" 13 | }, 14 | { 15 | "name": "citricacid", 16 | "length": 8, 17 | "type": "decimal", 18 | "level": "interval" 19 | }, 20 | { 21 | "name": "residualsugar", 22 | "length": 8, 23 | "type": "decimal", 24 | "level": "interval" 25 | }, 26 | { 27 | "name": "chlorides", 28 | "length": 8, 29 | "type": "decimal", 30 | "level": "interval" 31 | }, 32 | { 33 | "name": "freesulfurdioxide", 34 | "length": 8, 35 | "type": "decimal", 36 | "level": "interval" 37 | }, 38 | { 39 | "name": "totalsulfurdioxide", 40 | "length": 8, 41 | "type": "decimal", 42 | "level": "interval" 43 | }, 44 | { 45 | "name": "density", 46 | "length": 8, 47 | "type": "decimal", 48 | "level": "interval" 49 | }, 50 | { 51 | "name": "pH", 52 | "length": 8, 53 | "type": "decimal", 54 | "level": "interval" 55 | }, 56 | { 57 | "name": "sulphates", 58 | "length": 8, 59 | "type": "decimal", 60 | "level": "interval" 61 | }, 62 | { 63 | "name": "alcohol", 64 | "length": 8, 65 | "type": "decimal", 66 | "level": "interval" 67 | } 68 | ] -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/model.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/MLFlowModels/Test2/model.pkl -------------------------------------------------------------------------------- /examples/data/MLFlowModels/Test2/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "tensor", 4 | "length": 8, 5 | "type": "decimal", 6 | "level": "interval" 7 | } 8 | ] -------------------------------------------------------------------------------- /examples/data/ModelMigration/DecisionTreeClassifier.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/ModelMigration/DecisionTreeClassifier.zip -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/USAHousingModels/LinearRegression.zip -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/LinearRegression.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/USAHousingModels/LinearRegression/LinearRegression.pickle -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/LinearRegression.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/USAHousingModels/LinearRegression/LinearRegression.zip -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/LinearRegressionScore.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import math 4 | import pickle 5 | import pandas as pd 6 | import numpy as np 7 | 8 | with open('/models/resources/viya/cb485d04-6cb9-48ce-a829-67b8167303ce/LinearRegression.pickle', 'rb') as _pFile: 9 | _thisModelFit = pickle.load(_pFile) 10 | 11 | def scoreLinearRegression(Avg_Area_Income, Avg_Area_House_Age, Avg_Area_Number_of_Rooms, Avg_Area_Number_of_Bedrooms, Area_Population): 12 | "Output: EM_PREDICTION, EM_PREDICTION" 13 | 14 | try: 15 | global _thisModelFit 16 | except NameError: 17 | 18 | with open('/models/resources/viya/cb485d04-6cb9-48ce-a829-67b8167303ce/LinearRegression.pickle', 'rb') as _pFile: 19 | _thisModelFit = pickle.load(_pFile) 20 | 21 | try: 22 | inputArray = pd.DataFrame([[Avg_Area_Income, Avg_Area_House_Age, Avg_Area_Number_of_Rooms, Avg_Area_Number_of_Bedrooms, Area_Population]], 23 | columns=['Avg_Area_Income', 'Avg_Area_House_Age', 'Avg_Area_Number_of_Rooms', 'Avg_Area_Number_of_Bedrooms', 'Area_Population'], 24 | dtype=float) 25 | prediction = _thisModelFit.predict(inputArray) 26 | except ValueError: 27 | # For models requiring or including an intercept value, a 'const' column is required 28 | # For example, many statsmodels models include an intercept value that must be included for the model prediction 29 | inputArray = pd.DataFrame([[1.0, Avg_Area_Income, Avg_Area_House_Age, Avg_Area_Number_of_Rooms, Avg_Area_Number_of_Bedrooms, Area_Population]], 30 | columns=['const', 'Avg_Area_Income', 'Avg_Area_House_Age', 'Avg_Area_Number_of_Rooms', 'Avg_Area_Number_of_Bedrooms', 'Area_Population'], 31 | dtype=float) 32 | prediction = 
_thisModelFit.predict(inputArray) 33 | 34 | try: 35 | EM_PREDICTION = float(prediction) 36 | except TypeError: 37 | # If the model expects non-binary responses, a TypeError will be raised. 38 | # The except block shifts the prediction to accept a non-binary response. 39 | EM_PREDICTION = float(prediction[:,1]) 40 | 41 | if (EM_PREDICTION >= 1232072.6541453): 42 | EM_PREDICTION = '1' 43 | else: 44 | EM_PREDICTION = '0' 45 | 46 | return(EM_PREDICTION, EM_PREDICTION) 47 | -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "LinearRegression", 3 | "description": "Description for the LinearRegression model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Prediction", 9 | "targetVariable": "Price", 10 | "targetEvent": "", 11 | "targetLevel": "INTERVAL", 12 | "eventProbVar": "", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.16", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_LinearRegression.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "LinearRegression.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Avg_Area_Income", 4 | "level": 
"interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "Avg_Area_House_Age", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "Avg_Area_Number_of_Rooms", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "Avg_Area_Number_of_Bedrooms", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "Area_Population", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | } 32 | ] -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_PREDICTION", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | } 8 | ] -------------------------------------------------------------------------------- /examples/data/USAHousingModels/LinearRegression/score_LinearRegression.py: -------------------------------------------------------------------------------- 1 | import math 2 | import pickle 3 | import pandas as pd 4 | import numpy as np 5 | from pathlib import Path 6 | 7 | import settings 8 | 9 | with open(Path(settings.pickle_path) / "LinearRegression.pickle", "rb") as pickle_model: 10 | model = pickle.load(pickle_model) 11 | 12 | def score(Avg_Area_Income, Avg_Area_House_Age, Avg_Area_Number_of_Rooms, Avg_Area_Number_of_Bedrooms, Area_Population): 13 | "Output: EM_PREDICTION" 14 | 15 | try: 16 | global model 17 | except NameError: 18 | with open(Path(settings.pickle_path) / "LinearRegression.pickle", "rb") as pickle_model: 19 | model = pickle.load(pickle_model) 20 | 21 | try: 22 | if math.isnan(Avg_Area_Income): 23 | Avg_Area_Income = 68583.10898397 24 | except TypeError: 25 | Avg_Area_Income = 68583.10898397 26 | try: 27 | if math.isnan(Avg_Area_House_Age): 28 | Avg_Area_House_Age = 5.977222035287 29 | except 
TypeError: 30 | Avg_Area_House_Age = 5.977222035287 31 | try: 32 | if math.isnan(Avg_Area_Number_of_Rooms): 33 | Avg_Area_Number_of_Rooms = 6.9877918509092005 34 | except TypeError: 35 | Avg_Area_Number_of_Rooms = 6.9877918509092005 36 | try: 37 | if math.isnan(Avg_Area_Number_of_Bedrooms): 38 | Avg_Area_Number_of_Bedrooms = 3.9813300000000003 39 | except TypeError: 40 | Avg_Area_Number_of_Bedrooms = 3.9813300000000003 41 | try: 42 | if math.isnan(Area_Population): 43 | Area_Population = 36163.516038540256 44 | except TypeError: 45 | Area_Population = 36163.516038540256 46 | 47 | input_array = pd.DataFrame([[Avg_Area_Income, Avg_Area_House_Age, Avg_Area_Number_of_Rooms, Avg_Area_Number_of_Bedrooms, Area_Population]], 48 | columns=["Avg_Area_Income", "Avg_Area_House_Age", "Avg_Area_Number_of_Rooms", "Avg_Area_Number_of_Bedrooms", "Area_Population"], 49 | dtype=float) 50 | prediction = model.predict(input_array) 51 | 52 | # Check for numpy values and convert to a CAS readable representation 53 | if isinstance(prediction, np.ndarray): 54 | prediction = prediction.tolist()[0] 55 | 56 | EM_PREDICTION = prediction 57 | 58 | return EM_PREDICTION -------------------------------------------------------------------------------- /examples/data/hmeqModels/DecisionTreeClassifier/DecisionTreeClassifier.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/DecisionTreeClassifier/DecisionTreeClassifier.pickle -------------------------------------------------------------------------------- /examples/data/hmeqModels/DecisionTreeClassifier/DecisionTreeClassifier.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/DecisionTreeClassifier/DecisionTreeClassifier.zip 
-------------------------------------------------------------------------------- /examples/data/hmeqModels/DecisionTreeClassifier/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DecisionTreeClassifier", 3 | "description": "Description for the DecisionTreeClassifier model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "BAD", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.16", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/hmeqModels/DecisionTreeClassifier/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_DecisionTreeClassifier.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "DecisionTreeClassifier.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/DecisionTreeClassifier/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "LOAN", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "MORTDUE", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "VALUE", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "YOJ", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "DEROG", 28 | "level": "interval", 29 | "type": "decimal", 30 | 
"length": 8 31 | }, 32 | { 33 | "name": "DELINQ", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "CLAGE", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "NINQ", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "CLNO", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "DEBTINC", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | } 62 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/DecisionTreeClassifier/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": "string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/DecisionTreeClassifier/score_DecisionTreeClassifier.py: -------------------------------------------------------------------------------- 1 | import math 2 | import pickle 3 | import pandas as pd 4 | import numpy as np 5 | from pathlib import Path 6 | 7 | import settings 8 | 9 | with open(Path(settings.pickle_path) / "DecisionTreeClassifier.pickle", "rb") as pickle_model: 10 | model = pickle.load(pickle_model) 11 | 12 | def score(LOAN, MORTDUE, VALUE, YOJ, DEROG, DELINQ, CLAGE, NINQ, CLNO, DEBTINC): 13 | "Output: EM_CLASSIFICATION, EM_EVENTPROBABILITY" 14 | 15 | try: 16 | global model 17 | except NameError: 18 | with open(Path(settings.pickle_path) / "DecisionTreeClassifier.pickle", "rb") as pickle_model: 19 | model = pickle.load(pickle_model) 20 | 21 | 22 | 23 | try: 24 | if math.isnan(LOAN): 25 | LOAN = 18607.96979865772 26 | except TypeError: 27 | LOAN = 
18607.96979865772 28 | try: 29 | if math.isnan(MORTDUE): 30 | MORTDUE = 73760.817199559 31 | except TypeError: 32 | MORTDUE = 73760.817199559 33 | try: 34 | if math.isnan(VALUE): 35 | VALUE = 101776.04874145007 36 | except TypeError: 37 | VALUE = 101776.04874145007 38 | try: 39 | if math.isnan(YOJ): 40 | YOJ = 8.922268135904499 41 | except TypeError: 42 | YOJ = 8.922268135904499 43 | try: 44 | if math.isnan(DEROG): 45 | DEROG = 0.2545696877380046 46 | except TypeError: 47 | DEROG = 0.2545696877380046 48 | try: 49 | if math.isnan(DELINQ): 50 | DELINQ = 0.4494423791821561 51 | except TypeError: 52 | DELINQ = 0.4494423791821561 53 | try: 54 | if math.isnan(CLAGE): 55 | CLAGE = 179.7662751900465 56 | except TypeError: 57 | CLAGE = 179.7662751900465 58 | try: 59 | if math.isnan(NINQ): 60 | NINQ = 1.1860550458715597 61 | except TypeError: 62 | NINQ = 1.1860550458715597 63 | try: 64 | if math.isnan(CLNO): 65 | CLNO = 21.29609620076682 66 | except TypeError: 67 | CLNO = 21.29609620076682 68 | try: 69 | if math.isnan(DEBTINC): 70 | DEBTINC = 33.779915349235246 71 | except TypeError: 72 | DEBTINC = 33.779915349235246 73 | 74 | input_array = pd.DataFrame([[LOAN, MORTDUE, VALUE, YOJ, DEROG, DELINQ, CLAGE, NINQ, CLNO, DEBTINC]], 75 | columns=["LOAN", "MORTDUE", "VALUE", "YOJ", "DEROG", "DELINQ", "CLAGE", "NINQ", "CLNO", "DEBTINC"], 76 | dtype=float) 77 | prediction = model.predict_proba(input_array) 78 | 79 | # Check for numpy values and convert to a CAS readable representation 80 | if isinstance(prediction, np.ndarray): 81 | prediction = prediction.tolist()[0] 82 | 83 | if prediction[0] > prediction[1]: 84 | EM_CLASSIFICATION = "1" 85 | else: 86 | EM_CLASSIFICATION = "0" 87 | 88 | return EM_CLASSIFICATION, prediction[0] -------------------------------------------------------------------------------- /examples/data/hmeqModels/GradientBoosting/GradientBoosting.pickle: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/GradientBoosting/GradientBoosting.pickle -------------------------------------------------------------------------------- /examples/data/hmeqModels/GradientBoosting/GradientBoosting.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/GradientBoosting/GradientBoosting.zip -------------------------------------------------------------------------------- /examples/data/hmeqModels/GradientBoosting/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "GradientBoosting", 3 | "description": "Description for the GradientBoosting model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "BAD", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.16", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/hmeqModels/GradientBoosting/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_GradientBoosting.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "GradientBoosting.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/GradientBoosting/inputVar.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "LOAN", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "MORTDUE", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "VALUE", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "YOJ", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "DEROG", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "DELINQ", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "CLAGE", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "NINQ", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "CLNO", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "DEBTINC", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | } 62 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/GradientBoosting/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": "string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/GradientBoosting/score_GradientBoosting.py: -------------------------------------------------------------------------------- 1 | import math 2 | import pickle 3 | import pandas as pd 4 | import numpy as np 5 | from pathlib import Path 6 | 7 | import settings 8 | 9 | with open(Path(settings.pickle_path) / 
def score(LOAN, MORTDUE, VALUE, YOJ, DEROG, DELINQ, CLAGE, NINQ, CLNO, DEBTINC):
    "Output: EM_CLASSIFICATION, EM_EVENTPROBABILITY"

    # Reload the model if the module-level load did not run in this process
    # (e.g. when only the function body is published).  The original code
    # wrapped the `global` statement itself in try/except NameError, but a
    # `global` declaration can never raise NameError, so that fallback load
    # was unreachable; test for the name explicitly instead.
    global model
    if "model" not in globals():
        with open(Path(settings.pickle_path) / "GradientBoosting.pickle", "rb") as pickle_model:
            model = pickle.load(pickle_model)

    def _impute(value, default):
        # math.isnan raises TypeError for None and other non-numerics;
        # treat those exactly like NaN and substitute the training mean.
        try:
            return default if math.isnan(value) else value
        except TypeError:
            return default

    # Replace missing inputs with the per-variable training-set means.
    LOAN = _impute(LOAN, 18607.96979865772)
    MORTDUE = _impute(MORTDUE, 73760.817199559)
    VALUE = _impute(VALUE, 101776.04874145007)
    YOJ = _impute(YOJ, 8.922268135904499)
    DEROG = _impute(DEROG, 0.2545696877380046)
    DELINQ = _impute(DELINQ, 0.4494423791821561)
    CLAGE = _impute(CLAGE, 179.7662751900465)
    NINQ = _impute(NINQ, 1.1860550458715597)
    CLNO = _impute(CLNO, 21.29609620076682)
    DEBTINC = _impute(DEBTINC, 33.779915349235246)

    input_array = pd.DataFrame(
        [[LOAN, MORTDUE, VALUE, YOJ, DEROG, DELINQ, CLAGE, NINQ, CLNO, DEBTINC]],
        columns=["LOAN", "MORTDUE", "VALUE", "YOJ", "DEROG", "DELINQ", "CLAGE", "NINQ", "CLNO", "DEBTINC"],
        dtype=float,
    )
    prediction = model.predict_proba(input_array)

    # Convert numpy output to a plain Python list so CAS can serialize it.
    if isinstance(prediction, np.ndarray):
        prediction = prediction.tolist()[0]

    # NOTE(review): with scikit-learn's predict_proba column ordering,
    # prediction[0] is the probability of the *first* entry in classes_
    # (usually class 0); labelling that case "1" and returning prediction[0]
    # as the event probability (eventProbVar is P_1) may be inverted —
    # confirm against model.classes_ before relying on this example.
    if prediction[0] > prediction[1]:
        EM_CLASSIFICATION = "1"
    else:
        EM_CLASSIFICATION = "0"

    return EM_CLASSIFICATION, prediction[0]
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/H2OBinaryGLM/glmfit_binary.zip -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OBinaryGLM/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "LOAN", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "MORTDUE", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "VALUE", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "REASON", 22 | "level": "nominal", 23 | "type": "string", 24 | "length": 7 25 | }, 26 | { 27 | "name": "JOB", 28 | "level": "nominal", 29 | "type": "string", 30 | "length": 7 31 | }, 32 | { 33 | "name": "YOJ", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "DEROG", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "DELINQ", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "CLAGE", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "NINQ", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "CLNO", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "DEBTINC", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OBinaryGLM/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": 
"string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OMOJOGLM/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "glmfit_mojo", 3 | "description": "MOJO H2O model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "BAD", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.16", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OMOJOGLM/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_glmfit_mojo.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": "glmfit_mojo.mojo" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OMOJOGLM/glmfit_mojo.mojo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/H2OMOJOGLM/glmfit_mojo.mojo -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OMOJOGLM/glmfit_mojo.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/H2OMOJOGLM/glmfit_mojo.zip -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OMOJOGLM/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "LOAN", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "MORTDUE", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "VALUE", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "REASON", 22 | "level": "nominal", 23 | "type": "string", 24 | "length": 7 25 | }, 26 | { 27 | "name": "JOB", 28 | "level": "nominal", 29 | "type": "string", 30 | "length": 7 31 | }, 32 | { 33 | "name": "YOJ", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "DEROG", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "DELINQ", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "CLAGE", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "NINQ", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | }, 62 | { 63 | "name": "CLNO", 64 | "level": "interval", 65 | "type": "decimal", 66 | "length": 8 67 | }, 68 | { 69 | "name": "DEBTINC", 70 | "level": "interval", 71 | "type": "decimal", 72 | "length": 8 73 | } 74 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/H2OMOJOGLM/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": "string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": 
"interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/RandomForest/ModelProperties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "RandomForest", 3 | "description": "Description for the RandomForest model.", 4 | "scoreCodeType": "python", 5 | "trainTable": "", 6 | "trainCodeType": "Python", 7 | "algorithm": "", 8 | "function": "Classification", 9 | "targetVariable": "BAD", 10 | "targetEvent": "1", 11 | "targetLevel": "BINARY", 12 | "eventProbVar": "P_1", 13 | "modeler": "sasdemo", 14 | "tool": "Python 3", 15 | "toolVersion": "3.8.16", 16 | "properties": [] 17 | } -------------------------------------------------------------------------------- /examples/data/hmeqModels/RandomForest/RandomForest.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/RandomForest/RandomForest.pickle -------------------------------------------------------------------------------- /examples/data/hmeqModels/RandomForest/RandomForest.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sassoftware/python-sasctl/e0934185b9a91e3f77260c64910fa30f1fced7d8/examples/data/hmeqModels/RandomForest/RandomForest.zip -------------------------------------------------------------------------------- /examples/data/hmeqModels/RandomForest/fileMetadata.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "role": "inputVariables", 4 | "name": "inputVar.json" 5 | }, 6 | { 7 | "role": "outputVariables", 8 | "name": "outputVar.json" 9 | }, 10 | { 11 | "role": "score", 12 | "name": "score_RandomForest.py" 13 | }, 14 | { 15 | "role": "scoreResource", 16 | "name": 
"RandomForest.pickle" 17 | } 18 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/RandomForest/inputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "LOAN", 4 | "level": "interval", 5 | "type": "decimal", 6 | "length": 8 7 | }, 8 | { 9 | "name": "MORTDUE", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | }, 14 | { 15 | "name": "VALUE", 16 | "level": "interval", 17 | "type": "decimal", 18 | "length": 8 19 | }, 20 | { 21 | "name": "YOJ", 22 | "level": "interval", 23 | "type": "decimal", 24 | "length": 8 25 | }, 26 | { 27 | "name": "DEROG", 28 | "level": "interval", 29 | "type": "decimal", 30 | "length": 8 31 | }, 32 | { 33 | "name": "DELINQ", 34 | "level": "interval", 35 | "type": "decimal", 36 | "length": 8 37 | }, 38 | { 39 | "name": "CLAGE", 40 | "level": "interval", 41 | "type": "decimal", 42 | "length": 8 43 | }, 44 | { 45 | "name": "NINQ", 46 | "level": "interval", 47 | "type": "decimal", 48 | "length": 8 49 | }, 50 | { 51 | "name": "CLNO", 52 | "level": "interval", 53 | "type": "decimal", 54 | "length": 8 55 | }, 56 | { 57 | "name": "DEBTINC", 58 | "level": "interval", 59 | "type": "decimal", 60 | "length": 8 61 | } 62 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/RandomForest/outputVar.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "EM_CLASSIFICATION", 4 | "level": "nominal", 5 | "type": "string", 6 | "length": 1 7 | }, 8 | { 9 | "name": "EM_EVENTPROBABILITY", 10 | "level": "interval", 11 | "type": "decimal", 12 | "length": 8 13 | } 14 | ] -------------------------------------------------------------------------------- /examples/data/hmeqModels/RandomForest/score_RandomForest.py: -------------------------------------------------------------------------------- 1 | import math 2 | import 
def score(LOAN, MORTDUE, VALUE, YOJ, DEROG, DELINQ, CLAGE, NINQ, CLNO, DEBTINC):
    "Output: EM_CLASSIFICATION, EM_EVENTPROBABILITY"

    # Reload the model if the module-level load did not run in this process
    # (e.g. when only the function body is published).  The original code
    # wrapped the `global` statement itself in try/except NameError, but a
    # `global` declaration can never raise NameError, so that fallback load
    # was unreachable; test for the name explicitly instead.
    global model
    if "model" not in globals():
        with open(Path(settings.pickle_path) / "RandomForest.pickle", "rb") as pickle_model:
            model = pickle.load(pickle_model)

    def _impute(value, default):
        # math.isnan raises TypeError for None and other non-numerics;
        # treat those exactly like NaN and substitute the training mean.
        try:
            return default if math.isnan(value) else value
        except TypeError:
            return default

    # Replace missing inputs with the per-variable training-set means.
    LOAN = _impute(LOAN, 18607.96979865772)
    MORTDUE = _impute(MORTDUE, 73760.817199559)
    VALUE = _impute(VALUE, 101776.04874145007)
    YOJ = _impute(YOJ, 8.922268135904499)
    DEROG = _impute(DEROG, 0.2545696877380046)
    DELINQ = _impute(DELINQ, 0.4494423791821561)
    CLAGE = _impute(CLAGE, 179.7662751900465)
    NINQ = _impute(NINQ, 1.1860550458715597)
    CLNO = _impute(CLNO, 21.29609620076682)
    DEBTINC = _impute(DEBTINC, 33.779915349235246)

    input_array = pd.DataFrame(
        [[LOAN, MORTDUE, VALUE, YOJ, DEROG, DELINQ, CLAGE, NINQ, CLNO, DEBTINC]],
        columns=["LOAN", "MORTDUE", "VALUE", "YOJ", "DEROG", "DELINQ", "CLAGE", "NINQ", "CLNO", "DEBTINC"],
        dtype=float,
    )
    prediction = model.predict_proba(input_array)

    # Convert numpy output to a plain Python list so CAS can serialize it.
    if isinstance(prediction, np.ndarray):
        prediction = prediction.tolist()[0]

    # NOTE(review): with scikit-learn's predict_proba column ordering,
    # prediction[0] is the probability of the *first* entry in classes_
    # (usually class 0); labelling that case "1" and returning prediction[0]
    # as the event probability (eventProbVar is P_1) may be inverted —
    # confirm against model.classes_ before relying on this example.
    if prediction[0] > prediction[1]:
        EM_CLASSIFICATION = "1"
    else:
        EM_CLASSIFICATION = "0"

    return EM_CLASSIFICATION, prediction[0]
33 | for file in filter(lambda x: x.name == 'traincode.sas', all_files): 34 | print(file) 35 | 36 | # Make the same request as before, but with a service-side filter to perform 37 | # the filtering. 38 | all_files = request_link(response, 'files', params={'filter': 'eq(name, "traincode.sas")'}) 39 | 40 | # Select the first file matching the filter criteria. 41 | # NOTE: this is not the actual file, just a collection of metadata about the 42 | # file and the associated REST links available. 43 | file = all_files[0] 44 | 45 | # Make a request to the "content" link to retrieve the actual file contents. 46 | content = request_link(file, 'content') 47 | print(content) 48 | 49 | # Make a request for files where the filename matches "model.pkl" 50 | # NOTE: this example assumes there is a single matching file on the server. If 51 | # there are multiple such files you will need to select one. 52 | file = request_link(response, 'files', params={'filter': 'eq(name, "model.pkl")'}) 53 | if file: 54 | file = file[0] 55 | 56 | # Request the contents of the file. 57 | # NOTE: because the file is a binary pickle file, the "format='content'" 58 | # parameter is required to indicate that we want to raw content of the response 59 | # without attempting to parse it as text or JSON. 60 | pkl = request_link(file, 'content', format='content') 61 | 62 | # Load the pickled file to reconstitute the Python object on the client. 63 | # WARNING: you should not unpickle objects from untrusted sources as they may 64 | # contain malicious content. Additionally, unpickling is likely to fail if 65 | # the Python environment on the client differs from the environment where the 66 | # object was first created. 
67 | pickle.loads(pkl) # skipcq: BAN-B301 68 | -------------------------------------------------------------------------------- /examples/register_custom_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | import pickle 8 | from datetime import datetime 9 | 10 | from sasctl import register_model, Session 11 | 12 | 13 | s = Session('hostname', 'username', 'password') 14 | 15 | # The register_model task will attempt to extract the necessary metadata from the provided ASTORE file or Python model. 16 | # However, if this doesn't work for your model or you need to specify different metadata, you can provide it as a 17 | # dictionary instead. For a full list of parameters that can be specified see the documentation here: 18 | # https://developer.sas.com/apis/rest/DecisionManagement/#schemamodel 19 | model_info = { 20 | 'name': 'Custom Model', 21 | 'description': 'This model is for demonstration purposes only.', 22 | 'scoreCodeType': 'Python', 23 | 'algorithm': 'Other' 24 | } 25 | 26 | # To include the contents of the model itself, simply provide the information for each file in a list. 27 | files = [ 28 | 29 | # Files can be added to the model by specifying a name of the file and its contents 30 | dict(name='greetings.txt', file='Hello World!'), 31 | 32 | # You can also specify file-like object to be included. Here we upload this Python file itself to the model. 33 | # In addition, the optional `role` parameter can be used to assign a File Role to the file in Model Manager. 34 | dict(name=__file__, file=open(__file__), role='Score code'), 35 | 36 | # The files also need not be simple text. Here we create a simple Python datetime object, pickle it, and then 37 | # include the binary file with the model. 
38 | dict(name='datetime.pkl', file=pickle.dumps(datetime.now())) 39 | ] 40 | 41 | model = register_model(model_info, name=model_info['name'], project='Examples', files=files, force=True) 42 | -------------------------------------------------------------------------------- /examples/register_sas_classification_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | import swat 8 | from sasctl import Session 9 | from sasctl.tasks import register_model, publish_model 10 | 11 | 12 | # Connect to the CAS server 13 | s = swat.CAS('hostname', 5570, 'username', 'password') 14 | 15 | # Upload the training data to CAS 16 | tbl = s.upload('data/iris.csv').casTable 17 | 18 | # Train a gradient boosting model to identify iris species. 19 | s.loadactionset('decisionTree') 20 | tbl.decisionTree.gbtreetrain(target='Species', 21 | inputs=['SepalLength', 'SepalWidth', 22 | 'PetalLength', 'PetalWidth'], 23 | savestate='gradboost_astore') 24 | 25 | # Establish a reference to the newly created ASTORE table. 26 | astore = s.CASTable('gradboost_astore') 27 | 28 | # Connect to the SAS environment 29 | with Session('hostname', 'username', 'password'): 30 | # Register the trained model by providing: 31 | # - the ASTORE containing the model 32 | # - a name for the model 33 | # - a name for the project 34 | # 35 | # NOTE: the force=True option will create the project if it does not exist. 36 | model = register_model(astore, 'Gradient Boosting', 'Iris', force=True) 37 | 38 | # Publish the model to SAS® Micro Analytic Service (MAS). Specifically to 39 | # the default MAS service "maslocal". 
40 | module = publish_model(model, 'maslocal') 41 | 42 | # sasctl wraps the published module with Python methods corresponding to 43 | # the various steps defined in the module (like "predict"). 44 | response = module.score(SepalLength=5.1, SepalWidth=3.5, 45 | PetalLength=1.4, PetalWidth=0.2) 46 | -------------------------------------------------------------------------------- /examples/register_sas_dlpy_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | #WARNING DLPY Does not work with Python 3.10 and later 8 | 9 | import swat 10 | from dlpy.applications import Sequential 11 | from dlpy.layers import Dense, InputLayer, OutputLayer 12 | from sasctl import Session 13 | from sasctl.tasks import register_model, publish_model 14 | 15 | 16 | # Connect to the CAS server 17 | s = swat.CAS('hostname', 5570, 'username', 'password') 18 | 19 | # Upload the training data to CAS 20 | tbl = s.upload('data/iris.csv').casTable 21 | 22 | # Construct a simple neural network 23 | model = Sequential(conn=s, model_table='dlpy_model') 24 | model.add(InputLayer()) 25 | model.add(Dense(n=64)) 26 | model.add(Dense(n=32)) 27 | model.add(OutputLayer(n=3)) 28 | 29 | # Train on the sample 30 | model.fit(data=tbl, 31 | inputs=['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth'], 32 | target='Species', 33 | max_epochs=50, 34 | lr=0.001) 35 | 36 | # Export the model as an ASTORE and get a reference to the new ASTORE table 37 | s.deeplearn.dlexportmodel(modelTable=model.model_table, initWeights=model.model_weights, casout='astore_table') 38 | astore = s.CASTable('astore_table') 39 | 40 | # Connect to the SAS environment 41 | with Session('hostname', 'username', 'password'): 42 | # Register the trained model by providing: 43 | # - the ASTORE containing the model 44 | # - 
a name for the model 45 | # - a name for the project 46 | # 47 | # NOTE: the force=True option will create the project if it does not exist. 48 | model = register_model(astore, 'Deep Learning', 'Iris', force=True) 49 | 50 | # Publish the model to SAS® Micro Analytic Service (MAS). Specifically to 51 | # the default MAS service "maslocal". 52 | module = publish_model(model, 'maslocal') 53 | 54 | # sasctl wraps the published module with Python methods corresponding to 55 | # the various steps defined in the module (like "predict"). 56 | response = module.score(SepalLength=5.1, SepalWidth=3.5, 57 | PetalLength=1.4, PetalWidth=0.2) 58 | 59 | s.terminate() -------------------------------------------------------------------------------- /examples/register_sas_regression_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | import swat 8 | from sasctl import Session 9 | from sasctl.tasks import register_model, publish_model 10 | 11 | 12 | with swat.CAS('hostname', 5570, 'username', 'password') as cas: 13 | # Load the regression actions in CAS 14 | cas.loadactionset('regression') 15 | 16 | # Upload the local CSV file to a CAS table 17 | tbl = cas.upload('data/boston_house_prices.csv').casTable 18 | 19 | # Model input features are everything except the target 20 | features = tbl.columns[tbl.columns != 'medv'] 21 | 22 | # Fit a linear regression model in CAS and output an ASTORE 23 | tbl.glm(target='medv', inputs=list(features), savestate='model_table') 24 | 25 | astore = cas.CASTable('model_table') 26 | 27 | # Use sasctl to connect to SAS 28 | Session('hostname', 'username', 'password') 29 | 30 | # Register the model in SAS Model Manager, creating the "Boston Housing" 31 | # project if it doesn't already exist 32 | model = register_model(astore, 'Linear 
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

"""Example: register and publish a scikit-learn classifier with sasctl.

Fits a logistic regression on the Iris data set, registers it in SAS Model
Manager, publishes it to SAS Micro Analytic Service ("maslocal"), and scores
one record against the published module.
"""

import pandas as pd
from sasctl import Session, register_model, publish_model
from sklearn.linear_model import LogisticRegression


# Load the Iris data set and split into features and target.
df = pd.read_csv('data/iris.csv')
X = df.drop('Species', axis=1)
y = df.Species.astype('category')

# Fit a scikit-learn model (high max_iter, presumably so the solver
# converges on this data — confirm if retraining).
model = LogisticRegression(max_iter=10000)
model.fit(X, y)

# Establish a session with the SAS Viya environment.
with Session('hostname', 'username', 'password'):
    model_name = 'Iris Regression'

    # Register the model in SAS Model Manager.
    register_model(model,
                   model_name,
                   X=X, # Use X to determine model inputs
                   project='Iris', # Register in "Iris" project
                   force=True) # Create project if it doesn't exist

    # Publish the model to the real-time scoring engine (MAS).
    module = publish_model(model_name, 'maslocal')

    # Select the first row of training data.
    x = X.iloc[0, :]

    # Call the published module and score the record.
    result = module.score(x)
    print(result)
def build_storage_lists(source_path, target_path):
    """
    Create lists of folders and files mapping a local OS folder tree to
    a target folder in SAS Content Server.

    Only ``.pdf`` files are collected.

    Parameters
    ----------
    source_path : str
        Path to the local folder which should be imported.
    target_path : str
        Path to the folder in SAS Content Server which will receive the
        files and folder structure.

    Returns
    -------
    folders_list : list of str
        Deduplicated target folder paths that must exist in SAS Content
        Server before uploading.
    files_list : list of dict
        One dict per PDF found, with keys ``source_file`` (local path) and
        ``target_folder`` (mapped SAS Content Server folder).
    """
    files_list = []
    folders_list = []

    for root, _, src_files in os.walk(source_path):
        for src_file in src_files:
            if not src_file.endswith(".pdf"):
                continue
            # Map the local directory onto the target folder hierarchy.
            target_folder = root.replace(source_path, target_path)
            # The original code additionally tested `root not in folders_list`,
            # but folders_list holds *mapped* target paths, so that test was
            # always true in normal use — and would wrongly skip files whenever
            # source and target paths coincide.  Deduplicate on the mapped
            # path only.
            if target_folder not in folders_list:
                folders_list.append(target_folder)
            files_list.append(
                {
                    "source_file": os.path.join(root, src_file),
                    "target_folder": target_folder,
                }
            )
    return folders_list, files_list
def read_dunder(name, path=os.path.join("src", "sasctl", "__init__.py")):
    """Extract a module-level dunder value from a Python source file.

    Scans the file line by line for an assignment of the form
    ``__<name>__ = "value"`` and returns the quoted value.

    Parameters
    ----------
    name : str
        Dunder name without underscores, e.g. ``"version"`` or ``"author"``.
    path : str, optional
        File to scan.  Defaults to the package's ``__init__.py`` so that
        existing callers are unaffected.

    Returns
    -------
    str
        The assigned value.

    Raises
    ------
    RuntimeError
        If no matching assignment is found in the file.
    """
    # Compile once; the fixed-width lookbehind anchors on `__name__ = "` so
    # only the quoted value (up to the final word boundary) is captured.
    pattern = re.compile(r'(?<=^__{}__ = [\'"]).*\b'.format(name))
    with open(path) as f:
        # Iterate lazily instead of materializing the file with readlines().
        for line in f:
            match = pattern.search(line)
            if match:
                return match.group(0)
    raise RuntimeError("Unable to find __%s__ in %s" % (name, path))
Audience :: Science/Research", 67 | "Intended Audience :: Developers", 68 | "Programming Language :: Python", 69 | "Programming Language :: Python :: 3", 70 | "Programming Language :: Python :: 3.6", 71 | "Programming Language :: Python :: 3.7", 72 | "Programming Language :: Python :: 3.8", 73 | "Programming Language :: Python :: 3.9", 74 | "Programming Language :: Python :: 3.10", 75 | "Topic :: Software Development", 76 | "Topic :: Scientific/Engineering", 77 | "Operating System :: OS Independent", 78 | ], 79 | ) 80 | -------------------------------------------------------------------------------- /src/sasctl/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | __version__ = "1.11.4" 8 | __author__ = "SAS" 9 | __credits__ = [ 10 | "Yi Jian Ching", 11 | "Lucas De Paula", 12 | "James Kochuba", 13 | "Peter Tobac", 14 | "Chris Toth", 15 | "Jon Walker", 16 | "Scott Lindauer", 17 | "DJ Moore", 18 | "Samya Potlapalli", 19 | ] 20 | __license__ = "Apache 2.0" 21 | __copyright__ = ( 22 | "Copyright © 2019, SAS Institute Inc., ", 23 | "Cary, NC, USA. All Rights Reserved.", 24 | ) 25 | 26 | import logging 27 | import warnings 28 | 29 | from .core import ( 30 | HTTPError, 31 | RestObj, 32 | Session, 33 | current_session, 34 | delete, 35 | get, 36 | get_link, 37 | platform_version, 38 | post, 39 | put, 40 | request_link, 41 | ) 42 | from .tasks import publish_model, register_model, update_model_performance 43 | 44 | # Ensure deprecation warnings are shown to users. 45 | warnings.filterwarnings("always", category=DeprecationWarning, module=r"^sasctl\.") 46 | 47 | 48 | # Prevent package from emitting log records unless consuming 49 | # application configures logging. 
50 | logging.getLogger(__name__).addHandler(logging.NullHandler()) 51 | -------------------------------------------------------------------------------- /src/sasctl/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | if __name__ == "__main__": 8 | from sasctl.utils.cli import main 9 | 10 | main() 11 | -------------------------------------------------------------------------------- /src/sasctl/_services/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | -------------------------------------------------------------------------------- /src/sasctl/_services/projects.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
class Projects(Service):
    """Interact with the SAS Viya Projects REST service."""

    _SERVICE_ROOT = "/projects"

    # Standard CRUD operations generated from the shared Service helpers.
    list_projects, get_project, update_project, delete_project = Service._crud_funcs(
        "/projects", "project"
    )

    @classmethod
    def create_project(cls, name, description=None, image=None):
        """Create a new project.

        Parameters
        ----------
        name : str
            Name of the new project.
        description : str, optional
            A description of the project.
        image : str, optional
            URI of an image to use as the project avatar

        Returns
        -------
        RestObj
            The created project as returned by the service.

        """
        body = {"name": name, "description": description, "imageUri": image}

        # The service requires its vendor-specific content type for creation.
        return cls.post(
            "/projects",
            json=body,
            headers={"Content-Type": "application/vnd.sas.project+json"},
        )
class Reports(Service):
    """Creates, reads, updates, and deletes reports, report states, and content.

    See Also
    --------
    `REST Documentation `_

    """

    _SERVICE_ROOT = "reports"

    # Only list/get are exposed; update/delete are intentionally unused.
    list_reports, get_report, _, _ = Service._crud_funcs("/reports", "report")

    @classmethod
    def get_visual_elements(cls, report):
        """Get the visual components of a report.

        Returned components may be visualized by rendering with the
        `report_images.get_images` method.

        Parameters
        ----------
        report : str or dict
            The name or id of the report, or a dictionary representation of the
            report.

        Returns
        -------
        List[RestObj]
            List of metadata about each element.

        """
        report = cls.get_report(report)
        elements = cls.request_link(report, "contentVisualElements")

        # Despite being "visual" not all elements can be rendered by the
        # report_images service.  Only return renderable elements.
        # BUG FIX: the original used `e.type not in ("Table")`, which is a
        # *string* membership (substring) test because parentheses without a
        # trailing comma do not create a tuple; a one-element tuple makes
        # the exclusion explicit and correct.
        return [e for e in elements if e.type not in ("Table",)]
% service._SERVICE_ROOT 39 | super(ServiceUnavailableError, self).__init__(msg, *args, **kwargs) 40 | -------------------------------------------------------------------------------- /src/sasctl/pzmm/README.md: -------------------------------------------------------------------------------- 1 | # pzmm Module 2 | 3 | ## Overview 4 | 5 | The goal of the pzmm (Python Zip Model Management) module is to enable users of SAS Model Manager on SAS Viya and SAS Open Model Manager to zip through the process of importing Python models into the common model repository. In order to facilitate model imports, the module allows the user to complete the following tasks: 6 | 7 | * Writes JSON files to read in the model information, which includes the following files: 8 | * `fileMetadata.json` specifies the file roles for the names of the input and output variables files, the Python score code file, and the Python pickle file 9 | * `ModelProperties.json` is used to set the model properties that are read during the import process 10 | * `inputVar.json` and `outputVar.json` are used to set the input and output variables of the model 11 | * `dmcas_fitstat.json` is an optional file that provides the fit statistics that are associated with the imported model, which are either user-generated or data-generated 12 | * `dmcas_lift.json` and `dmcas_roc.json` are optional files that provide the Lift and ROC plots that are associated with the imported model, which are data-generated 13 | * Writes the `*score.py` model file that is used for model scoring 14 | * Serializes a trained model into a binary pickle file or saves a relevant H2O MOJO file 15 | * Archives all relevant model files into a ZIP file and imports the model using REST API calls 16 | 17 | ## Prerequisites 18 | 19 | Use of this package requires the following: 20 | 21 | * Python version 3+ 22 | * Automatic generation of score code is limited to Python >= 3.6 (this functionality will be backported to Python 3+ in future releases) 23 | 
* SAS Viya 3.5+ environment or SAS Open Model Manager 1.2+ and user credentials 24 | * External Python libraries: 25 | * scipy v1.4.0+ 26 | * scikit-learn v0.22.1+ 27 | * pandas v0.25.3+ 28 | * requests v2.23.0+ 29 | 30 | ## Module Import 31 | 32 | Importing the pzmm module is done by running the following line in Python after installation of the python-sasctl package: 33 | 34 | `import sasctl.pzmm as pzmm` 35 | 36 | ## Demos 37 | 38 | The following demo video walks through the process of importing a Python model in SAS Model Manager in SAS Viya and shows most of the current features of pzmm. The notebook from the demo is called FleetManagement.ipynb and can be found in the [examples folder here.](../../../examples/FleetManagement.ipynb) 39 | 40 | [drawing](https://players.brightcove.net/3665946608001/default_default/index.html?videoId=6207407207001) 41 | 42 | ## License 43 | 44 | This project is licensed under the [Apache 2.0 License](/LICENSE). 45 | 46 | 47 | -------------------------------------------------------------------------------- /src/sasctl/pzmm/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | from .git_integration import GitIntegrate 5 | from .import_model import ImportModel 6 | from .mlflow_model import MLFlowModel 7 | from .model_parameters import ModelParameters 8 | from .pickle_model import PickleModel 9 | from .write_json_files import JSONFiles 10 | from .write_score_code import ScoreCode 11 | from .zip_model import ZipModel 12 | -------------------------------------------------------------------------------- /src/sasctl/pzmm/mlflow_model.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
class MLFlowModel:
    """Utilities for reading model metadata from an MLFlow model directory."""

    @classmethod
    def read_mlflow_model_file(cls, m_path=None):
        """
        Read and return model metadata and input/output variables as dictionaries from
        an MLFlow model directory.

        Current implementation only handles simple pickled models. Future feature work
        is required to include more types of MLFlow models.

        Parameters
        ----------
        m_path : str or pathlib.Path, optional
            Directory path of the MLFlow model files. Default is the current working
            directory.

        Returns
        -------
        var_dict : dict
            Model properties and metadata
        inputs_dict : list of dict
            Model input variables
        outputs_dict : list of dict
            Model output variables
        """
        # BUG FIX: the original signature used `m_path=Path.cwd()`, which
        # freezes the working directory at import time.  Resolve the default
        # at call time so the *current* working directory is always used.
        if m_path is None:
            m_path = Path.cwd()

        with open(Path(m_path) / "MLmodel", "r") as m_file:
            m_yml = yaml.safe_load(m_file)

        # Read in metadata and properties from the MLFlow model
        try:
            var_dict = {
                "python_version": m_yml["flavors"]["python_function"]["python_version"],
                "model_path": m_yml["flavors"]["python_function"]["model_path"],
                "serialization_format": m_yml["flavors"]["sklearn"][
                    "serialization_format"
                ],
                "run_id": m_yml["run_id"],
                "mlflowPath": m_path,
            }
        except (KeyError, TypeError):
            # Both errors indicate an MLmodel layout we don't understand;
            # the original had two identical handlers, merged here.
            raise ValueError(
                "This MLFlow model type is not currently supported."
            ) from None

        # Read in the input and output variables
        try:
            inputs_dict = json.loads(m_yml["signature"]["inputs"])
            outputs_dict = json.loads(m_yml["signature"]["outputs"])
        except KeyError:
            raise ValueError(
                "Improper or unset signature values for model. No input or output "
                "dicts could be generated. "
            ) from None

        return var_dict, inputs_dict, outputs_dict
"preformatted": false, 70 | "type": "char", 71 | "values": [ 72 | "_DATAROLE_" 73 | ] 74 | }, 75 | "_VARIABLE_": { 76 | "label": "Variable", 77 | "length": 255, 78 | "order": 1, 79 | "parameter": "_VARIABLE_", 80 | "preformatted": false, 81 | "type": "char", 82 | "values": [ 83 | "_VARIABLE_" 84 | ] 85 | }, 86 | "maxdiff": { 87 | "label": "Maximum Metric Difference", 88 | "length": 8, 89 | "order": 8, 90 | "parameter": "maxdiff", 91 | "preformatted": false, 92 | "type": "num", 93 | "values": [ 94 | "maxdiff" 95 | ] 96 | } 97 | }, 98 | "data": [ 99 | 100 | ], 101 | "xInteger": false, 102 | "yInteger": false 103 | } -------------------------------------------------------------------------------- /src/sasctl/pzmm/template_files/dmcas_relativeimportance.json: -------------------------------------------------------------------------------- 1 | { 2 | "creationTimeStamp" : "0001-01-01T00:00:00Z", 3 | "modifiedTimeStamp" : "0001-01-01T00:00:00Z", 4 | "revision" : 0, 5 | "name" : "dmcas_relativeimportance", 6 | "version" : 0, 7 | "order" : 0, 8 | "parameterMap" : { 9 | "LABEL" : { 10 | "label" : "Variable Label", 11 | "length" : 256, 12 | "order" : 1, 13 | "parameter" : "LABEL", 14 | "preformatted" : false, 15 | "type" : "char", 16 | "values" : [ "LABEL" ] 17 | }, 18 | "LEVEL" : { 19 | "label" : "Variable Level", 20 | "length" : 10, 21 | "order" : 5, 22 | "parameter" : "LEVEL", 23 | "preformatted" : false, 24 | "type" : "char", 25 | "values" : [ "LEVEL" ] 26 | }, 27 | "ROLE" : { 28 | "label" : "Role", 29 | "length" : 32, 30 | "order" : 4, 31 | "parameter" : "ROLE", 32 | "preformatted" : false, 33 | "type" : "char", 34 | "values" : [ "ROLE" ] 35 | }, 36 | "RelativeImportance" : { 37 | "label" : "Relative Importance", 38 | "length" : 8, 39 | "order" : 3, 40 | "parameter" : "RelativeImportance", 41 | "preformatted" : false, 42 | "type" : "num", 43 | "values" : [ "RelativeImportance" ] 44 | }, 45 | "Variable" : { 46 | "label" : "Variable Name", 47 | "length" : 255, 48 | "order" 
: 2, 49 | "parameter" : "Variable", 50 | "preformatted" : false, 51 | "type" : "char", 52 | "values" : [ "Variable" ] 53 | } 54 | }, 55 | "data" : [], 56 | "xInteger" : false, 57 | "yInteger" : false 58 | } -------------------------------------------------------------------------------- /src/sasctl/pzmm/template_files/reg_jsons/maxDifferences.json: -------------------------------------------------------------------------------- 1 | { 2 | "creationTimeStamp": "0001-01-01T00:00:00Z", 3 | "modifiedTimeStamp": "0001-01-01T00:00:00Z", 4 | "revision": 0, 5 | "name": "maxDifferences", 6 | "version": 0, 7 | "order": 0, 8 | "parameterMap": { 9 | "BASE": { 10 | "label": "Base Level", 11 | "length": 100, 12 | "order": 4, 13 | "parameter": "BASE", 14 | "preformatted": false, 15 | "type": "char", 16 | "values": [ 17 | "BASE" 18 | ] 19 | }, 20 | "COMPARE": { 21 | "label": "Comparison Level", 22 | "length": 100, 23 | "order": 5, 24 | "parameter": "COMPARE", 25 | "preformatted": false, 26 | "type": "char", 27 | "values": [ 28 | "COMPARE" 29 | ] 30 | }, 31 | "Metric": { 32 | "label": "Metric", 33 | "length": 9, 34 | "order": 6, 35 | "parameter": "Metric", 36 | "preformatted": false, 37 | "type": "char", 38 | "values": [ 39 | "Metric" 40 | ] 41 | }, 42 | "MetricLabel": { 43 | "label": "Metric Label", 44 | "length": 35, 45 | "order": 7, 46 | "parameter": "MetricLabel", 47 | "preformatted": false, 48 | "type": "char", 49 | "values": [ 50 | "MetricLabel" 51 | ] 52 | }, 53 | "VLABEL": { 54 | "label": "Variable Label", 55 | "length": 256, 56 | "order": 2, 57 | "parameter": "VLABEL", 58 | "preformatted": false, 59 | "type": "char", 60 | "values": [ 61 | "VLABEL" 62 | ] 63 | }, 64 | "_DATAROLE_": { 65 | "label": "Data Role", 66 | "length": 8, 67 | "order": 3, 68 | "parameter": "_DATAROLE_", 69 | "preformatted": false, 70 | "type": "char", 71 | "values": [ 72 | "_DATAROLE_" 73 | ] 74 | }, 75 | "_VARIABLE_": { 76 | "label": "Variable", 77 | "length": 255, 78 | "order": 1, 79 | "parameter": 
"_VARIABLE_", 80 | "preformatted": false, 81 | "type": "char", 82 | "values": [ 83 | "_VARIABLE_" 84 | ] 85 | }, 86 | "maxdiff": { 87 | "label": "Maximum Metric Difference", 88 | "length": 8, 89 | "order": 8, 90 | "parameter": "maxdiff", 91 | "preformatted": false, 92 | "type": "num", 93 | "values": [ 94 | "maxdiff" 95 | ] 96 | } 97 | }, 98 | "data": [ 99 | 100 | ], 101 | "xInteger": false, 102 | "yInteger": false 103 | } -------------------------------------------------------------------------------- /src/sasctl/services.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | # Import all of the services from their respective modules to provide 8 | # a single location for importing services. All services should utilize 9 | # classmethods allowing them to be used without instantiation 10 | from ._services.cas_management import CASManagement as cas_management 11 | from ._services.concepts import Concepts as concepts 12 | from ._services.data_sources import DataSources as data_sources 13 | from ._services.files import Files as files 14 | from ._services.folders import Folders as folders 15 | from ._services.microanalytic_score import MicroAnalyticScore as microanalytic_score 16 | from ._services.model_management import ModelManagement as model_management 17 | from ._services.model_publish import ModelPublish as model_publish 18 | from ._services.model_repository import ModelRepository as model_repository 19 | from ._services.projects import Projects as projects 20 | from ._services.relationships import Relationships as relationships 21 | from ._services.report_images import ReportImages as report_images 22 | from ._services.reports import Reports as reports 23 | from ._services.saslogon import SASLogon as saslogon 24 | from ._services.score_definitions 
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

import random
import string

from .decorators import versionadded


def installed_packages():
    """List Python packages installed in the current environment.

    Returns
    -------
    list of str or None
        ``name==version`` strings for each installed package, or None if no
        supported method of listing packages is available.

    Notes
    -----
    Uses pip freeze functionality so pip module must be present. For pip
    versions >=20.1, this functionality fails to provide versions for some
    conda installed, locally installed, and url installed packages. Instead
    uses the pkg_resources package which is typically bundled with pip.

    """
    from packaging import version

    try:
        import pip

        if version.parse(pip.__version__) >= version.parse("20.1"):
            import pkg_resources

            return [
                p.project_name + "==" + p.version for p in pkg_resources.working_set
            ]
        else:
            from pip._internal.operations import freeze
    except ImportError:
        # Fall back to the pre-10.0 public location of freeze.
        try:
            from pip.operations import freeze
        except ImportError:
            freeze = None

    if freeze is not None:
        return list(freeze.freeze())


@versionadded(version="1.5.1")
def random_string(length):
    """Generate a random alpha-numeric string of a given length.

    Not suitable for security-sensitive tokens; use the `secrets` module
    for those.

    Parameters
    ----------
    length : int
        The length of the generated string.

    Returns
    -------
    str

    """
    # The old implementation looped over random.choice() because
    # random.choices() was unavailable before Python 3.6; the package now
    # requires Python >= 3.6 (see setup.py), so use choices() directly.
    chars = string.ascii_letters + string.digits
    return "".join(random.choices(chars, k=length))


@versionadded(version="1.9.0")
def check_if_jupyter() -> bool:
    """
    Check if the code is being executed from a jupyter notebook.

    Source: https://stackoverflow.com/questions/47211324/check-if-module-is-running-in-
    jupyter-or-not

    Returns
    -------
    bool
        True if a jupyter notebook is detected. False otherwise.
    """
    try:
        # get_ipython() only exists inside an IPython-family interpreter.
        shell = get_ipython().__class__.__name__
        if shell == "ZMQInteractiveShell":
            return True
        elif shell == "TerminalInteractiveShell":
            return False
        else:
            return False
    except (ImportError, NameError):
        return False
import abc


class EnsembleParser(metaclass=abc.ABCMeta):
    """Abstract class for parsing decision tree ensembles.

    Attributes
    ----------
    out_transform : string
        Output transformation for generated value. For example, if logreg is
        used: 1 / (1 + exp(-{0})), where {0} stands for resulting gbvalue.
    out_var_name : string
        Name used for output variable.

    """

    def __init__(self, out_transform="{0}", out_var_name="P_TARGET"):
        self.out_transform = out_transform
        self.out_var_name = out_var_name

    @abc.abstractmethod
    def _iter_trees(self):
        """Yield (booster_id, tree) pairs for every tree in the ensemble."""

    @classmethod
    def _aggregate(cls, booster_count):
        # Emit the SAS statement summing the individual tree contributions.
        tree_values = ", ".join("treeValue%d" % i for i in range(booster_count))
        return "treeValue = sum({});\n".format(tree_values)

    def translate(self, file):
        """Translate a gradient boosting model and write SAS scoring code to
        a file.

        Parameters
        ----------
        file : file object
            Open file for writing output SAS code.

        """
        # NOTE(review): subclasses must assign self._tree_parser before
        # translate() is called, and _iter_trees() must yield at least one
        # tree or `booster_id` below is unbound -- behavior unchanged from
        # the original implementation.
        for booster_id, tree in self._iter_trees():
            file.write("/* Parsing tree {}*/\n".format(booster_id))

            self._tree_parser.init(tree, booster_id)
            self._tree_parser.parse_node(file)

            file.write("\n")

        file.write("/* Getting target probability */\n")
        file.write(self._aggregate(booster_id + 1))
        file.write(
            "{} = {};\n".format(
                self.out_var_name, self.out_transform.format("treeValue")
            )
        )
class LightgbmParser(EnsembleParser):
    """Class for parsing lightgbm model.

    Parameters
    ----------
    booster : lightgbm.basic.Booster
        Booster of lightgbm model.

    """

    def __init__(self, booster):
        super(LightgbmParser, self).__init__()

        self._booster = booster
        self._dump = booster.dump_model()

        # Only the binary-classification (sigmoid) objective is translatable.
        objective = self._dump.get("objective")
        if objective != "binary sigmoid:1":
            message = (
                "Only binary sigmoid objective function is "
                "currently supported. Received '%s'." % objective
            )
            raise ValueError(message)

        self._features = self._dump["feature_names"]
        self.out_transform = "1 / (1 + exp(-{0}))"

        # Hand the feature-name table to the per-tree parser.
        tree_parser = LightgbmTreeParser()
        tree_parser._features = self._features
        self._tree_parser = tree_parser

    def _iter_trees(self):
        """Yield (tree_index, tree_structure) for every tree in the dump."""
        for tree_info in self._dump["tree_info"]:
            yield tree_info["tree_index"], tree_info["tree_structure"]
class PmmlParser(EnsembleParser):
    """Parse a PMML gradient boosting model into the ensemble representation.

    Parameters
    ----------
    tree_root : etree.Element
        Root node of pmml gradient boosting forest.

    """

    def __init__(self, tree_root):
        super(PmmlParser, self).__init__()

        self._tree_root = tree_root

        # Strip XML namespace prefixes ("{uri}Tag" -> "Tag") so that the
        # simple path expressions below can match element names directly.
        for elem in tree_root.iter():
            tag = elem.tag
            if not hasattr(tag, "find"):
                continue  # e.g. comments, whose tag is not a string
            pos = tag.find("}")
            if pos >= 0:
                elem.tag = tag[pos + 1 :]

        # The forest lives in the first segment's nested MiningModel.
        segmentation = tree_root.find("MiningModel/Segmentation")
        self._forest = segmentation[0].find("MiningModel")

        # Fold the model's rescale constant into the sigmoid transform so the
        # generated code maps raw margin + offset to a probability.
        offset = self._forest.find("Targets/Target").get("rescaleConstant")
        self.out_transform = "1 / (1 + exp(-({{0}} + {})))".format(offset)

        self._tree_parser = PmmlTreeParser()

    def _iter_trees(self):
        """Yield (segment_index, root Node element) for each tree segment."""
        for booster_id, segment in enumerate(self._forest.find("Segmentation")):
            yield booster_id, segment.find("TreeModel/Node")
class XgbParser(EnsembleParser):
    """Class for parsing xgboost model.

    Parameters
    ----------
    booster : xgboost.core.Booster
        Booster of xgboost model.
    objective : {'reg:linear', 'reg:squarederror', 'binary:logistic'}
        Xgboost objective function.  ``'reg:squarederror'`` is the name
        XGBoost >= 1.0 uses for the objective previously called
        ``'reg:linear'``; both are accepted.

    Raises
    ------
    ValueError
        If `objective` is not one of the supported objective functions.

    """

    def __init__(self, booster, objective):
        super(XgbParser, self).__init__()

        self._booster = booster
        # Each tree is dumped as an individual JSON document.
        self._dump = booster.get_dump(dump_format="json")
        self._objective = objective
        self._features = booster.feature_names

        if objective == "binary:logistic":
            # Raw scores are log-odds; apply the sigmoid for probabilities.
            self.out_transform = "1 / (1 + exp(-{0}))"
        elif objective in ("reg:linear", "reg:squarederror"):
            # Regression output requires no transformation.
            pass
        else:
            raise ValueError(
                "Unsupported objective: '%s'. Expected 'binary:logistic', "
                "'reg:linear', or 'reg:squarederror'." % objective
            )

        self._tree_parser = XgbTreeParser()

    def _iter_trees(self):
        """Yield (tree_index, parsed tree dict) for every dumped tree."""
        for booster_id, tree_json in enumerate(self._dump):
            yield booster_id, json.loads(tree_json)
def test_command_with_args(capsys):
    """Verify that a simple command with arguments works."""
    import json

    folder_name = "Public"

    main(["folders", "get", folder_name])

    # Captured stdout should contain the folder serialized as valid JSON.
    captured = capsys.readouterr()
    output = json.loads(captured.out)

    assert output["name"] == folder_name
    assert "id" in output
    assert "links" in output
def assert_job_succeeds(job):
    """Poll `job` until it finishes and assert it completed successfully."""
    assert job.state == "pending"

    # Poll the job's state link until it leaves the in-progress states.
    while request_link(job, "state") in ("pending", "running"):
        time.sleep(1)

    final_state = request_link(job, "state")

    if final_state == "failed":
        # The job must be refreshed before the 'errors' link is available.
        job = request_link(job, "self")
        errors = request_link(job, "errors")
        pytest.fail("Job failed: " + str(errors))

    assert final_state == "completed"
def test_is_available():
    """Service reports available normally and unavailable on a 404 HEAD."""
    assert ds.is_available()

    with mock.patch("sasctl._services.data_sources.DataSources.head") as head_mock:
        head_mock.return_value.status_code = 404
        assert not ds.is_available()
def test_get_table():
    """Look up a sample table by name; the name filter is case sensitive."""
    table_name = "WATER_CLUSTER"

    table = ds.get_table(table_name, "Samples")
    assert table.name == table_name

    # Filter is case sensitive
    table = ds.get_table(table_name.lower(), "Samples")
    assert table is None
@pytest.mark.incremental
class TestFolders:
    """Exercise folder CRUD operations end to end."""

    FOLDER_NAME = "Test Folder"

    def test_list_folders(self):
        results = folders.list_folders()
        assert all(isinstance(item, RestObj) for item in results)

    def test_create_folder(self):
        created = folders.create_folder(self.FOLDER_NAME)
        assert isinstance(created, RestObj)
        assert created.name == self.FOLDER_NAME

    def test_get_folder(self):
        found = folders.get_folder(self.FOLDER_NAME)
        assert isinstance(found, RestObj)
        assert found.name == self.FOLDER_NAME

    def test_list_with_pagination(self):
        first_page = folders.list_folders(limit=2)

        # A PagedList will technically include all results, so subset to just
        # the results that were explicitly requested.
        first_page = first_page[:2]
        assert isinstance(first_page, list)
        assert all(isinstance(item, RestObj) for item in first_page)

        second_page = folders.list_folders(start=2, limit=3)
        assert isinstance(second_page, list)
        second_page = second_page[:3]

        # The two requested windows should not overlap.
        assert all(item not in first_page for item in second_page)

    def test_delete_folder(self):
        folders.delete_folder(self.FOLDER_NAME)
        assert folders.get_folder(self.FOLDER_NAME) is None
@pytest.mark.incremental
class TestMicroAnalyticScore:
    """Publish a Python module to MAS and exercise its steps."""

    MODULE_NAME = "sasctl_testmodule"

    def test_create_python_module(self):
        # Two simple step functions; MAS reads the output names from the
        # docstring of each function.
        lines = [
            "def myfunction(var1, var2):",
            " 'Output: out1, out2'",
            " out1 = var1 + 5",
            " out2 = var2.upper()",
            " return out1, out2",
            "def myfunction2(var1, var2):",
            " 'Output: out1'",
            " return var1 + var2",
        ]
        source = "\n".join(lines)

        result = mas.create_module(source=source, name=self.MODULE_NAME)
        assert result.id == self.MODULE_NAME
        assert result.scope == "public"

    def test_call_python_module_steps(self):
        module = mas.define_steps(self.MODULE_NAME)
        assert module.myfunction(1, "test") == (6, "TEST")

    def test_call_python_module_steps_pandas(self):
        pd = pytest.importorskip("pandas")

        module = mas.define_steps(self.MODULE_NAME)

        # A DataFrame row should be unpacked into the step's arguments.
        frame = pd.DataFrame(dict(var1=[1], var2=["test"]))
        assert module.myfunction(frame.iloc[0, :]) == (6, "TEST")

        frame = pd.DataFrame(dict(var1=[1.5], var2=[3]))
        assert module.myfunction2(frame.iloc[0, :]) == 4.5

    def test_call_python_module_steps_numpy(self):
        np = pytest.importorskip("numpy")

        module = mas.define_steps(self.MODULE_NAME)
        values = np.array([1.5, 3])
        assert module.myfunction2(values) == 4.5

    def test_list_modules(self):
        modules = mas.list_modules()

        assert isinstance(modules, list)
        assert modules
        assert any(m.id == self.MODULE_NAME for m in modules)

    def test_get_module(self):
        module = mas.get_module(self.MODULE_NAME)
        assert module.id == self.MODULE_NAME

    def test_list_module_steps(self):
        steps = mas.list_module_steps(self.MODULE_NAME)

        assert isinstance(steps, list)
        assert any(step.id == "myfunction" for step in steps)

    def test_delete_module(self):
        assert mas.get_module(self.MODULE_NAME) is not None

        mas.delete_module(self.MODULE_NAME)

        assert mas.get_module(self.MODULE_NAME) is None
@pytest.mark.incremental
class TestModelPublish:
    """Exercise publish-destination listing, creation, and deletion."""

    def test_list_publish_destinations(self):
        all_destinations = mp.list_destinations()

        assert isinstance(all_destinations, list)
        assert any(d.name == "maslocal" for d in all_destinations)

    def test_get_publish_destination(self):
        destination = mp.get_destination("maslocal")

        assert destination.name == "maslocal"
        assert destination.destinationType == "microAnalyticService"

    def test_create_cas_destination(self):
        destination = mp.create_cas_destination(
            "sasctlcas",
            "Public",
            "sasctl_models",
            description="Test CAS publish destination from sasctl.",
        )

        assert destination.name == "sasctlcas"
        assert destination.destinationType == "cas"
        assert destination.casLibrary == "Public"
        assert destination.casServerName == "cas-shared-default"
        assert destination.destinationTable == "sasctl_models"
        assert destination.description == "Test CAS publish destination from sasctl."

    def test_delete_cas_destination(self):
        # The destination created by the previous test should still exist.
        destination = mp.get_destination("sasctlcas")
        assert destination.name == "sasctlcas"

        mp.delete_destination("sasctlcas")

        assert mp.get_destination("sasctlcas") is None

    def test_create_mas_destination(self):
        if current_session().version_info() == 4:
            pytest.skip(
                "Publishing destinations for a remote SAS Micro Analytic Service are currently not supported."
            )

        destination = mp.create_mas_destination("sasctlmas", "localhost")

        assert destination.name == "sasctlmas"
        assert destination.destinationType == "microAnalyticService"
        assert "description" not in destination
def test_create_project():
    """Create a project and verify the returned representation."""
    if current_session().version_info() >= 4:
        pytest.skip("Projects service was removed from Viya 4.")

    created = proj.create_project(PROJECT_NAME)

    assert isinstance(created, RestObj)
    assert created.name == PROJECT_NAME
def test_get_report():
    """Retrieve a built-in report by name."""
    report_name = "User Activity"

    report = reports.get_report(report_name)

    assert isinstance(report, RestObj)
    assert report.name == report_name
def test_service_removed_error():
    """Verify a RuntimeError is raised when the service has been removed.

    Note: the original test passed the builtin ``input`` function as the
    documents argument (a leftover from sibling tests that define a local
    ``input`` variable); pass an actual document list instead so the call
    is well-formed regardless of which error path fires first.
    """
    if current_session().version_info() < 4:
        pytest.skip("Sentiment Analysis service was not removed until Viya 4.")

    documents = ["The plumage don't enter into it. It's stone dead."]

    with pytest.raises(RuntimeError):
        sa.analyze_sentiment(documents, caslib="Public")
def test_service_removed_error():
    """Verify a RuntimeError is raised when the service has been removed."""
    if current_session().version_info() < 4:
        pytest.skip("Text Categorization service was not removed until Viya 4.")

    with pytest.raises(RuntimeError):
        tc.categorize("", None, id_column="", text_column="")
def test_from_table(cas_session, airline_dataset):
    """Parse documents loaded from a promoted CAS table.

    Fixes: the original bound the table to a local named ``input``,
    shadowing the builtin, and imported ``cas_management`` mid-function.
    """
    from sasctl.services import cas_management as cm

    if current_session().version_info() > 3.5:
        pytest.skip("Text Parsing service was removed in Viya 4.")

    TABLE_NAME = "airline_tweets"
    cas_session.upload(airline_dataset, casout=dict(name=TABLE_NAME, replace=True))

    # Promote so the table is visible to the text parsing service.
    cas_session.table.promote(TABLE_NAME, targetlib="Public")

    table = cm.get_table(TABLE_NAME, "Public")
    job = tp.parse_documents(table, id_column="tweet_id", text_column="text")

    assert_job_succeeds(job)
def test_service_removed_error():
    """Verify a RuntimeError is raised when the service has been removed."""
    if current_session().version_info() < 4:
        pytest.skip("Text Parsing service was not removed until Viya 4.")

    with pytest.raises(RuntimeError):
        tp.parse_documents("", caslib="Public", min_doc_count=1)
predict_proba.append(row[int(predict[i])]) 47 | predict_df = pd.DataFrame( 48 | {"predict": list(predict), "proba": predict_proba}, index=y.index 49 | ) 50 | test_data = pd.concat([y, predict_df], axis=1) 51 | 52 | json_dicts = jf.calculate_model_statistics(target_value="1", test_data=test_data) 53 | assert "dmcas_fitstat.json" in json_dicts 54 | assert "dmcas_roc.json" in json_dicts 55 | assert "dmcas_lift.json" in json_dicts 56 | 57 | with tempfile.TemporaryDirectory() as tmp_dir: 58 | jf.calculate_model_statistics( 59 | target_value="1", test_data=test_data, json_path=Path(tmp_dir) 60 | ) 61 | assert (Path(tmp_dir) / "dmcas_fitstat.json").exists() 62 | assert (Path(tmp_dir) / "dmcas_roc.json").exists() 63 | assert (Path(tmp_dir) / "dmcas_lift.json").exists() 64 | -------------------------------------------------------------------------------- /tests/integration/test_write_score_code.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2023, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
@pytest.fixture()
def fake_predict():
    """Stand-in predict method returning a fixed (classification, prediction)."""
    return "A", 1.0


def example_model(data):
    """Register a minimal Python model, creating TestProject if it is absent.

    All columns of *data* become decimal input variables; the outputs are a
    fixed Classification/Prediction pair.
    """
    input_vars = [
        {"name": col, "type": "decimal", "role": "input"}
        for col in data.columns.to_list()
    ]
    output_vars = [
        {"name": metric, "type": "decimal", "role": "output"}
        for metric in ("Classification", "Prediction")
    ]

    project = mr.get_project("TestProject")
    if not project:
        project = mr.create_project(
            project="TestProject",
            repository=mr.default_repository().get("id"),
            variables=input_vars + output_vars,
        )
    return mr.create_model(
        model="TestModel",
        project=project,
        score_code_type="Python",
        input_variables=input_vars,
        output_variables=output_vars,
    )


def test_write_score_code(hmeq_dataset):
    """
    Test Cases:
    - Python score code is uploaded successfully
    - DS2 wrapper is created and scoreCodeType property updates
    - MAS/CAS code uploaded successfully and scoreCodeType property updated
    - files exist in output_dict
    """
    if current_session().version_info() == 4:
        pytest.skip(
            "The write_score_code function does not make any API calls for SAS Viya 4."
        )
    predictors = hmeq_dataset.drop(columns=["BAD"])
    model = example_model(predictors)

    output_dict = sc().write_score_code(
        model_prefix="TestModel",
        input_data=predictors,
        predict_method=[fake_predict, ["A", 1.0]],
        score_metrics=["Classification", "Prediction"],
        model=model,
        binary_string=b"BinaryStringModel",
    )

    for expected_file in (
        "score_TestModel.py",
        "dmcas_epscorecode.sas",
        "dmcas_packagescorecode.sas",
    ):
        assert expected_file in output_dict
@pytest.fixture(autouse=True)
def run_around_tests(session):
    """Ensure the test project is absent both before and after each test."""

    def clean():
        mr.delete_project(PROJECT_NAME)

    clean()
    yield
    clean()


def test(cas_session, iris_dataset):
    """Register and publish a SAS and a scikit-learn gradient boosting model,
    then verify both MAS modules return the same prediction for one row.
    """
    pytest.skip("Re-enable once MAS publish no longer hangs.")
    cas_session.loadactionset("decisiontree")

    tbl = cas_session.upload(iris_dataset).casTable
    features = list(tbl.columns[tbl.columns != TARGET])

    # Fit a gradient boosting model in CAS and output an ASTORE
    # (original comment incorrectly said "linear regression")
    tbl.decisiontree.gbtreetrain(
        target=TARGET, inputs=features, savestate="model_table"
    )
    astore = cas_session.CASTable("model_table")

    from sklearn.ensemble import GradientBoostingClassifier

    X = iris_dataset.drop(TARGET, axis=1)
    y = iris_dataset[TARGET]
    sk_model = GradientBoostingClassifier()
    sk_model.fit(X, y)

    sas_model = register_model(astore, SAS_MODEL_NAME, PROJECT_NAME, force=True)
    sk_model = register_model(sk_model, SCIKIT_MODEL_NAME, PROJECT_NAME, input_data=X)

    # Publish to MAS
    sas_module = publish_model(sas_model, "maslocal", replace=True)
    sk_module = publish_model(sk_model, "maslocal", replace=True)

    # Pass a row of data to MAS and receive the predicted result.
    first_row = tbl.head(1)
    result = sas_module.score(first_row)
    # BUG FIX: the original unpacked into `p1, p1, p2, ...`, binding p1 twice
    # and silently discarding the first probability.
    p0, p1, p2, species, warning = result

    result2 = sk_module.predict(first_row)
    assert result2 in ("setosa", "virginica", "versicolor")

    # SAS model may have CHAR variable that's padded with spaces.
    assert species.strip() == result2

    result3 = sk_module.predict_proba(first_row)
    assert round(sum(result3), 5) == 1
def test_list_packages():
    """installed_packages() should report core packages known to be present."""
    from sasctl.utils.misc import installed_packages

    packages = installed_packages()

    # requests and pytest are pip installs, so they appear as 'name==version';
    # sasctl may be installed from disk, so no '==' is guaranteed for it.
    for pattern in ("requests==.*", "sasctl.*", "pytest==.*"):
        assert any(re.match(pattern, pkg) for pkg in packages)


def test_check_if_jupyter():
    """
    Test Cases:
    - can't mock get_ipython() attributes; so only test for ImportError or NameError
    """
    from sasctl.utils.misc import check_if_jupyter

    assert not check_if_jupyter()
input_names=["image"], output_names=["digit"]) 39 | yield onnx.load(path), X 40 | 41 | 42 | def test_get_info(mnist_model): 43 | info = get_model_info(*mnist_model) 44 | assert isinstance(info, sasctl.utils.model_info.OnnxModelInfo) 45 | 46 | # Output be classification into 10 digits 47 | assert len(info.output_column_names) == 10 48 | assert all(c.startswith("digit") for c in info.output_column_names) 49 | 50 | assert isinstance(info.X, pd.DataFrame) 51 | assert len(info.X.columns) == 28 * 28 52 | 53 | assert info.is_classifier 54 | assert not info.is_binary_classifier 55 | assert not info.is_regressor 56 | assert not info.is_clusterer 57 | -------------------------------------------------------------------------------- /tests/unit/test_model_info_torch.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | torch = pytest.importorskip("torch") 7 | from torch import nn 8 | 9 | from sasctl.utils.model_info import get_model_info 10 | 11 | 12 | def prepare_model_for_sas(model, model_name): 13 | import base64 14 | import inspect 15 | import pickle 16 | 17 | # Pickle the entire model 18 | pickled_model = pickle.dumps(model) 19 | 20 | # Save the state dict separately 21 | state_dict = model.state_dict() 22 | 23 | # Get the source code for the model class 24 | model_source = inspect.getsource(model.__class__) 25 | 26 | # Get the source for any custom modules used in the model 27 | custom_modules = {} 28 | for name, module in model.named_modules(): 29 | if not hasattr(torch.nn, module.__class__.__name__): 30 | custom_modules[module.__class__.__name__] = inspect.getsource( 31 | module.__class__ 32 | ) 33 | 34 | # Capture initialization parameters 35 | init_signature = inspect.signature(model.__class__.__init__) 36 | init_params = {} 37 | for param_name, param in init_signature.parameters.items(): 38 | if param_name == "self": 39 | continue 40 | if hasattr(model, param_name): 
41 | init_params[param_name] = getattr(model, param_name) 42 | # else: 43 | # # If the parameter is not stored as an attribute, we need to find its value 44 | # # This is a bit tricky and might not work for all cases 45 | # frame = inspect.currentframe() 46 | # try: 47 | # while frame: 48 | # if param_name in frame.f_locals: 49 | # init_params[param_name] = frame.f_locals[param_name] 50 | # break 51 | # frame = frame.f_back 52 | # finally: 53 | # del frame 54 | 55 | # Create a metadata dictionary 56 | metadata = { 57 | "model_name": model_name, 58 | "class_name": model.__class__.__name__, 59 | "pickled_model": base64.b64encode(pickle.dumps(model)).decode("utf-8"), 60 | "state_dict": {k: v.tolist() for k, v in model.state_dict().items()}, 61 | "model_source": inspect.getsource(model.__class__), 62 | "custom_modules": custom_modules, 63 | "init_params": init_params, 64 | } 65 | 66 | return metadata 67 | 68 | 69 | class MnistLogistic(nn.Module): 70 | def __init__(self): 71 | super().__init__() 72 | self.weights = nn.Parameter(torch.randn(784, 10) / math.sqrt(784)) 73 | self.bias = nn.Parameter(torch.zeros(10)) 74 | 75 | def forward(self, xb): 76 | return xb @ self.weights + self.bias 77 | 78 | 79 | def test_mnist(): 80 | X = np.random.random(784).reshape(1, 784).astype("float32") 81 | 82 | model = MnistLogistic() 83 | info = get_model_info(model, X) 84 | 85 | meta = prepare_model_for_sas(model, "MnistLogistic") 86 | # assert info.is_classifier 87 | -------------------------------------------------------------------------------- /tests/unit/test_module_publish.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # 4 | # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
def test_publish_name():
    """Module names are sanitized into valid identifiers."""
    cases = {
        "Module Name": "ModuleName",  # spaces removed
        "1st Module": "_1stModule",  # cannot start with a number
        "$&^Va*li#d @Modu(le)!": "ValidModule",  # punctuation stripped
    }
    for raw, expected in cases.items():
        assert mp._publish_name(raw) == expected


def test_create_cas_destination():
    """create_cas_destination should POST the expected JSON body."""
    expected = {
        "name": "caslocal",
        "destinationType": "cas",
        "casServerName": "camelot",
        "casLibrary": "round",
        "destinationTable": "table",
        "description": None,
    }

    with mock.patch("sasctl._services.model_publish.ModelPublish.post") as post:
        mp.create_cas_destination(
            "caslocal", server="camelot", library="round", table="table"
        )

    assert post.called
    body = post.call_args[1]["json"]
    for key, value in body.items():
        assert value == expected[key]


def test_create_mas_destination():
    """create_mas_destination should POST the expected JSON body."""
    expected = {
        "name": "spam",
        "destinationType": "microAnalyticService",
        "masUri": "http://spam.com",
        "description": "Real-time spam",
    }

    with mock.patch("sasctl._services.model_publish.ModelPublish.post") as post:
        mp.create_mas_destination(
            expected["name"], expected["masUri"], expected["description"]
        )

    assert post.called
    body = post.call_args[1]["json"]
    for key, value in body.items():
        assert value == expected[key]
@pytest.fixture(
    scope="function", params=[(6, 2, 2), (6, 1, 4), (6, 5, 4), (6, 6, 2), (100, 10, 20)]
)
def paging(request):
    """Create a RestObj designed to page through a collection of items and the
    collection itself.

    Returns
    -------
    RestObj : initial RestObj that can be used to initialize a paging iterator
    List[dict] : List of items being used as the "server-side" source
    MagicMock : Mock of sasctl.request for performing additional validation

    """
    import math
    import re

    num_items, start, limit = request.param

    with mock.patch("sasctl.core.request") as req:
        items = [{"name": str(i)} for i in range(num_items)]

        first_page = RestObj(
            items=items[:start],
            count=len(items),
            links=[
                {
                    "rel": "next",
                    "href": "/moaritems?start=%d&limit=%d" % (start, limit),
                }
            ],
        )

        def fake_request(_, link, **kwargs):
            # Every follow-up request should honor the advertised page size.
            assert "limit=%d" % limit in link
            offset = int(re.search(r"(?<=start=)[\d]+", link).group())
            return RestObj(items=items[offset : offset + limit])

        req.side_effect = fake_request
        yield first_page, items[:], req

    # Teardown: enough requests should have been made to retrieve all the data.
    # Additional requests may have been made by workers to non-existent pages,
    # hence ">=" rather than "==".
    expected_calls = (num_items - start) / float(limit)
    assert req.call_count >= math.ceil(expected_calls)


def test_paging_required(paging):
    """Requests should be made to retrieve additional pages."""
    first_page, items, _ = paging

    with PageIterator(first_page) as pager:
        initial_size = pager._start

        for page_num, page in enumerate(pager):
            for offset, item in enumerate(page):
                if page_num == 0:
                    idx = offset
                else:
                    # The initial page is not necessarily the same size as
                    # the pages fetched afterwards.
                    idx = initial_size + (page_num - 1) * pager._limit + offset
                target = RestObj(items[idx])
                assert item.name == target.name
def test_restobj():
    """Constructor kwargs are reachable by attribute and by key."""
    o = RestObj(a=1, b=2)

    for key, expected in (("a", 1), ("b", 2)):
        assert getattr(o, key) == expected
        assert o[key] == expected

    with pytest.raises(AttributeError) as e:
        print(o.missingattribute)
    assert "missingattribute" in str(e.value)

    with pytest.raises(KeyError):
        print(o["c"])

    # setattr creates a plain attribute; it must NOT appear as a dict entry.
    setattr(o, "c", "attribute")
    assert o.c == "attribute"

    with pytest.raises(KeyError):
        print(o["c"])


def test_repr():
    """repr should expose the underlying key/value pairs."""
    source = dict(a=1, b=2)

    representation = repr(RestObj(source))
    assert "'a': 1" in representation
    assert "'b': 2" in representation


def test_str():
    """str prefers the name, falls back to id, then to the dict contents."""
    assert str(RestObj(name="test", id=1)) == "test"
    assert str(RestObj(id=1)) == "1"
    assert str({"var": "test"}) in str(RestObj(var="test"))


def test_pickle():
    """A RestObj must round-trip through pickle unchanged."""
    import pickle

    original = RestObj(name="test", id=1)

    assert pickle.loads(pickle.dumps(original)) == original
def _create_sample_archive(prefix=None, suffix=None, is_viya_4=False):
    """Build a scratch model directory, zip it, and return (bytes_zip, count).

    Parameters
    ----------
    prefix : str, optional
        If given, also create a '<prefix>*.py' file (e.g. a score script).
    suffix : list of str, optional
        File extensions for which to create empty sample files.
    is_viya_4 : bool
        Passed through to ZipModel.zip_files to control file filtering.
    """
    # Context manager guarantees the scratch directory is removed even when
    # zip_files raises; the original created TemporaryDirectory() without
    # ever calling cleanup(), leaking it until garbage collection.
    with tempfile.TemporaryDirectory() as tmp_dir:
        if suffix:
            for s in suffix:
                _ = tempfile.NamedTemporaryFile(delete=False, suffix=s, dir=tmp_dir)
        if prefix:
            _ = tempfile.NamedTemporaryFile(
                delete=False, prefix=prefix, suffix=".py", dir=tmp_dir
            )
        bytes_zip = zm.zip_files(tmp_dir, "Unit_Test_Model", is_viya4=is_viya_4)
        # Check that for files with a valid extension, the generated zip file
        # contains the expected number of files
        with closing(ZipFile(Path(tmp_dir) / "Unit_Test_Model.zip")) as archive:
            num_files = len(archive.infolist())
    return bytes_zip, num_files


def test_zip_files():
    """
    Test cases:
    - Creates in memory zip
    - Writes zip to disk
    - Returns proper BytesIO object in both cases
    """
    model_files = {
        "Test.json": json.dumps({"Test": True, "TestNum": 1}),
        "Test.py": f"import sasctl\ndef score():\n{'':4}return \"Test score\"",
    }
    bytes_zip = zm.zip_files(model_files, "Unit_Test_Model")
    # BUG FIX: original used issubclass(BytesIO, type(bytes_zip)), which tests
    # the relationship backwards; isinstance expresses the actual intent.
    assert isinstance(bytes_zip, BytesIO)

    bytes_zip, _ = _create_sample_archive(suffix=[".json"])
    assert isinstance(bytes_zip, BytesIO)


def test_zip_files_filter():
    """
    Test cases:
    - Zip proper number of files for Viya 4 model
    - Zip proper number of files for Viya 3.5 model
    - Raise error if no valid files
    """
    suffix = [".json", ".pickle", ".mojo", ".txt"]
    prefix = "score_"

    # Viya 4 model
    _, num_files = _create_sample_archive(prefix, suffix, True)
    assert num_files == 4

    # Viya 3.5 model
    _, num_files = _create_sample_archive(prefix, suffix, False)
    assert num_files == 3

    # No valid files
    with pytest.raises(FileNotFoundError):
        _create_sample_archive(suffix=[".txt"])