├── Multi-cluster SDK
├── readme.md
├── Primary fact import SDK.zip
└── Remote fact export SDK.zip
├── cloud
├── README.md
├── Prompt
│ └── Insurance claims scored payload data with reference data .numbers
└── End To End Flow
│ └── Inventory Notebook IBM Cloud.ipynb
├── Assets
├── report_templates
│ ├── test1
│ ├── factsheet_utils.ftl
│ └── factsheet_common_elements.ftl
├── archive
│ └── notebooks
│ │ ├── README.md
│ │ └── Factsheet_Asset_Environments_Utilities.ipynb
└── data
│ ├── Asset_type_definition_me.csv
│ ├── Asset_type_definition.csv
│ └── external_model_facts_payload.json
├── cloud_pak_for_data
├── README.md
├── 5.0x Version
│ └── Prompt
│ │ └── Insurance claims scored payload data with reference data .numbers
└── 4.8x Version
│ └── End To End Flow
│ └── AI-usecase Approach 4.8x Edition.ipynb
├── .gitignore
├── LICENSE
└── README.md
/Multi-cluster SDK/readme.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/cloud/README.md:
--------------------------------------------------------------------------------
1 | # IBM CLOUD SAMPLES
2 |
--------------------------------------------------------------------------------
/Assets/report_templates/test1:
--------------------------------------------------------------------------------
1 | report_templates
2 |
--------------------------------------------------------------------------------
/cloud_pak_for_data/README.md:
--------------------------------------------------------------------------------
1 | # CPD Samples
2 |
3 |
--------------------------------------------------------------------------------
/Assets/archive/notebooks/README.md:
--------------------------------------------------------------------------------
1 | # Model and Model usecase related notebooks using python client
2 |
3 |
--------------------------------------------------------------------------------
/Multi-cluster SDK/Primary fact import SDK.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/ai-governance-factsheet-samples/main/Multi-cluster SDK/Primary fact import SDK.zip
--------------------------------------------------------------------------------
/Multi-cluster SDK/Remote fact export SDK.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/ai-governance-factsheet-samples/main/Multi-cluster SDK/Remote fact export SDK.zip
--------------------------------------------------------------------------------
/cloud/Prompt/Insurance claims scored payload data with reference data .numbers:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/ai-governance-factsheet-samples/main/cloud/Prompt/Insurance claims scored payload data with reference data .numbers
--------------------------------------------------------------------------------
/cloud_pak_for_data/5.0x Version/Prompt/Insurance claims scored payload data with reference data .numbers:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/ai-governance-factsheet-samples/main/cloud_pak_for_data/5.0x Version/Prompt/Insurance claims scored payload data with reference data .numbers
--------------------------------------------------------------------------------
/Assets/data/Asset_type_definition_me.csv:
--------------------------------------------------------------------------------
1 | name,type,description,placeholder,is_array,required,hidden,readonly,default_value,label,minimum,maximum,min_length,max_length,is_searchable
2 | model_scope,string,Model Scope,Model Scope,FALSE,TRUE,FALSE,FALSE,,Model Scope,,,2,1000,TRUE
3 | model_purpose,string,Model Purpose,Model Purpose,FALSE,TRUE,FALSE,FALSE,,Model Purpose,,,5,1000,TRUE
4 | model_use,string,Model Use,Model Use,TRUE,TRUE,FALSE,FALSE,,Model Use,,,5,500,TRUE
5 | model_sponsor,string,Model Sponsor,Model Sponsor,FALSE,TRUE,FALSE,FALSE,,Model Sponsor,,,5,200,TRUE
6 |
--------------------------------------------------------------------------------
/Assets/data/Asset_type_definition.csv:
--------------------------------------------------------------------------------
1 | name,type,description,placeholder,is_array,required,hidden,readonly,default_value,label,minimum,maximum,min_length,max_length,is_searchable
2 | TrainingData_Size,integer,Number of records in the training data,desc of userattrstr,false,true,false,false,0,TrainingData_Size,,,,,true
3 | TrainingData_Ratio,string,Percentage of records in the training data,Percentage of records in the training data,false,true,false,false,0,TrainingData_Ratio,,,,,true
4 | TestData_Size,integer,desc of userattrint,desc of userattrint,false,true,false,false,0,TestData_Size,,,,,false
5 | TestData_Ratio,string,Percentage of records in the test data,Percentage of records in the test data,false,true,false,false,0,TestData_Ratio,,,,,true
6 | Train_Class_Distributions,string,class distributions,class distributions,true,true,false,false,0,Train_Class_Distributions,,,,,false
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .vscode
3 | .vscode/*
4 | *.code-workspace
5 | *.dat
6 | *.lck
7 | *.log
8 | *.ctrl
9 | *.pyc
10 |
11 | **/.DS_Store
12 |
13 | *tests/config/*
14 | metastore_db/log/README_DO_NOT_TOUCH_FILES.txt
15 |
16 | metastore_db/README_DO_NOT_TOUCH_FILES.txt
17 |
18 | metastore_db/seg0/README_DO_NOT_TOUCH_FILES.txt
19 |
20 | metastore_db/service.properties
21 |
22 | .cache/*
23 | .vscode/settings.json
24 |
25 | __pycache__/
26 | *-checkpoint.ipynb
27 |
28 | **/.ipynb_checkpoints/*
29 | **/.virtual_documents/*
30 | assets/.METADATA/job_run.*
31 | assets/job_run
32 | __pypackages__/
33 | *.py[cod]
34 | *$py.class
35 | *.so
36 | share/python-wheels/
37 | *.egg-info/
38 | .installed.cfg
39 | *.egg
40 | *.manifest
41 | *.spec
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 | htmlcov/
45 | .tox/
46 | .nox/
47 | .coverage
48 | .coverage.*
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 | cover/
56 | *.mo
57 | *.pot
58 | local_settings.py
59 | instance/
60 | .webassets-cache
61 | .scrapy
62 | docs/_build/
63 | .pybuilder/
64 | profile_default/
65 | ipython_config.py
66 | celerybeat-schedule
67 | celerybeat.pid
68 | .spyderproject
69 | .spyproject
70 | .ropeproject
71 | .mypy_cache/
72 | .pyre/
73 | .pytype/
74 | cython_debug/
75 |
76 | **/mlruns
77 | **/logs
78 | **/lightning_logs
79 | **/w3publisher_samples
--------------------------------------------------------------------------------
/Assets/report_templates/factsheet_utils.ftl:
--------------------------------------------------------------------------------
1 | <#-- This file collects macros and other helpers that can be use in factsheet reports -->
2 |
3 | <#-- conditionalColor marks a stretch of text in a color (if a condition is true) -->
4 | <#macro header>
5 |
15 | #macro>
16 |
17 | <#macro footer>
18 |
28 | #macro>
29 |
30 | <#-- conditionalColor marks a stretch of text in a color (if a condition is true) -->
31 | <#function conditionalColor str condition=true color="red">
32 | <#if condition>
33 | <#return "" + str + "" >
34 | <#else>
35 | <#return str >
36 | #if>
37 | #function>
38 |
39 | <#function color str color="red">
40 | <#return conditionalColor(str, true, color)>
41 | #function>
42 |
43 | <#-- if a JSON sub-elment can either be an atomic string or an array this macro allows to return the result in one expression -->
44 | <#macro stringOrList expr><#if expr?is_enumerable >${expr?join(" - ","")}<#else>${expr}#if>#macro>
45 |
46 | <#-- if a JSON sub-elment can either be an atomic string or an sub-json (i.e. hash) this macro allows to return the result in one expression -->
47 | <#-- TODO: when it's a hash we should not just return and empty string but some more meaningful string rendering -->
48 | <#macro stringOrHash expr><#if expr?is_hash > <#else>${expr}#if>#macro>
49 |
50 | <#-- uniqueList removes all duplicates from the originalList and assigns the result to outList -->
51 | <#function uniqueList originalList >
52 | <#assign newList = [] />
53 | <#list originalList as ol>
54 | <#if ! newList?seq_contains(ol)>
55 | <#assign newList = newList + [ol] />
56 | #if>
57 | #list>
58 | <#return newList >
59 | #function>
60 |
61 | <#-- environmentClassification assumes a model JSON object as input as returned from the Model use case API call.
62 | It returns Development, Test, Pre-production or Production -->
63 | <#function environmentClassification model>
64 | <#switch model.container_type>
65 | <#case "project">
66 | <#return "Develop" />
67 | <#break>
68 | <#case "space">
69 | <#switch model.deployment_space_type>
70 | <#case "development">
71 | <#return "Test" />
72 | <#break>
73 | <#case "pre-production">
74 | <#return "Pre-Production" />
75 | <#break>
76 | <#case "production">
77 | <#return "Production" />
78 | <#break>
79 | <#default>
80 | <#return "Unknown deployment space type!!" />
81 | #switch>
82 | <#break>
83 | <#case "catalog">
84 | <#return "Catalog" />
85 | <#break>
86 | <#default>
87 | <#return "Unknown container type!!" />
88 | #switch>
89 | #function>
90 |
91 | <#function getEnvClassForDeploymentSpaceType deployment_space_type>
92 | <#switch deployment_space_type>
93 | <#case "development">
94 | <#return {"envClass" : "Test", "envClassId" : 1} />
95 | <#break>
96 | <#case "pre-production">
97 | <#return {"envClass" : "Pre-Production", "envClassId" : 2} />
98 | <#break>
99 | <#case "production">
100 | <#return {"envClass" : "Production", "envClassId" : 3} />
101 | <#break>
102 | <#default>
103 | <#return {"envClass" : "Unknown environment type", "envClassId" : 5} />
104 | #switch>
105 | #function>
106 | <#-- environmentClassification assumes a model JSON object as input as returned from the Model use case API call.
107 | It creates an enriched copy of th input model object which has the environment class string and an number (for sorting) -->
108 | <#function environmentClassificationObject model>
109 | <#switch model.container_type>
110 | <#case "project">
111 | <#return model + {"envClass" : "Development", "envClassId" : 0} />
112 | <#break>
113 | <#case "space">
114 | <#return model + getEnvClassForDeploymentSpaceType(model.deployment_space_type) />
115 | <#break>
116 | <#case "catalog">
117 | <#if model.type?? && model.type == "external_model">
118 | <#return model + getEnvClassForDeploymentSpaceType(model.deployment_space_type) />
119 | <#else> <#-- wml model -->
120 | <#return model + {"envClass" : "Catalog", "envClassId" : 4} />
121 | #if>
122 | <#break>
123 | <#default>
124 | <#return model + {"envClass" : "Unknown environment type", "envClassId" : 5} />
125 | #switch>
126 | #function>
127 |
128 | <#function preprocessPhysicalModels models>
129 | <#assign newList = [] />
130 | <#list models as model>
131 | <#assign newList = newList + [environmentClassificationObject(model)] />
132 | #list>
133 | <#return newList >
134 | #function>
135 |
136 | <#-- Takes the original Array and arranges in order of arrangeArray and returns a new Array -->
137 | <#function arrangeList originalArray arrangeArray>
138 | <#assign newList = [] />
139 | <#list arrangeArray as item>
140 | <#if originalArray?seq_contains(item) >
141 | <#assign newList += [(item)] />
142 | #if>
143 | #list>
144 | <#return newList >
145 | #function>
146 |
147 | <#-- Unused - delete later onc we decide to not pursue this approach to metadata
148 | <#ftl
149 | attributes={
150 | "name": "Model Report",
151 | "description": "A sample template to illustrate reporting against model facts",
152 | "type": "Model"
153 | }
154 | >
155 | -->
156 | <#-- Above is an exploratory/experimental and optional header section to see if/how we could use embedded metadata in a template.
157 | The ftl attributes section above must be the very first in the file.
158 | It allows to put some metadata about the template right into the template itself
159 | This can be read programmatically. The "type" could give an indication that this template needs to be used with
160 | data conforming to the model REST API results (as opposed to Model use case) that would allow for conistency checks.
161 | The "name" and "description" could be used to in the UI to allow users to pick a template by name and description.
162 | Embeded metadata has its advantages (can't be separated/lost from the actual template).
163 | But as a disadvantage it makes the template "ugly" and complex and it's easy to have typos.
164 | But we may still want to explore other ways to store this metadata. -->
165 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # IBM AI Governance Factsheet Samples
2 |
3 | Repository for sample models, notebooks, and applications related to AI Governance Factsheets.
4 |
5 | ## Overview
6 |
7 | **IBM AI Factsheet** is a component of **watsonx.governance**. It provides a systematic approach to collecting and managing facts about machine learning models. The `ibm_aigov_facts_client` is a Python SDK designed to collect and manage facts about models, including gathering details from external models and prompts throughout their development lifecycle.
8 |
9 | Our Python client library facilitates the collection of facts from various experiments conducted within Jupyter notebooks, whether hosted on IBM Cloud, external Machine Learning Engines, or in standalone environments.
10 |
11 | ## Installation
12 | To install the IBM AI Governance Facts Client, ensure Python 3.7 or later is installed. Then, use pip:
13 |
14 | ```
15 | pip install ibm-aigov-facts-client
16 | ```
17 | The package automatically installs required dependencies.
18 |
19 | ## Client Initialization
20 | To initialize the client, follow these guidelines:
21 |
22 | - **Container Type**: Choose either `space` or `project`. Note that environment utilities (get/set) require the model asset to be stored in a Space.
23 | - **Experiment Management**: If you re-run the notebook with the same experiment name or encounter errors such as `Experiment with same name already exists`, set `set_as_current_experiment=True` during client initialization.
24 | - **`use_software` Parameter**: Set `use_software=True` if using IBM watsonx.governance software, or `False` if using IBM Cloud.
25 |
26 | Example:
27 | ```
28 | if use_software:
29 | facts_client = AIGovFactsClient(
30 | cloud_pak_for_data_configs=creds,
31 | experiment_name=,
32 | container_type="space or project",
33 | container_id=,
34 | set_as_current_experiment=True
35 | )
36 | else:
37 | facts_client = AIGovFactsClient(
38 | api_key=,
39 | experiment_name=,
40 | container_type="space or project",
41 | container_id=,
42 | set_as_current_experiment=True
43 | )
44 | ```
45 |
46 | ### Region Specification
47 | To use the IBM AI Gov Facts Client in different regions, specify the region where watsonx.governance is hosted. Examples for Sydney and Frankfurt:
48 |
49 | - Sydney
50 |
51 | ```
52 | from ibm_aigov_facts_client import AIGovFactsClient, CloudPakforDataConfig
53 |
54 | client = AIGovFactsClient(
55 | api_key=,
56 | experiment_name=,
57 | container_type="space or project",
58 | container_id=,
59 | region="sydney"
60 | )
61 | ```
62 | - Frankfurt
63 | ```
64 | from ibm_aigov_facts_client import AIGovFactsClient, CloudPakforDataConfig
64 |
65 | client = AIGovFactsClient(
66 | api_key=,
67 | experiment_name=,
68 | container_type="space or project",
69 | container_id=,
70 | region="frankfurt"
71 | )
72 | ```
73 |
74 | # Notebooks Guidance
75 | ## End-to-End Workflow Notebooks
76 |
77 | This section demonstrates the creation of a machine learning model while covering all features provided by IBM AI Factsheets, such as:
78 | - Trace and Customize Training Run
79 | - Export Training Facts
80 | - Inventory Management
81 | - Additional Training Information
82 | - Custom Facts
83 | - Capture Cell Facts
84 | - Associate Workspaces
85 | - Governing AI Assets
86 | - AI usecase Approaches
87 |
88 | | Notebook | Description | Cloud | CPD 4.8x | CPD 5.0x |
89 | | :---------------------------------- | :--------------------------------------------------------- |:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
90 | | End-to-End Workflow Notebook | Demonstrates all features provided by IBM AI Factsheet | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud/End%20To%20End%20Flow/End-to-End%20Workflow%20IBM%20Cloud.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/4.8x%20Version/End%20To%20End%20Flow/End-to-End%20Workflow%204.8x%20Edition.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/5.0x%20Version/End%20To%20End%20Flow/End-to-End%20Workflow%205.0x%20Edition.ipynb) |
91 | | Inventory Management Notebook | Demonstrates managing inventories across various platforms. It includes detailed instructions for creating, updating, and managing inventories, as well as handling collaborator roles. | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud/End%20To%20End%20Flow/Inventory%20Notebook%20IBM%20Cloud.ipynb) | NA | NA |
92 | | AI-usecase Approaches Notebook | Create and manage models and AI use cases, showcasing various approaches and versioning (Major, Minor, Patch) for effective model tracking. | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud/End%20To%20End%20Flow/AI-usecase%20Approach%20IBM%20Cloud.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/4.8x%20Version/End%20To%20End%20Flow/AI-usecase%20Approach%204.8x%20Edition.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/5.0x%20Version/End%20To%20End%20Flow/AI-usecase%20Approach%205.0x%20Edition.ipynb) |
93 |
94 | ## External Model Notebooks
95 |
96 | With watsonx.governance or AI Factsheets, you can manage models created outside IBM Cloud Pak for Data, including those from platforms like AWS or Azure. These tools allow you to track model performance and evaluation results in detailed factsheets, ensuring compliance and transparency.
97 |
98 | This section demonstrates creating, listing, and managing external models, as well as deploying and managing lifecycle phases for external models in IBM watsonx.governance.
99 |
100 | | Notebook | Description | Cloud | CPD 4.8x | CPD 5.0x |
101 | | :---------------------------------- | :--------------------------------------------------------- |:--------:| :-----: |:--------:|
102 | | Getting Started with External Model in IBM Factsheet | Demonstrates all features for external model provided by IBM AI Factsheet | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud/External%20Model/External%20Model%20with%20wx.goverance%20IBM%20Cloud.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/4.8x%20Version/External%20Model/External%20Model%20with%20wx.goverance%204.8.x.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/5.0x%20Version/External%20Model/External%20Model%20with%20wx.goverance%205.0x.ipynb) |
103 |
104 |
105 |
106 | ## Prompt Notebooks
107 |
108 | This section covers managing prompt template assets for Language Models (LLMs) across various platforms, including:
109 |
110 | - Detached Prompts: Prompts hosted on third-party platforms such as AWS Bedrock and Azure.
111 | - Standard Prompts: Created directly within the watsonx.ai platform.
112 |
113 | | Notebook | Description | Cloud | CPD 5.0x |
114 | | :---------------------------------- | :--------------------------------------------------------- |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:--------:|
115 | | End-to-End Detached PTA with Evaluation | Demonstrates the entire workflow, from the creation of prompt template assets to their evaluation, ensuring a thorough understanding of both the setup and assessment phases. | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud/Prompt/End-to-End%20Detached%20PTA%20with%20Evalutions%20IBM%20Cloud.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/5.0x%20Version/Prompt/End-to-End%20Detached%20PTA%20with%20Evalutions%205.0x.ipynb) |
116 | | Getting Started with Regular Prompt Notebook in IBM Factsheet | Demonstrates insights into the management and utilization of these prompt templates | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud/Prompt/Prompt%20Notebook%20with%20wx.goverance%20IBM%20Cloud.ipynb) | [link](https://github.com/IBM/ai-governance-factsheet-samples/blob/4082dfd199b9ba2c3a5a87f3a93519c8c1c1563f/cloud_pak_for_data/5.0x%20Version/Prompt/Prompt%20Notebook%20with%20wx.goverance%205.0x.ipynb) |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
--------------------------------------------------------------------------------
/cloud/End To End Flow/Inventory Notebook IBM Cloud.ipynb:
--------------------------------------------------------------------------------
1 | {"cells":[{"cell_type":"markdown","metadata":{"id":"b2d1378b-9649-411e-b79a-e9edef98d55f"},"source":["# Introduction \n","\n","This notebook outlines the procedures for managing inventories across various platforms. It includes detailed instructions for creating, updating, and managing inventories, as well as handling collaborator roles.\n","\n","- **Inventory Management:** Enable the creation and management of inventory items while supporting multiple users with varied roles for seamless collaboration in inventory management.\n","\n","**Required Services:**\n","- `watsonx.governance`\n","\n","**Required Packages:**\n","- **IBM Facts Client Python SDK (>=1.0.80)**\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"b093b952-c39c-4a1b-8ece-5a7f8bd23baf"},"outputs":[],"source":["!pip install ibm-aigov-facts-client\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"9683f045-a078-4e2a-9d83-faaf425652c3"},"outputs":[],"source":["import shutil\n","from dotenv import load_dotenv\n","import os\n","from IPython.core.display import display, Markdown\n","from ibm_aigov_facts_client import AIGovFactsClient,CloudPakforDataConfig\n","\n","# warnings.filterwarnings('ignore')\n","shutil.rmtree('./mlruns', ignore_errors=True)\n","load_dotenv()"]},{"cell_type":"markdown","metadata":{"id":"78b96fc0-d3db-4715-b7a8-152a1b988b33"},"source":["- This sample will use IBM Cloud by default. If you prefer to IBM watsonx.goverance software, set `use_software=True`"]},{"cell_type":"code","execution_count":2,"metadata":{"id":"0147615f-7179-4025-9393-6d3ed22b3064"},"outputs":[],"source":["use_software=True"]},{"cell_type":"markdown","metadata":{},"source":["---\n","## Authentication Setup"]},{"cell_type":"markdown","metadata":{"id":"de002a4b-298e-4f24-a2b4-184e7e903e10"},"source":["### IBM Cloud \n","\n","Your Cloud API key can be generated by going to the Users section of the Cloud console. From that page, go to **Manage->Access(IAM)->API keys-> Create**. 
Give your key a name and click Create, then copy the created key and use as API_KEY.\n","\n","NOTE: You can also get OpenScale API_KEY using IBM CLOUD CLI.\n","\n","How to install IBM Cloud (bluemix) console: instruction\n","\n","How to get api key using console:\n","\n","```\n","bx login --sso\n","bx iam api-key-create 'my_key'\n","```\n","- Get relevant space id from UI `(Deployments -> Spaces-> open space -> Manage -Space GUID)`"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"b842e440-ab6c-44ad-a6a2-c9271e602b2d"},"outputs":[],"source":["if not use_software:\n"," API_KEY=os.getenv(\"CLOUD_API_KEY\",\"\")"]},{"cell_type":"markdown","metadata":{},"source":["[back to top](#introduction)\n","### IBM watsonx.goverance software\n","\n","- Service url is the watsonx.goverance software platform host URL. For skytap environment, it would be the internal nginx URL.\n","- You can either use user `password` or platform `apikey` to authenticate"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["if use_software:\n"," creds=CloudPakforDataConfig(service_url=os.getenv(\"CPD_SERVICE_URL\", \"\"),\n"," username=os.getenv(\"CPD_USERNAME\", \"\"),\n"," password=os.getenv(\"CPD_PASSWORD\", \"\"))\n"]},{"cell_type":"markdown","metadata":{},"source":["## Client Initialization\n","\n","\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"7271e2ca-e26a-4489-ae0f-6f02705f81c1"},"outputs":[],"source":["if use_software:\n"," facts_client = AIGovFactsClient(cloud_pak_for_data_configs=creds,disable_tracing=True,external_model=True)\n","else: \n"," facts_client = AIGovFactsClient(api_key=API_KEY,disable_tracing=True,external_model=True)"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"2a54b56a-fce0-4528-9faa-116930d91f59"},"outputs":[],"source":["facts_client.version "]},{"cell_type":"markdown","metadata":{"id":"c7dc927c-dd96-4485-8570-69839c4e57c4"},"source":["---\n","## Inventory Management\n","\n","This 
section focuses on the creation and management of inventory on the Watsonx.Governance platform.\n","The inventory is a view where you can define an AI use case to request a new model, and then track the model and related assets through its lifecycle"]},{"cell_type":"markdown","metadata":{},"source":["Create Inventory
\n","\n","This method is used to create an inventory item with a specified name and description.\n","\n","If you are utilizing IBM Cloud, ensure that you provide the **`cloud_object_storage_name`** parameter to correctly associate the inventory item with a Cloud Object Storage (COS) instance.\n"]},{"cell_type":"markdown","metadata":{},"source":["#### -> Retrieves a list of cloud object storage instances."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["if not use_software:\n"," cloud_object_storage_details=facts_client.utilities.get_cloud_object_storage_instances()\n"," print(cloud_object_storage_details)\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["if not use_software:\n"," def get_storage_name_by_name(cloud_details, name):\n"," for detail in cloud_details:\n"," if detail['Name'] == name:\n"," return detail['Name'] # Return only the name\n"," return None # Return None if the name is not found\n"," \n"," \n"," # select the desired name from the above list \n"," user_selected_name = 'CloudObjectStorage'\n"," cloud_object_storage_name = get_storage_name_by_name(cloud_object_storage_details, user_selected_name)\n","\n"," if cloud_object_storage_name:\n"," print(cloud_object_storage_name)\n"," else:\n"," print(f\"No cloud_object_storage found with the name '{user_selected_name}'\")\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["if not use_software:\n"," inventory=facts_client.assets.create_inventory(name=\"New Inventory\",description=\"testing\",cloud_object_storage_name=cloud_object_storage_name)\n","else:\n"," inventory=facts_client.assets.create_inventory(name=\"New Inventory\",description=\"testing\")"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory.get_info()"]},{"cell_type":"markdown","metadata":{},"source":["Retrieve a specific inventory by its Inventory ID.
\n","\n","This method fetches the details of a specific inventory item using its inventory_id.\n"]},{"cell_type":"markdown","metadata":{},"source":["#### -> List Inventories"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory_details=facts_client.assets.list_inventories()\n","print(inventory_details)\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["desired_inventory_name = \"New Inventory\"\n","\n","inventory_id = None\n","for inventory in inventory_details:\n"," if inventory.to_dict().get(\"inventory_name\") == desired_inventory_name:\n"," inventory_id = inventory.to_dict().get(\"inventory_id\")\n"," break \n","\n","print(inventory_id)"]},{"cell_type":"markdown","metadata":{},"source":["#### -> Get Inventory"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory=facts_client.assets.get_inventory(inventory_id=inventory_id)\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory.get_info()"]},{"cell_type":"markdown","metadata":{},"source":["Set Details of an Inventory Item
\n","\n","It allows updating the name and/or description of the inventory."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory.set_detail(name=\"New Inventory Name\",description=\"New Inventory Description\")"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory.get_info()"]},{"cell_type":"markdown","metadata":{},"source":["---\n","\n","Inventory Collaborators
\n","\n","Inventories are inherently collaborative, allowing multiple users with distinct roles to participate in the inventory.\n","\n","This section provides a detailed overview of the methods available for managing collaborators within each inventory item, including:\n","\n","- Viewing the current list of collaborators\n","- Assigning new collaborators\n","- Defining roles for each collaborator\n","- Removing existing collaborators\n","\n","\n"]},{"cell_type":"markdown","metadata":{},"source":["Retrieve a list of collaborators for the inventory.
"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Get the list of collaborators\n","inventory.list_collaborators()"]},{"cell_type":"markdown","metadata":{},"source":["---\n","\n","Assigning New Collaborators
\n","\n","To add new collaborators, you need either the `user_id` or the `access_group_id`, depending on the platform you're using.\n","\n","#### In the Watsonx.Gov Platform:\n","You need either the `user_id` or the `access_group_id`. To find these:\n","\n","1. **Find the `user_id`:**\n"," - Go to the Watsonx.Gov Platform.\n"," - Navigate to **Access Control**.\n"," - Select **Users**.\n"," - Locate the `User-ID` for the desired user.\n","\n","2. **Find the `access_group_id`:**\n"," - Go to the Watsonx.Gov Platform.\n"," - Navigate to **Access Control**.\n"," - Select **Users Group**.\n"," - Find the desired group.\n"," - The `access_group_id` is located at the end of the URL, e.g., `usermgmt-ui/groups/10001`, where `10001` is the group ID.\n","\n","#### In the IBM Cloud:\n","You need either the `user_iam_id` or the `access_group_id`. To find these:\n","\n","1. **Find the `user_iam_id`:**\n"," - Go to [cloud.ibm.com](https://cloud.ibm.com).\n"," - Navigate to **IAM**.\n"," - Select **Users**.\n"," - Click on the three dots next to the user and choose **Manage User**. This will allow you to retrieve the necessary `user_iam_id` to add collaborators.\n","\n","2. **Find the `access_group_id`:**\n"," - Go to [cloud.ibm.com](https://cloud.ibm.com).\n"," - Navigate to **IAM**.\n"," - Select **Access Groups**.\n"," - Click on the three dots next to the group and choose **Manage Access**. 
\n"," - This will allow you to retrieve the necessary `access_group_id` from the details to add collaborators.\n","\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Determine user_id and access_group_id based on use_software flag\n","if use_software:\n"," user_id = \"1000000**\"\n"," access_group_id = \"1000*\"\n","else:\n"," user_id = \"IBMid-69*******\"\n"," access_group_id = \"AccessGroupId-**********\"\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["if user_id:\n"," inventory.add_collaborator(user_id=user_id, role=\"editor\")\n","elif access_group_id:\n"," inventory.add_collaborator(access_group_id=access_group_id, role=\"editor\")\n","else:\n"," raise ValueError(\"Either user_id or access_group_id must be provided\")"]},{"cell_type":"markdown","metadata":{},"source":["Settting the role of an existing collaborator in the inventory
\n","\n","This method changes the role assigned to a user in the inventory"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# desired collaborator name \n","collaborator_name=\"Software Developer\""]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Get the list of collaborators\n","collaborators = inventory.list_collaborators()\n","collaborator_id = None\n","access_group_id = None\n","\n","for collaborator in collaborators:\n"," name_or_id = collaborator.get(\"name\")\n"," \n"," if name_or_id == collaborator_name:\n"," collaborator_id = collaborator.get(\"user_id\")\n"," if not collaborator_id:\n"," access_group_id = collaborator.get(\"access_group_id\")\n"," break\n","\n","# Print the appropriate ID if found\n","if collaborator_id:\n"," print(f\"User ID: {collaborator_id}\")\n","elif access_group_id:\n"," print(f\"Access Group ID: {access_group_id}\")\n","else:\n"," print(f\"Collaborator with name or access group ID {collaborator_name=} not found.\")\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory.set_collaborator_role(user_id=collaborator_id,access_group_id=access_group_id,role=\"admin\")"]},{"cell_type":"markdown","metadata":{},"source":["Deleting existing collaborators
\n","\n","This method removes a user from the inventory."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory.remove_collaborator(user_id=collaborator_id,access_group_id=access_group_id)"]},{"cell_type":"markdown","metadata":{},"source":["---\n","## Delete Inventory\n","\n","This section covers the process for deleting an inventory item. Make sure to carefully review the item before proceeding, as deletion is irreversible.\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["inventory.delete_inventory()"]},{"cell_type":"markdown","metadata":{},"source":["**Created by:** \n","\n","\n","IBM watsonx.governance - AI Factsheet Python SDK Team\n","\n","---\n","\n","**Copyright © 2020-2024 IBM** \n","Released under the MIT License.\n"]}],"metadata":{"kernelspec":{"display_name":"Python 3.11","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.0"}},"nbformat":4,"nbformat_minor":4}
2 |
--------------------------------------------------------------------------------
/Assets/data/external_model_facts_payload.json:
--------------------------------------------------------------------------------
1 | {
2 | "model_id":"PytorchExternalModelId",
3 | "name":"PytorchExternalModel",
4 | "schemas":{
5 | "input":[
6 | {
7 | "fields":[
8 | {
9 | "metadata":{
10 | "columnInfo":{
11 | "columnLength":64
12 | },
13 | "measure":"discrete",
14 | "modeling_role":"feature"
15 | },
16 | "name":"CheckingStatus",
17 | "nullable":true,
18 | "type":"string"
19 | },
20 | {
21 | "metadata":{
22 | "modeling_role":"feature"
23 | },
24 | "name":"LoanDuration",
25 | "nullable":true,
26 | "type":"integer"
27 | },
28 | {
29 | "metadata":{
30 | "columnInfo":{
31 | "columnLength":64
32 | },
33 | "measure":"discrete",
34 | "modeling_role":"feature"
35 | },
36 | "name":"CreditHistory",
37 | "nullable":true,
38 | "type":"string"
39 | },
40 | {
41 | "metadata":{
42 | "columnInfo":{
43 | "columnLength":64
44 | },
45 | "measure":"discrete",
46 | "modeling_role":"feature"
47 | },
48 | "name":"LoanPurpose",
49 | "nullable":true,
50 | "type":"string"
51 | },
52 | {
53 | "metadata":{
54 | "modeling_role":"feature"
55 | },
56 | "name":"LoanAmount",
57 | "nullable":true,
58 | "type":"integer"
59 | },
60 | {
61 | "metadata":{
62 | "columnInfo":{
63 | "columnLength":64
64 | },
65 | "measure":"discrete",
66 | "modeling_role":"feature"
67 | },
68 | "name":"ExistingSavings",
69 | "nullable":true,
70 | "type":"string"
71 | },
72 | {
73 | "metadata":{
74 | "columnInfo":{
75 | "columnLength":64
76 | },
77 | "measure":"discrete",
78 | "modeling_role":"feature"
79 | },
80 | "name":"EmploymentDuration",
81 | "nullable":true,
82 | "type":"string"
83 | },
84 | {
85 | "metadata":{
86 | "modeling_role":"feature"
87 | },
88 | "name":"InstallmentPercent",
89 | "nullable":true,
90 | "type":"integer"
91 | },
92 | {
93 | "metadata":{
94 | "columnInfo":{
95 | "columnLength":64
96 | },
97 | "measure":"discrete",
98 | "modeling_role":"feature"
99 | },
100 | "name":"Sex",
101 | "nullable":true,
102 | "type":"string"
103 | },
104 | {
105 | "metadata":{
106 | "columnInfo":{
107 | "columnLength":64
108 | },
109 | "measure":"discrete",
110 | "modeling_role":"feature"
111 | },
112 | "name":"OthersOnLoan",
113 | "nullable":true,
114 | "type":"string"
115 | },
116 | {
117 | "metadata":{
118 | "modeling_role":"feature"
119 | },
120 | "name":"CurrentResidenceDuration",
121 | "nullable":true,
122 | "type":"integer"
123 | },
124 | {
125 | "metadata":{
126 | "columnInfo":{
127 | "columnLength":64
128 | },
129 | "measure":"discrete",
130 | "modeling_role":"feature"
131 | },
132 | "name":"OwnsProperty",
133 | "nullable":true,
134 | "type":"string"
135 | },
136 | {
137 | "metadata":{
138 | "modeling_role":"feature"
139 | },
140 | "name":"Age",
141 | "nullable":true,
142 | "type":"integer"
143 | },
144 | {
145 | "metadata":{
146 | "columnInfo":{
147 | "columnLength":64
148 | },
149 | "measure":"discrete",
150 | "modeling_role":"feature"
151 | },
152 | "name":"InstallmentPlans",
153 | "nullable":true,
154 | "type":"string"
155 | },
156 | {
157 | "metadata":{
158 | "columnInfo":{
159 | "columnLength":64
160 | },
161 | "measure":"discrete",
162 | "modeling_role":"feature"
163 | },
164 | "name":"Housing",
165 | "nullable":true,
166 | "type":"string"
167 | },
168 | {
169 | "metadata":{
170 | "modeling_role":"feature"
171 | },
172 | "name":"ExistingCreditsCount",
173 | "nullable":true,
174 | "type":"integer"
175 | },
176 | {
177 | "metadata":{
178 | "columnInfo":{
179 | "columnLength":64
180 | },
181 | "measure":"discrete",
182 | "modeling_role":"feature"
183 | },
184 | "name":"Job",
185 | "nullable":true,
186 | "type":"string"
187 | },
188 | {
189 | "metadata":{
190 | "modeling_role":"feature"
191 | },
192 | "name":"Dependents",
193 | "nullable":true,
194 | "type":"integer"
195 | },
196 | {
197 | "metadata":{
198 | "columnInfo":{
199 | "columnLength":64
200 | },
201 | "measure":"discrete",
202 | "modeling_role":"feature"
203 | },
204 | "name":"Telephone",
205 | "nullable":true,
206 | "type":"string"
207 | },
208 | {
209 | "metadata":{
210 | "columnInfo":{
211 | "columnLength":64
212 | },
213 | "measure":"discrete",
214 | "modeling_role":"feature"
215 | },
216 | "name":"ForeignWorker",
217 | "nullable":true,
218 | "type":"string"
219 | }
220 | ],
221 | "type":"struct"
222 | }
223 | ],
224 | "output":[
225 | {
226 | "fields":[
227 | {
228 | "metadata":{
229 | "modeling_role":"record-id",
230 | "primary_key":true
231 | },
232 | "name":"scoring_id",
233 | "nullable":false,
234 | "type":"string"
235 | },
236 | {
237 | "metadata":{
238 | "modeling_role":"record-timestamp"
239 | },
240 | "name":"scoring_timestamp",
241 | "nullable":false,
242 | "type":"timestamp"
243 | },
244 | {
245 | "metadata":{
246 |
247 | },
248 | "name":"deployment_id",
249 | "nullable":false,
250 | "type":"string"
251 | },
252 | {
253 | "metadata":{
254 |
255 | },
256 | "name":"asset_revision",
257 | "nullable":true,
258 | "type":"string"
259 | },
260 | {
261 | "metadata":{
262 | "columnInfo":{
263 | "columnLength":64
264 | },
265 | "measure":"discrete",
266 | "modeling_role":"feature"
267 | },
268 | "name":"CheckingStatus",
269 | "nullable":true,
270 | "type":"string"
271 | },
272 | {
273 | "metadata":{
274 | "modeling_role":"feature"
275 | },
276 | "name":"LoanDuration",
277 | "nullable":true,
278 | "type":"integer"
279 | },
280 | {
281 | "metadata":{
282 | "columnInfo":{
283 | "columnLength":64
284 | },
285 | "measure":"discrete",
286 | "modeling_role":"feature"
287 | },
288 | "name":"CreditHistory",
289 | "nullable":true,
290 | "type":"string"
291 | },
292 | {
293 | "metadata":{
294 | "columnInfo":{
295 | "columnLength":64
296 | },
297 | "measure":"discrete",
298 | "modeling_role":"feature"
299 | },
300 | "name":"LoanPurpose",
301 | "nullable":true,
302 | "type":"string"
303 | },
304 | {
305 | "metadata":{
306 | "modeling_role":"feature"
307 | },
308 | "name":"LoanAmount",
309 | "nullable":true,
310 | "type":"integer"
311 | },
312 | {
313 | "metadata":{
314 | "columnInfo":{
315 | "columnLength":64
316 | },
317 | "measure":"discrete",
318 | "modeling_role":"feature"
319 | },
320 | "name":"ExistingSavings",
321 | "nullable":true,
322 | "type":"string"
323 | },
324 | {
325 | "metadata":{
326 | "columnInfo":{
327 | "columnLength":64
328 | },
329 | "measure":"discrete",
330 | "modeling_role":"feature"
331 | },
332 | "name":"EmploymentDuration",
333 | "nullable":true,
334 | "type":"string"
335 | },
336 | {
337 | "metadata":{
338 | "modeling_role":"feature"
339 | },
340 | "name":"InstallmentPercent",
341 | "nullable":true,
342 | "type":"integer"
343 | },
344 | {
345 | "metadata":{
346 | "columnInfo":{
347 | "columnLength":64
348 | },
349 | "measure":"discrete",
350 | "modeling_role":"feature"
351 | },
352 | "name":"Sex",
353 | "nullable":true,
354 | "type":"string"
355 | },
356 | {
357 | "metadata":{
358 | "columnInfo":{
359 | "columnLength":64
360 | },
361 | "measure":"discrete",
362 | "modeling_role":"feature"
363 | },
364 | "name":"OthersOnLoan",
365 | "nullable":true,
366 | "type":"string"
367 | },
368 | {
369 | "metadata":{
370 | "modeling_role":"feature"
371 | },
372 | "name":"CurrentResidenceDuration",
373 | "nullable":true,
374 | "type":"integer"
375 | },
376 | {
377 | "metadata":{
378 | "columnInfo":{
379 | "columnLength":64
380 | },
381 | "measure":"discrete",
382 | "modeling_role":"feature"
383 | },
384 | "name":"OwnsProperty",
385 | "nullable":true,
386 | "type":"string"
387 | },
388 | {
389 | "metadata":{
390 | "modeling_role":"feature"
391 | },
392 | "name":"Age",
393 | "nullable":true,
394 | "type":"integer"
395 | },
396 | {
397 | "metadata":{
398 | "columnInfo":{
399 | "columnLength":64
400 | },
401 | "measure":"discrete",
402 | "modeling_role":"feature"
403 | },
404 | "name":"InstallmentPlans",
405 | "nullable":true,
406 | "type":"string"
407 | },
408 | {
409 | "metadata":{
410 | "columnInfo":{
411 | "columnLength":64
412 | },
413 | "measure":"discrete",
414 | "modeling_role":"feature"
415 | },
416 | "name":"Housing",
417 | "nullable":true,
418 | "type":"string"
419 | },
420 | {
421 | "metadata":{
422 | "modeling_role":"feature"
423 | },
424 | "name":"ExistingCreditsCount",
425 | "nullable":true,
426 | "type":"integer"
427 | },
428 | {
429 | "metadata":{
430 | "columnInfo":{
431 | "columnLength":64
432 | },
433 | "measure":"discrete",
434 | "modeling_role":"feature"
435 | },
436 | "name":"Job",
437 | "nullable":true,
438 | "type":"string"
439 | },
440 | {
441 | "metadata":{
442 | "modeling_role":"feature"
443 | },
444 | "name":"Dependents",
445 | "nullable":true,
446 | "type":"integer"
447 | },
448 | {
449 | "metadata":{
450 | "columnInfo":{
451 | "columnLength":64
452 | },
453 | "measure":"discrete",
454 | "modeling_role":"feature"
455 | },
456 | "name":"Telephone",
457 | "nullable":true,
458 | "type":"string"
459 | },
460 | {
461 | "metadata":{
462 | "columnInfo":{
463 | "columnLength":64
464 | },
465 | "measure":"discrete",
466 | "modeling_role":"feature"
467 | },
468 | "name":"ForeignWorker",
469 | "nullable":true,
470 | "type":"string"
471 | },
472 | {
473 | "metadata":{
474 | "columnInfo":{
475 | "columnLength":64
476 | },
477 | "modeling_role":"prediction"
478 | },
479 | "name":"Scored Labels",
480 | "nullable":true,
481 | "type":"string"
482 | },
483 | {
484 | "metadata":{
485 | "modeling_role":"probability"
486 | },
487 | "name":"Scored Probabilities",
488 | "nullable":true,
489 | "type":{
490 | "containsNull":true,
491 | "elementType":"double",
492 | "type":"array"
493 | }
494 | },
495 | {
496 | "metadata":{
497 | "modeling_role":"prediction-probability"
498 | },
499 | "name":"prediction_probability",
500 | "nullable":true,
501 | "type":"double"
502 | },
503 | {
504 | "metadata":{
505 | "columnInfo":{
506 | "columnLength":64
507 | },
508 | "modeling_role":"debiased-prediction"
509 | },
510 | "name":"debiased_prediction",
511 | "nullable":true,
512 | "type":"string"
513 | },
514 | {
515 | "metadata":{
516 | "modeling_role":"debiased-probability"
517 | },
518 | "name":"debiased_probability",
519 | "nullable":true,
520 | "type":{
521 | "containsNull":true,
522 | "elementType":"double",
523 | "type":"array"
524 | }
525 | }
526 | ],
527 | "type":"struct"
528 | }
529 | ]
530 | }
531 | }
--------------------------------------------------------------------------------
/Assets/archive/notebooks/Factsheet_Asset_Environments_Utilities.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Introduction \n",
8 |     "The notebook will **create a classification model using Scikit-learn, log facts using autologging, save model to Space using Watson Machine Learning**, **link model to model usecase**, **get and set environment types** for saved model asset. \n",
9 | "
\n",
10 | "For more information around model helper utilities see [here](https://ibm-aigov-facts-client.mybluemix.net/#model-helpers)\n",
11 | "\n",
12 | "It requires following services in the platform:\n",
13 | "\n",
14 | "- Watson Studio\n",
15 | "- Watson Machine Learning\n",
16 | "- Watson Knowledge catalog\n",
17 | "- AI factsheet\n",
18 | "- Facts client python sdk (>=1.0.42)\n",
19 | "\n",
20 | "Sections: \n",
21 | "\n",
22 | "- [Setup](#setup)\n",
23 | " - [Cloud](#setupcloud)\n",
24 | " - [Cloud Pak for data](#setupcpd)\n",
25 | "- [Initialize client](#init)\n",
26 | " - [Cloud](#setupcloudinit)\n",
27 | " - [Cloud Pak for data](#setupcpdinit)\n",
28 | "- [Create model using scikit learn](#createmodel)\n",
29 | "- [Save model in Watson machine learning space](#savemodel)\n",
30 | "- [Get model object](#getmodel)\n",
31 | "- [Add model to model usecase ](#add_mu)\n",
32 | "- [Get model current environment](#getenv)\n",
33 | "- [Set model environment ](#remove_mu)\n",
34 | "- [Cleanup](#clean)\n",
35 | "\n",
36 | "\n"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {},
42 | "source": [
43 | ""
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "# Setup \n",
51 | "### Imports\n"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "!pip install -U ibm-aigov-facts-client --quiet\n",
61 | "!pip install -U ibm-watson-machine-learning --quiet"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": 1,
67 | "metadata": {},
68 | "outputs": [],
69 | "source": [
70 | "import warnings\n",
71 | "import shutil\n",
72 | "import time\n",
73 | "from ibm_watson_machine_learning import APIClient\n",
74 | "from ibm_aigov_facts_client import AIGovFactsClient\n",
75 | "from IPython.core.display import display, Markdown\n",
76 | "\n",
77 | "warnings.filterwarnings('ignore')\n",
78 | "shutil.rmtree('./mlruns', ignore_errors=True)"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "metadata": {},
84 | "source": [
85 | "- This sample will use IBM Cloud by default. If you prefer to use Cloud pak for data, set `use_cp4d=True`\n",
86 |     "- Flag `run_cleanup_at_end` offers the option to delete created assets at the end of the notebook. The notebook will show URL to UI for model and model use case at certain cells. By default we set it to `run_cleanup_at_end=False` so you can access UI and see the changes. If you decide to clean up assets at the end, set `run_cleanup_at_end=True` and remember cells showing links to UI will `NOT` work in that case."
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": 2,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "use_cp4d=False\n",
96 | "run_cleanup_at_end=False"
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "- Model container type can be `SPACE` or `PROJECT`. However to use get/set environment utilities in this notebook, model asset need to be promoted to Space first."
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 3,
109 | "metadata": {},
110 | "outputs": [],
111 | "source": [
112 | "EXPERIMENT_NAME=\"IrisClassification\"\n",
113 | "MODEL_NAME=\"IrisScikitModel\"\n",
114 | "CONTAINER_TYPE=\"space\""
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "## IBM Cloud \n",
122 | "\n",
123 | "Your Cloud API key can be generated by going to the Users section of the Cloud console. From that page, go to **Manage->Access(IAM)->API keys-> Create**. Give your key a name and click Create, then copy the created key and use as API_KEY.\n",
124 | "\n",
125 | "NOTE: You can also get OpenScale API_KEY using IBM CLOUD CLI.\n",
126 | "\n",
127 | "How to install IBM Cloud (bluemix) console: instruction\n",
128 | "\n",
129 | "How to get api key using console:\n",
130 | "\n",
131 | "```\n",
132 | "bx login --sso\n",
133 | "bx iam api-key-create 'my_key'\n",
134 | "```\n",
135 | "- Get relevant space id from UI `(Deployments -> Spaces-> open space -> Manage -Space GUID)`"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": 4,
141 | "metadata": {},
142 | "outputs": [],
143 | "source": [
144 | "if not use_cp4d:\n",
145 | " API_KEY=\"***\"\n",
146 | " SPACE_ID=\"***\""
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "[back to top](#introduction)\n",
154 | "## Cloud Pak for Data \n",
155 | "- Service url is the Cloud pak for data platform host URL. For skytap environment, it would be the internal nginx URL.\n",
156 | "- You can either use user `password` or platform `apikey` to authenticate\n",
157 | "- Get relevant space id from UI `(Deployments -> Spaces-> open space -> Manage -Space GUID)`"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": 5,
163 | "metadata": {},
164 | "outputs": [],
165 | "source": [
166 | "if use_cp4d:\n",
167 | " from ibm_aigov_facts_client import CloudPakforDataConfig\n",
168 | "\n",
169 | " creds=CloudPakforDataConfig(service_url=\"***\",\n",
170 | " username=\"***\",\n",
171 | " api_key=\"***\")\n",
172 | " \n",
173 | " SPACE_ID=\"***\"\n"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "metadata": {},
179 | "source": [
180 | "[back to top](#introduction)\n",
181 | "# Client Initialization\n",
182 | "\n",
183 | "## IBM Cloud \n",
184 | "\n",
185 | "- Container type would be either `space` or `project`. To use get/set environment utilities, model asset should be stored in Space.\n",
186 | "- If running this notebook multiple times with the same experiment name, or if you face an error saying `Experiment with same name already exists`, use `set_as_current_experiment=True` when initiating the client"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": 6,
192 | "metadata": {},
193 | "outputs": [
194 | {
195 | "name": "stdout",
196 | "output_type": "stream",
197 | "text": [
198 | "2022/12/12 18:02:11 INFO : Experiment successfully created with ID 1 and name IrisClassification\n",
199 | "2022/12/12 18:02:12 INFO : Autolog enabled Successfully\n"
200 | ]
201 | }
202 | ],
203 | "source": [
204 | "if not use_cp4d:\n",
205 | " facts_client = AIGovFactsClient(api_key=API_KEY, experiment_name= EXPERIMENT_NAME, container_type=CONTAINER_TYPE,container_id=SPACE_ID)"
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "metadata": {},
211 | "source": [
212 | "## Cloud Pak for Data \n",
213 | "\n",
214 | "- Service url is the Cloud pak for data platform host URL. For skytap environment, it would be the internal nginx URL.\n",
215 | "- You can either use user password or platform apikey to authenticate\n",
216 | "- If running this notebook multiple times with the same experiment name, or if you face an error saying `Experiment with same name already exists`, use `set_as_current_experiment=True` when initiating the client"
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": 7,
222 | "metadata": {},
223 | "outputs": [],
224 | "source": [
225 | "if use_cp4d:\n",
226 | " facts_client = AIGovFactsClient(experiment_name= EXPERIMENT_NAME,container_type=CONTAINER_TYPE,container_id=SPACE_ID,cloud_pak_for_data_configs=creds)"
227 | ]
228 | },
229 | {
230 | "cell_type": "markdown",
231 | "metadata": {},
232 | "source": [
233 | "[back to top](#introduction)\n",
234 | "## Create model "
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": 8,
240 | "metadata": {},
241 | "outputs": [
242 | {
243 | "name": "stdout",
244 | "output_type": "stream",
245 | "text": [
246 | "2022/12/12 18:02:14 INFO : logging results to factsheet for run_id ed6c56247dab41fda8ce45997e29e0e3\n",
247 | "2022/12/12 18:02:16 INFO : Successfully logged results to Factsheet service for run_id ed6c56247dab41fda8ce45997e29e0e3 under asset_id: 9bc27910-3cd7-4799-bbbd-8ec6ee6d268b and space_id : 2f3d2c21-9b7e-4d8f-af5d-c65276c71318\n"
248 | ]
249 | }
250 | ],
251 | "source": [
252 | "import pandas as pd\n",
253 | "from sklearn import svm, datasets\n",
254 | "import numpy as np\n",
255 | "\n",
256 | "iris = datasets.load_iris()\n",
257 | "data1 = pd.DataFrame(data= np.c_[iris['data'], iris['target']],\n",
258 | " columns= ['sepal_length','sepal_width','petal_length','petal_width','target'])\n",
259 | "\n",
260 | "from sklearn.model_selection import train_test_split\n",
261 | "train, test = train_test_split(data1, test_size = 0.4, random_state = 42)\n",
262 | "\n",
263 | "X_train = train[['sepal_length','sepal_width','petal_length','petal_width']]\n",
264 | "y_train = train.target\n",
265 | "X_test = test[['sepal_length','sepal_width','petal_length','petal_width']]\n",
266 | "y_test = test.target\n",
267 | "\n",
268 | "svc = svm.SVC(kernel=\"rbf\", C=2,probability=True)\n",
269 | "model=svc.fit(X_train,y_train)\n",
270 | "\n",
271 | "model.score(X_test, y_test)\n",
272 | "y_pred = model.predict(X_test)\n",
273 | "y_pred_prob = model.predict_proba(X_test)\n"
274 | ]
275 | },
276 | {
277 | "cell_type": "markdown",
278 | "metadata": {},
279 | "source": [
280 | "[back to top](#introduction)\n",
281 | "# Store model to Space using Watson Machine Learning "
282 | ]
283 | },
284 | {
285 | "cell_type": "code",
286 | "execution_count": 9,
287 | "metadata": {},
288 | "outputs": [],
289 | "source": [
290 | "if use_cp4d:\n",
291 | " WML_CREDENTIALS = {\n",
292 | " \"url\": creds.url,\n",
293 | " \"username\": creds.username,\n",
294 | " \"apikey\" : creds.api_key,\n",
295 | " \"instance_id\": \"wml_local\",\n",
296 | " \"version\" : \"4.5\"\n",
297 | " }\n",
298 | "else:\n",
299 | " WML_CREDENTIALS = {\n",
300 | " \"url\": \"https://us-south.ml.cloud.ibm.com\",\n",
301 | " \"apikey\": API_KEY\n",
302 | " }"
303 | ]
304 | },
305 | {
306 | "cell_type": "code",
307 | "execution_count": 10,
308 | "metadata": {},
309 | "outputs": [
310 | {
311 | "data": {
312 | "text/plain": [
313 | "'1.0.253'"
314 | ]
315 | },
316 | "execution_count": 10,
317 | "metadata": {},
318 | "output_type": "execute_result"
319 | }
320 | ],
321 | "source": [
322 | "wml_client = APIClient(WML_CREDENTIALS)\n",
323 | "wml_client.version"
324 | ]
325 | },
326 | {
327 | "cell_type": "code",
328 | "execution_count": 11,
329 | "metadata": {},
330 | "outputs": [
331 | {
332 | "data": {
333 | "text/plain": [
334 | "'SUCCESS'"
335 | ]
336 | },
337 | "execution_count": 11,
338 | "metadata": {},
339 | "output_type": "execute_result"
340 | }
341 | ],
342 | "source": [
343 | "wml_client.set.default_space(SPACE_ID)"
344 | ]
345 | },
346 | {
347 | "cell_type": "code",
348 | "execution_count": 12,
349 | "metadata": {},
350 | "outputs": [
351 | {
352 | "name": "stdout",
353 | "output_type": "stream",
354 | "text": [
355 | "Software Specification ID: 12b83a17-24d8-5082-900f-0ab31fbfd3cb\n",
356 | "Storing model .....\n",
357 | "Done\n",
358 | "Model ID: 72d97d15-1e9c-4246-95ac-fbfdb519168c\n"
359 | ]
360 | }
361 | ],
362 | "source": [
363 | "software_spec_uid = wml_client.software_specifications.get_id_by_name(\"runtime-22.1-py3.9\")\n",
364 | "print(\"Software Specification ID: {}\".format(software_spec_uid))\n",
365 | "\n",
366 | "model_props = {\n",
367 | " wml_client._models.ConfigurationMetaNames.NAME:\"{}\".format(MODEL_NAME),\n",
368 | " wml_client._models.ConfigurationMetaNames.TYPE: \"scikit-learn_1.0\",\n",
369 | " wml_client._models.ConfigurationMetaNames.SOFTWARE_SPEC_UID: software_spec_uid,\n",
370 | " wml_client._models.ConfigurationMetaNames.LABEL_FIELD:\"target\",\n",
371 | "}\n",
372 | "\n",
373 | "facts_client.export_facts.prepare_model_meta(wml_client=wml_client,meta_props=model_props)\n",
374 | "\n",
375 | "print(\"Storing model .....\")\n",
376 | "\n",
377 | "published_model_details = wml_client.repository.store_model(model=model, meta_props=model_props, training_data=data1.drop([\"target\"], axis=1), training_target=data1.target)\n",
378 | "model_id = wml_client.repository.get_model_id(published_model_details)\n",
379 | "print(\"Done\")\n",
380 | "print(\"Model ID: {}\".format(model_id))"
381 | ]
382 | },
383 | {
384 | "cell_type": "markdown",
385 | "metadata": {},
386 | "source": [
387 | "## Get model \n",
388 | "- It initiates a model object using Watson machine learning saved model details and returns a model object that allows you to use all helper methods [here](https://ibm-aigov-facts-client.mybluemix.net/#model-helpers)\n",
389 | "- `verbose=True` gives you additional information about the model asset\n",
390 | "- You can also use model_id to initiate the model object `facts_client.assets.get_model(model_id=)`"
391 | ]
392 | },
393 | {
394 | "cell_type": "code",
395 | "execution_count": 13,
396 | "metadata": {},
397 | "outputs": [
398 | {
399 | "name": "stdout",
400 | "output_type": "stream",
401 | "text": [
402 | "2022/12/12 18:02:23 INFO : Current model information: {'asset_id': '72d97d15-1e9c-4246-95ac-fbfdb519168c', 'container_type': 'space', 'container_id': '2f3d2c21-9b7e-4d8f-af5d-c65276c71318', 'facts_type': 'modelfacts_user'}\n"
403 | ]
404 | },
405 | {
406 | "data": {
407 | "text/plain": [
408 | "{'name': 'IrisScikitModel',\n",
409 | " 'asset_type': 'wml_model',\n",
410 | " 'url': 'https://dataplatform.cloud.ibm.com/ml-runtime/models/72d97d15-1e9c-4246-95ac-fbfdb519168c?space_id=2f3d2c21-9b7e-4d8f-af5d-c65276c71318&context=cpdaas',\n",
411 | " 'asset_id': '72d97d15-1e9c-4246-95ac-fbfdb519168c',\n",
412 | " 'container_type': 'space',\n",
413 | " 'container_id': '2f3d2c21-9b7e-4d8f-af5d-c65276c71318',\n",
414 | " 'facts_type': 'modelfacts_user'}"
415 | ]
416 | },
417 | "execution_count": 13,
418 | "metadata": {},
419 | "output_type": "execute_result"
420 | }
421 | ],
422 | "source": [
423 | "wml_model=facts_client.assets.get_model(wml_stored_model_details=published_model_details)\n",
424 | "wml_model.get_info(verbose=True)"
425 | ]
426 | },
427 | {
428 | "cell_type": "code",
429 | "execution_count": 14,
430 | "metadata": {},
431 | "outputs": [
432 | {
433 | "data": {
434 | "text/markdown": [
435 | "[Click here to see the created wml model details in UI](https://dataplatform.cloud.ibm.com/ml-runtime/models/72d97d15-1e9c-4246-95ac-fbfdb519168c?space_id=2f3d2c21-9b7e-4d8f-af5d-c65276c71318&context=cpdaas)"
436 | ],
437 | "text/plain": [
438 | ""
439 | ]
440 | },
441 | "metadata": {},
442 | "output_type": "display_data"
443 | }
444 | ],
445 | "source": [
446 | "model_ui_url = wml_model.get_info(verbose=True)[\"url\"]\n",
447 | "display(Markdown(\"[Click here to see the created wml model details in UI](\" + model_ui_url + \")\"))"
448 | ]
449 | },
450 | {
451 | "cell_type": "markdown",
452 | "metadata": {},
453 | "source": [
454 | "[back to top](#introduction)\n",
455 | "## Add model to new model usecase \n",
456 | "\n",
457 | "- Model usecase helps to track model asset lifecycle across different environments like development,pre-production and production.\n",
458 | "- You can link a model to existing model usecase by providing `model_usecase_id` or create a new usecase and link to that by providing `model_usecase_name` and `model_usecase_desc`.\n",
459 | "- You can link to existing model usecase by invoking `external_model.add_tracking_model_usecase(model_usecase_id=\"\")`\n",
460 | "- If `model_usecase_catalog_id` is not given, it uses the default available platform asset catalog id.\n",
461 | "- When you open the model usecase in model inventory, `model_usecase_id` refers to value after `asset/{ usecase id}` in browser URL.\n",
462 | "- You can also invoke `get_tracking_model_usecase()` to return currently tracked model usecase and get model usecase details by invoking `get_tracking_model_usecase().get_info()`\n",
463 | "- If using Cloud pak for data, make sure Openpages integration is disabled in your platform (create catalog permission needed). You can check it from UI `(Model inventory -> Manage)`."
464 | ]
465 | },
466 | {
467 | "cell_type": "code",
468 | "execution_count": 15,
469 | "metadata": {},
470 | "outputs": [
471 | {
472 | "name": "stdout",
473 | "output_type": "stream",
474 | "text": [
475 | "2022/12/12 18:02:24 INFO : Initiate linking model to new model use case......\n",
476 | "2022/12/12 18:02:38 INFO : Successfully finished linking Model 72d97d15-1e9c-4246-95ac-fbfdb519168c to model use case\n"
477 | ]
478 | },
479 | {
480 | "data": {
481 | "text/plain": [
482 | "{'model_entry_catalog_id': '745c21d7-ab6f-4408-9b3b-5d4760ccee1a',\n",
483 | " 'model_entry_id': 'aba647a6-f4ba-4133-bbe9-9e1c86f1263d',\n",
484 | " 'model_entry_name': 'ModelUsecaseDraft',\n",
485 | " 'model_entry_status': 'draft'}"
486 | ]
487 | },
488 | "execution_count": 15,
489 | "metadata": {},
490 | "output_type": "execute_result"
491 | }
492 | ],
493 | "source": [
494 | "wml_model.add_tracking_model_usecase(model_usecase_name=\"ModelUsecaseDraft\",model_usecase_desc=\"Draft model usecase for testing\")"
495 | ]
496 | },
497 | {
498 | "cell_type": "code",
499 | "execution_count": 16,
500 | "metadata": {},
501 | "outputs": [
502 | {
503 | "data": {
504 | "text/plain": [
505 | "{'model_usecase_id': 'aba647a6-f4ba-4133-bbe9-9e1c86f1263d',\n",
506 | " 'container_type': 'catalog',\n",
507 | " 'catalog_id': '745c21d7-ab6f-4408-9b3b-5d4760ccee1a',\n",
508 | " 'facts_type': 'model_entry_user'}"
509 | ]
510 | },
511 | "execution_count": 16,
512 | "metadata": {},
513 | "output_type": "execute_result"
514 | }
515 | ],
516 | "source": [
517 | "model_usecase=wml_model.get_tracking_model_usecase()\n",
518 | "model_usecase.get_info()"
519 | ]
520 | },
521 | {
522 | "cell_type": "markdown",
523 | "metadata": {},
524 | "source": [
525 | "[back to top](#introduction)\n",
526 | "## Get model environment type \n",
527 | "- When tracking is enabled, the recording of facts is triggered by actions on the model or related assets. Similarly, changes in the AI lifecycle control where the model displays in a model usecase, to show the progression from development to operation. \n",
528 | "- For example, when a model is saved in a project, the model displays in the Develop pillar. When the model is promoted to a space, the entry is updated to show the model in the Test pillar, and so on.\n",
529 | "- `get_environment_type()` shows you where your asset exists and the reason behind it.\n",
530 | "- `TEST` environment is similar to `DEPLOY` in Cloud pak for data UI until `4.6.3` release. There is no difference in functionalities. From `4.6.3` onwards the environment will be called/showed as `TEST` in Cloud pak for data platform. \n",
531 | "\n",
532 | "\n",
533 | "Current expectations:\n",
534 | "\n",
535 | "| Model location | Develop | Test (show as Deploy in CPD <=4.6.2) | Validate | Operate |\n",
536 | "| --- | --- | --- |--- | --- |\n",
537 | "| Project | ✓ |\n",
538 | "| Space | | ✓ |\n",
539 | "| Space tagged with AIGovernance: Pre-production | | | ✓ |\n",
540 | "| Space tagged with AIGovernance: Production | | | | ✓ |\n",
541 | "| Watson OpenScale Pre-production | | |✓ |\n",
542 | "| Watson OpenScale Production | | | | ✓ |\n",
543 | "\n",
544 | "- For external models, space concept is not applicable, however the method works as is considering your external model is saved as model stub in `Platform asset catalog` and linked to model usecase. You can follow steps as follows:\n",
545 | "    - Save model stub using `save_external_model_asset()`. You can see example [here](https://ibm-aigov-facts-client.mybluemix.net/#externalmodelfactselements)\n",
546 | " - Add to model usecase invoking `add_tracking_model_usecase()`\n",
547 | " - Get current environment invoking `get_environment_type()`\n"
548 | ]
549 | },
550 | {
551 | "cell_type": "code",
552 | "execution_count": 17,
553 | "metadata": {},
554 | "outputs": [
555 | {
556 | "data": {
557 | "text/plain": [
558 | "{'classification': 'TEST',\n",
559 | " 'reason': 'The space type is development which is considered as TEST environment and asset shows under TEST stage'}"
560 | ]
561 | },
562 | "execution_count": 17,
563 | "metadata": {},
564 | "output_type": "execute_result"
565 | }
566 | ],
567 | "source": [
568 | "wml_model.get_environment_type()"
569 | ]
570 | },
571 | {
572 | "cell_type": "markdown",
573 | "metadata": {},
574 | "source": [
575 | "[back to top](#introduction)\n",
576 | "## Set model environment type \n",
577 | "\n",
578 | "\n",
579 | "Following notes describe how the interaction between the lifecycle components controls where a Watson Machine Learning displays in a model entry.\n",
580 | "\n",
581 | "- When a model is promoted to a space, the model displays in the Test pillar. If the deployment for that model is evaluated in Watson OpenScale and tagged as pre-production, then the model displays in Validate rather than Test. If the model deployment is tagged as production instead of pre-production, then it moves to the Operate pillar.\n",
582 | "- If you assign the space tag `AIGovernance: Pre-production` or `AIGovernance: Production`, the model displays in the Validate or Operate pillar, respectively. Note: This change is triggered by an action resulting in a change to the model metadata, such as updating the model name, description, or tags, if the tag is added to the space after the model is tracked.\n",
583 | "- If a model is promoted to a space where the space has tag `AIGovernance: Pre-production`, the model displays in the Validate pillar. If the deployment for that model is evaluated in Watson OpenScale and tagged as pre-production, then the model displays in the Validate pillar. If the model deployment is tagged as production instead of pre-production, then it moves to the Operate pillar.\n",
584 | "- If a model is promoted to a space where the space has the tag AIGovernance: Production, it displays in the Operate pillar. If the deployment for this model is evaluated in Watson OpenScale and tagged as production, then it will be still in Operate pillar. But if it is tagged as Pre-production it displays in the Validate pillar.\n",
585 | "- If a model deployment is not evaluated in Watson OpenScale, it will have a Pending Evaluation tag. If the model deployment is evaluated, it will have an Evaluated tag. If the model deployment is approved after evaluation in Watson OpenScale, it will have an Approved tag.\n",
586 | "\n",
587 | "Notes:\n",
588 | "- If model is promoted to space and not monitored in Watson OpenScale, you can set model to specific environment pillar.\n",
589 | "- For external models, behavior is identical."
590 | ]
591 | },
592 | {
593 | "cell_type": "code",
594 | "execution_count": 18,
595 | "metadata": {},
596 | "outputs": [
597 | {
598 | "name": "stdout",
599 | "output_type": "stream",
600 | "text": [
601 | "2022/12/12 18:02:46 INFO : Asset successfully moved from test to validate environment\n"
602 | ]
603 | }
604 | ],
605 | "source": [
606 | "time.sleep(3)\n",
607 | "wml_model.set_environment_type(from_container=\"test\",to_container=\"validate\")\n",
608 | "\n",
609 | "# uncomment if running cell by cell to see changes in UI\n",
610 | "\n",
611 | "# model_usecase_ui_url = model_usecase.get_info(verbose=True)[\"url\"]\n",
612 | "# display(Markdown(\"[Click here to see the model current environment in UI (Under Assets tab)](\" + model_usecase_ui_url + \")\"))"
613 | ]
614 | },
615 | {
616 | "cell_type": "code",
617 | "execution_count": 19,
618 | "metadata": {},
619 | "outputs": [
620 | {
621 | "name": "stdout",
622 | "output_type": "stream",
623 | "text": [
624 | "2022/12/12 18:02:52 INFO : Asset successfully moved from validate to operate environment\n"
625 | ]
626 | }
627 | ],
628 | "source": [
629 | "time.sleep(3)\n",
630 | "wml_model.set_environment_type(from_container=\"validate\",to_container=\"operate\")\n",
631 | "\n",
632 | "# uncomment if running cell by cell to see changes in UI\n",
633 | "\n",
634 | "# model_usecase_ui_url = model_usecase.get_info(verbose=True)[\"url\"]\n",
635 | "# display(Markdown(\"[Click here to see the model current environment in UI (Under Assets tab)](\" + model_usecase_ui_url + \")\"))"
636 | ]
637 | },
638 | {
639 | "cell_type": "code",
640 | "execution_count": 20,
641 | "metadata": {},
642 | "outputs": [
643 | {
644 | "name": "stdout",
645 | "output_type": "stream",
646 | "text": [
647 | "2022/12/12 18:02:59 INFO : Asset successfully moved from operate to validate environment\n"
648 | ]
649 | }
650 | ],
651 | "source": [
652 | "time.sleep(3)\n",
653 | "wml_model.set_environment_type(from_container=\"operate\",to_container=\"validate\")\n",
654 | "\n",
655 | "# uncomment if running cell by cell to see changes in UI\n",
656 | "\n",
657 | "# model_usecase_ui_url = model_usecase.get_info(verbose=True)[\"url\"]\n",
658 | "# display(Markdown(\"[Click here to see the model current environment in UI (Under Assets tab)](\" + model_usecase_ui_url + \")\"))"
659 | ]
660 | },
661 | {
662 | "cell_type": "code",
663 | "execution_count": 21,
664 | "metadata": {},
665 | "outputs": [
666 | {
667 | "name": "stdout",
668 | "output_type": "stream",
669 | "text": [
670 | "2022/12/12 18:03:05 INFO : Asset successfully moved from validate to test environment\n"
671 | ]
672 | }
673 | ],
674 | "source": [
675 | "time.sleep(3)\n",
676 | "wml_model.set_environment_type(from_container=\"validate\",to_container=\"test\")\n",
677 | "\n",
678 | "# uncomment if running cell by cell to see changes in UI\n",
679 | "\n",
680 | "# model_usecase_ui_url = model_usecase.get_info(verbose=True)[\"url\"]\n",
681 | "# display(Markdown(\"[Click here to see the model current environment in UI (Under Assets tab)](\" + model_usecase_ui_url + \")\"))"
682 | ]
683 | },
684 | {
685 | "cell_type": "markdown",
686 | "metadata": {},
687 | "source": [
688 | "## Cleanup "
689 | ]
690 | },
691 | {
692 | "cell_type": "code",
693 | "execution_count": 22,
694 | "metadata": {},
695 | "outputs": [
696 | {
697 | "data": {
698 | "text/markdown": [
699 | "[Click here to see the created wml model details in UI](https://dataplatform.cloud.ibm.com/ml-runtime/models/72d97d15-1e9c-4246-95ac-fbfdb519168c?space_id=2f3d2c21-9b7e-4d8f-af5d-c65276c71318&context=cpdaas)"
700 | ],
701 | "text/plain": [
702 | ""
703 | ]
704 | },
705 | "metadata": {},
706 | "output_type": "display_data"
707 | },
708 | {
709 | "data": {
710 | "text/markdown": [
711 | "[Click here to see the created model use case in UI](https://dataplatform.cloud.ibm.com/data/catalogs/745c21d7-ab6f-4408-9b3b-5d4760ccee1a/asset/aba647a6-f4ba-4133-bbe9-9e1c86f1263d?context=cpdaas)"
712 | ],
713 | "text/plain": [
714 | ""
715 | ]
716 | },
717 | "metadata": {},
718 | "output_type": "display_data"
719 | }
720 | ],
721 | "source": [
722 | "if run_cleanup_at_end:\n",
723 | " # remove model and model usecase\n",
724 | " model_usecase=wml_model.get_tracking_model_usecase()\n",
725 | " wml_model.remove_tracking_model_usecase()\n",
726 | " facts_client.assets.remove_asset(asset_id=model_usecase.get_info()[\"model_usecase_id\"],container_type=model_usecase.get_info()[\"container_type\"],container_id=model_usecase.get_info()[\"catalog_id\"])\n",
727 | " facts_client.assets.remove_asset(asset_id=wml_model.get_info()[\"asset_id\"],container_type=wml_model.get_info()[\"container_type\"],container_id=wml_model.get_info()[\"container_id\"])\n",
728 | "\n",
729 | "else:\n",
730 | " \n",
731 | " model_ui_url = wml_model.get_info(verbose=True)[\"url\"]\n",
732 | " display(Markdown(\"[Click here to see the created wml model details in UI](\" + model_ui_url + \")\"))\n",
733 | " model_usecase_ui_url = model_usecase.get_info(verbose=True)[\"url\"]\n",
734 | " display(Markdown(\"[Click here to see the created model use case in UI](\" + model_usecase_ui_url + \")\"))"
735 | ]
736 | },
737 | {
738 | "cell_type": "markdown",
739 | "metadata": {},
740 | "source": [
741 | "[back to top](#introduction)"
742 | ]
743 | },
744 | {
745 | "cell_type": "markdown",
746 | "metadata": {},
747 | "source": [
748 | "Congratulations. You have successfully finished the end to end sample around get/set environment pillars helpers in model usecase."
749 | ]
750 | },
751 | {
752 | "cell_type": "markdown",
753 | "metadata": {},
754 | "source": [
755 | "Copyright © 2020, 2022 IBM. This notebook and its source code are released under the terms of the MIT License."
756 | ]
757 | },
758 | {
759 | "cell_type": "markdown",
760 | "metadata": {},
761 | "source": []
762 | }
763 | ],
764 | "metadata": {
765 | "interpreter": {
766 | "hash": "671fb7aa161d7cd648bbbcad6b003541cd9cf13e8157e186cbe0090b82566204"
767 | },
768 | "kernelspec": {
769 | "display_name": "Python 3 (ipykernel)",
770 | "language": "python",
771 | "name": "python3"
772 | },
773 | "language_info": {
774 | "codemirror_mode": {
775 | "name": "ipython",
776 | "version": 3
777 | },
778 | "file_extension": ".py",
779 | "mimetype": "text/x-python",
780 | "name": "python",
781 | "nbconvert_exporter": "python",
782 | "pygments_lexer": "ipython3",
783 | "version": "3.9.7"
784 | },
785 | "varInspector": {
786 | "cols": {
787 | "lenName": 16,
788 | "lenType": 16,
789 | "lenVar": 40
790 | },
791 | "kernels_config": {
792 | "python": {
793 | "delete_cmd_postfix": "",
794 | "delete_cmd_prefix": "del ",
795 | "library": "var_list.py",
796 | "varRefreshCmd": "print(var_dic_list())"
797 | },
798 | "r": {
799 | "delete_cmd_postfix": ") ",
800 | "delete_cmd_prefix": "rm(",
801 | "library": "var_list.r",
802 | "varRefreshCmd": "cat(var_dic_list()) "
803 | }
804 | },
805 | "types_to_exclude": [
806 | "module",
807 | "function",
808 | "builtin_function_or_method",
809 | "instance",
810 | "_Feature"
811 | ],
812 | "window_display": false
813 | }
814 | },
815 | "nbformat": 4,
816 | "nbformat_minor": 4
817 | }
818 |
--------------------------------------------------------------------------------
/Assets/report_templates/factsheet_common_elements.ftl:
--------------------------------------------------------------------------------
1 | <#-- This file collects common reporting macros used in factsheet reports -->
2 | <#import "factsheet_utils.ftl" as utils>
3 | <#------------------------------------------------------------------------------->
4 | <#-- Helpers to deal with type definitions for custom attributes/facts -->
5 | <#------------------------------------------------------------------------------->
6 |
7 | <#-- Set a global variable for the new section in the input JSON with all "custom_fact_definitions" -->
8 | <#global global_custom_fact_definitions=custom_fact_definitions!{}/>
9 |
10 | <#-- get the label for one fact out of a specific type definition -->
11 | <#function getCustomFactLabel fact_id custom_fact_definition>
12 | <#if (custom_fact_definition["properties"][fact_id])?? >
13 | <#return custom_fact_definition["properties"][fact_id]["label"]["default"] />
14 | <#else>
15 | <#-- we sometimes have "old" fact values that are left in CAMS after the type definition has been changed -->
16 | <#return fact_id />
17 | #if>
18 | #function>
19 |
20 | <#-- check if a fact is defined in a specific type definition -->
21 | <#function isCustomFactDefined fact_id custom_fact_definition>
22 | <#return (custom_fact_definition["properties"][fact_id])?? >
23 | #function>
24 |
25 | <#------------------------------------------------------------------------------->
26 | <#-- Display custom facts (same for model, Model use case, openpages, ...) -->
27 | <#-- Input: The JSON for the decorator type -->
28 | <#------------------------------------------------------------------------------->
29 |
30 | <#macro display_custom_facts custom_facts custom_fact_definition custom_facts_header="Additional Details">
31 | <#if (custom_facts)?has_content>
32 | ## ${custom_facts_header}
33 | <#-- First list all custom facts an atomic type (string, int, date, ..) -->
34 | <#list custom_facts?keys as fact_key>
35 | <#if !custom_facts[fact_key]?is_enumerable >
36 | <#if commons.isCustomFactDefined(fact_key, custom_fact_definition)>
37 | **${commons.getCustomFactLabel(fact_key, custom_fact_definition)}:** ${custom_facts[fact_key]}
38 | <#else>
39 |         <#-- we sometimes have "old" fact values that are left in CAMS after the type definition has been changed
40 |              so we won't find the labels for them any more.
41 |              We should not show them at all - at least that is what the UI does -->
42 | ${fact_key}: ${custom_facts[fact_key]}
43 | #if>
44 | #if>
45 | #list>
46 | <#-- Now list all custom facts with value arrays -->
47 | <#list custom_facts?keys as fact_key>
48 | <#if custom_facts[fact_key]?is_enumerable >
49 | <#if commons.isCustomFactDefined(fact_key, custom_fact_definition)>
50 | **${commons.getCustomFactLabel(fact_key, custom_fact_definition)}:**
51 | <#list custom_facts[fact_key] as array_fact_value>
52 | - ${array_fact_value}
53 | #list><#-- The empty line below is important to keep lists separated -->
54 |
55 | #if>
56 | #if>
57 | #list>
58 | #if>
59 | #macro>
60 |
61 | <#------------------------------------------------------------------------------->
62 | <#-- Additional details/Custom facts for model entries -->
63 | <#-- Input: Model use case JSON -->
64 | <#------------------------------------------------------------------------------->
65 |
66 | <#macro additional_details_model_entry model_entry custom_facts_header="Additional Model use case Details">
67 | <#if (model_entry.model_entry_user)??>
68 | <@commons.display_custom_facts (model_entry.model_entry_user)!{} global_custom_fact_definitions["model_entry_user"]!{} custom_facts_header/>
69 | #if>
70 | #macro>
71 |
72 | <#------------------------------------------------------------------------------->
73 | <#-- Additional details/Custom facts for models -->
74 | <#-- Input: model JSON -->
75 | <#------------------------------------------------------------------------------->
76 | <#macro additional_details_model model custom_facts_header="Additional Model Details">
77 | <#if (model.additional_details)??>
78 | <@commons.display_custom_facts (model.additional_details)!{} global_custom_fact_definitions["modelfacts_user"]!{} custom_facts_header/>
79 | #if>
80 | #macro>
81 |
82 | <#------------------------------------------------------------------------------->
83 | <#-- Additional details/Custom facts from OpenPage for models -->
84 | <#-- Input: model JSON -->
85 | <#------------------------------------------------------------------------------->
86 | <#macro additional_details_openpages_model model custom_facts_header="Additional IBM Openpages Model Details">
87 | <#if (model.additional_details_op)??>
88 | <@commons.display_custom_facts (model.additional_details_op)!{} global_custom_fact_definitions["modelfacts_user"]!{} custom_facts_header/>
89 | #if>
90 | #macro>
91 |
92 | <#------------------------------------------------------------------------------->
93 | <#-- Additional details/Custom facts from OpenPage for model entries -->
94 | <#-- Input: model JSON -->
95 | <#------------------------------------------------------------------------------->
96 | <#macro additional_details_openpages_model_entry model_entry custom_facts_header="Additional IBM Openpages Model use case Details">
97 | <#if (model_entry.additional_details_op)??>
98 | <@commons.display_custom_facts (model_entry.additional_details_op)!{} global_custom_fact_definitions["model_entry_user"]!{} custom_facts_header/>
99 | #if>
100 | #macro>
101 |
102 | <#------------------------------------------------------------------------------->
103 | <#-- Notebook Experiments -->
104 | <#-- Input: one -->
105 | <#-- Report output for training_data, training_metrics, schemas -->
106 | <#------------------------------------------------------------------------------->
107 | <#macro notebook_experiment_report system_facts >
108 | <#if (system_facts.notebook_experiment)?? >
109 | ### Notebook Training Facts
110 | <#if (system_facts.notebook_experiment.params)?? >
111 |
112 | ### Training Parameters
113 |
114 | |Parameter|Value|
115 | |:---|:---|
116 | <#list system_facts.notebook_experiment.params?sort_by("key") as Param>
117 | |${Param.key}|${Param.value?replace("\n", "")!}|
118 | #list>
119 | #if>
120 | <#if (system_facts.notebook_experiment.metrics)?? >
121 |
122 | ### Training Metrics
123 |
124 | |Metric|Value|
125 | |:---|:---|
126 | <#list system_facts.notebook_experiment.metrics?sort_by("key") as Metric>
127 | |${Metric.key}|${Metric.value!}|
128 | #list>
129 | #if>
130 | <#if (system_facts.notebook_experiment.tags)?? >
131 |
132 | ### Training Tags
133 |
134 |
135 | |Tag|Value|
136 | |:---|:---|
137 | <#list system_facts.notebook_experiment.tags?sort_by("key") as Tag>
138 | |${Tag.key}|${Tag.value!}|
139 | #list>
140 | #if>
141 | #if>
142 | #macro>
143 |
144 | <#------------------------------------------------------------------------------->
145 | <#-- Training data references -->
146 | <#-- Input: model entry system facts or whole model JSON -->
147 | <#-- Report output for training_data, training_metrics, schemas -->
148 | <#------------------------------------------------------------------------------->
<#macro training_data_references_report training_data_references >
<#-- Renders the "Training Data Information" section: one entry per training
     data reference, showing the data name, source, source path and type,
     each with an optional link when the corresponding *_href is present.
     Emits nothing when the reference list itself is absent. -->
<#if (training_data_references)?? >
### Training Data Information

<#list training_data_references as DataRef >
<#if (DataRef.training_data)?? && (DataRef.training_data?length > 0) >
**Training data:** `${DataRef.training_data}` <#if (DataRef.training_data_href)?? && (DataRef.training_data_href?length > 0)>[Link](${DataRef.training_data_href}) </#if>

<#if (DataRef.source)?? && (DataRef.source?length > 0) >
<#-- FIX: this row was labelled "Training data:" a second time although it
     renders DataRef.source (cf. the Source path / Source type rows below) -->
**Source:** `${DataRef.source}` <#if (DataRef.source_href)?? && (DataRef.source_href?length > 0)>[Link](${DataRef.source_href}) </#if>
</#if>
<#if (DataRef.source_path)?? && (DataRef.source_path?length > 0)>
**Source path:** ${DataRef.source_path}
</#if>
<#if (DataRef.source_type)?? && (DataRef.source_type?length > 0)>
**Source type:** ${DataRef.source_type}
</#if>
<#if (DataRef.type)?? && (DataRef.type?length > 0)>
**Type:** ${DataRef.type}
</#if>

<#else>
could not locate the asset
</#if>

</#list>
</#if>
</#macro>
177 |
178 | <#------------------------------------------------------------------------------->
179 | <#-- Training information -->
180 | <#-- Input: model entry system facts or whole model JSON -->
181 | <#-- Report output for training_data, training_metrics, schemas -->
182 | <#------------------------------------------------------------------------------->
<#macro training_information_report system_facts show_training_data_references=true>
<#-- Renders the training details of a model: hyper parameters, feature count,
     hybrid pipeline, training data references, notebook experiment facts,
     training metrics table and input/output schemas.
     - system_facts: the system facts hash of a model
     - show_training_data_references: suppresses the training data section when false -->
<#if !(system_facts)??>No system facts available for this model!</#if>
<#if (system_facts.hyper_parameters)?? && (system_facts.hyper_parameters?length > 0) >
<#-- NOTE(review): ?length assumes hyper_parameters is a string — confirm against the data model -->
**Hyper parameters:** ${system_facts.hyper_parameters}
</#if>
<#if (system_facts.features)?? && (system_facts.features > 0) >
**Features:** ${system_facts.features}
</#if>
<#-- FIX: was "hybrid_pipeline.size" — a lookup of a hash key named "size";
     the sequence length built-in is ?size -->
<#if (system_facts.hybrid_pipeline)?? && (system_facts.hybrid_pipeline?size > 0) >
**Hybrid pipeline:** ${(system_facts.hybrid_pipeline?map(pipeline -> pipeline.name)?join(", ","_(empty)_"))!}
</#if>

<#if show_training_data_references && (system_facts.training_information.training_data_references)?? >
<@commons.training_data_references_report system_facts.training_information.training_data_references />
</#if>

<@commons.notebook_experiment_report system_facts />

<#if system_facts.training_metrics??>
### Training Metrics



<#if (system_facts.training_metrics[0].ml_metrics[0])?? >
<#-- the table columns are derived from the keys of the FIRST metric entry
     that end with "value" (e.g. "f1_value" -> column "F1") -->
<#assign valueKeys = system_facts.training_metrics[0].ml_metrics[0]?keys?filter(key -> key?ends_with("value")) />
<#assign valueLabels = valueKeys?map(value -> (value[0..value?length-7])?cap_first) />

|Name|<#list valueLabels as valueLabel>${valueLabel}|</#list>
|:---|<#list valueLabels as valueLabel>:---|</#list>
<#-- FIX: the table was emitted outside the guard above, so valueKeys/valueLabels
     could be referenced while undefined and abort the report; it is now rendered
     only when the column keys could be determined -->
<#list system_facts.training_metrics as Metric>
<#list Metric.ml_metrics as MlMetric>
<#-- FIX: hash lookup MlMetric[valueKey] replaces ("MlMetric."+valueKey)?eval —
     equivalent, avoids string evaluation, and the ! default tolerates a metric
     entry that lacks one of the first entry's keys -->
|${MlMetric.name!}|<#list valueKeys as valueKey>${MlMetric[valueKey]!}|</#list>
</#list>
</#list>
</#if>
</#if>

<#assign i= 1>
<#if (system_facts.schemas.input)?? && (system_facts.schemas.input?size > 0) >
### Input Schema

<#list system_facts.schemas.input as InputSchema>

**Input ${i}:** ${InputSchema.id!}

<#if (InputSchema.fields)?? && (InputSchema.fields?size > 0) >
|Name|Type|
|:---|:---|
<#list InputSchema.fields as Field>
|${Field.name}|${Field.type}|
</#list>
</#if>
<#assign i = i+1>
</#list>
</#if>

<#assign i= 1>
<#if (system_facts.schemas.output)?? && (system_facts.schemas.output?size > 0) >
### Output Schema

<#list system_facts.schemas.output as OutputSchema>

**Output ${i}:** ${OutputSchema.id!}

<#if (OutputSchema.fields)?? && (OutputSchema.fields?size > 0) >
|Name|Type|Measure|Modeling Role|
|:---|:---|:------|:------------|
<#list OutputSchema.fields as Field>
|${(Field.name)!}|<@utils.stringOrHash (Field.type)/>|${(Field.metadata.measure)!}|${(Field.metadata.modeling_role)!}|
</#list>
</#if>
<#assign i = i+1>
</#list>
</#if>
</#macro>
258 |
259 | <#------------------------------------------------------------------------------->
260 | <#-- Model Information Report -->
261 | <#-- Input: model_information JSON (works both for model and model entry JSON)-->
262 | <#-- Report basic metadata about the model -->
263 | <#------------------------------------------------------------------------------->
<#macro model_information_report model_information ommitNameAndID=false >
<#-- Renders the basic model metadata block: name, id, description, tags,
     timestamps and model/algorithm details. Each line is emitted only when
     its field is present and non-empty.
     - model_information: the model_information hash of the system facts
     - ommitNameAndID (sic — spelling kept for caller compatibility): when true,
       suppresses the Name and Model ID lines (e.g. when the caller already
       printed them in a heading) -->
<#if !ommitNameAndID >
**Name:** ${model_information.model_name!"_(not specified)_"}
**Model ID:** ${model_information.model_id!"_(not specified)_"}
</#if>
<#if (model_information.model_description)?? && model_information.model_description?has_content >
**Description:** ${model_information.model_description!"_(not specified)_"}
</#if>
<#if (model_information.model_tags)?? && model_information.model_tags?has_content >
**Tags:** ${(model_information.model_tags?join(", ","_(empty)_"))!"_(not specified)_"}
</#if>
<#if (model_information.last_modified)?? && model_information.last_modified?has_content >
**Last modified:** ${(model_information.last_modified?datetime)!"_(not specified)_"}
</#if>
<#if (model_information.created)?? && model_information.created?has_content >
**Created:** ${(model_information.created?datetime)!"_(not specified)_"}
</#if>
<#if (model_information.created_by)?? && model_information.created_by?has_content >
**Created by:** ${model_information.created_by!"_(not specified)_"}
</#if>
<#if (model_information.label_column)?? && model_information.label_column?has_content >
**Label/prediction column:** ${model_information.label_column!"_(not specified)_"}
</#if>
<#if (model_information.model_type)?? && model_information.model_type?has_content >
**Model type:** ${model_information.model_type!"_(not specified)_"}
</#if>
<#if (model_information.input_type)?? && model_information.input_type?has_content >
**Data Type:** ${model_information.input_type!"_(not specified)_"}
</#if>
<#if (model_information.algorithm)?? && model_information.algorithm?has_content >
**Algorithm:** ${model_information.algorithm!"_(not specified)_"}
</#if>
<#if (model_information.prediction_type)?? && model_information.prediction_type?has_content >
**Prediction Type:** ${model_information.prediction_type!"_(not specified)_"}
</#if>
<#if (model_information.software_spec)?? && model_information.software_spec?has_content >
**Software specification:** ${model_information.software_spec!"_(not specified)_"}
</#if>
</#macro>
303 |
304 | <#------------------------------------------------------------------------------->
305 | <#-- System Facts Report -->
306 | <#-- Input: system_facts JSON or whole model JSON -->
307 | <#-- Report output for model_information and training information -->
308 | <#------------------------------------------------------------------------------->
<#macro system_facts_report system_facts show_training_data_references=true ommitNameAndID=false>
<#-- Top-level report for one model's system facts: basic model information,
     additional details, then the full training information section.
     Emits nothing when system_facts is absent or empty.
     NOTE: @additional_details_model is defined elsewhere in the template set. -->
<#if (system_facts)?has_content >
<@model_information_report system_facts.model_information ommitNameAndID/>
<@additional_details_model system_facts />
<@training_information_report system_facts show_training_data_references />
</#if>
</#macro>
316 |
317 | <#------------------------------------------------------------------------------->
318 | <#-- Physical Model Report -->
319 | <#-- Input: physical_model JSON from model entry JSON -->
320 | <#-- Currently we don't report metadata directly from physical_model -->
321 | <#-- but the sub-element system_facts is of course our main source -->
322 | <#------------------------------------------------------------------------------->
<#macro physical_model_report physical_model show_training_data_references=true ommitNameAndID=false>
<#-- Thin wrapper over @system_facts_report: reports the sub-element
     physical_model.system_facts, substituting an empty hash when absent so the
     inner macro's ?has_content guard simply produces no output. -->
<@system_facts_report physical_model.system_facts!{} show_training_data_references ommitNameAndID />
</#macro>
326 |
327 | <#------------------------------------------------------------------------------->
328 | <#-- Deployment -->
329 | <#-- Input: deployment system facts, counter, alerts & status (optional) -->
330 | <#-- Report output for full deployment information -->
331 | <#------------------------------------------------------------------------------->
<#macro deployment_report deployment counter=1 alerts=[] status="" >
<#-- Renders one deployment section: heading with optional status/alerts,
     deployment metadata, evaluation information and the monitor results
     (quality, fairness, data drift and custom monitors).
     - deployment: deployment system-facts hash
     - counter:    running number used in the section heading
     - alerts:     optional list of alert strings (rendered via utils.color)
     - status:     optional status string -->
<#if deployment?? && deployment?is_hash>
### Deployment #${counter}: "${deployment.name!}"
<#if status?length != 0 >**Status**: ${status} </#if>
<#if alerts?size != 0 >**Alerts**: ${utils.color(alerts?join(", "))}</#if>

<#if (deployment.external_identifier)?? && deployment.external_identifier?has_content >
**External deployment identifier**: ${deployment.external_identifier!}
</#if>
<#if (deployment.description)?? && deployment.description?has_content >
**Description**: ${deployment.description!}
</#if>
<#if (deployment.created_on)?? && deployment.created_on?has_content >
**Created**: ${(deployment.created_on?datetime)!}
</#if>
<#if (deployment.last_modified)?? && deployment.last_modified?has_content >
**Modified**: ${(deployment.last_modified?datetime)!}
</#if>
<#if (deployment.deployment_tags)?? && deployment.deployment_tags?has_content >
**Tags**: ${(deployment.deployment_tags?join(", ","_(empty)_"))!}
</#if>
<#if (deployment.model_revision)?? && deployment.model_revision?has_content >
**Revision**: ${deployment.model_revision!}
</#if>
<#-- prefer the newer deployment_type field, fall back to the legacy type field -->
<#if (deployment.deployment_type)?? && deployment.deployment_type?has_content >
**Type**: ${deployment.deployment_type!}
<#elseif (deployment.type)?? && deployment.type?has_content >
**Type**: ${deployment.type!}
</#if>
<#if (deployment.deployment_copies)?? && deployment.deployment_copies?has_content >
**Copies**: ${deployment.deployment_copies!}
</#if>
<#if (deployment.scoring_urls)?? && (deployment.scoring_urls?size > 0) >
**Scoring endpoints**:
<#list deployment.scoring_urls as url>
${url} <#sep>, </#sep>
</#list>
</#if>

<#if (deployment.evaluation_details)??>
### Evaluation Information
<#if (deployment.openscale_details)?? && deployment.openscale_details.service_instance_id?has_content >
**OpenScale instance ID:** ${(deployment.openscale_details.service_instance_id)!}
</#if>
<#if (deployment.openscale_details)?? && deployment.openscale_details.service_provider.name?has_content >
**OpenScale service provider:** ${(deployment.openscale_details.service_provider.name)!}
</#if>
<#if (deployment.approval_status)?? && (deployment.approval_status.state?has_content) >
**Approval status:** ${(deployment.approval_status.state)!}
</#if>
<#if (deployment.approval_status)?? && deployment.approval_status.reviewed_by?has_content >
**Reviewed by:** ${(deployment.approval_status.reviewed_by)!}
</#if>
<#if (deployment.approval_status)?? && deployment.approval_status.review_date?has_content >
**Reviewed on:** ${(deployment.approval_status.review_date?datetime)!}
</#if>
<#if (deployment.evaluation_details)?? && deployment.evaluation_details.evaluation_date?has_content >
**Last evaluation:** ${(deployment.evaluation_details.evaluation_date?datetime)!}
</#if>
<#if (deployment.evaluation_details.asset)?? && deployment.evaluation_details.asset.name?has_content >
**Evaluation data:** ${(deployment.evaluation_details.asset.name)!}
</#if>
<#if (deployment.evaluation_details.asset)?? && deployment.evaluation_details.asset.type?has_content >
**Evaluation data type:** ${(deployment.evaluation_details.asset.type)!}
</#if>

</#if>
<#if (deployment.quality)??>
### Quality Evaluation

<#assign breach_status =(deployment.quality.summary.breach_status)!"">
**Breach status:** ${utils.conditionalColor(breach_status, breach_status == "RED")}
**Records evaluated:** ${deployment.quality.records_evaluated!}

|Quality Metric|Value|
|:-------------|:----|
<#-- two-decimal number format for this table only; reset right after -->
<#setting number_format=",##0.00">
<#list (deployment.quality.metrics?values) as metric>
|${metric.name}|${utils.conditionalColor(metric.value, (metric.breach_status?? && metric.breach_status == "RED"))}|
</#list>
<#setting number_format="" />
</#if>
<#if (deployment.fairness)??>
### Fairness Evaluation

<#assign breach_status =(deployment.fairness.summary.breach_status)!"">
**Breach status:** ${utils.conditionalColor(breach_status, breach_status == "RED")}
**Records evaluated:** ${deployment.fairness.records_evaluated!}

|Fairness Metric|Value|
|:--------------|:----|
<#list (deployment.fairness.features) as feature>
<#list (feature.metrics?values) as metric>
|${feature.name} ${metric.name}|${utils.conditionalColor((metric.value * 100)?round+ "%", ((metric.lower_limit?? && metric.value < metric.lower_limit) || (metric.upper_limit?? && metric.value > metric.upper_limit)))}|
</#list>
<#-- TODO: The following is just using the first [0] element. It probably should return the lowest scoring if the array has multiple vals -->
|${feature.name} with the lowest score|<@utils.stringOrList (feature.individual_scores[0].group_name)/>|
</#list>
</#if>
<#if (deployment.drift)??>
### Data drift Evaluation

<#assign breach_status =(deployment.drift.summary.breach_status)!"">
**Breach status:** ${utils.conditionalColor(breach_status, breach_status == "RED")}
**Records evaluated:** ${deployment.drift.records_evaluated!}

|Drift Metric|Value|
|:-----------|:----|
<#list (deployment.drift.metrics?values) as metric>
|${metric.name}|${utils.conditionalColor((metric.value * 100)?round + "%", ((metric.lower_limit?? && metric.value < metric.lower_limit) || (metric.upper_limit?? && metric.value > metric.upper_limit)))}|
</#list>
</#if>

<#if (deployment.custom_monitors)??>
<#list deployment.custom_monitors as CustomMonitor>
### ${CustomMonitor.name} Evaluation

<#-- FIX: the breach_status assign was duplicated on two consecutive lines -->
<#assign breach_status =(CustomMonitor.summary.breach_status)!"">
**Breach status:** ${utils.conditionalColor(breach_status, breach_status == "RED")}

|Custom Monitor Metric|Value|
|:--------------------|:----|
<#list CustomMonitor.metrics as metric>
<#-- FIX: each metric row was emitted twice; breach_status is now also guarded
     with ?? for consistency with the quality table above -->
|${metric.name}|${utils.conditionalColor(metric.value, (metric.breach_status?? && metric.breach_status == "RED"))}|
</#list>
</#list>
</#if>

</#if>
</#macro>
464 |
465 | <#--------------------------------------------------------------------------------->
466 | <#--------------------------------------------------------------------------------->
467 | <#-- Section for attachment support -->
468 | <#--------------------------------------------------------------------------------->
469 | <#--------------------------------------------------------------------------------->
470 |
<#-- first import our custom Java-based directive for attachment downloading and embedding and publish it as macro @attachment -->
<#assign attachment_directive = "com.ibm.wkc.aigov.rest.resources.freemarker.directives.AttachmentDirective"?new()>
<#-- there is also a helper Java-based directive just for base64 encoding (e.g. to embed logo images): publish it as macro @encode -->
<#assign encode = "com.ibm.wkc.aigov.rest.resources.freemarker.directives.EncodeDirective"?new()>
<#-- there is also a helper Java-based directive to compute the UI URL for an asset: publish it as macro @asset_url -->
<#assign asset_url = "com.ibm.wkc.aigov.rest.resources.freemarker.directives.AssetUrlDirective"?new()>
477 |
478 | <#---------------------------------------------------------------------------------
479 | embed_attachment (singular)
480 | embeds a single attachment.
481 | Image attachments are embedded in base64 where possible. Simple HTML content is also directly embedded.
482 | - attachment: one element of the attachments JSON array from the data model
- skipNonEmbeddableAttachments: no output for large/complex HTML and binary files that can't be embedded into HTML.
  Otherwise just output their name and other metadata as a list
485 | - displayCommentsAsCaptions: use the "comments" JSON string and output it as caption over the embedded attachment content
486 | --------------------------------------------------------------------------------->
<#macro embed_attachment attachment skipNonEmbeddableAttachments=true displayCommentsAsCaptions=true downloadToFile=false showContentInline=true>
<#-- Embeds a single attachment into the report (images and simple inline HTML);
     non-embeddable attachments are listed by name/metadata instead.
     FIX: removed the commas between the first parameters — FreeMarker macro
     parameters are whitespace-separated. -->
<#-- When downloading the file append id to get a unique file name -->
<#local attachment_name_with_id=attachment.name?keep_before_last(".") + "_" + attachment.id + "." + attachment.name?keep_after_last(".")>
<#if (attachment.url)?? && attachment.mime?starts_with("image") && showContentInline>
<#-- regular image files -->
<#if displayCommentsAsCaptions> <#noautoesc>Attachment Name: ${attachment.name!}</#noautoesc>
<#noautoesc>Attachment Description: ${attachment.description!}</#noautoesc></#if>
<#if downloadToFile >
<#-- NOTE(review): the image embed markup (an <img> referencing the downloaded
     file) appears to have been lost in extraction — restore from the original -->
<#-- should we limit/scale to a certain size or %? -->
<#else>
<#-- NOTE(review): the inline (presumably base64) <img> embed markup appears to
     have been lost in extraction — restore from the original -->
<#-- should we limit/scale to a certain size or %? -->
</#if>
<#elseif (attachment.url)?? && (attachment.mime?starts_with("text/html") || attachment.mime?starts_with("text/plain") ) && (attachment.html_rendering_hint)?? && attachment.html_rendering_hint?starts_with("inline") && showContentInline>
<#if attachment.html_rendering_hint == "inline_html" >
<#-- inline_html assumes simple, small html that can be embedded directly -->
<#if displayCommentsAsCaptions > <#noautoesc>Attachment Name: ${attachment.name!}</#noautoesc>
<#noautoesc>Attachment Description: ${attachment.description!}</#noautoesc></#if>
<#if downloadToFile >
<#noautoesc>
<#outputformat "HTML">
<@attachment_directive returnContent=true htmlCleanup=true url=attachment.url downloadFilename=attachment_name_with_id />
</#outputformat>
</#noautoesc>
<#else>
<@attachment_directive returnContent=true htmlCleanup=true url=attachment.url />
</#if>
<#elseif attachment.html_rendering_hint == "inline_image" >
<#-- inline_image assumes HTML that can not be directly included - but it can be converted to an image which can be embedded -->
<#if displayCommentsAsCaptions > <#noautoesc>Attachment Name: ${attachment.name!}</#noautoesc>
<#noautoesc>Attachment Description: ${attachment.description!}</#noautoesc></#if>
<#if downloadToFile >
<#-- NOTE(review): image embed markup appears to have been lost in extraction — restore from the original -->
<#else>
<#-- NOTE(review): image embed markup appears to have been lost in extraction — restore from the original -->
</#if>
<#else>
<#-- FIX: was ${html_rendering_hint}, an undefined top-level variable that would
     abort rendering; the attachment's own field is what should be printed -->
${utils.color("Error")}: Unknown html_rendering_hint _${attachment.html_rendering_hint}_
</#if>
<#else>
<#-- The other attachments can't be embedded. They are downloaded and available as files but we can't embed them directly because of mime type/size -->
<#if !skipNonEmbeddableAttachments || !showContentInline>
<#-- FIX: displayHeadingOnce and heading are not defined in this macro, so the
     previously unguarded test aborted rendering whenever this branch ran; the
     guard keeps the behaviour for callers that do define them globally -->
<#if displayHeadingOnce?? && displayHeadingOnce> ## ${heading!} <#local displayHeadingOnce=false></#if>
<#if downloadToFile>
**Name:** <@attachment_directive downloadFilename=attachment_name_with_id url=attachment.url />
**Fact Id:** ${attachment.fact_id!""}
<#else>
**Name:** ${attachment.name}
</#if>
**Description:** ${attachment.description!""}
**Fact Id:** ${attachment.fact_id!""}
</#if>
</#if>
</#macro>
540 |
541 | <#---------------------------------------------------------------------------------
542 | embed_attachments (plural)
embeds multiple attachments from a sequence/array
544 | - attachments: the attachments JSON array from the data model
545 | - skipNonEmbeddableAttachments: no output for large/complex HTML and binary files that can't be embedded into HTML.
  Otherwise just output their name and other metadata as a list
547 | - displayCommentsAsCaptions: use the "comments" JSON string and output it as caption over the embedded attachment content
548 | --------------------------------------------------------------------------------->
<#macro embed_attachments attachments heading skipNonEmbeddableAttachments=true displayCommentsAsCaptions=true downloadToFile=false showContentInline=true >
<#-- Embeds every attachment in the array via @embed_attachment. When at least
     one entry is embeddable (an image with a url, or text/html|text/plain with
     an "inline*" rendering hint), an "Inline Attachments" heading is printed
     first. -->
<#local hasEmbeddable = attachments?filter(a -> ( (a.url)?? && a.mime?starts_with("image") ) || ( ( a.mime?starts_with("text/html") || a.mime?starts_with("text/plain") ) && (a.html_rendering_hint)?? && a.html_rendering_hint?starts_with("inline") ))?size gt 0 />
<#if (hasEmbeddable) >
### Inline Attachments for ${heading}
</#if>
<#list attachments as attachment >
<@embed_attachment attachment skipNonEmbeddableAttachments displayCommentsAsCaptions downloadToFile showContentInline/>

</#list>
</#macro>
564 |
565 | <#---------------------------------------------------------------------------------
566 | embed_attachment_with_factid (singular)
567 | embeds a single attachment with a given factid out of the array of all attachment
568 | Image attachments are embedded in base64 where possible. Simple HTML content is also directly embedded.
569 | - attachments: the attachments JSON array from the data model
570 | - factid: the fact id of one element in the attachments JSON array from the data model
571 | - skipNonEmbeddableAttachments: no output for large/complex HTML and binary files that can't be embedded into HTML.
572 | Otherwise just ouput their name and other metadata as a list
573 | - displayCommentsAsCaptions: use the "comments" JSON string and output it as caption over the embedded attachment content
574 | - flag to force an attachment (e.g. an image) to NOT be embedded but downloaded (and put into ZIP)
575 | - flag to force an attachment (e.g. an image) to BOTH be embedded AND also downloaded (and put into ZIP)
576 | --------------------------------------------------------------------------------->
<#macro embed_attachment_with_factid attachments factid skipNonEmbeddableAttachments=true displayCommentsAsCaptions=true downloadToFile=false showContentInline=true >
<#-- Embeds the single attachment whose fact_id matches, out of the array of
     all attachments; prints an error line when no match exists. -->
<#local attachments_with_factid = attachments?filter(attachment -> attachment.fact_id == factid) />
<#if attachments_with_factid?has_content >
<#-- FIX: was "@coembed_attachment", which is not a defined macro -->
<@embed_attachment attachments_with_factid?first skipNonEmbeddableAttachments displayCommentsAsCaptions downloadToFile showContentInline />
<#else><#-- There should only be one attachment with a given id (but that is not enforced yet) -->
${utils.color("Error")}: No attachment with fact-id _${factid}_ could be found
</#if>
</#macro>
585 |
586 | <#---------------------------------------------------------------------------------
587 | list_attachment (singular)
588 | does not embed but simply output metadata for a single attachment
589 | - attachment: one attachment entry out of the attachments JSON array from the data model
590 | - skipEmbeddableAttachments: no output for simple HTML files and images which can be embedded into HTML.
591 | - displayCommentsAsCaptions: use the "comments" JSON string and output it as caption over the embedded attachment content
- downloadNonEmbedableAttachments: Make the content of non-embeddable attachments available by downloading them (to the download directory) in addition to listing them
593 | --------------------------------------------------------------------------------->
<#macro list_attachment attachment skipEmbeddableAttachments=true displayCommentsAsCaptions=true downloadNonEmbedableAttachments=false assetUrl="" >
<#-- Lists (does not embed) the metadata of a single attachment: name (with a
     download link or UI link when possible), comment, fact id and — for
     embeddable content — mime type and rendering hint. -->

<#local embeddable = false />
<#if (attachment.url)?? && attachment.mime?starts_with("image")>
<#-- regular image files -->
<#local embeddable = true />
<#elseif (attachment.url)?? && (attachment.mime?starts_with("text/html") || attachment.mime?starts_with("text/plain") ) && (attachment.html_rendering_hint)?? && attachment.html_rendering_hint?starts_with("inline") >
<#-- cell fact (either image or html) -->
<#local embeddable = true />
</#if>
<#-- The other attachments can't be embedded. We can include them directly because of mime type/size -->
<#if !skipEmbeddableAttachments || !embeddable >
<#if downloadNonEmbedableAttachments >
<#-- Not only download but also produce a link to the file (but it HTML will only search it in same local directory) -->
**Name:** <@attachment_directive downloadFilename=attachment.name url=attachment.url />
<#-- **Name:** ${attachment.name}
${attachment.url} -->
<#-- if the attachment has not been downloaded automatically we try to provide a link to the UI where it can be viewed and manually downloaded -->
<#elseif (assetUrl?length > 0) > <#-- if the url to the UI has been provided directly we use that -->
<#-- NOTE(review): this branch presumably once linked the name to assetUrl; the
     link markup appears lost in extraction — confirm against the original -->
**Name:** ${attachment.name}
<#elseif _cpdUiAssetUrl?? > <#-- if the url to the UI has been set as a global variable we use that -->
<#-- NOTE(review): likewise, the link using _cpdUiAssetUrl appears to be missing here -->
**Name:** ${attachment.name}
<#else> <#-- Can't provide a URL to the UI for the asset: Just show the attachment name -->
**Name:** ${attachment.name!""}
</#if>
**Comment:** ${attachment.description!""}
**FactId:** ${attachment.fact_id!""}
<#if embeddable >
**Mime:** ${attachment.mime!""}
**HTML Rendering:** ${attachment.html_rendering_hint!""}
</#if>
</#if>
</#macro>
627 |
628 | <#---------------------------------------------------------------------------------
629 | list_attachments (plural)
630 | does not embed but simply outputs metadata for all attachments
631 | - attachments: the attachments JSON array from the data model
632 | - skipEmbeddableAttachments: no output for simple HTML files and images which can be embedded into HTML.
633 | - displayCommentsAsCaptions: use the "comments" JSON string and output it as caption over the embedded attachment content
634 | - downloadNonEmbedableAttachments: Make the content of non-embedabble attachments available by downloading them (to the download directory) in addition to listing them
635 | --------------------------------------------------------------------------------->
<#macro list_attachments attachments heading skipEmbeddableAttachments=false displayCommentsAsCaptions=true downloadNonEmbedableAttachments=false assetUrl="" >
<#-- Lists metadata for every attachment via @list_attachment. When at least one
     entry is non-embeddable (has a url, is not an image and carries no
     html_rendering_hint), a "File Attachments" heading is printed first. -->
<#local hasNonEmbeddable = attachments?filter(a -> ( (a.url)?? && !a.mime?starts_with("image") ) && ( !(a.html_rendering_hint)?? ))?size gt 0 />
<#if (hasNonEmbeddable) >
### File Attachments for ${heading}
</#if>
<#list attachments as attachment >
<@list_attachment attachment skipEmbeddableAttachments displayCommentsAsCaptions downloadNonEmbedableAttachments assetUrl />

</#list>
</#macro>
651 |
652 | <#---------------------------------------------------------------------------------
653 | list_attachment_with_factid (singular)
654 | does not embed but simply output metadata for a single attachment referenced by its factid
655 | - attachments: the attachments JSON array from the data model
656 | - factid: the fact id of one element in the attachments JSON array from the data model
657 | - skipEmbeddableAttachments: no output for simple HTML files and images which can be embedded into HTML.
658 | - displayCommentsAsCaptions: use the "comments" JSON string and output it as caption over the embedded attachment content
659 | - downloadNonEmbedableAttachments: Make the content of non-embedabble attachments available by downloading them (to the download directory) in addition to listing them
660 | --------------------------------------------------------------------------------->
<#macro list_attachment_with_factid attachments factid skipEmbeddableAttachments=true displayCommentsAsCaptions=true downloadNonEmbedableAttachments=false>
<#-- Lists metadata for the attachment(s) whose fact_id matches; emits nothing
     when no attachment carries the requested fact id. There should only be one
     attachment per fact id, but that is not enforced yet, so every match is
     listed. -->
<#list attachments?filter(a -> a.fact_id == factid) as attachment >
<@list_attachment attachment skipEmbeddableAttachments displayCommentsAsCaptions downloadNonEmbedableAttachments />
</#list>
</#macro>
668 |
<#macro branding_logo logo>
<#-- Renders the report logo.
     NOTE(review): both branches appear to have lost their image markup during
     extraction — restore from the original template. Presumably the first
     branch embeds logo.report_logo and the second a default logo. -->
<#if (logo.report_logo)?? >

<#else>

</#if>
</#macro>
676 |
<#macro model_entry_approaches approaches>
<#-- Renders the table of approaches used in a model use case.
     - approaches: sequence of hashes with name and description -->
#### Approaches used in this model use case

|Approach name|Description|
|:------------|:----------|
<#list approaches as approach >
<#-- FIX: default to empty string so a missing name/description no longer aborts
     the whole report (every sibling macro uses the ! default the same way) -->
|${approach.name!""}|${approach.description!""}|
</#list>
</#macro>
686 |
<#macro model_version_details version_information>
<#-- Renders approach and version facts for one model version. ?has_content is
     legal on missing keys in FreeMarker, so no extra ?? guards are needed. -->
<#if version_information.approach_name?has_content >
**Approach name:** ${version_information.approach_name!}
</#if>
<#if version_information.approach_description?has_content >
**Approach description:** ${version_information.approach_description!}
</#if>
<#if version_information.version_number?has_content >
**Model version:** ${version_information.version_number!}
</#if>
<#if version_information.version_comment?has_content >
**Model version comment:** ${version_information.version_comment!}
</#if>
</#macro>
701 | <#---------------------------------------------------------------------------------
702 | ----------------------------------------------------------------------------------->
703 | <#-- Unused - delete later once we decide to not pursue this approach to report/module metadata
704 | <#ftl
705 | attributes={
706 | "name": "Model Report",
707 | "description": "A sample template to illustrate reporting against model facts",
708 | "type": "Model"
709 | }
710 | >
711 | -->
<#-- Above is an exploratory/experimental and optional header section to see if/how we could use embedded metadata in a template.
The ftl attributes section above must be the very first in the file.
It allows putting some metadata about the template right into the template itself.
This can be read programmatically. The "type" could give an indication that this template needs to be used with
data conforming to the model REST API results (as opposed to Model use case), which would allow for consistency checks.
The "name" and "description" could be used in the UI to allow users to pick a template by name and description.
Embedded metadata has its advantages (it can't be separated/lost from the actual template).
But as a disadvantage it makes the template "ugly" and complex, and it's easy to have typos.
So we may still want to explore other ways to store this metadata. -->
721 |
--------------------------------------------------------------------------------
/cloud_pak_for_data/4.8x Version/End To End Flow/AI-usecase Approach 4.8x Edition.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Introduction \n",
8 | "\n",
9 | "This notebook provides a comprehensive guide on utilizing the `watsonx.governance` Factsheets Python client to **create and manage models** and **AI use cases**. It illustrates the application of various **approaches** and **versioning** (Major, Minor, Patch) for effectively tracking models within an AI use case.\n",
10 | "\n",
11 | "For detailed documentation on the `watsonx.governance` Factsheets Python client, please visit the [official documentation](https://s3.us.cloud-object-storage.appdomain.cloud/aifactsheets-client/index.html).\n",
12 | "\n",
13 | "**Required Services:**\n",
14 | "- `watsonx.governance`\n",
15 | "- `watsonx.ai`\n",
16 | "\n",
17 | "**Required Packages:**\n",
18 | "- **IBM Facts Client Python SDK (>=1.0.47)**\n",
19 | "- **IBM-watsonx-ai Python SDK**\n",
20 | "\n",
21 | "\n",
22 | " \n",
23 | "\n"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": 1,
29 | "metadata": {},
30 | "outputs": [
31 | {
32 | "name": "stdout",
33 | "output_type": "stream",
34 | "text": [
35 | "Requirement already satisfied: matplotlib in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (3.9.1)\n",
36 | "Collecting matplotlib\n",
37 | " Downloading matplotlib-3.9.1.post1-cp310-cp310-macosx_11_0_arm64.whl.metadata (11 kB)\n",
38 | "Requirement already satisfied: contourpy>=1.0.1 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (1.2.0)\n",
39 | "Requirement already satisfied: cycler>=0.10 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (0.12.1)\n",
40 | "Requirement already satisfied: fonttools>=4.22.0 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (4.49.0)\n",
41 | "Requirement already satisfied: kiwisolver>=1.3.1 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (1.4.5)\n",
42 | "Requirement already satisfied: numpy>=1.23 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (1.26.4)\n",
43 | "Requirement already satisfied: packaging>=20.0 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (23.2)\n",
44 | "Requirement already satisfied: pillow>=8 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (10.2.0)\n",
45 | "Requirement already satisfied: pyparsing>=2.3.1 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (3.1.2)\n",
46 | "Requirement already satisfied: python-dateutil>=2.7 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from matplotlib) (2.9.0.post0)\n",
47 | "Requirement already satisfied: six>=1.5 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n",
48 | "Downloading matplotlib-3.9.1.post1-cp310-cp310-macosx_11_0_arm64.whl (7.8 MB)\n",
49 | "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.8/7.8 MB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n",
50 | "\u001b[?25hInstalling collected packages: matplotlib\n",
51 | " Attempting uninstall: matplotlib\n",
52 | " Found existing installation: matplotlib 3.9.1\n",
53 | " Uninstalling matplotlib-3.9.1:\n",
54 | " Successfully uninstalled matplotlib-3.9.1\n",
55 | "Successfully installed matplotlib-3.9.1.post1\n",
56 | "Requirement already satisfied: scikit-learn in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (1.5.1)\n",
57 | "Requirement already satisfied: numpy>=1.19.5 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from scikit-learn) (1.26.4)\n",
58 | "Requirement already satisfied: scipy>=1.6.0 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from scikit-learn) (1.11.4)\n",
59 | "Requirement already satisfied: joblib>=1.2.0 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from scikit-learn) (1.3.2)\n",
60 | "Requirement already satisfied: threadpoolctl>=3.1.0 in /Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages (from scikit-learn) (3.3.0)\n"
61 | ]
62 | }
63 | ],
64 | "source": [
65 | "!pip install -U ibm-aigov-facts-client --quiet\n",
66 | "!pip install -U ibm-watsonx-ai --quiet\n",
67 | "!pip install -U python-dotenv --quiet\n",
68 | "!pip install -U scikit-learn"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 1,
74 | "metadata": {},
75 | "outputs": [
76 | {
77 | "name": "stderr",
78 | "output_type": "stream",
79 | "text": [
80 | "/Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages/pydantic/_internal/_fields.py:160: UserWarning: Field \"model_server_url\" has conflict with protected namespace \"model_\".\n",
81 | "\n",
82 | "You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n",
83 | " warnings.warn(\n",
84 | "/Users/joeleapen/.pyenv/versions/3.10.0/envs/TESTNB/lib/python3.10/site-packages/pydantic/_internal/_config.py:334: UserWarning: Valid config keys have changed in V2:\n",
85 | "* 'schema_extra' has been renamed to 'json_schema_extra'\n",
86 | " warnings.warn(message, UserWarning)\n"
87 | ]
88 | },
89 | {
90 | "data": {
91 | "text/plain": [
92 | "True"
93 | ]
94 | },
95 | "execution_count": 1,
96 | "metadata": {},
97 | "output_type": "execute_result"
98 | }
99 | ],
100 | "source": [
101 | "import warnings\n",
102 | "import shutil\n",
103 | "import time\n",
104 | "import os\n",
105 | "import matplotlib.pyplot as plt\n",
106 | "from dotenv import load_dotenv\n",
107 | "import os\n",
108 | "from ibm_watsonx_ai import APIClient\n",
109 | "from ibm_aigov_facts_client import AIGovFactsClient,CloudPakforDataConfig\n",
110 | "from IPython.display import display, Markdown\n",
111 | "\n",
112 | "\n",
113 | "shutil.rmtree('./mlruns', ignore_errors=True)\n",
114 | "load_dotenv()"
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 |     "- This sample will use IBM Cloud by default. If you prefer to use Cloud Pak for Data, set `use_software=True`\n",
122 |     "- Flag `run_cleanup_at_end` offers option to delete created assets at the end of the notebook. The notebook will show URL to UI for model and model use case at certain cells. By default we set it to `run_cleanup_at_end=False` so you can access UI and see the changes. If you decide to cleanup assets at the end, set `run_cleanup_at_end=True` and remember cells showing links to UI will `NOT` work in that case."
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": 2,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "use_software=True\n",
132 | "run_cleanup_at_end=True"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {},
138 | "source": [
139 | "- `Experiment` and `model names` are just for illustration purposes - they can be customized.\n",
140 | "- Model container type can be `space` or `project`"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": 3,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "experiment_name=\"IrisClassification\"\n",
150 | "MODEL_NAME=\"IrisScikitModel\"\n",
151 | "container_type=\"project\"\n",
152 | "container_id=os.getenv(\"CONTAINER_ID\", \"\") # Project_id where the model will be stored"
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "metadata": {},
158 | "source": [
159 | "---\n",
160 | "## Authentication Setup"
161 | ]
162 | },
163 | {
164 | "cell_type": "markdown",
165 | "metadata": {},
166 | "source": [
167 | "### IBM Cloud \n",
168 | "\n",
169 | "Your Cloud API key can be generated by going to the Users section of the Cloud console. From that page, go to **Manage->Access(IAM)->API keys-> Create**. Give your key a name and click Create, then copy the created key and use as API_KEY.\n",
170 | "\n",
171 | "NOTE: You can also get OpenScale API_KEY using IBM CLOUD CLI.\n",
172 | "\n",
173 | "How to install IBM Cloud (bluemix) console: instruction\n",
174 | "\n",
175 | "How to get api key using console:\n",
176 | "\n",
177 | "```\n",
178 | "bx login --sso\n",
179 | "bx iam api-key-create 'my_key'\n",
180 | "```\n",
181 | "- Get relevant space id from UI `(Deployments -> Spaces-> open space -> Manage -Space GUID)`"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": 4,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "if not use_software:\n",
191 | " API_KEY=os.getenv(\"CLOUD_API_KEY\", \"\")"
192 | ]
193 | },
194 | {
195 | "cell_type": "markdown",
196 | "metadata": {},
197 | "source": [
198 | "[back to top](#introduction)\n",
199 | "### Watsonx.Gov Platform \n",
200 | "- Service url is the Cloud pak for data platform host URL. For skytap environment, it would be the internal nginx URL.\n",
201 | "- You can either use user `password` or platform `apikey` to authenticate"
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": 5,
207 | "metadata": {},
208 | "outputs": [],
209 | "source": [
210 | "if use_software:\n",
211 | " \n",
212 | " creds=CloudPakforDataConfig(service_url=os.getenv(\"CPD_SERVICE_URL\", \"\"),\n",
213 | " username=os.getenv(\"CPD_USERNAME\", \"\"),\n",
214 | " password=os.getenv(\"CPD_PASSWORD\", \"\"))\n"
215 | ]
216 | },
217 | {
218 | "cell_type": "markdown",
219 | "metadata": {},
220 | "source": [
221 | "[back to top](#introduction)\n",
222 | "## Client Initialization\n",
223 | "- Container type would be either `space` or `project`. To use get/set environment utilities, model asset should be stored in Space.\n",
224 | "- If running this notebook multiple times with same experiment name or anytime face error saying `Experiment with same name already exists`, use `set_as_current_experiment=True` when initiating client\n",
225 | "\n",
226 | "[back to top](#introduction)\n",
227 | "\n"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": 6,
233 | "metadata": {},
234 | "outputs": [
235 | {
236 | "name": "stdout",
237 | "output_type": "stream",
238 | "text": [
239 | "2024/08/12 14:37:42 INFO : Experiment IrisClassification does not exist, creating new experiment\n",
240 | "2024/08/12 14:37:42 INFO : Experiment successfully created with ID 102829089652578454 and name IrisClassification\n",
241 | "2024/08/12 14:37:42 INFO : Autolog enabled Successfully\n"
242 | ]
243 | }
244 | ],
245 | "source": [
246 | "if use_software:\n",
247 | " facts_client = AIGovFactsClient(cloud_pak_for_data_configs=creds,experiment_name= experiment_name, container_type=container_type,container_id=container_id, set_as_current_experiment=True)\n",
248 | "else: \n",
249 | " facts_client = AIGovFactsClient(api_key=API_KEY, experiment_name= experiment_name, container_type=container_type,container_id=container_id, set_as_current_experiment=True)"
250 | ]
251 | },
252 | {
253 | "cell_type": "markdown",
254 | "metadata": {},
255 | "source": [
256 | "---\n",
257 | "\n",
258 | "## Create and Train Model \n",
259 | "\n",
260 | "- This sample code demonstrates creating and training a model, specifically a classifier.\n",
261 | "- Model development is achieved without writing any IBM or watsonx.governance-specific code.\n",
262 | "- Key training facts are automatically captured in the background and can be saved to a factsheet later.\n"
263 | ]
264 | },
265 | {
266 | "cell_type": "code",
267 | "execution_count": 7,
268 | "metadata": {},
269 | "outputs": [],
270 | "source": [
271 | "import pandas as pd\n",
272 | "from sklearn import svm, datasets\n",
273 | "from sklearn.model_selection import train_test_split\n",
274 | "from sklearn import tree\n",
275 | "from sklearn.metrics import accuracy_score\n",
276 | "import numpy as np\n",
277 | "\n",
278 | "# Get testdata for iris.\n",
279 | "iris=datasets.load_iris()\n",
280 | "\n",
281 | "x=iris.data\n",
282 | "y=iris.target\n",
283 | "\n",
284 | "# Split training and test data\n",
285 | "x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=.5)"
286 | ]
287 | },
288 | {
289 | "cell_type": "markdown",
290 | "metadata": {},
291 | "source": [
292 | "## Model Training\n",
293 | "\n",
294 | "The following code will be used to train the model. During the training process, key facts and metrics will be automatically captured in the background by the `IBM watsonx.governance` factsheets client. These facts can later be saved and reviewed in a factsheet.\n",
295 | "\n",
296 |     "**Note:** Although there is no need to include any specific `IBM watsonx.governance` code for this process, you will still observe logging output from the factsheets client during training.\n"
297 | ]
298 | },
299 | {
300 | "cell_type": "code",
301 | "execution_count": 8,
302 | "metadata": {},
303 | "outputs": [
304 | {
305 | "name": "stdout",
306 | "output_type": "stream",
307 | "text": [
308 | "2024/08/12 14:37:51 INFO : logging results to factsheet for run_id d3540e8dd1764c7fb34a95b42c8512d8\n",
309 | "2024/08/12 14:37:54 INFO : Successfully logged results to Factsheet service for run_id d3540e8dd1764c7fb34a95b42c8512d8 under asset_id: e4b88953-1b43-4519-9c6b-a79da3918134 and project_id : a50ca438-c31f-42e4-b72c-04bde15410f4\n",
310 | "0.9333333333333333\n"
311 | ]
312 | }
313 | ],
314 | "source": [
315 | "# Train model\n",
316 | "classifier=tree.DecisionTreeClassifier()\n",
317 | "\n",
318 |     "#This is the main training method. No watsonx.governance code is directly needed but as you can see from the output the training details are logged\n",
319 | "classifier.fit(x_train,y_train) \n",
320 | "\n",
321 | "# Predict model\n",
322 | "predictions=classifier.predict(x_test)\n",
323 | "\n",
324 | "# Check accuracy for the model\n",
325 | "print(accuracy_score(y_test,predictions))"
326 | ]
327 | },
328 | {
329 | "cell_type": "markdown",
330 | "metadata": {},
331 | "source": [
332 | "---\n",
333 | "## Store Model as a watsonx.ai / WML Asset in Project \n",
334 | "\n",
335 | "At present, the model exists solely as a Scikit-Learn object in memory. The subsequent step is to register it as a watsonx.ai / WML asset.\n",
336 | "\n",
337 | "In a **Cloud environment**, please use the watsonx.ai / WML URL specific to your region:\n",
338 | "\n",
339 | "- Dallas (US-South): `https://us-south.ml.cloud.ibm.com`\n",
340 | "\n",
341 | "For IBM internal environments, the following watsonx.ai / WML URLs are applicable:\n",
342 | "\n",
343 | "- ypqa: `https://us-south.ml.test.cloud.ibm.com`\n",
344 | "- dev: `https://wml-fvt.ml.test.cloud.ibm.com`\n"
345 | ]
346 | },
347 | {
348 | "cell_type": "code",
349 | "execution_count": 11,
350 | "metadata": {},
351 | "outputs": [],
352 | "source": [
353 | "from ibm_watsonx_ai import APIClient\n",
354 | "from ibm_watsonx_ai import Credentials\n",
355 | "\n",
356 | "\n",
357 | "if use_software:\n",
358 | " Credentials = {\n",
359 | " \"url\": creds.url,\n",
360 | " \"username\": creds.username,\n",
361 | " \"password\" : creds.password,\n",
362 | " \"instance_id\": \"openshift\",\n",
363 | " \"version\" : \"4.8\"\n",
364 | " }\n",
365 | "else:\n",
366 | " Credentials = {\n",
367 | " \"url\": \"https://us-south.ml.cloud.ibm.com\",\n",
368 | " \"apikey\": API_KEY\n",
369 | " }"
370 | ]
371 | },
372 | {
373 | "cell_type": "code",
374 | "execution_count": 12,
375 | "metadata": {},
376 | "outputs": [
377 | {
378 | "data": {
379 | "text/plain": [
380 | "'SUCCESS'"
381 | ]
382 | },
383 | "execution_count": 12,
384 | "metadata": {},
385 | "output_type": "execute_result"
386 | }
387 | ],
388 | "source": [
389 | "watsonx_ai_client = APIClient(Credentials)\n",
390 | "watsonx_ai_client.version\n",
391 | "watsonx_ai_client.set.default_project(container_id)"
392 | ]
393 | },
394 | {
395 | "cell_type": "markdown",
396 | "metadata": {},
397 | "source": [
398 | "### Define and Prepare Model Metadata for watsonx.ai / WML\n",
399 | "\n",
400 | "The following code defines the software specification and model properties, and prepares the model metadata for registration with watsonx.ai / WML.\n"
401 | ]
402 | },
403 | {
404 | "cell_type": "code",
405 | "execution_count": 13,
406 | "metadata": {},
407 | "outputs": [
408 | {
409 | "name": "stdout",
410 | "output_type": "stream",
411 | "text": [
412 | "Software Specification ID: 336b29df-e0e1-5e7d-b6a5-f6ab722625b2\n"
413 | ]
414 | },
415 | {
416 | "data": {
417 | "text/plain": [
418 | "{'name': 'IrisScikitModel',\n",
419 | " 'type': 'scikit-learn_1.1',\n",
420 | " 'software_spec': '336b29df-e0e1-5e7d-b6a5-f6ab722625b2',\n",
421 | " 'label_column': 'target',\n",
422 | " 'custom': {'experiment_id': '1e4f91d953054b278360f4f946c27566',\n",
423 | " 'experiment_name': 'IrisClassification'}}"
424 | ]
425 | },
426 | "execution_count": 13,
427 | "metadata": {},
428 | "output_type": "execute_result"
429 | }
430 | ],
431 | "source": [
432 | "software_spec_uid = watsonx_ai_client.software_specifications.get_id_by_name(\"runtime-23.1-py3.10\")\n",
433 | "print(\"Software Specification ID: {}\".format(software_spec_uid))\n",
434 | "\n",
435 | "model_props = {\n",
436 | " watsonx_ai_client._models.ConfigurationMetaNames.NAME:\"{}\".format(MODEL_NAME),\n",
437 | " watsonx_ai_client._models.ConfigurationMetaNames.TYPE: \"scikit-learn_1.1\",\n",
438 | " watsonx_ai_client._models.ConfigurationMetaNames.SOFTWARE_SPEC_UID: software_spec_uid,\n",
439 | " watsonx_ai_client._models.ConfigurationMetaNames.LABEL_FIELD:\"target\",\n",
440 | "}\n",
441 | "\n",
442 | "facts_client.export_facts.prepare_model_meta(wml_client=watsonx_ai_client,meta_props=model_props)\n",
443 | "\n"
444 | ]
445 | },
446 | {
447 | "cell_type": "markdown",
448 | "metadata": {},
449 | "source": [
450 | "\n",
451 | "### Storing the Model and Retrieving the Model ID\n",
452 | "\n",
453 | "The following code stores the model in Watsonx.ai / WML and retrieves its unique model asset ID.\n",
454 | "\n",
455 | "**Note:** The IDs are unique only within their respective containers.\n",
456 | "`"
457 | ]
458 | },
459 | {
460 | "cell_type": "code",
461 | "execution_count": 14,
462 | "metadata": {},
463 | "outputs": [
464 | {
465 | "name": "stdout",
466 | "output_type": "stream",
467 | "text": [
468 | "Storing model .....\n",
469 | "Done\n",
470 | "Model ID: c312255b-f2fb-46bb-bd15-7d13d5dabfda\n"
471 | ]
472 | }
473 | ],
474 | "source": [
475 | "print(\"Storing model .....\")\n",
476 | "\n",
477 | "published_model_details = watsonx_ai_client.repository.store_model(model=classifier, meta_props=model_props, training_data=x_train, training_target=y_train)\n",
478 | "model_id = watsonx_ai_client.repository.get_model_id(published_model_details)\n",
479 | "print(\"Done\")\n",
480 | "print(\"Model ID: {}\".format(model_id))"
481 | ]
482 | },
483 | {
484 | "cell_type": "markdown",
485 | "metadata": {},
486 | "source": [
487 | "---\n",
488 | "## Retrieve Saved Model with Factsheet Client\n",
489 | "\n",
490 | "The model, saved using watsonx.ai / WML methods, includes comprehensive training documentation in the factsheet. \n",
491 | "Although additional information can be manually added, such as diagrams, this is beyond the scope of this notebook.\n",
492 | "\n",
493 | "To associate the model with an AI use case, retrieve it using the `assets.get_model()` method:\n",
494 | "\n",
495 | "- Use `verbose=True` for detailed information.\n",
496 | "- Retrieve the model by `model_id` with:\n",
497 | " - facts_client.assets.`get_model(model_id=)`\n",
498 | " \n",
499 | " - facts_client.assets.`get_model(model_id=, container_type=, container_id=)`\n"
500 | ]
501 | },
502 | {
503 | "cell_type": "code",
504 | "execution_count": 15,
505 | "metadata": {},
506 | "outputs": [
507 | {
508 | "name": "stdout",
509 | "output_type": "stream",
510 | "text": [
511 | "2024/08/12 14:38:57 INFO : Current model information: {'asset_id': 'c312255b-f2fb-46bb-bd15-7d13d5dabfda', 'container_type': 'project', 'container_id': 'a50ca438-c31f-42e4-b72c-04bde15410f4', 'facts_type': 'modelfacts_user'}\n"
512 | ]
513 | },
514 | {
515 | "data": {
516 | "text/plain": [
517 | "{'name': 'IrisScikitModel',\n",
518 | " 'asset_type': 'wml_model',\n",
519 | " 'url': 'https://cpd-aigov.apps.fs-hotfix-test1.cp.fyre.ibm.com/ml/models/c312255b-f2fb-46bb-bd15-7d13d5dabfda?projectid=a50ca438-c31f-42e4-b72c-04bde15410f4&context=cpdaas',\n",
520 | " 'asset_id': 'c312255b-f2fb-46bb-bd15-7d13d5dabfda',\n",
521 | " 'container_type': 'project',\n",
522 | " 'container_id': 'a50ca438-c31f-42e4-b72c-04bde15410f4',\n",
523 | " 'facts_type': 'modelfacts_user'}"
524 | ]
525 | },
526 | "execution_count": 15,
527 | "metadata": {},
528 | "output_type": "execute_result"
529 | }
530 | ],
531 | "source": [
532 | "watsonx_ai_model=facts_client.assets.get_model(wml_stored_model_details=published_model_details)\n",
533 | "watsonx_ai_model.get_info(verbose=True)"
534 | ]
535 | },
536 | {
537 | "cell_type": "code",
538 | "execution_count": 16,
539 | "metadata": {},
540 | "outputs": [
541 | {
542 | "data": {
543 | "text/markdown": [
544 | "[Click here to see the created model asset and it's factsheet in the UI](https://cpd-aigov.apps.fs-hotfix-test1.cp.fyre.ibm.com/ml/models/c312255b-f2fb-46bb-bd15-7d13d5dabfda?projectid=a50ca438-c31f-42e4-b72c-04bde15410f4&context=cpdaas)"
545 | ],
546 | "text/plain": [
547 | ""
548 | ]
549 | },
550 | "metadata": {},
551 | "output_type": "display_data"
552 | }
553 | ],
554 | "source": [
555 | "model_ui_url = watsonx_ai_model.get_info(verbose=True)[\"url\"]\n",
556 | "display(Markdown(\"[Click here to see the created model asset and it's factsheet in the UI](\" + model_ui_url + \")\"))"
557 | ]
558 | },
559 | {
560 | "cell_type": "markdown",
561 | "metadata": {},
562 | "source": [
563 | "\n",
564 | "\n",
565 | "---\n",
566 | "## Creation of New AI Use Case \n",
567 | "\n",
568 | "An **AI Use Case** tracks model asset lifecycles across environments like development, pre-production, and production. \n",
569 | "\n",
570 | "**Note:** The term \"AI Use Case\" has replaced \"Model Use Case\" to reflect a broader range of AI assets. While some APIs may still use the old terminology, it will be phased out.\n",
571 | "\n",
572 | "- If `ai_usecase_id` is not provided, the default inventory_id is used (requires `EDITOR` access).\n",
573 | "\n",
574 | "- Retrieve the AI Use Case ID from the URL in inventory or by using `get_ai_usecase()`.\n",
575 | "\n",
576 | "- For Cloud Pak for Data, ensure OpenPages integration is disabled (create inventory permission needed).\n"
577 | ]
578 | },
579 | {
580 | "cell_type": "code",
581 | "execution_count": 17,
582 | "metadata": {},
583 | "outputs": [],
584 | "source": [
585 | "ai_usecase_inventory_id = os.getenv(\"INVENTORY_ID\", \"\")\n",
586 | "ai_usecase_name=\"Automatic Iris classification - demonstration use case\" \n",
587 | "ai_usecase_desc=\"AI usecase for iris classification\""
588 | ]
589 | },
590 | {
591 | "cell_type": "markdown",
592 | "metadata": {},
593 | "source": [
594 | "##### ⚠️ Attention: Use of inventories vs. catalogs as input to the `catalog_id` parameter\n",
595 | "\n",
596 | "- The `catalog_id` parameter can specify either an inventory ID or a catalog ID. Inventories which technically are a sub-type of catalogs optimized for watsonx.governance, are recommended.\n",
597 | "\n",
598 | "\n",
599 | "- Catalogs are still supported but will be deprecated over time. As a best practice, use inventories for storing use cases and external models.\n"
600 | ]
601 | },
602 | {
603 | "cell_type": "code",
604 | "execution_count": 22,
605 | "metadata": {},
606 | "outputs": [
607 | {
608 | "name": "stdout",
609 | "output_type": "stream",
610 | "text": [
611 | "2024/08/12 14:41:33 INFO : AI usecase created successfully\n"
612 | ]
613 | },
614 | {
615 | "data": {
616 | "text/plain": [
617 | "{'name': 'Automatic Iris classification - demonstration use case',\n",
618 | " 'description': 'AI usecase for iris classification',\n",
619 | " 'asset_type': 'model_entry',\n",
620 | " 'url': 'https://cpd-aigov.apps.fs-hotfix-test1.cp.fyre.ibm.com/data/catalogs/b1e3758b-ae8e-4708-8ade-d954324e879f/asset/2554e2d2-81ac-4c53-ae7b-851b1fc651d5?context=cpdaas',\n",
621 | " 'model_usecase_id': '2554e2d2-81ac-4c53-ae7b-851b1fc651d5',\n",
622 | " 'container_type': 'catalog',\n",
623 | " 'catalog_id': 'b1e3758b-ae8e-4708-8ade-d954324e879f',\n",
624 | " 'facts_type': 'model_entry_user'}"
625 | ]
626 | },
627 | "execution_count": 22,
628 | "metadata": {},
629 | "output_type": "execute_result"
630 | }
631 | ],
632 | "source": [
633 | "ai_usecase = facts_client.assets.create_ai_usecase(catalog_id=\"b1e3758b-ae8e-4708-8ade-d954324e879f\",name=ai_usecase_name,description=ai_usecase_desc)\n",
634 | "ai_usecase.get_info(True)"
635 | ]
636 | },
637 | {
638 | "cell_type": "markdown",
639 | "metadata": {},
640 | "source": [
641 | "#### Methods Available for AI Use Cases\n",
642 | "\n",
643 | "Explore the following methods for managing AI Use Cases:"
644 | ]
645 | },
646 | {
647 | "cell_type": "code",
648 | "execution_count": 23,
649 | "metadata": {},
650 | "outputs": [
651 | {
652 | "name": "stdout",
653 | "output_type": "stream",
654 | "text": [
655 | "AI usecase name is : Automatic Iris classification - demonstration use case\n",
656 | "AI usecase ID is : 2554e2d2-81ac-4c53-ae7b-851b1fc651d5\n",
657 | "AI usecase catalog is : b1e3758b-ae8e-4708-8ade-d954324e879f\n",
658 | "AI usecase container type is : catalog\n",
659 | "AI usecase description is : AI usecase for iris classification\n"
660 | ]
661 | }
662 | ],
663 | "source": [
664 | "print(\"AI usecase name is : {}\".format(ai_usecase.get_name()))\n",
665 | "print(\"AI usecase ID is : {}\".format(ai_usecase.get_id()))\n",
666 | "print(\"AI usecase catalog is : {}\".format(ai_usecase.get_container_id()))\n",
667 | "print(\"AI usecase container type is : {}\".format(ai_usecase.get_container_type()))\n",
668 | "print(\"AI usecase description is : {}\".format(ai_usecase.get_description()))"
669 | ]
670 | },
671 | {
672 | "cell_type": "markdown",
673 | "metadata": {},
674 | "source": [
675 | "---\n",
676 | "## Create an Approach \n",
677 | "\n",
678 | "- Track multiple models and prompts under a single use case by grouping them into different approaches.\n",
679 | "- Create multiple approaches for various classification algorithms to facilitate comparison and integration.\n",
680 | "- Use approaches to manage different models that need to be combined for a specific use case.\n"
681 | ]
682 | },
683 | {
684 | "cell_type": "markdown",
685 | "metadata": {},
686 | "source": [
687 | "#### Create an Approach for Decision Tree classification\n",
688 | "- Define a new approach specifically for Decision Tree classification within the existing use case.\n",
689 | "- This approach will enable tracking and management of the Decision Tree model alongside other classification algorithms."
690 | ]
691 | },
692 | {
693 | "cell_type": "code",
694 | "execution_count": 24,
695 | "metadata": {},
696 | "outputs": [
697 | {
698 | "name": "stdout",
699 | "output_type": "stream",
700 | "text": [
701 | "2024/08/12 14:41:50 INFO : Approach created successfully\n"
702 | ]
703 | },
704 | {
705 | "data": {
706 | "text/plain": [
707 | "{'approach_id': '7bbd0688-55f6-4315-9123-1150b71314ce',\n",
708 | " 'approach_name': 'Decision Tree classification',\n",
709 | " 'approach_desc': 'Use a descision tree approach to classify iris data',\n",
710 | " 'model_asset_id': '2554e2d2-81ac-4c53-ae7b-851b1fc651d5',\n",
711 | " 'model_container_type': 'catalog',\n",
712 | " 'model_container_id': 'b1e3758b-ae8e-4708-8ade-d954324e879f'}"
713 | ]
714 | },
715 | "execution_count": 24,
716 | "metadata": {},
717 | "output_type": "execute_result"
718 | }
719 | ],
720 | "source": [
721 | "decisiontree_approach = ai_usecase.create_approach(name=\"Decision Tree classification\",description=\"Use a descision tree approach to classify iris data\",icon=\"Sprout\",color=\"Teal\")\n",
722 | "decisiontree_approach.get_info()"
723 | ]
724 | },
725 | {
726 | "cell_type": "markdown",
727 | "metadata": {},
728 | "source": [
729 | "#### Create an Approach for Random Forest Classification\n",
730 | "\n",
731 | "- Define a new approach specifically for Random Forest classification within the existing use case.\n",
732 | "- This approach will enable tracking and management of the Random Forest model alongside other classification algorithms.\n"
733 | ]
734 | },
735 | {
736 | "cell_type": "code",
737 | "execution_count": 25,
738 | "metadata": {},
739 | "outputs": [
740 | {
741 | "name": "stdout",
742 | "output_type": "stream",
743 | "text": [
744 | "2024/08/12 14:41:52 INFO : Approach created successfully\n"
745 | ]
746 | },
747 | {
748 | "data": {
749 | "text/plain": [
750 | "{'approach_id': 'bb93d13d-b450-4da6-bfd7-49a002c81b71',\n",
751 | " 'approach_name': 'Random Forest classification',\n",
752 | " 'approach_desc': 'Use a Random Forest approach to classify iris data',\n",
753 | " 'model_asset_id': '2554e2d2-81ac-4c53-ae7b-851b1fc651d5',\n",
754 | " 'model_container_type': 'catalog',\n",
755 | " 'model_container_id': 'b1e3758b-ae8e-4708-8ade-d954324e879f'}"
756 | ]
757 | },
758 | "execution_count": 25,
759 | "metadata": {},
760 | "output_type": "execute_result"
761 | }
762 | ],
763 | "source": [
764 | "randomforest_approach = ai_usecase.create_approach(name=\"Random Forest classification\",description=\"Use a Random Forest approach to classify iris data\",icon=\"Tree\",color=\"Green\")\n",
765 | "randomforest_approach.get_info()"
766 | ]
767 | },
768 | {
769 | "cell_type": "markdown",
770 | "metadata": {},
771 | "source": [
772 | "### Retrieve All Approaches\n",
773 | "\n",
774 | "- Fetch a list of all approaches associated with the current use case.\n",
775 | "- This allows you to review and manage the various models and methods tracked under the use case.\n",
776 | "#thampp: after creating two approaches in this notebook the users will have three because the default approach is still there. We should teach our users how they can get the two approaches they want (or any number). I don't think we can delete the default. So maybe we will have to change the example such that it renames the default\n"
777 | ]
778 | },
779 | {
780 | "cell_type": "code",
781 | "execution_count": 26,
782 | "metadata": {},
783 | "outputs": [
784 | {
785 | "name": "stdout",
786 | "output_type": "stream",
787 | "text": [
788 | "2024/08/12 14:41:56 INFO : Approaches retrieved successfully\n",
789 | "{'approach_id': 'bb93d13d-b450-4da6-bfd7-49a002c81b71', 'approach_name': 'Random Forest classification', 'approach_desc': 'Use a Random Forest approach to classify iris data', 'model_asset_id': '2554e2d2-81ac-4c53-ae7b-851b1fc651d5', 'model_container_type': 'catalog', 'model_container_id': 'b1e3758b-ae8e-4708-8ade-d954324e879f'}\n",
790 | "{'approach_id': '7bbd0688-55f6-4315-9123-1150b71314ce', 'approach_name': 'Decision Tree classification', 'approach_desc': 'Use a descision tree approach to classify iris data', 'model_asset_id': '2554e2d2-81ac-4c53-ae7b-851b1fc651d5', 'model_container_type': 'catalog', 'model_container_id': 'b1e3758b-ae8e-4708-8ade-d954324e879f'}\n",
791 | "{'approach_id': '00000000-0000-0000-0000-000000000000', 'approach_name': 'Default approach', 'approach_desc': 'A default approach for tracking your AI assets.', 'model_asset_id': '2554e2d2-81ac-4c53-ae7b-851b1fc651d5', 'model_container_type': 'catalog', 'model_container_id': 'b1e3758b-ae8e-4708-8ade-d954324e879f'}\n"
792 | ]
793 | }
794 | ],
795 | "source": [
796 | "approaches = ai_usecase.get_approaches()\n",
797 | "for approach_detail in approaches:\n",
798 | " print(approach_detail.get_info())"
799 | ]
800 | },
801 | {
802 | "cell_type": "markdown",
803 | "metadata": {},
804 | "source": [
805 | "### Retrieve Single Approach\n",
806 | "\n",
807 | "- Retrieve details of a specific approach within the use case.\n",
808 | "- This allows you to access information about a particular model or method tracked under the use case.\n"
809 | ]
810 | },
811 | {
812 | "cell_type": "code",
813 | "execution_count": 28,
814 | "metadata": {},
815 | "outputs": [
816 | {
817 | "name": "stdout",
818 | "output_type": "stream",
819 | "text": [
820 | "2024/08/12 14:43:34 INFO : Approach retrieved successfully\n"
821 | ]
822 | },
823 | {
824 | "data": {
825 | "text/plain": [
826 | "{'approach_id': '00000000-0000-0000-0000-000000000000',\n",
827 | " 'approach_name': 'Default approach',\n",
828 | " 'approach_desc': 'A default approach for tracking your AI assets.',\n",
829 | " 'model_asset_id': '2554e2d2-81ac-4c53-ae7b-851b1fc651d5',\n",
830 | " 'model_container_type': 'catalog',\n",
831 | " 'model_container_id': 'b1e3758b-ae8e-4708-8ade-d954324e879f'}"
832 | ]
833 | },
834 | "execution_count": 28,
835 | "metadata": {},
836 | "output_type": "execute_result"
837 | }
838 | ],
839 | "source": [
840 | "decisiontree_approach = ai_usecase.get_approach(approach_id=approaches[1].get_id())\n",
841 | "decisiontree_approach.get_info()"
842 | ]
843 | },
844 | {
845 | "cell_type": "markdown",
846 | "metadata": {},
847 | "source": [
848 | "> Here, you can see that there are three approaches: two are user-created, and one is the default approach. The default approach cannot be deleted.\n",
849 |     " **Note:** You can rename the default approach to a custom name of your choice, as shown in the next cell, and use it accordingly. The default approach itself cannot be deleted.\n"
850 | ]
851 | },
852 | {
853 | "cell_type": "code",
854 | "execution_count": null,
855 | "metadata": {},
856 | "outputs": [],
857 | "source": [
858 | "default_approach = ai_usecase.get_approach(approach_id=approaches[2].get_id())\n",
859 | "default_approach.get_info()"
860 | ]
861 | },
862 | {
863 | "cell_type": "code",
864 | "execution_count": null,
865 | "metadata": {},
866 | "outputs": [],
867 | "source": [
868 |     "default_approach.set_name(name=\"Model Regression classification\")\n",
869 | "default_approach.set_description(description=\"Use a Regression tree approach to classify iris data\")\n",
870 | "\n",
871 | "default_approach.get_info()"
872 | ]
873 | },
874 | {
875 | "cell_type": "markdown",
876 | "metadata": {},
877 | "source": [
878 | "### Delete an Approach\n",
879 | "\n",
880 | "- Remove a specific approach from the use case.\n",
881 | "- This action deletes the associated model or method, helping to manage and streamline your tracking process.\n"
882 | ]
883 | },
884 | {
885 | "cell_type": "code",
886 | "execution_count": 30,
887 | "metadata": {},
888 | "outputs": [
889 | {
890 | "name": "stdout",
891 | "output_type": "stream",
892 | "text": [
893 | "2024/08/12 14:45:17 INFO : Can't delete default approach \n"
894 | ]
895 | }
896 | ],
897 | "source": [
898 | "ai_usecase.remove_approach(approach=decisiontree_approach)"
899 | ]
900 | },
901 | {
902 | "cell_type": "code",
903 | "execution_count": 40,
904 | "metadata": {},
905 | "outputs": [
906 | {
907 | "name": "stdout",
908 | "output_type": "stream",
909 | "text": [
910 | "2024/08/05 15:05:38 INFO : Approach removed successfully\n"
911 | ]
912 | }
913 | ],
914 | "source": [
915 | "ai_usecase.remove_approach(approach=approaches[0])"
916 | ]
917 | },
918 | {
919 | "cell_type": "markdown",
920 | "metadata": {},
921 | "source": [
922 |     "### Other Methods Available for Approaches\n",
923 |     "Explore the following methods for managing an approach:"
924 | ]
925 | },
926 | {
927 | "cell_type": "code",
928 | "execution_count": null,
929 | "metadata": {},
930 | "outputs": [],
931 | "source": [
932 | "approaches = ai_usecase.get_approaches()\n",
933 | "decisiontree_approach = ai_usecase.get_approach(approach_id=approaches[0].get_id())\n",
934 | "print(decisiontree_approach)\n",
935 | "\n",
936 | "## Update approach name and description\n",
937 | "decisiontree_approach.set_name(name=\"Decision Tree classification\")\n",
938 |     "decisiontree_approach.set_description(description=\"Use a decision tree approach to classify iris data\")\n",
939 | "\n",
940 | "## Get approach versions\n",
941 | "decisiontree_approach.get_versions()\n",
942 | "\n",
943 | "## Get approach info\n",
944 | "decisiontree_approach.get_info()"
945 | ]
946 | },
947 | {
948 | "cell_type": "code",
949 | "execution_count": 42,
950 | "metadata": {},
951 | "outputs": [
952 | {
953 | "name": "stdout",
954 | "output_type": "stream",
955 | "text": [
956 | "Approach name is : Decision Tree classification\n",
957 | "Approach ID is : afb4cd3a-79d7-493a-b6e0-b5d79ed70763\n",
958 |     "Approach description is : Use a decision tree approach to classify iris data\n",
959 | "Approach usecase is : ae383786-2bf7-408b-9115-159c557bf561\n",
960 | "Approach usecase container type is : catalog\n",
961 | "Approach usecase container ID is : 0d136f5c-3756-46a8-ab50-979fd585b26e\n"
962 | ]
963 | }
964 | ],
965 | "source": [
966 | "print(\"Approach name is : {}\".format(decisiontree_approach.get_name()))\n",
967 | "print(\"Approach ID is : {}\".format(decisiontree_approach.get_id()))\n",
968 | "print(\"Approach description is : {}\".format(decisiontree_approach.get_description()))\n",
969 | "print(\"Approach usecase is : {}\".format(decisiontree_approach.get_model_useacse_id()))\n",
970 | "print(\"Approach usecase container type is : {}\".format(decisiontree_approach.get_model_usecase_container_type()))\n",
971 | "print(\"Approach usecase container ID is : {}\".format(decisiontree_approach.get_model_usecase_container_id()))"
972 | ]
973 | },
974 | {
975 | "cell_type": "markdown",
976 | "metadata": {},
977 | "source": [
978 | "---\n",
979 | "## Track a Model Under an AI Use Case \n",
980 | "\n",
981 | "- **AI Use Cases** are designed to monitor the lifecycle of model assets across various stages, including development, pre-production, and production.\n",
982 | "- To effectively integrate a model into an AI use case, three critical elements must be addressed: the **model**, the **AI use case**, and the **approach**.\n",
983 |     "- Link a model to an existing AI use case using the following method: `model.track(usecase=, approach=, version_number=\"\")`.\n",
984 | "\n",
985 | "- **Version Numbers** are categorized as follows:\n",
986 | " - **Major Version:** Indicates significant changes, represented as `1.0.0`.\n",
987 | " - **Minor Version:** Reflects incremental improvements, represented as `0.1.0`.\n",
988 | " - **Patch Version:** Denotes minor fixes or updates, represented as `0.0.1`.\n",
989 | " - **Custom Version:** Allows for tailored versioning according to specific user needs.\n",
990 | "\n",
991 |     "- Ensure that the `usecase`, `approach`, and `version_number` parameters are provided; all three are mandatory.\n",
992 | "\n"
993 | ]
994 | },
995 | {
996 | "cell_type": "code",
997 | "execution_count": null,
998 | "metadata": {},
999 | "outputs": [],
1000 | "source": [
1001 | "watsonx_ai_model.track(usecase=ai_usecase,approach=decisiontree_approach,version_number=\"major\",version_comment=\"major update to previous version\")"
1002 | ]
1003 | },
1004 | {
1005 | "cell_type": "markdown",
1006 | "metadata": {},
1007 | "source": [
1008 | "#### Retrieve Tracked Models for Use Case\n",
1009 | "\n",
1010 | "- Fetch all models that are currently tracked under a specified AI use case.\n",
1011 | "- This provides an overview of all models associated with the use case, facilitating management and analysis.\n"
1012 | ]
1013 | },
1014 | {
1015 | "cell_type": "code",
1016 | "execution_count": null,
1017 | "metadata": {},
1018 | "outputs": [],
1019 | "source": [
1020 | "ai_usecase.get_tracked_models()"
1021 | ]
1022 | },
1023 | {
1024 | "cell_type": "markdown",
1025 | "metadata": {},
1026 | "source": [
1027 | "## Untrack a Model\n",
1028 | "\n",
1029 | "Remove a model from an AI use case when it is no longer relevant or needs to be managed separately.\n"
1030 | ]
1031 | },
1032 | {
1033 | "cell_type": "code",
1034 | "execution_count": null,
1035 | "metadata": {},
1036 | "outputs": [],
1037 | "source": [
1038 | "watsonx_ai_model.untrack()"
1039 | ]
1040 | },
1041 | {
1042 | "cell_type": "markdown",
1043 | "metadata": {},
1044 | "source": [
1045 | "---\n",
1046 | "## Cleanup"
1047 | ]
1048 | },
1049 | {
1050 | "cell_type": "code",
1051 | "execution_count": null,
1052 | "metadata": {},
1053 | "outputs": [],
1054 | "source": [
1055 | "if run_cleanup_at_end:\n",
1056 | " facts_client.assets.remove_asset(asset_id=ai_usecase.get_info()[\"model_usecase_id\"],container_type=ai_usecase.get_info()[\"container_type\"],container_id=ai_usecase.get_info()[\"catalog_id\"])\n",
1057 | " facts_client.assets.remove_asset(asset_id=watsonx_ai_model.get_info()[\"asset_id\"],container_type=watsonx_ai_model.get_info()[\"container_type\"],container_id=watsonx_ai_model.get_info()[\"container_id\"])\n",
1058 | "else:\n",
1059 | " model_ui_url = watsonx_ai_model.get_info(verbose=True)[\"url\"]\n",
1060 | " display(Markdown(\"[Click here to see the created wml model details in the UI](\" + model_ui_url + \")\"))\n",
1061 | " ai_usecase_ui_url = ai_usecase.get_info(verbose=True)[\"url\"]\n",
1062 | " display(Markdown(\"[Click here to see the created AI use case in the UI](\" + ai_usecase_ui_url + \")\"))"
1063 | ]
1064 | },
1065 | {
1066 | "cell_type": "markdown",
1067 | "metadata": {},
1068 | "source": [
1069 | "**Created by:** \n",
1070 | "\n",
1071 | "\n",
1072 | "IBM watsonx.governance - AI Factsheet Python SDK Team\n",
1073 | "\n",
1074 | "---\n",
1075 | "\n",
1076 | "**Copyright © 2020-2024 IBM** \n",
1077 | "Released under the MIT License.\n"
1078 | ]
1079 | },
1080 | {
1081 | "cell_type": "markdown",
1082 | "metadata": {},
1083 | "source": []
1084 | }
1085 | ],
1086 | "metadata": {
1087 | "interpreter": {
1088 | "hash": "671fb7aa161d7cd648bbbcad6b003541cd9cf13e8157e186cbe0090b82566204"
1089 | },
1090 | "kernelspec": {
1091 | "display_name": "Python 3 (ipykernel)",
1092 | "language": "python",
1093 | "name": "python3"
1094 | },
1095 | "language_info": {
1096 | "codemirror_mode": {
1097 | "name": "ipython",
1098 | "version": 3
1099 | },
1100 | "file_extension": ".py",
1101 | "mimetype": "text/x-python",
1102 | "name": "python",
1103 | "nbconvert_exporter": "python",
1104 | "pygments_lexer": "ipython3",
1105 | "version": "3.10.0"
1106 | },
1107 | "varInspector": {
1108 | "cols": {
1109 | "lenName": 16,
1110 | "lenType": 16,
1111 | "lenVar": 40
1112 | },
1113 | "kernels_config": {
1114 | "python": {
1115 | "delete_cmd_postfix": "",
1116 | "delete_cmd_prefix": "del ",
1117 | "library": "var_list.py",
1118 | "varRefreshCmd": "print(var_dic_list())"
1119 | },
1120 | "r": {
1121 | "delete_cmd_postfix": ") ",
1122 | "delete_cmd_prefix": "rm(",
1123 | "library": "var_list.r",
1124 | "varRefreshCmd": "cat(var_dic_list()) "
1125 | }
1126 | },
1127 | "types_to_exclude": [
1128 | "module",
1129 | "function",
1130 | "builtin_function_or_method",
1131 | "instance",
1132 | "_Feature"
1133 | ],
1134 | "window_display": false
1135 | }
1136 | },
1137 | "nbformat": 4,
1138 | "nbformat_minor": 4
1139 | }
1140 |
--------------------------------------------------------------------------------