├── .github
└── workflows
│ ├── examples-nata.yaml
│ └── test-examples.yml
├── .gitignore
├── .pre-commit-config.yaml
├── SageMaker
├── Linear_example.ipynb
├── README.md
└── random_forest.ipynb
├── distributed-optimizer
├── Dockerfile
├── initialize_optimizer.py
├── job-initialize-comet-optimizer.yaml
├── job-optimizer-uuid.yaml
├── job-optimizer.yaml
├── readme.md
├── requirements.txt
├── run.sh
└── run_optimizer.py
├── fastai
└── README.md
├── guides
├── MPM
│ └── end_to_end_example
│ │ ├── .gitignore
│ │ ├── data_processing
│ │ ├── credit_scoring_dataset.csv
│ │ └── data_processing.py
│ │ ├── readme.md
│ │ ├── requirements.txt
│ │ ├── serving
│ │ └── FastAPI
│ │ │ ├── demo_data.csv
│ │ │ └── main.py
│ │ └── training
│ │ └── model_training.py
├── advanced
│ └── Running_Offline_Experiments.ipynb
├── computer_vision
│ └── Computer_Vision_with_Comet.ipynb
├── get-started
│ ├── Comet_Quickstart.ipynb
│ ├── Comet_Quickstart.py
│ ├── Image_Classification_with_Keras.ipynb
│ └── Using_Comet_with_Structured_Data_Churn_Prediction.ipynb
├── manage_data
│ ├── A_Guide_to_Remote_Artifacts.ipynb
│ └── Introduction_to_Artifacts.ipynb
└── tracking-ml-training
│ └── Comet_in_Notebooks.ipynb
├── integrations
├── annoy
│ ├── README.md
│ ├── annoy_example.py
│ └── requirements.txt
├── data-management
│ └── snowflake
│ │ └── notebooks
│ │ └── Comet_and_Snowflake.ipynb
├── langgraph
│ ├── README.md
│ ├── poetry.lock
│ ├── pyproject.toml
│ └── src
│ │ └── call_summarizer.py
├── llm
│ ├── comet-llm
│ │ └── notebooks
│ │ │ └── CometLLM_hello_world.ipynb
│ ├── finetuning
│ │ └── alpaca-lora
│ │ │ ├── README.md
│ │ │ └── notebooks
│ │ │ └── Alpaca_Lora_Finetuning_with_Comet.ipynb
│ ├── langchain
│ │ └── notebooks
│ │ │ └── Comet_with_Langchain.ipynb
│ └── openai
│ │ └── notebooks
│ │ ├── Comet_and_OpenAI.ipynb
│ │ └── Comet_and_OpenAI_Vision.ipynb
├── model-deployment
│ └── seldon
│ │ └── notebooks
│ │ ├── .gitignore
│ │ └── xgboost_seldon_aws.ipynb
├── model-evaluation
│ ├── gradio
│ │ └── notebooks
│ │ │ ├── Gradio_and_Comet.ipynb
│ │ │ └── Logging_Model_Inferences_with_Comet_and_Gradio.ipynb
│ ├── nerfstudio
│ │ └── notebooks
│ │ │ └── Comet_NerfStudio_sitcoms3d_evaluate_model.ipynb
│ ├── shap
│ │ └── shap-hello-world
│ │ │ ├── README.md
│ │ │ ├── requirements.txt
│ │ │ └── shap-hello-world.py
│ └── tensorflow-model-analysis
│ │ └── notebooks
│ │ └── Comet_with_Tensorflow_Model_Analysis_TFMA.ipynb
├── model-optimization
│ ├── comet-optimizer
│ │ └── notebooks
│ │ │ └── Comet_Optimizer_Keras.ipynb
│ ├── optuna
│ │ ├── notebooks
│ │ │ └── Comet_with_optuna.ipynb
│ │ └── optuna-hello-world
│ │ │ ├── README.md
│ │ │ ├── optuna-hello-world.py
│ │ │ └── requirements.txt
│ └── ray-tune
│ │ └── notebooks
│ │ └── Comet_and_Ray.ipynb
├── model-training
│ ├── accelerate
│ │ └── notebooks
│ │ │ └── Comet_and_Accelerate.ipynb
│ ├── anomalib
│ │ └── notebooks
│ │ │ └── Anomalib_Comet.ipynb
│ ├── catalyst
│ │ └── notebooks
│ │ │ └── Catalyst_x_Comet.ipynb
│ ├── composer
│ │ ├── mosaicml-getting-started
│ │ │ ├── README.md
│ │ │ ├── mosaicml-getting-started.py
│ │ │ └── requirements.txt
│ │ └── notebooks
│ │ │ └── comet_composer.ipynb
│ ├── deepspeed
│ │ ├── deepspeed-cifar
│ │ │ ├── README.md
│ │ │ ├── cifar10_deepspeed.py
│ │ │ └── requirements.txt
│ │ └── notebooks
│ │ │ └── comet_deepspeed.ipynb
│ ├── detectron2
│ │ └── notebooks
│ │ │ └── Comet_with_Detectron2.ipynb
│ ├── fastai
│ │ ├── fastai-hello-world
│ │ │ ├── README.md
│ │ │ ├── fastai_hello_world.py
│ │ │ └── requirements.txt
│ │ └── notebooks
│ │ │ └── fastai_hello_world.ipynb
│ ├── keras
│ │ ├── keras-mnist-dnn
│ │ │ ├── README.md
│ │ │ ├── keras-mnist-dnn.py
│ │ │ └── requirements.txt
│ │ └── notebooks
│ │ │ └── Comet_with_Keras.ipynb
│ ├── lightgbm
│ │ └── notebooks
│ │ │ └── Comet_and_LightGBM.ipynb
│ ├── mlflow
│ │ ├── mlflow-hello-world
│ │ │ ├── README.md
│ │ │ ├── mlflow-hello-world.py
│ │ │ └── requirements.txt
│ │ └── notebooks
│ │ │ └── Comet_and_MLFlow.ipynb
│ ├── nerfstudio
│ │ └── notebooks
│ │ │ └── Comet-NerfStudio_sitcoms3d.ipynb
│ ├── prophet
│ │ └── notebooks
│ │ │ └── Comet_and_Prophet.ipynb
│ ├── pycaret
│ │ └── notebooks
│ │ │ └── comet_pycaret.ipynb
│ ├── pytorch-lightning
│ │ ├── notebooks
│ │ │ └── Comet_and_Pytorch_Lightning.ipynb
│ │ ├── pytorch-lightning-hello-world
│ │ │ ├── README.md
│ │ │ ├── pytorch-lightning-hello-world.py
│ │ │ └── requirements.txt
│ │ └── pytorch-lightning-optimizer
│ │ │ ├── README.md
│ │ │ ├── pytorch-lightning-optimizer.py
│ │ │ └── requirements.txt
│ ├── pytorch
│ │ ├── notebooks
│ │ │ ├── Comet_Pytorch_Tensorboard.ipynb
│ │ │ ├── Comet_Pytorch_TensorboardX.ipynb
│ │ │ ├── Comet_and_Pytorch.ipynb
│ │ │ └── Histogram_Logging_Pytorch.ipynb
│ │ ├── pytorch-mnist
│ │ │ ├── README.md
│ │ │ ├── pytorch-mnist-example.py
│ │ │ └── requirements.txt
│ │ ├── pytorch-rich-logging
│ │ │ ├── README.md
│ │ │ ├── pytorch-rich-logging-example.py
│ │ │ └── requirements.txt
│ │ └── pytorch-tensorboard
│ │ │ ├── README.md
│ │ │ ├── pytorch-tensorboard-example.py
│ │ │ └── requirements.txt
│ ├── ray-train
│ │ ├── notebooks
│ │ │ ├── Comet_with_ray_train_huggingface_transformers.ipynb
│ │ │ ├── Comet_with_ray_train_keras.ipynb
│ │ │ ├── Comet_with_ray_train_pytorch_lightning.ipynb
│ │ │ └── Comet_with_ray_train_xgboost.ipynb
│ │ └── ray-train-hello-world-transformers
│ │ │ ├── Comet_with_ray_train_huggingface_transformers.py
│ │ │ ├── README.md
│ │ │ └── requirements.txt
│ ├── sagemaker
│ │ ├── log_completed_sagemaker_runs
│ │ │ ├── README.md
│ │ │ ├── mnist.py
│ │ │ └── train_mnist.ipynb
│ │ └── log_custom_scripts
│ │ │ ├── README.md
│ │ │ ├── huggingface-text-classification
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ │ ├── requirements.txt
│ │ │ │ └── text_classification.py
│ │ │ └── train_text_classification.ipynb
│ │ │ ├── pytorch-mnist
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ │ ├── mnist.py
│ │ │ │ └── requirements.txt
│ │ │ └── train_mnist.ipynb
│ │ │ └── tensorflow-mnist
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ ├── mnist.py
│ │ │ └── requirements.txt
│ │ │ └── train_mnist.ipynb
│ ├── scikit-learn
│ │ ├── notebooks
│ │ │ └── Comet_with_Scikit_Learn.ipynb
│ │ ├── sklearn-classification-example
│ │ │ ├── README.md
│ │ │ ├── comet-scikit-classification-example.py
│ │ │ └── requirements.txt
│ │ ├── sklearn-model-saving-example
│ │ │ ├── requirements.txt
│ │ │ └── sklearn-model-saving-example.py
│ │ └── sklearn-nlp-example
│ │ │ ├── README.md
│ │ │ ├── comet-scikit-nlp-example.py
│ │ │ └── requirements.txt
│ ├── spark-nlp
│ │ └── notebooks
│ │ │ └── Comet_SparkNLP.ipynb
│ ├── tensorflow
│ │ └── notebooks
│ │ │ └── Comet_and_Tensorflow.ipynb
│ ├── torchtune
│ │ └── notebooks
│ │ │ └── Comet_and_torchtune.ipynb
│ ├── transformers
│ │ ├── notebooks
│ │ │ └── Comet_with_Hugging_Face_Trainer.ipynb
│ │ ├── transformers-distilbert-fine-tuning
│ │ │ ├── README.md
│ │ │ ├── data
│ │ │ │ └── title_conference.csv
│ │ │ ├── requirements.txt
│ │ │ └── transformers-distilbert-fine-tuning.py
│ │ └── transformers-google-bert-fine-tuning
│ │ │ ├── README.md
│ │ │ ├── requirements.txt
│ │ │ └── transformers-google-bert-fine-tuning.py
│ ├── unsloth
│ │ └── notebooks
│ │ │ └── Comet_and_unsloth.ipynb
│ ├── xgboost
│ │ ├── notebooks
│ │ │ ├── Comet_and_XGBoost.ipynb
│ │ │ ├── how_to_use_comet_with_xgboost_tutorial.ipynb
│ │ │ ├── xg_comet.ipynb
│ │ │ └── xg_data_panel.gif
│ │ └── xgboost-california
│ │ │ ├── README.md
│ │ │ ├── requirements.txt
│ │ │ └── xgboost-california.py
│ ├── yolov5
│ │ └── notebooks
│ │ │ └── Comet_and_YOLOv5.ipynb
│ └── yolov8
│ │ └── notebooks
│ │ └── YOLOv8_and_Comet.ipynb
├── reinforcement-learning
│ ├── gymnasium
│ │ └── notebooks
│ │ │ └── comet_gymnasium_example.ipynb
│ └── rllib
│ │ └── notebooks
│ │ └── Comet_and_RLLib.ipynb
└── workflow-orchestration
│ ├── kubeflow
│ └── kubeflow-hello-world
│ │ ├── README.md
│ │ ├── pipeline.py
│ │ └── requirements.txt
│ ├── metaflow
│ ├── metaflow-hello-world
│ │ ├── README.md
│ │ ├── helloworld.py
│ │ └── requirements.txt
│ ├── metaflow-model-evaluation
│ │ ├── .pylintrc
│ │ ├── README.md
│ │ ├── imagenet_labels.json
│ │ ├── metaflow-model-evaluation.py
│ │ └── requirements.txt
│ ├── metaflow-regression
│ │ ├── README.md
│ │ ├── metaflow-regression-example.py
│ │ └── requirements.txt
│ └── notebooks
│ │ ├── metaflow_hello_world.ipynb
│ │ └── metaflow_model_eval.ipynb
│ └── vertex
│ ├── vertex-hello-world
│ ├── .gitignore
│ ├── README.md
│ ├── demo_pipeline.py
│ └── requirements.txt
│ └── vertex-v2-hello-world
│ ├── README.md
│ ├── demo_pipeline.py
│ └── requirements.txt
├── logo
├── comet_badge.png
└── comet_badge.svg
├── notebooks
├── Comet-Confusion-Matrix-Pytorch.ipynb
├── Comet-Confusion-Matrix.ipynb
├── Comet-Python-API.ipynb
├── Comet-R-nnet.ipynb
├── Comet-REST-API.ipynb
├── Comet_Custom_Panels_for_Object_Detection.ipynb
├── Comet_Logging_Curves.ipynb
├── Comet_Tensorflow_Model_Analysis.ipynb
├── Comet_Visualizations_to_Debug_CNNs.ipynb
├── Comet_and_Vega.ipynb
├── ExpiringDataExample.ipynb
├── comet-key.png
├── confusion-matrix.png
└── keras.ipynb
├── opik
└── streamlit
│ └── call-summarizer
│ ├── .env.example
│ ├── .gitignore
│ ├── .pre-commit-config.yaml
│ ├── README.md
│ ├── app.py
│ ├── poetry.lock
│ ├── pyproject.toml
│ ├── src
│ └── call_summarizer
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── models
│ │ └── models.py
│ │ ├── services
│ │ ├── category_manager.py
│ │ ├── summarization_workflow.py
│ │ └── vector_store.py
│ │ └── utils
│ │ └── file_utils.py
│ └── tests
│ └── __init__.py
├── panels
├── AudioCompare
│ ├── AudioCompare.py
│ ├── README.md
│ ├── audio-compare.png
│ └── built-in-audio-panel.png
├── AudioComparePanel.py
├── CompareMaxAccuracyOverTime
│ ├── CompareMaxAccuracyOverTime.py
│ ├── Notebook.ipynb
│ ├── README.md
│ └── compare-max-accuracy-over-time.png
├── DataGridViewer.py
├── DataGridViewer
│ ├── DataGridViewer.ipynb
│ ├── DataGridViewer.py
│ ├── README.md
│ ├── group-by.png
│ ├── image-dialog.png
│ └── tabular-view.png
├── HistogramViewer
│ └── HistogramViewer.py
├── NotebookViewer
│ ├── NoteBookViewer.py
│ ├── README.md
│ └── notebookviewer.png
├── OptimizerAnalysis.py
├── OptimizerAnalysis
│ ├── OptimizerAnalysis.py
│ ├── README.md
│ └── optimizer-analysis.png
├── README.md
├── SaveModelAsArtifact
│ ├── README.md
│ ├── SaveModelAsArtifact.py
│ └── save-model-as-artifact.png
├── SmokeTest.py
├── SmokeTest
│ └── SmokeTest.py
├── TensorboardGroupViewer.py
├── TensorboardGroupViewer
│ ├── README.md
│ ├── TensorboardGroupViewer.py
│ └── tensorboard-group-viewer.png
├── TensorboardProfileViewer.py
├── TensorboardProfileViewer
│ ├── README.md
│ ├── TensorboardProfileViewer.py
│ └── tensorboard-profile-viewer.png
├── TotalFidelityMetricPlot.py
├── TotalFidelityMetricPlot
│ ├── README.md
│ ├── TotalFidelityMetricPlot.py
│ ├── organization-settings.png
│ └── totalfidelity.png
├── make.py
└── misc
│ └── Leardle.py
├── pytorch
├── README.md
├── comet-pytorch-ddp-cifar10.py
├── comet-pytorch-ddp-mnist-example.py
├── comet-pytorch-ddp-mnist-single-experiment.py
├── comet-pytorch-horovod-mnist.py
└── online-pytorch-lightning-apex-example.py
├── readme.md
├── resources
├── Loan_application_1.pdf
├── Loan_application_2.pdf
├── harvard.wav
├── harvard_noise.wav
├── hello.wav
├── readme.md
├── workers1.jpeg
├── workers2.jpeg
├── workers3.jpeg
├── workers4.jpeg
└── workers5.jpeg
└── xgboost
├── data
├── store.csv
├── test.csv
└── train.csv
├── online-xgboost-python-train.py
└── requirements-py37.txt
/.github/workflows/examples-nata.yaml:
--------------------------------------------------------------------------------
1 | name: nata-examples
2 | on:
3 | push:
4 | branches:
5 | - CM-9290-investigate-concurrent-futures-thread-pool-executor-performance-and-data-loss-problem
6 | jobs:
7 | test:
8 | runs-on: ubuntu-latest
9 | strategy:
10 | fail-fast: false
11 | matrix:
12 | python-version:
13 | - "3.11"
14 | - "3.10"
15 | - "3.9"
16 | - "3.8"
17 | - "3.7"
18 | steps:
19 | - uses: actions/checkout@v4
20 | - uses: actions/setup-python@v5
21 | with:
22 | python-version: ${{ matrix.python-version }}
23 | - name: Install dependencies
24 | run: |
25 | python -V
26 | python -m pip install --upgrade pip
27 | python -m pip install -U \
28 | ipython \
29 | nbconvert \
30 | 'tensorflow==1.15.2' \
31 | numpy \
32 | matplotlib \
33 | keras \
34 | scikit-learn \
35 | 'torch>=1' \
36 | 'fastai==1.0.38' \
37 | dataclasses \
38 | chainer \
39 | mlflow
40 | - name: Debug installed dependencies
41 | run: |
42 | python -m pip list
43 | - name: Run
44 | run: python pytorch/comet-pytorch-ddp-mnist-single-experiment.py
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 | .DS_Store
75 |
76 | # pyenv
77 | .python-version
78 |
79 | # celery beat schedule file
80 | celerybeat-schedule
81 |
82 | # SageMath parsed files
83 | *.sage.py
84 |
85 | # Environments
86 | .env
87 | .venv
88 | env/
89 | venv/
90 | ENV/
91 | env.bak/
92 | venv.bak/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | /site
103 |
104 | # mypy
105 | .mypy_cache/
106 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.3.0 # Use the ref you want to point at
4 | hooks:
5 | - id: check-json
6 | - id: check-merge-conflict
7 | - id: check-yaml
8 | - id: debug-statements
9 | - id: requirements-txt-fixer
10 | - id: trailing-whitespace
11 | - repo: https://github.com/timothycrosley/isort
12 | rev: 5.10.1
13 | hooks:
14 | - id: isort
15 | - repo: https://github.com/ambv/black
16 | rev: 22.3.0
17 | hooks:
18 | - id: black
19 | args: [--safe]
20 | - id: black-jupyter
21 | args: [--safe]
22 | - repo: https://github.com/pycqa/flake8
23 | rev: 6.0.0
24 | hooks:
25 | - id: flake8
26 | additional_dependencies: ['flake8-coding==1.3.2']
27 |
--------------------------------------------------------------------------------
/SageMaker/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## SageMaker Integration with Comet.ml
4 |
5 | Comet's SageMaker integration is available to Enterprise customers only. If you are interested in learning more about Comet Enterprise, or are in a trial period with Comet.ml and would like to evaluate the SageMaker integration, please email support@comet.ml and we will share credentials so that you can download the correct packages.
6 |
7 | ## Examples Repository
8 |
9 | This repository contains examples of using Comet.ml with the SageMaker built-in algorithms Linear Learner and Random Cut Forests.
10 |
11 |
12 | ## Documentation
13 |
14 | Full [documentation](http://www.comet.ml/docs/) and additional training examples are available on our website.
15 |
16 |
17 | ## Installation
18 |
19 | Please contact us for installation instructions.
20 |
21 | ## Configuration
22 |
23 | The SageMaker integration follows the [Comet.ml Python SDK configuration](http://docs.comet.ml/python-sdk/advanced/#python-configuration) to configure the REST API key, workspace, and project_name used for created experiments. It also follows the [Boto configuration](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html) to find your SageMaker training jobs.
24 |
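For example, a minimal sketch of providing this configuration through the standard Comet environment variables (the values below are placeholders; the linked documentation also describes configuration-file options):

```python
# Minimal sketch: set the standard Comet configuration via environment variables
# before importing the SageMaker integration. The values are placeholders.
import os

os.environ["COMET_API_KEY"] = "REPLACE_WITH_YOUR_API_KEY"
os.environ["COMET_WORKSPACE"] = "REPLACE_WITH_YOUR_WORKSPACE"
os.environ["COMET_PROJECT_NAME"] = "REPLACE_WITH_YOUR_PROJECT_NAME"
```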
25 | ## Logging SageMaker training runs to Comet
26 |
27 | Below are three different ways to log your SageMaker jobs to Comet: from an existing regressor/estimator object, by SageMaker job name, or from the last SageMaker job.
28 |
29 | ***
30 |
31 | ### comet_ml_sagemaker.log_sagemaker_job
32 |
33 | `log_sagemaker_job(sagemaker_object, api_key, workspace, project_name)`
34 |
35 | Logs a SageMaker job based on an estimator/regressor object.
36 |
37 | * **sagemaker_object** = SageMaker estimator/regressor object
38 | * **api_key** = your Comet REST API key
39 | * **workspace** = your Comet workspace
40 | * **project_name** = your Comet project_name
41 |
42 | ***
43 |
44 | ### comet_ml_sagemaker.log_sagemaker_job_by_name
45 |
46 | `log_sagemaker_job_by_name(job_name, api_key, workspace, project_name)`
47 |
48 | Logs a specific SageMaker training job based on the job name from the SageMaker SDK.
49 |
50 | * **job_name** = CloudWatch/SageMaker training job name
51 | * **api_key** = your Comet REST API key
52 | * **workspace** = your Comet workspace
53 | * **project_name** = your Comet project_name
54 |
55 | ***
56 |
57 | ### comet_ml_sagemaker.log_last_sagemaker_job
58 |
59 | `log_last_sagemaker_job(api_key, workspace, project_name)`
60 |
61 | Logs the last *started* SageMaker training job based on the current configuration.
62 |
63 | * **api_key** = your Comet REST API key
64 | * **workspace** = your Comet workspace
65 | * **project_name** = your Comet project_name
66 |
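As a minimal sketch of how the three functions above can be called (assuming the `comet_ml_sagemaker` package is installed per the instructions and `estimator` is an already-fitted SageMaker estimator; all names and values below are placeholders):

```python
# Minimal sketch of the three logging entry points documented above.
# `estimator` is assumed to be an already-fitted SageMaker estimator/regressor.
import comet_ml_sagemaker

API_KEY = "YOUR_COMET_REST_API_KEY"
WORKSPACE = "your-workspace"
PROJECT_NAME = "your-project"

# Log a job from an existing estimator/regressor object
comet_ml_sagemaker.log_sagemaker_job(
    estimator, api_key=API_KEY, workspace=WORKSPACE, project_name=PROJECT_NAME
)

# Log a specific training job by its SageMaker/CloudWatch job name
comet_ml_sagemaker.log_sagemaker_job_by_name(
    "my-training-job-name",
    api_key=API_KEY,
    workspace=WORKSPACE,
    project_name=PROJECT_NAME,
)

# Log the last started training job found with the current Boto configuration
comet_ml_sagemaker.log_last_sagemaker_job(
    api_key=API_KEY, workspace=WORKSPACE, project_name=PROJECT_NAME
)
```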
67 | ***
68 |
69 | ## Tutorials + Examples
70 | - [Linear Learner](Linear_example.ipynb)
71 | - [Random Cut Forests](random_forest.ipynb)
72 |
73 |
74 | ## Support
75 | Have questions? We have answers -
76 | - Try checking our [FAQ Page](https://www.comet.ml/faq)
77 | - Email us at support@comet.ml
78 | - For the fastest response, ping us on [Slack](https://join.slack.com/t/cometml/shared_invite/enQtMzM0OTMwNTQ0Mjc5LTM4ZDViODkyYTlmMTVlNWY0NzFjNGQ5Y2Q1Y2EwMjQ5MzQ4YmI2YjhmZTY3YmYxYTYxYTNkYzM4NjgxZmJjMDI)
79 |
80 |
81 | ## Feature Spotlight
82 | Check out new product features and updates through our [Release Notes](https://www.notion.so/cometml/Comet-ml-Release-Notes-93d864bcac584360943a73ae9507bcaa). Also check out our articles on [Medium](https://medium.com/comet-ml).
83 |
84 |
--------------------------------------------------------------------------------
/distributed-optimizer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9.9
2 |
3 | RUN pip install comet_ml
4 |
5 | COPY run_optimizer.py ./run_optimizer.py
6 | COPY initialize_optimizer.py ./initialize_optimizer.py
7 |
--------------------------------------------------------------------------------
/distributed-optimizer/initialize_optimizer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from comet_ml import Optimizer
3 |
4 |
5 | def run():
6 |
7 | opt_config = {
8 | # We pick the Bayes algorithm:
9 | "algorithm": "grid",
10 | # Declare your hyperparameters in the Vizier-inspired format:
11 | "parameters": {
12 | "x": {"type": "integer", "min": 1, "max": 5},
13 | },
14 | # Declare what we will be optimizing, and how:
15 | "spec": {
16 | "metric": "loss",
17 | "objective": "minimize",
18 | },
19 | }
20 |
21 | # initialize the optimizer object
22 | opt = Optimizer(config=opt_config)
23 |
24 | # print Optimizer id
25 | optimizer_id = opt.get_id()
26 | print(optimizer_id)
27 |
28 |
29 | if __name__ == "__main__":
30 | run()
31 |
--------------------------------------------------------------------------------
/distributed-optimizer/job-initialize-comet-optimizer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: initialize-comet-optimizer
5 | spec:
6 | template:
7 | spec:
8 | containers:
9 | - name: start-optimizer
10 | image: comet-optimizer
11 | env:
12 | - name: COMET_API_KEY
13 | value: "REPLACE_WITH_YOUR_API_KEY"
14 | - name: COMET_WORKSPACE
15 | value: "REPLACE_WITH_YOUR_WORKSPACE"
16 | - name: COMET_PROJECT_NAME
17 | value: "REPLACE_WITH_YOUR_PROJECT_NAME"
18 | command: ['python', 'initialize_optimizer.py']
19 | imagePullPolicy: Never
20 | restartPolicy: Never
21 | backoffLimit: 4
--------------------------------------------------------------------------------
/distributed-optimizer/job-optimizer-uuid.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: optimizer-demo
5 | spec:
6 | parallelism: 4
7 | template:
8 | spec:
9 | containers:
10 | - name: optimizer-demo
11 | image: comet-optimizer
12 | command: ['python', 'run_optimizer.py']
13 | env:
14 | - name: COMET_API_KEY
15 | value: "REPLACE_WITH_YOUR_API_KEY"
16 | - name: COMET_WORKSPACE
17 | value: "REPLACE_WITH_YOUR_WORKSPACE"
18 | - name: COMET_PROJECT_NAME
19 | value: "REPLACE_WITH_YOUR_PROJECT_NAME"
20 | - name: COMET_OPTIMIZER_ID
21 | value: ""
22 | imagePullPolicy: Never
23 | restartPolicy: Never
24 | backoffLimit: 4
--------------------------------------------------------------------------------
/distributed-optimizer/job-optimizer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: optimizer-demo
5 | spec:
6 | parallelism: 4
7 | template:
8 | spec:
9 | containers:
10 | - name: optimizer-demo
11 | image: comet-optimizer
12 | command: ['python', 'run_optimizer.py']
13 | env:
14 | - name: COMET_API_KEY
15 | value: "REPLACE_WITH_YOUR_API_KEY"
16 | - name: COMET_WORKSPACE
17 | value: "REPLACE_WITH_YOUR_WORKSPACE"
18 | - name: COMET_PROJECT_NAME
19 | value: "REPLACE_WITH_YOUR_PROJECT_NAME"
20 | - name: COMET_OPTIMIZER_ID
21 | value: "REPLACE_WITH_OPTIMIZER_ID"
22 | imagePullPolicy: Never
23 | restartPolicy: Never
24 | backoffLimit: 4
--------------------------------------------------------------------------------
/distributed-optimizer/readme.md:
--------------------------------------------------------------------------------
1 | # Comet Optimizer and Kubernetes
2 |
3 | An example of using the Comet Optimizer on a local Kubernetes instance.
4 |
5 | For more information about the Comet Optimizer, see the [Docs](https://www.comet.ml/docs/python-sdk/Optimizer/).
6 |
7 | ## Setup
8 |
9 | Install Docker Desktop; instructions can be found on the [Docker website](https://www.docker.com/products/docker-desktop).
10 |
11 | Install minikube
12 |
13 | ```bash
14 | brew install minikube
15 | ```
16 |
17 | Set your API key, workspace, and project name in job-initialize-comet-optimizer.yaml and job-optimizer.yaml. You can do this by replacing the environment variable placeholders with your values, or by running the following commands in your terminal.
18 |
19 | ```bash
20 | COMET_API_KEY=""
21 | COMET_WORKSPACE=""
22 | COMET_PROJECT_NAME=""
23 |
24 | sed -i '' -e "s/REPLACE_WITH_YOUR_API_KEY/$COMET_API_KEY/g" job-initialize-comet-optimizer.yaml &&
25 | sed -i '' -e "s/REPLACE_WITH_YOUR_API_KEY/$COMET_API_KEY/g" job-optimizer.yaml
26 |
27 | sed -i '' -e "s/REPLACE_WITH_YOUR_WORKSPACE/$COMET_WORKSPACE/g" job-initialize-comet-optimizer.yaml &&
28 | sed -i '' -e "s/REPLACE_WITH_YOUR_WORKSPACE/$COMET_WORKSPACE/g" job-optimizer.yaml
29 |
30 | sed -i '' -e "s/REPLACE_WITH_YOUR_PROJECT_NAME/$COMET_PROJECT_NAME/g" job-initialize-comet-optimizer.yaml &&
31 | sed -i '' -e "s/REPLACE_WITH_YOUR_PROJECT_NAME/$COMET_PROJECT_NAME/g" job-optimizer.yaml
32 | ```
33 |
34 | ## To Run
35 |
36 | Start minikube
37 | ```bash
38 | minikube start
39 | ```
40 | Enable the local use of docker in minikube
41 | ```bash
42 | eval $(minikube docker-env)
43 | ```
44 | Build the docker image
45 | ```bash
46 | docker build ./ -t comet-optimizer
47 | ```
48 | Open a dashboard in your web browser to view job status and logs
49 | ```bash
50 | minikube dashboard
51 | ```
52 |
53 | Run the shell script to:
54 | * Initialize the Comet Optimizer
55 | * Run optimization sweeps
56 | ```bash
57 | sh run.sh
58 | ```
59 |
60 | ## Customizing this template
61 |
62 | 1. Add your model training functions in run_optimizer.py (a minimal sketch is shown below).
63 | 2. Change the number of experiments running in parallel by updating the "parallelism" value in job-optimizer.yaml (line 6).
64 |
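For step 1, here is a minimal sketch of what run_optimizer.py might look like with the placeholder training replaced; the quadratic objective below is only an illustration, so swap in your real training code and log the metric named in the Optimizer spec:

```python
# Minimal sketch: replace the placeholder training in run_optimizer.py
# with real training code and log the metric declared in the Optimizer spec.
from comet_ml import Optimizer

# Re-attaches to the existing Optimizer via the COMET_OPTIMIZER_ID environment variable
opt = Optimizer()

for experiment in opt.get_experiments():
    x = experiment.get_parameter("x")

    # Toy stand-in for model training; replace with your own training functions
    loss = (x - 3) ** 2

    # Log the metric the Optimizer is configured to minimize
    experiment.log_metric("loss", loss)
    experiment.end()
```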
65 |
--------------------------------------------------------------------------------
/distributed-optimizer/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml
--------------------------------------------------------------------------------
/distributed-optimizer/run.sh:
--------------------------------------------------------------------------------
1 | # Initialize Comet Optimizer
2 | kubectl apply -f ./job-initialize-comet-optimizer.yaml
3 |
4 | # Check that the Comet Optimizer has been created
5 | kubectl wait --for=condition=complete job/initialize-comet-optimizer
6 |
7 | # Get optimizer ID
8 | OPTIMIZER_ID=$(kubectl logs --tail=1 job/initialize-comet-optimizer | cut -d "=" -f2)
9 | echo $OPTIMIZER_ID
10 |
11 | # Copy job optimizer yaml template
12 | cp job-optimizer.yaml job-optimizer-uuid.yaml
13 |
14 | # Replace "REPLACE_WITH_OPTIMIZER_ID" with Optimizer ID in job-optimizer-uuid.yaml
15 | sed -i '' -e "s/REPLACE_WITH_OPTIMIZER_ID/$OPTIMIZER_ID/g" job-optimizer-uuid.yaml
16 |
17 | # Run the Optimization process
18 | kubectl apply -f ./job-optimizer-uuid.yaml
19 |
--------------------------------------------------------------------------------
/distributed-optimizer/run_optimizer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import time
3 |
4 | from comet_ml import Optimizer
5 |
6 |
7 | def run():
8 |
9 | # access existing Optimizer object using key
10 | opt = Optimizer()
11 |
12 | # loop over experiments in Optimizer generator
13 | for experiment in opt.get_experiments():
14 | x = experiment.get_parameter("x")
15 | experiment.log_parameter("x", x)
16 | print("Current hyperparameter value: ", x)
17 |
18 | # add model training functions here
19 | print("Training model")
20 |
21 | # add sleep to simulate training
22 | time.sleep(60)
23 |
24 |
25 | if __name__ == "__main__":
26 | run()
27 |
--------------------------------------------------------------------------------
/fastai/README.md:
--------------------------------------------------------------------------------
1 | All of the fastai examples have been moved here: https://github.com/comet-ml/comet-examples/tree/master/integrations/model-training/fastai/.
2 |
--------------------------------------------------------------------------------
/guides/MPM/end_to_end_example/.gitignore:
--------------------------------------------------------------------------------
1 | training/preprocessed_data.csv
2 | __pycache__
--------------------------------------------------------------------------------
/guides/MPM/end_to_end_example/data_processing/data_processing.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | from io import StringIO
4 |
5 | import comet_ml
6 |
7 | import pandas as pd
8 |
9 |
10 | def get_raw_data(workspace_name: str, artifact_name: str):
11 | """
12 | In this function, we will check if the raw data exists in Comet Artifacts. If it
13 | does, we will download it from there, if not we will upload it from the local
14 | directory.
15 |
16 | Once the file is available locally, we will load it into a pandas dataframe and
17 | return it.
18 | """
19 | exp = comet_ml.get_running_experiment()
20 |
21 | try:
22 | artifact = exp.get_artifact(artifact_name=f"{artifact_name}_raw")
23 |
24 | # Download the artifact
25 | artifact.download(path="./")
26 | except Exception as e:
27 | print(f"Error downloading artifact: {e}")
28 | artifact = comet_ml.Artifact(
29 | name=f"{artifact_name}_raw", artifact_type="dataset"
30 | )
31 | artifact.add("./credit_scoring_dataset.csv")
32 | exp.log_artifact(artifact)
33 |
34 | df = pd.read_csv("./credit_scoring_dataset.csv")
35 | return df
36 |
37 |
38 | def preprocess_data(df: pd.DataFrame, artifact_name: str):
39 | """
40 | In this function, we will preprocess the data to make it ready for the model. We
41 | will store the preprocessed data in a new Comet Artifact.
42 | """
43 | # Select the relevant columns
44 | df = df.loc[
45 | :,
46 | [
47 | "CustAge",
48 | "CustIncome",
49 | "EmpStatus",
50 | "UtilRate",
51 | "OtherCC",
52 | "ResStatus",
53 | "TmAtAddress",
54 | "TmWBank",
55 | "probdefault",
56 | ],
57 | ]
58 |
59 | # Rename the target column
60 | df.rename({"probdefault": "probability_default"}, inplace=True, axis=1)
61 |
62 | # Convert the categorical columns to category type
63 | for c in ["EmpStatus", "OtherCC", "ResStatus"]:
64 | df[c] = df[c].astype("category")
65 |
66 | # Save the preprocessed data to a new Comet Artifact
67 | csv_buffer = StringIO()
68 | df.to_csv(csv_buffer, index=False)
69 | csv_buffer.seek(0)
70 |
71 | artifact = comet_ml.Artifact(
72 | name=f"{artifact_name}_preprocessed", artifact_type="dataset"
73 | )
74 | artifact.add(local_path_or_data=csv_buffer, logical_path="preprocessed_data.csv")
75 |
76 | exp = comet_ml.get_running_experiment()
77 | exp.log_artifact(artifact)
78 |
79 | return df
80 |
81 |
82 | if __name__ == "__main__":
83 | workspace_name = os.environ["COMET_WORKSPACE"]
84 | project_name = os.environ["COMET_PROJECT_NAME"]
85 | artifact_name = os.environ["COMET_ARTIFACT_NAME"]
86 |
87 | exp = comet_ml.start(workspace=workspace_name, project_name=project_name)
88 | df = get_raw_data(workspace_name, artifact_name)
89 |
90 |     processed_df = preprocess_data(df, artifact_name)
91 |
92 | print("Data preprocessing complete.")
93 |
--------------------------------------------------------------------------------
/guides/MPM/end_to_end_example/readme.md:
--------------------------------------------------------------------------------
1 | # MPM example scripts
2 |
3 | The MPM examples are all based on the same Credit Scoring examples, the goal of the model is to identify users that are likely to default on their loan.
4 |
5 | This folder contains three different sets of scripts that showcase MPM:
6 | * `data_processing`: Script that processes the raw data and creates a new CSV file with the model's features
7 | * `training`: Script that trains a machine learning model and uploads it to Comet's Model Registry
8 | * `serving`: FastAPI inference server that downloads a model from Comet's Model Registry and logs its predictions to MPM
9 |
10 | ## Setup
11 | In order to run these demo scripts you will need to set these environment variables:
12 | ```bash
13 | export COMET_API_KEY=""
14 | export COMET_WORKSPACE=""
15 | export COMET_PROJECT_NAME=""
16 | export COMET_MODEL_REGISTRY_NAME=""
17 |
18 | # These environment variables are only required if you are running MPM locally
19 | export COMET_URL_OVERRIDE=""
20 | export COMET_URL=""
21 | ```
22 |
23 | You will also need to install the Python libraries in `requirements.txt`
24 |
25 | ## Data processing
26 |
27 | For this demo, we will be using a simple credit scoring dataset available in the `data_processing` folder.
28 |
29 | The preprocessing step is quite simple in this demo, but it showcases how you can use Comet's Artifacts feature to track all your data processing steps.
30 |
31 | The code can be run using:
32 | ```
33 | cd data_processing
34 | python data_processing.py
35 | ```
36 |
37 | ## Training
38 | For this demo we train a LightGBM model that we then upload to the model registry.
39 |
40 | The code can be run using:
41 | ```
42 | cd training
43 | python model_training.py
44 | ```
45 |
46 | ## Serving
47 | **Dependency**: In order to use this inference server, you will need to first train a model and upload it to the model registry using the training scripts.
48 |
49 | The inference server is built using FastAPI and demonstrates how to use both the model registry to store models as well as MPM to log predictions.
50 |
51 | The code can be run using:
52 | ```
53 | cd serving
54 | uvicorn main:app --reload
55 | ```
56 |
57 | Once the code is running, an inference server will be available at `http://localhost:8000` with the following endpoints:
58 | * `http://localhost:8000/`: returns the string `FastAPI inference service` and indicates the inference server is running
59 | * `http://localhost:8000/health_check`: Simple health check to make sure the server is running and accepting requests
60 | * `http://localhost:8000/prediction`: Make a prediction and log it to MPM
61 | * `http://localhost:8000/create_demo_data`: Creates 10,000 predictions over a one week period to populate MPM dashboards
62 |
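For reference, here is a minimal sketch of calling the server with the `requests` library (already listed in `requirements.txt`). The exact request method and payload schema are defined in `serving/FastAPI/main.py`, so the field names and values below are illustrative only:

```python
# Minimal sketch: exercise the running inference server.
# The /prediction payload is illustrative; check serving/FastAPI/main.py for the real schema.
import requests

BASE_URL = "http://localhost:8000"

# Simple liveness check
print(requests.get(f"{BASE_URL}/health_check").text)

# Hypothetical prediction request using the credit-scoring features
# produced by the data_processing step
payload = {
    "CustAge": 45,
    "CustIncome": 35000,
    "EmpStatus": "Employed",
    "UtilRate": 0.25,
    "OtherCC": "Yes",
    "ResStatus": "Home Owner",
    "TmAtAddress": 24,
    "TmWBank": 36,
}
response = requests.post(f"{BASE_URL}/prediction", json=payload)
print(response.status_code, response.text)
```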
63 | **Note:** It can take a few minutes for the data to appear in the debugger tab in the MPM UI.
64 |
--------------------------------------------------------------------------------
/guides/MPM/end_to_end_example/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml
2 | pandas
3 | numpy
4 | lightgbm
5 | fastapi
6 | requests
7 | asyncio
8 | tqdm
9 | comet_mpm
10 | uvicorn
--------------------------------------------------------------------------------
/guides/MPM/end_to_end_example/training/model_training.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 |
4 | import comet_ml
5 |
6 | import lightgbm as lgb
7 | import numpy as np
8 | import pandas as pd
9 |
10 |
11 | def get_training_data(artifact_name: str) -> pd.DataFrame:
12 | exp = comet_ml.get_running_experiment()
13 |
14 | artifact = exp.get_artifact(artifact_name)
15 | artifact.download(path="./")
16 |
17 | df = pd.read_csv("preprocessed_data.csv")
18 | for c in ["EmpStatus", "OtherCC", "ResStatus"]:
19 | df[c] = df[c].astype("category")
20 |
21 | return df
22 |
23 |
24 | def train_model(training_data: pd.DataFrame, model_name: str) -> lgb.Booster:
25 | # Create training dataset
26 | X_train = training_data.drop("probability_default", axis=1)
27 | y_train = training_data["probability_default"] >= 0.5
28 |
29 | training_dataset = lgb.Dataset(data=X_train, label=y_train)
30 |
31 | # Train model
32 | params = {
33 | "num_iterations": 30,
34 | "max_depth": 2,
35 | "objective": "binary",
36 | "metric": ["auc", "average_precision", "l1", "l2"],
37 | }
38 | model = lgb.train(
39 | params=params, train_set=training_dataset, valid_sets=training_dataset
40 | )
41 |
42 | # Evaluate model
43 | y_pred = np.where(model.predict(X_train) > 0.5, 1, 0)
44 | experiment.log_confusion_matrix(y_true=y_train, y_predicted=y_pred)
45 |
46 | # Save model and log to Comet
47 | model.save_model("./model.txt")
48 | experiment.log_model(model_name, "./model.txt")
49 | os.remove("./model.txt")
50 |
51 | return model
52 |
53 |
54 | if __name__ == "__main__":
55 | ARTIFACT_NAME = os.environ["COMET_PROJECT_NAME"]
56 | WORKSPACE = os.environ["COMET_WORKSPACE"]
57 | MODEL_REGISTRY_NAME = os.environ["COMET_MODEL_REGISTRY_NAME"]
58 |
59 | # Model training script
60 | experiment = comet_ml.start()
61 |
62 | training_data = get_training_data(artifact_name=f"{ARTIFACT_NAME}_preprocessed")
63 | model = train_model(training_data, model_name=MODEL_REGISTRY_NAME)
64 |
65 | experiment.register_model(MODEL_REGISTRY_NAME)
66 |
--------------------------------------------------------------------------------
/guides/advanced/Running_Offline_Experiments.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "nD0FUYrfkSUT"
7 | },
8 | "source": [
9 | "# Install Comet"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {
16 | "id": "wMWufCyZbZOx"
17 | },
18 | "outputs": [],
19 | "source": [
20 | "%pip install -U \"comet_ml>=3.44.0\""
21 | ]
22 | },
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {
26 | "id": "gIyCqVw6kT7L"
27 | },
28 | "source": [
 29 |         "# Initialize Comet Project"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {
36 | "id": "B2s8nND-birg"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "import comet_ml\n",
41 | "\n",
42 | "comet_ml.login(project_name=\"running-offline\")"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "metadata": {
48 | "id": "X9YELwknkPtq"
49 | },
50 | "source": [
51 | "# Create an Offline Experiment"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {
58 | "id": "o7UtgSB-bk6K"
59 | },
60 | "outputs": [],
61 | "source": [
62 | "experiment = comet_ml.start(online=False)\n",
63 | "experiment.log_metrics({\"accuracy\": 0.5, \"loss\": 0.001})\n",
64 | "experiment.end()"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {
70 | "id": "TND_ukbJkMoS"
71 | },
72 | "source": [
73 | "# Upload a Completed Experiment"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {
79 | "id": "zDOkoYnaka1F"
80 | },
81 | "source": [
 82 |         "Once the Experiment has finished running, you can upload it to the Comet UI with the following command"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "metadata": {
89 | "id": "uo53wkSabrnW"
90 | },
91 | "outputs": [],
92 | "source": [
93 | "!comet upload /content/.cometml-runs/*.zip"
94 | ]
95 | }
96 | ],
97 | "metadata": {
98 | "colab": {
99 | "provenance": []
100 | },
101 | "kernelspec": {
102 | "display_name": "Python 3 (ipykernel)",
103 | "language": "python",
104 | "name": "python3"
105 | },
106 | "language_info": {
107 | "codemirror_mode": {
108 | "name": "ipython",
109 | "version": 3
110 | },
111 | "file_extension": ".py",
112 | "mimetype": "text/x-python",
113 | "name": "python",
114 | "nbconvert_exporter": "python",
115 | "pygments_lexer": "ipython3",
116 | "version": "3.9.1"
117 | }
118 | },
119 | "nbformat": 4,
120 | "nbformat_minor": 4
121 | }
122 |
--------------------------------------------------------------------------------
/guides/get-started/Comet_Quickstart.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # 💥 Login to Comet and grab your Credentials
5 |
6 | import comet_ml
7 |
8 | comet_ml.login(project_name="comet-example-intro-to-comet")
9 |
10 |
11 | # 🚀 Let's start logging Experiments!
12 |
13 | # A Comet Experiment is a unit of measurable research that defines a single
14 | # run with some data/parameters/code/metrics.
15 |
16 | experiment = comet_ml.start()
17 |
18 |
19 | # Comet supports logging metrics, parameters, source code, system information,
20 | # models and media. You name it, we can log it!
21 |
22 | # In the sections below, we will walk through the basic methods for logging
23 | # data to Comet. In addition to these methods, Comet also supports
24 | # auto-logging data based on the framework you are using. This means that once
25 | # you have created the Experiment object in your code, you can run it as is,
26 | # and Comet will take care of the logging for you!
27 |
28 | # If Auto-Logging isn't enough, Comet is infinitely customizable to your
29 | # specific needs!
30 |
31 | # Learn more about Auto-Logging:
32 | # https://www.comet.com/docs/v2/guides/experiment-management/log-data/overview/#automated-logging
33 |
34 | # Logging Metrics
35 |
36 | metrics = {"accuracy": 0.65, "loss": 0.01}
37 | experiment.log_metrics(metrics)
38 |
39 |
40 | # Logging Metrics Over Time
41 |
42 | for step, value in enumerate(range(0, 100)):
43 | metrics = {"train/accuracy": value / 10, "validation/accuracy": value / 20}
44 | experiment.log_metrics(metrics, step=step)
45 |
46 |
47 | # Logging Parameters
48 |
49 | parameters = {"batch_size": 32, "num_samples": 10000}
50 | experiment.log_parameters(parameters)
51 |
52 |
53 | # End the Experiment
54 |
55 | experiment.end()
56 |
--------------------------------------------------------------------------------
/guides/tracking-ml-training/Comet_in_Notebooks.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "QFkpR5-IVEGr"
7 | },
8 | "source": [
9 | "# Installing Comet\n",
10 | "\n"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {
17 | "id": "bjfA7OqbUvc4"
18 | },
19 | "outputs": [],
20 | "source": [
21 | "%pip install -U \"comet_ml>=3.44.0\""
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {
27 | "id": "jS7jwZNSVH9_"
28 | },
29 | "source": [
30 | "# Login to Comet"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {
37 | "id": "-frLwX-YUzDn"
38 | },
39 | "outputs": [],
40 | "source": [
41 | "import comet_ml\n",
42 | "\n",
43 | "comet_ml.login()"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {
49 | "id": "s4mKxi1QVPjN"
50 | },
51 | "source": [
52 | "# Log a Training Run"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "metadata": {
59 | "id": "Jw9kxe09Vf6C"
60 | },
61 | "outputs": [],
62 | "source": [
63 | "experiment = comet_ml.start(project_name=\"comet-in-notebooks\")\n",
64 | "\n",
65 | "for idx, value in enumerate(range(0, 100, 10)):\n",
66 | " experiment.log_metric(\"accuracy\", value / 100, step=idx)"
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "metadata": {
72 | "id": "U8qHx2kUV3Ju"
73 | },
74 | "source": [
75 | "# Displaying an Experiment"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {
82 | "id": "fcj_fe3dV5jN"
83 | },
84 | "outputs": [],
85 | "source": [
86 | "experiment.display(tab=\"charts\")"
87 | ]
88 | },
89 | {
90 | "cell_type": "markdown",
91 | "metadata": {
92 | "id": "oHcBKGxfVuPy"
93 | },
94 | "source": [
95 | "# Ending an Experiment"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {
102 | "id": "kfSiNZEZVz4Y"
103 | },
104 | "outputs": [],
105 | "source": [
106 | "experiment.end()"
107 | ]
108 | },
109 | {
110 | "cell_type": "markdown",
111 | "metadata": {
112 | "id": "BjLqW2O1Wh_C"
113 | },
114 | "source": [
115 | "# Viewing Cell Execution Order"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {
122 | "id": "tBbLw06mWfal"
123 | },
124 | "outputs": [],
125 | "source": [
126 | "experiment.display(tab=\"code\")"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": null,
132 | "metadata": {
133 | "id": "drpje33JXHXN"
134 | },
135 | "outputs": [],
136 | "source": []
137 | }
138 | ],
139 | "metadata": {
140 | "colab": {
141 | "provenance": []
142 | },
143 | "kernelspec": {
144 | "display_name": "Python 3",
145 | "name": "python3"
146 | },
147 | "language_info": {
148 | "name": "python"
149 | }
150 | },
151 | "nbformat": 4,
152 | "nbformat_minor": 0
153 | }
154 |
--------------------------------------------------------------------------------
/integrations/annoy/README.md:
--------------------------------------------------------------------------------
1 | # Annoy integration with Comet.ml
2 |
3 | Comet integrates with [Annoy](https://github.com/spotify/annoy).
4 |
5 | Annoy ([Approximate Nearest Neighbors](http://en.wikipedia.org/wiki/Nearest_neighbor_search#Approximate_nearest_neighbor) Oh Yeah) is a C++ library with Python bindings to search for points in space that are close to a given query point. It also creates large read-only file-based data structures that are [mmapped](https://en.wikipedia.org/wiki/Mmap) into memory so that many processes may share the same data.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the Annoy integration, please see: https://www.comet.ml/docs/v2/integrations/third-party-tools/annoy/
10 |
11 | ## Setup
12 |
13 | Install dependencies
14 |
15 | ```bash
16 | python -m pip install -r requirements.txt
17 | ```
18 |
19 | ## Run the example
20 |
21 | ```bash
22 | python annoy_example.py
23 | ```
24 |
--------------------------------------------------------------------------------
/integrations/annoy/annoy_example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import random
3 |
4 | import comet_ml
5 |
6 | from annoy import AnnoyIndex
7 |
8 | # Use this if you are using the Cloud version of Comet.ml
 9 | # Comment this line if you are using an on-premises version of Comet.ml
10 | comet_ml.login()
11 | # Uncomment this line if you are using an on-premises version of Comet.ml
12 | # comet_ml.init_onprem()
13 |
14 | experiment = comet_ml.start()
15 |
16 | # Annoy hyper-parameters
17 | f = 40 # Length of item vector that will be indexed
18 | metric = "angular"
19 | seed = 42
20 | output_file = "test.ann"
21 |
22 | # Create and fill Annoy Index
23 | t = AnnoyIndex(f, metric)
24 | t.set_seed(seed)
25 |
26 | for i in range(1000):
27 | v = [random.gauss(0, 1) for z in range(f)]
28 | t.add_item(i, v)
29 |
30 | t.build(10) # 10 trees
31 |
32 | t.save(output_file)
33 |
34 | # Comet logging
35 | index_metadata = {
36 | "f": f,
37 |     "metric": metric,
38 | "n_items": t.get_n_items(),
39 | "n_trees": t.get_n_trees(),
40 | "seed": seed,
41 | }
42 |
43 | experiment.log_parameters(index_metadata, prefix="annoy_index_1")
44 |
45 | experiment.log_asset(output_file, metadata=index_metadata)
46 |
--------------------------------------------------------------------------------
/integrations/annoy/requirements.txt:
--------------------------------------------------------------------------------
1 | annoy
2 | comet_ml>=3.44.0
3 |
--------------------------------------------------------------------------------
/integrations/langgraph/README.md:
--------------------------------------------------------------------------------
1 | # LangGraph Example - Call Summarizer
2 |
3 | This example demonstrates how to use LangGraph with Opik for tracing.
4 |
5 | ## Setup
6 |
7 | ```bash
8 | pyenv local 3.12
9 | poetry env use $(pyenv which python)
10 | poetry install
11 | ```
12 |
13 | ## Run
14 |
15 | ```bash
16 | poetry run python src/call_summarizer.py
17 | ```
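The full workflow lives in `src/call_summarizer.py`. As a rough, minimal sketch of the tracing pattern only (a single-node graph with no LLM call, not the actual summarizer), assuming the `OpikTracer` callback from the `opik` package:

```python
# Minimal sketch: trace a LangGraph graph with Opik's OpikTracer callback.
# This is not the real call_summarizer.py; it only illustrates the wiring.
from typing import TypedDict

from langgraph.graph import END, START, StateGraph
from opik.integrations.langchain import OpikTracer


class State(TypedDict):
    transcript: str
    summary: str


def summarize(state: State) -> dict:
    # Stand-in for a real LLM call: just truncate the transcript
    return {"summary": state["transcript"][:100]}


builder = StateGraph(State)
builder.add_node("summarize", summarize)
builder.add_edge(START, "summarize")
builder.add_edge("summarize", END)
app = builder.compile()

tracer = OpikTracer(graph=app.get_graph(xray=True))
result = app.invoke(
    {"transcript": "Customer called about a billing question...", "summary": ""},
    config={"callbacks": [tracer]},
)
print(result["summary"])
```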
18 |
--------------------------------------------------------------------------------
/integrations/langgraph/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comet-demo-examples-langgraph"
3 | version = "0.1.0"
4 | description = ""
5 | authors = [
6 | {name = "Francisco",email = "22344801+fschlz@users.noreply.github.com"}
7 | ]
8 | readme = "README.md"
9 | requires-python = ">=3.11,<4"
10 | dependencies = [
11 | "openai (>=1.81.0,<2.0.0)",
12 | "opik (>=1.7.26,<2.0.0)",
13 | "langgraph (>=0.4.5,<0.5.0)",
14 | "langchain-openai (>=0.3.17,<0.4.0)",
15 | "langchain (>=0.3.25,<0.4.0)",
16 | "python-dotenv (>=1.1.0,<2.0.0)",
17 | "langchain-anthropic (>=0.3.13,<0.4.0)"
18 | ]
19 |
20 | [tool.poetry]
21 | packages = [{include = "src"}]
22 |
23 | [tool.poetry.group.dev.dependencies]
24 | ipython = "^9.2.0"
25 | black = "^25.1.0"
26 | isort = "^6.0.1"
27 |
28 | [build-system]
29 | requires = ["poetry-core>=2.0.0,<3.0.0"]
30 | build-backend = "poetry.core.masonry.api"
31 |
--------------------------------------------------------------------------------
/integrations/llm/comet-llm/notebooks/CometLLM_hello_world.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "PWVljpddz_vN"
7 | },
8 | "source": [
 9 |         ""
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {
15 | "id": "A0-thQauBRRL"
16 | },
17 | "source": [
18 | "[Comet’s LLMOps tools](https://www.comet.com/site/products/llmops/) are designed to allow users to leverage the latest advancement in Prompt Management and query models in Comet to iterate quicker, identify performance bottlenecks, and visualize the internal state of the Prompt Chains.\n",
19 | "\n",
20 | "Get a preview for what's to come. Check out a completed project created from this notebook [here](https://www.comet.com/examples/comet-example-llm-hello-world/prompts).\n"
21 | ]
22 | },
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {
26 | "id": "V2UZtdWitSLf"
27 | },
28 | "source": [
29 | "# Install Dependencies"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {
36 | "id": "vIQsPNvatQIU"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "%pip install comet_llm"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {
46 | "id": "lpCFdN33tday"
47 | },
48 | "source": [
49 | "# Login to Comet"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": null,
55 | "metadata": {
56 | "id": "kGyz_i-dtfk4"
57 | },
58 | "outputs": [],
59 | "source": [
60 | "import comet_llm\n",
61 | "\n",
62 | "comet_llm.init(project=\"comet-example-llm-hello-world\")"
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "metadata": {},
68 | "source": [
69 | "# Log a single prompt and response\n",
70 | "\n",
71 | "This minimal example logs only the minimum amount of data:"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": null,
77 | "metadata": {},
78 | "outputs": [],
79 | "source": [
80 | "import comet_llm\n",
81 | "\n",
82 | "comet_llm.log_prompt(\n",
83 | " prompt=\"What is your name?\",\n",
84 | " output=\" My name is Alex.\",\n",
85 | ")"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {},
91 | "source": [
92 | "# Log everything\n",
93 | "\n",
 94 |         "You can also log a lot more data:"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "import comet_llm\n",
104 | "\n",
105 | "comet_llm.log_prompt(\n",
106 | " prompt=\"Answer the question and if the question can't be answered, say \\\"I don't know\\\"\\n\\n---\\n\\nQuestion: What is your name?\\nAnswer:\",\n",
107 | " prompt_template=\"Answer the question and if the question can't be answered, say \\\"I don't know\\\"\\n\\n---\\n\\nQuestion: {{question}}?\\nAnswer:\",\n",
108 | " prompt_template_variables={\"question\": \"What is your name?\"},\n",
109 | " metadata={\n",
110 | " \"usage.prompt_tokens\": 7,\n",
111 | " \"usage.completion_tokens\": 5,\n",
112 | " \"usage.total_tokens\": 12,\n",
113 | " },\n",
114 | " output=\" My name is Alex.\",\n",
115 | " duration=16.598,\n",
116 | ")"
117 | ]
118 | }
119 | ],
120 | "metadata": {
121 | "accelerator": "GPU",
122 | "colab": {
123 | "collapsed_sections": [],
124 | "name": "Comet and Pytorch.ipynb",
125 | "provenance": []
126 | },
127 | "kernelspec": {
128 | "display_name": "Python 3 (ipykernel)",
129 | "language": "python",
130 | "name": "python3"
131 | },
132 | "language_info": {
133 | "codemirror_mode": {
134 | "name": "ipython",
135 | "version": 3
136 | },
137 | "file_extension": ".py",
138 | "mimetype": "text/x-python",
139 | "name": "python",
140 | "nbconvert_exporter": "python",
141 | "pygments_lexer": "ipython3",
142 | "version": "3.11.3"
143 | }
144 | },
145 | "nbformat": 4,
146 | "nbformat_minor": 1
147 | }
148 |
--------------------------------------------------------------------------------
/integrations/llm/finetuning/alpaca-lora/README.md:
--------------------------------------------------------------------------------
1 | # Finetuning Alpaca-Lora with Comet
2 |
3 | The [Alpaca LoRA](https://github.com/tloen/alpaca-lora/tree/main) repository is built with Hugging Face Transformers, which means Comet logging is available right out of the box when finetuning the model.
4 |
5 | In this guide, we will demonstrate how you can configure Comet to log the results of your finetuning run.
6 |
7 | [](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/llm/finetuning/alpaca-lora/notebooks/Alpaca_Lora_Finetuning_with_Comet.ipynb)
8 |
9 |
10 | ## Setup
11 |
12 | ### Setup the Alpaca-LoRA repository
13 |
14 | ```shell
15 | git clone https://github.com/tloen/alpaca-lora.git
16 | cd alpaca-lora/ && pip install -r requirements.txt
17 | ```
18 |
19 | ### Install Comet
20 |
21 | ```shell
22 | pip install comet_ml
23 | ```
24 |
25 | ### Configure your Comet Credentials
26 |
27 | ```shell
28 | export COMET_API_KEY="Your Comet API Key"
29 | export COMET_PROJECT_NAME="Your Comet Project Name"
30 | ```
31 |
32 | ## Run the finetuning script
33 |
34 | ```shell
35 | python finetune.py \
36 | --base_model 'decapoda-research/llama-7b-hf' \
37 | --data_path 'yahma/alpaca-cleaned' \
38 | --output_dir './lora-alpaca'
39 | ```
40 |
41 | ## Try it out!
42 |
43 | Finetune an Alpaca model using Colab. Try it out here.
44 |
45 | [](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/llm/finetuning/alpaca-lora/notebooks/Alpaca_Lora_Finetuning_with_Comet.ipynb)
46 |
47 | Can't wait? See a completed [experiment here](https://www.comet.com/team-comet-ml/comet-example-alpaca-lora-finetuning/3709d2137e1f410e89648ff926a5dd0a?experiment-tab=panels&showOutliers=true&smoothing=0&xAxis=step)
--------------------------------------------------------------------------------
/integrations/model-deployment/seldon/notebooks/.gitignore:
--------------------------------------------------------------------------------
1 | .s2i/
2 | MyModel.py
3 | model/
4 | output/
5 | requirements.txt
6 |
--------------------------------------------------------------------------------
/integrations/model-evaluation/shap/shap-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # SHAP integration with Comet.ml
2 |
3 | Comet integrates with [SHAP](https://github.com/slundberg/shap).
4 |
5 | SHAP (SHapley Additive exPlanations) is a visualization tool that makes machine learning models more explainable by visualizing their output. It can be used to explain the prediction of any model by computing the contribution of each feature to the prediction.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the SHAP integration, please see: https://www.comet.com/docs/v2/integrations/ml-frameworks/shap/
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-shap-hello-world).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 | This example is based on the deep learning example from the [SHAP README](https://github.com/shap/shap?tab=readme-ov-file#deep-learning-example-with-deepexplainer-tensorflowkeras-models).
26 |
27 |
28 | ```bash
29 | python shap-hello-world.py
30 | ```
--------------------------------------------------------------------------------
/integrations/model-evaluation/shap/shap-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml
2 | keras
3 | matplotlib
4 | numpy
5 | shap<0.45.0 # Shap output changed radically in version 0.45.0, likely after https://github.com/shap/shap/pull/3318
6 | tensorflow<2.16.0 # https://github.com/shap/shap/issues/3645
7 |
--------------------------------------------------------------------------------
/integrations/model-evaluation/shap/shap-hello-world/shap-hello-world.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from comet_ml import login, start
3 |
4 | import matplotlib
5 | import numpy as np
6 | import shap
7 | from keras.utils import to_categorical
8 |
9 | import keras
10 | from keras import layers
11 |
12 | # Force non-interactive matplotlib backend as Shap images are logged into Comet
13 | matplotlib.use("agg")
14 |
15 | # Login to Comet if needed
16 | login()
17 |
18 | experiment = start(project_name="comet-example-shap-hello-world")
19 |
20 | # Model / data parameters
21 | num_classes = 10
22 | input_shape = (28, 28, 1)
23 |
24 | # Load the data and split it between train and test sets
25 | (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
26 |
27 | # Scale images to the [0, 1] range
28 | x_train = x_train.astype("float32") / 255
29 | x_test = x_test.astype("float32") / 255
30 | # Make sure images have shape (28, 28, 1)
31 | x_train = np.expand_dims(x_train, -1)
32 | x_test = np.expand_dims(x_test, -1)
33 | print("x_train shape:", x_train.shape)
34 | print(x_train.shape[0], "train samples")
35 | print(x_test.shape[0], "test samples")
36 |
37 |
38 | # convert class vectors to binary class matrices
39 | y_train = to_categorical(y_train, num_classes)
40 | y_test = to_categorical(y_test, num_classes)
41 |
42 | batch_size = 128
43 | epochs = 3
44 |
45 | model = keras.Sequential(
46 | [
47 | layers.Input(shape=input_shape),
48 | layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
49 | layers.MaxPooling2D(pool_size=(2, 2)),
50 | layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
51 | layers.MaxPooling2D(pool_size=(2, 2)),
52 | layers.Flatten(),
53 | layers.Dropout(0.5),
54 | layers.Dense(num_classes, activation="softmax"),
55 | ]
56 | )
57 |
58 | model.summary()
59 |
60 | model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
61 |
62 | model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
63 |
64 | score = model.evaluate(x_test, y_test, verbose=0)
65 | print("Test loss:", score[0])
66 | print("Test accuracy:", score[1])
67 |
68 | # select a set of background examples to take an expectation over
69 | background = x_train[np.random.choice(x_train.shape[0], 100, replace=False)]
70 |
71 | # explain predictions of the model on four images
72 | e = shap.DeepExplainer(model, background)
73 | # ...or pass tensors directly
74 | # e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)
75 | shap_values = e.shap_values(x_test[1:5])
76 |
77 | # plot the feature attributions
78 | shap.image_plot(shap_values, -x_test[1:5])
79 |
--------------------------------------------------------------------------------
/integrations/model-optimization/optuna/optuna-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # Optuna integration with Comet.ml
2 |
3 | [Optuna](https://optuna.org/) is an automatic hyperparameter optimization software framework, particularly designed for machine learning.
4 |
5 | Log each Optuna trial to Comet to monitor the progress of your study in real time and analyze hyperparameter importance, giving you full debuggability and reproducibility.
6 |
7 |
8 | ## See it
9 |
10 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-optuna-hello-world/view/45MrjyCtPcJPpKG2gGbkbAHZo/panels).
11 |
12 | ## Setup
13 |
14 | Install dependencies
15 |
16 | ```bash
17 | python -m pip install -r requirements.txt
18 | ```
19 |
20 | ## Run the example
21 |
22 | This example is based on the [official quickstart example](https://colab.research.google.com/github/optuna/optuna-examples/blob/main/quickstart.ipynb).
23 |
24 |
25 | ```bash
26 | python optuna-hello-world.py
27 | ```
28 |
--------------------------------------------------------------------------------
/integrations/model-optimization/optuna/optuna-hello-world/optuna-hello-world.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | from comet_ml import login
3 |
4 | import optuna
5 | from optuna_integration.comet import CometCallback
6 |
7 | # Login to Comet if needed
8 | login()
9 |
10 | study = optuna.create_study()
11 | comet = CometCallback(
12 | study, project_name="comet-example-optuna-hello-world", metric_names=["score"]
13 | )
14 |
15 |
16 | @comet.track_in_comet()
17 | def objective(trial):
18 | x = trial.suggest_float("x", -10, 10)
19 | objective = (x - 2) ** 2
20 |
21 | return objective
22 |
23 |
24 | study.optimize(objective, n_trials=20, callbacks=[comet])
25 |
26 | best_params = study.best_params
27 | found_x = best_params["x"]
28 | print("Found x: {}, (x - 2)^2: {}".format(found_x, (found_x - 2) ** 2))
29 |
--------------------------------------------------------------------------------
/integrations/model-optimization/optuna/optuna-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.33.10
2 | optuna>=4.0.0
3 | optuna-integration>=4.0.0
4 |
--------------------------------------------------------------------------------
/integrations/model-training/composer/mosaicml-getting-started/README.md:
--------------------------------------------------------------------------------
1 | # Composer integration with Comet.ml
2 |
3 | [Composer](https://github.com/mosaicml/composer) is an open-source deep learning training library by [MosaicML](https://www.mosaicml.com/). Built on top of PyTorch, the Composer library makes it easier to implement distributed training workflows on large-scale clusters.
4 |
5 | Instrument Composer with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
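
As a quick orientation, here is a hedged sketch of the wiring using Composer's `CometMLLogger`; the tiny synthetic dataset, model, and training duration are placeholders rather than the shipped `mosaicml-getting-started.py` code.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from composer import Trainer
from composer.loggers import CometMLLogger
from composer.models import ComposerClassifier

# Tiny synthetic classification problem so the sketch runs end-to-end
X = torch.randn(256, 16)
y = torch.randint(0, 2, (256,))
train_dataloader = DataLoader(TensorDataset(X, y), batch_size=32)

model = ComposerClassifier(torch.nn.Linear(16, 2), num_classes=2)

trainer = Trainer(
    model=model,
    train_dataloader=train_dataloader,
    max_duration="1ep",
    loggers=[CometMLLogger()],  # Comet credentials are read from your config/environment
)
trainer.fit()
```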
6 |
7 | ## See it
8 |
9 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-mosaicml-getting-started).
10 |
11 | ## Setup
12 |
13 | Install dependencies
14 |
15 | ```bash
16 | python -m pip install -r requirements.txt
17 | ```
18 |
19 | ## Run the example
20 |
21 | This example is based on the [official Getting Started example](https://colab.research.google.com/github/mosaicml/composer/blob/master/examples/getting_started.ipynb). The code trains a ResNet to classify images from the CIFAR-10 dataset.
22 |
23 |
24 | ```bash
25 | python mosaicml-getting-started.py
26 | ```
27 |
--------------------------------------------------------------------------------
/integrations/model-training/composer/mosaicml-getting-started/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | matplotlib
3 | mosaicml
4 |
--------------------------------------------------------------------------------
/integrations/model-training/deepspeed/deepspeed-cifar/README.md:
--------------------------------------------------------------------------------
1 | # DeepSpeed integration with Comet.ml
2 |
3 | [DeepSpeed](https://github.com/microsoft/DeepSpeed) is a deep learning optimization library that makes distributed training easy, efficient, and effective, scaling model training from a single GPU to thousands of GPUs.
4 |
5 | Instrument your runs with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
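
One way to wire this up is through DeepSpeed's built-in Comet monitor, enabled in the DeepSpeed config. The sketch below only shows the relevant section; the exact field names come from the DeepSpeed monitoring documentation and should be treated as assumptions to verify there, and the shipped `cifar10_deepspeed.py` may wire things up differently.

```python
# Hedged sketch: a DeepSpeed config dict with the Comet monitor section enabled.
ds_config = {
    "train_batch_size": 16,
    "optimizer": {"type": "Adam", "params": {"lr": 0.001}},
    "comet": {
        "enabled": True,
        "project": "comet-example-deepspeed-cifar",  # illustrative project name
    },
}
# The dict is then passed to deepspeed.initialize(model=..., config=ds_config, ...)
```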
6 |
7 | [Find more information about our integration with DeepSpeed](https://www.comet.ml/docs/v2/integrations/ml-frameworks/deepspeed/)
8 |
9 | ## Documentation
10 |
11 | For more information on using and configuring the DeepSpeed integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/deepspeed/](https://www.comet.com/docs/v2/integrations/ml-frameworks/deepspeed/)
12 |
13 | ## See it
14 |
15 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-deepspeed-cifar/).
16 |
17 | ## Setup
18 |
19 | Install dependencies
20 |
21 | ```bash
22 | python -m pip install -r requirements.txt
23 | ```
24 |
25 | ## Run the example
26 |
27 | This example is based on the official CIFAR-10 example from [DeepSpeed](https://github.com/microsoft/DeepSpeedExamples/tree/master/training/cifar).
28 |
29 |
30 | ```bash
31 | deepspeed --bind_cores_to_rank cifar10_deepspeed.py
32 | ```
33 |
--------------------------------------------------------------------------------
/integrations/model-training/deepspeed/deepspeed-cifar/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | deepspeed>=0.14.3
3 | torch
4 | torchvision
5 |
--------------------------------------------------------------------------------
/integrations/model-training/fastai/fastai-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # FastAI integration with Comet.ml
2 |
3 | [fastai](https://github.com/fastai/fastai) is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches.
4 |
5 | Instrument fastai with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the fastai integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/fastai/](https://www.comet.com/docs/v2/integrations/ml-frameworks/fastai/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-examples-fastai-hello-world/view/new/panels?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 | This example fine-tunes a pre-trained ResNet-18 model on the MNIST Tiny dataset for 5 epochs:
26 |
27 |
28 | ```bash
29 | python fastai_hello_world.py
30 | ```
31 |
--------------------------------------------------------------------------------
/integrations/model-training/fastai/fastai-hello-world/fastai_hello_world.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import comet_ml
3 |
4 | from fastai.vision.all import (
5 | Categorize,
6 | Datasets,
7 | GrandparentSplitter,
8 | IntToFloatTensor,
9 | PILImageBW,
10 | ToTensor,
11 | URLs,
12 | error_rate,
13 | get_image_files,
14 | parent_label,
15 | resnet18,
16 | untar_data,
17 | vision_learner,
18 | )
19 |
20 | EPOCHS = 5
21 |
22 | comet_ml.login(project_name="comet-example-fastai-hello-world")
23 | experiment = comet_ml.start()
24 |
25 | path = untar_data(URLs.MNIST_TINY)
26 |
27 | items = get_image_files(path)
28 | tds = Datasets(
29 | items,
30 | [PILImageBW.create, [parent_label, Categorize()]],
31 | splits=GrandparentSplitter()(items),
32 | )
33 | dls = tds.dataloaders(after_item=[ToTensor(), IntToFloatTensor()])
34 |
35 | learn = vision_learner(dls, resnet18, pretrained=True, metrics=error_rate)
36 |
37 | learn.fit_one_cycle(EPOCHS)
38 |
39 | experiment.end()
40 |
--------------------------------------------------------------------------------
/integrations/model-training/fastai/fastai-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | fastai
3 |
--------------------------------------------------------------------------------
/integrations/model-training/keras/keras-mnist-dnn/README.md:
--------------------------------------------------------------------------------
1 | # Keras integration with Comet.ml
2 |
3 | [Keras](https://keras.io/) is an API designed for human beings, not machines. Keras follows best practices for reducing cognitive load: it offers consistent & simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear & actionable error messages. It also has extensive documentation and developer guides.
4 |
5 | Instrument Keras with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
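
The essential pattern the example follows is shown in the hedged sketch below: import `comet_ml` before Keras/TensorFlow and create the experiment before calling `fit()`, so that (with Comet's Keras auto-logging enabled, which is the default) metrics and hyperparameters are captured automatically. The synthetic data and project name are illustrative.

```python
import comet_ml

import numpy as np
from tensorflow import keras

comet_ml.login()
experiment = comet_ml.start(project_name="keras-autologging-sketch")  # illustrative name

# Tiny synthetic dataset so the sketch runs quickly
x = np.random.rand(512, 8).astype("float32")
y = np.random.randint(0, 2, size=(512,))

model = keras.Sequential(
    [
        keras.layers.Dense(16, activation="relu"),
        keras.layers.Dense(2, activation="softmax"),
    ]
)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])

# Epoch-level metrics are picked up by Comet's auto-logging
model.fit(x, y, epochs=2, batch_size=64)

experiment.end()
```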
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the Keras integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/keras/](https://www.comet.com/docs/v2/integrations/ml-frameworks/keras/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=keras)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Experiment](https://www.comet.com/examples/comet-example-keras-mnist-dnn/0b8bd726e4cc45a48d88fc8b7dd5ab6b).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 |
26 | ```bash
27 | python keras-mnist-dnn.py
28 | ```
--------------------------------------------------------------------------------
/integrations/model-training/keras/keras-mnist-dnn/keras-mnist-dnn.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | import logging
4 | from pathlib import Path
5 |
6 | import comet_ml
7 |
8 | import tensorflow as tf
9 | from tensorflow.keras.layers import Dense, Dropout, Flatten
10 | from tensorflow.keras.models import Sequential
11 | from tensorflow.keras.utils import to_categorical
12 |
13 | params = {
14 | "dropout": 0.2,
15 | "batch-size": 64,
16 | "epochs": 5,
17 | "layer-1-size": 128,
18 | "layer-2-size": 128,
19 | "optimizer": "adam",
20 | }
21 |
22 | # Login to Comet if needed
23 | comet_ml.login()
24 |
25 |
26 | def main():
27 |
28 | mnist = tf.keras.datasets.mnist
29 |
30 | num_classes = 10
31 |
32 | # the data, shuffled and split between train and test sets
33 | (x_train, y_train), (x_test, y_test) = mnist.load_data()
34 |
35 | x_train = x_train.reshape(60000, 784)
36 | x_test = x_test.reshape(10000, 784)
37 | x_train = x_train.astype("float32")
38 | x_test = x_test.astype("float32")
39 | x_train /= 255
40 | x_test /= 255
41 | print(x_train.shape[0], "train samples")
42 | print(x_test.shape[0], "test samples")
43 |
44 | # convert class vectors to binary class matrices
45 | y_train = to_categorical(y_train, num_classes)
46 | y_test = to_categorical(y_test, num_classes)
47 |
48 | train(x_train, y_train, x_test, y_test)
49 |
50 |
51 | def build_model_graph(experiment, input_shape=(784,)):
52 |
53 | model = Sequential(
54 | [
55 |             Flatten(input_shape=input_shape),
56 | Dense(experiment.get_parameter("layer-1-size"), activation="relu"),
57 | Dense(experiment.get_parameter("layer-2-size"), activation="relu"),
58 | Dropout(experiment.get_parameter("dropout")),
59 | Dense(10),
60 | ]
61 | )
62 |
63 | loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
64 |
65 | model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
66 |
67 | return model
68 |
69 |
70 | def finalize_model(experiment, model, x_train, y_train, x_test, y_test):
71 | def test_index_to_example(index):
72 | img = x_test[index].reshape(28, 28)
73 | # log the data to Comet, whether it's log_image, log_text, log_audio, ...
74 | data = experiment.log_image(img, name="test_%d.png" % index)
75 |
76 | if data is None:
77 | return None
78 |
79 | return {"sample": str(index), "assetId": data["imageId"]}
80 |
81 | # Add tags
82 |     experiment.add_tag("keras-mnist-dnn")
83 |
84 | # Confusion Matrix
85 | preds = model.predict(x_test)
86 |
87 | experiment.log_confusion_matrix(
88 | y_test, preds, index_to_example_function=test_index_to_example
89 | )
90 |
91 | # Log Histograms
92 | for layer in model.layers:
93 | if layer.get_weights() != []:
94 | x = layer.get_weights()
95 |             for step, weights in enumerate(x):
96 |                 experiment.log_histogram_3d(weights, name=layer.name, step=step)
97 |
98 | # Log Model
99 | Path("models/").mkdir(exist_ok=True)
100 | model.save("models/mnist-nn.keras")
101 | experiment.log_model("mnist-neural-net", "models/mnist-nn.keras")
102 |
103 |
104 | def train(x_train, y_train, x_test, y_test):
105 |
106 | experiment = comet_ml.start(project_name="comet-example-keras-mnist-dnn")
107 |
108 | # Log custom hyperparameters
109 | experiment.log_parameters(params)
110 |
111 | # Define model
112 | model = build_model_graph(experiment)
113 |
114 | model.fit(
115 | x_train,
116 | y_train,
117 | batch_size=experiment.get_parameter("batch-size"),
118 | epochs=experiment.get_parameter("epochs"),
119 | validation_data=(x_test, y_test),
120 | )
121 |
122 | score = model.evaluate(x_test, y_test, verbose=0)
123 | logging.info("Score %s", score)
124 |
125 | finalize_model(experiment, model, x_train, y_train, x_test, y_test)
126 |
127 |
128 | if __name__ == "__main__":
129 | main()
130 |
--------------------------------------------------------------------------------
/integrations/model-training/keras/keras-mnist-dnn/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | matplotlib
3 | numpy
4 | pillow
5 | tensorflow
6 |
--------------------------------------------------------------------------------
/integrations/model-training/mlflow/mlflow-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # MLflow integration with Comet.ml
2 |
3 | [MLflow](https://github.com/mlflow/mlflow/) is a platform to streamline machine learning development, including tracking experiments, packaging code into reproducible runs, and sharing and deploying models. MLflow offers a set of lightweight APIs that can be used with any existing machine learning application or library (TensorFlow, PyTorch, XGBoost, etc), wherever you currently run ML code (e.g. in notebooks, standalone applications or the cloud).
4 |
5 | ## Documentation
6 |
7 | For more information on using and configuring the MLflow integration, see: https://www.comet.com/docs/v2/integrations/ml-frameworks/mlflow/#configure-comet-for-mlflow
8 |
9 | ## See it
10 |
11 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-mlflow-hello-world/).
12 |
13 | ## Setup
14 |
15 | Install dependencies
16 |
17 | ```bash
18 | python -m pip install -r requirements.txt
19 | ```
20 |
21 | ## Run the example
22 |
23 | This example is based on the following [MLflow tutorial](https://mlflow.org/docs/latest/deep-learning/keras/quickstart/quickstart_keras.html).
24 |
25 | ```bash
26 | python mlflow-hello-world.py
27 | ```
28 |
29 | # Comet-for-MLFlow
30 |
31 | If you have previous MLFlow runs that you would like to visualize in Comet.ml, please see:
32 |
33 | https://github.com/comet-ml/comet-for-mlflow
34 |
--------------------------------------------------------------------------------
/integrations/model-training/mlflow/mlflow-hello-world/mlflow-hello-world.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import os
3 |
4 | import comet_ml
5 |
6 | # You can use 'tensorflow', 'torch' or 'jax' as backend. Make sure to set the
7 | # environment variable before importing.
8 | os.environ["KERAS_BACKEND"] = "tensorflow"
9 |
10 |
11 | import mlflow.keras # noqa: E402
12 | import numpy as np # noqa: E402
13 |
14 | import keras # noqa: E402
15 |
16 | # Login to Comet if necessary
17 | comet_ml.login(project_name="comet-example-mlflow-hello-world")
18 |
19 | # Load dataset
20 | (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
21 | x_train = np.expand_dims(x_train, axis=3)
22 | x_test = np.expand_dims(x_test, axis=3)
23 | print("Sample shape:", x_train[0].shape)
24 |
25 | # Build model
26 | NUM_CLASSES = 10
27 | INPUT_SHAPE = (28, 28, 1)
28 |
29 |
30 | def initialize_model():
31 | return keras.Sequential(
32 | [
33 | keras.Input(shape=INPUT_SHAPE),
34 | keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
35 | keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
36 | keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
37 | keras.layers.GlobalAveragePooling2D(),
38 | keras.layers.Dense(NUM_CLASSES, activation="softmax"),
39 | ]
40 | )
41 |
42 |
43 | model = initialize_model()
44 | model.summary()
45 |
46 | # Train model
47 |
48 | BATCH_SIZE = 64 # adjust this based on the memory of your machine
49 | EPOCHS = 3
50 |
51 | model = initialize_model()
52 |
53 | model.compile(
54 | loss=keras.losses.SparseCategoricalCrossentropy(),
55 | optimizer=keras.optimizers.Adam(),
56 | metrics=["accuracy"],
57 | )
58 |
59 | run = mlflow.start_run()
60 | model.fit(
61 | x_train,
62 | y_train,
63 | batch_size=BATCH_SIZE,
64 | epochs=EPOCHS,
65 | validation_split=0.1,
66 | callbacks=[mlflow.keras.MlflowCallback(run)],
67 | )
68 |
69 | mlflow.keras.log_model(model, "model", registered_model_name="Test Model")
70 |
71 | mlflow.end_run()
72 |
--------------------------------------------------------------------------------
/integrations/model-training/mlflow/mlflow-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | keras
3 | mlflow
4 | tensorflow
5 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch-lightning/pytorch-lightning-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # Pytorch Lightning integration with Comet.ml
2 |
3 | [PyTorch Lightning](https://lightning.ai/docs/pytorch/stable/) is the deep learning framework for professional AI researchers and machine learning engineers who need maximal flexibility without sacrificing performance at scale. Lightning evolves with you as your projects go from idea to paper/production.
4 |
5 | Instrument PyTorch Lightning with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the PyTorch Lightning integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch-lightning/](https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch-lightning/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch-lightning)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Experiment](https://www.comet.com/examples/comet-example-pytorch-lightning/53ea47db44164a15af3a06a12f112f67).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 |
26 | ```bash
27 | python pytorch-lightning-hello-world.py
28 | ```
29 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch-lightning/pytorch-lightning-hello-world/pytorch-lightning-hello-world.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | import os
5 |
6 | import comet_ml
7 |
8 | import lightning.pytorch as pl
9 | import torch
10 | import torch.nn.functional as F
11 | from lightning import Trainer
12 | from lightning.pytorch.loggers import CometLogger
13 | from torch.utils.data import DataLoader
14 | from torchvision import transforms
15 | from torchvision.datasets import MNIST
16 |
17 | comet_ml.login(project_name="comet-example-pytorch-lightning")
18 |
19 |
20 | # Arguments made to CometLogger are passed on to the comet_ml.Experiment class
21 | comet_logger = CometLogger()
22 |
23 |
24 | class Model(pl.LightningModule):
25 | def __init__(self, layer_size=784):
26 | super().__init__()
27 | self.save_hyperparameters()
28 | self.l1 = torch.nn.Linear(layer_size, 10)
29 |
30 | def forward(self, x):
31 | return torch.relu(self.l1(x.view(x.size(0), -1)))
32 |
33 | def training_step(self, batch, batch_nb):
34 | x, y = batch
35 | loss = F.cross_entropy(self(x), y)
36 | self.log("train_loss", loss)
37 | return loss
38 |
39 | def validation_step(self, batch, batch_nb):
40 | x, y = batch
41 | y_hat = self.forward(x)
42 | loss = F.cross_entropy(y_hat, y)
43 | self.log("val_loss", loss)
44 | return loss
45 |
46 | def configure_optimizers(self):
47 | return torch.optim.Adam(self.parameters(), lr=0.02)
48 |
49 |
50 | PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
51 | BATCH_SIZE = 256 if torch.cuda.device_count() else 64
52 |
53 |
54 | # Init our model
55 | model = Model()
56 |
57 | # Init DataLoader from MNIST Dataset
58 | train_ds = MNIST(
59 | PATH_DATASETS, train=True, download=True, transform=transforms.ToTensor()
60 | )
61 | train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE)
62 |
63 | eval_ds = MNIST(
64 | PATH_DATASETS, train=False, download=True, transform=transforms.ToTensor()
65 | )
66 | eval_loader = DataLoader(eval_ds, batch_size=BATCH_SIZE)
67 |
68 | comet_logger.log_hyperparams({"batch_size": BATCH_SIZE})
69 |
70 | # Initialize a trainer
71 | trainer = Trainer(max_epochs=3, logger=comet_logger)
72 |
73 | # Train the model ⚡
74 | trainer.fit(model, train_loader, eval_loader)
75 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch-lightning/pytorch-lightning-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | lightning
3 | numpy
4 | torch
5 | torchvision
6 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch-lightning/pytorch-lightning-optimizer/README.md:
--------------------------------------------------------------------------------
1 | # PyTorch Lightning integration with Comet
2 |
3 | [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/latest/) is the deep learning framework for professional AI researchers and machine learning engineers who need maximal flexibility without sacrificing performance at scale. Lightning evolves with you as your projects go from idea to paper/production.
4 |
5 | Instrument PyTorch Lightning with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
6 |
7 |
8 | ## Documentation
9 |
10 | For more information on using and configuring the PyTorch Lightning integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch-lightning/](https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch-lightning/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch-lightning)
11 |
12 | ## See it
13 |
14 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-pytorch-lightning-optimizer/view/4oWgNi4eS5IEr3rZhM1PwzDNq/panels?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch-lightning).
15 |
16 | ## Setup
17 |
18 | Install dependencies
19 |
20 | ```bash
21 | python -m pip install -r requirements.txt
22 | ```
23 |
24 | ## Run the example
25 |
26 | This example shows how to use PyTorch Lightning and Comet Optimizer together to easily and efficiently find the best set of hyper-parameters for your models:
27 |
28 |
29 | ```bash
30 | python pytorch-lightning-optimizer.py
31 | ```
32 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch-lightning/pytorch-lightning-optimizer/pytorch-lightning-optimizer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # *******************************************************
3 | # ____ _ _
4 | # / ___|___ _ __ ___ ___| |_ _ __ ___ | |
5 | # | | / _ \| '_ ` _ \ / _ \ __| | '_ ` _ \| |
6 | # | |__| (_) | | | | | | __/ |_ _| | | | | | |
7 | # \____\___/|_| |_| |_|\___|\__(_)_| |_| |_|_|
8 | #
9 | # Sign up for free at http://www.comet.com
10 | # Copyright (C) 2015-2020 Comet ML INC
11 | # This file can not be copied and/or distributed without
12 | # the express permission of Comet ML Inc.
13 | # *******************************************************
14 |
15 | import os
16 |
17 | import comet_ml
18 | from comet_ml import Optimizer
19 |
20 | import torch
21 | import torch.nn.functional as F
22 | import torchvision.transforms as transforms
23 | from pytorch_lightning.loggers import CometLogger
24 | from torch.utils.data import DataLoader
25 | from torchvision.datasets import MNIST
26 |
27 | from pytorch_lightning import LightningModule, Trainer
28 |
29 | # Login to Comet if needed
30 | comet_ml.login(project_name="comet-example-pytorch-lightning-optimizer")
31 |
32 |
33 | class PyTorchLightningModel(LightningModule):
34 | def __init__(self, learning_rate):
35 | super().__init__()
36 | self.save_hyperparameters()
37 | self.l1 = torch.nn.Linear(28 * 28, 10)
38 |
39 | def forward(self, x):
40 | return torch.relu(self.l1(x.view(x.size(0), -1)))
41 |
42 | def training_step(self, batch, batch_idx):
43 | x, y = batch
44 | y_hat = self(x)
45 | return {"loss": F.cross_entropy(y_hat, y)}
46 |
47 | def train_dataloader(self):
48 | return DataLoader(
49 | MNIST(
50 | os.getcwd(), train=True, download=True, transform=transforms.ToTensor()
51 | ),
52 | batch_size=32,
53 | )
54 |
55 | def configure_optimizers(self):
56 | return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
57 |
58 |
59 | optimizer_config = {
60 | "algorithm": "bayes",
61 | "spec": {"maxCombo": 5},
62 | "parameters": {
63 | "learning_rate": {"min": 0.01, "max": 0.99, "type": "double", "gridSize": 10}
64 | },
65 | }
66 |
67 |
68 | def run():
69 | optimizer = Optimizer(optimizer_config)
70 |
71 | for parameters in optimizer.get_parameters():
72 | model = PyTorchLightningModel(**parameters["parameters"])
73 |
74 | comet_logger = CometLogger(
75 | optimizer_data=parameters,
76 | )
77 |
78 | trainer = Trainer(
79 | max_epochs=1,
80 | logger=[comet_logger],
81 | )
82 |
83 | trainer.fit(model)
84 |
85 |
86 | if __name__ == "__main__":
87 | run()
88 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch-lightning/pytorch-lightning-optimizer/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | pytorch_lightning
3 | torch
4 | torchvision
5 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch/pytorch-mnist/README.md:
--------------------------------------------------------------------------------
1 | # Pytorch integration with Comet.ml
2 |
3 | [PyTorch](https://pytorch.org/) is a popular open source machine learning framework based on the Torch library, used for applications such as computer vision and natural language processing.
4 |
5 | PyTorch enables fast, flexible experimentation and efficient production through a user-friendly front-end, distributed training, and ecosystem of tools and libraries.
6 |
7 | Instrument PyTorch with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
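
If you only need the core pattern, the hedged sketch below logs a metric from a plain PyTorch training loop with `Experiment.log_metric`; the synthetic data, model, and project name are placeholders, not the shipped example.

```python
import comet_ml

import torch

comet_ml.login()
experiment = comet_ml.start(project_name="pytorch-logging-sketch")  # illustrative name

# Toy model and data
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.CrossEntropyLoss()
x = torch.randn(128, 10)
y = torch.randint(0, 2, (128,))

for step in range(20):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    optimizer.step()
    experiment.log_metric("train_loss", loss.item(), step=step)  # one point per step in Comet

experiment.end()
```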
8 |
9 |
10 | ## Documentation
11 |
12 | For more information on using and configuring the PyTorch integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch/](https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch)
13 |
14 | ## See it
15 |
16 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-pytorch-mnist?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch).
17 |
18 | ## Setup
19 |
20 | Install dependencies
21 |
22 | ```bash
23 | python -m pip install -r requirements.txt
24 | ```
25 |
26 | ## Run the example
27 |
28 | This example is based on the tutorial from [Yunjey](https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/feedforward_neural_network/main.py). The code trains an RNN to classify hand-written digits from the MNIST dataset.
29 |
30 |
31 | ```bash
32 | python pytorch-mnist-example.py
33 | ```
34 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch/pytorch-mnist/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.38.0
2 | torch
3 | torchvision
4 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch/pytorch-rich-logging/README.md:
--------------------------------------------------------------------------------
1 | # Pytorch integration with Comet.ml
2 |
3 | [PyTorch](https://pytorch.org/) is a popular open source machine learning framework based on the Torch library, used for applications such as computer vision and natural language processing.
4 |
5 | PyTorch enables fast, flexible experimentation and efficient production through a user-friendly front-end, distributed training, and ecosystem of tools and libraries.
6 |
7 | Instrument PyTorch with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
8 |
9 | ## Documentation
10 |
11 | For more information on using and configuring the PyTorch integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch/](https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch)
12 |
13 | ## See it
14 |
15 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-pytorch-rich-logging/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch).
16 |
17 | ## Setup
18 |
19 | Install dependencies
20 |
21 | ```bash
22 | python -m pip install -r requirements.txt
23 | ```
24 |
25 | ## Run the example
26 |
27 | This example is based on the tutorial from [Yunjey](https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/feedforward_neural_network/main.py). The code trains an RNN to classify hand-written digits from the MNIST dataset.
28 |
29 |
30 | ```bash
31 | python pytorch-rich-logging.py
32 | ```
33 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch/pytorch-rich-logging/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.24.1
2 | matplotlib
3 | numpy
4 | pillow
5 | torch
6 | torchvision
7 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch/pytorch-tensorboard/README.md:
--------------------------------------------------------------------------------
1 | # Pytorch Tensorboard integration with Comet.ml
2 |
3 | [PyTorch](https://pytorch.org/) is a popular open source machine learning framework based on the Torch library, used for applications such as computer vision and natural language processing.
4 |
5 | PyTorch enables fast, flexible experimentation and efficient production through a user-friendly front-end, distributed training, and ecosystem of tools and libraries.
6 |
7 | TensorBoard is a visualization toolkit for machine learning experimentation. TensorBoard allows tracking and visualizing metrics such as loss and accuracy, visualizing the model graph, viewing histograms, displaying images and much more.
8 |
9 | PyTorch now includes native TensorBoard support to let you log PyTorch models, metrics, and images.
10 |
11 | Instrument PyTorch's Tensorboard with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
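
The hedged sketch below shows the pattern this example relies on, assuming Comet's TensorBoard auto-logging (enabled by default when `comet_ml` is imported and the experiment is created first) mirrors values written through `SummaryWriter`; the metric values and project name are illustrative.

```python
import comet_ml

from torch.utils.tensorboard import SummaryWriter

comet_ml.login()
experiment = comet_ml.start(project_name="pytorch-tensorboard-sketch")  # illustrative name

writer = SummaryWriter()
for step in range(10):
    # Values written to TensorBoard are also sent to the Comet experiment
    writer.add_scalar("train/loss", 1.0 / (step + 1), global_step=step)
writer.close()

experiment.end()
```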
12 |
13 | ## Documentation
14 |
15 | For more information on using and configuring the PyTorch integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch/](https://www.comet.com/docs/v2/integrations/ml-frameworks/pytorch/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch)
16 |
17 | ## See it
18 |
19 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-pytorch-tensorboard).
20 |
21 | ## Setup
22 |
23 | Install dependencies
24 |
25 | ```bash
26 | python -m pip install -r requirements.txt
27 | ```
28 |
29 | ## Run the example
30 |
31 | This example is based on the [Pytorch tutorial](https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html). The code trains a CNN to classify clothing using the Fashion-MNIST dataset.
32 |
33 | ```bash
34 | python pytorch-tensorboard-example.py
35 | ```
36 |
--------------------------------------------------------------------------------
/integrations/model-training/pytorch/pytorch-tensorboard/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | matplotlib>=1.1.0
3 | numpy
4 | tensorboard
5 | torch
6 | torchvision
7 |
--------------------------------------------------------------------------------
/integrations/model-training/ray-train/ray-train-hello-world-transformers/README.md:
--------------------------------------------------------------------------------
1 | # Ray-Train integration with Comet.ml
2 |
3 | [Ray Train](https://docs.ray.io/en/latest/train/train.html) scales model training for popular ML frameworks such as Torch, XGBoost, TensorFlow, and more. It seamlessly integrates with other Ray libraries such as Tune and Predictors.
4 |
5 | Comet integrates with Ray Train by letting you easily monitor the resource usage of all of your workers, so you can make sure your expensive GPUs are fully utilized and your CPUs are not the bottleneck in your training.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the Ray-Train integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/ray/#ray-train](https://www.comet.com/docs/v2/integrations/ml-frameworks/ray/#ray-train/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=ray-train)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-ray-train-hugginface-transformers/).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 |
26 | ```bash
27 | python Comet_with_ray_train_huggingface_transformers.py
28 | ```
29 |
--------------------------------------------------------------------------------
/integrations/model-training/ray-train/ray-train-hello-world-transformers/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate>=0.12.0
2 | comet_ml>=3.31.5
3 | datasets
4 | evaluate
5 | protobuf
6 | ray[air]>=2.1.0
7 | scikit-learn
8 | scipy
9 | sentencepiece
10 | torch>=1.3
11 | transformers>=4.43.0
12 |
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_completed_sagemaker_runs/README.md:
--------------------------------------------------------------------------------
1 | # Logging Sagemaker runs to Comet
2 |
3 | Comet supports transferring data from Sagemaker runs to Comet's Experiment Management tool. This approach requires no changes
4 | to existing Sagemaker code: data from completed Sagemaker training runs is simply copied over to Comet.
5 |
6 | There are three ways to log data from a completed Sagemaker Training Job to Comet:
7 |
8 | ### 1. Using the Estimator object
9 |
10 | The first method involves passing the Sagemaker `estimator` object directly into Comet's `log_sagemaker_training_job_v1` utility function.
11 |
12 | ```python
13 | from comet_ml.integration.sagemaker import log_sagemaker_training_job_v1
14 |
15 | COMET_API_KEY = ""
16 | COMET_WORKSPACE = ""
17 | COMET_PROJECT_NAME = "Your Comet Project Name"
18 |
19 | log_sagemaker_training_job_v1(
20 | estimator,
21 | api_key=COMET_API_KEY,
22 | workspace=COMET_WORKSPACE,
23 | project_name=COMET_PROJECT_NAME
24 | )
25 | ```
26 |
27 | ### 2. Using the Training Job Name
28 |
29 | ```python
30 | from comet_ml.integration.sagemaker import log_sagemaker_training_job_by_name_v1
31 |
32 | COMET_API_KEY = ""
33 | COMET_WORKSPACE = ""
34 | COMET_PROJECT_NAME = "Your Comet Project Name"
35 |
36 | TRAINING_JOB_NAME = ""
37 |
38 | log_sagemaker_training_job_by_name_v1(
39 | TRAINING_JOB_NAME,
40 | api_key=COMET_API_KEY,
41 | workspace=COMET_WORKSPACE,
42 | project_name=COMET_PROJECT_NAME
43 | )
44 | ```
45 |
46 | ### 3. Automatically log data from the last completed Training Job
47 |
48 | ```python
49 | from comet_ml.integration.sagemaker import log_last_sagemaker_training_job_v1
50 |
51 | COMET_API_KEY = ""
52 | COMET_WORKSPACE = ""
53 | COMET_PROJECT_NAME = "Your Comet Project Name"
54 |
55 | log_last_sagemaker_training_job_v1(
56 | api_key=COMET_API_KEY,
57 | workspace=COMET_WORKSPACE,
58 | project_name=COMET_PROJECT_NAME
59 | )
60 | ```
61 |
62 | **Known Limitations:**
63 |
64 | - Data transfer is only compatible with Training Jobs that use Sagemaker's [built-in algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html)
65 | - This method only supports logging the following information from Sagemaker
66 | - Hyperparameters
67 | - Metrics
68 | - Sagemaker specific metadata (BillableTimeInSeconds, TrainingImage, etc)
69 | - Sagemaker notebook code
70 | - Real time data logging is not supported from the Sagemaker job
71 | - Metrics are logged based on wall clock time. Step/Epoch information is not captured
72 |
73 |
74 | For more information, please refer to our [Sagemaker Documentation](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/integration.sagemaker/)
75 |
76 |
77 | ## Run the Example
78 |
79 | To run this example, you will need both a Sagemaker account and a [Comet account](https://comet.com/signup)
80 |
81 | 1. Upload `mnist.py` and `train_mnist.ipynb` to your Sagemaker Notebook instance.
82 |
83 | 2. Run the `train_mnist.ipynb` Notebook to create a Sagemaker Training Job and log the data to Comet.
84 |
85 |
86 | ## Example Project
87 |
88 | Here is an example of a completed training run that has been logged from Sagemaker:
89 |
90 | [Sagemaker Pytorch MNIST project](https://www.comet.com/examples/comet-example-sagemaker-completed-run-pytorch-mnist/fb5b85fa59b24110b9e786e4d237df91?experiment-tab=panels&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=wall)
91 |
92 |
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_custom_scripts/README.md:
--------------------------------------------------------------------------------
1 | # Using Comet with Sagemaker
2 |
3 | Sagemaker is Amazon's end-to-end machine learning service that targets a large swath of data science and machine learning practitioners. With Sagemaker, data scientists and developers can build and train machine learning models, and directly deploy them into a production-ready hosted environment. Sagemaker's product offerings span the needs of data/business analysts, data scientists, and machine learning engineers.
4 |
5 | Comet is an excellent complement to Sagemaker, enhancing the developer experience by allowing users to easily track experiments, collaborate with team members, and visualize results in an intuitive and easy-to-understand way while using the frameworks and tools that they are most comfortable with. Additionally, the platform provides a wide range of customization options, including the ability to create custom visualizations and dashboards, so that users can tailor their experience to meet their specific needs.
6 |
7 | By using Comet, users can streamline their workflows while benefiting from Sagemaker's powerful infrastructure orchestration and model deployment capabilities.
8 |
9 | ## Logging Custom Scripts with Comet and Sagemaker
10 | Comet requires minimal changes to your existing Sagemaker workflow in order to get up and running. Let’s take a look at a simple example that uses the Sagemaker SDK and Notebook instances to run a custom script.
11 |
12 | ```
13 | ├── src
14 | │ ├── train.py
15 | │ └── requirements.txt
16 | └── launch.ipynb
17 | ```
18 |
19 | Your `src` directory would contain the model specific code needed to execute your training run, while `launch.ipynb` would run in your Notebook instance, and contain code related to configuring and launching your job with the Sagemaker SDK.
20 |
21 | To enable Comet logging in this workflow, simply
22 |
23 | 1. Add `comet_ml` as a dependency in your `requirements.txt` file
24 | 2. Import the `comet_ml` library at the top of the `train.py` script
25 | 3. Create a Comet `Experiment` object within the training script (see the sketch after this list)
26 | 4. Pass your Comet credentials to the Sagemaker Estimator using the `environment` argument.
27 | 5. Launch your training job in Sagemaker using `estimator.fit`
28 |
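Here is a hypothetical excerpt of `src/train.py` illustrating steps 2 and 3; the training loop and metric name are placeholders, not the shipped example code. Because the credentials are passed through the Estimator's `environment` argument (step 4), nothing needs to be hard-coded in the script.

```python
import comet_ml  # import before your ML framework so auto-logging can hook in


def main():
    # Reads COMET_API_KEY / COMET_WORKSPACE / COMET_PROJECT_NAME from the environment
    experiment = comet_ml.Experiment()

    for epoch in range(3):
        ...  # your usual training code
        experiment.log_metric("epoch", epoch, epoch=epoch)  # placeholder metric


if __name__ == "__main__":
    main()
```
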
29 | ### Examples
30 |
31 | - [Image Classification with Pytorch](/integrations/model-training/sagemaker/log_custom_scripts/pytorch-mnist)
32 | - [Image Classification with Tensorflow](/integrations/model-training/sagemaker/log_custom_scripts/tensorflow-mnist)
33 | - [Text Classification with HuggingFace](/integrations/model-training/sagemaker/log_custom_scripts/huggingface-text-classification)
34 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_custom_scripts/huggingface-text-classification/README.md:
--------------------------------------------------------------------------------
1 | # Logging Data from Custom Scripts
2 |
3 | The preferred way to use Comet with Sagemaker is to add Comet to your script before launching your Sagemaker job.
4 |
5 | Executing your training jobs in this manner has several advantages over migrating data from completed runs, including:
6 |
7 | 1. Being able to use Comet's auto-logging capabilities.
8 |
9 | 2. Supporting real-time reporting of metrics with step/epoch information.
10 |
11 | 3. Being able to take advantage of Comet's advanced logging capabilities, such as:
12 | - logging media (image, text, audio)
13 | - logging interactive confusion matrices
14 | - auto-logging system metrics (CPU/GPU usage)
15 | - logging models to Comet's model registry.
16 |
17 | ## Setup
18 | In order to use a customized script with Sagemaker, you must create a directory to hold your training script and requirements file.
19 |
20 | ```shell
21 | ├── README.md
22 | ├── src
23 | │ ├── text_classification.py
24 | │ └── requirements.txt
25 | └── train_text_classification.ipynb
26 | ```
27 |
28 | Next, when creating the Estimator object specify your Comet credentials as environment variables.
29 |
30 | ```python
31 | from sagemaker.huggingface import HuggingFace
32 |
33 | COMET_API_KEY = ""
34 | COMET_WORKSPACE = ""
35 | COMET_PROJECT_NAME = ""
36 |
37 | estimator = HuggingFace(
38 | source_dir="src",
39 | entry_point="text_classification.py",
40 | role=role,
41 | py_version="py38",
42 | framework_version="1.11.0",
43 | environment={
44 | "COMET_API_KEY": COMET_API_KEY,
45 | "COMET_PROJECT_NAME": COMET_PROJECT_NAME,
46 | "COMET_WORKSPACE": COMET_WORKSPACE
47 | }
48 | )
49 | ```
50 |
51 | ## Run the Example
52 |
53 | To run this example, you will need both a Sagemaker account and a [Comet account](https://comet.com/signup)
54 |
55 | 1. Upload the contents of the `src` directory to your Sagemaker Notebook instance.
56 | 2. Upload `train_text_classification.ipynb` to your Sagemaker Notebook instance.
57 | 3. Run the `train_text_classification.ipynb` Notebook to create a Sagemaker Training Job and log the data to Comet.
58 |
59 | ## Example Project
60 |
61 | Here is an example of a completed training run that has been logged from Sagemaker:
62 |
63 | [Sagemaker Hugging Face project](https://www.comet.com/examples/comet-example-sagemaker-custom-transformers-text-classification/fa56e29df07245ada88072f7fdd609b7?experiment-tab=panels&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step)
64 |
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_custom_scripts/huggingface-text-classification/src/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_custom_scripts/pytorch-mnist/README.md:
--------------------------------------------------------------------------------
1 | # Using Comet with Pytorch and Sagemaker
2 |
3 | The preferred way to use Comet with Sagemaker is to add Comet to your script before launching your Sagemaker job.
4 |
5 | Executing your training jobs in this manner has several advantages over migrating data from completed runs, including:
6 |
7 | 1. Being able to leverage Comet's auto-logging capabilities.
8 | 2. Supporting real-time reporting of metrics with step/epoch information.
9 | 3. Being able to take advantage of Comet's advanced logging capabilities, such as:
10 | - logging media (image, text, audio)
11 | - logging interactive confusion matrices
12 | - auto-logging system metrics (CPU/GPU usage)
13 | - auto-logging the model graph
14 | - logging models to Comet's model registry.
15 |
16 | ## Setup
17 | In order to use a customized script with Sagemaker, you must create a directory to hold your training script and requirements file.
18 |
19 | ```shell
20 | ├── README.md
21 | ├── src
22 | │ ├── mnist.py
23 | │ └── requirements.txt
24 | └── train_mnist.ipynb
25 | ```
26 |
27 | Next, when creating the Estimator object specify your Comet credentials as environment variables.
28 |
29 | ```python
30 | from sagemaker.pytorch import PyTorch
31 |
32 | COMET_API_KEY = ""
33 | COMET_WORKSPACE = ""
34 | COMET_PROJECT_NAME = ""
35 |
36 | estimator = PyTorch(
37 | source_dir="src",
38 | entry_point="mnist.py",
39 | role=role,
40 | py_version="py38",
41 | framework_version="1.11.0",
42 | environment={
43 | "COMET_API_KEY": COMET_API_KEY,
44 | "COMET_PROJECT_NAME": COMET_PROJECT_NAME,
45 | "COMET_WORKSPACE": COMET_WORKSPACE
46 | }
47 | )
48 | ```
49 |
50 | ## Run the Example
51 |
52 | To run this example, you will need both a Sagemaker account and a [Comet account](https://comet.com/signup)
53 |
54 | 1. Upload the contents of the `src` directory to your Sagemaker Notebook instance.
55 | 2. Upload `train_mnist.ipynb` to your Sagemaker Notebook instance.
56 | 3. Run the `train_mnist.ipynb` Notebook to create a Sagemaker Training Job and log the data to Comet.
57 |
58 | ## Example Project
59 |
60 | Here is an example of a completed training run that has been logged from Sagemaker:
61 |
62 | [Sagemaker Pytorch MNIST project](https://www.comet.com/examples/comet-example-sagemaker-custom-pytorch-mnist/7c9f085e02ef43a58a0ad11527322ea7?experiment-tab=panels&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step)
63 |
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_custom_scripts/pytorch-mnist/src/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_custom_scripts/tensorflow-mnist/README.md:
--------------------------------------------------------------------------------
1 | # Using Comet with Tensorflow and Sagemaker
2 |
3 | The preferred way to use Comet with Sagemaker is to add Comet to your script before launching your Sagemaker job.
4 |
5 | Executing your training jobs in this manner has several advantages over migrating data from completed runs, including:
6 |
7 | 1. Being able to leverage Comet's auto-logging capabilities.
8 | 2. Supporting real-time reporting of metrics with step/epoch information.
9 | 3. Being able to take advantage of Comet's advanced logging capabilities, such as:
10 | - logging media (image, text, audio)
11 | - logging interactive confusion matrices
12 | - auto-logging system metrics (CPU/GPU usage)
13 | - auto-logging the model graph
14 | - logging models to Comet's model registry.
15 |
16 | ## Setup
17 | In order to use a customized script with Sagemaker, you must create a directory to hold your training script and requirements file.
18 |
19 | ```shell
20 | ├── README.md
21 | ├── src
22 | │ ├── mnist.py
23 | │ └── requirements.txt
24 | └── train_mnist.ipynb
25 | ```
26 |
27 | Next, when creating the Estimator object specify your Comet credentials as environment variables.
28 |
29 | ```python
30 | from sagemaker.tensorflow import TensorFlow
31 |
32 | COMET_API_KEY = ""
33 | COMET_WORKSPACE = ""
34 | COMET_PROJECT_NAME = ""
35 |
36 | estimator = TensorFlow(
37 | source_dir="src",
38 | entry_point="mnist.py",
39 | role=role,
40 | framework_version="2.2",
41 | py_version="py37",
42 | environment={
43 | "COMET_API_KEY": COMET_API_KEY,
44 | "COMET_PROJECT_NAME": COMET_PROJECT_NAME,
45 | "COMET_WORKSPACE": COMET_WORKSPACE
46 | }
47 | )
48 | ```
49 |
50 | ## Run the Example
51 |
52 | To run this example, you will need both a Sagemaker account and a [Comet account](https://comet.com/signup)
53 |
54 | 1. Upload the contents of the `src` directory to your Sagemaker Notebook instance.
55 | 2. Upload `train_mnist.ipynb` to your Sagemaker Notebook instance.
56 | 3. Run the `train_mnist.ipynb` Notebook to create a Sagemaker Training Job and log the data to Comet.
57 |
58 | ## Example Project
59 |
60 | Here is an example of a completed training run that has been logged from Sagemaker:
61 |
62 | [Sagemaker Tensorflow MNIST project](https://www.comet.com/examples/comet-example-sagemaker-tensorflow-custom-mnist/3766c3d4519844509ca4dab662730598?experiment-tab=panels&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step)
63 |
--------------------------------------------------------------------------------
/integrations/model-training/sagemaker/log_custom_scripts/tensorflow-mnist/src/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow
2 | comet_ml
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-classification-example/README.md:
--------------------------------------------------------------------------------
1 | # Scikit-Learn integration with Comet.ml
2 |
3 | [Scikit-learn](https://github.com/scikit-learn/scikit-learn) is a free software machine learning library for the Python programming language. It features various classification, regression and clustering algorithms including support-vector machines, random forests, gradient boosting, k-means and DBSCAN, and is designed to interoperate with the Python numerical and scientific libraries NumPy and SciPy.
4 |
5 |
6 | ## Documentation
7 |
8 | For more information on using and configuring the Scikit-Learn integration, see: https://www.comet.com/docs/v2/integrations/ml-frameworks/scikit-learn/
9 |
10 | ## See it
11 |
12 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-scikit-learn-classification/).
13 |
14 | ## Setup
15 |
16 | Install dependencies
17 |
18 | ```bash
19 | python -m pip install -r requirements.txt
20 | ```
21 |
22 | ## Run the example
23 |
24 | This example uses the breast cancer dataset and highlights Scikit-Learn's LogisticRegression and GridSearchCV.
25 |
26 | ```bash
27 | python comet-scikit-classification-example.py
28 | ```
29 |
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-classification-example/comet-scikit-classification-example.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import comet_ml
3 | from comet_ml.integration.sklearn import log_model
4 |
5 | import numpy as np
6 | from sklearn.datasets import load_breast_cancer
7 | from sklearn.linear_model import LogisticRegression
8 | from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
9 | from sklearn.model_selection import GridSearchCV, train_test_split
10 | from sklearn.preprocessing import StandardScaler
11 |
12 | comet_ml.login()
13 |
14 | experiment = comet_ml.start(project_name="comet-example-scikit-learn-classification")
15 |
16 |
17 | random_state = 42
18 |
19 | cancer = load_breast_cancer()
20 | print("cancer.keys(): {}".format(cancer.keys()))
21 | print("Shape of cancer data: {}\n".format(cancer.data.shape))
22 | print(
23 | "Sample counts per class:\n{}".format(
24 | {n: v for n, v in zip(cancer.target_names, np.bincount(cancer.target))}
25 | )
26 | )
27 | print("\nFeature names:\n{}".format(cancer.feature_names))
28 |
29 | X_train, X_test, y_train, y_test = train_test_split(
30 | cancer.data, cancer.target, stratify=cancer.target, random_state=random_state
31 | )
32 |
33 | scaler = StandardScaler()
34 | X_train_scaled = scaler.fit_transform(X_train)
35 | X_test_scaled = scaler.transform(X_test)
36 |
37 | logreg = LogisticRegression(max_iter=1000)
38 |
39 | param_grid = {"C": [0.001, 0.01, 0.1, 1, 5, 10, 20, 50, 100]}
40 |
41 | clf = GridSearchCV(logreg, param_grid=param_grid, cv=10, n_jobs=-1)
42 |
43 | clf.fit(X_train_scaled, y_train)
44 |
45 | y_pred = clf.predict(X_test_scaled)
46 |
47 | print("\nResults\nConfusion matrix \n {}".format(confusion_matrix(y_test, y_pred)))
48 |
49 | f1 = f1_score(y_test, y_pred)
50 | precision = precision_score(y_test, y_pred)
51 | recall = recall_score(y_test, y_pred)
52 |
53 | params = {
54 | "random_state": random_state,
55 | "model_type": "logreg",
56 | "scaler": "standard scaler",
57 | "param_grid": str(param_grid),
58 | "stratify": True,
59 | }
60 | metrics = {"f1": f1, "recall": recall, "precision": precision}
61 |
62 | experiment.log_dataset_hash(X_train_scaled)
63 | experiment.log_parameters(params)
64 | experiment.log_metrics(metrics)
65 |
66 | # Save the best estimator as a Model to Comet
67 | log_model(experiment, "ScikitLearnClassificationModel", clf)
68 |
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-classification-example/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | scikit-learn
3 |
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-model-saving-example/requirements.txt:
--------------------------------------------------------------------------------
1 | cloudpickle
2 | comet_ml>=3.44.0
3 | scikit_learn
4 |
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-model-saving-example/sklearn-model-saving-example.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import os
3 |
4 | import comet_ml
5 | from comet_ml.integration.sklearn import load_model, log_model
6 |
7 | import cloudpickle
8 | from sklearn import datasets, ensemble
9 | from sklearn.model_selection import train_test_split
10 | from sklearn.preprocessing import MinMaxScaler
11 |
12 | MODEL_NAME = "my-sklearn-model"
13 | WORKSPACE = os.environ["COMET_WORKSPACE"]
14 |
15 | # Login to comet and create an Experiment
16 |
17 | comet_ml.login()
18 |
19 | experiment = comet_ml.start(
20 | project_name="comet-example-scikit-learn-model-saving-example"
21 | )
22 |
23 | # Prepare data
24 |
25 | X, y = datasets.fetch_california_housing(return_X_y=True)
26 | X_train, X_test, y_train, y_test = train_test_split(X, y)
27 |
28 | scaler = MinMaxScaler()
29 | X_train_scaled = scaler.fit_transform(X_train)
30 | X_test_scaled = scaler.transform(X_test)
31 |
32 | # Train model
33 |
34 | model = ensemble.RandomForestRegressor().fit(X_train_scaled, y_train)
35 |
36 | # Save model to Comet
37 | log_model(experiment, MODEL_NAME, model, persistence_module=cloudpickle)
38 | experiment.register_model(MODEL_NAME)
39 |
40 | # Upload everything
41 | experiment.end()
42 |
43 | # Load model from Comet Model Registry
44 | loaded_model = load_model(f"registry://{WORKSPACE}/{MODEL_NAME}")
45 |
46 | print("LOADED", loaded_model)
47 |
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-nlp-example/README.md:
--------------------------------------------------------------------------------
1 | # Scikit-Learn integration with Comet.ml
2 |
3 | [Scikit-learn](https://github.com/scikit-learn/scikit-learn) is a free software machine learning library for the Python programming language. It features various classification, regression and clustering algorithms including support-vector machines, random forests, gradient boosting, k-means and DBSCAN, and is designed to interoperate with the Python numerical and scientific libraries NumPy and SciPy.
4 |
5 |
6 | ## Documentation
7 |
8 | For more information on using and configuring the Scikit-Learn integration, see: https://www.comet.com/docs/v2/integrations/ml-frameworks/scikit-learn/
9 |
10 | ## See it
11 |
12 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-scikit-learn-nlp/).
13 |
14 | ## Setup
15 |
16 | Install dependencies
17 |
18 | ```bash
19 | python -m pip install -r requirements.txt
20 | ```
21 |
22 | ## Run the example
23 |
24 | This example is based on the [Scikit-Learn tutorial](https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html).
25 |
26 | ```bash
27 | python comet-scikit-nlp-example.py
28 | ```
29 |
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-nlp-example/comet-scikit-nlp-example.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import comet_ml
3 | from comet_ml.integration.sklearn import log_model
4 |
5 | from sklearn.datasets import fetch_20newsgroups
6 | from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
7 | from sklearn.linear_model import SGDClassifier
8 | from sklearn.metrics import accuracy_score
9 | from sklearn.pipeline import Pipeline
10 |
11 | comet_ml.login()
12 |
13 | experiment = comet_ml.start(project_name="comet-example-scikit-learn-nlp")
14 |
15 | # Get dataset and put into train,test lists
16 | categories = ["alt.atheism", "soc.religion.christian", "comp.graphics", "sci.med"]
17 |
18 | twenty_train = fetch_20newsgroups(
19 | subset="train", categories=categories, shuffle=True, random_state=42
20 | )
21 | twenty_test = fetch_20newsgroups(
22 | subset="test", categories=categories, shuffle=True, random_state=42
23 | )
24 |
25 | # log hash of your dataset to Comet.ml
26 | experiment.log_dataset_hash(twenty_train)
27 |
28 | # Build training pipeline
29 |
30 | text_clf = Pipeline(
31 | [
32 | ("vect", CountVectorizer()), # Counts occurrences of each word
33 | # Normalize the counts based on document length
34 | ("tfidf", TfidfTransformer()),
35 | (
36 | "clf",
37 | SGDClassifier(
38 | loss="hinge",
39 | penalty="l2", # Call classifier with vector
40 | alpha=1e-3,
41 | random_state=42,
42 | max_iter=5,
43 | tol=None,
44 | ),
45 | ),
46 | ]
47 | )
48 |
49 | text_clf.fit(twenty_train.data, twenty_train.target)
50 |
51 | # Predict unseen test data based on the fitted classifier
52 | predicted = text_clf.predict(twenty_test.data)
53 |
54 | # Compute accuracy
55 | acc = accuracy_score(twenty_test.target, predicted)
56 | print(acc)
57 | experiment.log_metric(name="accuracy_score", value=acc)
58 |
59 | # Save model to Comet
60 | log_model(experiment, "ScikitLearnNLPModel", text_clf)
61 |
--------------------------------------------------------------------------------
/integrations/model-training/scikit-learn/sklearn-nlp-example/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | scikit-learn
3 |
--------------------------------------------------------------------------------
/integrations/model-training/transformers/transformers-distilbert-fine-tuning/README.md:
--------------------------------------------------------------------------------
1 | # Transformers integration with Comet.ml
2 |
3 | [Hugging Face Transformers](https://github.com/huggingface/transformers) provides
4 | general-purpose Machine Learning models for Natural Language
5 | Understanding (NLU). Transformers gives you easy access to pre-trained model
6 | weights, and interoperability between PyTorch and TensorFlow.
7 |
8 | Instrument Transformers with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
9 |
10 |
11 |
12 |
13 | ## Documentation
14 |
15 | For more information on using and configuring the Transformers integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/transformers/](https://www.comet.com/docs/v2/integrations/ml-frameworks/transformers/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=huggingface)
16 |
17 | ## See it
18 |
19 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-transformers-distilbert-fine-tuning?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch).
20 |
21 | ## Setup
22 |
23 | Install dependencies
24 |
25 | ```bash
26 | python -m pip install -r requirements.txt
27 | ```
28 |
29 | ## Run the example
30 |
31 |
32 | This example shows how to use Comet in a HuggingFace Transformers script.
33 |
34 |
35 | ```bash
36 | python transformers-distilbert-fine-tuning.py
37 | ```
38 |
--------------------------------------------------------------------------------
/integrations/model-training/transformers/transformers-distilbert-fine-tuning/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate
2 | comet_ml>=3.44.0
3 | pandas
4 | scikit-learn
5 | torch
6 | transformers>=4.43.0
7 |
--------------------------------------------------------------------------------
/integrations/model-training/transformers/transformers-distilbert-fine-tuning/transformers-distilbert-fine-tuning.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import warnings
3 |
4 | import comet_ml
5 |
6 | import pandas as pd
7 | import torch
8 | import torch.utils.data as data_utils
9 | from sklearn.metrics import accuracy_score, precision_recall_fscore_support
10 | from sklearn.model_selection import train_test_split
11 |
12 | from transformers import (
13 |     AutoModelForSequenceClassification,
14 |     AutoTokenizer,
15 | Trainer,
16 | TrainingArguments,
17 | )
18 |
19 | EPOCHS = 100
20 |
21 | # Login to Comet if needed
22 | comet_ml.login(project_name="comet-example-transformers-distilbert-fine-tuning")
23 |
24 |
25 | class Dataset(torch.utils.data.Dataset):
26 | def __init__(self, encodings, labels):
27 | self.encodings = encodings
28 | self.labels = labels
29 |
30 | def __getitem__(self, idx):
31 | item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
32 | item["labels"] = torch.tensor(self.labels[idx])
33 | return item
34 |
35 | def __len__(self):
36 | return len(self.labels)
37 |
38 |
39 | def preprocess(texts, labels):
40 | encoded = tokenizer(
41 | texts,
42 | add_special_tokens=True,
43 | truncation=True,
44 | max_length=64,
45 |         padding="max_length",
46 | return_attention_mask=True,
47 | return_tensors="pt",
48 | )
49 |
50 | return encoded, torch.tensor(labels)
51 |
52 |
53 | def compute_metrics(pred):
54 | experiment = comet_ml.get_running_experiment()
55 |
56 | labels = pred.label_ids
57 | preds = pred.predictions.argmax(-1)
58 | precision, recall, f1, _ = precision_recall_fscore_support(
59 | labels, preds, average="macro"
60 | )
61 | acc = accuracy_score(labels, preds)
62 |
63 | if experiment:
64 | experiment.log_confusion_matrix(preds, labels)
65 |
66 | return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
67 |
68 |
69 | warnings.filterwarnings("ignore")
70 |
71 | PRE_TRAINED_MODEL_NAME = "distilbert-base-uncased"
72 | tokenizer = AutoTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
73 |
74 | df = pd.read_csv("data/title_conference.csv")
75 | df.head()
76 | df["Conference"] = pd.Categorical(df["Conference"])
77 | df["Target"] = df["Conference"].cat.codes
78 |
79 | train_data, test_data = train_test_split(df, test_size=0.01, stratify=df["Target"])
80 | train_texts, train_labels = (
81 | train_data["Title"].values.tolist(),
82 | train_data["Target"].values.tolist(),
83 | )
84 | test_texts, test_labels = (
85 | test_data["Title"].values.tolist(),
86 | test_data["Target"].values.tolist(),
87 | )
88 | train_encoded, train_labels = preprocess(train_texts, train_labels)
89 | test_encoded, test_labels = preprocess(test_texts, test_labels)
90 | train_dataset = Dataset(train_encoded, train_labels)
91 | test_dataset = Dataset(test_encoded, test_labels)
92 |
93 | indices = torch.arange(10)
94 | train_dataset = data_utils.Subset(train_dataset, indices)
95 | test_dataset = data_utils.Subset(test_dataset, indices)
96 |
97 |
98 | model = AutoModelForSequenceClassification.from_pretrained(
99 | PRE_TRAINED_MODEL_NAME,
100 | num_labels=len(df["Target"].unique()),
101 | output_attentions=False,
102 | output_hidden_states=False,
103 | )
104 |
105 |
106 | weight_decay = 0.5
107 | learning_rate = 5.0e-5
108 | batch_size = 32
109 |
110 | training_args = TrainingArguments(
111 | seed=42,
112 | output_dir="./results",
113 | overwrite_output_dir=True,
114 | num_train_epochs=EPOCHS,
115 | per_device_train_batch_size=batch_size,
116 | per_device_eval_batch_size=batch_size,
117 | weight_decay=weight_decay,
118 | learning_rate=learning_rate,
119 |     eval_strategy="epoch",
120 | do_train=True,
121 | do_eval=True,
122 | report_to=["comet_ml"],
123 | )
124 | trainer = Trainer(
125 | model=model,
126 | args=training_args,
127 | train_dataset=train_dataset,
128 | eval_dataset=test_dataset,
129 | compute_metrics=compute_metrics,
130 | )
131 |
132 | trainer.train()
133 |
--------------------------------------------------------------------------------
/integrations/model-training/transformers/transformers-google-bert-fine-tuning/README.md:
--------------------------------------------------------------------------------
1 | # Transformers integration with Comet.ml
2 |
3 | [Hugging Face Transformers](https://github.com/huggingface/transformers) provides
4 | general-purpose Machine Learning models for Natural Language
5 | Understanding (NLU). Transformers gives you easy access to pre-trained model
6 | weights, and interoperability between PyTorch and TensorFlow.
7 |
8 | Instrument Transformers with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
9 |
10 |
11 |
12 |
13 | ## Documentation
14 |
15 | For more information on using and configuring the Transformers integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/transformers/](https://www.comet.com/docs/v2/integrations/ml-frameworks/transformers/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=huggingface)
16 |
17 | ## See it
18 |
19 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-transformers-google-bert-fine-tuning/25d673e1153047eda82096f74142e2d0?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=pytorch).
20 |
21 | ## Setup
22 |
23 | Install dependencies
24 |
25 | ```bash
26 | python -m pip install -r requirements.txt
27 | ```
28 |
29 | ## Run the example
30 |
31 |
32 | This example shows how to use Comet in a HuggingFace Transformers script.
33 |
34 |
35 | ```bash
36 | python transformers-google-bert-fine-tuning.py
37 | ```
38 |
--------------------------------------------------------------------------------
/integrations/model-training/transformers/transformers-google-bert-fine-tuning/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.43.2
2 | datasets
3 | evaluate
4 | numpy
5 | scikit-learn
6 | torch
7 | transformers[torch]>=4.43.0
8 |
--------------------------------------------------------------------------------
/integrations/model-training/transformers/transformers-google-bert-fine-tuning/transformers-google-bert-fine-tuning.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import comet_ml
3 |
4 | import evaluate
5 | import numpy as np
6 | from datasets import load_dataset
7 |
8 | from transformers import (
9 | AutoModelForSequenceClassification,
10 | AutoTokenizer,
11 | Trainer,
12 | TrainingArguments,
13 | enable_full_determinism,
14 | )
15 |
16 | SEED = 42
17 |
18 | enable_full_determinism(SEED)
19 |
20 | # Login to Comet if needed
21 | comet_ml.login(project_name="comet-example-transformers-google-bert-fine-tuning")
22 |
23 |
24 | tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
25 |
26 |
27 | def tokenize_function(examples):
28 | return tokenizer(examples["text"], padding="max_length", truncation=True)
29 |
30 |
31 | dataset = load_dataset("yelp_review_full")
32 | dataset["train"] = dataset["train"].shuffle(seed=SEED).select(range(100))
33 | dataset["test"] = dataset["test"].shuffle(seed=SEED).select(range(100))
34 |
35 | tokenized_datasets = dataset.map(tokenize_function, batched=True)
36 |
37 | small_train_dataset = tokenized_datasets["train"]
38 | small_eval_dataset = tokenized_datasets["test"]
39 |
40 | model = AutoModelForSequenceClassification.from_pretrained(
41 | "google-bert/bert-base-cased", num_labels=5
42 | )
43 |
44 | metric = evaluate.load("accuracy")
45 |
46 |
47 | def compute_metrics(eval_pred):
48 | logits, labels = eval_pred
49 | predictions = np.argmax(logits, axis=-1)
50 |
51 | experiment = comet_ml.get_running_experiment()
52 | if experiment:
53 | experiment.log_confusion_matrix(predictions, labels)
54 |
55 | return metric.compute(predictions=predictions, references=labels)
56 |
57 |
58 | EPOCHS = 3
59 |
60 | training_args = TrainingArguments(
61 | seed=SEED,
62 | output_dir="./results",
63 | overwrite_output_dir=True,
64 | num_train_epochs=EPOCHS,
65 | eval_strategy="epoch",
66 | do_train=True,
67 | do_eval=True,
68 | report_to=["all"],
69 | )
70 | trainer = Trainer(
71 | model=model,
72 | args=training_args,
73 | train_dataset=small_train_dataset,
74 | eval_dataset=small_eval_dataset,
75 | compute_metrics=compute_metrics,
76 | )
77 |
78 | trainer.train()
79 |
--------------------------------------------------------------------------------
/integrations/model-training/xgboost/notebooks/xg_data_panel.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/integrations/model-training/xgboost/notebooks/xg_data_panel.gif
--------------------------------------------------------------------------------
/integrations/model-training/xgboost/xgboost-california/README.md:
--------------------------------------------------------------------------------
1 | # XGBoost integration with Comet.ml
2 |
3 | [XGBoost](https://github.com/dmlc/xgboost) is an optimized distributed gradient boosting library designed to be highly efficient, flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework. XGBoost provides parallel tree boosting (also known as GBDT or GBM) that solves many data science problems in a fast and accurate way. The same code runs on major distributed environments (Kubernetes, Hadoop, SGE, MPI, Dask) and can solve problems beyond billions of examples.
4 |
5 | Instrument xgboost with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration.
6 |
7 |
8 | ## Documentation
9 |
10 | For more information on using and configuring the xgboost integration, see: https://www.comet.com/docs/v2/integrations/ml-frameworks/xgboost/
11 |
12 | ## See it
13 |
14 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-xgboost-california/).
15 |
16 | ## Setup
17 |
18 | Install dependencies
19 |
20 | ```bash
21 | python -m pip install -r requirements.txt
22 | ```
23 |
24 | ## Run the example
25 |
26 | This example showcases a simple regression on the California Housing dataset.
27 |
28 |
29 | ```bash
30 | python xgboost-california.py
31 | ```
32 |
--------------------------------------------------------------------------------
/integrations/model-training/xgboost/xgboost-california/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.24.1
2 | graphviz
3 | pandas
4 | scikit-learn
5 | xgboost
6 |
--------------------------------------------------------------------------------
/integrations/model-training/xgboost/xgboost-california/xgboost-california.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # Import Comet
5 | from comet_ml import login, start
6 |
7 | import pandas as pd
8 | from sklearn.datasets import fetch_california_housing
9 | from sklearn.model_selection import train_test_split
10 |
11 | import xgboost as xgb
12 |
13 | # Login to Comet if needed
14 | login()
15 |
16 | experiment = start(project_name="comet-example-xgboost-california")
17 |
18 | # Load and configure california housing dataset
19 | california = fetch_california_housing()
20 | data = pd.DataFrame(california.data)
21 | data.columns = california.feature_names
22 | data["Price"] = california.target
23 | X, y = data.iloc[:, :-1], data.iloc[:, -1]
24 |
25 | # Split data into train and test sets
26 | X_train, X_test, y_train, y_test = train_test_split(
27 | X, y, test_size=0.2, random_state=123
28 | )
29 |
30 | # Define hyperparameters for model
31 | param = {
32 | "objective": "reg:squarederror",
33 | "colsample_bytree": 0.3,
34 | "learning_rate": 0.1,
35 | "max_depth": 5,
36 | "alpha": 10,
37 | "n_estimators": 10,
38 | }
39 |
40 | # Initialize XGBoost Regressor
41 | xg_reg = xgb.XGBRegressor(eval_metric="rmse", **param)
42 |
43 | # Train model
44 | xg_reg.fit(
45 | X_train,
46 | y_train,
47 | eval_set=[(X_train, y_train), (X_test, y_test)],
48 | )
49 |
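50 | # Note: Comet's XGBoost auto-logging is expected to capture the model
51 | # hyperparameters and the per-round evaluation metrics from the eval_set
52 | # above, so no explicit experiment.log_metric() calls are needed here.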
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/kubeflow/kubeflow-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # Kubeflow integration with Comet
2 |
3 | Comet integrates with Kubeflow.
4 |
5 | [Kubeflow](https://github.com/kubeflow/kubeflow) is an open-source machine learning platform that enables using machine learning pipelines to orchestrate complicated workflows running on Kubernetes.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the Kubeflow integration, see: [https://www.comet.com/docs/v2/integrations/third-party-tools/kubeflow/](https://www.comet.com/docs/v2/integrations/third-party-tools/kubeflow/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=kubeflow)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-kubeflow-hello-world/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=vertex).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 | The following example demonstrates how to use the Comet pipelines integration to track the state of pipelines run on Kubeflow. Before running, make sure that you have access to a [Kubeflow environment](https://www.kubeflow.org/docs/started/installing-kubeflow/) or that you have [installed Kubeflow locally](https://www.kubeflow.org/docs/components/pipelines/installation/localcluster-deployment/).
26 |
27 | ```bash
28 | python pipeline.py
29 | ```
30 |
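31 | For reference, a Comet-instrumented pipeline component might look like the following minimal sketch. It assumes KFP v1-style function components, is not a reproduction of `pipeline.py`, and the logged metric value is illustrative:
32 | 
33 | ```python
34 | from kfp import compiler, dsl
35 | from kfp.components import create_component_from_func
36 | 
37 | 
38 | def train(message: str = "hello") -> str:
39 |     # Runs inside the pipeline pod; comet_ml is installed via packages_to_install.
40 |     import comet_ml
41 | 
42 |     experiment = comet_ml.start(project_name="comet-example-kubeflow-hello-world")
43 |     experiment.log_metric("accuracy", 0.9)
44 |     experiment.end()
45 |     return message
46 | 
47 | 
48 | train_op = create_component_from_func(train, packages_to_install=["comet_ml"])
49 | 
50 | 
51 | @dsl.pipeline(name="comet-kubeflow-hello-world")
52 | def pipeline():
53 |     train_op()
54 | 
55 | 
56 | if __name__ == "__main__":
57 |     compiler.Compiler().compile(pipeline_func=pipeline, package_path="pipeline.yaml")
58 | ```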
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/kubeflow/kubeflow-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.24.1
2 | kfp
3 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # Metaflow integration with Comet.ml
2 |
3 | Comet integrates with [Metaflow](https://metaflow.org/).
4 |
5 | Metaflow is a human-friendly Python/R library that helps scientists and engineers build and manage real-life data science projects. Metaflow was originally developed at Netflix to boost productivity of data scientists who work on a wide variety of projects from classical statistics to state-of-the-art deep learning.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring Metaflow integration, please see: [https://www.comet.com/docs/v2/integrations/third-party-tools/metaflow/](https://www.comet.com/docs/v2/integrations/third-party-tools/metaflow/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=metaflow)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-metaflow-hello-world?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=metaflow).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 | This example is adapted from the Metaflow helloworld example, a simple linear workflow that prints 'Metaflow says: Hi!' to the terminal.
26 |
27 | ```bash
28 | python helloworld.py run
29 | ```
30 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-hello-world/helloworld.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from comet_ml import login
4 | from comet_ml.integration.metaflow import comet_flow
5 |
6 | from metaflow import FlowSpec, step
7 |
8 |
9 | @comet_flow(project_name="comet-example-metaflow-hello-world")
10 | class HelloFlow(FlowSpec):
11 | """
12 | A flow where Metaflow prints 'Hi'.
13 |
14 | Run this flow to validate that Metaflow is installed correctly.
15 |
16 | """
17 |
18 | @step
19 | def start(self):
20 | """
21 | This is the 'start' step. All flows must have a step named 'start' that
22 | is the first step in the flow.
23 |
24 | """
25 | print("HelloFlow is starting.")
26 | self.next(self.hello)
27 |
28 | @step
29 | def hello(self):
30 | """
31 | A step for metaflow to introduce itself.
32 |
33 | """
34 | print("Metaflow says: Hi!")
35 | self.next(self.end)
36 |
37 | @step
38 | def end(self):
39 | """
40 | This is the 'end' step. All flows must have an 'end' step, which is the
41 | last step in the flow.
42 |
43 | """
44 | print("HelloFlow is all done.")
45 |
46 |
47 | if __name__ == "__main__":
48 | # Login to Comet if needed
49 | login()
50 |
51 | HelloFlow()
52 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.31.15
2 | metaflow
3 | numpy
4 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/.pylintrc:
--------------------------------------------------------------------------------
1 | [TYPECHECK]
2 |
3 | # List of members which are set dynamically and missed by Pylint inference
4 | # system, and so shouldn't trigger E1101 when accessed.
5 | generated-members=numpy.*, torch.*
6 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/README.md:
--------------------------------------------------------------------------------
1 | # Model Evaluation Flow with Metaflow and Comet
2 |
3 | Comet integrates with [Metaflow](https://metaflow.org/).
4 |
5 | Metaflow is a human-friendly Python/R library that helps scientists and engineers build and manage real-life data science projects. Metaflow was originally developed at Netflix to boost productivity of data scientists who work on a wide variety of projects from classical statistics to state-of-the-art deep learning.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring Metaflow integration, please see: [https://www.comet.com/docs/v2/integrations/third-party-tools/metaflow/](https://www.comet.com/docs/v2/integrations/third-party-tools/metaflow/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=metaflow)
10 |
11 | ## See it
12 |
13 | [Here is an example project](https://www.comet.com/examples/comet-example-metaflow-model-evaluation/view/Erns9fTvjSvl7nLabBJoydPxg/panels?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=metaflow) with the results of a Metaflow run.
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | Set Comet Credentials
24 |
25 | ```shell
26 | export COMET_API_KEY=
27 | export COMET_WORKSPACE=
28 | ```
29 |
30 | ## Run the example
31 |
32 | In this guide, we will demonstrate how to use Comet's Metaflow integration to build a simple model evaluation flow.
33 |
34 | ```shell
35 | python metaflow-model-evaluation.py run --max-workers 1 --n_samples 100
36 | ```
37 |
38 | Our flow consists of two steps.
39 |
40 | ### 1. An evaluation step
41 |
42 | In this step, we will evaluate models from the [timm](https://timm.fast.ai/) library on the [ImageNet-Sketch dataset](https://huggingface.co/datasets/imagenet_sketch).
43 |
44 | For each model under consideration, we are going to create an experiment, stream a fixed number of examples from the dataset, and log the resulting model evaluation data to Comet.
45 |
46 | This data includes:
47 |
48 | 1. The Pretrained Model Name
49 | 2. A [Classification Report](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html#sklearn.metrics.classification_report) for the model's performance on the dataset examples
50 | 3. Flow-related parameters. These are global parameters for our Flow that are auto-logged by Comet.
51 |
52 | ### 2. A model registration step
53 |
54 | Once we have logged the performance of each model, we will register the model with the highest macro average recall across all classes to the [Comet Model Registry](https://www.comet.com/site/products/machine-learning-model-versioning/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=metaflow).
55 |
56 |
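57 | For reference, the overall shape of such a flow is sketched below. The model names, the evaluation logic and the metric value are placeholders; see `metaflow-model-evaluation.py` for the real implementation:
58 | 
59 | ```python
60 | from comet_ml import login
61 | from comet_ml.integration.metaflow import comet_flow
62 | 
63 | from metaflow import FlowSpec, step
64 | 
65 | 
66 | @comet_flow(project_name="comet-example-metaflow-model-evaluation")
67 | class ModelEvaluationFlow(FlowSpec):
68 | 
69 |     @step
70 |     def start(self):
71 |         self.models = ["resnet18", "resnet34"]  # illustrative timm model names
72 |         self.next(self.evaluate, foreach="models")
73 | 
74 |     @step
75 |     def evaluate(self):
76 |         # Evaluate the current model on the dataset and log the results to
77 |         # this step's Comet experiment.
78 |         self.model_name = self.input
79 |         self.macro_recall = 0.0  # placeholder for the real evaluation result
80 |         self.comet_experiment.log_metric("macro_recall", self.macro_recall)
81 |         self.next(self.register_best_model)
82 | 
83 |     @step
84 |     def register_best_model(self, inputs):
85 |         # Pick the model with the highest macro-average recall; the real flow
86 |         # then registers it to the Comet Model Registry.
87 |         best = max(inputs, key=lambda task: task.macro_recall)
88 |         print("Best model:", best.model_name)
89 |         self.next(self.end)
90 | 
91 |     @step
92 |     def end(self):
93 |         pass
94 | 
95 | 
96 | if __name__ == "__main__":
97 |     login()
98 |     ModelEvaluationFlow()
99 | ```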
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.44.0
2 | datasets
3 | metaflow
4 | metaflow-card-html
5 | pandas
6 | plotly
7 | scikit-learn
8 | timm
9 | torch
10 | torchvision
11 | transformers
12 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-regression/README.md:
--------------------------------------------------------------------------------
1 | # Metaflow integration with Comet.ml
2 |
3 | Comet integrates with [Metaflow](https://metaflow.org/).
4 |
5 | Metaflow is a human-friendly Python/R library that helps scientists and engineers build and manage real-life data science projects. Metaflow was originally developed at Netflix to boost productivity of data scientists who work on a wide variety of projects from classical statistics to state-of-the-art deep learning.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring Metaflow integration, please see: [https://www.comet.com/docs/v2/integrations/third-party-tools/metaflow/](https://www.comet.com/docs/v2/integrations/third-party-tools/metaflow/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=metaflow)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-metaflow-regression?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=metaflow).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 | This Metaflow example trains three regression models on the same toy dataset. Each model's predictions are then logged as an interactive Plotly chart and saved as a Metaflow card.
26 |
27 | ```bash
28 | python metaflow-regression-example.py run
29 | ```
30 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-regression/metaflow-regression-example.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from comet_ml import login
4 | from comet_ml.integration.metaflow import comet_flow
5 |
6 | from metaflow import FlowSpec, JSONType, Parameter, card, step
7 |
8 |
9 | @comet_flow(project_name="comet-example-metaflow-regression")
10 | class RegressionFlow(FlowSpec):
11 |
12 | models = Parameter(
13 | "models",
14 |         help=("A list of model classes to train."),
15 | type=JSONType,
16 | default='["Regression", "Decision Tree", "k-NN"]',
17 | )
18 |
19 | @step
20 | def start(self):
21 | """
22 | Load the data
23 | """
24 | import plotly.express as px
25 |
26 | self.input_df = px.data.tips()
27 |
28 | self.next(self.split_data)
29 |
30 | @step
31 | def split_data(self):
32 | """
33 | Split train data for modelling
34 | """
35 | from sklearn.model_selection import train_test_split
36 |
37 | self.X = self.input_df.total_bill.values[:, None]
38 | self.X_train, self.X_test, self.Y_train, self.Y_test = train_test_split(
39 | self.X, self.input_df.tip, random_state=42
40 | )
41 |
42 | self.next(self.train_model, foreach="models")
43 |
44 | @card(type="html")
45 | @step
46 | def train_model(self):
47 | import numpy as np
48 | import plotly.graph_objects as go
49 | from sklearn import linear_model, neighbors, tree
50 |
51 | model_name = self.input
52 |
53 | if model_name == "Regression":
54 | model = linear_model.LinearRegression()
55 | elif model_name == "Decision Tree":
56 | model = tree.DecisionTreeRegressor()
57 | elif model_name == "k-NN":
58 | model = neighbors.KNeighborsRegressor()
59 | else:
60 | raise ValueError("Invalid model name")
61 |
62 | self.comet_experiment.log_parameter("model", model)
63 |
64 | model.fit(self.X_train, self.Y_train)
65 |
66 | self.score = model.score(self.X_test, self.Y_test)
67 | self.comet_experiment.log_metric("score", self.score)
68 |
69 | # Visualize predictions
70 | x_range = np.linspace(self.X.min(), self.X.max(), 100)
71 | y_range = model.predict(x_range.reshape(-1, 1))
72 |
73 | fig = go.Figure(
74 | [
75 | go.Scatter(
76 | x=self.X_train.squeeze(),
77 | y=self.Y_train,
78 | name="train",
79 | mode="markers",
80 | ),
81 | go.Scatter(
82 | x=self.X_test.squeeze(), y=self.Y_test, name="test", mode="markers"
83 | ),
84 | go.Scatter(x=x_range, y=y_range, name="prediction"),
85 | ],
86 | layout=go.Layout(
87 | title=go.layout.Title(text="Predictions for model %s" % model_name)
88 | ),
89 | )
90 | self.html = fig.to_html()
91 |
92 | self.next(self.join)
93 |
94 | @step
95 | def join(self, inputs):
96 | """
97 | Merge the data artifact from the models
98 | """
99 | from comet_ml import API
100 |
101 |     # Merge artifacts during the join
102 | best_model, best_score = None, float("-inf")
103 |
104 | for _input in inputs:
105 | self.comet_experiment.log_metric("%s_score" % _input.input, _input.score)
106 |
107 | if _input.score > best_score:
108 | best_score = _input.score
109 | best_model = _input.input
110 |
111 | # Logs which model was the best to the Run Experiment to easily
112 | # compare between different Runs
113 | run_experiment = API().get_experiment_by_key(self.run_comet_experiment_key)
114 | run_experiment.log_parameter("Best Model", best_model)
115 |
116 | self.next(self.end)
117 |
118 | @step
119 | def end(self):
120 | pass
121 |
122 |
123 | if __name__ == "__main__":
124 | # Login to Comet if needed
125 | login()
126 |
127 | RegressionFlow()
128 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/metaflow/metaflow-regression/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.31.15
2 | metaflow
3 | metaflow-card-html
4 | pandas
5 | plotly
6 | scikit-learn
7 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/vertex/vertex-hello-world/.gitignore:
--------------------------------------------------------------------------------
1 | demo_pipeline.json
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/vertex/vertex-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # Vertex AI integration with Comet.ml
2 |
3 | Comet integrates with Google Vertex AI.
4 |
5 | [Google Vertex AI](https://cloud.google.com/vertex-ai/) lets you build, deploy, and scale ML models faster, with pre-trained and custom tooling within a unified artificial intelligence platform.
6 |
7 | > [!NOTE]
8 | > This example uses the first version of the KFP package
9 |
10 | ## Documentation
11 |
12 | For more information on using and configuring the Vertex integration, see: [https://www.comet.com/docs/v2/integrations/third-party-tools/vertex-ai/](https://www.comet.com/docs/v2/integrations/third-party-tools/vertex-ai/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=vertex)
13 |
14 | ## See it
15 |
16 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-vertex-hello-world/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=vertex).
17 |
18 | ## Setup
19 |
20 | Install dependencies
21 |
22 | ```bash
23 | python -m pip install -r requirements.txt
24 | ```
25 |
26 | ## Run the example
27 |
28 | The following example demonstrates how to use the Comet pipelines integration to track the state of pipelines run on Vertex. Before running, make sure that you are correctly authenticated against your Google Cloud Platform account and project; the easiest way to do so is with the [Google Cloud CLI](https://cloud.google.com/sdk/docs/).
29 |
30 | ```bash
31 | python demo_pipeline.py
32 | ```
33 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/vertex/vertex-hello-world/demo_pipeline.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import os
3 |
4 | from comet_ml import login
5 |
6 | import google.cloud.aiplatform as aip
7 | import kfp
8 | import kfp.v2.dsl as dsl
9 |
10 | # Login to Comet if needed
11 | login()
12 |
13 |
14 | COMET_PROJECT_NAME = "comet-example-vertex-hello-world"
15 |
16 |
17 | @dsl.component(packages_to_install=["comet_ml"])
18 | def data_preprocessing(a: str = None, b: str = None) -> str:
19 | import math
20 | import random
21 | import time
22 |
23 | import comet_ml
24 |
25 | experiment = comet_ml.start()
26 |
27 | for i in range(60):
28 | experiment.log_metric("accuracy", math.log(i + random.random()))
29 | time.sleep(0.1)
30 | experiment.end()
31 |
32 | return a
33 |
34 |
35 | @dsl.component(packages_to_install=["comet_ml"])
36 | def model_training(a: str = None, b: str = None) -> str:
37 | import math
38 | import random
39 | import time
40 |
41 | import comet_ml
42 |
43 | experiment = comet_ml.start()
44 |
45 | for i in range(60):
46 | experiment.log_metric("accuracy", math.log(i + random.random()))
47 | time.sleep(0.1)
48 | experiment.end()
49 |
50 | return a
51 |
52 |
53 | @dsl.component(packages_to_install=["comet_ml"])
54 | def model_evaluation(a: str = None, b: str = None) -> str:
55 | import math
56 | import random
57 | import time
58 |
59 | import comet_ml
60 |
61 | experiment = comet_ml.start()
62 |
63 | for i in range(60):
64 | experiment.log_metric("accuracy", math.log(i + random.random()))
65 | time.sleep(0.1)
66 | experiment.end()
67 |
68 | return a
69 |
70 |
71 | @dsl.pipeline(name="comet-integration-example")
72 | def pipeline():
73 | import comet_ml.integration.vertex
74 |
75 | logger = comet_ml.integration.vertex.CometVertexPipelineLogger(
76 | # api_key=XXX,
77 | project_name=COMET_PROJECT_NAME,
78 | # workspace=XXX
79 | share_api_key_to_workers=True,
80 | )
81 |
82 | task_1 = logger.track_task(data_preprocessing("test"))
83 |
84 | task_2 = logger.track_task(model_training(task_1.output))
85 |
86 | task_3 = logger.track_task(model_training(task_1.output))
87 |
88 | _ = logger.track_task(model_evaluation(task_2.output, task_3.output))
89 |
90 |
91 | if __name__ == "__main__":
92 | print("Running pipeline")
93 | kfp.v2.compiler.Compiler().compile(
94 | pipeline_func=pipeline, package_path="demo_pipeline.json"
95 | )
96 |
97 | job = aip.PipelineJob(
98 | display_name="comet-integration-example",
99 | template_path="demo_pipeline.json",
100 | pipeline_root=os.getenv("PIPELINE_ROOT"),
101 | project=os.getenv("GCP_PROJECT"),
102 | enable_caching=False,
103 | )
104 |
105 | job.submit()
106 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/vertex/vertex-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.33.10
2 | google-cloud-aiplatform
3 | kfp<2
4 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/vertex/vertex-v2-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # Vertex AI integration with Comet.ml
2 |
3 | Comet integrates with Google Vertex AI.
4 |
5 | [Google Vertex AI](https://cloud.google.com/vertex-ai/) lets you build, deploy, and scale ML models faster, with pre-trained and custom tooling within a unified artificial intelligence platform.
6 |
7 | ## Documentation
8 |
9 | For more information on using and configuring the Vertex integration, see: [https://www.comet.com/docs/v2/integrations/third-party-tools/vertex-ai/](https://www.comet.com/docs/v2/integrations/third-party-tools/vertex-ai/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=vertex)
10 |
11 | ## See it
12 |
13 | Take a look at this [public Comet Project](https://www.comet.com/examples/comet-example-vertex-v2-hello-world/view/mz2vYWFTYZ3vNzgWIK0r4ZRUR/panels?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=vertex).
14 |
15 | ## Setup
16 |
17 | Install dependencies
18 |
19 | ```bash
20 | python -m pip install -r requirements.txt
21 | ```
22 |
23 | ## Run the example
24 |
25 | The following example demonstrates how to use the Comet pipelines integration to track the state of pipelines run on Vertex. Before running, make sure that you are correctly authenticated against your Google Cloud Platform account and project; the easiest way to do so is with the [Google Cloud CLI](https://cloud.google.com/sdk/docs/).
26 |
27 | ```bash
28 | python demo_pipeline.py
29 | ```
30 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/vertex/vertex-v2-hello-world/demo_pipeline.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import os
3 |
4 | from comet_ml import login
5 |
6 | import google.cloud.aiplatform as aip
7 | from kfp import compiler, dsl
8 |
9 | # Login to Comet if needed
10 | login()
11 |
12 |
13 | COMET_PROJECT_NAME = "comet-example-vertex-v2-hello-world"
14 |
15 |
16 | @dsl.component(packages_to_install=["comet_ml"])
17 | def data_preprocessing(a: str = None) -> str:
18 | import math
19 | import random
20 | import time
21 |
22 | import comet_ml
23 |
24 | experiment = comet_ml.start()
25 |
26 | for i in range(60):
27 | experiment.log_metric("accuracy", math.log(i + random.random()))
28 | time.sleep(0.1)
29 | experiment.end()
30 |
31 | return a
32 |
33 |
34 | @dsl.component(packages_to_install=["comet_ml"])
35 | def model_training(a: str = None) -> str:
36 | import math
37 | import random
38 | import time
39 |
40 | import comet_ml
41 |
42 | experiment = comet_ml.start()
43 |
44 | for i in range(60):
45 | experiment.log_metric("accuracy", math.log(i + random.random()))
46 | time.sleep(0.1)
47 | experiment.end()
48 |
49 | return a
50 |
51 |
52 | @dsl.component(packages_to_install=["comet_ml"])
53 | def model_evaluation(a: str = None, b: str = None) -> str:
54 | import math
55 | import random
56 | import time
57 |
58 | import comet_ml
59 |
60 | experiment = comet_ml.start()
61 |
62 | for i in range(60):
63 | experiment.log_metric("accuracy", math.log(i + random.random()))
64 | time.sleep(0.1)
65 | experiment.end()
66 |
67 | return a
68 |
69 |
70 | @dsl.pipeline(name="comet-integration-example")
71 | def pipeline():
72 | import comet_ml.integration.vertex
73 |
74 | logger = comet_ml.integration.vertex.CometVertexPipelineLogger(
75 | # api_key=XXX,
76 | project_name=COMET_PROJECT_NAME,
77 | # workspace=XXX
78 | share_api_key_to_workers=True,
79 | )
80 |
81 | task_1 = logger.track_task(data_preprocessing(a="test"))
82 |
83 | task_2 = logger.track_task(model_training(a=task_1.output))
84 |
85 | task_3 = logger.track_task(model_training(a=task_1.output))
86 |
87 | _ = logger.track_task(model_evaluation(a=task_2.output, b=task_3.output))
88 |
89 |
90 | if __name__ == "__main__":
91 | print("Running pipeline")
92 | compiler.Compiler().compile(
93 | pipeline_func=pipeline, package_path="demo_pipeline.json"
94 | )
95 |
96 | job = aip.PipelineJob(
97 | display_name="comet-integration-example",
98 | template_path="demo_pipeline.json",
99 | pipeline_root=os.getenv("PIPELINE_ROOT"),
100 | project=os.getenv("GCP_PROJECT"),
101 | enable_caching=False,
102 | )
103 |
104 | job.submit()
105 |
--------------------------------------------------------------------------------
/integrations/workflow-orchestration/vertex/vertex-v2-hello-world/requirements.txt:
--------------------------------------------------------------------------------
1 | comet_ml>=3.33.10
2 | google-cloud-aiplatform
3 | # Ignore versions impacted by https://github.com/kubeflow/pipelines/issues/9974
4 | kfp>=2,!=2.1.3
5 |
--------------------------------------------------------------------------------
/logo/comet_badge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/logo/comet_badge.png
--------------------------------------------------------------------------------
/notebooks/comet-key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/notebooks/comet-key.png
--------------------------------------------------------------------------------
/notebooks/confusion-matrix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/notebooks/confusion-matrix.png
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/.env.example:
--------------------------------------------------------------------------------
1 | # OpenAI API Key
2 | OPENAI_API_KEY=your_openai_api_key_here
3 |
4 | # Application Settings
5 | VECTOR_STORE_PATH=./data/vector_store
6 | DATA_DIR=./data
7 |
8 | # Opik Settings (if needed)
9 | OPIK_API_KEY=your_opik_api_key_here
10 | OPIK_WORKSPACE="default"
11 | OPIK_PROJECT_NAME="demo-langgraph-callsummarizer"
12 |
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.so
6 | .Python
7 | build/
8 | develop-eggs/
9 | dist/
10 | downloads/
11 | eggs/
12 | .eggs/
13 | lib/
14 | lib64/
15 | parts/
16 | sdist/
17 | var/
18 | wheels/
19 | *.egg-info/
20 | .installed.cfg
21 | *.egg
22 |
23 | # Environment variables
24 | .env
25 |
26 | # Virtual Environment
27 | venv/
28 | env/
29 | ENV/
30 |
31 | # IDE
32 | .idea/
33 | .vscode/
34 | *.swp
35 | *.swo
36 |
37 | # Local data
38 | /data/
39 | /vector_store/
40 |
41 | # Logs
42 | *.log
43 |
44 | # OS generated files
45 | .DS_Store
46 | .DS_Store?
47 | ._*
48 | .Spotlight-V100
49 | .Trashes
50 | ehthumbs.db
51 | Thumbs.db
52 |
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.6.0
4 | hooks:
5 | - id: trailing-whitespace
6 | - id: end-of-file-fixer
7 | - id: check-yaml
8 | - id: check-toml
9 | - id: check-added-large-files
10 | args: ["--maxkb=10000"]
11 | - id: check-merge-conflict
12 | - id: detect-private-key
13 | - id: check-case-conflict
14 | - id: mixed-line-ending
15 |
16 | - repo: https://github.com/astral-sh/ruff-pre-commit
17 | rev: v0.4.4
18 | hooks:
19 | - id: ruff-format
20 | - id: ruff
21 | args: [--fix, --exit-non-zero-on-fix]
22 |
23 | - repo: https://github.com/commitizen-tools/commitizen
24 | rev: v3.27.0
25 | hooks:
26 | - id: commitizen
27 | stages: [commit-msg]
28 |
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "call-summarizer"
3 | version = "0.1.0"
4 | description = "A Streamlit app for summarizing and categorizing call transcripts using LangGraph and Opik"
5 | authors = ["Francisco <22344801+fschlz@users.noreply.github.com>"]
6 | readme = "README.md"
7 | packages = [{include = "call_summarizer", from = "src"}]
8 |
9 | [tool.poetry.dependencies]
10 | python = "^3.12"
11 | streamlit = "^1.32.0"
12 | opik = "^1.7.26"
13 | langchain = "^0.3.25"
14 | langgraph = "^0.4.5"
15 | langchain-openai = "^0.3.18"
16 | langchain-chroma = "^0.1.1"
17 | chromadb = "^0.5.0"
18 | pydantic = "^2.7.0"
19 | python-dotenv = "^1.0.1"
20 | python-multipart = "^0.0.9"
21 | pyyaml = "^6.0.1"
22 | pydantic-settings = "^2.9.1"
23 | beautifulsoup4 = "^4.12.3"
24 | watchdog = "^6.0.0"
25 | mcp-server-fetch = "^2025.4.7"
26 |
27 | [tool.poetry.group.dev.dependencies]
28 | pytest = "^8.3.5"
29 | ruff = "^0.4.0" # Or latest version
30 | pre-commit = "^3.7.0"
31 | commitizen = "^3.13.0"
32 |
33 | [build-system]
34 | requires = ["poetry-core>=2.0.0"]
35 | build-backend = "poetry.core.masonry.api"
36 |
37 | [tool.ruff]
38 | # Same as Black.
39 | line-length = 140
40 |
41 | # Assume Python 3.12.
42 | target-version = "py312"
43 |
44 | # Specify the source directory for import sorting and other path-dependent checks.
45 | src = ["src"]
46 |
47 | [tool.ruff.lint]
48 | # Enable Pyflakes (F) and pycodestyle (E, W) and McCabe (C90) complexity.
49 | select = ["F", "E", "W", "C90"]
50 | ignore = []
51 |
52 | # Allow unused variables when underscore-prefixed.
53 | # dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
54 |
55 | [tool.ruff.format]
56 | # Like Black, use double quotes for strings.
57 | quote-style = "double"
58 |
59 | # Like Black, indent with spaces, rather than tabs.
60 | indent-style = "space"
61 |
62 | # Like Black, respect magic trailing commas.
63 | skip-magic-trailing-comma = false
64 |
65 | # Like Black, automatically detect the appropriate line ending.
66 | line-ending = "auto"
67 |
68 | [tool.commitizen]
69 | name = "cz_conventional_commits"
70 | tag_format = "$version"
71 | version_provider = "poetry"
72 | version_files = [
73 | "pyproject.toml:version",
74 | "src/call_summarizer/__init__.py:__version__" # Assuming you might have version here
75 | ]
76 | # You can add more commitizen settings if needed, like update_changelog_on_bump
77 |
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/src/call_summarizer/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.1.0"
2 |
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/src/call_summarizer/config.py:
--------------------------------------------------------------------------------
1 | """Application configuration and settings."""
2 |
3 | import os
4 | from typing import Optional
5 |
6 | from dotenv import load_dotenv
7 | from pydantic import Field
8 | from pydantic_settings import BaseSettings, SettingsConfigDict
9 |
10 | # Load environment variables from .env file
11 | load_dotenv()
12 |
13 |
14 | class Settings(BaseSettings):
15 | """Application settings."""
16 |
17 | # OpenAI
18 | openai_api_key: str = Field(..., alias="OPENAI_API_KEY")
19 |
20 | # Application
21 | vector_store_path: str = Field("./data/vector_store", alias="VECTOR_STORE_PATH")
22 | data_dir: str = Field("./data", alias="DATA_DIR")
23 |
24 | # Opik
25 | opik_api_key: Optional[str] = Field(default=None, alias="OPIK_API_KEY")
26 | opik_workspace: Optional[str] = Field(default=None, alias="OPIK_WORKSPACE")
27 | opik_project_name: Optional[str] = Field(default=None, alias="OPIK_PROJECT_NAME")
28 |
29 | # Pydantic v2 config
30 | model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore")
31 |
32 | def ensure_dirs_exist(self) -> None:
33 | """Ensure that all required directories exist."""
34 | os.makedirs(self.vector_store_path, exist_ok=True)
35 | os.makedirs(self.data_dir, exist_ok=True)
36 |
37 |
38 | # Initialize settings
39 | settings = Settings()
40 | settings.ensure_dirs_exist()
41 |
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/src/call_summarizer/models/models.py:
--------------------------------------------------------------------------------
1 | """Data models for the call summarizer application."""
2 |
3 | from datetime import datetime
4 | from enum import Enum
5 | from typing import Dict, List
6 |
7 | from pydantic import BaseModel, Field
8 |
9 |
10 | class CallCategory(str, Enum):
11 | """Categories for call summaries."""
12 |
13 | SALES = "sales"
14 | SUPPORT = "support"
15 | INTERVIEW = "interview"
16 | MEETING = "meeting"
17 | OTHER = "other"
18 |
19 |
20 | class CallSummary(BaseModel):
21 | """Model for call summary data."""
22 |
23 | id: str = Field(..., description="Unique identifier for the call summary")
24 | transcript: str = Field(..., description="The full text of the call transcript")
25 | summary: str = Field(..., description="Generated summary of the call")
26 | action_items: List[str] = Field(default_factory=list, description="List of action items from the call")
27 | category: CallCategory = Field(default=CallCategory.OTHER, description="Category of the call")
28 | created_at: datetime = Field(default_factory=datetime.utcnow, description="When the summary was created")
29 | metadata: Dict = Field(default_factory=dict, description="Additional metadata about the call")
30 |
31 |
32 | class CallCategoryConfig(BaseModel):
33 | """Configuration for a call category including its prompt template."""
34 |
35 | name: str = Field(..., description="Name of the category")
36 | description: str = Field(..., description="Description of when to use this category")
37 | prompt_template: str = Field(..., description="Template for generating summaries")
38 | created_at: datetime = Field(default_factory=datetime.utcnow, description="When the category was created")
39 | updated_at: datetime = Field(default_factory=datetime.utcnow, description="When the category was last updated")
40 |
41 |
42 | class VectorStoreConfig(BaseModel):
43 | """Configuration for the vector store."""
44 |
45 | persist_dir: str = Field(..., description="Directory to persist the vector store")
46 | collection_name: str = Field("call_summaries", description="Name of the collection in the vector store")
47 |
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/src/call_summarizer/utils/file_utils.py:
--------------------------------------------------------------------------------
1 | """Utility functions for file operations."""
2 |
3 | import json
4 | import os
5 | from typing import List, Optional, Type, TypeVar
6 |
7 | from pydantic import BaseModel
8 |
9 | T = TypeVar("T", bound=BaseModel)
10 |
11 |
12 | def ensure_dir_exists(file_path: str) -> None:
13 | """Ensure the directory of the given file path exists."""
14 | os.makedirs(os.path.dirname(file_path), exist_ok=True)
15 |
16 |
17 | def save_config(data: BaseModel, file_path: str) -> None:
18 | """Save a Pydantic model to a JSON file."""
19 | ensure_dir_exists(file_path)
20 | with open(file_path, "w", encoding="utf-8") as f:
21 |         json.dump(data.model_dump(), f, indent=2, default=str)
22 |
23 |
24 | def load_config(file_path: str, model_class: Type[T]) -> Optional[T]:
25 | """Load a Pydantic model from a JSON file."""
26 | if not os.path.exists(file_path):
27 | return None
28 |
29 | with open(file_path, "r", encoding="utf-8") as f:
30 | data = json.load(f)
31 |
32 | return model_class(**data)
33 |
34 |
35 | def save_list_of_configs(data_list: List[BaseModel], file_path: str) -> None:
36 | """Save a list of Pydantic models to a JSON file."""
37 | ensure_dir_exists(file_path)
38 | with open(file_path, "w", encoding="utf-8") as f:
39 |         json.dump([item.model_dump() for item in data_list], f, indent=2, default=str)
40 |
41 |
42 | def load_list_of_configs(file_path: str, model_class: Type[T]) -> List[T]:
43 | """Load a list of Pydantic models from a JSON file."""
44 | if not os.path.exists(file_path):
45 | return []
46 |
47 | with open(file_path, "r", encoding="utf-8") as f:
48 | data_list = json.load(f)
49 |
50 | return [model_class(**item) for item in data_list]
51 |
52 |
53 | def read_text_file(file_path: str) -> str:
54 | """Read text content from a file."""
55 | with open(file_path, "r", encoding="utf-8") as f:
56 | return f.read()
57 |
58 |
59 | def write_text_file(file_path: str, content: str) -> None:
60 | """Write text content to a file."""
61 | ensure_dir_exists(file_path)
62 | with open(file_path, "w", encoding="utf-8") as f:
63 | f.write(content)
64 |
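65 | # Example usage (illustrative sketch only; assumes the models defined in
66 | # call_summarizer.models.models and hypothetical file paths):
67 | #
68 | #   from call_summarizer.models.models import VectorStoreConfig
69 | #   cfg = VectorStoreConfig(persist_dir="./data/vector_store")
70 | #   save_config(cfg, "./data/configs/vector_store.json")
71 | #   loaded = load_config("./data/configs/vector_store.json", VectorStoreConfig)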
--------------------------------------------------------------------------------
/opik/streamlit/call-summarizer/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/opik/streamlit/call-summarizer/tests/__init__.py
--------------------------------------------------------------------------------
/panels/AudioCompare/AudioCompare.py:
--------------------------------------------------------------------------------
1 | import os
2 | os.system('pip install "st-audio-spectrogram>=0.0.5"')
3 |
4 | from comet_ml import API
5 | import io
6 | import time
7 | import streamlit as st
8 | from st_audio_spectrogram import st_audio_spectrogram
9 |
10 | try:
11 | st.set_page_config(layout="wide")
12 | except Exception:
13 | pass
14 |
15 | api = API()
16 |
17 | @st.cache_data(persist="disk", show_spinner="Loading asset...")
18 | def get_asset(_experiment, experiment_id, asset_id):
19 |         asset = _experiment.get_asset(
20 | asset_id=asset_id,
21 | return_type='binary'
22 | )
23 | return asset
24 |
25 | def get_asset_list(_experiment, experiment_id, asset_type):
26 | return _experiment.get_asset_list(asset_type=asset_type)
27 |
28 | def get_all_audio_data(_experiments, experiment_ids):
29 | audio_data = set()
30 | # First, get a selection from asset names:
31 | bar = st.progress(0, "Loading audio list...")
32 | for i, experiment in enumerate(_experiments):
33 | for asset in get_asset_list(experiment, experiment.id, "audio"):
34 |             bar.progress(i/len(_experiments), "Loading audio...")
35 | audio_data.add((experiment.id, asset["fileName"], asset["assetId"], asset["step"], ))
36 | bar.empty()
37 | return audio_data
38 |
39 | # ----------------------------------------
40 | experiments = api.get_panel_experiments()
41 | experiment_map = {exp.id: exp for exp in experiments}
42 | experiment_ids = sorted([exp.id for exp in experiments])
43 | audio_data = get_all_audio_data(experiments, experiment_ids)
44 |
45 | asset_names = sorted(
46 | list(
47 | set([os.path.basename(item[1]) for item in audio_data])
48 | )
49 | )
50 |
51 | selected_names = st.multiselect("", asset_names, placeholder="Select Audio Files:")
52 |
53 | steps = set()
54 | for asset_name in selected_names:
55 | for experiment_id, filename, asset_id, step in audio_data:
56 | if filename.endswith(asset_name):
57 | if step is not None:
58 | steps.add(step)
59 |
60 | if steps:
61 | if min(steps) != max(steps):
62 | STEP = st.slider(
63 | "Select Step:",
64 | min_value=min(steps),
65 | max_value=max(steps),
66 | value=max(steps),
67 | )
68 | else:
69 | STEP = None
70 | else:
71 | STEP = None
72 |
73 | for asset_name in selected_names:
74 | with st.expander("Compare: **%s**" % asset_name, expanded=len(selected_names) == 1):
75 | for experiment_id, filename, asset_id, step in sorted(
76 | audio_data, key=lambda item: item[0]
77 | ):
78 | if filename.endswith(asset_name) and ((step == STEP) or (STEP is None)):
79 | experiment = experiment_map[experiment_id]
80 | audio = get_asset(experiment, experiment_id, asset_id)
81 | st.markdown("*Experiment*: ***%s***, *step*: ***%s***" % (
82 | experiment.name, step
83 | ))
84 | with st.spinner("Loading component..."):
85 | time.sleep(1)
86 | st_audio_spectrogram(
87 | audio,
88 | key="%s: %s" % (experiment_id, asset_id)
89 | )
90 | st.divider()
91 |
--------------------------------------------------------------------------------
/panels/AudioCompare/README.md:
--------------------------------------------------------------------------------
1 | ### AudioCompare
2 |
3 | The `AudioCompare` panel is used to examine audio waveforms and spectrograms
4 | in a single experiment or across experiments. See also the built-in Audio Panel.
5 |
6 | [screenshots: audio-compare.png, built-in-audio-panel.png]
7 | 
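8 | #### Example
9 | 
10 | A minimal sketch of logging audio so that this panel has something to
11 | display. The project name and the generated one-second 440 Hz tone are
12 | illustrative only; any audio file or NumPy array logged with `log_audio`
13 | will appear in the panel:
14 | 
15 | ```python
16 | import comet_ml
17 | import numpy as np
18 | 
19 | comet_ml.login()
20 | experiment = comet_ml.start(project_name="audio-compare-demo")
21 | 
22 | sample_rate = 44100
23 | t = np.linspace(0, 1, sample_rate)
24 | waveform = np.sin(2 * np.pi * 440 * t)  # one second, 440 Hz
25 | 
26 | experiment.log_audio(waveform, sample_rate=sample_rate, file_name="tone.wav", step=0)
27 | experiment.end()
28 | ```
29 | 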
24 | #### Python Panel
25 |
26 | To include this panel from the github repo, use this code in a Custom Python Panel:
27 |
28 | ```
29 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/AudioCompare/AudioCompare.py
30 | ```
31 |
32 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/AudioCompare/AudioCompare.py) into a custom Python Panel.
33 |
34 | #### Resources
35 |
36 | * Example Comet Project: [www.comet.com/examples/comet-example-audio-compare](https://www.comet.com/examples/comet-example-audio-compare/view/pV46hu7kzY8kOsC77ZWMDJwic/panels)
37 | * [Logging Audio](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment/#comet_ml.Experiment.log_audio)
38 | * [UI Audio Tab](https://www.comet.com/docs/v2/guides/comet-ui/experiment-management/single-experiment-page/#audio-tab)
39 | * [Get audio assets programmatically](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/APIExperiment/#comet_ml.APIExperiment.get_asset_list)
--------------------------------------------------------------------------------
/panels/AudioCompare/audio-compare.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/AudioCompare/audio-compare.png
--------------------------------------------------------------------------------
/panels/AudioCompare/built-in-audio-panel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/AudioCompare/built-in-audio-panel.png
--------------------------------------------------------------------------------
/panels/AudioComparePanel.py:
--------------------------------------------------------------------------------
1 | import os
2 | os.system('pip install "st-audio-spectrogram>=0.0.5"')
3 |
4 | from comet_ml import API
5 | import io
6 | import time
7 | import streamlit as st
8 | from st_audio_spectrogram import st_audio_spectrogram
9 |
10 | try:
11 | st.set_page_config(layout="wide")
12 | except Exception:
13 | pass
14 |
15 | api = API()
16 |
17 | @st.cache_data(persist="disk", show_spinner="Loading asset...")
18 | def get_asset(_experiment, experiment_id, asset_id):
19 |         asset = _experiment.get_asset(
20 | asset_id=asset_id,
21 | return_type='binary'
22 | )
23 | return asset
24 |
25 | def get_asset_list(_experiment, experiment_id, asset_type):
26 | return _experiment.get_asset_list(asset_type=asset_type)
27 |
28 | def get_all_audio_data(_experiments, experiment_ids):
29 | audio_data = set()
30 | # First, get a selection from asset names:
31 | bar = st.progress(0, "Loading audio list...")
32 | for i, experiment in enumerate(_experiments):
33 | for asset in get_asset_list(experiment, experiment.id, "audio"):
34 |             bar.progress(i/len(_experiments), "Loading audio...")
35 | audio_data.add((experiment.id, asset["fileName"], asset["assetId"], asset["step"], ))
36 | bar.empty()
37 | return audio_data
38 |
39 | # ----------------------------------------
40 | experiments = api.get_panel_experiments()
41 | experiment_map = {exp.id: exp for exp in experiments}
42 | experiment_ids = sorted([exp.id for exp in experiments])
43 | audio_data = get_all_audio_data(experiments, experiment_ids)
44 |
45 | asset_names = sorted(
46 | list(
47 | set([os.path.basename(item[1]) for item in audio_data])
48 | )
49 | )
50 |
51 | selected_names = st.multiselect("", asset_names, placeholder="Select Audio Files:")
52 |
53 | steps = set()
54 | for asset_name in selected_names:
55 | for experiment_id, filename, asset_id, step in audio_data:
56 | if filename.endswith(asset_name):
57 | if step is not None:
58 | steps.add(step)
59 |
60 | if steps:
61 | if min(steps) != max(steps):
62 | STEP = st.slider(
63 | "Select Step:",
64 | min_value=min(steps),
65 | max_value=max(steps),
66 | value=max(steps),
67 | )
68 | else:
69 | STEP = None
70 | else:
71 | STEP = None
72 |
73 | for asset_name in selected_names:
74 | with st.expander("Compare: **%s**" % asset_name, expanded=len(selected_names) == 1):
75 | for experiment_id, filename, asset_id, step in sorted(
76 | audio_data, key=lambda item: item[0]
77 | ):
78 | if filename.endswith(asset_name) and ((step == STEP) or (STEP is None)):
79 | experiment = experiment_map[experiment_id]
80 | audio = get_asset(experiment, experiment_id, asset_id)
81 | st.markdown("*Experiment*: ***%s***, *step*: ***%s***" % (
82 | experiment.name, step
83 | ))
84 | with st.spinner("Loading component..."):
85 | time.sleep(1)
86 | st_audio_spectrogram(
87 | audio,
88 | key="%s: %s" % (experiment_id, asset_id)
89 | )
90 | st.divider()
91 |
--------------------------------------------------------------------------------
/panels/CompareMaxAccuracyOverTime/CompareMaxAccuracyOverTime.py:
--------------------------------------------------------------------------------
1 | from comet_ml import API, ui
2 | import plotly.graph_objects as go
3 | from datetime import datetime
4 | import pandas as pd
5 |
6 | # Get available metrics
7 | api = API()
8 | metrics = api.get_panel_metrics_names()
9 |
10 | # Make chart interactive by adding a dropdown menu
11 | selected_metric = ui.dropdown('Select a metric:', metrics)
12 |
13 | # Use API to fetch the metric data for all experiments in the panel scope
14 | experiment_keys = api.get_panel_experiment_keys()
15 | if experiment_keys and selected_metric:
16 | data = api.get_metrics_for_chart(experiment_keys, [selected_metric])
17 | # Prepare data for the scatter plot and calculate averages
18 | x_data = []
19 | y_data = []
20 | hover_text = []
21 |
22 | # To hold date and accuracy values for calculating the average per date
23 | date_accuracy_pairs = []
24 |
25 | for exp_id, exp_data in data.items():
26 | metrics = exp_data["metrics"]
27 | if metrics:
28 | accuracy_metrics = [m for m in metrics if m["metricName"] == selected_metric]
29 | if accuracy_metrics:
30 | max_accuracy = max(accuracy_metrics[0]["values"])
31 | max_accuracy_idx = accuracy_metrics[0]["values"].index(max_accuracy)
32 | timestamp = accuracy_metrics[0]["timestamps"][max_accuracy_idx]
33 |
34 | # Convert timestamp to datetime (only date part)
35 | timestamp_dt = datetime.fromtimestamp(timestamp / 1000).date()
36 |
37 | # Append data
38 | x_data.append(timestamp_dt)
39 | y_data.append(max_accuracy)
40 | hover_text.append(exp_data["experimentName"])
41 |
42 | # Store date and accuracy for average calculation
43 | date_accuracy_pairs.append((timestamp_dt, max_accuracy))
44 |
45 | # Calculate the average accuracy per date
46 | df = pd.DataFrame(date_accuracy_pairs, columns=["date", selected_metric])
47 | average_data = df.groupby("date").mean().reset_index()
48 |
49 | # Create scatter plot using Plotly
50 | fig = go.Figure()
51 |
52 | # Scatter plot for individual experiment points
53 | fig.add_trace(go.Scatter(
54 | x=x_data,
55 | y=y_data,
56 | mode='markers',
57 | marker=dict(size=10),
58 | text=hover_text, # Experiment names for hover
59 | #hoverinfo='text', # Show only hover text
60 | name=selected_metric,
61 | ))
62 |
63 | # Line plot for the average accuracy across all experiments
64 | fig.add_trace(go.Scatter(
65 | x=average_data["date"],
66 | y=average_data[selected_metric],
67 | mode='lines',
68 | name=f"Average {selected_metric}",
69 | line=dict(color='red', width=2)
70 | ))
71 |
72 | # Update layout
73 | fig.update_layout(
74 | title="Max Accuracy vs Date by Experiment",
75 | xaxis_title="Date",
76 | yaxis_title=f"Maximum {selected_metric}",
77 | xaxis=dict(tickformat='%Y-%m-%d'),
78 | hovermode='closest'
79 | )
80 | ui.display(fig)
81 | else:
82 | ui.display("No data to plot")
83 |
--------------------------------------------------------------------------------
/panels/CompareMaxAccuracyOverTime/README.md:
--------------------------------------------------------------------------------
1 | ### CompareMaxAccuracyOverTime
2 |
3 | The `CompareMaxAccuracyOverTime` panel is used to help track how the
4 | retraining of a model each week compares to the previous week. This panel
5 | creates a scatter plot of the maximum value of a metric (of your choosing)
6 | over time, along with a line showing the average of those maxima per date.
7 |
8 | [screenshot: compare-max-accuracy-over-time.png]
9 | 
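10 | #### Example
11 | 
12 | A minimal sketch of the logging side (the project name and values are
13 | illustrative). In practice, each weekly retraining run creates its own
14 | experiment and logs the metric that this panel will track:
15 | 
16 | ```python
17 | import random
18 | import comet_ml
19 | 
20 | comet_ml.login()
21 | 
22 | # One experiment per retraining run; the panel plots each experiment's
23 | # maximum value against the date it was logged.
24 | experiment = comet_ml.start(project_name="weekly-retraining")
25 | for step in range(100):
26 |     experiment.log_metric("accuracy", random.random(), step=step)
27 | experiment.end()
28 | ```
29 | 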
19 | #### Python Panel
20 |
21 | To include this panel from the github repo, use this code in a Custom Python Panel:
22 |
23 | ```
24 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/CompareMaxAccuracyOverTime/CompareMaxAccuracyOverTime.py
25 | ```
26 |
27 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/CompareMaxAccuracyOverTime/CompareMaxAccuracyOverTime.py) into a custom Python Panel.
28 |
29 | #### Resources
30 |
31 | * [Colab Notebook](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/panels/CompareMaxAccuracyOverTime/Notebook.ipynb)
32 | * Example Comet Project: [www.comet.com/comet-demos/cifar10-vision](https://www.comet.com/comet-demos/cifar10-vision/view/kV9XoIkTfTSN0qyKCS1lKCzaF/panels)
33 | * Documentation:
34 | * [Logging metrics](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment/#comet_ml.Experiment.log_metric)
35 | * [Retrieving metrics](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/APIExperiment/#comet_ml.APIExperiment.get_metrics)
36 | * [Plotly Graph Objects and Scatter Plots](https://plotly.com/python/line-and-scatter/)
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/panels/CompareMaxAccuracyOverTime/compare-max-accuracy-over-time.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/CompareMaxAccuracyOverTime/compare-max-accuracy-over-time.png
--------------------------------------------------------------------------------
/panels/DataGridViewer/README.md:
--------------------------------------------------------------------------------
1 | ### DataGridViewer
2 |
3 | The `DataGridViewer` panel is used to visualize Comet `DataGrids` which
4 | can contain Images, text, and numeric data.
5 |
6 | The UX is a sophisticated approach to grouping data to see (and select)
7 | images and other data in a tabular format, with a search feature that
8 | allows fast querying of the data (including metadata) using Python syntax.
9 |
10 | [screenshots: tabular-view.png, group-by.png, image-dialog.png]
11 | 
30 | #### Example Code
31 |
32 | ```
33 | %pip install comet_ml datagrid
34 | 
35 | import random
36 | import requests
37 | import comet_ml
38 | from datagrid import DataGrid, Image
39 | from PIL import Image as PImage
40 | 
41 | experiment = comet_ml.start(project_name="demo-datagrids")
42 | dg = DataGrid(
43 |     columns=["Image", "Score", "Category"],
44 |     name="Demo"
45 | )
46 | categories = ["landscape", "portrait", "other"]  # demo-only labels
47 | url = "https://picsum.photos/300/200"
48 | for i in range(50):
49 |     im = PImage.open(requests.get(url, stream=True).raw)
50 |     category = random.choice(categories)
51 |     score = random.random()
52 |     image = Image(
53 |         im,
54 |         metadata={"category": category, "score": score},
55 |     )
56 |     dg.append([image, score, category])
57 | 
58 | dg.log(experiment)
59 | experiment.end()
60 | ```
61 |
62 | #### Resources
63 |
64 | * Copy panel to workspace:
65 | * `cometx log YOUR-WORKSPACE DataGridViewer --type panel`
66 | * Via code (see notebook below)
67 | * Example notebook: [DataGridViewer.ipynb](https://github.com/comet-ml/comet-examples/blob/master/panels/DataGridViewer/DataGridViewer.ipynb)
68 | * [Run in colab](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/panels/DataGridViewer/DataGridViewer.ipynb)
69 | * [Open in NBViewer](https://nbviewer.org/github/comet-ml/comet-examples/blob/master/panels/DataGridViewer/DataGridViewer.ipynb)
70 | * Example Comet Project: [www.comet.com/examples/comet-example-datagrid](https://www.comet.com/examples/comet-example-datagrid/view/dVz9h6RFURYwHVQcgXvJ3RWqU/panels)
71 | * Documentation:
72 | * [DataGrid](https://github.com/dsblank/datagrid)
73 | * [Search syntax](https://github.com/dsblank/datagrid/blob/main/Search.md)
74 |
--------------------------------------------------------------------------------
/panels/DataGridViewer/group-by.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/DataGridViewer/group-by.png
--------------------------------------------------------------------------------
/panels/DataGridViewer/image-dialog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/DataGridViewer/image-dialog.png
--------------------------------------------------------------------------------
/panels/DataGridViewer/tabular-view.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/DataGridViewer/tabular-view.png
--------------------------------------------------------------------------------
/panels/NotebookViewer/NoteBookViewer.py:
--------------------------------------------------------------------------------
1 | %pip install nbconvert
2 |
3 | from comet_ml import API
4 | import json
5 | import streamlit as st
6 | from nbconvert import HTMLExporter
7 | from nbformat import read
8 | st.set_page_config(layout="wide")
9 |
10 |
11 | api = API()
12 |
13 | columns = st.columns(2)
14 |
15 | experiments = api.get_panel_experiments()
16 |
17 | experiment = None
18 | if len(experiments) == 0:
19 |     print("No available experiments")
19 | elif len(experiments) == 1:
20 | experiment = experiments[0]
21 | columns[0].markdown("Experiment:\\\n**%s**" % (experiment.name or experiment.id))
22 | else:
23 | experiment = columns[0].selectbox(
24 | "Select an experiment:",
25 | experiments,
26 | format_func=lambda experiment: (experiment.name or experiment.id)
27 | )
28 |
29 | if experiment:
30 |     assets = experiment.get_asset_list("notebook")
31 |     if len(assets) == 0:
32 |         notebook = None
33 |     elif len(assets) == 1:
34 |         notebook = assets[0]
35 |         columns[1].markdown("Notebook:\\\n**%s**" % notebook["fileName"])
36 |     else:
37 |         notebook = columns[1].selectbox(
38 |             "Select a notebook:",
39 |             assets,
40 |             format_func=lambda asset: asset["fileName"]
41 |         )
42 | if notebook:
43 | bytes = experiment.get_asset(
44 | notebook["assetId"],
45 | return_type="binary"
46 | )
47 | with open("notebook.ipynb", "wb") as fp:
48 | fp.write(bytes)
49 |
50 | notebook_json = json.load(open("notebook.ipynb"))
51 | if len(notebook_json["cells"]) == 0:
52 | print("Notebook is empty")
53 | st.stop()
54 | if "metadata" in notebook_json and "widgets" in notebook_json["metadata"]:
55 | del notebook_json["metadata"]["widgets"]
56 | json.dump(notebook_json, open("notebook.ipynb", "w"))
57 |
58 | with open("notebook.ipynb", "r", encoding="utf-8") as f:
59 | nb = read(f, as_version=4)
60 | exporter = HTMLExporter()
61 | (output, resources) = exporter.from_notebook_node(nb)
62 | with open("fixed.html", "w", encoding="utf-8") as f:
63 | f.write(output)
64 | st.html("fixed.html")
65 | else:
66 | print("No notebooks available")
67 | else:
68 | print("No experiment available")
69 |
70 | st.markdown("""
71 |
84 | """,
85 | unsafe_allow_html=True
86 | )
87 |
--------------------------------------------------------------------------------
/panels/NotebookViewer/README.md:
--------------------------------------------------------------------------------
1 | ### NotebookViewer
2 |
3 | The `NotebookViewer` panel is used to render logged Notebooks, either from
4 | [colab.research.google.com](https://colab.research.google.com/) or
5 | any [Jupyter Notebook](https://jupyter.org/).
6 |
7 | Comet will automatically log your Colab notebooks, both as a full
8 | history of commands as `Code.ipynb`, and as a completed notebook
9 | with images and output. For Jupyter, you can enable this with
10 | [cometx config --auto-log-notebook yes](https://github.com/comet-ml/cometx/blob/main/README.md#cometx-config).
11 |
12 | [screenshot: notebookviewer.png]
13 | 
23 | #### Python Panel
24 |
25 | To include this panel from the github repo, use this code in a Custom Python Panel:
26 |
27 | ```
28 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/NotebookViewer/NotebookViewer.py
29 | ```
30 |
31 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/NotebookViewer/NotebookViewer.py) into a custom Python Panel.
32 |
33 | #### Resources
34 |
35 | * Example Comet Project: [www.comet.com/examples/foodchatbot-eval](https://www.comet.com/examples/foodchatbot-eval/efa8e134778a456dac2e1a85e1604e13)
36 | * Enable auto-logging of your notebooks in Jupyter:
37 | * `cometx config --auto-log-notebook yes`
38 | * [Documentation](https://github.com/comet-ml/cometx/blob/main/README.md#cometx-config)
39 | * Colab Notebooks are logged automatically
40 | * Additional Documentation:
41 | * [Using Comet in a Notebook](https://dev.comet.com/docs/v2/guides/experiment-management/jupyter-notebook/)
42 |
--------------------------------------------------------------------------------
/panels/NotebookViewer/notebookviewer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/NotebookViewer/notebookviewer.png
--------------------------------------------------------------------------------
/panels/OptimizerAnalysis/README.md:
--------------------------------------------------------------------------------
1 | ### OptimizerAnalysis
2 |
3 | The `OptimizerAnalysis` panel is used to explore results from an
4 | Optimizer Search or Sweep. The [Comet Optimizer](https://www.comet.com/docs/v2/guides/optimizer/quickstart/)
5 | is a Hyperparameter Optimization (HPO) tool used to dynamically find
6 | the set of hyperparameter values that minimizes or maximizes a
7 | particular metric. The OptimizerAnalysis panel, combined
8 | with the [Parallel Coordinate Chart](https://www.comet.com/docs/v2/guides/comet-ui/experiment-management/visualizations/parallel-coordinate-chart/)
9 | allows detailed exploration of the results from your grid search or
10 | sweep.
11 |
12 | [screenshot: optimizer-analysis.png]
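13 | 
14 | #### Example
15 | 
16 | A minimal sketch of running a sweep with the Comet Optimizer so that this
17 | panel has results to explore. The project name, parameter names, and the
18 | stand-in "loss" computation are illustrative only:
19 | 
20 | ```python
21 | from comet_ml import Optimizer
22 | 
23 | config = {
24 |     "algorithm": "bayes",
25 |     "parameters": {
26 |         "lr": {"type": "float", "min": 0.0001, "max": 0.1},
27 |         "hidden_size": {"type": "integer", "min": 16, "max": 256},
28 |     },
29 |     "spec": {"metric": "loss", "objective": "minimize"},
30 | }
31 | 
32 | opt = Optimizer(config)
33 | 
34 | for experiment in opt.get_experiments(project_name="optimizer-analysis-demo"):
35 |     lr = experiment.get_parameter("lr")
36 |     hidden_size = experiment.get_parameter("hidden_size")
37 |     loss = (lr - 0.01) ** 2 + 1.0 / hidden_size  # stand-in for a real training run
38 |     experiment.log_metric("loss", loss)
39 |     experiment.end()
40 | ```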
22 |
23 | #### Python Panel
24 |
25 | To include this panel from the github repo, use this code in a Custom Python Panel:
26 |
27 | ```
28 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/OptimizerAnalysis/OptimizerAnalysis.py
29 | ```
30 |
31 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/OptimizerAnalysis/OptimizerAnalysis.py) into a custom Python Panel.
32 |
33 | #### Resources
34 |
35 | * Example Comet Project: [www.comet.com/examples/comet-example-optimizer](https://www.comet.com/examples/comet-example-optimizer/view/SA4f2JEsWKDzMaMLbW1yUYlc1/panels)
36 | * [Optimizer Quickstart](https://www.comet.com/docs/v2/guides/optimizer/quickstart/)
37 | * [Running Optimizer in Parallel](https://www.comet.com/docs/v2/guides/optimizer/run-in-parallel/#how-to-parallelize-comet-optimizer)
38 | * [Command-line](https://www.comet.com/docs/v2/api-and-sdk/command-line/reference/#comet-optimize)
39 | * [Using 3rd-Party Optimizers](https://www.comet.com/docs/v2/guides/optimizer/third-party-optimizers/)
40 |
--------------------------------------------------------------------------------
/panels/OptimizerAnalysis/optimizer-analysis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/OptimizerAnalysis/optimizer-analysis.png
--------------------------------------------------------------------------------
/panels/SaveModelAsArtifact/README.md:
--------------------------------------------------------------------------------
1 | ### SaveModelAsArtifact
2 |
3 | This panel allows you to save a logged model as an Artifact. Adding
4 | metadata to the model when you log it allows you to examine,
5 | and save, checkpoints by epoch. You can either create a new Artifact
6 | or use an existing Artifact name.
7 |
8 | [screenshot: save-model-as-artifact.png]
9 | 
19 | #### Example
20 |
21 | This demo creates fake model checkpoints with fake metrics.
22 |
23 | Note the metadata on the model. That will be used in the custom panel.
24 |
25 | ```python
26 | import comet_ml
27 | import random
28 |
29 | comet_ml.login()
30 |
31 | experiment = comet_ml.start(
32 | project_name="model-to-artifact",
33 | )
34 |
35 | EPOCHS = 200
36 | MODEL_FILE = 'model-a.pkl'
37 |
38 | # Create a dummy checkpoint file:
39 | with open(MODEL_FILE, "w") as fp:
40 | fp.write("This is the model checkpoint")
41 |
42 | last_saved = 0
43 | for i in range(EPOCHS):
44 | experiment.log_metric('metric1', i*2 + random.randint(1, EPOCHS), epoch=i)
45 | experiment.log_metric('metric2', 5000-(i*2 + random.randint(1, EPOCHS)), epoch=i)
46 |     if i % 30 == 0:
47 |         last_saved = i
48 |         experiment.log_model(
49 |             name=f'model_chk_{i}',
50 |             file_or_folder=MODEL_FILE,
51 |             metadata={'epoch': i}
52 |         )
53 | 
54 | # Always log the model checkpoint for the last epoch
55 | if i != last_saved:
56 |     experiment.log_model(
57 |         name=f'model_chk_{i}',
58 |         file_or_folder=MODEL_FILE,
59 |         metadata={'epoch': i}
60 |     )
61 |
62 | experiment.end()
63 | ```
64 |
65 | #### Python Panel
66 |
67 | To include this panel from the github repo, use this code in a Custom Python Panel:
68 |
69 | ```
70 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/SaveModelAsArtifact/SaveModelAsArtifact.py
71 | ```
72 |
73 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/SaveModelAsArtifact/SaveModelAsArtifact.py) into a custom Python Panel.
74 |
75 | #### Resources
76 |
77 | * Example Comet Project: [www.comet.com/examples/comet-example-save-model-as-artifact](https://www.comet.com/examples/comet-example-save-model-as-artifact/01209266b842498595b71824be5d4ba2)
78 | * Documentation:
79 | * [Logging a model](https://www.comet.com/docs/v2/guides/experiment-management/log-data/models/)
80 | * [Artifacts](https://www.comet.com/docs/v2/guides/artifacts/using-artifacts/)
81 |
--------------------------------------------------------------------------------
/panels/SaveModelAsArtifact/save-model-as-artifact.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/SaveModelAsArtifact/save-model-as-artifact.png
--------------------------------------------------------------------------------
/panels/SmokeTest.py:
--------------------------------------------------------------------------------
1 | %pip install aitk
2 |
3 | import os
4 | from comet_ml import API
5 |
6 | st.markdown("## Smoke Tests")
7 |
8 | print("You are running compute engine %s" % os.environ["ENGINE_VERSION"])
9 |
10 |
11 | st.markdown("### 1. Using `%pip magic` to install additional packages?")
12 | print(":white_check_mark: Pass! `%pip` works")
13 |
14 | st.markdown("### 2. Import additional packages?")
15 |
16 | try:
17 | import aitk
18 | print(":white_check_mark: Pass! Can `import` %pip-installed packages")
19 | except Exception:
20 | print(":x: Failed! Not a current compute-engine image.")
21 |
22 | st.markdown("### 3. Import pre-installed packages?")
23 | try:
24 | import st_audio_spectrogram
25 | print(":white_check_mark: Pass!")
26 | except ImportError:
27 | print(":x: Failed! Pre-installed packages not found.")
28 |
29 |
30 | print("### 4. Test number of experiments in this project?")
31 | api = API()
32 | count = api.get_panel_experiments()
33 | print(f"There should be {len(count)} experiments selected in this project. Is this correct?")
34 |
35 | print("### 5. Test parallel imports")
36 |
37 | print("Add two copies of this Smoke Test panel to this view," +
38 | " and save the view.")
39 | print("Press the **Restart Session** button below and refresh your browser.")
40 | print("The two panels should load, first one, then the other.")
41 |
42 | if st.button("Restart Session"):
43 | os.system("pkill -9 python")
44 |
--------------------------------------------------------------------------------
/panels/SmokeTest/SmokeTest.py:
--------------------------------------------------------------------------------
1 | %pip install aitk
2 |
3 | import os
4 | from comet_ml import API
5 |
6 | st.markdown("## Smoke Tests")
7 |
8 | print("You are running compute engine %s" % os.environ["ENGINE_VERSION"])
9 |
10 |
11 | st.markdown("### 1. Using `%pip magic` to install additional packages?")
12 | print(":white_check_mark: Pass! `%pip` works")
13 |
14 | st.markdown("### 2. Import additional packages?")
15 |
16 | try:
17 | import aitk
18 | print(":white_check_mark: Pass! Can `import` %pip-installed packages")
19 | except Exception:
20 | print(":x: Failed! Not a current compute-engine image.")
21 |
22 | st.markdown("### 3. Import pre-installed packages?")
23 | try:
24 | import st_audio_spectrogram
25 | print(":white_check_mark: Pass!")
26 | except ImportError:
27 | print(":x: Failed! Pre-installed packages not found.")
28 |
29 |
30 | print("### 4. Test number of experiments in this project?")
31 | api = API()
32 | count = api.get_panel_experiments()
33 | print(f"There should be {len(count)} experiments selected in this project. Is this correct?")
34 |
35 | print("### 5. Test parallel imports")
36 |
37 | print("Add two copies of this Smoke Test panel to this view," +
38 | " and save the view.")
39 | print("Press the **Restart Session** button below and refresh your browser.")
40 | print("The two panels should load, first one, then the other.")
41 |
42 | if st.button("Restart Session"):
43 | os.system("pkill -9 python")
44 |
--------------------------------------------------------------------------------
/panels/TensorboardGroupViewer.py:
--------------------------------------------------------------------------------
1 | # Comet Python Panel for visualizing Tensorboard Data by Group
2 | # >>> experiment.log_other("Group", "GROUP-NAME")
3 | # >>> experiment.log_tensorflow_folder("./logs")
4 | # In the UI, group on "Group"
5 |
6 | # NOTE: there is only one Tensorboard Server for your
7 | # Python Panels; logs are shared across them
8 |
9 | from comet_ml import API
10 | import streamlit as st
11 | import streamlit.components.v1 as components
12 |
13 | import os
14 | import subprocess
15 | import psutil
16 | import time
17 | import zipfile
18 | import random
19 | import glob
20 | import shutil
21 |
22 | st.set_page_config(layout="wide")
23 |
24 | from streamlit_js_eval import get_page_location
25 |
26 | os.makedirs("./tb_cache", exist_ok=True)
27 | os.makedirs("./logs", exist_ok=True)
28 |
29 | DEBUG = False
30 |
31 | # Clear cache and downloads
32 | if DEBUG:
33 | if os.path.exists("./tb_cache"):
34 | shutil.rmtree("./tb_cache")
35 | if os.path.exists("./logs"):
36 | shutil.rmtree("./logs")
37 |
38 | api = API()
39 | experiments = api.get_panel_experiments()
40 |
41 | needs_refresh = False
42 | page_location = get_page_location()
43 | if page_location is not None:
44 | if True:
45 | column = st.columns([.7, .3])
46 | clear = column[1].checkbox("Clear previous logs", value=True)
47 | if column[0].button("Copy Selected Experiment Logs to Tensorboard Server", type="primary"):
48 | needs_refresh = True
49 | if clear and os.path.exists("./logs"):
50 | for filename in glob.glob("./logs/*"):
51 | shutil.move(filename, "./tb_cache/")
52 | bar = st.progress(0, "Downloading log files...")
53 | for i, experiment in enumerate(experiments):
54 | bar.progress(i/len(experiments), "Downloading log files...")
55 | if not os.path.exists("./logs/%s" % experiment.name):
56 | if os.path.exists("./tb_cache/%s" % experiment.name):
57 | if DEBUG: print("found in cache!")
58 | shutil.move(
59 | "./tb_cache/%s" % experiment.name,
60 | "./logs/%s" % experiment.name,
61 | )
62 | else:
63 | if DEBUG: print("downloading...")
64 | assets = experiment.get_asset_list("tensorflow-file")
65 | if assets:
66 | if DEBUG: print(assets[0]["fileName"])
67 | if assets[0]["fileName"].startswith("logs/"):
68 | experiment.download_tensorflow_folder("./")
69 | else:
70 | experiment.download_tensorflow_folder("./logs/")
71 | bar.empty()
72 |
73 | running = False
74 | for process in psutil.process_iter():
75 | try:
76 | if "tensorboard" in process.exe():
77 | running = True
78 | except:
79 | pass
80 | if not running:
81 | command = f"/home/stuser/.local/bin/tensorboard --logdir ./logs --port 6007".split()
82 | env = {} # {"PYTHONPATH": "/home/st_user/.local/lib/python3.9/site-packages"}
83 | process = subprocess.Popen(command, preexec_fn=os.setsid, env=env)
84 | needs_refresh = True
85 |
86 | if needs_refresh:
87 | # Allow to start/update
88 | seconds = 5
89 | bar = st.progress(0, "Updating Tensorboard...")
90 | for i in range(seconds):
91 | bar.progress(((i + 1) / seconds), "Updating Tensorboard...")
92 | time.sleep(1)
93 | bar.empty()
94 |
95 | path, _ = page_location["pathname"].split("/component")
96 | url = page_location["origin"] + path + f"/port/6007/server?x={random.random()}"
97 | st.markdown('<a href="%s" target="_blank">⛶ Open in tab</a>' % url, unsafe_allow_html=True)
98 | components.iframe(src=url, height=700)
99 |
--------------------------------------------------------------------------------
/panels/TensorboardGroupViewer/README.md:
--------------------------------------------------------------------------------
1 | ### TensorboardGroupViewer
2 |
3 | The `TensorboardGroupViewer` panel is used to visualize
4 | Tensorboard-logged items inside a Comet Custom Panel, by grouping. This
5 | panel specifically is used to see a group of experiments' log folders.
6 |
7 | [screenshot: tensorboard-group-viewer.png]
8 | 
17 | First, run your experiment, including writing and logging the
18 | Tensorboard log folder:
19 |
20 | ```python
21 | # Set up your experiment
22 | writer = tf.summary.create_file_writer("./logs/%s" % experiment.name)
23 | # Log items, including profile, to writer
24 | # Then, log the folder:
25 | experiment.log_tensorflow_folder("./logs")
26 | ```
27 |
28 | Next, in the Comet UI, use the "Group experiments" option on
29 | the left-hand side of the project view and select the group whose
30 | logs you'd like to see. Finally, click on "Copy Selected Experiment Logs to
31 | Tensorboard Server" in this panel.
32 |
33 | #### Example
34 |
35 | This example logs some dummy data to Tensorflow, and
36 | then logs the Tensorflow folder to Comet.
37 |
38 | ```python
39 | import random
40 | import os
41 | import shutil
42 | from comet_ml import Experiment
43 | import tensorflow as tf
44 |
45 | for e in range(12):
46 | experiment = Experiment(
47 | project_name="tensorboard-group"
48 | )
49 | if os.path.exists("./logs"):
50 | shutil.rmtree("./logs")
51 | writer = tf.summary.create_file_writer("./logs/%s" % experiment.name)
52 | with writer.as_default():
53 | current_loss = random.random()
54 | current_accuracy = random.random()
55 | for i in range(100):
56 | tf.summary.scalar("loss", current_loss, step=i)
57 | tf.summary.scalar("accuracy", current_accuracy, step=i)
58 | current_loss += random.random() * random.choice([1, -1])
59 | current_accuracy += random.random() * random.choice([1, -1])
60 | experiment.log_other("Group", "group-%s" % ((e % 3) + 1))
61 | experiment.log_tensorflow_folder("./logs")
62 | experiment.end()
63 | ```
64 |
65 | #### Python Panel
66 |
67 | To include this panel from the github repo, use this code in a Custom Python Panel:
68 |
69 | ```
70 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/TensorboardGroupViewer/TensorboardGroupViewer.py
71 | ```
72 |
73 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/TensorboardGroupViewer/TensorboardGroupViewer.py) into a custom Python Panel.
74 |
75 | #### How it works
76 |
77 | The Python panel will start a Tensorboard server and make available
78 | the logs from those experiments that are in the group.
79 |
80 | #### Resources
81 |
82 | * Example Comet Project: [www.comet.com/dsblank/tensorboard-group](https://www.comet.com/dsblank/tensorboard-group/view/0xR3Fm81cXMPlXp7pN63jeVgS/panels)
83 | * Documentation:
84 | * [Logging tensorflow folders](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment/#comet_ml.Experiment.log_tensorflow_folder)
85 | * [Automatic Tensorboard logging](https://www.comet.com/docs/v2/integrations/third-party-tools/tensorboard/#configure-comet-for-tensorboard)
86 | * [Download tensorboard folders](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/APIExperiment/#comet_ml.APIExperiment.download_tensorflow_folder)
87 |
--------------------------------------------------------------------------------
/panels/TensorboardGroupViewer/TensorboardGroupViewer.py:
--------------------------------------------------------------------------------
1 | # Comet Python Panel for visualizing Tensorboard Data by Group
2 | # >>> experiment.log_other("Group", "GROUP-NAME")
3 | # >>> experiment.log_tensorflow_folder("./logs")
4 | # In the UI, group on "Group"
5 |
6 | # NOTE: there is only one Tensorboard Server for your
7 | # Python Panels; logs are shared across them
8 |
9 | from comet_ml import API
10 | import streamlit as st
11 | import streamlit.components.v1 as components
12 |
13 | import os
14 | import subprocess
15 | import psutil
16 | import time
17 | import zipfile
18 | import random
19 | import glob
20 | import shutil
21 |
22 | st.set_page_config(layout="wide")
23 |
24 | from streamlit_js_eval import get_page_location
25 |
26 | os.makedirs("./tb_cache", exist_ok=True)
27 | os.makedirs("./logs", exist_ok=True)
28 |
29 | DEBUG = False
30 |
31 | # Clear cache and downloads
32 | if DEBUG:
33 | if os.path.exists("./tb_cache"):
34 | shutil.rmtree("./tb_cache")
35 | if os.path.exists("./logs"):
36 | shutil.rmtree("./logs")
37 |
38 | api = API()
39 | experiments = api.get_panel_experiments()
40 |
41 | needs_refresh = False
42 | page_location = get_page_location()
43 | if page_location is not None:
44 | if True:
45 | column = st.columns([.7, .3])
46 | clear = column[1].checkbox("Clear previous logs", value=True)
47 | if column[0].button("Copy Selected Experiment Logs to Tensorboard Server", type="primary"):
48 | needs_refresh = True
49 | if clear and os.path.exists("./logs"):
50 | for filename in glob.glob("./logs/*"):
51 | shutil.move(filename, "./tb_cache/")
52 | bar = st.progress(0, "Downloading log files...")
53 | for i, experiment in enumerate(experiments):
54 | bar.progress(i/len(experiments), "Downloading log files...")
55 | if not os.path.exists("./logs/%s" % experiment.name):
56 | if os.path.exists("./tb_cache/%s" % experiment.name):
57 | if DEBUG: print("found in cache!")
58 | shutil.move(
59 | "./tb_cache/%s" % experiment.name,
60 | "./logs/%s" % experiment.name,
61 | )
62 | else:
63 | if DEBUG: print("downloading...")
64 | assets = experiment.get_asset_list("tensorflow-file")
65 | if assets:
66 | if DEBUG: print(assets[0]["fileName"])
67 | if assets[0]["fileName"].startswith("logs/"):
68 | experiment.download_tensorflow_folder("./")
69 | else:
70 | experiment.download_tensorflow_folder("./logs/")
71 | bar.empty()
72 |
73 | running = False
74 | for process in psutil.process_iter():
75 | try:
76 | if "tensorboard" in process.exe():
77 | running = True
78 | except:
79 | pass
80 | if not running:
81 | command = f"/home/stuser/.local/bin/tensorboard --logdir ./logs --port 6007".split()
82 | env = {} # {"PYTHONPATH": "/home/st_user/.local/lib/python3.9/site-packages"}
83 | process = subprocess.Popen(command, preexec_fn=os.setsid, env=env)
84 | needs_refresh = True
85 |
86 | if needs_refresh:
87 | # Allow to start/update
88 | seconds = 5
89 | bar = st.progress(0, "Updating Tensorboard...")
90 | for i in range(seconds):
91 | bar.progress(((i + 1) / seconds), "Updating Tensorboard...")
92 | time.sleep(1)
93 | bar.empty()
94 |
95 | path, _ = page_location["pathname"].split("/component")
96 | url = page_location["origin"] + path + f"/port/6007/server?x={random.random()}"
97 | st.markdown('<a href="%s" target="_blank">⛶ Open in tab</a>' % url, unsafe_allow_html=True)
98 | components.iframe(src=url, height=700)
99 |
--------------------------------------------------------------------------------
/panels/TensorboardGroupViewer/tensorboard-group-viewer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/TensorboardGroupViewer/tensorboard-group-viewer.png
--------------------------------------------------------------------------------
/panels/TensorboardProfileViewer.py:
--------------------------------------------------------------------------------
1 | # Comet Python Panel for visualizing Tensorboard Profile (and other) Data
2 | # Log the tensorboard profile (and other data) with
3 | # >>> experiment.log_tensorflow_folder("./logs")
4 |
5 | # NOTE: there is only one Tensorboard Server for your
6 | # Python Panels; logs are shared across them
7 |
8 | from comet_ml import API
9 | import streamlit as st
10 | import streamlit.components.v1 as components
11 |
12 | import os
13 | import subprocess
14 | import psutil
15 | import time
16 | import zipfile
17 | import random
18 | import signal
19 |
20 | st.set_page_config(layout="wide")
21 |
22 | if "tensorboard_state" not in st.session_state:
23 | st.session_state["tensorboard_state"] = None
24 |
25 | from streamlit_js_eval import get_page_location
26 |
27 | api = API()
28 | experiments = api.get_panel_experiments()
29 |
30 | class EmptyExperiment:
31 | id = None
32 | name = ""
33 |
34 | experiments_with_log = [EmptyExperiment()]
35 | for experiment in experiments:
36 | asset_list = experiment.get_asset_list("tensorflow-file")
37 | if asset_list:
38 | experiments_with_log.append(experiment)
39 |
40 | if len(experiments_with_log) == 1:
41 | st.write("No experiments with log")
42 | st.stop()
43 | elif len(experiments_with_log) == 2:
44 | selected_experiment = experiments_with_log[1]
45 | else:
46 | selected_experiment = st.selectbox(
47 | "Select Experiment with log:",
48 | experiments_with_log,
49 | format_func=lambda aexp: aexp.name
50 | )
51 |
52 | if selected_experiment.id:
53 | page_location = get_page_location()
54 | if page_location is not None:
55 | if not os.path.exists("./%s" % selected_experiment.id):
56 | bar = st.progress(0, "Downloading log files...")
57 | selected_experiment.download_tensorflow_folder("./%s" % selected_experiment.id)
58 | bar.empty()
59 |
60 | selected_log = st.selectbox(
61 | "Select Profile to view:",
62 | [""] + sorted(os.listdir("./%s/logs/" % selected_experiment.id))
63 | )
64 | if selected_log:
65 | command = f"/home/stuser/.local/bin/tensorboard --logdir ./{selected_experiment.id}/logs/{selected_log} --port 6007".split()
66 | env = {} # {"PYTHONPATH": "/.local/lib/python3.9/site-packages"}
67 | if st.session_state["tensorboard_state"] != (selected_experiment.id, selected_log):
68 | #print("Killing the hard way...")
69 | for process in psutil.process_iter():
70 | try:
71 | if "tensorboard" in process.exe():
72 | os.killpg(os.getpgid(process.pid), signal.SIGTERM)
73 | os.killpg(os.getpgid(process.pid), signal.SIGKILL)
74 | except:
75 | print("Can't kill the server; continuing ...")
76 |
77 | process = subprocess.Popen(command, preexec_fn=os.setsid, env=env)
78 | st.session_state["tensorboard_state"] = (selected_experiment.id, selected_log)
79 |
80 | # Allow to start
81 | seconds = 5
82 | bar = st.progress(0, "Starting Tensorboard...")
83 | for i in range(seconds):
84 | bar.progress(((i + 1) / seconds), "Starting Tensorboard...")
85 | time.sleep(1)
86 | bar.empty()
87 |
88 | path, _ = page_location["pathname"].split("/component")
89 | url = page_location["origin"] + path + f"/port/6007/server?x={random.randint(1,1_000_000)}#profile"
90 |             st.markdown('<a href="%s" target="_blank">⛶ Open in tab</a>' % url, unsafe_allow_html=True)
91 | components.iframe(src=url, height=700)
92 |
--------------------------------------------------------------------------------
/panels/TensorboardProfileViewer/README.md:
--------------------------------------------------------------------------------
1 | ### TensorboardProfileViewer
2 |
3 | The `TensorboardProfileViewer` panel is used to visualize logged Tensorboard
4 | Profile data inside a Comet Custom Panel.
5 |
6 | [screenshot: tensorboard-profile-viewer.png]
16 |
17 | First, run your experiment, including writing and logging the
18 | Tensorboard logdir:
19 |
20 | ```python
21 | # Set up your experiment and callbacks:
22 | tboard_callback = tf.keras.callbacks.TensorBoard(
23 | log_dir=logs,
24 | histogram_freq=1,
25 | profile_batch='500,520'
26 | )
27 | model.fit(
28 | ds_train,
29 | epochs=2,
30 | validation_data=ds_test,
31 | callbacks = [tboard_callback]
32 | )
33 | # Then, log the folder:
34 | experiment.log_tensorflow_folder("./logs")
35 | ```
36 |
37 | Finally click on "Select Experiment with log:" in this panel.
38 |
39 | #### Example
40 |
41 | This example logs some dummy data to Tensorflow, and
42 | then logs the Tensorflow folder to Comet.
43 |
44 | ```python
45 | import comet_ml
46 | import tensorflow as tf
47 | import tensorflow_datasets as tfds
48 | from datetime import datetime
49 | from packaging import version
50 | import os
51 |
52 | comet_ml.login()
53 | tfds.disable_progress_bar()
54 |
55 | experiment = comet_ml.Experiment(project_name="tensorboard-profile")
56 | device_name = tf.test.gpu_device_name()
57 | print('Found GPU at: {}'.format(device_name))
58 |
59 | (ds_train, ds_test), ds_info = tfds.load(
60 | 'mnist',
61 | split=['train', 'test'],
62 | shuffle_files=True,
63 | as_supervised=True,
64 | with_info=True,
65 | )
66 |
67 | def normalize_img(image, label):
68 | """Normalizes images: `uint8` -> `float32`."""
69 | return tf.cast(image, tf.float32) / 255., label
70 |
71 | ds_train = ds_train.map(normalize_img)
72 | ds_train = ds_train.batch(128)
73 |
74 | ds_test = ds_test.map(normalize_img)
75 | ds_test = ds_test.batch(128)
76 |
77 | model = tf.keras.models.Sequential([
78 | tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
79 | tf.keras.layers.Dense(128,activation='relu'),
80 | tf.keras.layers.Dense(10, activation='softmax')
81 | ])
82 | model.compile(
83 | loss='sparse_categorical_crossentropy',
84 | optimizer=tf.keras.optimizers.Adam(0.001),
85 | metrics=['accuracy']
86 | )
87 |
88 | # Create a TensorBoard callback
89 | logs = "logs/" + datetime.now().strftime("%Y%m%d-%H%M%S")
90 |
91 | tboard_callback = tf.keras.callbacks.TensorBoard(
92 | log_dir=logs,
93 | histogram_freq=1,
94 | profile_batch='500,520'
95 | )
96 | model.fit(
97 | ds_train,
98 | epochs=2,
99 | validation_data=ds_test,
100 | callbacks = [tboard_callback]
101 | )
102 |
103 | experiment.log_tensorflow_folder("./logs")
104 | experiment.end()
105 | ```
106 |
107 | #### Python Panel
108 |
109 | To include this panel from the github repo, use this code in a Custom Python Panel:
110 |
111 | ```
112 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/TensorboardProfileViewer/TensorboardProfileViewer.py
113 | ```
114 |
115 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/TensorboardProfileViewer/TensorboardProfileViewer.py) into a custom Python Panel.
116 |
117 | #### How it works
118 |
119 | The Python panel will start a Tensorboard server and make available
120 | the logs from the experiment that is selected.
121 |
122 | #### Resources
123 |
124 | * Example Comet Project: [www.comet.com/dsblank/tensorboard-profile](https://www.comet.com/dsblank/tensorboard-profile/)
125 | * Documentation:
126 | * [Logging tensorflow folders](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment/#comet_ml.Experiment.log_tensorflow_folder)
127 | * [Automatic Tensorboard logging](https://www.comet.com/docs/v2/integrations/third-party-tools/tensorboard/#configure-comet-for-tensorboard)
128 | * [Download tensorboard folders](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/APIExperiment/#comet_ml.APIExperiment.download_tensorflow_folder)
129 |
--------------------------------------------------------------------------------
/panels/TensorboardProfileViewer/TensorboardProfileViewer.py:
--------------------------------------------------------------------------------
1 | # Comet Python Panel for visualizing Tensorboard Profile (and other) Data
2 | # Log the tensorboard profile (and other data) with
3 | # >>> experiment.log_tensorflow_folder("./logs")
4 |
5 | # NOTE: there is only one Tensorboard Server for your
6 | # Python Panels; logs are shared across them
7 |
8 | from comet_ml import API
9 | import streamlit as st
10 | import streamlit.components.v1 as components
11 |
12 | import os
13 | import subprocess
14 | import psutil
15 | import time
16 | import zipfile
17 | import random
18 | import signal
19 |
20 | if "tensorboard_state" not in st.session_state:
21 | st.session_state["tensorboard_state"] = None
22 |
23 | from streamlit_js_eval import get_page_location
24 |
25 | st.set_page_config(layout="wide")
26 |
27 | api = API()
28 | experiments = api.get_panel_experiments()
29 |
30 | class EmptyExperiment:
31 | id = None
32 | name = ""
33 |
34 | experiments_with_log = [EmptyExperiment()]
35 | for experiment in experiments:
36 | asset_list = experiment.get_asset_list("tensorflow-file")
37 | if asset_list:
38 | experiments_with_log.append(experiment)
39 |
40 | if len(experiments_with_log) == 1:
41 | st.write("No experiments with log")
42 | st.stop()
43 | elif len(experiments_with_log) == 2:
44 | selected_experiment = experiments_with_log[1]
45 | else:
46 | names = [exp.name for exp in experiments_with_log]
47 | selected_experiment_name = st.selectbox(
48 | "Select Experiment with log:",
49 | names,
50 | )
51 | selected_experiment = [exp for exp in experiments_with_log if exp.name == selected_experiment_name][0]
52 |
53 |
54 | if selected_experiment.id:
55 | page_location = get_page_location()
56 | if page_location is not None:
57 | if not os.path.exists("./%s" % selected_experiment.id):
58 | bar = st.progress(0, "Downloading log files...")
59 | selected_experiment.download_tensorflow_folder("./%s" % selected_experiment.id)
60 | bar.empty()
61 |
62 | selected_log = st.selectbox(
63 | "Select Profile to view:",
64 | [""] + sorted(os.listdir("./%s/logs/" % selected_experiment.id))
65 | )
66 | if selected_log:
67 | command = f"/home/stuser/.local/bin/tensorboard --logdir ./{selected_experiment.id}/logs/{selected_log} --port 6007".split()
68 | env = {} # {"PYTHONPATH": "/.local/lib/python3.9/site-packages"}
69 | if st.session_state["tensorboard_state"] != (selected_experiment.id, selected_log):
70 | #print("Killing the hard way...")
71 | for process in psutil.process_iter():
72 | try:
73 | if "tensorboard" in process.exe():
74 | os.killpg(os.getpgid(process.pid), signal.SIGTERM)
75 | os.killpg(os.getpgid(process.pid), signal.SIGKILL)
76 | except:
77 | print("Can't kill the server; continuing ...")
78 |
79 | process = subprocess.Popen(command, preexec_fn=os.setsid, env=env)
80 | st.session_state["tensorboard_state"] = (selected_experiment.id, selected_log)
81 |
82 | # Allow to start
83 | seconds = 5
84 | bar = st.progress(0, "Starting Tensorboard...")
85 | for i in range(seconds):
86 | bar.progress(((i + 1) / seconds), "Starting Tensorboard...")
87 | time.sleep(1)
88 | bar.empty()
89 |
90 | path, _ = page_location["pathname"].split("/component")
91 | url = page_location["origin"] + path + f"/port/6007/server?x={random.randint(1,1_000_000)}#profile"
92 |             st.markdown('<a href="%s" target="_blank">⛶ Open in tab</a>' % url, unsafe_allow_html=True)
93 | components.iframe(src=url, height=700)
94 |
--------------------------------------------------------------------------------
/panels/TensorboardProfileViewer/tensorboard-profile-viewer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/TensorboardProfileViewer/tensorboard-profile-viewer.png
--------------------------------------------------------------------------------
/panels/TotalFidelityMetricPlot/README.md:
--------------------------------------------------------------------------------
1 | ### TotalFidelityMetricPlot
2 |
3 | The `TotalFidelityMetricPlot` panel is used to plot Total Fidelity Metrics --- metrics that are not sampled in any way.
4 |
5 | You can have your Comet Administrator turn on "Store metrics without sampling" in the `Admin Dashboard` => `Organization settings`.
6 |
7 | [screenshots: organization-settings.png, totalfidelity.png]
8 | 
22 | #### Sample Code
23 |
24 | Once the setting "Store metrics without sampling" is on, then you merely log metrics as usual:
25 |
26 | ```python
27 | ...
28 | experiment.log_metric("loss", 1.23, step=23)
29 | ...
30 | ```
31 |
32 | To retrieve Total Fidelity metrics, you use the method:
33 |
34 | ```python
35 | df = APIExperiment.get_metric_total_df("loss")
36 | ```
37 |
38 | The returned Pandas `DataFrame` contains the following columns:
39 |
40 | * value - the value of the metric
41 | * timestep - the time of the metric
42 | * step - the step that the metric was logged at
43 | * epoch - the epoch that the metric was logged at
44 | * datetime - the timestamp as a datetime
45 | * duration - the duration time between this row and the previous
46 |
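47 | For example, a minimal sketch of a panel that plots these DataFrames
48 | (assumes a metric named "loss" was logged while the setting was enabled):
49 | 
50 | ```python
51 | from comet_ml import API, ui
52 | import plotly.graph_objects as go
53 | 
54 | api = API()
55 | fig = go.Figure()
56 | for experiment in api.get_panel_experiments():
57 |     df = experiment.get_metric_total_df("loss")
58 |     if df is not None and len(df) > 0:
59 |         fig.add_trace(go.Scatter(x=df["step"], y=df["value"], name=experiment.name))
60 | ui.display(fig)
61 | ```
62 | 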
47 | #### Python Panel
48 |
49 | To include this panel from the github repo, use this code in a Custom Python Panel:
50 |
51 | ```
52 | %include https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/TotalFidelityMetricPlot/TotalFidelityMetricPlot.py
53 | ```
54 |
55 | Or, you can simply [copy the code](https://raw.githubusercontent.com/comet-ml/comet-examples/refs/heads/master/panels/TotalFidelityMetricPlot/TotalFidelityMetricPlot.py) into a custom Python Panel.
56 |
57 | #### Resources
58 |
59 | * Example Comet Project: [www.comet.com/examples/comet-example-total-fidelity-metrics](https://www.comet.com/examples/comet-example-total-fidelity-metrics/view/PQkAHY0HubucyIAvFX9sKF9jI/panels)
60 | * Documentation:
61 | * [Logging metrics](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment/#comet_ml.Experiment.log_metric)
62 | * [Retrieving Total Fidelity Metrics](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/APIExperiment/#comet_ml.APIExperiment.get_metric_total_df)
63 |
--------------------------------------------------------------------------------
/panels/TotalFidelityMetricPlot/organization-settings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/TotalFidelityMetricPlot/organization-settings.png
--------------------------------------------------------------------------------
/panels/TotalFidelityMetricPlot/totalfidelity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/panels/TotalFidelityMetricPlot/totalfidelity.png
--------------------------------------------------------------------------------
/panels/make.py:
--------------------------------------------------------------------------------
1 | # Build the top-level panels/README.md by copying each panel's README.md
2 | # up to (but not including) its first "####" heading, followed by a link
3 | # back to the full panel README.
4 | import glob
5 |
6 | with open("README.md", "w") as fp:
7 |     for readme_filename in sorted(glob.glob("*/README.md")):
8 |         panel_name, _ = readme_filename.split("/")
9 |         for line in open(readme_filename):
10 |             if line.startswith("####"):
11 |                 fp.write(f"\nFor more information, see [the panel README.md]({panel_name}/README.md)\n")
12 |                 break
13 |             fp.write(line)
11 |
--------------------------------------------------------------------------------
/pytorch/online-pytorch-lightning-apex-example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Copyright (C) 2018-2020 Nvidia
3 | # Released under BSD-3 license https://github.com/NVIDIA/apex/blob/master/LICENSE
4 |
5 | from comet_ml import Experiment
6 |
7 | import torch
8 | from apex import amp
9 |
10 |
11 | def run():
12 |     experiment = Experiment()
13 |
14 |     torch.cuda.set_device("cuda:0")
15 |
16 |     torch.backends.cudnn.benchmark = True
17 |
18 |     N, D_in, D_out = 64, 1024, 16
19 |
20 |     # Generate a batch of "fake input data" and "fake target data";
21 |     # the "training loop" just reuses this fake batch over and over.
22 |     # https://github.com/NVIDIA/apex/tree/master/examples/imagenet provides a more
23 |     # realistic example of distributed data sampling for both training and validation.
24 |     x = torch.randn(N, D_in, device="cuda")
25 |     y = torch.randn(N, D_out, device="cuda")
26 |
27 |     model = torch.nn.Linear(D_in, D_out).cuda()
28 |     optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
29 |
30 |     # Wrap the model and optimizer for mixed-precision training with Apex AMP
31 |     model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
32 |
33 |     loss_fn = torch.nn.MSELoss()
34 |
35 |     for t in range(5000):
36 |         optimizer.zero_grad()
37 |         y_pred = model(x)
38 |         loss = loss_fn(y_pred, y)
39 |         with amp.scale_loss(loss, optimizer) as scaled_loss:
40 |             scaled_loss.backward()
41 |         optimizer.step()
42 |
43 |     print("final loss =", loss.item())
44 |     experiment.log_metric("final_loss", loss.item())
45 |
46 |
47 | if __name__ == "__main__":
48 |     run()
48 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Comet for Machine Learning Experiment Management
4 | **Our Mission:** Comet is doing for ML what GitHub did for code. We allow data science teams to automagically track their datasets, code changes, experimentation history, and production models, creating efficiency, transparency, and reproducibility.
5 |
6 | We all strive to be data-driven, and yet every day valuable experiment results are lost and forgotten. Comet provides a dead simple way of fixing that. It works with any workflow, any ML task, any machine, and any piece of code.
7 |
8 | ## Examples Repository
9 |
10 | This repository contains examples of using Comet with many machine learning Python libraries, including fastai, torch, sklearn, chainer, caffe, keras, tensorflow, and mxnet, as well as in Jupyter notebooks and with pure Python.
11 |
12 | If you don't see something you need, just let us know! See contact methods below.
13 |
14 | ## Documentation
15 | [![PyPI version](https://badge.fury.io/py/comet-ml.svg)](https://badge.fury.io/py/comet-ml)
16 |
17 | Full documentation and additional training examples are available at https://www.comet.com/docs/v2/
18 |
19 | ## Installation
20 |
21 | - [Sign up for free!](https://www.comet.com/signup)
22 |
23 | - **Install Comet from PyPI:**
24 |
25 | ```sh
26 | pip install comet_ml
27 | ```
28 | The Comet Python SDK is compatible with __Python 3.5-3.13__.
29 |
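30 | - **Quick check (a minimal sketch):** once installed, you can create an experiment and log values. The API key, project, and workspace below are placeholders; replace them with your own.
31 |
32 | ```python
33 | from comet_ml import Experiment
34 |
35 | # Create an experiment (use your own API key, project, and workspace)
36 | experiment = Experiment(
37 |     api_key="YOUR_API_KEY",
38 |     project_name="quickstart",
39 |     workspace="YOUR_WORKSPACE",
40 | )
41 |
42 | # Log a hyperparameter and a metric
43 | experiment.log_parameter("learning_rate", 0.001)
44 | experiment.log_metric("accuracy", 0.92, step=1)
45 |
46 | experiment.end()
47 | ```
48 |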
30 | ## Tutorials + Examples
31 |
32 | - [fastai](https://github.com/comet-ml/comet-examples/tree/master/integrations/model-training/fastai/)
33 | - [keras](https://github.com/comet-ml/comet-examples/tree/master/keras)
34 | - [pytorch](https://github.com/comet-ml/comet-examples/tree/master/pytorch)
35 | - [scikit](https://github.com/comet-ml/comet-examples/tree/master/integrations/model-training/scikit-learn)
36 | - [tensorflow](https://github.com/comet-ml/comet-examples/tree/master/tensorflow)
37 |
38 | ## Support
39 | Have questions? We have answers -
40 | - Email us at
41 | - For the fastest response, ping us on [Slack](https://chat.comet.com/)
42 |
43 | **Want to request a feature?**
44 | We take feature requests through GitHub at: https://github.com/comet-ml/issue-tracking
45 |
46 | ## Feature Spotlight
47 | Check out new product features and updates through our [Release Notes](https://www.comet.com/docs/v2/api-and-sdk/python-sdk/releases/). Also check out our [blog](https://www.comet.com/site/blog/).
48 |
--------------------------------------------------------------------------------
/resources/Loan_application_1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/Loan_application_1.pdf
--------------------------------------------------------------------------------
/resources/Loan_application_2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/Loan_application_2.pdf
--------------------------------------------------------------------------------
/resources/harvard.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/harvard.wav
--------------------------------------------------------------------------------
/resources/harvard_noise.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/harvard_noise.wav
--------------------------------------------------------------------------------
/resources/hello.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/hello.wav
--------------------------------------------------------------------------------
/resources/readme.md:
--------------------------------------------------------------------------------
1 | Public resources for scripts and notebooks
2 |
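3 | As a minimal sketch of how these files might be used, a script could fetch a resource by its raw GitHub URL and log it to Comet (the URL below points at the `hello.wav` file in this directory):
4 |
5 | ```python
6 | import urllib.request
7 |
8 | from comet_ml import Experiment
9 |
10 | # Download a public resource by its raw GitHub URL
11 | url = (
12 |     "https://raw.githubusercontent.com/comet-ml/comet-examples/"
13 |     "6d5d899d5472a3f252f9393785ba1ae343130422/resources/hello.wav"
14 | )
15 | urllib.request.urlretrieve(url, "hello.wav")
16 |
17 | # Log the audio file to a Comet experiment
18 | experiment = Experiment()
19 | experiment.log_audio("hello.wav")
20 | experiment.end()
21 | ```
22 |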
--------------------------------------------------------------------------------
/resources/workers1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/workers1.jpeg
--------------------------------------------------------------------------------
/resources/workers2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/workers2.jpeg
--------------------------------------------------------------------------------
/resources/workers3.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/workers3.jpeg
--------------------------------------------------------------------------------
/resources/workers4.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/workers4.jpeg
--------------------------------------------------------------------------------
/resources/workers5.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-ml/comet-examples/6d5d899d5472a3f252f9393785ba1ae343130422/resources/workers5.jpeg
--------------------------------------------------------------------------------
/xgboost/requirements-py37.txt:
--------------------------------------------------------------------------------
1 | # python3.7
2 | scikit-learn
3 | xgboost
4 | pandas
5 |
--------------------------------------------------------------------------------