├── 2019 ├── 0-scripts │ └── refresh.ps1 ├── 1-vscode-django-docker │ ├── README.md │ ├── hello │ │ ├── __init__.py │ │ ├── admin.py │ │ ├── apps.py │ │ ├── forms.py │ │ ├── migrations │ │ │ ├── 0001_initial.py │ │ │ └── __init__.py │ │ ├── models.py │ │ ├── static │ │ │ └── hello │ │ │ │ └── site.css │ │ ├── templates │ │ │ └── hello │ │ │ │ ├── about.html │ │ │ │ ├── contact.html │ │ │ │ ├── hello_there.html │ │ │ │ ├── home.html │ │ │ │ ├── layout.html │ │ │ │ └── log_message.html │ │ ├── tests.py │ │ ├── urls.py │ │ └── views.py │ ├── manage.py │ ├── requirements.txt │ ├── uwsgi.ini │ └── web_project │ │ ├── __init__.py │ │ ├── settings.py │ │ ├── urls.py │ │ └── wsgi.py ├── 2a-vscode-flask-dev-container │ ├── README.md │ ├── app.py │ └── requirements.txt ├── 2b-vscode-django-postgres-dev-container │ └── README.md ├── 3-azure-cli-flask-registry-container-instances │ ├── README.md │ ├── dev.Dockerfile │ └── prod.Dockerfile ├── 4-azure-functions-python │ ├── LAB_SETUP.md │ ├── New-DevEnvironment.ps1 │ ├── README.md │ └── python_azure_func │ │ └── prime_calculator │ │ ├── .funcignore │ │ ├── .gitignore │ │ ├── .vscode │ │ └── extensions.json │ │ ├── host.json │ │ ├── is_prime │ │ ├── __init__.py │ │ ├── function.json │ │ └── sample.dat │ │ └── requirements.txt ├── 5-jupyter-azure-cognitive-services-face │ ├── .gitignore │ ├── .vscode │ │ └── settings.json │ ├── DetectingEmotionWithAzureCognitiveServices.ipynb │ ├── LAB_SETUP.md │ ├── README.md │ ├── key_handler │ │ └── __init__.py │ └── requirements.txt ├── 6-azureml-movie-recommendation │ ├── .library.json │ ├── LAB_SETUP.md │ ├── README.md │ ├── aml_data │ │ ├── movielens_100k_data.pkl │ │ └── movielens_1m_data.pkl │ ├── config.json │ ├── deploy_with_azureml.ipynb │ ├── existing-widget.ipynb │ ├── movielens-sar │ │ ├── .amlignore │ │ ├── .azureml │ │ │ ├── conda_dependencies.yml │ │ │ ├── config.json │ │ │ ├── docker.runconfig │ │ │ └── local.runconfig │ │ ├── aml_config │ │ │ ├── conda_dependencies.yml │ │ │ ├── docker.runconfig │ │ │ ├── local.runconfig │ │ │ └── project.json │ │ ├── reco_utils │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── azureml │ │ │ │ ├── __init__.py │ │ │ │ ├── svd_training.py │ │ │ │ └── wide_deep.py │ │ │ ├── common │ │ │ │ ├── __init__.py │ │ │ │ ├── constants.py │ │ │ │ ├── general_utils.py │ │ │ │ ├── gpu_utils.py │ │ │ │ ├── notebook_memory_management.py │ │ │ │ ├── notebook_utils.py │ │ │ │ ├── python_utils.py │ │ │ │ ├── spark_utils.py │ │ │ │ ├── tf_utils.py │ │ │ │ └── timer.py │ │ │ ├── dataset │ │ │ │ ├── __init__.py │ │ │ │ ├── cosmos_cli.py │ │ │ │ ├── criteo.py │ │ │ │ ├── download_utils.py │ │ │ │ ├── movielens.py │ │ │ │ ├── pandas_df_utils.py │ │ │ │ ├── python_splitters.py │ │ │ │ ├── spark_splitters.py │ │ │ │ ├── sparse.py │ │ │ │ └── split_utils.py │ │ │ ├── evaluation │ │ │ │ ├── __init__.py │ │ │ │ ├── parameter_sweep.py │ │ │ │ ├── python_evaluation.py │ │ │ │ └── spark_evaluation.py │ │ │ └── recommender │ │ │ │ ├── __init__.py │ │ │ │ └── sar │ │ │ │ ├── __init__.py │ │ │ │ └── sar_singlenode.py │ │ └── train.py │ ├── movielens_sar_model.pkl │ ├── myenv.yml │ ├── reco_utils │ │ ├── README.md │ │ ├── __init__.py │ │ ├── azureml │ │ │ ├── __init__.py │ │ │ ├── aks_utils.py │ │ │ ├── azureml_utils.py │ │ │ ├── svd_training.py │ │ │ └── wide_deep.py │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── constants.py │ │ │ ├── general_utils.py │ │ │ ├── gpu_utils.py │ │ │ ├── notebook_memory_management.py │ │ │ ├── notebook_utils.py │ │ │ ├── python_utils.py │ │ │ ├── spark_utils.py │ │ │ ├── 
tf_utils.py │ │ │ └── timer.py │ │ ├── dataset │ │ │ ├── __init__.py │ │ │ ├── cosmos_cli.py │ │ │ ├── criteo.py │ │ │ ├── download_utils.py │ │ │ ├── movielens.py │ │ │ ├── pandas_df_utils.py │ │ │ ├── python_splitters.py │ │ │ ├── spark_splitters.py │ │ │ ├── sparse.py │ │ │ └── split_utils.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── parameter_sweep.py │ │ │ ├── python_evaluation.py │ │ │ └── spark_evaluation.py │ │ └── recommender │ │ │ ├── __init__.py │ │ │ └── sar │ │ │ ├── __init__.py │ │ │ └── sar_singlenode.py │ ├── sar_movielens_with_azureml.ipynb │ ├── score.py │ └── widget.ipynb ├── 7-azure-pipelines-ci │ ├── README.md │ └── images │ │ ├── 1-fork.png │ │ ├── 1-forking.png │ │ ├── 1-selectaccount.png │ │ ├── 1-signin.png │ │ ├── 2-completeorder.png │ │ ├── 2-confirm.png │ │ ├── 2-installfree.png │ │ ├── 2-marketplace.png │ │ ├── 2-search.png │ │ ├── 2-select.png │ │ ├── 2-setup.png │ │ ├── 3-authorize.png │ │ ├── 3-newaccount.png │ │ ├── 4-edityaml.png │ │ ├── 4-python.png │ │ ├── 4-saveandrun.png │ │ ├── 4-selectrepo.png │ │ ├── 5-paralleljobs.png │ │ ├── 5-success.png │ │ ├── 5-tests.png │ │ ├── 6-builddone.png │ │ ├── 6-buildqueued.png │ │ ├── 6-createpr.png │ │ ├── 6-editreadme.png │ │ ├── 6-gotogithub.png │ │ ├── 6-proposechange.png │ │ └── 6-readme.png ├── 8-azure-service-bus-messaging │ ├── README.md │ ├── app.py │ ├── requirements.txt │ └── setup.sh ├── README.md ├── REQUIREMENTS.md └── requirements.txt ├── 2020 ├── 9-vscodespaces │ ├── README.md │ └── RunFileButton.png └── README.md ├── 2021 ├── 9-azure-cognitive-services │ └── README.md ├── 9-azure-machine-learning │ ├── .cloud │ │ └── deploy.json │ ├── .gitignore │ ├── README.md │ ├── config.json │ ├── images │ │ ├── exp_log_track.png │ │ ├── ml_pane.png │ │ ├── studio_metrics_track.png │ │ └── ws_resources.png │ ├── run_experiment.py │ ├── train.py │ └── utils.py ├── 9-azure-web-apps │ ├── .gitignore │ ├── README.md │ ├── appservicenumberfacts │ │ ├── __init__.py │ │ ├── asgi.py │ │ ├── settings.py │ │ ├── urls.py │ │ └── wsgi.py │ ├── manage.py │ ├── randnum │ │ ├── __init__.py │ │ ├── admin.py │ │ ├── apps.py │ │ ├── migrations │ │ │ └── __init__.py │ │ ├── models.py │ │ ├── static │ │ │ └── css │ │ │ │ └── index.css │ │ ├── templates │ │ │ └── randnum │ │ │ │ └── index.html │ │ ├── tests.py │ │ ├── urls.py │ │ └── views.py │ └── requirements.txt ├── 9-vscode-django-postgres-dev-container │ └── README.md ├── 9-windows-subsystem-for-linux │ └── README.md └── README.md ├── 2022 ├── containerapps-github-python │ └── README.md ├── containerapps-python-fastapi │ └── README.md └── python-vscode-dev │ └── README.md ├── .github ├── ISSUE_TEMPLATE.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── 01-azure-functions-python-vscode ├── .funcignore ├── .gitignore ├── README.md ├── host.json ├── proxies.json ├── requirements.txt └── sentiment │ ├── __init__.py │ ├── function.json │ └── sample.dat ├── 01-dapr ├── README.md ├── az-components │ ├── secretstore-local-env.yml │ ├── secretstore-local-file.yml │ └── state-azure-blobstorage.yml ├── az-secrets.json ├── help.txt ├── help_dapr_http_get_state.txt ├── main.py ├── my-components │ ├── localSecretStore.yaml │ ├── pubsub.yaml │ └── statestore.yaml ├── my-secrets.json └── requirements.txt ├── 01-msticpy └── README.md ├── 01-postgres-citus ├── README-ADVANCED.md └── README.md ├── 01-postgres ├── .gitignore ├── README.md ├── data.csv └── pg-lab.py ├── CONTRIBUTING.md ├── LICENSE.md └── README.md /.github/ISSUE_TEMPLATE.md: 
-------------------------------------------------------------------------------- 1 | 4 | > Please provide us with the following information: 5 | > --------------------------------------------------------------- 6 | 7 | ### This issue is for a: (mark with an `x`) 8 | ``` 9 | - [ ] bug report -> please search issues before submitting 10 | - [ ] feature request 11 | - [ ] documentation issue or request 12 | - [ ] regression (a behavior that used to work and stopped in a new release) 13 | ``` 14 | 15 | ### Minimal steps to reproduce 16 | > 17 | 18 | ### Any log messages given by the failure 19 | > 20 | 21 | ### Expected/desired behavior 22 | > 23 | 24 | ### OS and Version? 25 | > Windows 7, 8 or 10. Linux (which distribution). macOS (Yosemite? El Capitan? Sierra?) 26 | 27 | ### Versions 28 | > 29 | 30 | ### Mention any other details that might be useful 31 | 32 | > --------------------------------------------------------------- 33 | > Thanks! We'll be in touch soon. 34 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Purpose 2 | 3 | * ... 4 | 5 | ## Does this introduce a breaking change? 6 | 7 | ``` 8 | [ ] Yes 9 | [ ] No 10 | ``` 11 | 12 | ## Pull Request Type 13 | What kind of change does this Pull Request introduce? 14 | 15 | 16 | ``` 17 | [ ] Bugfix 18 | [ ] Feature 19 | [ ] Code style update (formatting, local variables) 20 | [ ] Refactoring (no functional changes, no api changes) 21 | [ ] Documentation content changes 22 | [ ] Other... Please describe: 23 | ``` 24 | 25 | ## How to Test 26 | * Get the code 27 | 28 | ``` 29 | git clone [repo-address] 30 | cd [repo-name] 31 | git checkout [branch-name] 32 | npm install 33 | ``` 34 | 35 | * Test the code 36 | 37 | ``` 38 | ``` 39 | 40 | ## What to Check 41 | Verify that the following are valid 42 | * ... 43 | 44 | ## Other Information 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python .gitignore from https://github.com/github/gitignore/blob/master/Python.gitignore 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.pyc 6 | 7 | # Cake - Uncomment if you are using it 8 | # tools/** 9 | # !tools/packages.config 10 | 11 | # Tabs Studio 12 | *.tss 13 | 14 | # Telerik's JustMock configuration file 15 | *.jmconfig 16 | 17 | # BizTalk build output 18 | *.btp.cs 19 | *.btm.cs 20 | *.odx.cs 21 | *.xsd.cs 22 | 23 | # OpenCover UI analysis results 24 | OpenCover/ 25 | 26 | # Azure Stream Analytics local run output 27 | ASALocalRun/ 28 | 29 | # MSBuild Binary and Structured Log 30 | *.binlog 31 | 32 | # NVidia Nsight GPU debugger configuration file 33 | *.nvuser 34 | 35 | # MFractors (Xamarin productivity tool) working folder 36 | .mfractor/ 37 | 38 | # Added by Theckert - avoids having hundreds of python files in the virtual environments getting checked in. 
39 | .env/ 40 | 41 | 42 | *.py[cod] 43 | *$py.class 44 | 45 | # C extensions 46 | *.so 47 | 48 | # Distribution / packaging 49 | .Python 50 | build/ 51 | develop-eggs/ 52 | dist/ 53 | downloads/ 54 | eggs/ 55 | .eggs/ 56 | lib/ 57 | lib64/ 58 | parts/ 59 | sdist/ 60 | var/ 61 | wheels/ 62 | pip-wheel-metadata/ 63 | share/python-wheels/ 64 | *.egg-info/ 65 | .installed.cfg 66 | *.egg 67 | MANIFEST 68 | 69 | # PyInstaller 70 | # Usually these files are written by a python script from a template 71 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 72 | *.manifest 73 | *.spec 74 | 75 | # Installer logs 76 | pip-log.txt 77 | pip-delete-this-directory.txt 78 | 79 | # Unit test / coverage reports 80 | htmlcov/ 81 | .tox/ 82 | .nox/ 83 | .coverage 84 | .coverage.* 85 | .cache 86 | nosetests.xml 87 | coverage.xml 88 | *.cover 89 | .hypothesis/ 90 | .pytest_cache/ 91 | 92 | # Translations 93 | *.mo 94 | *.pot 95 | 96 | # Django stuff: 97 | *.log 98 | local_settings.py 99 | db.sqlite3 100 | 101 | # Flask stuff: 102 | instance/ 103 | .webassets-cache 104 | 105 | # Scrapy stuff: 106 | .scrapy 107 | 108 | # Sphinx documentation 109 | docs/_build/ 110 | 111 | # PyBuilder 112 | target/ 113 | 114 | # Jupyter Notebook 115 | .ipynb_checkpoints 116 | 117 | # IPython 118 | profile_default/ 119 | ipython_config.py 120 | 121 | # pyenv 122 | .python-version 123 | 124 | # pipenv 125 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 126 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 127 | # having no cross-platform support, pipenv may install dependencies that don’t work, or not 128 | # install all needed dependencies. 129 | #Pipfile.lock 130 | 131 | # celery beat schedule file 132 | celerybeat-schedule 133 | 134 | # SageMath parsed files 135 | *.sage.py 136 | 137 | # Environments 138 | .env 139 | .venv 140 | .winenv 141 | env/ 142 | venv/ 143 | ENV/ 144 | env.bak/ 145 | venv.bak/ 146 | 147 | # Spyder project settings 148 | .spyderproject 149 | .spyproject 150 | 151 | # Rope project settings 152 | .ropeproject 153 | 154 | # mkdocs documentation 155 | /site 156 | 157 | # mypy 158 | .mypy_cache/ 159 | .dmypy.json 160 | dmypy.json 161 | 162 | # Pyre type checker 163 | .pyre/
176 | 177 | # VSCode 178 | .vscode
-------------------------------------------------------------------------------- /01-azure-functions-python-vscode/.funcignore: -------------------------------------------------------------------------------- 1 | .git* 2 | .vscode 3 | local.settings.json 4 | test 5 | .venv
-------------------------------------------------------------------------------- /01-azure-functions-python-vscode/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # IPython 79 | profile_default/ 80 | ipython_config.py 81 | 82 | # pyenv 83 | .python-version 84 | 85 | # pipenv 86 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 87 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 88 | # having no cross-platform support, pipenv may install dependencies that don’t work, or not 89 | # install all needed dependencies.
90 | #Pipfile.lock 91 | 92 | # celery beat schedule file 93 | celerybeat-schedule 94 | 95 | # SageMath parsed files 96 | *.sage.py 97 | 98 | # Environments 99 | .env 100 | .venv 101 | env/ 102 | venv/ 103 | ENV/ 104 | env.bak/ 105 | venv.bak/ 106 | 107 | # Spyder project settings 108 | .spyderproject 109 | .spyproject 110 | 111 | # Rope project settings 112 | .ropeproject 113 | 114 | # mkdocs documentation 115 | /site 116 | 117 | # mypy 118 | .mypy_cache/ 119 | .dmypy.json 120 | dmypy.json 121 | 122 | # Pyre type checker 123 | .pyre/ 124 | 125 | # Azure Functions artifacts 126 | bin 127 | obj 128 | appsettings.json 129 | local.settings.json 130 | .python_packages -------------------------------------------------------------------------------- /01-azure-functions-python-vscode/host.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0", 3 | "extensionBundle": { 4 | "id": "Microsoft.Azure.Functions.ExtensionBundle", 5 | "version": "[2.*, 3.0.0)" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /01-azure-functions-python-vscode/proxies.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json.schemastore.org/proxies", 3 | "proxies": {} 4 | } 5 | -------------------------------------------------------------------------------- /01-azure-functions-python-vscode/requirements.txt: -------------------------------------------------------------------------------- 1 | azure-functions 2 | vaderSentiment -------------------------------------------------------------------------------- /01-azure-functions-python-vscode/sentiment/__init__.py: -------------------------------------------------------------------------------- 1 | import azure.functions as func 2 | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer 3 | 4 | 5 | def main(req: func.HttpRequest) -> func.HttpResponse: 6 | analyzer = SentimentIntensityAnalyzer() 7 | text = req.params.get("text") 8 | if text is None: 9 | return func.HttpResponse(status_code=302, headers={"Location":req.url+"?text=I+Love+PyCon"}) 10 | scores = analyzer.polarity_scores(text) 11 | sentiment = "positive" if scores["compound"] > 0 else "negative" 12 | return func.HttpResponse(sentiment) 13 | 14 | -------------------------------------------------------------------------------- /01-azure-functions-python-vscode/sentiment/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "scriptFile": "__init__.py", 3 | "bindings": [ 4 | { 5 | "authLevel": "anonymous", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req", 9 | "methods": [ 10 | "get", 11 | "post" 12 | ] 13 | }, 14 | { 15 | "type": "http", 16 | "direction": "out", 17 | "name": "$return" 18 | } 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /01-azure-functions-python-vscode/sentiment/sample.dat: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Azure" 3 | } -------------------------------------------------------------------------------- /01-dapr/az-components/secretstore-local-env.yml: -------------------------------------------------------------------------------- 1 | apiVersion: dapr.io/v1alpha1 2 | kind: Component 3 | metadata: 4 | name: envvar-secret-store 5 | namespace: default 6 | spec: 7 | type: secretstores.local.env 8 | version: v1 9 | metadata: 10 | 
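For context, a component like the environment-variable secret store above is read through the sidecar's secrets API once the app is launched with `dapr run`. A minimal sketch of such a call, mirroring the style of `main.py` below; the secret name `PATH` and the fallback port are illustrative assumptions:

```python
# Sketch: read an environment variable through the Dapr secrets API.
# Assumes a sidecar started with --components-path pointing at az-components;
# "envvar-secret-store" matches the component name defined above.
import os
import requests

port = os.getenv("DAPR_HTTP_PORT") or "3500"  # assumed default HTTP port
r = requests.get(f"http://localhost:{port}/v1.0/secrets/envvar-secret-store/PATH")
print(r.json())
```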
-------------------------------------------------------------------------------- /01-dapr/az-components/secretstore-local-file.yml: -------------------------------------------------------------------------------- 1 | apiVersion: dapr.io/v1alpha1 2 | kind: Component 3 | metadata: 4 | name: my-secret-store 5 | namespace: default 6 | spec: 7 | type: secretstores.local.file 8 | version: v1 9 | metadata: 10 | - name: secretsFile 11 | value: az-secrets.json 12 | - name: nestedSeparator 13 | value: ":" 14 | 15 | -------------------------------------------------------------------------------- /01-dapr/az-components/state-azure-blobstorage.yml: -------------------------------------------------------------------------------- 1 | apiVersion: dapr.io/v1alpha1 2 | kind: Component 3 | metadata: 4 | name: statestore 5 | spec: 6 | type: state.azure.blobstorage 7 | version: v1 8 | metadata: 9 | - name: accountName 10 | value: storage210200 11 | - name: accountKey 12 | secretKeyRef: 13 | name: azure-storage-account-key 14 | - name: containerName 15 | value: container1 16 | 17 | auth: 18 | secretStore: my-secret-store 19 | 20 | -------------------------------------------------------------------------------- /01-dapr/az-secrets.json: -------------------------------------------------------------------------------- 1 | { 2 | "azure-storage-account-key" : "" 3 | } 4 | -------------------------------------------------------------------------------- /01-dapr/help.txt: -------------------------------------------------------------------------------- 1 | dapr_http_get_state 2 | GET /v1.0/state/{store}/{name} 3 | 4 | dapr_http_post_state 5 | POST /v1.0/state/{store} 6 | 7 | dapr_http_get_secret 8 | GET /v1.0/secrets/{store}/{name} 9 | 10 | dapr_get_state 11 | DaprClient().get_state(store_name=store, key=name) 12 | 13 | dapr_save_state 14 | DaprClient().save_state(store_name=store, key=name, value=value) 15 | 16 | dapr_save_state_etag 17 | DaprClient().save_state(store_name=store, key=name, value=value, etag=etag) 18 | 19 | dapr_get_secret 20 | DaprClient().get_secret(store_name=store, key=name) 21 | -------------------------------------------------------------------------------- /01-dapr/help_dapr_http_get_state.txt: -------------------------------------------------------------------------------- 1 | NAME 2 | main.py dapr_http_get_state - GET /v1.0/state/{store}/{name} 3 | 4 | SYNOPSIS 5 | main.py dapr_http_get_state 6 | 7 | DESCRIPTION 8 | GET /v1.0/state/{store}/{name} 9 | 10 | FLAGS 11 | --store=STORE 12 | Default: 'statestore' 13 | --name=NAME 14 | Default: 'name' 15 | -------------------------------------------------------------------------------- /01-dapr/main.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import requests 3 | 4 | import os 5 | import grpc 6 | import dapr.clients 7 | 8 | # http examples 9 | def dapr_http_get_state(store="statestore", name="name"): 10 | """GET /v1.0/state/{store}/{name}""" 11 | port = os.getenv("DAPR_HTTP_PORT") or "3500" 12 | r = requests.get(f"http://localhost:{port}/v1.0/state/{store}/{name}") 13 | print(r.json()) 14 | 15 | def dapr_http_post_state(store="statestore", name="name", value="Bruce Wayne"): 16 | """POST /v1.0/state/{store}""" 17 | port = os.getenv("DAPR_HTTP_PORT") or "3500" 18 | url1 = f"http://localhost:{port}/v1.0/state/{store}" 19 | dict1 = [{"key":name, "value": value}] 20 | r = requests.post(url1, json=dict1) 21 | print(r.status_code) 22 | 23 | def dapr_http_get_secret(store="my-secret-store", 
name="my-secret"): 24 | """GET /v1.0/secrets/{store}/{name}""" 25 | port = os.getenv("DAPR_HTTP_PORT") or "3500" 26 | r = requests.get(f"http://localhost:{port}/v1.0/secrets/{store}/{name}") 27 | print(r.json()) 28 | 29 | # sdk examples 30 | def dapr_get_state(store="statestore", name="name"): 31 | """DaprClient().get_state(store_name=store, key=name)""" 32 | with dapr.clients.DaprClient() as d: 33 | # Wait for sidecar to be up within 5 seconds. 34 | d.wait(5) 35 | res = d.get_state(store_name=store, key=name) 36 | print(res.data.decode()) 37 | 38 | def dapr_save_state(store="statestore", name="hello", value="world"): 39 | """DaprClient().save_state(store_name=store, key=name, value=value)""" 40 | with dapr.clients.DaprClient() as d: 41 | # Wait for sidecar to be up within 5 seconds. 42 | d.wait(5) 43 | # Save single state. 44 | d.save_state(store_name=store, key=name, value=value) 45 | print(f"State store has successfully saved {value} with {name} as key") 46 | 47 | def dapr_save_state_etag(store="statestore", name="hello", value="world", etag=""): 48 | """DaprClient().save_state(store_name=store, key=name, value=value, etag=etag)""" 49 | with dapr.clients.DaprClient() as d: 50 | # Wait for sidecar to be up within 5 seconds. 51 | d.wait(5) 52 | if etag == "": 53 | # Save single state. 54 | d.save_state(store_name=store, key=name, value=value) 55 | print(f"State store has successfully saved {value} with {name} as key") 56 | else: 57 | # Save with an etag that is different from the one stored in the database. 58 | try: 59 | d.save_state(store_name=store, key=name, value=value, etag=etag) 60 | print(f"State store has successfully saved {value} with {name} as key with etag {etag}") 61 | except grpc.RpcError as err: 62 | # StatusCode should be StatusCode.ABORTED. 63 | print(f"Cannot save due to bad etag. ErrorCode={err.code()}") 64 | # For detailed error messages from the dapr runtime: 65 | # print(f"Details={err.details()}") 66 | 67 | def dapr_get_secret(store="my-secret-store", name="my-secret"): 68 | """DaprClient().get_secret(store_name=store, key=name)""" 69 | with dapr.clients.DaprClient() as d: 70 | # Wait for sidecar to be up within 5 seconds.
71 | d.wait(5) 72 | res = d.get_secret(store_name=store, key=name) 73 | print(res.secret) 74 | 75 | # your examples 76 | def test(): 77 | print("hello, test") 78 | 79 | if __name__ == '__main__': 80 | fire.Fire() 81 |
-------------------------------------------------------------------------------- /01-dapr/my-components/localSecretStore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dapr.io/v1alpha1 2 | kind: Component 3 | metadata: 4 | name: my-secret-store 5 | namespace: default 6 | spec: 7 | type: secretstores.local.file 8 | version: v1 9 | metadata: 10 | - name: secretsFile 11 | value: my-secrets.json 12 | - name: nestedSeparator 13 | value: ":" 14 |
-------------------------------------------------------------------------------- /01-dapr/my-components/pubsub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dapr.io/v1alpha1 2 | kind: Component 3 | metadata: 4 | name: pubsub 5 | spec: 6 | type: pubsub.redis 7 | metadata: 8 | - name: redisHost 9 | value: localhost:6379 10 | - name: redisPassword 11 | value: "" 12 |
-------------------------------------------------------------------------------- /01-dapr/my-components/statestore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dapr.io/v1alpha1 2 | kind: Component 3 | metadata: 4 | name: statestore 5 | spec: 6 | type: state.redis 7 | metadata: 8 | - name: redisHost 9 | value: localhost:6379 10 | - name: redisPassword 11 | value: "" 12 | - name: actorStateStore 13 | value: "true" 14 |
-------------------------------------------------------------------------------- /01-dapr/my-secrets.json: -------------------------------------------------------------------------------- 1 | { 2 | "my-secret" : "I'm Batman" 3 | } 4 |
-------------------------------------------------------------------------------- /01-dapr/requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp==3.7.4.post0 2 | async-timeout==3.0.1 3 | attrs==20.3.0 4 | certifi==2020.12.5 5 | chardet==3.0.4 6 | dapr==1.1.0 7 | fire==0.4.0 8 | grpcio==1.37.1 9 | idna==2.10 10 | multidict==5.1.0 11 | protobuf==3.15.0 12 | python-dateutil==2.8.1 13 | requests==2.25.1 14 | six==1.15.0 15 | termcolor==1.1.0 16 | typing-extensions==3.7.4.3 17 | urllib3==1.26.5 18 | yarl==1.6.3 19 |
-------------------------------------------------------------------------------- /01-msticpy/README.md: -------------------------------------------------------------------------------- 1 | # Cyber Security Investigations and Analysis with MSTICPy 2 | 3 | This lab provides you with an interactive introduction to [MSTICPy](https://github.com/microsoft/msticpy). 4 | 5 | MSTICPy is an open source cyber security tool kit created by the Microsoft Threat Intelligence Center to support security analysts during investigations and threat hunting. If you want to know more about MSTICPy before starting this lab, take a look at [our documentation over at Read the Docs](https://msticpy.readthedocs.io/en/latest/). 6 | 7 | ## What we will cover in the lab 8 | 9 | In this lab you will learn about and use the main features of MSTICPy.
The lab is split into several sections, each one focused on a key element of MSTICPy: 10 | 11 | - Data Acquisition 12 | - Data Enrichment 13 | - Extracting Key Data 14 | - Data Visualization 15 | - Pivots in MSTICPy 16 | - ML in MSTICPy 17 | 18 | In each section you will have a set of guided examples that show you how the features work and how to call them. After the examples come short lab exercises for you to complete; these involve using the features you have just seen demonstrated. 19 | 20 | Don't worry if you can't complete any of the lab exercises; they are not required to move on to the next section. You can also view a completed version of the notebook [here](https://github.com/microsoft/msticpy-lab/blob/main/MSTICPy_Lab_Completed.ipynb). 21 | 22 | ### MSTICPy in Jupyter 23 | 24 | ![Screenshot of the lab](https://github.com/microsoft/msticpy-lab/blob/main/data/screenshot.png?raw=true) 25 | 26 | ## Start your lab 27 | 28 | ### Online 29 | 30 | One of the easiest ways to complete the lab is to click the 'Launch Binder' button below to launch the lab using [Binder](https://mybinder.org/). This will load the notebook in a pre-configured environment that you can access straight from the browser without any setup required. The notebook contains all instructions and resources required for the lab. 31 | 32 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/microsoft/msticpy-lab/HEAD?filepath=MSTICPy_Lab.ipynb) 33 | 34 | ### Locally 35 | 36 | You can also download the complete lab from the [github.com/microsoft/msticpy-lab](https://github.com/microsoft/msticpy-lab) repo and run the notebook locally via the [Jupyter Notebook Support](https://code.visualstudio.com/docs/python/jupyter-support) in Visual Studio Code or [JupyterLab](https://jupyter.org/install); a terminal-based setup sketch follows at the end of this README. 37 | 38 | ## Feedback 39 | 40 | If you have any questions or feedback, please open [an issue](https://github.com/microsoft/msticpy-lab/issues) or contact msticpy@microsoft.com.
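As referenced in the Locally section above, here is a minimal sketch for driving the lab from a plain terminal. The `msticpy` and `notebook` package names are assumptions about what the lab needs; the repo itself is the authoritative source:

```bash
# A minimal local-setup sketch; assumes git, Python 3, and pip are on PATH.
git clone https://github.com/microsoft/msticpy-lab
cd msticpy-lab
python -m pip install msticpy notebook   # assumption: the repo may pin its own versions
jupyter notebook MSTICPy_Lab.ipynb
```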
41 | -------------------------------------------------------------------------------- /01-postgres/.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !/.gitignore 3 | -------------------------------------------------------------------------------- /01-postgres/pg-lab.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import psycopg2 3 | import csv 4 | 5 | def writeConfig(conn_string): 6 | with open(".conninfo","w") as source: 7 | source.write(conn_string) 8 | return "Successfully wrote config file" 9 | 10 | def connect(): 11 | with open(".conninfo","r") as source: 12 | connString=source.readline().strip() 13 | #return connString 14 | try: 15 | conn=psycopg2.connect(connString) 16 | return conn 17 | except: 18 | print("Database connection error") 19 | return None 20 | def populateDevices(): 21 | conn=connect() 22 | cursor=conn.cursor() 23 | cursor.execute('INSERT INTO device_list SELECT distinct device_id,location,location_name FROM raw_data') 24 | conn.commit() 25 | return None 26 | 27 | def loadData(filename="data.csv"): 28 | conn=connect() 29 | cursor=conn.cursor() 30 | cursor.execute('CREATE EXTENSION IF NOT EXISTS postgis') 31 | cursor.execute('DROP TABLE IF EXISTS raw_data') 32 | cursor.execute('DROP TABLE IF EXISTS device_list') 33 | cursor.execute('CREATE TABLE raw_data(device_id bigint,time timestamp,location geography(POINT,4326),data jsonb,location_name text);') 34 | cursor.execute('CREATE TABLE device_list(device_id bigint,location geography(POINT,4326),location_name text);') 35 | 36 | conn.commit() 37 | with open(filename,'r') as incoming: 38 | cursor.copy_expert('COPY raw_data FROM stdin CSV',incoming) 39 | conn.commit() 40 | return "Data loaded successfully" 41 | 42 | def getAllData(): 43 | ret=dict() 44 | conn=connect() 45 | cursor=conn.cursor() 46 | cursor.execute('SELECT device_id,data FROM raw_data') 47 | for entry in cursor.fetchall(): 48 | device=entry[0] 49 | data=entry[1] 50 | if device not in ret: 51 | ret[device]=[] 52 | ret[device].append(data) 53 | return ret 54 | 55 | def getNearestDevice(latitude, longitude): 56 | conn=connect() 57 | cursor=conn.cursor() 58 | cursor.execute('SELECT device_id,location_name,ST_DISTANCE(location,ST_SetSRID(ST_MakePoint(%s,%s),4326)) FROM device_list ORDER BY 3 ASC LIMIT 1',(longitude,latitude)) 59 | data=cursor.fetchone() 60 | print("Device number {0} in {1} is closest.".format(data[0],data[1])) 61 | return None 62 | 63 | def getDeviceAverage(device): 64 | conn=connect() 65 | cursor=conn.cursor() 66 | cursor.execute("select data -> 'temperature' -> 'units',avg((data -> 'temperature' ->> 'value')::float) from raw_data where device_id = %s group by 1",(device,)) 67 | data=cursor.fetchone() 68 | return data 69 | 70 | def runSQL(statement): 71 | conn=connect() 72 | cursor=conn.cursor() 73 | cursor.execute(statement) 74 | conn.commit() 75 | return None 76 | 77 | def getAverageTemperatures(): 78 | ret=dict() 79 | conn=connect() 80 | cursor=conn.cursor() 81 | cursor.execute('SELECT device_id,location_name,data FROM raw_data') 82 | for entry in cursor.fetchall(): 83 | location=entry[1] 84 | data=entry[2]['temperature']['value'] 85 | unit=entry[2]['temperature']['units'] 86 | if location not in ret: 87 | ret[location]=[] 88 | ret[location].append(data) 89 | for location in ret: 90 | data=ret[location] 91 | average=sum(data)/len(data) 92 | print("{0} had an average temperature of {1}".format(location,average)) 93 | return None 94 | 95 | 
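# Usage sketch for the Fire CLI exposed below (hypothetical connection values):
#   python pg-lab.py writeConfig "host=<server> dbname=<db> user=<user> password=<password>"
#   python pg-lab.py loadData --filename=data.csv
#   python pg-lab.py populateDevices
#   python pg-lab.py getNearestDevice --latitude=47.6 --longitude=-122.3
#   python pg-lab.py getDeviceAverage --device=1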
if __name__ == '__main__': 96 | fire.Fire({"writeConfig":writeConfig,"loadData":loadData,"getAllData":getAllData,"getAverageTemperatures":getAverageTemperatures,"populateDevices":populateDevices,"getNearestDevice":getNearestDevice,"getDeviceAverage":getDeviceAverage,"runSQL":runSQL}) 97 | 98 |
-------------------------------------------------------------------------------- /2019/0-scripts/refresh.ps1: -------------------------------------------------------------------------------- 1 | # 2-vscode-django-postgres-dev-container 2 | docker stop devcontainer_db_1 3 | docker rm devcontainer_db_1 4 | docker stop devcontainer_app_1 5 | docker rm devcontainer_app_1 6 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/README.md: -------------------------------------------------------------------------------- 1 | # Containerize a Django application using Visual Studio Code 2 | 3 | This lab teaches you how to use Visual Studio Code's Docker extension to build a Docker container for an existing Django web application that 4 | takes log messages and stores them in an SQLite database. 5 | 6 | The container that you use in this lab will be used in other labs that teach you how to publish a Docker container. 7 | 8 | ## Prerequisites 9 | 10 | If you're doing this lab outside of the Microsoft booth at PyCon 2019, you'll need the following tools installed on your local machine: 11 | 12 | 1. [Docker Desktop](https://www.docker.com/products/docker-desktop) 13 | 1. [Visual Studio Code](https://code.visualstudio.com) 14 | 1. The [VS Code Docker Extension](https://marketplace.visualstudio.com/items?itemName=PeterJausovec.vscode-docker) 15 | 16 | ## Open workspace and build a development container 17 | 18 | 1. Open the lab folder with Visual Studio Code: 19 | 20 | ```bash 21 | cd 1-vscode-django-docker 22 | code-insiders . 23 | ``` 24 | 25 | 1. Press `Ctrl-Shift-P` and type `Add Docker files to Workspace`. 26 | 1. Following the prompts, select `Python` and port `8000`. 27 | 1. Change the RUN and CMD lines in the Dockerfile to the following: 28 | 29 | ```Dockerfile 30 | # Using pip: 31 | RUN python3 -m pip install -r requirements.txt 32 | RUN python3 manage.py migrate 33 | CMD ["python3", "manage.py", "runserver", "0.0.0.0:8000"] 34 | ``` 35 | 36 | 1. Right-click on docker-compose.yml and click `Compose up`. 37 | 1. Open [http://localhost:8000](http://localhost:8000) in the browser to view the app. 38 | 39 | ## (Optional) Build a production-ready container 40 | 41 | If you want to build a container with a production web server based on nginx and uWSGI, you can follow the steps below. 42 | 43 | The application already contains a `uwsgi.ini` file, which defines how to run the web server, 44 | so you only need to make some small modifications to the Dockerfile. 45 | 46 | 1. Remove the CMD line from the Dockerfile. 47 | 1. Change the FROM line to: 48 | 49 | ```Dockerfile 50 | FROM tiangolo/uwsgi-nginx 51 | ``` 52 | 53 | 1. Replace the EXPOSE line with: 54 | 55 | ```Dockerfile 56 | ENV LISTEN_PORT=8000 57 | ``` 58 | 59 | 1. Right-click on docker-compose.yml and click `Compose up`. 60 | 1. Open [http://localhost:8000](http://localhost:8000) in the browser to view the app.
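Putting the optional production steps together, here is a sketch of how the edited Dockerfile could end up. The `WORKDIR`/`COPY` lines are assumptions about what the Docker extension generated; `tiangolo/uwsgi-nginx` looks for `uwsgi.ini` under `/app` by default:

```Dockerfile
# A sketch only; the extension-generated file may differ in layout.
FROM tiangolo/uwsgi-nginx

# Replaces the generated EXPOSE line.
ENV LISTEN_PORT=8000

# Assumed generated layout: the app is copied into /app, where the base
# image's nginx + uWSGI entrypoint expects uwsgi.ini to live.
WORKDIR /app
COPY . /app

RUN python3 -m pip install -r requirements.txt
RUN python3 manage.py migrate

# No CMD: the base image starts nginx and uWSGI itself.
```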
61 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/1-vscode-django-docker/hello/__init__.py -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | from . import models 4 | 5 | admin.site.register(models.LogMessage) 6 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | class HelloConfig(AppConfig): 4 | name = 'hello' 5 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/forms.py: -------------------------------------------------------------------------------- 1 | from django import forms 2 | 3 | from hello.models import LogMessage 4 | 5 | class LogMessageForm(forms.ModelForm): 6 | class Meta: 7 | model = LogMessage 8 | fields = ("message",) # NOTE: the trailing comma is required 9 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 2.1.1 on 2018-09-11 22:31 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | initial = True 9 | 10 | dependencies = [ 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='LogMessage', 16 | fields=[ 17 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('message', models.CharField(max_length=300)), 19 | ('log_date', models.DateTimeField(verbose_name='date logged')), 20 | ], 21 | ), 22 | ] 23 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/1-vscode-django-docker/hello/migrations/__init__.py -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | from django.utils import timezone 3 | 4 | class LogMessage(models.Model): 5 | message = models.CharField(max_length=300) 6 | log_date = models.DateTimeField("date logged") 7 | 8 | def __str__(self): 9 | """Returns a string representation of a message.""" 10 | date = timezone.localtime(self.log_date) 11 | return f"'{self.message}' logged on {date.strftime('%A, %d %B, %Y at %X')}" 12 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/static/hello/site.css: -------------------------------------------------------------------------------- 1 | .message { 2 | font-weight: 600; 3 | color: blue; 4 | } 5 | 6 | .navbar { 7 | background-color: 
lightslategray; 8 | font-size: 1em; 9 | font-family: 'Trebuchet MS', 'Lucida Sans Unicode', 'Lucida Grande', 'Lucida Sans', Arial, sans-serif; 10 | color: white; 11 | padding: 8px 5px 8px 5px; 12 | } 13 | 14 | .navbar a { 15 | text-decoration: none; 16 | color: inherit; 17 | } 18 | 19 | .navbar-brand { 20 | font-size: 1.2em; 21 | font-weight: 600; 22 | } 23 | 24 | .navbar-item { 25 | font-variant: small-caps; 26 | margin-left: 30px; 27 | } 28 | 29 | .body-content { 30 | padding: 5px; 31 | font-family:'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; 32 | } 33 | 34 | input[name=message] { 35 | width: 80%; 36 | } 37 | 38 | .message_list th,td { 39 | text-align: left; 40 | padding-right: 15px; 41 | } 42 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/templates/hello/about.html: -------------------------------------------------------------------------------- 1 | {% extends "hello/layout.html" %} 2 | {% block title %} 3 | About 4 | {% endblock %} 5 | {% block content %} 6 |
6 | <p>About page for the Visual Studio Code Django tutorial.</p>
7 | {% endblock %} 8 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/templates/hello/contact.html: -------------------------------------------------------------------------------- 1 | {% extends "hello/layout.html" %} 2 | {% block title %} 3 | Contact Us 4 | {% endblock %} 5 | {% block content %} 6 |
6 | <p>Contact page for the Visual Studio Code Django tutorial.</p>
7 | {% endblock %} 8 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/templates/hello/hello_there.html: --------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 | <meta charset="utf-8"/>
5 | <title>Hello, Django</title>
6 | {% load static %}
7 | <link rel="stylesheet" type="text/css" href="{% static 'hello/site.css' %}"/>
8 | </head>
9 | <body>
10 | <strong>Hello, there {{ name }}!</strong> It's {{ date | date:'l, d F, Y' }} at {{ date | time:'H:i:s' }}.
11 | </body>
12 | </html>
13 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/templates/hello/home.html: -------------------------------------------------------------------------------- 1 | {% extends "hello/layout.html" %} 2 | {% block title %} 3 | Home 4 | {% endblock %} 5 | {% block content %}
6 | <h2>Logged messages</h2>
7 |
8 | {% if message_list %}
9 | <table class="message_list">
10 | <thead>
11 | <tr>
12 | <th>Date</th>
13 | <th>Time</th>
14 | <th>Message</th>
15 | </tr>
16 | </thead>
17 | <tbody>
18 | {% for message in message_list %}
19 | <tr>
20 | <td>{{ message.log_date | date:'d M Y' }}</td>
21 | <td>{{ message.log_date | time:'H:i:s' }}</td>
22 | <td>
23 | {{ message.message }}
24 | </td>
25 | </tr>
26 | {% endfor %}
27 | </tbody>
28 | </table>
29 | {% else %}
30 | <p>No messages have been logged. Use the <a href="{% url 'log' %}">Log Message form</a>.</p>
31 | {% endif %}
32 | {% endblock %}
33 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/templates/hello/layout.html: --------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 | <meta charset="utf-8"/>
5 | <title>{% block title %}{% endblock %}</title>
6 | {% load static %}
7 | <link rel="stylesheet" type="text/css" href="{% static 'hello/site.css' %}"/>
8 | </head>
9 |
10 | <body>
11 | <div class="navbar">
12 | <a href="{% url 'home' %}" class="navbar-brand">Home</a>
13 | <a href="{% url 'about' %}" class="navbar-item">About</a>
14 | <a href="{% url 'contact' %}" class="navbar-item">Contact</a>
15 | <a href="{% url 'log' %}" class="navbar-item">Log Message</a>
16 | </div>
17 |
18 | <div class="body-content">
19 | {% block content %}
20 | {% endblock %}
21 | <hr/>
22 | <footer>
23 | <p>&copy; 2018</p>
24 | </footer>
25 | </div>
26 | </body>
27 | </html>
28 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/templates/hello/log_message.html: --------------------------------------------------------------------------------
1 | {% extends "hello/layout.html" %}
2 | {% block title %}
3 | Log a message
4 | {% endblock %}
5 | {% block content %}
6 | <form method="POST" class="log-form">
7 | {% csrf_token %}
8 | {{ form.as_p }}
9 | <button type="submit" class="save btn btn-default">Log</button>
10 | </form>
11 | {% endblock %} 12 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 4 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/urls.py: -------------------------------------------------------------------------------- 1 | from django.urls import path 2 | 3 | from hello import views 4 | from hello.models import LogMessage 5 | 6 | home_list_view = views.HomeListView.as_view( 7 | queryset=LogMessage.objects.order_by("-log_date")[:5], # :5 limits the results to the five most recent 8 | context_object_name="message_list", 9 | template_name="hello/home.html", 10 | ) 11 | 12 | urlpatterns = [ 13 | path("", home_list_view, name="home"), 14 | path("hello/", views.hello_there, name="hello_there"), 15 | path("about/", views.about, name="about"), 16 | path("contact/", views.contact, name="contact"), 17 | path("log/", views.log_message, name="log"), 18 | ] 19 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/hello/views.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from django.shortcuts import redirect, render 4 | from django.views.generic import ListView 5 | 6 | from hello.forms import LogMessageForm 7 | from hello.models import LogMessage 8 | 9 | 10 | class HomeListView(ListView): 11 | """Renders the home page, with a list of the most recent log messages.""" 12 | 13 | model = LogMessage 14 | 15 | def get_context_data(self, **kwargs): 16 | context = super(HomeListView, self).get_context_data(**kwargs) 17 | return context 18 | 19 | 20 | def about(request): 21 | """Renders the about page.""" 22 | return render(request, "hello/about.html") 23 | 24 | 25 | def contact(request): 26 | """Renders the contact page.""" 27 | return render(request, "hello/contact.html") 28 | 29 | 30 | def hello_there(request, name): 31 | """Renders the hello_there page. 32 | Args: 33 | name: Name to say hello to 34 | """ 35 | return render( 36 | request, "hello/hello_there.html", {"name": name, "date": datetime.now()} 37 | ) 38 | 39 | 40 | def log_message(request): 41 | form = LogMessageForm(request.POST or None) 42 | if request.method == "POST": 43 | if form.is_valid(): 44 | message = form.save(commit=False) 45 | message.log_date = datetime.now() 46 | message.save() 47 | return redirect("home") 48 | else: 49 | return render(request, "hello/log_message.html", {"form": form}) 50 | else: 51 | return render(request, "hello/log_message.html", {"form": form}) 52 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | 5 | if __name__ == '__main__': 6 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_project.settings') 7 | try: 8 | from django.core.management import execute_from_command_line 9 | except ImportError as exc: 10 | raise ImportError( 11 | "Couldn't import Django. Are you sure it's installed and " 12 | "available on your PYTHONPATH environment variable? Did you " 13 | "forget to activate a virtual environment?"
14 | ) from exc 15 | execute_from_command_line(sys.argv) 16 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/requirements.txt: -------------------------------------------------------------------------------- 1 | Django>=2.1.2 2 | pytz==2018.5 3 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/uwsgi.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | chdir = . 3 | module = web_project.wsgi:application 4 | env = DJANGO_SETTINGS_MODULE=web_project.settings 5 | uid = 1000 6 | master = true 7 | threads = 2 8 | processes = 4 9 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/web_project/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/1-vscode-django-docker/web_project/__init__.py
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/web_project/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for web_project project. 3 | 4 | Generated by 'django-admin startproject' using Django 2.1.1. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.1/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/2.1/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = '2gult1d96#@#b2%tz+k9x1q%-4(%f@va-!sbv*q&$t^gpp8-_=' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | # If you set it to False, also add "localhost" to ALLOWED_HOSTS or else 27 | # you'll get "Bad Request" when running locally. 28 | DEBUG = True 29 | 30 | # When deploying to Azure App Service, add your .azurewebsites.net 31 | # domain to ALLOWED_HOSTS; you get an error message if you forget. When you add 32 | # a specific host, you must also add 'localhost' and/or '127.0.0.1' for local 33 | # debugging (which are enabled by default when ALLOWED_HOSTS is empty.) 34 | ALLOWED_HOSTS = [ 35 | #'localhost', 36 | #'127.0.0.1', 37 | #'vscode-django-tutorial.azurewebsites.net' # Sample name only!
38 | ] 39 | 40 | # Application definition 41 | 42 | INSTALLED_APPS = [ 43 | 'django.contrib.admin', 44 | 'django.contrib.auth', 45 | 'django.contrib.contenttypes', 46 | 'django.contrib.sessions', 47 | 'django.contrib.messages', 48 | 'django.contrib.staticfiles', 49 | 'hello', 50 | ] 51 | 52 | MIDDLEWARE = [ 53 | 'django.middleware.security.SecurityMiddleware', 54 | 'django.contrib.sessions.middleware.SessionMiddleware', 55 | 'django.middleware.common.CommonMiddleware', 56 | 'django.middleware.csrf.CsrfViewMiddleware', 57 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 58 | 'django.contrib.messages.middleware.MessageMiddleware', 59 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 60 | ] 61 | 62 | ROOT_URLCONF = 'web_project.urls' 63 | 64 | TEMPLATES = [ 65 | { 66 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 67 | 'DIRS': [], 68 | 'APP_DIRS': True, 69 | 'OPTIONS': { 70 | 'context_processors': [ 71 | 'django.template.context_processors.debug', 72 | 'django.template.context_processors.request', 73 | 'django.contrib.auth.context_processors.auth', 74 | 'django.contrib.messages.context_processors.messages', 75 | ], 76 | }, 77 | }, 78 | ] 79 | 80 | WSGI_APPLICATION = 'web_project.wsgi.application' 81 | 82 | 83 | # Database 84 | # https://docs.djangoproject.com/en/2.1/ref/settings/#databases 85 | 86 | DATABASES = { 87 | 'default': { 88 | 'ENGINE': 'django.db.backends.sqlite3', 89 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 90 | } 91 | } 92 | 93 | 94 | # Password validation 95 | # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators 96 | 97 | AUTH_PASSWORD_VALIDATORS = [ 98 | { 99 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 100 | }, 101 | { 102 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 103 | }, 104 | { 105 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 106 | }, 107 | { 108 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 109 | }, 110 | ] 111 | 112 | 113 | # Internationalization 114 | # https://docs.djangoproject.com/en/2.1/topics/i18n/ 115 | 116 | LANGUAGE_CODE = 'en-us' 117 | 118 | TIME_ZONE = 'UTC' 119 | 120 | USE_I18N = True 121 | 122 | USE_L10N = True 123 | 124 | USE_TZ = True 125 | 126 | 127 | # Static files (CSS, JavaScript, Images) 128 | # https://docs.djangoproject.com/en/2.1/howto/static-files/ 129 | 130 | STATIC_URL = '/static/' 131 | 132 | # The location where the collectstatic command collects static files from apps. 133 | # A dedicated static file server is typically used in production to serve files 134 | # from this location, rather than relying on the app server to serve those files 135 | # from various locations in the app. Doing so results in better overall performance. 136 | STATIC_ROOT = os.path.join(BASE_DIR, 'static_collected') 137 | -------------------------------------------------------------------------------- /2019/1-vscode-django-docker/web_project/urls.py: -------------------------------------------------------------------------------- 1 | """web_project URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/2.1/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | 17 | from django.contrib import admin 18 | from django.contrib.staticfiles.urls import staticfiles_urlpatterns 19 | from django.urls import include, path 20 | 21 | urlpatterns = [ 22 | path("", include("hello.urls")), 23 | path("admin/", admin.site.urls), # Activates the admin interface 24 | ] 25 | 26 | urlpatterns += staticfiles_urlpatterns() 27 |
-------------------------------------------------------------------------------- /2019/1-vscode-django-docker/web_project/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for web_project project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_project.settings') 15 | 16 | application = get_wsgi_application() 17 |
-------------------------------------------------------------------------------- /2019/2a-vscode-flask-dev-container/README.md: -------------------------------------------------------------------------------- 1 | # Developing a new Flask application in a Dev Container 2 | 3 | In this lab we'll use Visual Studio Code remote development features to create a new 4 | hello world Flask application in a dockerized development environment. 5 | 6 | ## Prerequisites 7 | 1. Install [Docker Desktop](https://www.docker.com/products/docker-desktop) 8 | 1. Install [Visual Studio Code Insiders](https://code.visualstudio.com/insiders) 9 | 1. Install the [VS Code Remote Extensions](https://aka.ms/vscode-remote) 10 | 11 | ## Create dev container 12 | First we'll create a new dev container that we can start building our app in: 13 | 1. Open this folder using Visual Studio Code: 14 | ``` 15 | cd 2a-vscode-flask-dev-container 16 | code-insiders . 17 | ``` 18 | 1. Press `F1` and select `Remote-Containers: Create container configuration file...` 19 | 1. Select `Python 3` from the list 20 | 1. Select the `Reopen in Container` button in the notification that appears. If you miss the notification, 21 | press `F1` and select the `Remote-Containers: Re-open Folder in Container` command 22 | 23 | After the dev container builds and starts, you will be working inside it and 24 | can start building your app! 25 | 26 | ## Create app 27 | Now let's create a hello world Flask app. We'll need to set up the container to install Flask 28 | and expose port 5000. 29 | 1. Take a look at the files in the workspace root: 30 | - `requirements.txt` defines the Python libraries to install in the dev container 31 | - `app.py` contains the minimal code to run a Flask web server 32 | 1. Open `.devcontainer/devcontainer.json` and expose port 5000 by adding ```"appPort": 5000```. The .json file should look as follows: 33 | ``` 34 | { 35 | "name": "Python 3", 36 | "context": "..", 37 | "dockerFile": "Dockerfile", 38 | "workspaceFolder": "/workspace", 39 | "extensions": [ 40 | "ms-python.python" 41 | ], 42 | "appPort": 5000 43 | } 44 | ``` 45 | NOTE: Don't forget to press Ctrl-S to save! 46 | 47 | 1.
Now let's rebuild the container so that it installs Flask via the requirements.txt file and 48 | exposes port 5000. Press `F1` and select `Remote-Containers: Rebuild container`. 49 | 1. Add a debug configuration, `Debug > Add Configuration`. Select `Flask`. 50 | 1. Edit the generated `launch.json` file so that the app binds to host `0.0.0.0` by adding the host entries to `args` as follows: 51 | ``` 52 | "args": [ 53 | "run", 54 | "--host","0.0.0.0", 55 | "--no-debugger", 56 | "--no-reload" 57 | ], 58 | ``` 59 | 1. Press `F5` to start debugging, then browse to your app at [http://localhost:5000](http://localhost:5000). 60 | 1. Optionally set a breakpoint on line 6 of the app and refresh the page. 61 |
-------------------------------------------------------------------------------- /2019/2a-vscode-flask-dev-container/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | app = Flask(__name__) 3 | 4 | @app.route("/") 5 | def index(): 6 | return "Hello World!" 7 |
-------------------------------------------------------------------------------- /2019/2a-vscode-flask-dev-container/requirements.txt: -------------------------------------------------------------------------------- 1 | flask
-------------------------------------------------------------------------------- /2019/2b-vscode-django-postgres-dev-container/README.md: -------------------------------------------------------------------------------- 1 | # Developing a Django+PostgreSQL application in a Dev Container 2 | 3 | In this lab you use Visual Studio Code remote development features to work on a Django+PostgreSQL application in a dockerized development environment. 4 | 5 | > __IMPORTANT__: Right now, you cannot use WSL as your shell to either open Visual Studio Code or as your default shell inside 6 | > of Visual Studio Code and also use the VS Code Remote Extensions. To change the shell, press `Ctrl-Shift-P` and select 7 | > `Terminal: Select Default Shell`. When prompted for a value, choose either `CMD` or `PowerShell`. Close any existing shells, 8 | > and a new one will open with the selected default. 9 | 10 | ## Prerequisites 11 | 12 | If you're doing this lab outside of the Microsoft booth at PyCon 2019, you'll need the following tools installed on your local machine: 13 | 14 | 1. [Docker Desktop](https://www.docker.com/products/docker-desktop) 15 | 1. [Visual Studio Code Insider Build](https://code.visualstudio.com/insiders) 16 | 1. The [VS Code Remote Extensions](https://aka.ms/vscode-remote) 17 | 1. If you are running on Windows, set your Git line endings to use LF: 18 | ``` 19 | git config --global core.autocrlf false 20 | ``` 21 | 22 | ## Open the dev container workspace 23 | 24 | 1. Clone the sample app and open it using Visual Studio Code: 25 | 26 | ```cmd 27 | git clone https://github.com/Microsoft/python-sample-tweeterapp 28 | cd python-sample-tweeterapp 29 | code-insiders . 30 | ``` 31 | 32 | 1. Click the `Reopen in Container` prompt, or press `F1` and select the `Reopen folder in dev container` command. 33 | 34 | 1. After the workspace terminal loads, open a new terminal using ```Ctrl-Shift-` ``` and type the following to build the React frontend: 35 | 36 | ```cmd 37 | npm install 38 | npm run dev 39 | ``` 40 | 41 | 1. After the container builds, open another terminal using ```Ctrl-Shift-` ``` and type: 42 | 43 | ```cmd 44 | python manage.py migrate 45 | python manage.py loaddata initial_data 46 | python manage.py runserver 47 | ``` 48 | 49 | 1.
Open [http://localhost:8000](http://localhost:8000) in the browser to view the app. 50 | 1. Create an account and log in to the app 51 | 52 | ## Set up debugging in the container 53 | 54 | 1. Stop the app in the terminal by pressing `Ctrl-C` (otherwise the port will be taken when you debug) 55 | 1. From the `Debug` menu, select `Start Debugging`. 56 | 1. Select the `Django` debug configuration from the menu. 57 | 1. Open `tweeter/views.py`, set a breakpoint on line 26 58 | 1. Refresh the app in the browser to hit the breakpoint 59 | 1. Open the debug console via `View > Debug Console`, and type `request.user` into the debug console to inspect the logged-in user 60 | -------------------------------------------------------------------------------- /2019/3-azure-cli-flask-registry-container-instances/dev.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python 2 | 3 | COPY . /app/ 4 | 5 | WORKDIR /app/ 6 | 7 | RUN pip install -r requirements.txt 8 | 9 | ENV FLASK_APP=startup.py 10 | 11 | EXPOSE 5000 12 | 13 | CMD ["flask", "run", "--host=0.0.0.0"] 14 | -------------------------------------------------------------------------------- /2019/3-azure-cli-flask-registry-container-instances/prod.Dockerfile: -------------------------------------------------------------------------------- 1 | # Pull a pre-built alpine docker image with nginx and python3 installed 2 | FROM tiangolo/uwsgi-nginx-flask:python3.6-alpine3.7 3 | 4 | # Set the port on which the app runs; make both values the same. 5 | # 6 | # IMPORTANT: When deploying to Azure App Service, go to the App Service on the Azure 7 | # portal, navigate to the Application Settings blade, and create a setting named 8 | # WEBSITES_PORT with a value that matches the port here (the Azure default is 80). 9 | # You can also create a setting through the App Service Extension in VS Code. 10 | ENV LISTEN_PORT=5000 11 | EXPOSE 5000 12 | 13 | # Indicate where uwsgi.ini lives 14 | ENV UWSGI_INI uwsgi.ini 15 | 16 | # Tell nginx where static files live. Typically, developers place static files for 17 | # multiple apps in a shared folder, but for the purposes here we can use the one 18 | # app's folder. Note that when multiple apps share a folder, you should create subfolders 19 | # with the same name as the app underneath "static" so there aren't any collisions 20 | # when all those static files are collected together. 21 | ENV STATIC_URL /hello_app/static 22 | 23 | # Set the folder where uwsgi looks for the app 24 | WORKDIR /hello_app 25 | 26 | # Copy the app contents to the image 27 | COPY . /hello_app 28 | 29 | # If you have additional requirements beyond Flask (which is included in the 30 | # base image), generate a requirements.txt file with pip freeze and uncomment 31 | # the next three lines. 32 | #COPY requirements.txt / 33 | #RUN pip install --no-cache-dir -U pip 34 | #RUN pip install --no-cache-dir -r /requirements.txt 35 | -------------------------------------------------------------------------------- /2019/4-azure-functions-python/LAB_SETUP.md: -------------------------------------------------------------------------------- 1 | # Lab Setup 2 | 3 | > For the team setting up the lab, prior to the attendee working on the lab. 4 | 5 | To ensure that the lab goes as smoothly as possible, run the `New-DevEnvironment.ps1` PowerShell script to set up an environment with every requirement. 
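Once the environment is in place, a quick smoke test is to start the function host from the `prime_calculator` folder with `func host start` and call the `is_prime` endpoint. A minimal sketch, assuming the Core Tools default port of 7071 and the `requests` package:

```python
# Smoke test for the is_prime function, assuming a local Functions host
# started with `func host start` on the default port 7071.
import requests

resp = requests.get("http://localhost:7071/api/is_prime", params={"number": 17})
print(resp.status_code, resp.text)  # expect: 200 "17 is prime."
```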
6 | 7 | ## Prerequisites 8 | 9 | - [Python 3.6](https://www.python.org/downloads/release/python-368/) 10 | - [Azure Functions Core Tools 2.x](https://docs.microsoft.com/en-us/azure/azure-functions/functions-run-local#v2) 11 | 12 | ## Also Helpful 13 | 14 | - [Visual Studio Code](https://code.visualstudio.com/download) 15 | -------------------------------------------------------------------------------- /2019/4-azure-functions-python/New-DevEnvironment.ps1: -------------------------------------------------------------------------------- 1 | "Installing everything needed for the Calculating Primeness Lab." 2 | 3 | # CONSTANTS ======================================================================================= 4 | $PYTHON_36_DOWNLOAD_URI = "https://www.python.org/ftp/python/3.6.8/python-3.6.8-embed-amd64.zip" 5 | $PYTHON_36_ZIP_PATH = "$PSScriptRoot\python36.zip" 6 | $PYTHON_36_DIR = "$PSScriptRoot\python36" 7 | $NODE_10_DOWNLOAD_URI = "https://nodejs.org/dist/v10.15.3/node-v10.15.3-x64.msi" 8 | $NODE_10_MSI_PATH = "$PSScriptRoot\node.msi" 9 | # ================================================================================================= 10 | 11 | # Check if the user is running PowerShell as an Admin. 12 | # They must be for the script to run properly. 13 | $currentPrincipal = New-Object Security.Principal.WindowsPrincipal( 14 | [Security.Principal.WindowsIdentity]::GetCurrent() 15 | ) 16 | If (!$currentPrincipal.IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator)) { 17 | throw "You are not running this script as an administrator. Try again with an elevated prompt." 18 | } 19 | 20 | # Add the assembly for unpacking zip files. 21 | Add-Type -AssemblyName System.IO.Compression.FileSystem 22 | 23 | # PYTHON 3.6 ====================================================================================== 24 | # Download and Unzip Python Binaries for Windows to $PYTHON_36_DIR 25 | "Downloading Python 3.6 from $PYTHON_36_DOWNLOAD_URI" 26 | (New-Object System.Net.WebClient).DownloadFile($PYTHON_36_DOWNLOAD_URI, $PYTHON_36_ZIP_PATH) 27 | New-Item -Path $PYTHON_36_DIR -ItemType directory -Force 28 | [System.IO.Compression.ZipFile]::ExtractToDirectory($PYTHON_36_ZIP_PATH, $PYTHON_36_DIR) 29 | # ================================================================================================= 30 | 31 | # DotNET CORE 2.X ================================================================================= 32 | If ($(dotnet --list-sdks | Out-String) -notmatch "2\..\..") { 33 | "Installing .NET Core 2.2.104" 34 | dotnet-install.ps1 -Version "2.2.104" 35 | } 36 | # ================================================================================================= 37 | 38 | # Node.js ========================================================================================= 39 | If ($(node -v | Out-String) -notmatch "[ 1][019]\..") { 40 | "Installing Node 10.15.3" 41 | (New-Object System.Net.WebClient).DownloadFile($NODE_10_DOWNLOAD_URI, $NODE_10_MSI_PATH) 42 | Start-Process $NODE_10_MSI_PATH -Wait 43 | $env:Path = [System.Environment]::GetEnvironmentVariable("Path", "Machine") + ";" + 44 | [System.Environment]::GetEnvironmentVariable("Path", "User") 45 | } 46 | # ================================================================================================= 47 | 48 | # Azure Functions Core Tools ====================================================================== 49 | If ($(func -v | Out-String) -notmatch "2\..") { 50 | "Installing Azure Functions Core Tools." 
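# npm puts the 2.x Core Tools on the PATH; a fresh shell may be needed before 'func' resolves.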
51 | npm install -g azure-functions-core-tools 52 | } 53 | # ================================================================================================= 54 | -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/.funcignore: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/.gitignore: -------------------------------------------------------------------------------- 1 | bin 2 | obj 3 | csx 4 | .vs 5 | edge 6 | Publish 7 | 8 | *.user 9 | *.suo 10 | *.cscfg 11 | *.Cache 12 | project.lock.json 13 | 14 | /packages 15 | /TestResults 16 | 17 | /tools/NuGet.exe 18 | /App_Data 19 | /secrets 20 | /data 21 | .secrets 22 | appsettings.json 23 | local.settings.json 24 | 25 | node_modules 26 | dist 27 | 28 | # Local python packages 29 | .python_packages/ 30 | 31 | # Python Environments 32 | .env 33 | .venv 34 | env/ 35 | venv/ 36 | ENV/ 37 | env.bak/ 38 | venv.bak/ 39 | 40 | # Byte-compiled / optimized / DLL files 41 | __pycache__/ 42 | *.py[cod] 43 | *$py.class 44 | 45 | # Managed dependencies folder 46 | ManagedDependencies/ -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "ms-azuretools.vscode-azurefunctions" 4 | ] 5 | } -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/host.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0" 3 | } -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/is_prime/__init__.py: -------------------------------------------------------------------------------- 1 | import math 2 | import logging 3 | 4 | import azure.functions as func 5 | 6 | 7 | def main(req: func.HttpRequest) -> func.HttpResponse: 8 | logging.info("Python HTTP trigger function processed a request.") 9 | 10 | number = req.params.get("number") 11 | try: 12 | number = int(number) 13 | except (TypeError, ValueError): 14 | return func.HttpResponse( 15 | "Please pass an integer corresponding to the key `number` on the query string.", 16 | status_code=400, 17 | ) 18 | 19 | response = "is prime" if is_prime(number) else "is composite" 20 | return func.HttpResponse(f"{number} {response}.") 21 | 22 | 23 | def is_prime(number: int) -> bool: 24 | """Tests primeness of number, returns True if prime.""" 25 | if number < 2: 26 | return False # 0, 1, and negative numbers are not prime 27 | min_divisor = 2 28 | max_divisor = int(math.sqrt(number)) # floor of the square root is a sufficient bound 29 | for divisor in range(min_divisor, max_divisor + 1): 30 | if number % divisor == 0: 31 | return False 32 | return True 33 | -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/is_prime/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "scriptFile": "__init__.py", 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req", 9 | "methods": [ 10 | "get", 11 | "post" 12 | ] 13 | }, 14 | { 15 | 
"type": "http", 16 | "direction": "out", 17 | "name": "$return" 18 | } 19 | ] 20 | } -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/is_prime/sample.dat: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Azure" 3 | } -------------------------------------------------------------------------------- /2019/4-azure-functions-python/python_azure_func/prime_calculator/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/4-azure-functions-python/python_azure_func/prime_calculator/requirements.txt -------------------------------------------------------------------------------- /2019/5-jupyter-azure-cognitive-services-face/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # IPython 79 | profile_default/ 80 | ipython_config.py 81 | 82 | # pyenv 83 | .python-version 84 | 85 | # pipenv 86 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 87 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 88 | # having no cross-platform support, pipenv may install dependencies that don’t work, or not 89 | # install all needed dependencies. 
90 | #Pipfile.lock 91 | 92 | # celery beat schedule file 93 | celerybeat-schedule 94 | 95 | # SageMath parsed files 96 | *.sage.py 97 | 98 | # Environments 99 | .env 100 | .venv 101 | env/ 102 | venv/ 103 | ENV/ 104 | env.bak/ 105 | venv.bak/ 106 | 107 | # Spyder project settings 108 | .spyderproject 109 | .spyproject 110 | 111 | # Rope project settings 112 | .ropeproject 113 | 114 | # mkdocs documentation 115 | /site 116 | 117 | # mypy 118 | .mypy_cache/ 119 | .dmypy.json 120 | dmypy.json 121 | 122 | # Pyre type checker 123 | .pyre/ 124 | 125 | local.settings.json -------------------------------------------------------------------------------- /2019/5-jupyter-azure-cognitive-services-face/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.pythonPath": ".env\\Scripts\\python.exe:.env/bin/python3.7" 3 | } -------------------------------------------------------------------------------- /2019/5-jupyter-azure-cognitive-services-face/LAB_SETUP.md: -------------------------------------------------------------------------------- 1 | # Detecting Emotion with Azure Cognitive Services (ACS) Lab Setup 2 | 3 | ## Requirements 4 | 5 | - An Azure Cognitive Services API Key. You can get a free key for 7 days at this [link](https://azure.microsoft.com/en-us/try/cognitive-services/?api=face-api). 6 | - Python pip requirements that can be installed globally or to a virtual environment by running `pip install -r requirements.txt` in the same directory as this file. 7 | 8 | ## Setting Up the Developer Environment 9 | 10 | By default, when you set up your ACS API, you should get two keys. Create a file in the same directory as this file called `local.settings.json`. The file will be gitignored by default. This file is where we keep our secret keys; editing the `.gitignore` can compromise the privacy of these keys. If you suspect the keys have been deployed to a public repository, the keys can be reset from the Azure portal. 11 | 12 | You can get your keys from the Azure portal or using the Azure CLI by running: 13 | 14 | ``` powershell 15 | az cognitiveservices account keys list -g <resource-group> -n <account-name> 16 | ``` 17 | 18 | This will return JSON resembling: 19 | 20 | ``` json 21 | { 22 | "key1": "<key1-value>", 23 | "key2": "<key2-value>" 24 | } 25 | ``` 26 | 27 | Copy this output to the `local.settings.json` file and you are all set! -------------------------------------------------------------------------------- /2019/5-jupyter-azure-cognitive-services-face/README.md: -------------------------------------------------------------------------------- 1 | # Detecting Emotion with Azure Cognitive Services 2 | 3 | To start the lab, run the command: 4 | 5 | ``` powershell 6 | jupyter notebook DetectingEmotionWithAzureCognitiveServices.ipynb 7 | ``` 8 | 9 | This will open the lab in a browser window. If the browser does not open automatically, click this [link](http://localhost:8888/notebooks/DetectingEmotionWithAzureCognitiveServices.ipynb) to go to the notebook. 
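The notebook drives the Face API with the key loaded by the `key_handler` helper below. As a rough sketch of the call it makes, assuming a trial key in the westcentralus region (match the endpoint to your key's region) and a publicly reachable image URL:

```python
# Minimal sketch of an emotion-detection call, assuming a key in
# local.settings.json and a westcentralus Face API endpoint; the lab
# notebook contains the authoritative version of this request.
import requests

from key_handler import get_api_key

FACE_API_URL = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect"

response = requests.post(
    FACE_API_URL,
    params={"returnFaceAttributes": "emotion"},
    headers={"Ocp-Apim-Subscription-Key": get_api_key()},
    json={"url": "https://example.com/some-face.jpg"},  # placeholder image URL
)
print(response.json())  # one entry per detected face, each with emotion scores
```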
-------------------------------------------------------------------------------- /2019/5-jupyter-azure-cognitive-services-face/key_handler/__init__.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | import os 4 | 5 | 6 | def get_api_key(): 7 | try: 8 | with open("local.settings.json", "r") as settings: 9 | return json.loads(settings.read())["key1"] 10 | except FileNotFoundError: 11 | raise FileNotFoundError( 12 | "You may not have created a `local.settings.json` file. See LAB_SETUP.md for instructions." 13 | ) 14 | -------------------------------------------------------------------------------- /2019/5-jupyter-azure-cognitive-services-face/requirements.txt: -------------------------------------------------------------------------------- 1 | appdirs==1.4.3 2 | appnope==0.1.0 3 | atomicwrites==1.3.0 4 | attrs==19.1.0 5 | backcall==0.1.0 6 | black==19.3b0 7 | bleach==3.1.4 8 | certifi==2019.3.9 9 | chardet==3.0.4 10 | Click==7.0 11 | cycler==0.10.0 12 | decorator==4.4.0 13 | defusedxml==0.6.0 14 | entrypoints==0.3 15 | idna==2.8 16 | ipykernel==5.1.0 17 | ipython==7.4.0 18 | ipython-genutils==0.2.0 19 | ipywidgets==7.4.2 20 | jedi==0.13.3 21 | Jinja2==2.10.1 22 | jsonschema==3.0.1 23 | jupyter==1.0.0 24 | jupyter-client==5.2.4 25 | jupyter-console==6.0.0 26 | jupyter-core==4.4.0 27 | kiwisolver==1.0.1 28 | MarkupSafe==1.1.1 29 | matplotlib==3.0.3 30 | mistune==0.8.4 31 | more-itertools==7.0.0 32 | mypy==0.701 33 | mypy-extensions==0.4.1 34 | nbconvert==5.4.1 35 | nbformat==4.4.0 36 | notebook==5.7.8 37 | numpy==1.16.2 38 | pandocfilters==1.4.2 39 | parso==0.4.0 40 | pexpect==4.7.0 41 | pickleshare==0.7.5 42 | Pillow==6.0.0 43 | pluggy==0.9.0 44 | prometheus-client==0.6.0 45 | prompt-toolkit==2.0.9 46 | ptyprocess==0.6.0 47 | py==1.8.0 48 | Pygments==2.3.1 49 | pyparsing==2.4.0 50 | pyrsistent==0.14.11 51 | pytest==4.4.1 52 | python-dateutil==2.8.0 53 | pyzmq==18.0.1 54 | qtconsole==4.4.3 55 | requests==2.21.0 56 | Send2Trash==1.5.0 57 | six==1.12.0 58 | terminado==0.8.2 59 | testpath==0.4.2 60 | toml==0.10.0 61 | tornado==6.0.2 62 | traitlets==4.3.2 63 | typed-ast==1.3.4 64 | urllib3==1.24.2 65 | wcwidth==0.1.7 66 | webencodings==0.5.1 67 | widgetsnbextension==3.4.2 -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/.library.json: -------------------------------------------------------------------------------- 1 | {"name":"pycon-recommender","id":"pycon-recommender","created":"4/17/2019 9:42:16 PM +00:00","modified":"4/17/2019 9:42:16 PM +00:00","lastBackedUp":"","accessed":"4/26/2019 2:12:09 PM +00:00","clonedFrom":null,"cloneCount":3,"gitRepositoryUrl":null,"public":"True","starCount":0,"setupSteps":[]} -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/LAB_SETUP.md: -------------------------------------------------------------------------------- 1 | # Lab Setup 2 | 3 | ## Prerequisites 4 | 5 | - **Azure Subscription**: If you don’t have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning service today](https://azure.microsoft.com/en-us/free/services/machine-learning/). 6 | - **Azure Machine Learning Service Workspace**: You will need an AzureML service workspace to run this lab at home. 7 | 8 | > With a new account, you get credits to spend on Azure services, which will easily cover the cost of running this example notebook. 
After they're used up, you can keep the account and use [free Azure services](https://azure.microsoft.com/en-us/free/). Your credit card is never charged unless you explicitly change your settings and ask to be charged. Or [activate MSDN subscriber benefits](https://azure.microsoft.com/en-us/pricing/member-offers/credit-for-visual-studio-subscribers/), which give you credits every month that you can use for paid Azure services. 9 | 10 | ## Set Up an Azure Machine Learning Service Workspace 11 | 12 | From the Azure Portal, select the `+` symbol in the left bar to add a new resource. In the blade that appears, type `Machine Learning service workspace` into the search bar. Press `Enter` and click the `Create` button on the blade that appears. You will be prompted to set the following values: `Workspace name`, `Subscription`, `Resource group`, and `Location`. After setting them how you like, click the `Create` button and Azure will begin to deploy your resource. 13 | 14 | Go to the resource once it is deployed. On the resource blade, click `Download config.json` and copy the file to the same directory in which this file is located. You may be replacing an existing `config.json`. 15 | 16 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/README.md: -------------------------------------------------------------------------------- 1 | # Build a Movie Recommendation System with Azure Machine Learning service 2 | Get started in Azure Notebooks: [![Azure Notebooks](https://notebooks.azure.com/launch.svg)](https://notebooks.azure.com/heatherbshapiro/projects/pycon-recommender?clone=true) 3 | 4 | Recommendation systems are used in a variety of industries, from retail to news and media. If you’ve ever used a streaming service or ecommerce site that has surfaced recommendations for you based on what you’ve previously watched or purchased, you’ve interacted with a recommendation system. With the availability of large amounts of data, many businesses are turning to recommendation systems as a critical revenue driver. However, finding the right recommender algorithms can be very time-consuming for data scientists. This is why Microsoft has provided a [GitHub repository](https://github.com/Microsoft/Recommenders) with Python best practice examples to facilitate the building and evaluation of recommendation systems. You can learn more about the repo on the [Azure Blog](https://azure.microsoft.com/en-us/blog/building-recommender-systems-with-azure-machine-learning-service/). 5 | 6 | This tutorial will walk through how to build a Movie Recommender system trained with a Simple Algorithm for Recommenders (SAR) for the [Movielens dataset](https://grouplens.org/datasets/movielens/) on [Azure Machine Learning service](https://docs.microsoft.com/azure/machine-learning/service/overview-what-is-azure-ml). It demonstrates how to use the power of the cloud to manage data, switch to powerful GPU machines, and monitor runs while training a model. You will also be able to test an existing web service and find the most relevant movie recommendations. 
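The lab notebooks drive that workflow through the AzureML Python SDK. As a rough sketch of its shape (the experiment name and `train.py` follow the files in this folder; the exact calls in the notebook may differ):

```python
# Rough sketch of the AzureML training loop this lab walks through.
# Assumes azureml-sdk is installed and config.json (downloaded from the
# portal) sits next to this script; the notebook's exact code may differ.
from azureml.core import Experiment, Workspace
from azureml.train.estimator import Estimator

ws = Workspace.from_config()  # reads config.json
exp = Experiment(workspace=ws, name="movielens-sar")

est = Estimator(
    source_directory="movielens-sar",  # contains train.py
    entry_script="train.py",
    compute_target="local",            # or the name of an AmlCompute cluster
    conda_packages=["pandas", "scikit-learn"],
)

run = exp.submit(est)
run.wait_for_completion(show_output=True)  # or RunDetails(run).show() in Jupyter
```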
7 | 8 | In this lab you will: 9 | 10 | - Connect to an Azure Machine Learning service workspace 11 | - Access Movielens data from a datastore 12 | - Connect to CPU and GPU machines from [Azure Machine Learning Compute](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) 13 | - Create a training script using the recommender repo's [util functions](https://github.com/Microsoft/Recommenders/tree/master/reco_utils) for SAR and add logging information 14 | - Submit the training job to AzureML, and monitor the run with a Jupyter widget 15 | - Test an existing model with new user data 16 | - **Optional part 2:** Deploy the model to a web service using Azure Container Instances. 17 | 18 | ## Getting Started 19 | 20 | 1. Clone this repository in Azure Notebooks [![Azure Notebooks](https://notebooks.azure.com/launch.svg)](https://notebooks.azure.com/heatherbshapiro/projects/pycon-recommender?clone=true) 21 | 2. Open the `sar_movielens_with_azureml.ipynb` notebook and run through the lab. 22 | 3. **Optional**: Run through the `deploy_with_azureml.ipynb` Jupyter notebook 23 | 24 | ## What is Azure Machine Learning service? 25 | The **[Azure Machine Learning service (AzureML)](https://docs.microsoft.com/azure/machine-learning/service/overview-what-is-azure-ml)** provides a cloud-based environment you can use to prep data, train, test, deploy, manage, and track machine learning models. By using Azure Machine Learning service, you can start training on your local machine and then scale out to the cloud. With many available compute targets, like [Azure Machine Learning Compute](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) and [Azure Databricks](https://docs.microsoft.com/en-us/azure/azure-databricks/what-is-azure-databricks), and with [advanced hyperparameter tuning services](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters), you can build better models faster by using the power of the cloud. 26 | 27 | Data scientists and AI developers use the main [Azure Machine Learning Python SDK](https://docs.microsoft.com/en-us/python/api/overview/azure/ml/intro?view=azure-ml-py) to build and run machine learning workflows with the Azure Machine Learning service. You can interact with the service in any Python environment, including Jupyter Notebooks or your favorite Python IDE. The Azure Machine Learning SDK allows you the choice of using local or cloud compute resources, while managing and maintaining the complete data science workflow from the cloud. 28 | ![AzureML Workflow](https://docs.microsoft.com/en-us/azure/machine-learning/service/media/overview-what-is-azure-ml/aml.png) 29 | 30 | ### Advantages of using AzureML: 31 | - Manage cloud resources for monitoring, logging, and organizing your machine learning experiments. 32 | - Train models either locally or by using cloud resources, including GPU-accelerated model training. 
33 | - Easy to scale out when dataset grows - by just creating and pointing to new compute target 34 | 35 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/aml_data/movielens_100k_data.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/aml_data/movielens_100k_data.pkl -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/aml_data/movielens_1m_data.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/aml_data/movielens_1m_data.pkl -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "subscription_id": "d5aa990f-2452-4701-bd8e-21959f91194c", 3 | "resource_group": "190500-labs-azureml", 4 | "workspace_name": "pycon_azureml" 5 | } 6 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/existing-widget.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "metadata": { 5 | "trusted": true 6 | }, 7 | "cell_type": "code", 8 | "source": "import ipywidgets as widgets\nimport pandas as pd\nimport requests\nimport json\nfrom IPython.display import clear_output\nfrom sklearn.externals import joblib\n\ncounter = 0\nscores = []\n# modelTest =joblib.load('movielens_sar_model.pkl')\nmostPopular =modelTest.get_popularity_based_topk(top_k=30,sort_top_k=True).join(data[['MovieId', 'Title']].drop_duplicates().set_index('MovieId'), \n on='MovieId', \n how='inner')[['MovieId','Title']].sample(5)\nstyle = {'description_width': 'initial'}\n# title= widgets.Label(\"Let's rate a few popular movies\",value = r'\\(\\color{red} {highlighted}\\)')\ntitle= widgets.Label(\"Let's rate a few popular movies\")\nmovie =widgets.RadioButtons(\n options=[1.0,2.0,3.0,4.0,5.0],\n value=3.0,\n description= str(counter+1) + \". \" +str(mostPopular.iloc[0,1])+ ':',\n disabled=False,\n style=style\n)\nbutton = widgets.Button(description='Next', style = style)\ndef on_button_clicked(b):\n global counter\n global scores \n global movie\n global button\n global title\n\n scores.append(movie.value)\n if counter <4:\n movie =widgets.RadioButtons(\n options=[1.0,2.0,3.0,4.0,5.0],\n value=3.0,\n description= str(counter+2) + \". 
\" + str(mostPopular.iloc[counter+1,1])+ ':',\n disabled=False,\n style=style\n )\n vbox.children = [title,movie,button]\n\n if counter ==3:\n button.description = \"Submit\"\n button.style.button_color = 'lightgreen'\n if counter ==4:\n clear_output()\n title = widgets.Label(\"Here are the recommended movies based on your ratings.\", style=style)\n display(title)\n d = {'MovieId': mostPopular['MovieId'].tolist(), 'Rating': scores}\n df = pd.DataFrame(data=d).to_json()\n test_sample_encoded = bytes(df,encoding = 'utf8')\n scoringURI = \"http://52.147.184.63:80/score\"\n headers = {'content-type': 'application/json'}\n # sending post request and saving response as response object \n similar = requests.post(url = scoringURI, data=test_sample_encoded, headers=headers).json()\n # similar =service.run(input_data = test_sample_encoded)\n temp = pd.read_json(similar).join(data[['MovieId', 'Title']].drop_duplicates().set_index('MovieId'), \n on='MovieId', \n how='inner').sort_values(by=['prediction'], ascending=False)\n display(temp)\n counter +=1\n \nbutton.on_click(on_button_clicked)\nvbox = widgets.VBox([title,movie,button])\ndisplay(vbox)", 9 | "execution_count": null, 10 | "outputs": [] 11 | }, 12 | { 13 | "metadata": { 14 | "trusted": true 15 | }, 16 | "cell_type": "code", 17 | "source": "", 18 | "execution_count": null, 19 | "outputs": [] 20 | } 21 | ], 22 | "metadata": { 23 | "kernelspec": { 24 | "name": "python36", 25 | "display_name": "Python 3.6", 26 | "language": "python" 27 | }, 28 | "language_info": { 29 | "mimetype": "text/x-python", 30 | "nbconvert_exporter": "python", 31 | "name": "python", 32 | "pygments_lexer": "ipython3", 33 | "version": "3.6.6", 34 | "file_extension": ".py", 35 | "codemirror_mode": { 36 | "version": 3, 37 | "name": "ipython" 38 | } 39 | } 40 | }, 41 | "nbformat": 4, 42 | "nbformat_minor": 2 43 | } -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/.amlignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | azureml-logs 3 | .azureml 4 | .git 5 | outputs 6 | azureml-setup 7 | docs 8 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/.azureml/conda_dependencies.yml: -------------------------------------------------------------------------------- 1 | # Conda environment specification. The dependencies defined in this file will 2 | # be automatically provisioned for runs with userManagedDependencies=False. 3 | 4 | # Details about the Conda environment file format: 5 | # https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually 6 | 7 | name: project_environment 8 | dependencies: 9 | # The python interpreter version. 10 | # Currently Azure ML only supports 3.5.2 and later. 11 | - python=3.6.2 12 | 13 | - pip: 14 | # Required packages for AzureML execution, history, and data preparation. 
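# (azureml-defaults typically pulls in azureml-core plus the packages needed for run history.)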
15 | - azureml-defaults 16 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/.azureml/config.json: -------------------------------------------------------------------------------- 1 | {"Id": "movielens-sar", "Scope": "/subscriptions/fac34303-435d-4486-8c3f-7094d82a0b60/resourceGroups/pycon-recipes/providers/Microsoft.MachineLearningServices/workspaces/pycon/projects/movielens-sar"} -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/aml_config/conda_dependencies.yml: -------------------------------------------------------------------------------- 1 | # Conda environment specification. The dependencies defined in this file will 2 | # be automatically provisioned for runs with userManagedDependencies=False. 3 | 4 | # Details about the Conda environment file format: 5 | # https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually 6 | 7 | name: project_environment 8 | dependencies: 9 | # The python interpreter version. 10 | # Currently Azure ML only supports 3.5.2 and later. 11 | - python=3.6.2 12 | 13 | - pip: 14 | # Required packages for AzureML execution, history, and data preparation. 15 | - azureml-defaults 16 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/aml_config/docker.runconfig: -------------------------------------------------------------------------------- 1 | # The script to run. 2 | script: train.py 3 | # The arguments to the script file. 4 | arguments: [] 5 | # The name of the compute target to use for this run. 6 | target: local 7 | # Framework to execute inside. Allowed values are "Python", "PySpark", "CNTK", "TensorFlow", and "PyTorch". 8 | framework: PySpark 9 | # Communicator for the given framework. Allowed values are "None", "ParameterServer", "OpenMpi", and "IntelMpi". 10 | communicator: None 11 | # Automatically prepare the run environment as part of the run itself. 12 | autoPrepareEnvironment: true 13 | # Maximum allowed duration for the run. 14 | maxRunDurationSeconds: 15 | # Number of nodes to use for running job. 16 | nodeCount: 1 17 | # Environment details. 18 | environment: 19 | # Environment variables set for the run. 20 | environmentVariables: 21 | EXAMPLE_ENV_VAR: EXAMPLE_VALUE 22 | # Python details 23 | python: 24 | # user_managed_dependencies=True indicates that the environment will be user managed. False indicates that AzureML will manage the user environment. 25 | userManagedDependencies: false 26 | # The python interpreter path 27 | interpreterPath: python 28 | # Path to the conda dependencies file to use for this run. If a project 29 | # contains multiple programs with different sets of dependencies, it may be 30 | # convenient to manage those environments with separate files. 31 | condaDependenciesFile: aml_config/conda_dependencies.yml 32 | # Docker details 33 | docker: 34 | # Set True to perform this run inside a Docker container. 35 | enabled: true 36 | # Base image used for Docker-based runs. 37 | baseImage: mcr.microsoft.com/azureml/base:0.2.2 38 | # Set False if necessary to work around shared volume bugs. 39 | sharedVolumes: true 40 | # Run with NVidia Docker extension to support GPUs. 41 | gpuSupport: false 42 | # Shared memory size for Docker container. Default is 1g. 43 | shmSize: 1g 44 | # Extra arguments to the Docker run command. 
45 | arguments: [] 46 | # Image registry that contains the base image. 47 | baseImageRegistry: 48 | # DNS name or IP address of the Azure Container Registry (ACR) 49 | address: 50 | # The username for ACR 51 | username: 52 | # The password for ACR 53 | password: 54 | # Spark details 55 | spark: 56 | # List of spark repositories. 57 | repositories: 58 | - https://mmlspark.azureedge.net/maven 59 | # The packages to use. 60 | packages: 61 | - group: com.microsoft.ml.spark 62 | artifact: mmlspark_2.11 63 | version: '0.12' 64 | # Whether to precache the packages. 65 | precachePackages: true 66 | # Databricks details 67 | databricks: 68 | # List of maven libraries. 69 | mavenLibraries: [] 70 | # List of PyPi libraries 71 | pypiLibraries: [] 72 | # List of RCran libraries 73 | rcranLibraries: [] 74 | # List of JAR libraries 75 | jarLibraries: [] 76 | # List of Egg libraries 77 | eggLibraries: [] 78 | # History details. 79 | history: 80 | # Enable history tracking -- this allows status, logs, metrics, and outputs 81 | # to be collected for a run. 82 | outputCollection: true 83 | # Whether to take snapshots for history. 84 | snapshotProject: true 85 | # Spark configuration details. 86 | spark: 87 | # The Spark configuration. 88 | configuration: 89 | spark.app.name: Azure ML Experiment 90 | spark.yarn.maxAppAttempts: 1 91 | # HDI details. 92 | hdi: 93 | # Yarn deploy mode. Options are cluster and client. 94 | yarnDeployMode: cluster 95 | # Tensorflow details. 96 | tensorflow: 97 | # The number of worker tasks. 98 | workerCount: 1 99 | # The number of parameter server tasks. 100 | parameterServerCount: 1 101 | # Mpi details. 102 | mpi: 103 | # When using MPI, number of processes per node. 104 | processCountPerNode: 1 105 | # data reference configuration details 106 | dataReferences: {} 107 | # Project share datastore reference. 108 | sourceDirectoryDataStore: 109 | # AmlCompute details. 110 | amlcompute: 111 | # VM size of the cluster to be created. Allowed values are Azure VM sizes; the list of VM sizes is available at https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs 112 | vmSize: 113 | # VM priority of the cluster to be created. Allowed values are: "dedicated", "lowpriority". 114 | vmPriority: 115 | # A bool that indicates if the cluster has to be retained after job completion. 116 | retainCluster: false 117 | # Name of the cluster to be created. If not specified, runId will be used as cluster name. 118 | name: 119 | # Maximum number of nodes in the AmlCompute cluster to be created. Minimum number of nodes will always be set to 0. 120 | clusterMaxNodeCount: 1 121 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/aml_config/project.json: -------------------------------------------------------------------------------- 1 | {"Id": "movielens-sar", "Scope": "/subscriptions/fac34303-435d-4486-8c3f-7094d82a0b60/resourceGroups/pycon-recipes/providers/Microsoft.MachineLearningServices/workspaces/pycon/projects/movielens-sar"} -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/README.md: -------------------------------------------------------------------------------- 1 | # Recommender Utilities 2 | 3 | This module (reco_utils) contains functions to simplify common tasks used when developing and evaluating recommender systems. A short description of the sub-modules is provided below. 
For more details about what functions are available and how to use them, please review the doc-strings provided with the code. 4 | 5 | ## Sub-Modules 6 | 7 | ### [Common](./common) 8 | This submodule contains high-level utilities for defining constants used in most algorithms as well as helper functions for managing aspects of different frameworks: GPU, Spark, and Jupyter notebooks. 9 | 10 | ### [Dataset](./dataset) 11 | Dataset includes helper functions for interacting with Azure Cosmos databases, pulling different sizes of the Movielens dataset and formatting them appropriately, as well as utilities for splitting data for training / testing. 12 | 13 | #### Data Loading 14 | The movielens module will allow you to load a dataframe in pandas or spark formats from the Movielens dataset, with sizes of 100k, 1M, 10M, or 20M, to test algorithms and evaluate performance benchmarks. 15 | ```python 16 | df = movielens.load_pandas_df(size="100k") 17 | ``` 18 | 19 | #### Splitting Techniques: 20 | Currently three methods are available for splitting datasets. All of them support splitting by user or item and filtering out minimal samples (for instance users that have not rated enough items, or items that have not been rated by enough users). 21 | - Random: this is the basic approach, where entries are randomly assigned to each group based on the desired ratio 22 | - Chronological: this uses provided timestamps to order the data and selects a cut-off time that will split the desired ratio of data to train before that time and test after that time 23 | - Stratified: this is similar to random sampling, but the splits are stratified; for example, if the datasets are split by user, the splitting approach will attempt to maintain the same set of items used in both training and test splits. The converse is true if splitting by item. 24 | 25 | ### [Evaluation](./evaluation) 26 | The evaluation submodule includes functionality for performing hyperparameter sweeps as well as calculating common recommender metrics directly in Python or in a Spark environment using PySpark. 27 | 28 | Currently available metrics include: 29 | - Root Mean Squared Error 30 | - Mean Absolute Error 31 | - R2 32 | - Explained Variance 33 | - Precision at K 34 | - Recall at K 35 | - Normalized Discounted Cumulative Gain at K 36 | - Mean Average Precision at K 37 | - Area Under Curve 38 | - Logistic Loss 39 | 40 | ### [Recommender](./recommender) 41 | The recommender submodule contains implementations of various algorithms that can be used in addition to external packages to evaluate and develop new recommender system approaches. 42 | Currently the Simple Algorithm for Recommenders (SAR) is implemented in Python for running on a single node; a short end-to-end sketch follows. 
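Tying the sub-modules together, a minimal end-to-end sketch might look like the following. The module paths match this folder, but the column names and constructor arguments are assumptions; see `train.py` for the exact usage in this lab.

```python
# Minimal sketch: load MovieLens, split it, train single-node SAR, recommend.
# Column names and constructor arguments are assumptions; train.py contains
# the authoritative usage for this lab.
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
from reco_utils.recommender.sar.sar_singlenode import SARSingleNode

df = movielens.load_pandas_df(size="100k")
train, test = python_random_split(df, ratio=0.75)

model = SARSingleNode(
    remove_seen=True,
    similarity_type="jaccard",
    col_user="UserId",
    col_item="MovieId",
    col_rating="Rating",
    col_timestamp="Timestamp",
)
model.fit(train)
top_k = model.recommend_k_items(test, top_k=10)  # top-10 items per test user
```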
43 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/__init__.py: -------------------------------------------------------------------------------- 1 | __title__ = "Microsoft Recommenders" 2 | __version__ = "2019.02" 3 | __author__ = "RecoDev Team at Microsoft" 4 | __license__ = "MIT" 5 | __copyright__ = "Copyright 2018-present Microsoft Corporation" 6 | 7 | # Version synonym 8 | VERSION = __version__ 9 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/azureml/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/azureml/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | # Default column names 5 | DEFAULT_USER_COL = "userID" 6 | DEFAULT_ITEM_COL = "itemID" 7 | DEFAULT_RATING_COL = "rating" 8 | DEFAULT_LABEL_COL = "label" 9 | DEFAULT_TIMESTAMP_COL = "timestamp" 10 | PREDICTION_COL = "prediction" 11 | DEFAULT_PREDICTION_COL = PREDICTION_COL 12 | 13 | # Filtering variables 14 | DEFAULT_K = 10 15 | DEFAULT_THRESHOLD = 10 16 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/general_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | import psutil 6 | 7 | 8 | def invert_dictionary(dictionary): 9 | """Invert a dictionary 10 | NOTE: If the dictionary has unique keys and unique values, the inversion will be perfect. However, if there are 11 | repeated values, the inversion can take different keys 12 | 13 | Args: 14 | dictionary (dict): A dictionary 15 | 16 | Returns: 17 | dict: inverted dictionary 18 | """ 19 | return {v: k for k, v in dictionary.items()} 20 | 21 | 22 | def get_physical_memory(): 23 | """Get the physical memory in GBs. 24 | 25 | Returns: 26 | float: Physical memory in GBs. 27 | """ 28 | return psutil.virtual_memory()[0] / 1073741824 29 | 30 | 31 | def get_number_processors(): 32 | """Get the number of processors in a CPU. 33 | 34 | Returns: 35 | int: Number of processors. 
36 | """ 37 | try: 38 | num = os.cpu_count() 39 | except Exception: 40 | import multiprocessing # force exception in case multiprocessing is not installed 41 | 42 | num = multiprocessing.cpu_count() 43 | return num 44 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/gpu_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import sys 5 | import os 6 | import glob 7 | from numba import cuda 8 | from numba.cuda.cudadrv.error import CudaSupportError 9 | 10 | 11 | DEFAULT_CUDA_PATH_LINUX = "/usr/local/cuda/version.txt" 12 | 13 | 14 | def get_number_gpus(): 15 | """Get the number of GPUs in the system. 16 | 17 | Returns: 18 | int: Number of GPUs. 19 | """ 20 | try: 21 | return len(cuda.gpus) 22 | except CudaSupportError: 23 | return 0 24 | 25 | 26 | def clear_memory_all_gpus(): 27 | """Clear memory of all GPUs.""" 28 | try: 29 | for gpu in cuda.gpus: 30 | with gpu: 31 | cuda.current_context().deallocations.clear() 32 | except CudaSupportError: 33 | print("No CUDA available") 34 | 35 | 36 | def get_cuda_version(unix_path=DEFAULT_CUDA_PATH_LINUX): 37 | """Get CUDA version 38 | 39 | Args: 40 | unix_path (str): Path to CUDA version file in Linux/Mac. 41 | 42 | Returns: 43 | str: Version of the library. 44 | """ 45 | if sys.platform == "win32": 46 | raise NotImplementedError("Implement this!") 47 | elif sys.platform in ["linux", "darwin"]: 48 | if os.path.isfile(unix_path): 49 | with open(unix_path, "r") as f: 50 | data = f.read().replace("\n", "") 51 | return data 52 | else: 53 | return "No CUDA in this machine" 54 | else: 55 | raise ValueError("Not in Windows, Linux or Mac") 56 | 57 | 58 | def get_cudnn_version(): 59 | """Get the CuDNN version 60 | 61 | Returns: 62 | str: Version of the library. 63 | 64 | """ 65 | 66 | def find_cudnn_in_headers(candidates): 67 | for c in candidates: 68 | file = glob.glob(c) 69 | if file: 70 | break 71 | if file: 72 | with open(file[0], "r") as f: 73 | version = "" 74 | for line in f: 75 | if "#define CUDNN_MAJOR" in line: 76 | version = line.split()[-1] 77 | if "#define CUDNN_MINOR" in line: 78 | version += "." + line.split()[-1] 79 | if "#define CUDNN_PATCHLEVEL" in line: 80 | version += "." 
+ line.split()[-1] 81 | if version: 82 | return version 83 | else: 84 | return "Cannot find CUDNN version" 85 | else: 86 | return "No CUDNN in this machine" 87 | 88 | if sys.platform == "win32": 89 | candidates = ["C:\\NVIDIA\\cuda\\include\\cudnn.h", 90 | "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\include\\cudnn.h"] 91 | elif sys.platform == "linux": 92 | candidates = [ 93 | "/usr/include/x86_64-linux-gnu/cudnn_v*.h", 94 | "/usr/local/cuda/include/cudnn.h", 95 | "/usr/include/cudnn.h", 96 | ] 97 | elif sys.platform == "darwin": 98 | candidates = ["/usr/local/cuda/include/cudnn.h", "/usr/include/cudnn.h"] 99 | else: 100 | raise ValueError("Not in Windows, Linux or Mac") 101 | return find_cudnn_in_headers(candidates) 102 | 103 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/notebook_memory_management.py: -------------------------------------------------------------------------------- 1 | # Original code: https://raw.githubusercontent.com/miguelgfierro/codebase/master/python/system/notebook_memory_management.py 2 | # 3 | # Profile memory usage envelope of IPython commands and report interactively. 4 | # Usage (inside a python notebook): 5 | # from notebook_memory_management import start_watching_memory, stop_watching_memory 6 | # To start profile: 7 | # start_watching_memory() 8 | # To stop profile: 9 | # stop_watching_memory() 10 | # 11 | # Based on: https://github.com/ianozsvald/ipython_memory_usage 12 | # 13 | 14 | from __future__ import division # 1/2 == 0.5, as in Py3 15 | from __future__ import absolute_import # avoid hiding global modules with locals 16 | from __future__ import print_function # force use of print("hello") 17 | from __future__ import ( 18 | unicode_literals 19 | ) # force unadorned strings "" to be unicode without prepending u"" 20 | import time 21 | import memory_profiler 22 | from IPython import get_ipython 23 | import psutil 24 | import warnings 25 | 26 | 27 | # keep a global accounting for the last known memory usage 28 | # which is the reference point for the memory delta calculation 29 | previous_call_memory_usage = memory_profiler.memory_usage()[0] 30 | t1 = time.time() # will be set to current time later 31 | keep_watching = True 32 | watching_memory = True 33 | try: 34 | input_cells = get_ipython().user_ns["In"] 35 | except: 36 | warnings.warn("Not running on notebook") 37 | 38 | 39 | def start_watching_memory(): 40 | """Register memory profiling tools to IPython instance.""" 41 | global watching_memory 42 | watching_memory = True 43 | ip = get_ipython() 44 | ip.events.register("post_run_cell", watch_memory) 45 | ip.events.register("pre_run_cell", pre_run_cell) 46 | 47 | 48 | def stop_watching_memory(): 49 | """Unregister memory profiling tools from IPython instance.""" 50 | global watching_memory 51 | watching_memory = False 52 | ip = get_ipython() 53 | try: 54 | ip.events.unregister("post_run_cell", watch_memory) 55 | except ValueError: 56 | print("ERROR: problem when unregistering") 57 | pass 58 | try: 59 | ip.events.unregister("pre_run_cell", pre_run_cell) 60 | except ValueError: 61 | print("ERROR: problem when unregistering") 62 | pass 63 | 64 | 65 | def watch_memory(): 66 | # bring in the global memory usage value from the previous iteration 67 | global previous_call_memory_usage, keep_watching, watching_memory, input_cells 68 | new_memory_usage = memory_profiler.memory_usage()[0] 69 | memory_delta = new_memory_usage - 
previous_call_memory_usage 70 | keep_watching = False 71 | total_memory = psutil.virtual_memory()[0] / 1024 / 1024 # in Mb 72 | # calculate time delta using global t1 (from the pre-run event) and current time 73 | time_delta_secs = time.time() - t1 74 | num_commands = len(input_cells) - 1 75 | cmd = "In [{}]".format(num_commands) 76 | # convert the results into a pretty string 77 | output_template = ( 78 | "{cmd} used {memory_delta:0.4f} Mb RAM in " 79 | "{time_delta:0.2f}s, total RAM usage " 80 | "{memory_usage:0.2f} Mb, total RAM " 81 | "memory {total_memory:0.2f} Mb" 82 | ) 83 | output = output_template.format( 84 | time_delta=time_delta_secs, 85 | cmd=cmd, 86 | memory_delta=memory_delta, 87 | memory_usage=new_memory_usage, 88 | total_memory=total_memory, 89 | ) 90 | if watching_memory: 91 | print(str(output)) 92 | previous_call_memory_usage = new_memory_usage 93 | 94 | 95 | def pre_run_cell(): 96 | """Capture current time before we execute the current command""" 97 | global t1 98 | t1 = time.time() 99 | 100 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/notebook_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | 6 | 7 | def is_jupyter(): 8 | """Check if the module is running on Jupyter notebook/console 9 | 10 | Returns: 11 | bool: True if the module is running on Jupyter notebook or Jupyter console, 12 | False otherwise. 13 | """ 14 | try: 15 | shell_name = get_ipython().__class__.__name__ 16 | if shell_name == 'ZMQInteractiveShell': 17 | return True 18 | else: 19 | return False 20 | except NameError: 21 | return False 22 | 23 | 24 | def is_databricks(): 25 | """Check if the module is running on Databricks 26 | 27 | Returns: 28 | bool: True if the module is running on Databricks notebook, 29 | False otherwise. 30 | """ 31 | try: 32 | if os.path.realpath(".") == "/databricks/driver": 33 | return True 34 | else: 35 | return False 36 | except NameError: 37 | return False 38 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/python_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 
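# Numeric helpers for the recommender utilities: exponential time decay, Jaccard and lift co-occurrence transforms, and top-k score selection.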
3 | 4 | import logging 5 | 6 | import numpy as np 7 | from scipy import sparse 8 | 9 | 10 | logger = logging.getLogger() 11 | 12 | 13 | def exponential_decay(value, max_val, half_life): 14 | """Compute decay factor for a given value based on an exponential decay 15 | Values greater than max_val will be set to 1 16 | Args: 17 | value (numeric): value to calculate decay factor 18 | max_val (numeric): value at which decay factor will be 1 19 | half_life (numeric): value at which decay factor will be 0.5 20 | Returns: 21 | float: decay factor 22 | """ 23 | 24 | return np.minimum(1.0, np.power(0.5, (max_val - value) / half_life)) 25 | 26 | 27 | def jaccard(cooccurrence): 28 | """Helper method to calculate the Jaccard similarity of a matrix of co-occurrences 29 | Args: 30 | cooccurrence (np.array): the symmetric matrix of co-occurrences of items 31 | Returns: 32 | np.array: The matrix of Jaccard similarities between any two items 33 | """ 34 | 35 | diag = cooccurrence.diagonal() 36 | diag_rows = np.expand_dims(diag, axis=0) 37 | diag_cols = np.expand_dims(diag, axis=1) 38 | 39 | with np.errstate(invalid="ignore", divide="ignore"): 40 | result = cooccurrence / (diag_rows + diag_cols - cooccurrence) 41 | 42 | return np.array(result) 43 | 44 | 45 | def lift(cooccurrence): 46 | """Helper method to calculate the Lift of a matrix of co-occurrences 47 | Args: 48 | cooccurrence (np.array): the symmetric matrix of co-occurrences of items 49 | Returns: 50 | np.array: The matrix of Lifts between any two items 51 | """ 52 | 53 | diag = cooccurrence.diagonal() 54 | diag_rows = np.expand_dims(diag, axis=0) 55 | diag_cols = np.expand_dims(diag, axis=1) 56 | 57 | with np.errstate(invalid="ignore", divide="ignore"): 58 | result = cooccurrence / (diag_rows * diag_cols) 59 | 60 | return np.array(result) 61 | 62 | 63 | def get_top_k_scored_items(scores, top_k, sort_top_k=False): 64 | """Extract top K items from a matrix of scores for each user-item pair, optionally sort results per user 65 | 66 | Args: 67 | scores (np.array): score matrix (users x items) 68 | top_k (int): number of top items to recommend 69 | sort_top_k (bool): flag to sort top k results 70 | 71 | Returns: 72 | np.array, np.array: indices into score matrix for each users top items, scores corresponding to top items 73 | """ 74 | 75 | # ensure we're working with a dense ndarray 76 | if isinstance(scores, sparse.spmatrix): 77 | scores = scores.todense() 78 | 79 | if scores.shape[1] < top_k: 80 | logger.warning( 81 | "Number of items is less than top_k, limiting top_k to number of items" 82 | ) 83 | k = min(top_k, scores.shape[1]) 84 | 85 | test_user_idx = np.arange(scores.shape[0])[:, None] 86 | 87 | # get top K items and scores 88 | # this determines the un-ordered top-k item indices for each user 89 | top_items = np.argpartition(scores, -k, axis=1)[:, -k:] 90 | top_scores = scores[test_user_idx, top_items] 91 | 92 | if sort_top_k: 93 | sort_ind = np.argsort(-top_scores) 94 | top_items = top_items[test_user_idx, sort_ind] 95 | top_scores = top_scores[test_user_idx, sort_ind] 96 | 97 | return np.array(top_items), np.array(top_scores) 98 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/spark_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 
3 | 4 | import os 5 | import sys 6 | 7 | 8 | try: 9 | from pyspark.sql import SparkSession 10 | except ImportError: 11 | pass # skip this import if we are in pure python environment 12 | 13 | 14 | def start_or_get_spark( 15 | app_name="Sample", 16 | url="local[*]", 17 | memory="10G", 18 | packages=None, 19 | jars=None, 20 | repository=None 21 | ): 22 | """Start Spark if not started 23 | 24 | Args: 25 | app_name (str): Set name of the application 26 | url (str): URL for spark master 27 | memory (str): Size of memory for spark driver 28 | packages (list): list of packages to install 29 | jars (list): list of jar files to add 30 | repository (str): The maven repository 31 | 32 | Returns: 33 | obj: Spark context. 34 | """ 35 | 36 | submit_args = '' 37 | if packages is not None: 38 | submit_args = '--packages {} '.format(','.join(packages)) 39 | if jars is not None: 40 | submit_args += '--jars {} '.format(','.join(jars)) 41 | if repository is not None: 42 | submit_args += "--repositories {}".format(repository) 43 | if submit_args: 44 | os.environ['PYSPARK_SUBMIT_ARGS'] = '{} pyspark-shell'.format(submit_args) 45 | 46 | spark = ( 47 | SparkSession.builder.appName(app_name) 48 | .master(url) 49 | .config("spark.driver.memory", memory) 50 | .getOrCreate() 51 | ) 52 | 53 | return spark 54 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/common/timer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | from timeit import default_timer 5 | from datetime import timedelta 6 | 7 | 8 | class Timer(object): 9 | """Timer class. 10 | Original code: https://github.com/miguelgfierro/codebase 11 | 12 | Examples: 13 | >>> import time 14 | >>> t = Timer() 15 | >>> t.start() 16 | >>> time.sleep(1) 17 | >>> t.stop() 18 | >>> t.interval < 1 19 | True 20 | >>> with Timer() as t: 21 | ... time.sleep(1) 22 | >>> t.interval < 1 23 | True 24 | >>> "Time elapsed {}".format(t) #doctest: +ELLIPSIS 25 | 'Time elapsed 0:00:...' 26 | """ 27 | 28 | def __init__(self): 29 | self._timer = default_timer 30 | self._interval = 0 31 | self.running = False 32 | 33 | def __enter__(self): 34 | self.start() 35 | return self 36 | 37 | def __exit__(self, *args): 38 | self.stop() 39 | 40 | def __str__(self): 41 | return str(timedelta(seconds=self._interval)) 42 | 43 | def start(self): 44 | """Start the timer.""" 45 | self.init = self._timer() 46 | self.running = True 47 | 48 | def stop(self): 49 | """Stop the timer. 
Calculate the interval in seconds.""" 50 | self.end = self._timer() 51 | try: 52 | self._interval = self.end - self.init 53 | self.running = False 54 | except AttributeError: 55 | raise ValueError( 56 | "Timer has not been initialized: use start() or the contextual form with Timer() as t:" 57 | ) 58 | 59 | @property 60 | def interval(self): 61 | if self.running: 62 | raise ValueError("Timer has not been stopped, please use stop().") 63 | else: 64 | return self._interval 65 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/dataset/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/dataset/cosmos_cli.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | import pydocumentdb.errors as errors 4 | 5 | 6 | def find_collection(client, dbid, id): 7 | """Find whether or not a CosmosDB collection exists. 8 | Args: 9 | client (obj): A pydocumentdb client object. 10 | dbid (str): Database ID. 11 | id (str): Collection ID. 12 | Returns: 13 | bool: True if the collection exists, False otherwise. 14 | """ 15 | database_link = "dbs/" + dbid 16 | collections = list( 17 | client.QueryCollections( 18 | database_link, 19 | { 20 | "query": "SELECT * FROM r WHERE r.id=@id", 21 | "parameters": [{"name": "@id", "value": id}], 22 | }, 23 | ) 24 | ) 25 | if len(collections) > 0: 26 | return True 27 | else: 28 | return False 29 | 30 | 31 | def read_collection(client, dbid, id): 32 | """Read a CosmosDB collection. 33 | Args: 34 | client (obj): A pydocumentdb client object. 35 | dbid (str): Database ID. 36 | id (str): Collection ID. 37 | Returns: 38 | obj: A collection. 39 | """ 40 | try: 41 | database_link = "dbs/" + dbid 42 | collection_link = database_link + "/colls/{0}".format(id) 43 | collection = client.ReadCollection(collection_link) 44 | return collection 45 | except errors.DocumentDBError as e: 46 | if e.status_code == 404: 47 | print("A collection with id '{0}' does not exist".format(id)) 48 | else: 49 | raise errors.HTTPFailure(e.status_code) 50 | 51 | 52 | def read_database(client, id): 53 | """Read a CosmosDB database. 54 | Args: 55 | client (obj): A pydocumentdb client object. 56 | id (str): Database ID. 57 | Returns: 58 | obj: A database. 59 | """ 60 | try: 61 | database_link = "dbs/" + id 62 | database = client.ReadDatabase(database_link) 63 | return database 64 | except errors.DocumentDBError as e: 65 | if e.status_code == 404: 66 | print("A database with id '{0}' does not exist".format(id)) 67 | else: 68 | raise errors.HTTPFailure(e.status_code) 69 | 70 | 71 | def find_database(client, id): 72 | """Find whether or not a CosmosDB database exists. 73 | Args: 74 | client (obj): A pydocumentdb client object. 75 | id (str): Database ID. 76 | Returns: 77 | bool: True if the database exists, False otherwise. 
78 | """ 79 | databases = list( 80 | client.QueryDatabases( 81 | { 82 | "query": "SELECT * FROM r WHERE r.id=@id", 83 | "parameters": [{"name": "@id", "value": id}], 84 | } 85 | ) 86 | ) 87 | if len(databases) > 0: 88 | return True 89 | else: 90 | return False 91 | 92 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/dataset/download_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | from urllib.request import urlretrieve 6 | import logging 7 | from contextlib import contextmanager 8 | from tempfile import TemporaryDirectory 9 | from tqdm import tqdm 10 | 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | class TqdmUpTo(tqdm): 16 | """Wrapper class for the progress bar tqdm to get `update_to(n)` functionality""" 17 | 18 | def update_to(self, b=1, bsize=1, tsize=None): 19 | """A progress bar showing how much is left to finish the opperation 20 | 21 | Args: 22 | b (int): Number of blocks transferred so far. 23 | bsize (int): Size of each block (in tqdm units). 24 | tsize (int): Total size (in tqdm units). 25 | """ 26 | if tsize is not None: 27 | self.total = tsize 28 | self.update(b * bsize - self.n) # will also set self.n = b * bsize 29 | 30 | 31 | def maybe_download(url, filename=None, work_directory=".", expected_bytes=None): 32 | """Download a file if it is not already downloaded. 33 | 34 | Args: 35 | filename (str): File name. 36 | work_directory (str): Working directory. 37 | url (str): URL of the file to download. 38 | expected_bytes (int): Expected file size in bytes. 39 | 40 | Returns: 41 | str: File path of the file downloaded. 42 | """ 43 | if filename is None: 44 | filename = url.split("/")[-1] 45 | filepath = os.path.join(work_directory, filename) 46 | if not os.path.exists(filepath): 47 | with TqdmUpTo(unit="B", unit_scale=True) as t: 48 | filepath, _ = urlretrieve(url, filepath, reporthook=t.update_to) 49 | else: 50 | log.debug("File {} already downloaded".format(filepath)) 51 | if expected_bytes is not None: 52 | statinfo = os.stat(filepath) 53 | if statinfo.st_size != expected_bytes: 54 | os.remove(filepath) 55 | raise IOError("Failed to verify {}".format(filepath)) 56 | 57 | return filepath 58 | 59 | 60 | @contextmanager 61 | def download_path(path=None): 62 | """Return a path to download data. If `path=None`, then it yields a temporal path that is eventually deleted, 63 | otherwise the real path of the input. 64 | 65 | Args: 66 | path (str): Path to download data. 67 | 68 | Returns: 69 | str: Real path where the data is stored. 70 | 71 | Examples: 72 | >>> with download_path() as path: 73 | >>> ... 
maybe_download(url="http://example.com/file.zip", work_directory=path) 74 | 75 | """ 76 | if path is None: 77 | tmp_dir = TemporaryDirectory() 78 | try: 79 | yield tmp_dir.name 80 | finally: 81 | tmp_dir.cleanup() 82 | else: 83 | path = os.path.realpath(path) 84 | yield path 85 | 86 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/evaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/evaluation/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/evaluation/parameter_sweep.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | # 4 | # Utility functions for parameter sweep. 5 | 6 | from itertools import product 7 | 8 | 9 | def generate_param_grid(params): 10 | """Generator of parameter grids 11 | Generate parameter lists from a parameter dictionary in the form of 12 | { 13 | "param1": [value1, value2], 14 | "param2": [value1, value2] 15 | } 16 | 17 | to 18 | 19 | [ 20 | {"param1": value1, "param2": value1}, 21 | {"param1": value2, "param2": value1}, 22 | {"param1": value1, "param2": value2}, 23 | {"param1": value2, "param2": value2} 24 | ] 25 | 26 | Args: 27 | params (dict): dictionary of parameters and values (in a list). 28 | 29 | Returns: 30 | list: A list of parameter dictionaries that can be fed directly into 31 | the model builder as keyword arguments.
32 | """ 33 | param_new = {} 34 | param_fixed = {} 35 | 36 | for key, value in params.items(): 37 | if isinstance(value, list): 38 | param_new[key] = value 39 | else: 40 | param_fixed[key] = value 41 | 42 | items = sorted(param_new.items()) 43 | keys, values = zip(*items) 44 | 45 | params_exp = [] 46 | for v in product(*values): 47 | param_exp = dict(zip(keys, v)) 48 | param_exp.update(param_fixed) 49 | params_exp.append(param_exp) 50 | 51 | return params_exp 52 | 53 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/recommender/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/recommender/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/recommender/sar/__init__.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | # Time since epoch in seconds 4 | EPOCH = datetime.datetime.utcfromtimestamp(0) 5 | # Default value for time decay parameter in SAR 6 | TIME_DECAY_COEFFICIENT = 30 7 | # Switch to trigger groupby in TimeDecay calculation 8 | TIMEDECAY_FORMULA = False 9 | # cooccurrence matrix threshold 10 | THRESHOLD = 1 11 | # Current time 12 | # TIME_NOW = (datetime.datetime.now() - EPOCH).total_seconds() 13 | TIME_NOW = None 14 | # Default names for functions which change the item-item cooccurrence matrix 15 | SIM_COOCCUR = "cooccurrence" 16 | SIM_JACCARD = "jaccard" 17 | SIM_LIFT = "lift" 18 | 19 | INDEXED_ITEMS = "indexedItems" 20 | INDEXED_USERS = "indexedUsers" 21 | 22 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens-sar/train.py: -------------------------------------------------------------------------------- 1 | 2 | import argparse 3 | import os 4 | import numpy as np 5 | import pandas as pd 6 | import itertools 7 | import logging 8 | import time 9 | 10 | from azureml.core import Run 11 | from sklearn.externals import joblib 12 | 13 | from reco_utils.dataset import movielens 14 | from reco_utils.dataset.python_splitters import python_random_split 15 | from reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k 16 | from reco_utils.recommender.sar.sar_singlenode import SARSingleNode 17 | 18 | TARGET_DIR = 'movielens' 19 | OUTPUT_FILE_NAME = 'outputs/movielens_sar_model.pkl' 20 | MODEL_FILE_NAME = 'movielens_sar_model.pkl' 21 | 22 | # get hold of the current run 23 | run = Run.get_context() 24 | 25 | # let user feed in 2 parameters, the location of the data files (from datastore), and the regularization rate of the logistic regression model 26 | parser = argparse.ArgumentParser() 27 | parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point') 28 | parser.add_argument('--data-file', type=str, dest='data_file', help='data file name') 29 | parser.add_argument('--top-k', type=int, dest='top_k', default=10, help='top k items to recommend') 30 | parser.add_argument('--data-size', type=str, dest='data_size', default=10, help='Movielens data size: 100k, 1m, 10m, or 20m') 31 | args = parser.parse_args() 32 | 33 | run.log("top-k",args.top_k) 34 | run.log("data-size", 
args.data_size) 35 | data_pickle_path = os.path.join(args.data_folder, args.data_file) 36 | 37 | data = pd.read_pickle(path=data_pickle_path) 38 | 39 | train, test = python_random_split(data, 0.75) 40 | 41 | # instantiate the SAR algorithm and set the index 42 | header = { 43 | "col_user": "UserId", 44 | "col_item": "MovieId", 45 | "col_rating": "Rating", 46 | "col_timestamp": "Timestamp", 47 | } 48 | 49 | logging.basicConfig(level=logging.DEBUG, 50 | format='%(asctime)s %(levelname)-8s %(message)s') 51 | 52 | model = SARSingleNode( 53 | remove_seen=True, similarity_type="jaccard", 54 | time_decay_coefficient=30, time_now=None, timedecay_formula=True, **header 55 | ) 56 | 57 | # train the SAR model 58 | start_time = time.time() 59 | 60 | model.fit(train) 61 | 62 | train_time = time.time() - start_time 63 | run.log(name="Training time", value=train_time) 64 | 65 | start_time = time.time() 66 | 67 | top_k = model.recommend_k_items(test) 68 | 69 | test_time = time.time() - start_time 70 | run.log(name="Prediction time", value=test_time) 71 | 72 | # TODO: remove this call when the model returns the same type as its input 73 | top_k['UserId'] = pd.to_numeric(top_k['UserId']) 74 | top_k['MovieId'] = pd.to_numeric(top_k['MovieId']) 75 | 76 | # evaluate 77 | eval_map = map_at_k(test, top_k, col_user="UserId", col_item="MovieId", 78 | col_rating="Rating", col_prediction="prediction", 79 | relevancy_method="top_k", k=args.top_k) 80 | eval_ndcg = ndcg_at_k(test, top_k, col_user="UserId", col_item="MovieId", 81 | col_rating="Rating", col_prediction="prediction", 82 | relevancy_method="top_k", k=args.top_k) 83 | eval_precision = precision_at_k(test, top_k, col_user="UserId", col_item="MovieId", 84 | col_rating="Rating", col_prediction="prediction", 85 | relevancy_method="top_k", k=args.top_k) 86 | eval_recall = recall_at_k(test, top_k, col_user="UserId", col_item="MovieId", 87 | col_rating="Rating", col_prediction="prediction", 88 | relevancy_method="top_k", k=args.top_k) 89 | 90 | run.log("map", eval_map) 91 | run.log("ndcg", eval_ndcg) 92 | run.log("precision", eval_precision) 93 | run.log("recall", eval_recall) 94 | # run.log_table("topk", top_k.to_dict()) 95 | 96 | # automatic upload of everything in the ./outputs folder doesn't work for very large model files 97 | # the model file has to be saved to a temp location, then uploaded by the upload_file function 98 | joblib.dump(value=model, filename=MODEL_FILE_NAME) 99 | 100 | run.upload_file(OUTPUT_FILE_NAME, MODEL_FILE_NAME) 101 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/movielens_sar_model.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/movielens_sar_model.pkl -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/myenv.yml: -------------------------------------------------------------------------------- 1 | # Conda environment specification. The dependencies defined in this file will 2 | # be automatically provisioned for runs with userManagedDependencies=False. 3 | 4 | # Details about the Conda environment file format: 5 | # https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually 6 | 7 | name: project_environment 8 | dependencies: 9 | # The python interpreter version.
10 | # Currently Azure ML only supports 3.5.2 and later. 11 | - python=3.6.2 12 | 13 | - pip: 14 | # Required packages for AzureML execution, history, and data preparation. 15 | - azureml-defaults 16 | - sklearn 17 | - numpy 18 | - pandas 19 | - tqdm 20 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/README.md: -------------------------------------------------------------------------------- 1 | # Recommender Utilities 2 | 3 | This module (reco_utils) contains functions to simplify common tasks used when developing and evaluating recommender systems. A short description of the sub-modules is provided below. For more details about what functions are available and how to use them, please review the doc-strings provided with the code. 4 | 5 | ## Sub-Modules 6 | 7 | ### [Common](./common) 8 | This submodule contains high-level utilities for defining constants used in most algorithms as well as helper functions for managing aspects of different frameworks: gpu, spark, jupyter notebook. 9 | 10 | ### [Dataset](./dataset) 11 | Dataset includes helper functions for interacting with Azure Cosmos databases, pulling different sizes of the Movielens dataset and formatting them appropriately, as well as utilities for splitting data for training / testing. 12 | 13 | #### Data Loading 14 | The movielens module will allow you to load a dataframe in pandas or spark formats from the Movielens dataset, with sizes of 100k, 1M, 10M, or 20M to test algorithms and evaluate performance benchmarks. 15 | ```python 16 | df = movielens.load_pandas_df(size="100k") 17 | ``` 18 | 19 | #### Splitting Techniques 20 | Currently three methods are available for splitting datasets. All of them support splitting by user or item and filtering out minimal samples (for instance users that have not rated enough items, or items that have not been rated by enough users). 21 | - Random: this is the basic approach where entries are randomly assigned to each group based on the ratio desired 22 | - Chronological: this uses the provided timestamps to order the data and selects a cut-off time so that the desired ratio of data falls before it for training and after it for testing 23 | - Stratified: this is similar to random sampling, but the splits are stratified, for example if the datasets are split by user, the splitting approach will attempt to maintain the same set of items used in both training and test splits. The converse is true if splitting by item. 24 | 25 | ### [Evaluation](./evaluation) 26 | The evaluation submodule includes functionality for performing hyperparameter sweeps as well as calculating common recommender metrics directly in python or in a Spark environment using pyspark. 27 | 28 | Currently available metrics include: 29 | - Root Mean Squared Error 30 | - Mean Absolute Error 31 | - R2 32 | - Explained Variance 33 | - Precision at K 34 | - Recall at K 35 | - Normalized Discounted Cumulative Gain at K 36 | - Mean Average Precision at K 37 | - Area Under Curve 38 | - Logistic Loss 39 | 40 | ### [Recommender](./recommender) 41 | The recommender submodule contains implementations of various algorithms that can be used in addition to external packages to evaluate and develop new recommender system approaches. 42 | Currently the Simple Adaptive Recommender (SAR) algorithm is implemented in python for running on a single node.
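For illustration, the snippet below strings these utilities together the same way this repo's `train.py` does (the column names are the ones that script uses; treat this as a sketch and adapt it to your data):

```python
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
from reco_utils.recommender.sar.sar_singlenode import SARSingleNode

# load the MovieLens 100k ratings and split 75/25 into train/test
data = movielens.load_pandas_df(size="100k")
train, test = python_random_split(data, 0.75)

# column names, following train.py in this repo
header = {
    "col_user": "UserId",
    "col_item": "MovieId",
    "col_rating": "Rating",
    "col_timestamp": "Timestamp",
}

model = SARSingleNode(
    remove_seen=True, similarity_type="jaccard",
    time_decay_coefficient=30, time_now=None, timedecay_formula=True, **header
)
model.fit(train)
top_k = model.recommend_k_items(test)
```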
43 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/__init__.py: -------------------------------------------------------------------------------- 1 | __title__ = "Microsoft Recommenders" 2 | __version__ = "2019.02" 3 | __author__ = "RecoDev Team at Microsoft" 4 | __license__ = "MIT" 5 | __copyright__ = "Copyright 2018-present Microsoft Corporation" 6 | 7 | # Version synonym 8 | VERSION = __version__ 9 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/azureml/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/reco_utils/azureml/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/azureml/aks_utils.py: -------------------------------------------------------------------------------- 1 | from math import ceil, floor 2 | import logging 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | def qps_to_replicas(target_qps, processing_time, max_qp_replica=1, target_utilization=0.7): 7 | """Provide a rough estimate of the number of replicas to support a given load (queries per second) 8 | 9 | Args: 10 | target_qps (int): target queries per second that you want to support 11 | processing_time (float): the estimated amount of time (in seconds) your service call takes 12 | max_qp_replica (int): maximum number of concurrent queries per replica 13 | target_utilization (float): proportion of CPU utilization you think is ideal 14 | 15 | Returns: 16 | replicas: Number of estimated replicas required to support a target number of queries per second 17 | """ 18 | concurrent_queries = target_qps * processing_time / target_utilization 19 | replicas = ceil(concurrent_queries / max_qp_replica) 20 | logger.info('Approximately {} replicas are estimated to support {} queries per second.'.format(replicas, target_qps)) 21 | return replicas 22 | 23 | def replicas_to_qps(num_replicas, processing_time, max_qp_replica=1, target_utilization=0.7): 24 | """Provide a rough estimate of the queries per second supported by a number of replicas 25 | 26 | Args: 27 | num_replicas (int): number of replicas 28 | processing_time (float): the estimated amount of time (in seconds) your service call takes 29 | max_qp_replica (int): maximum number of concurrent queries per replica 30 | target_utilization (float): proportion of CPU utilization you think is ideal 31 | 32 | Returns: 33 | qps: queries per second supported by the number of replicas 34 | """ 35 | qps = floor(num_replicas*max_qp_replica*target_utilization/processing_time) 36 | logger.info('Approximately {} queries per second are supported by {} replicas.'.format(qps, num_replicas)) 37 | return qps 38 | 39 | 40 | def total_cores_to_replicas(n_cores, cpu_cores_per_replica=0.1, overhead=0.1): 41 | """Provide a rough estimate of the number of replicas supported by a particular number of cores. 42 | 43 | Args: 44 | n_cores (int): Total number of cores within an AKS cluster that you want to use 45 | cpu_cores_per_replica (float): Cores assigned to each replica. 
This can be fractional and corresponds to the 46 | cpu_cores argument passed to AksWebservice.deploy_configuration() configuration 47 | overhead (float): Amount of overhead (as a proportion) 48 | 49 | Returns: 50 | replicas: Total number of replicas supported by n_cores 51 | """ 52 | replicas = floor((1 - overhead)*n_cores/(cpu_cores_per_replica)) 53 | logger.info('Approximately {} replicas are supported by {} cores.'.format(replicas, n_cores)) 54 | return replicas -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/azureml/azureml_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | 6 | from azureml.core import Workspace 7 | 8 | 9 | def get_or_create_workspace( 10 | config_path=None, 11 | subscription_id=None, 12 | resource_group=None, 13 | workspace_name=None, 14 | workspace_region=None, 15 | ): 16 | """Get or create AzureML Workspace this will save the config to the path specified for later use 17 | 18 | Args: 19 | config_path (str): optional directory to look for / store config.json file (defaults to current directory) 20 | subscription_id (str): subscription id 21 | resource_group (str): resource group 22 | workspace_name (str): workspace name 23 | workspace_region (str): region 24 | 25 | Returns: 26 | Workspace 27 | """ 28 | 29 | # use environment variables if needed 30 | if subscription_id is None: 31 | subscription_id = os.getenv("SUBSCRIPTION_ID") 32 | if resource_group is None: 33 | resource_group = os.getenv("RESOURCE_GROUP") 34 | if workspace_name is None: 35 | workspace_name = os.getenv("WORKSPACE_NAME") 36 | if workspace_region is None: 37 | workspace_region = os.getenv("WORKSPACE_REGION") 38 | 39 | # define fallback options in order to try 40 | options = [ 41 | ( 42 | Workspace, 43 | dict( 44 | subscription_id=subscription_id, 45 | resource_group=resource_group, 46 | workspace_name=workspace_name, 47 | ), 48 | ), 49 | (Workspace.from_config, dict(path=config_path)), 50 | ( 51 | Workspace.create, 52 | dict( 53 | subscription_id=subscription_id, 54 | resource_group=resource_group, 55 | name=workspace_name, 56 | location=workspace_region, 57 | create_resource_group=True, 58 | exist_ok=True, 59 | ), 60 | ), 61 | ] 62 | 63 | for function, kwargs in options: 64 | try: 65 | ws = function(**kwargs) 66 | break 67 | except Exception: 68 | continue 69 | else: 70 | raise ValueError( 71 | "Failed to get or create AzureML Workspace with the configuration information provided" 72 | ) 73 | 74 | ws.write_config(path=config_path) 75 | return ws 76 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/reco_utils/common/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 
3 | 4 | # Default column names 5 | DEFAULT_USER_COL = "userID" 6 | DEFAULT_ITEM_COL = "itemID" 7 | DEFAULT_RATING_COL = "rating" 8 | DEFAULT_LABEL_COL = "label" 9 | DEFAULT_TIMESTAMP_COL = "timestamp" 10 | PREDICTION_COL = "prediction" 11 | DEFAULT_PREDICTION_COL = PREDICTION_COL 12 | 13 | # Filtering variables 14 | DEFAULT_K = 10 15 | DEFAULT_THRESHOLD = 10 16 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/general_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | import psutil 6 | 7 | 8 | def invert_dictionary(dictionary): 9 | """Invert a dictionary 10 | NOTE: If the dictionary has unique keys and unique values, the inversion is exact. However, if values are 11 | repeated, only one of the corresponding keys is kept (which one is undefined) 12 | 13 | Args: 14 | dictionary (dict): A dictionary 15 | 16 | Returns: 17 | dict: inverted dictionary 18 | """ 19 | return {v: k for k, v in dictionary.items()} 20 | 21 | 22 | def get_physical_memory(): 23 | """Get the physical memory in GBs. 24 | 25 | Returns: 26 | float: Physical memory in GBs. 27 | """ 28 | return psutil.virtual_memory()[0] / 1073741824 29 | 30 | 31 | def get_number_processors(): 32 | """Get the number of processors in a CPU. 33 | 34 | Returns: 35 | int: Number of processors. 36 | """ 37 | try: 38 | num = os.cpu_count() 39 | except Exception: 40 | import multiprocessing # force an exception in case multiprocessing is not installed 41 | 42 | num = multiprocessing.cpu_count() 43 | return num 44 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/gpu_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import sys 5 | import os 6 | import glob 7 | from numba import cuda 8 | from numba.cuda.cudadrv.error import CudaSupportError 9 | 10 | 11 | DEFAULT_CUDA_PATH_LINUX = "/usr/local/cuda/version.txt" 12 | 13 | 14 | def get_number_gpus(): 15 | """Get the number of GPUs in the system. 16 | 17 | Returns: 18 | int: Number of GPUs. 19 | """ 20 | try: 21 | return len(cuda.gpus) 22 | except CudaSupportError: 23 | return 0 24 | 25 | 26 | def clear_memory_all_gpus(): 27 | """Clear memory of all GPUs.""" 28 | try: 29 | for gpu in cuda.gpus: 30 | with gpu: 31 | cuda.current_context().deallocations.clear() 32 | except CudaSupportError: 33 | print("No CUDA available") 34 | 35 | 36 | def get_cuda_version(unix_path=DEFAULT_CUDA_PATH_LINUX): 37 | """Get CUDA version 38 | 39 | Args: 40 | unix_path (str): Path to CUDA version file in Linux/Mac. 41 | 42 | Returns: 43 | str: Version of the library. 44 | """ 45 | if sys.platform == "win32": 46 | raise NotImplementedError("Implement this!") 47 | elif sys.platform in ["linux", "darwin"]: 48 | if os.path.isfile(unix_path): 49 | with open(unix_path, "r") as f: 50 | data = f.read().replace("\n", "") 51 | return data 52 | else: 53 | return "No CUDA on this machine" 54 | else: 55 | raise ValueError("Not on Windows, Linux, or Mac") 56 | 57 | 58 | def get_cudnn_version(): 59 | """Get the cuDNN version 60 | 61 | Returns: 62 | str: Version of the library.
63 | 64 | """ 65 | 66 | def find_cudnn_in_headers(candidates): 67 | for c in candidates: 68 | file = glob.glob(c) 69 | if file: 70 | break 71 | if file: 72 | with open(file[0], "r") as f: 73 | version = "" 74 | for line in f: 75 | if "#define CUDNN_MAJOR" in line: 76 | version = line.split()[-1] 77 | if "#define CUDNN_MINOR" in line: 78 | version += "." + line.split()[-1] 79 | if "#define CUDNN_PATCHLEVEL" in line: 80 | version += "." + line.split()[-1] 81 | if version: 82 | return version 83 | else: 84 | return "Cannot find CUDNN version" 85 | else: 86 | return "No CUDNN in this machine" 87 | 88 | if sys.platform == "win32": 89 | candidates = ["C:\\NVIDIA\\cuda\\include\\cudnn.h", 90 | "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\include\\cudnn.h"] 91 | elif sys.platform == "linux": 92 | candidates = [ 93 | "/usr/include/x86_64-linux-gnu/cudnn_v*.h", 94 | "/usr/local/cuda/include/cudnn.h", 95 | "/usr/include/cudnn.h", 96 | ] 97 | elif sys.platform == "darwin": 98 | candidates = ["/usr/local/cuda/include/cudnn.h", "/usr/include/cudnn.h"] 99 | else: 100 | raise ValueError("Not in Windows, Linux or Mac") 101 | return find_cudnn_in_headers(candidates) 102 | 103 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/notebook_memory_management.py: -------------------------------------------------------------------------------- 1 | # Original code: https://raw.githubusercontent.com/miguelgfierro/codebase/master/python/system/notebook_memory_management.py 2 | # 3 | # Profile memory usage envelope of IPython commands and report interactively. 4 | # Usage (inside a python notebook): 5 | # from notebook_memory_management import start_watching_memory, stop_watching_memory 6 | # To start profile: 7 | # start_watching_memory() 8 | # To stop profile: 9 | # stop_watching_memory() 10 | # 11 | # Based on: https://github.com/ianozsvald/ipython_memory_usage 12 | # 13 | 14 | from __future__ import division # 1/2 == 0.5, as in Py3 15 | from __future__ import absolute_import # avoid hiding global modules with locals 16 | from __future__ import print_function # force use of print("hello") 17 | from __future__ import ( 18 | unicode_literals 19 | ) # force unadorned strings "" to be unicode without prepending u"" 20 | import time 21 | import memory_profiler 22 | from IPython import get_ipython 23 | import psutil 24 | import warnings 25 | 26 | 27 | # keep a global accounting for the last known memory usage 28 | # which is the reference point for the memory delta calculation 29 | previous_call_memory_usage = memory_profiler.memory_usage()[0] 30 | t1 = time.time() # will be set to current time later 31 | keep_watching = True 32 | watching_memory = True 33 | try: 34 | input_cells = get_ipython().user_ns["In"] 35 | except: 36 | warnings.warn("Not running on notebook") 37 | 38 | 39 | def start_watching_memory(): 40 | """Register memory profiling tools to IPython instance.""" 41 | global watching_memory 42 | watching_memory = True 43 | ip = get_ipython() 44 | ip.events.register("post_run_cell", watch_memory) 45 | ip.events.register("pre_run_cell", pre_run_cell) 46 | 47 | 48 | def stop_watching_memory(): 49 | """Unregister memory profiling tools from IPython instance.""" 50 | global watching_memory 51 | watching_memory = False 52 | ip = get_ipython() 53 | try: 54 | ip.events.unregister("post_run_cell", watch_memory) 55 | except ValueError: 56 | print("ERROR: problem when unregistering") 57 | pass 58 | try: 59 | 
ip.events.unregister("pre_run_cell", pre_run_cell) 60 | except ValueError: 61 | print("ERROR: problem when unregistering") 62 | pass 63 | 64 | 65 | def watch_memory(): 66 | # bring in the global memory usage value from the previous iteration 67 | global previous_call_memory_usage, keep_watching, watching_memory, input_cells 68 | new_memory_usage = memory_profiler.memory_usage()[0] 69 | memory_delta = new_memory_usage - previous_call_memory_usage 70 | keep_watching = False 71 | total_memory = psutil.virtual_memory()[0] / 1024 / 1024 # in Mb 72 | # calculate time delta using global t1 (from the pre-run event) and current time 73 | time_delta_secs = time.time() - t1 74 | num_commands = len(input_cells) - 1 75 | cmd = "In [{}]".format(num_commands) 76 | # convert the results into a pretty string 77 | output_template = ( 78 | "{cmd} used {memory_delta:0.4f} Mb RAM in " 79 | "{time_delta:0.2f}s, total RAM usage " 80 | "{memory_usage:0.2f} Mb, total RAM " 81 | "memory {total_memory:0.2f} Mb" 82 | ) 83 | output = output_template.format( 84 | time_delta=time_delta_secs, 85 | cmd=cmd, 86 | memory_delta=memory_delta, 87 | memory_usage=new_memory_usage, 88 | total_memory=total_memory, 89 | ) 90 | if watching_memory: 91 | print(str(output)) 92 | previous_call_memory_usage = new_memory_usage 93 | 94 | 95 | def pre_run_cell(): 96 | """Capture current time before we execute the current command""" 97 | global t1 98 | t1 = time.time() 99 | 100 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/notebook_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | 6 | 7 | def is_jupyter(): 8 | """Check if the module is running on Jupyter notebook/console 9 | 10 | Returns: 11 | bool: True if the module is running on Jupyter notebook or Jupyter console, 12 | False otherwise. 13 | """ 14 | try: 15 | shell_name = get_ipython().__class__.__name__ 16 | if shell_name == 'ZMQInteractiveShell': 17 | return True 18 | else: 19 | return False 20 | except NameError: 21 | return False 22 | 23 | 24 | def is_databricks(): 25 | """Check if the module is running on Databricks 26 | 27 | Returns: 28 | bool: True if the module is running on Databricks notebook, 29 | False otherwise. 30 | """ 31 | try: 32 | if os.path.realpath(".") == "/databricks/driver": 33 | return True 34 | else: 35 | return False 36 | except NameError: 37 | return False 38 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/python_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 
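# Worked example for the helpers below (values chosen purely for illustration):
# for the co-occurrence matrix C = [[2, 1], [1, 3]] the diagonal holds per-item
# counts, so jaccard(C)[0, 1] = 1 / (2 + 3 - 1) = 0.25 and
# lift(C)[0, 1] = 1 / (2 * 3) = 1/6. Likewise,
# exponential_decay(value=5, max_val=10, half_life=5) = 0.5 ** ((10 - 5) / 5) = 0.5.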
3 | 4 | import logging 5 | 6 | import numpy as np 7 | from scipy import sparse 8 | 9 | 10 | logger = logging.getLogger() 11 | 12 | 13 | def exponential_decay(value, max_val, half_life): 14 | """Compute decay factor for a given value based on an exponential decay 15 | Values greater than max_val will be set to 1 16 | Args: 17 | value (numeric): value to calculate decay factor 18 | max_val (numeric): value at which decay factor will be 1 19 | half_life (numeric): value at which decay factor will be 0.5 20 | Returns: 21 | float: decay factor 22 | """ 23 | 24 | return np.minimum(1.0, np.power(0.5, (max_val - value) / half_life)) 25 | 26 | 27 | def jaccard(cooccurrence): 28 | """Helper method to calculate the Jaccard similarity of a matrix of co-occurrences 29 | Args: 30 | cooccurrence (np.array): the symmetric matrix of co-occurrences of items 31 | Returns: 32 | np.array: The matrix of Jaccard similarities between any two items 33 | """ 34 | 35 | diag = cooccurrence.diagonal() 36 | diag_rows = np.expand_dims(diag, axis=0) 37 | diag_cols = np.expand_dims(diag, axis=1) 38 | 39 | with np.errstate(invalid="ignore", divide="ignore"): 40 | result = cooccurrence / (diag_rows + diag_cols - cooccurrence) 41 | 42 | return np.array(result) 43 | 44 | 45 | def lift(cooccurrence): 46 | """Helper method to calculate the Lift of a matrix of co-occurrences 47 | Args: 48 | cooccurrence (np.array): the symmetric matrix of co-occurrences of items 49 | Returns: 50 | np.array: The matrix of Lifts between any two items 51 | """ 52 | 53 | diag = cooccurrence.diagonal() 54 | diag_rows = np.expand_dims(diag, axis=0) 55 | diag_cols = np.expand_dims(diag, axis=1) 56 | 57 | with np.errstate(invalid="ignore", divide="ignore"): 58 | result = cooccurrence / (diag_rows * diag_cols) 59 | 60 | return np.array(result) 61 | 62 | 63 | def get_top_k_scored_items(scores, top_k, sort_top_k=False): 64 | """Extract top K items from a matrix of scores for each user-item pair, optionally sort results per user 65 | 66 | Args: 67 | scores (np.array): score matrix (users x items) 68 | top_k (int): number of top items to recommend 69 | sort_top_k (bool): flag to sort top k results 70 | 71 | Returns: 72 | np.array, np.array: indices into the score matrix for each user's top items, scores corresponding to top items 73 | """ 74 | 75 | # ensure we're working with a dense ndarray 76 | if isinstance(scores, sparse.spmatrix): 77 | scores = scores.todense() 78 | 79 | if scores.shape[1] < top_k: 80 | logger.warning( 81 | "Number of items is less than top_k, limiting top_k to number of items" 82 | ) 83 | k = min(top_k, scores.shape[1]) 84 | 85 | test_user_idx = np.arange(scores.shape[0])[:, None] 86 | 87 | # get top K items and scores 88 | # this determines the unordered top-k item indices for each user 89 | top_items = np.argpartition(scores, -k, axis=1)[:, -k:] 90 | top_scores = scores[test_user_idx, top_items] 91 | 92 | if sort_top_k: 93 | sort_ind = np.argsort(-top_scores) 94 | top_items = top_items[test_user_idx, sort_ind] 95 | top_scores = top_scores[test_user_idx, sort_ind] 96 | 97 | return np.array(top_items), np.array(top_scores) 98 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/spark_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License.
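# Usage sketch (illustrative): passing `packages` makes this helper set the
# PYSPARK_SUBMIT_ARGS environment variable before the session is created, so
# Spark resolves the Maven artifacts at startup. The coordinate below is a
# placeholder, not a real dependency of this lab:
#
#     spark = start_or_get_spark(
#         app_name="WithExtraPackages",
#         packages=["org.example:some-connector:1.0.0"],
#     )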
3 | 4 | import os 5 | import sys 6 | 7 | 8 | try: 9 | from pyspark.sql import SparkSession 10 | except ImportError: 11 | pass # skip this import if we are in a pure Python environment 12 | 13 | 14 | def start_or_get_spark( 15 | app_name="Sample", 16 | url="local[*]", 17 | memory="10G", 18 | packages=None, 19 | jars=None, 20 | repository=None 21 | ): 22 | """Start Spark if not started 23 | 24 | Args: 25 | app_name (str): Set name of the application 26 | url (str): URL for spark master 27 | memory (str): Size of memory for spark driver 28 | packages (list): list of packages to install 29 | jars (list): list of jar files to add 30 | repository (str): The maven repository 31 | 32 | Returns: 33 | obj: Spark session. 34 | """ 35 | 36 | submit_args = '' 37 | if packages is not None: 38 | submit_args = '--packages {} '.format(','.join(packages)) 39 | if jars is not None: 40 | submit_args += '--jars {} '.format(','.join(jars)) 41 | if repository is not None: 42 | submit_args += "--repositories {}".format(repository) 43 | if submit_args: 44 | os.environ['PYSPARK_SUBMIT_ARGS'] = '{} pyspark-shell'.format(submit_args) 45 | 46 | spark = ( 47 | SparkSession.builder.appName(app_name) 48 | .master(url) 49 | .config("spark.driver.memory", memory) 50 | .getOrCreate() 51 | ) 52 | 53 | return spark 54 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/common/timer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | from timeit import default_timer 5 | from datetime import timedelta 6 | 7 | 8 | class Timer(object): 9 | """Timer class. 10 | Original code: https://github.com/miguelgfierro/codebase 11 | 12 | Examples: 13 | >>> import time 14 | >>> t = Timer() 15 | >>> t.start() 16 | >>> time.sleep(1) 17 | >>> t.stop() 18 | >>> t.interval >= 1 19 | True 20 | >>> with Timer() as t: 21 | ... time.sleep(1) 22 | >>> t.interval >= 1 23 | True 24 | >>> "Time elapsed {}".format(t) #doctest: +ELLIPSIS 25 | 'Time elapsed 0:00:...' 26 | """ 27 | 28 | def __init__(self): 29 | self._timer = default_timer 30 | self._interval = 0 31 | self.running = False 32 | 33 | def __enter__(self): 34 | self.start() 35 | return self 36 | 37 | def __exit__(self, *args): 38 | self.stop() 39 | 40 | def __str__(self): 41 | return str(timedelta(seconds=self._interval)) 42 | 43 | def start(self): 44 | """Start the timer.""" 45 | self.init = self._timer() 46 | self.running = True 47 | 48 | def stop(self): 49 | """Stop the timer.
Calculate the interval in seconds.""" 50 | self.end = self._timer() 51 | try: 52 | self._interval = self.end - self.init 53 | self.running = False 54 | except AttributeError: 55 | raise ValueError( 56 | "Timer has not been initialized: use start() or the contextual form with Timer() as t:" 57 | ) 58 | 59 | @property 60 | def interval(self): 61 | if self.running: 62 | raise ValueError("Timer has not been stopped, please use stop().") 63 | else: 64 | return self._interval 65 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/reco_utils/dataset/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/dataset/cosmos_cli.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | import pydocumentdb.errors as errors 4 | 5 | 6 | def find_collection(client, dbid, id): 7 | """Find whether or not a CosmosDB collection exists. 8 | Args: 9 | client (obj): A pydocumentdb client object. 10 | dbid (str): Database ID. 11 | id (str): Collection ID. 12 | Returns: 13 | bool: True if the collection exists, False otherwise. 14 | """ 15 | database_link = "dbs/" + dbid 16 | collections = list( 17 | client.QueryCollections( 18 | database_link, 19 | { 20 | "query": "SELECT * FROM r WHERE r.id=@id", 21 | "parameters": [{"name": "@id", "value": id}], 22 | }, 23 | ) 24 | ) 25 | if len(collections) > 0: 26 | return True 27 | else: 28 | return False 29 | 30 | 31 | def read_collection(client, dbid, id): 32 | """Read a CosmosDB collection. 33 | Args: 34 | client (obj): A pydocumentdb client object. 35 | dbid (str): Database ID. 36 | id (str): Collection ID. 37 | Returns: 38 | obj: A collection. 39 | """ 40 | try: 41 | database_link = "dbs/" + dbid 42 | collection_link = database_link + "/colls/{0}".format(id) 43 | collection = client.ReadCollection(collection_link) 44 | return collection 45 | except errors.DocumentDBError as e: 46 | if e.status_code == 404: 47 | print("A collection with id '{0}' does not exist".format(id)) 48 | else: 49 | raise errors.HTTPFailure(e.status_code) 50 | 51 | 52 | def read_database(client, id): 53 | """Read a CosmosDB database. 54 | Args: 55 | client (obj): A pydocumentdb client object. 56 | id (str): Database ID. 57 | Returns: 58 | obj: A database. 59 | """ 60 | try: 61 | database_link = "dbs/" + id 62 | database = client.ReadDatabase(database_link) 63 | return database 64 | except errors.DocumentDBError as e: 65 | if e.status_code == 404: 66 | print("A database with id '{0}' does not exist".format(id)) 67 | else: 68 | raise errors.HTTPFailure(e.status_code) 69 | 70 | 71 | def find_database(client, id): 72 | """Find whether or not a CosmosDB database exists. 73 | Args: 74 | client (obj): A pydocumentdb client object. 75 | id (str): Database ID. 76 | Returns: 77 | bool: True if the database exists, False otherwise. 
78 | """ 79 | databases = list( 80 | client.QueryDatabases( 81 | { 82 | "query": "SELECT * FROM r WHERE r.id=@id", 83 | "parameters": [{"name": "@id", "value": id}], 84 | } 85 | ) 86 | ) 87 | if len(databases) > 0: 88 | return True 89 | else: 90 | return False 91 | 92 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/dataset/download_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | from urllib.request import urlretrieve 6 | import logging 7 | from contextlib import contextmanager 8 | from tempfile import TemporaryDirectory 9 | from tqdm import tqdm 10 | 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | class TqdmUpTo(tqdm): 16 | """Wrapper class for the progress bar tqdm to get `update_to(n)` functionality""" 17 | 18 | def update_to(self, b=1, bsize=1, tsize=None): 19 | """A progress bar showing how much is left to finish the opperation 20 | 21 | Args: 22 | b (int): Number of blocks transferred so far. 23 | bsize (int): Size of each block (in tqdm units). 24 | tsize (int): Total size (in tqdm units). 25 | """ 26 | if tsize is not None: 27 | self.total = tsize 28 | self.update(b * bsize - self.n) # will also set self.n = b * bsize 29 | 30 | 31 | def maybe_download(url, filename=None, work_directory=".", expected_bytes=None): 32 | """Download a file if it is not already downloaded. 33 | 34 | Args: 35 | filename (str): File name. 36 | work_directory (str): Working directory. 37 | url (str): URL of the file to download. 38 | expected_bytes (int): Expected file size in bytes. 39 | 40 | Returns: 41 | str: File path of the file downloaded. 42 | """ 43 | if filename is None: 44 | filename = url.split("/")[-1] 45 | filepath = os.path.join(work_directory, filename) 46 | if not os.path.exists(filepath): 47 | with TqdmUpTo(unit="B", unit_scale=True) as t: 48 | filepath, _ = urlretrieve(url, filepath, reporthook=t.update_to) 49 | else: 50 | log.debug("File {} already downloaded".format(filepath)) 51 | if expected_bytes is not None: 52 | statinfo = os.stat(filepath) 53 | if statinfo.st_size != expected_bytes: 54 | os.remove(filepath) 55 | raise IOError("Failed to verify {}".format(filepath)) 56 | 57 | return filepath 58 | 59 | 60 | @contextmanager 61 | def download_path(path=None): 62 | """Return a path to download data. If `path=None`, then it yields a temporal path that is eventually deleted, 63 | otherwise the real path of the input. 64 | 65 | Args: 66 | path (str): Path to download data. 67 | 68 | Returns: 69 | str: Real path where the data is stored. 70 | 71 | Examples: 72 | >>> with download_path() as path: 73 | >>> ... 
maybe_download(url="http://example.com/file.zip", work_directory=path) 74 | 75 | """ 76 | if path is None: 77 | tmp_dir = TemporaryDirectory() 78 | try: 79 | yield tmp_dir.name 80 | finally: 81 | tmp_dir.cleanup() 82 | else: 83 | path = os.path.realpath(path) 84 | yield path 85 | 86 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/evaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/reco_utils/evaluation/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/evaluation/parameter_sweep.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. All rights reserved. 2 | # Licensed under the MIT License. 3 | # 4 | # Utility functions for parameter sweep. 5 | 6 | from itertools import product 7 | 8 | 9 | def generate_param_grid(params): 10 | """Generator of parameter grids 11 | Generate parameter lists from a parameter dictionary in the form of 12 | { 13 | "param1": [value1, value2], 14 | "param2": [value1, value2] 15 | } 16 | 17 | to 18 | 19 | [ 20 | {"param1": value1, "param2": value1}, 21 | {"param1": value2, "param2": value1}, 22 | {"param1": value1, "param2": value2}, 23 | {"param1": value2, "param2": value2} 24 | ] 25 | 26 | Args: 27 | params (dict): dictionary of parameters and values (in a list). 28 | 29 | Returns: 30 | list: A list of parameter dictionaries that can be fed directly into 31 | the model builder as keyword arguments.
32 | """ 33 | param_new = {} 34 | param_fixed = {} 35 | 36 | for key, value in params.items(): 37 | if isinstance(value, list): 38 | param_new[key] = value 39 | else: 40 | param_fixed[key] = value 41 | 42 | items = sorted(param_new.items()) 43 | keys, values = zip(*items) 44 | 45 | params_exp = [] 46 | for v in product(*values): 47 | param_exp = dict(zip(keys, v)) 48 | param_exp.update(param_fixed) 49 | params_exp.append(param_exp) 50 | 51 | return params_exp 52 | 53 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/recommender/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/6-azureml-movie-recommendation/reco_utils/recommender/__init__.py -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/reco_utils/recommender/sar/__init__.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | # Time since epoch in seconds 4 | EPOCH = datetime.datetime.utcfromtimestamp(0) 5 | # Default value for time decay parameter in SAR 6 | TIME_DECAY_COEFFICIENT = 30 7 | # Switch to trigger groupby in TimeDecay calculation 8 | TIMEDECAY_FORMULA = False 9 | # cooccurrence matrix threshold 10 | THRESHOLD = 1 11 | # Current time 12 | # TIME_NOW = (datetime.datetime.now() - EPOCH).total_seconds() 13 | TIME_NOW = None 14 | # Default names for functions which change the item-item cooccurrence matrix 15 | SIM_COOCCUR = "cooccurrence" 16 | SIM_JACCARD = "jaccard" 17 | SIM_LIFT = "lift" 18 | 19 | INDEXED_ITEMS = "indexedItems" 20 | INDEXED_USERS = "indexedUsers" 21 | 22 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/score.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import numpy 4 | import numpy as np 5 | import pandas as pd 6 | import os 7 | import pickle 8 | from sklearn.externals import joblib 9 | from azureml.core.model import Model 10 | from reco_utils.dataset import movielens 11 | from reco_utils.dataset.python_splitters import python_random_split 12 | from reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k 13 | from reco_utils.recommender.sar.sar_singlenode import SARSingleNode 14 | 15 | # load the model 16 | def init(): 17 | global model 18 | # retrieve the path to the model file using the model name 19 | model_path = Model.get_model_path(model_name='movielens_sar_model') 20 | model = joblib.load(model_path) 21 | 22 | # Passes data to the model and returns the prediction 23 | def run(raw_data): 24 | # make prediction 25 | try: 26 | data = raw_data 27 | data = pd.read_json(data) 28 | return model.get_item_based_topk(items=data, sort_top_k=True).to_json() 29 | except Exception as e: 30 | error = str(e) 31 | return error 32 | -------------------------------------------------------------------------------- /2019/6-azureml-movie-recommendation/widget.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "metadata": { 5 | "trusted": true 6 | }, 7 | "cell_type": "code", 8 | "source": "import pandas as pd\nimport ipywidgets as widgets\nfrom IPython.display import clear_output\nfrom sklearn.externals import joblib\ncounter = 
0\nscores = []\nmodelTest =joblib.load('movielens_sar_model.pkl')\nmostPopular =modelTest.get_popularity_based_topk(top_k=30,sort_top_k=True).join(data[['MovieId', 'Title']].drop_duplicates().set_index('MovieId'), \n on='MovieId', \n how='inner')[['MovieId','Title']].sample(5)\nstyle = {'description_width': 'initial'}\n# title= widgets.Label(\"Let's rate a few popular movies\",value = r'\\(\\color{red} {highlighted}\\)')\ntitle= widgets.Label(\"Let's rate a few popular movies\")\nmovie =widgets.RadioButtons(\n options=[1.0,2.0,3.0,4.0,5.0],\n value=3.0,\n description= str(counter+1) + \". \" +str(mostPopular.iloc[0,1])+ ':',\n disabled=False,\n style=style\n)\nbutt = widgets.Button(description='Next', style = style)\ndef on_butt_clicked(b):\n global counter\n global scores \n global movie\n global butt\n global title\n\n scores.append(movie.value)\n if counter <4:\n movie =widgets.RadioButtons(\n options=[1.0,2.0,3.0,4.0,5.0],\n value=3.0,\n description= str(counter+2) + \". \" + str(mostPopular.iloc[counter+1,1])+ ':',\n disabled=False,\n style=style\n )\n vbox.children = [title,movie,butt]\n\n if counter ==3:\n butt.description = \"Submit\"\n butt.style.button_color = 'lightgreen'\n if counter ==4:\n clear_output()\n title = widgets.Label(\"Here are the recommended movies based on your ratings.\", style=style)\n display(title)\n d = {'MovieId': mostPopular['MovieId'].tolist(), 'Rating': scores}\n df = pd.DataFrame(data=d).to_json()\n test_sample_encoded = bytes(df,encoding = 'utf8')\n similar =service.run(input_data = test_sample_encoded)\n temp = pd.read_json(similar).join(data[['MovieId', 'Title']].drop_duplicates().set_index('MovieId'), \n on='MovieId', \n how='inner').sort_values(by=['prediction'], ascending=False)\n display(temp)\n counter +=1\n \nbutt.on_click(on_butt_clicked)\nvbox = widgets.VBox([title,movie,butt])\ndisplay(vbox)", 9 | "execution_count": 1, 10 | "outputs": [ 11 | { 12 | "output_type": "error", 13 | "ename": "NameError", 14 | "evalue": "name 'data' is not defined", 15 | "traceback": [ 16 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 17 | "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", 18 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mscores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mmodelTest\u001b[0m \u001b[0;34m=\u001b[0m\u001b[0mjoblib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'movielens_sar_model.pkl'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m mostPopular =modelTest.get_popularity_based_topk(top_k=30,sort_top_k=True).join(data[['MovieId', 'Title']].drop_duplicates().set_index('MovieId'), \n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0mon\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'MovieId'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m how='inner')[['MovieId','Title']].sample(5)\n", 19 | "\u001b[0;31mNameError\u001b[0m: name 'data' is not defined" 20 | ] 21 | } 22 | ] 23 | } 24 | ], 25 | "metadata": { 26 | "kernelspec": { 27 | "name": "python36", 28 | "display_name": "Python 3.6", 29 | "language": "python" 30 | }, 31 | "language_info": { 32 | "mimetype": "text/x-python", 33 | "nbconvert_exporter": "python", 34 | "name": "python", 35 | "pygments_lexer": "ipython3", 36 | "version": "3.6.6", 37 | "file_extension": 
".py", 38 | "codemirror_mode": { 39 | "version": 3, 40 | "name": "ipython" 41 | } 42 | } 43 | }, 44 | "nbformat": 4, 45 | "nbformat_minor": 2 46 | } -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/1-fork.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/1-fork.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/1-forking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/1-forking.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/1-selectaccount.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/1-selectaccount.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/1-signin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/1-signin.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/2-completeorder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/2-completeorder.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/2-confirm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/2-confirm.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/2-installfree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/2-installfree.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/2-marketplace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/2-marketplace.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/2-search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/2-search.png -------------------------------------------------------------------------------- 
/2019/7-azure-pipelines-ci/images/2-select.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/2-select.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/2-setup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/2-setup.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/3-authorize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/3-authorize.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/3-newaccount.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/3-newaccount.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/4-edityaml.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/4-edityaml.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/4-python.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/4-python.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/4-saveandrun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/4-saveandrun.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/4-selectrepo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/4-selectrepo.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/5-paralleljobs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/5-paralleljobs.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/5-success.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/5-success.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/5-tests.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/5-tests.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/6-builddone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/6-builddone.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/6-buildqueued.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/6-buildqueued.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/6-createpr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/6-createpr.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/6-editreadme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/6-editreadme.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/6-gotogithub.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/6-gotogithub.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/6-proposechange.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/6-proposechange.png -------------------------------------------------------------------------------- /2019/7-azure-pipelines-ci/images/6-readme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2019/7-azure-pipelines-ci/images/6-readme.png -------------------------------------------------------------------------------- /2019/8-azure-service-bus-messaging/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import logging 5 | 6 | from flask import Flask, request, make_response 7 | from azure.servicebus import ServiceBusClient, Message 8 | from azure.servicebus.exceptions import MessageSendFailed 9 
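A quick usage sketch for the two routes defined in `app.py` below: with `SB_CONNECTION` exported (see `setup.sh` further down) and the app started, for example via `FLASK_APP=app.py flask run`, the endpoints can be exercised from another shell:

```bash
# enqueue a message; the route returns 'True' on success
curl -X POST --data 'hello pycon' http://127.0.0.1:5000/send

# drain the queue; returns a JSON array of message bodies
curl http://127.0.0.1:5000/receive
```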
| 10 | connection_string = os.environ.get('SB_CONNECTION') 11 | if connection_string is None: 12 | print('ERROR: Requires `SB_CONNECTION` value to be set', file=sys.stderr) 13 | sys.exit(1) 14 | 15 | queue_name = 'PyconLabQueue' 16 | sb_client = ServiceBusClient.from_connection_string(connection_string) 17 | queue_sender = sb_client.get_queue_sender(queue_name) 18 | 19 | app = Flask(__name__) 20 | app.logger.setLevel(logging.INFO) 21 | 22 | @app.route('/send', methods=['POST']) 23 | def send(): 24 | message = request.get_data(cache=False, as_text=True) 25 | app.logger.info(f'SEND: {message}') 26 | try: 27 | queue_sender.send_messages(Message(message)) 28 | success = True 29 | except MessageSendFailed: 30 | success = False 31 | return str(success) 32 | 33 | @app.route('/receive', methods=['GET']) 34 | def get(): 35 | with sb_client.get_queue_receiver(queue_name, max_wait_time=3) as receiver: 36 | messages = [] 37 | for message in receiver: 38 | app.logger.info(f'Got message: {message}') 39 | messages.append(str(message)) 40 | message.complete() 41 | return make_response(json.dumps(messages), { 42 | 'Content-Type': 'application/json; charset=utf-8' 43 | })
--------------------------------------------------------------------------------
/2019/8-azure-service-bus-messaging/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 | azure-servicebus==7.0.0b5
--------------------------------------------------------------------------------
/2019/8-azure-service-bus-messaging/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | az group create --name PyconLab --location westus2
4 | busName="PyconLabBus$(LC_ALL=C tr -dc 'a-z0-9' < /dev/urandom | head -c6)" # restrict suffix to characters valid in a Service Bus namespace name
5 | az servicebus namespace create --name $busName --resource-group PyconLab
6 | az servicebus queue create --name PyconLabQueue --resource-group PyconLab --namespace-name $busName
7 | accessRule=$(az servicebus namespace authorization-rule list --namespace-name $busName \
8 | --resource-group PyconLab \
9 | --query '[0].name' \
10 | --output tsv)
11 | SB_CONNECTION=$(az servicebus namespace authorization-rule keys list \
12 | --resource-group PyconLab \
13 | --namespace-name $busName \
14 | --name $accessRule \
15 | --query 'primaryConnectionString' \
16 | --output tsv)
17 | 
18 | echo $SB_CONNECTION | tee .servicebus.uri
--------------------------------------------------------------------------------
/2019/REQUIREMENTS.md:
--------------------------------------------------------------------------------
1 | # REQUIREMENTS
2 | 
3 | - [Python 3.7](https://www.python.org/downloads/)
4 | - [Visual Studio Code](https://code.visualstudio.com/)
5 | - [Python Extension for Visual Studio Code](https://marketplace.visualstudio.com/itemdetails?itemName=ms-python.python)
6 | - [VS Code Remote Development](https://aka.ms/vscode-remote)
7 | - [Docker Desktop](https://www.docker.com/products/docker-desktop)
8 | - [Azure Functions Core Tools 2.x](https://docs.microsoft.com/en-us/azure/azure-functions/functions-run-local#v2)
9 | - [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
10 | ([Local Install (Preferred)](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) or [Cloud Shell](https://docs.microsoft.com/en-ca/azure/cloud-shell/overview))
11 | - bash via Linux, macOS, Windows 10 ([Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10)),
[Docker](https://docs.microsoft.com/en-us/cli/azure/run-azure-cli-docker), or [Cloud Shell](https://docs.microsoft.com/en-ca/azure/cloud-shell/overview)
12 | - [jq](https://stedolan.github.io/jq/)
13 | - Jupyter Notebooks ([Azure Notebooks](https://notebooks.azure.com/) or [Docker](https://github.com/jupyter/docker-stacks) recommended)
14 | 
--------------------------------------------------------------------------------
/2019/requirements.txt:
--------------------------------------------------------------------------------
1 | adal==1.2.1
2 | asn1crypto==0.24.0
3 | astroid==2.2.5
4 | azure-common==1.1.18
5 | azure-servicebus==0.50.0
6 | certifi==2019.3.9
7 | cffi==1.12.2
8 | chardet==3.0.4
9 | Click==7.0
10 | colorama==0.4.1
11 | cryptography==3.3.2
12 | Flask==1.0.2
13 | idna==2.8
14 | isodate==0.6.0
15 | isort==4.3.17
16 | itsdangerous==1.1.0
17 | Jinja2==2.11.3
18 | lazy-object-proxy==1.3.1
19 | MarkupSafe==1.1.1
20 | mccabe==0.6.1
21 | msrest==0.6.6
22 | msrestazure==0.6.0
23 | oauthlib==3.0.1
24 | pycparser==2.19
25 | PyJWT==1.7.1
26 | pylint==2.3.1
27 | python-dateutil==2.8.0
28 | requests==2.21.0
29 | requests-oauthlib==1.2.0
30 | six==1.12.0
31 | typed-ast==1.3.4
32 | uamqp==1.1.0
33 | urllib3==1.24.2
34 | Werkzeug==0.15.3
35 | wrapt==1.11.1
36 | 
--------------------------------------------------------------------------------
/2020/9-vscodespaces/README.md:
--------------------------------------------------------------------------------
1 | # Hello World in Visual Studio Codespaces
2 | 
3 | In this lab you will create a hello world script in Visual Studio Codespaces to experiment with the development experience in the browser.
4 | 
5 | ## Prerequisites
6 | 
7 | You'll need the following tools installed on your local machine:
8 | 
9 | 1. A [supported browser](https://docs.microsoft.com/en-us/visualstudio/online/resources/troubleshooting#partially-supported-browsers)
10 | 1. Azure credentials.
11 | 
12 | ## Create a Codespace
13 | 1. Navigate to https://aka.ms/vscodespaces, click on the "Sign in" button and enter your Azure credentials.
14 | 1. Click on the "Create Codespace" button, and add "HelloWorld" to the `Codespace Name` field.
15 | 1. Add https://github.com/asw101/hello-vscodespaces to the `Git repository` field. This repo contains a simple Hello World Flask application.
16 | 1. Click on the "Create" button. You can leave the other fields with the default values.
17 | 
18 | ## Create a Hello World script
19 | 1. Open the Command Palette (Ctrl + Shift + P, or Command + Shift + P if you're on macOS) and run the `File: New File` command.
20 | 1. Name it `hello.py`, and open it in VS Codespaces.
21 | 1. Add `print("Hello, VS Codespaces!")` to the file and save it (Ctrl + S).
22 | 1. Right click on the editor and select `Run Python File in Terminal`. This will run your hello world script in the terminal.
23 | 
24 | ## Debug the Hello World Flask app
25 | 
26 | 1. Check that VS Codespaces created a virtual environment called `pythonenv3.7` located at the top of the project.
27 | 1. Make sure this environment is selected by clicking on the Python environment information on the status bar, located on the bottom left of the screen.
28 | 1. Start a debug session:
29 | 
30 | - Open the Run/Debug view by clicking on the play + bug icon on the activity bar, on the left side of the screen. Then click on the "Run and Debug" button.
31 | 
32 | - Or press F5
33 | 1. From the configuration drop-down, select "Flask"
34 | 1.
You should see the following message in the terminal:
35 | ```
36 | * Serving Flask app "app.py"
37 | * Environment: development
38 | * Debug mode: off
39 | * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
40 | ```
41 | 6. Press Ctrl and click on the link in the terminal to access the application in a new tab.
42 | 
43 | 
44 | ## Additional things to try
45 | 1. **Edit:**
46 | - Open `app.py`
47 | - Try adding some code and check out the language features.
48 | 
49 | 1. **Try out Flask live reloading while debugging:**
50 | 
51 | - Create a configuration file for the debugger by opening the Run/Debug view and clicking on "create a launch.json file"
52 | - Select Flask from the configuration options
53 | - Delete lines 19 and 20 ("--no-debugger" and "--no-reload")
54 | - Change "FLASK_DEBUG" on line 15 to "1".
55 | - Press F5 to start the debug session using that new configuration
56 | - Open the `app.py` file, make a change and save it.
57 | 1. **Add a logpoint**:
58 | 
59 | - Open `app.py` and right click on the left side of line 5
60 | - Select `Add logpoint...`
61 | - Enter "Executing endpoint" and hit Enter
62 | - Press F5 to run the Flask app and Ctrl + Click on the link to open it in a new tab
63 | - Open the Debug Console panel (next to the terminal) and see the logged message.
64 | 
65 | 
66 | ## Other samples
67 | - [Tweeter App - Python and Django](https://github.com/Microsoft/python-sample-tweeterapp)
68 | - [The Cat Said No - Python and Flask](https://github.com/luabud/TheCatSaidNo)
69 | 
--------------------------------------------------------------------------------
/2020/9-vscodespaces/RunFileButton.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2020/9-vscodespaces/RunFileButton.png
--------------------------------------------------------------------------------
/2020/README.md:
--------------------------------------------------------------------------------
1 | # Azure Python Labs (2020) [[back](../)]
2 | 
3 | ## Hello World in Visual Studio Codespaces
4 | 
5 | Create and work with an environment in Visual Studio Codespaces, all in the browser.
6 | 7 | - How to get started with a GitHub repo 8 | - How to create and run a Hello World in Python 9 | - How to run a Flask app 10 | 11 | [Hello World in Visual Studio Codespaces lab](9-vscodespaces/README.md) 12 | -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/.gitignore: -------------------------------------------------------------------------------- 1 | aml_config/* 2 | .ipynb_aml_checkpoints/* 3 | .ipynb_checkpoints/* 4 | .amlignore 5 | .amlignore.amltmp 6 | *.ipynb.amltmp 7 | .vscode/* -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "subscription_id": "", 3 | "resource_group": "", 4 | "workspace_name": "pyconworkspace" 5 | } -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/images/exp_log_track.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2021/9-azure-machine-learning/images/exp_log_track.png -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/images/ml_pane.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2021/9-azure-machine-learning/images/ml_pane.png -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/images/studio_metrics_track.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2021/9-azure-machine-learning/images/studio_metrics_track.png -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/images/ws_resources.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2021/9-azure-machine-learning/images/ws_resources.png -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/run_experiment.py: -------------------------------------------------------------------------------- 1 | from azureml.core.compute import AmlCompute, ComputeTarget 2 | from azureml.core.compute_target import ComputeTargetException 3 | from azureml.core import Experiment, Environment, ScriptRunConfig, Workspace 4 | from azureml.core.conda_dependencies import CondaDependencies 5 | 6 | def submit(): 7 | # define workspace 8 | ws = Workspace.from_config() 9 | 10 | # create compute if it does not already exist 11 | cluster_name = "goazurego" 12 | 13 | try: 14 | target = ComputeTarget(workspace=ws, name=cluster_name) 15 | print(f"Found existing cluster - {cluster_name}.") 16 | 17 | except ComputeTargetException: 18 | # create a configuration 19 | compute_config = AmlCompute.provisioning_configuration(vm_size="STANDARD_D2_V2", max_nodes=2, min_nodes=0) 20 | 21 | target = ComputeTarget.create(ws, cluster_name, compute_config) 22 | 23 | target.wait_for_completion(show_output=True) 24 | 25 | # use the curated tensorflow 1.15 environment 26 | 
environment_name = "AzureML-TensorFlow-1.15-Inference-CPU" 27 | tf_env = Environment.get(workspace=ws, name=environment_name) 28 | 29 | # create script run configuration 30 | src = ScriptRunConfig(source_directory=".", script="train.py", 31 | compute_target=target, environment=tf_env) 32 | 33 | src.run_config.target = target 34 | 35 | # create an experiment 36 | experiment_name = "pycon-experiment" 37 | experiment = Experiment(workspace=ws, name=experiment_name) 38 | 39 | # run experiment 40 | run = experiment.submit(config=src) 41 | run.wait_for_completion(show_output=True) 42 | 43 | return True 44 | 45 | if __name__ == "__main__": 46 | submit() 47 | -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import gzip 3 | import struct 4 | import sys 5 | import argparse 6 | 7 | import numpy as np 8 | import tensorflow as tf 9 | 10 | from utils import prepare_data 11 | 12 | # todo: import Azure Machine Learning Run class 13 | 14 | # todo: initialize run context 15 | 16 | # download MNIST dataset for training 17 | X_train, X_test, y_train, y_test = prepare_data('mnist', './data') 18 | 19 | training_set_size = X_train.shape[0] 20 | 21 | n_inputs = 28 * 28 22 | n_h1 = 300 23 | n_h2 = 100 24 | n_outputs = 10 25 | learning_rate = 0.01 26 | n_epochs = 100 27 | batch_size = 50 28 | 29 | with tf.name_scope('network'): 30 | # construct the DNN 31 | X = tf.placeholder(tf.float32, shape = (None, n_inputs), name = 'X') 32 | y = tf.placeholder(tf.int64, shape = (None), name = 'y') 33 | h1 = tf.layers.dense(X, n_h1, activation = tf.nn.relu, name = 'h1') 34 | h2 = tf.layers.dense(h1, n_h2, activation = tf.nn.relu, name = 'h2') 35 | output = tf.layers.dense(h2, n_outputs, name = 'output') 36 | 37 | with tf.name_scope('train'): 38 | cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = y, logits = output) 39 | loss = tf.reduce_mean(cross_entropy, name = 'loss') 40 | optimizer = tf.train.GradientDescentOptimizer(learning_rate) 41 | train_op = optimizer.minimize(loss) 42 | 43 | with tf.name_scope('eval'): 44 | correct = tf.nn.in_top_k(output, y, 1) 45 | acc_op = tf.reduce_mean(tf.cast(correct, tf.float32)) 46 | 47 | init = tf.global_variables_initializer() 48 | saver = tf.train.Saver() 49 | 50 | with tf.Session() as sess: 51 | init.run() 52 | for epoch in range(n_epochs): 53 | 54 | # randomly shuffle training set 55 | indices = np.random.permutation(training_set_size) 56 | X_train = X_train[indices] 57 | y_train = y_train[indices] 58 | 59 | # batch index 60 | b_end = batch_size 61 | for b_start in range(0, training_set_size, batch_size): 62 | # get a batch 63 | X_batch, y_batch = X_train[b_start: b_end], y_train[b_start: b_end] 64 | 65 | # update batch index for the next batch 66 | b_end = min(b_start + (batch_size * 2), training_set_size) 67 | 68 | # train 69 | sess.run(train_op, feed_dict = {X: X_batch, y: y_batch}) 70 | 71 | # evaluate training set 72 | acc_train = acc_op.eval(feed_dict = {X: X_batch, y: y_batch}) 73 | # evaluate validation set 74 | acc_val = acc_op.eval(feed_dict = {X: X_test, y: y_test}) 75 | 76 | # todo: Log validation and training accuracies through Azure Machine Learning 77 | 78 | # print out training and validation accuracy 79 | print(epoch, '-- Training accuracy:', acc_train, '\b Validation accuracy:', acc_val) 80 | y_hat = np.argmax(output.eval(feed_dict = {X: X_test}), axis = 1) 81 | 82 | 
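A hedged sketch of what the `# todo` markers in `train.py` above are asking for: logging metrics through the Azure ML (v1 SDK) `Run` API. The metric names are illustrative, and in the real script the `log` calls would sit inside the epoch loop using the computed `acc_train` / `acc_val`:

```python
from azureml.core import Run

# Returns the live submitted run when launched via run_experiment.py,
# or an offline stub that simply prints when executed locally.
run = Run.get_context()

# Fixed values keep this sketch self-contained; in train.py these would be
# the per-epoch training and validation accuracies.
run.log('training_acc', 0.97)
run.log('validation_acc', 0.95)
```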
os.makedirs('./outputs/model', exist_ok = True) 83 | saver.save(sess, './outputs/model/mnist-tf.model') 84 | -------------------------------------------------------------------------------- /2021/9-azure-machine-learning/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import gzip 3 | import struct 4 | 5 | import urllib 6 | from urllib import request 7 | import numpy as np 8 | 9 | # load compressed MNIST gz files and return numpy arrays 10 | def load_data(filename, label = False): 11 | with gzip.open(filename) as gz: 12 | magic_number = struct.unpack('I', gz.read(4)) 13 | n_items = struct.unpack('>I', gz.read(4)) 14 | if not label: 15 | n_rows = struct.unpack('>I', gz.read(4))[0] 16 | n_cols = struct.unpack('>I', gz.read(4))[0] 17 | res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype = np.uint8) 18 | res = res.reshape(n_items[0], n_rows * n_cols) 19 | else: 20 | res = np.frombuffer(gz.read(n_items[0]), dtype = np.uint8) 21 | res = res.reshape(n_items[0], 1) 22 | return res 23 | 24 | # one-hot encode a 1-D array 25 | def one_hot_encode(array, num_of_classes): 26 | return np.eye(num_of_classes)[array.reshape(-1)] 27 | 28 | def prepare_data(dataset, data_folder): 29 | data_folder = os.path.join(data_folder, dataset) 30 | print('making data directory ' + data_folder + '...') 31 | os.makedirs(data_folder, exist_ok = True) 32 | 33 | def download_data(url, filename): 34 | if not os.path.isfile(filename): 35 | print('downloading ' + url) 36 | urllib.request.urlretrieve(url, filename = filename) 37 | else: 38 | print(filename + ' exists, using it') 39 | 40 | print('downloading training data ...') 41 | download_data('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', './data/mnist/train-images.gz') 42 | download_data('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', './data/mnist/train-labels.gz') 43 | print('done.') 44 | print('downloading testing data ...') 45 | download_data('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', './data/mnist/test-images.gz') 46 | download_data('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', './data/mnist/test-labels.gz') 47 | print('done.') 48 | 49 | print('Prepared training dataset is stored here:', data_folder) 50 | 51 | X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0 52 | X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0 53 | 54 | y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1) 55 | y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1) 56 | 57 | print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep = '\n') 58 | 59 | return X_train, X_test, y_train, y_test -------------------------------------------------------------------------------- /2021/9-azure-web-apps/.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/django 2 | # Edit at https://www.gitignore.io/?templates=django 3 | 4 | ### Azure CLI ## 5 | .azure 6 | .venv 7 | .vscode 8 | 9 | ### Django ### 10 | *.log 11 | *.pot 12 | *.pyc 13 | __pycache__/ 14 | local_settings.py 15 | db.sqlite3 16 | db.sqlite3-journal 17 | media 18 | 19 | # If your build process includes running collectstatic, then you probably don't need or want to include staticfiles/ 20 | # in your Git repository. Update and uncomment the following line accordingly. 
21 | # /staticfiles/ 22 | 23 | ### Django.Python Stack ### 24 | # Byte-compiled / optimized / DLL files 25 | *.py[cod] 26 | *$py.class 27 | 28 | # C extensions 29 | *.so 30 | 31 | # Distribution / packaging 32 | .Python 33 | build/ 34 | develop-eggs/ 35 | dist/ 36 | downloads/ 37 | eggs/ 38 | .eggs/ 39 | lib/ 40 | lib64/ 41 | parts/ 42 | sdist/ 43 | var/ 44 | wheels/ 45 | pip-wheel-metadata/ 46 | share/python-wheels/ 47 | *.egg-info/ 48 | .installed.cfg 49 | *.egg 50 | MANIFEST 51 | 52 | # PyInstaller 53 | # Usually these files are written by a python script from a template 54 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 55 | *.manifest 56 | *.spec 57 | 58 | # Installer logs 59 | pip-log.txt 60 | pip-delete-this-directory.txt 61 | 62 | # Unit test / coverage reports 63 | htmlcov/ 64 | .tox/ 65 | .nox/ 66 | .coverage 67 | .coverage.* 68 | .cache 69 | nosetests.xml 70 | coverage.xml 71 | *.cover 72 | .hypothesis/ 73 | .pytest_cache/ 74 | 75 | # Translations 76 | *.mo 77 | 78 | # Scrapy stuff: 79 | .scrapy 80 | 81 | # Sphinx documentation 82 | docs/_build/ 83 | 84 | # PyBuilder 85 | target/ 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # celery beat schedule file 98 | celerybeat-schedule 99 | 100 | # SageMath parsed files 101 | *.sage.py 102 | 103 | # Spyder project settings 104 | .spyderproject 105 | .spyproject 106 | 107 | # Rope project settings 108 | .ropeproject 109 | 110 | # Mr Developer 111 | .mr.developer.cfg 112 | .project 113 | .pydevproject 114 | 115 | # mkdocs documentation 116 | /site 117 | 118 | # mypy 119 | .mypy_cache/ 120 | .dmypy.json 121 | dmypy.json 122 | 123 | # Pyre type checker 124 | .pyre/ 125 | 126 | # End of https://www.gitignore.io/api/django -------------------------------------------------------------------------------- /2021/9-azure-web-apps/README.md: -------------------------------------------------------------------------------- 1 | # Number Facts with Python Web Apps 2 | 3 | In this lab, you will learn how to deploy a python app to App Service using the Azure CLI. 4 | 5 | You will learn to: 6 | 7 | - Use `az webapp up` to quickly provision Azure resources and deploy your app to Azure App Service. 8 | - Leverage the `local context` feature of Azure CLI to ease management operations. 9 | 10 | ## Prerequisites 11 | 12 | - macOS, Windows, or Linux 13 | - Azure Subscription 14 | - [Python](https://www.python.org/downloads/) 3.7, or 3.8 15 | - [Git](https://git-scm.com/) 16 | - [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest) 17 | 18 | ## Deploy the app to Azure using the Azure CLI 19 | 20 | ### Create a local copy of this repository 21 | 22 | - You can create a local clone, or just download the *.zip archive 23 | - Open a terminal window and navigate to the folder containing the Web App sample 24 | 25 | ### Login with Azure CLI 26 | 27 | Log in to Azure using the `az login` command. This will open a browser window where you can authenticate. [See az login](https://docs.microsoft.com/cli/azure/reference-index?view=azure-cli-latest#az-login). 
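Putting the CLI steps in this README together, the end-to-end flow looks roughly like the sketch below; the angle-bracket placeholders are yours to fill in, and `az webapp up` itself is explained in the next section:

``` bash
# one-time: authenticate and pick the subscription to deploy into
az login
az account set --subscription "<subscription-id>"

# from the project folder: provision resources and deploy
az webapp up -n <app-name> -l <location> --sku FREE

# local context remembers the app, so follow-up commands need no parameters
az webapp browse
az webapp logs tail
```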
28 | 
29 | Choose the subscription you will use with the `az account set --subscription <subscription-id>` command. You will only need to do this if you have more than one Azure subscription.
30 | [See az account](https://docs.microsoft.com/cli/azure/account?view=azure-cli-latest#az-account-set).
31 | 
32 | ### Deploy your application with az webapp up
33 | 
34 | App Service provides `az webapp up` as a quick way to get started with the service. We are going to leverage that functionality to:
35 | 
36 | - Create all the necessary resources to host our app
37 | - Build and package the application
38 | - Publish the app to the cloud.
39 | 
40 | Run the following command:
41 | 
42 | ``` bash
43 | az webapp up -n <app-name> -l <location> --sku FREE
44 | ```
45 | 
46 | - **name** should be a unique string. **name** is used as the Azure resource name as well as part of the URL for your app.
47 | 
48 | - **location** will determine the Azure region where your resource will be created. You can get a list of supported locations with `az account list-locations`.
49 | 
50 | - **sku** lets you choose among different service offerings. For this sample we will use the FREE option; however, for production we recommend `P1V2` or higher. [Learn more about App Service Pricing](https://azure.microsoft.com/pricing/details/app-service/windows/)
51 | 
52 | > Note: App Service has limits on how many free plans you can have in a subscription. If you have problems creating a free plan, try choosing a different region, or using one of the paid options such as B1.
53 | 
54 | Running `az webapp up` for the first time will take a few minutes to complete. Behind the scenes it will provision all the necessary resources, including an [Azure Resource Group](https://docs.microsoft.com/azure/azure-resource-manager/management/overview#resource-groups), [App Service plan](https://docs.microsoft.com/azure/app-service/overview-hosting-plans) and [Azure Webapp](https://docs.microsoft.com/azure/app-service/containers/app-service-linux-intro).
55 | 
56 | Locally, `az webapp up` creates a directory called `.azure`, which is used to store the *local context*. Local context stores the configuration that you passed in through parameters to `az webapp up` to use later. For example, you can now use `az webapp browse` without any more parameters to browse to your app, or `az webapp logs tail` to stream the runtime logs of your application.
57 | 
58 | You can also modify this sample and run `az webapp up` again with no parameters to push any change you have made to the resources you already created on the first run.
59 | 
60 | [Learn more about az webapp up](https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az-webapp-up)
61 | 
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/appservicenumberfacts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2021/9-azure-web-apps/appservicenumberfacts/__init__.py
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/appservicenumberfacts/asgi.py:
--------------------------------------------------------------------------------
1 | """
2 | ASGI config for appservicenumberfacts project.
3 | 
4 | It exposes the ASGI callable as a module-level variable named ``application``.
5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.asgi import get_asgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appservicenumberfacts.settings') 15 | 16 | application = get_asgi_application() 17 | -------------------------------------------------------------------------------- /2021/9-azure-web-apps/appservicenumberfacts/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for appservicenumberfacts project. 3 | 4 | Generated by 'django-admin startproject' using Django 3.0.5. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/3.0/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = '(%%ve&0wpx$1v)x1behkrmit62@keja4s^&tt)$ewpb01rpr34' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | DEBUG = True 27 | 28 | ALLOWED_HOSTS = ['*'] 29 | 30 | 31 | # Application definition 32 | 33 | INSTALLED_APPS = [ 34 | 'django.contrib.admin', 35 | 'django.contrib.auth', 36 | 'django.contrib.contenttypes', 37 | 'django.contrib.sessions', 38 | 'django.contrib.messages', 39 | 'django.contrib.staticfiles', 40 | 'randnum' 41 | ] 42 | 43 | MIDDLEWARE = [ 44 | 'django.middleware.security.SecurityMiddleware', 45 | 'whitenoise.middleware.WhiteNoiseMiddleware', 46 | 'django.contrib.sessions.middleware.SessionMiddleware', 47 | 'django.middleware.common.CommonMiddleware', 48 | 'django.middleware.csrf.CsrfViewMiddleware', 49 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 50 | 'django.contrib.messages.middleware.MessageMiddleware', 51 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 52 | ] 53 | 54 | ROOT_URLCONF = 'appservicenumberfacts.urls' 55 | 56 | TEMPLATES = [ 57 | { 58 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 59 | 'DIRS': [], 60 | 'APP_DIRS': True, 61 | 'OPTIONS': { 62 | 'context_processors': [ 63 | 'django.template.context_processors.debug', 64 | 'django.template.context_processors.request', 65 | 'django.contrib.auth.context_processors.auth', 66 | 'django.contrib.messages.context_processors.messages', 67 | ], 68 | }, 69 | }, 70 | ] 71 | 72 | WSGI_APPLICATION = 'appservicenumberfacts.wsgi.application' 73 | 74 | 75 | # Database 76 | # https://docs.djangoproject.com/en/3.0/ref/settings/#databases 77 | 78 | DATABASES = { 79 | 'default': { 80 | 'ENGINE': 'django.db.backends.sqlite3', 81 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 82 | } 83 | } 84 | 85 | 86 | # Password validation 87 | # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators 88 | 89 | AUTH_PASSWORD_VALIDATORS = [ 90 | { 91 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 92 | }, 93 | { 94 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 95 | }, 96 | { 97 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 98 | }, 
99 | { 100 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 101 | }, 102 | ] 103 | 104 | 105 | # Internationalization 106 | # https://docs.djangoproject.com/en/3.0/topics/i18n/ 107 | 108 | LANGUAGE_CODE = 'en-us' 109 | 110 | TIME_ZONE = 'UTC' 111 | 112 | USE_I18N = True 113 | 114 | USE_L10N = True 115 | 116 | USE_TZ = True 117 | 118 | 119 | # Static files (CSS, JavaScript, Images) 120 | # https://docs.djangoproject.com/en/3.0/howto/static-files/ 121 | 122 | STATIC_URL = '/static/' 123 | -------------------------------------------------------------------------------- /2021/9-azure-web-apps/appservicenumberfacts/urls.py: -------------------------------------------------------------------------------- 1 | """appservicenumberfacts URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/3.0/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.urls import include, path 18 | 19 | urlpatterns = [ 20 | path('', include('randnum.urls')), 21 | path('admin/', admin.site.urls), 22 | ] 23 | -------------------------------------------------------------------------------- /2021/9-azure-web-apps/appservicenumberfacts/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for appservicenumberfacts project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appservicenumberfacts.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /2021/9-azure-web-apps/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appservicenumberfacts.settings') 9 | try: 10 | from django.core.management import execute_from_command_line 11 | except ImportError as exc: 12 | raise ImportError( 13 | "Couldn't import Django. Are you sure it's installed and " 14 | "available on your PYTHONPATH environment variable? Did you " 15 | "forget to activate a virtual environment?" 
16 | ) from exc
17 | execute_from_command_line(sys.argv)
18 | 
19 | 
20 | if __name__ == '__main__':
21 | main()
22 | 
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2021/9-azure-web-apps/randnum/__init__.py
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/admin.py:
--------------------------------------------------------------------------------
1 | from django.contrib import admin
2 | 
3 | # Register your models here.
4 | 
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/apps.py:
--------------------------------------------------------------------------------
1 | from django.apps import AppConfig
2 | 
3 | 
4 | class RandnumConfig(AppConfig):
5 | name = 'randnum'
6 | 
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/migrations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azure-python-labs/a18e71b25876f1f0809678f179719ab78cef6123/2021/9-azure-web-apps/randnum/migrations/__init__.py
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/models.py:
--------------------------------------------------------------------------------
1 | from django.db import models
2 | 
3 | # Create your models here.
4 | 
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/static/css/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | background-color: #e9ecef;
3 | }
4 | 
5 | .fact-container {
6 | color: blue;
7 | }
8 | 
9 | .button-container {
10 | text-align: center;
11 | margin-top: 50px;
12 | }
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/templates/randnum/index.html:
--------------------------------------------------------------------------------
1 | {% load static %}
2 | 
3 | <!DOCTYPE html>
4 | <html lang="en">
5 | 
6 | <head>
7 | <meta charset="utf-8">
8 | <meta name="viewport" content="width=device-width, initial-scale=1">
9 | <link rel="stylesheet" href="{% static 'css/index.css' %}">
10 | 
11 | 
12 | <title>Random Number Facts</title>
13 | </head>
14 | 
15 | <body>
16 | <div class="container">
17 | <div class="fact-container">
18 | {{ fact }}
19 | </div>
20 | <div class="button-container">
21 | <a href="/">Get Another Fact!</a>
22 | </div>
23 | <footer>
24 | Powered by numbersapi.com
25 | </footer>
26 | </div>
27 | </body>
28 | 
29 | </html>
30 | 
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/tests.py:
--------------------------------------------------------------------------------
1 | from django.test import TestCase
2 | 
3 | # Create your tests here.
4 | 
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/urls.py:
--------------------------------------------------------------------------------
1 | from django.urls import path
2 | 
3 | from . import views
4 | 
5 | urlpatterns = [
6 | path('', views.index, name='index'),
7 | ]
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/randnum/views.py:
--------------------------------------------------------------------------------
1 | from django.shortcuts import render
2 | import requests
3 | 
4 | def index(request):
5 | r = requests.get('http://numbersapi.com/random/')
6 | 
7 | context = {'fact': r.text}
8 | return render(request, 'randnum/index.html', context)
--------------------------------------------------------------------------------
/2021/9-azure-web-apps/requirements.txt:
--------------------------------------------------------------------------------
1 | asgiref==3.2.7
2 | astroid==2.2.5
3 | azure-mgmt-nspkg==3.0.2
4 | azure-nspkg==3.0.2
5 | certifi==2020.4.5.1
6 | chardet==3.0.4
7 | colorama==0.4.1
8 | Django==3.0.14
9 | idna==2.9
10 | isort==4.3.20
11 | lazy-object-proxy==1.4.1
12 | mccabe==0.6.1
13 | pylint==2.3.1
14 | pytz==2020.1
15 | requests==2.23.0
16 | six==1.12.0
17 | sqlparse==0.3.1
18 | typed-ast==1.4.0
19 | urllib3==1.25.9
20 | whitenoise==5.0.1
21 | wrapt==1.11.2
22 | 
--------------------------------------------------------------------------------
/2021/9-vscode-django-postgres-dev-container/README.md:
--------------------------------------------------------------------------------
1 | # Developing a Django + PostgreSQL application in a Dev Container
2 | 
3 | In this lab, you use Visual Studio Code remote development features to work on a Django + PostgreSQL application in a dockerized development environment.
4 | 
5 | 
6 | ## Prerequisites
7 | 
8 | You'll need the following tools installed on your local machine:
9 | 
10 | 1. [Docker Desktop](https://www.docker.com/products/docker-desktop)
11 | 1. [Visual Studio Code](https://code.visualstudio.com/)
12 | 1. The [VS Code Remote Extensions](https://aka.ms/vscode-remote)
13 | 1. If you are running on Windows, set your Git line endings to use LF:
14 | ```
15 | git config --global core.autocrlf false
16 | ```
17 | 
18 | ## Create the dev container workspace
19 | 
20 | 1. In a terminal, clone the sample app and open it using Visual Studio Code:
21 | 
22 | ```cmd
23 | git clone https://github.com/Microsoft/python-sample-tweeterapp
24 | cd python-sample-tweeterapp
25 | ```
26 | and:
27 | ```cmd
28 | code .
29 | ```
30 | or if you are using [Visual Studio Code Insiders](https://code.visualstudio.com/insiders/):
31 | ```cmd
32 | code-insiders .
33 | ```
34 | 
35 | 1. Click the `Reopen in Container` prompt, or press `F1` and select the `Reopen folder in dev container` command.
36 | 
37 | 1. After the workspace terminal loads, open a new terminal using ```Ctrl-Shift-` ``` and type the following to build the React frontend:
38 | 
39 | ```cmd
40 | npm install
41 | npm run dev
42 | ```
43 | 
44 | 1.
After the container builds, open another terminal using ```Ctrl-Shift-` ``` and type:
45 | 
46 | ```cmd
47 | python manage.py migrate
48 | python manage.py loaddata initial_data
49 | python manage.py runserver
50 | ```
51 | 
52 | 1. Open [http://localhost:8000](http://localhost:8000) in the browser to view the app.
53 | 1. Create an account and log in to the app
54 | 
55 | ## Set up debugging in the container
56 | 
57 | 1. Stop the app in the terminal by pressing `Ctrl-C` (otherwise the port will be taken when you debug)
58 | 1. From the `Debug` menu, select `Start Debugging`.
59 | 1. Select the `Django` debug configuration from the menu.
60 | 1. Open `tweeter/views.py` and set a breakpoint on line 26
61 | 1. Refresh the app in the browser to hit the breakpoint
62 | 1. Open the debug console (`View > Debug Console`), and type `request.user` into the debug console to inspect the logged-in user
63 | 
--------------------------------------------------------------------------------
/2021/9-windows-subsystem-for-linux/README.md:
--------------------------------------------------------------------------------
1 | # Debugging a Flask App with WSL in VS Code
2 | 
3 | ## Prerequisites
4 | 
5 | You'll need the following tools installed on your local machine:
6 | 
7 | 1. A machine with [Windows 10](https://www.microsoft.com/en-us/windows/get-windows-10). Alternatively, you can create a [Windows 10 Virtual Machine on Azure](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/windows/).
8 | 1. The Windows Subsystem for Linux [(WSL)](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
9 | 1. [Visual Studio Code](https://code.visualstudio.com/)
10 | 1. The [VS Code Remote - WSL Extension](https://aka.ms/vscode-wsl) for VS Code
11 | 1. The [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python) for VS Code
12 | 1. Ubuntu 18.04 for WSL from the [Microsoft Store](https://www.microsoft.com/en-us/p/ubuntu-1804-lts/9n9tngvndl3q?activetab=pivot:overviewtab)
13 | 
14 | 
15 | ## Open VS Code from WSL terminal
16 | 
17 | 1. Open a WSL terminal window (from the start menu or by typing `wsl` from a Windows terminal).
18 | 1. Navigate (`cd` command) to a folder into which you can clone a project.
19 | 1. Clone [this repo](https://github.com/luabud/TheCatSaidNo) by typing:
20 | `git clone https://github.com/luabud/TheCatSaidNo.git`
21 | 1. Navigate to the folder (`cd TheCatSaidNo`)
22 | 1. Type `code .` to open this folder in VS Code
23 | 1. Open the `app.py` file to activate the Python extension
24 | 
25 | ## Install the dependencies
26 | 1. Open the terminal in VS Code (Ctrl + `)
27 | 1. Install python3-venv by typing:
28 | 
29 | ```sudo apt-get update && sudo apt-get install python3-venv```
30 | 
31 | 1. Create a virtual environment by typing:
32 | 
33 | ```python3 -m venv env```
34 | 
35 | 1. If a notification prompt is displayed asking if you want to select this newly created environment, click on "Yes". Otherwise, click on the Python information displayed on the status bar, located on the bottom left of the screen
36 | 1. Create a new terminal to activate the virtual environment (Ctrl + Shift + `)
37 | 1. Install the dependencies:
38 | 
39 | ``` python -m pip install -r requirements.txt```
40 | 
41 | ## Run the Flask App in VS Code
42 | 1. Press F5 to start a debug session, and select "Flask" from the configuration options.
43 | 1. Ctrl + Click on the link that is displayed in the terminal to access the application.
44 | 1.
Open the Debug Console in VS Code (next to the Terminal) and enter:
45 | ```
46 | import sys
47 | print(sys.platform)
48 | ```
49 | 
50 | ## Configure and run the application tests
51 | 1. Open the command palette (Ctrl + Shift + P, or Command + Shift + P if you're on macOS)
52 | 1. Run the "Python: Configure Tests" command
53 | 1. Select "pytest" and then "." (root directory)
54 | 1. Click on the test beaker icon on the activity bar, on the left side.
55 | 1. Click on the "Run All Tests" icon on the top left.
56 | 
57 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to [project-title]
2 | 
3 | This project welcomes contributions and suggestions. Most contributions require you to agree to a
4 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
5 | the rights to use your contribution. For details, visit https://cla.microsoft.com.
6 | 
7 | When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
8 | a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
9 | provided by the bot. You will only need to do this once across all repos using our CLA.
10 | 
11 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
12 | For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
13 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
14 | 
15 | - [Code of Conduct](#coc)
16 | - [Issues and Bugs](#issue)
17 | - [Feature Requests](#feature)
18 | - [Submission Guidelines](#submit)
19 | 
20 | ## Code of Conduct
21 | Help us keep this project open and inclusive. Please read and follow our [Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
22 | 
23 | ## Found an Issue?
24 | If you find a bug in the source code or a mistake in the documentation, you can help us by
25 | [submitting an issue](#submit-issue) to the GitHub Repository. Even better, you can
26 | [submit a Pull Request](#submit-pr) with a fix.
27 | 
28 | ## Want a Feature?
29 | You can *request* a new feature by [submitting an issue](#submit-issue) to the GitHub
30 | Repository. If you would like to *implement* a new feature, please submit an issue with
31 | a proposal for your work first, to be sure that we can use it.
32 | 
33 | * **Small Features** can be crafted and directly [submitted as a Pull Request](#submit-pr).
34 | 
35 | ## Submission Guidelines
36 | 
37 | ### Submitting an Issue
38 | Before you submit an issue, search the archive; maybe your question was already answered.
39 | 
40 | If your issue appears to be a bug, and hasn't been reported, open a new issue.
41 | Help us to maximize the effort we can spend fixing issues and adding new
42 | features, by not reporting duplicate issues. Providing the following information will increase the
43 | chances of your issue being dealt with quickly:
44 | 
45 | * **Overview of the Issue** - if an error is being thrown, a non-minified stack trace helps
46 | * **Version** - what version is affected (e.g. 0.1.2)
47 | * **Motivation for or Use Case** - explain what you are trying to do and why the current behavior is a bug for you
48 | * **Browsers and Operating System** - is this a problem with all browsers?
49 | * **Reproduce the Error** - provide a live example or an unambiguous set of steps
50 | * **Related Issues** - has a similar issue been reported before?
51 | * **Suggest a Fix** - if you can't fix the bug yourself, perhaps you can point to what might be
52 | causing the problem (line of code or commit)
53 | 
54 | You can file new issues by providing the above information at the corresponding repository's issues link: https://github.com/[organization-name]/[repository-name]/issues/new.
55 | 
56 | ### Submitting a Pull Request (PR)
57 | Before you submit your Pull Request (PR), consider the following guidelines:
58 | 
59 | * Search the repository (https://github.com/[organization-name]/[repository-name]/pulls) for an open or closed PR
60 | that relates to your submission. You don't want to duplicate effort.
61 | 
62 | * Make your changes in a new git fork:
63 | 
64 | * Commit your changes using a descriptive commit message
65 | * Push your fork to GitHub:
66 | * In GitHub, create a pull request
67 | * If we suggest changes, then:
68 | * Make the required updates.
69 | * Rebase your fork and force push to your GitHub repository (this will update your Pull Request):
70 | 
71 | ```shell
72 | git rebase master -i
73 | git push -f
74 | ```
75 | 
76 | That's it! Thank you for your contribution!
77 | 
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) Microsoft Corporation. All rights reserved.
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------