├── Azure
│   ├── Salary.ipynb
│   ├── Salary.py
│   ├── data
│   │   └── sal.csv
│   ├── outputs
│   │   └── sal_model.pkl
│   ├── salenv.yml
│   └── score.py
├── Local
│   ├── data
│   │   └── sal.csv
│   ├── infer.py
│   ├── model
│   │   └── sal_model.pkl
│   └── train.py
└── README.md
/Azure/Salary.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Import standard Python modules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import datetime\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.externals import joblib"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Import Azure ML SDK modules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import azureml.core\n",
    "from azureml.core import Workspace\n",
    "from azureml.core.model import Model\n",
    "from azureml.core import Experiment\n",
    "from azureml.core.webservice import Webservice\n",
    "from azureml.core.image import ContainerImage\n",
    "from azureml.core.webservice import AciWebservice\n",
    "from azureml.core.conda_dependencies import CondaDependencies"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Check Azure ML SDK version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.0.6\n"
     ]
    }
   ],
   "source": [
    "print(azureml.core.VERSION)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Create Azure ML Workspace"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "AZ_SUBSCRIPTION_ID='Your_Azure_Subscription_ID'\n",
    "ws = Workspace.create(name='salary',\n",
    "                      subscription_id=AZ_SUBSCRIPTION_ID, \n",
    "                      resource_group='mi2',\n",
    "                      create_resource_group=True,\n",
    "                      location='southeastasia'\n",
    "                     )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Write configuration to local file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Wrote the config file config.json to: /Volumes/Data/Dropbox/Work/Content/Webinars/MI2/Jan18/AzureML/Demo/Azure/aml_config/config.json\n"
     ]
    }
   ],
   "source": [
    "ws.write_config()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Create Azure ML Experiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "exp = Experiment(workspace=ws, name='salexp')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Start logging metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "run = exp.start_logging() \n",
    "run.log(\"Experiment start time\", str(datetime.datetime.now()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Load salary dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "sal = pd.read_csv('data/sal.csv',header=0, index_col=None)\n",
    "X = sal[['x']]\n",
    "y = sal['y']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Split the train and test data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=10)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Train the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None,\n",
       "                 normalize=False)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lm = LinearRegression() \n",
    "lm.fit(X_train,y_train) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Freeze the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['outputs/sal_model.pkl']"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "filename = 'outputs/sal_model.pkl'\n",
    "joblib.dump(lm, filename)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Test the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "141760.56910569107\n"
     ]
    }
   ],
   "source": [
    "filename = 'outputs/sal_model.pkl'\n",
    "loaded_model=joblib.load(filename)\n",
    "y=loaded_model.predict([[21]])[0]\n",
    "print(y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Log metrics to Azure ML Experiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "run.log('Intercept :', lm.intercept_)\n",
    "run.log('Slope :', lm.coef_[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### End Azure ML Experiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "run.log(\"Experiment end time\", str(datetime.datetime.now()))\n",
    "run.complete()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Get Portal URL"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "https://mlworkspace.azure.ai/portal/subscriptions/9be00a6f-5335-4d37-9847-2f7013522146/resourceGroups/mi2-stage/providers/Microsoft.MachineLearningServices/workspaces/salary/experiments/salexp/runs/d9aec621-6498-44be-991b-f2504ad47ab1\n"
     ]
    }
   ],
   "source": [
    "print(run.get_portal_url())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Register the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Registering model sal_model\n"
     ]
    }
   ],
   "source": [
    "model = Model.register(model_path = \"outputs/sal_model.pkl\",\n",
    "                       model_name = \"sal_model\",\n",
    "                       tags = {\"key\": \"1\"},\n",
    "                       description = \"Salary Prediction\",\n",
    "                       workspace = ws)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Define Azure ML Deployment configuration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n",
    "                                               memory_gb=1, \n",
    "                                               tags={\"data\": \"Salary\", \"method\" : \"sklearn\"}, \n",
    "                                               description='Predict Stackoverflow Salary')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Create environment configuration file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# Conda environment specification. The dependencies defined in this file will\n",
      "# be automatically provisioned for runs with userManagedDependencies=False.\n",
      "\n",
      "# Details about the Conda environment file format:\n",
      "# https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually\n",
      "\n",
      "name: project_environment\n",
      "dependencies:\n",
      "  # The python interpreter version.\n",
      "  # Currently Azure ML only supports 3.5.2 and later.\n",
      "- python=3.6.2\n",
      "\n",
      "- pip:\n",
      "    # Required packages for AzureML execution, history, and data preparation.\n",
      "  - azureml-defaults\n",
      "- scikit-learn\n",
      "\n"
     ]
    }
   ],
   "source": [
    "salenv = CondaDependencies()\n",
    "salenv.add_conda_package(\"scikit-learn\")\n",
    "\n",
    "with open(\"salenv.yml\",\"w\") as f:\n",
    "    f.write(salenv.serialize_to_string())\n",
    "with open(\"salenv.yml\",\"r\") as f:\n",
    "    print(f.read())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Create Azure ML Scoring file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Overwriting score.py\n"
     ]
    }
   ],
   "source": [
    "%%writefile score.py\n",
    "import json\n",
    "import numpy as np\n",
    "import os\n",
    "import pickle\n",
    "from sklearn.externals import joblib\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "from azureml.core.model import Model\n",
    "\n",
    "def init():\n",
    "    global model\n",
    "    # retrieve the path to the model file using the model name\n",
    "    model_path = Model.get_model_path('sal_model')\n",
    "    model = joblib.load(model_path)\n",
    "\n",
    "def run(raw_data):\n",
    "    data = np.array(json.loads(raw_data)['data'])\n",
    "    # make prediction\n",
    "    y_hat = model.predict(data)\n",
    "    return json.dumps(y_hat.tolist())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Deploy the model to Azure Container Instance\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 478 µs, sys: 714 µs, total: 1.19 ms\n",
      "Wall time: 560 µs\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "image_config = ContainerImage.image_configuration(execution_script=\"score.py\", \n",
    "                                                  runtime=\"python\", \n",
    "                                                  conda_file=\"salenv.yml\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Expose web service"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Creating image\n",
      "Image creation operation finished for image salary-svc:1, operation \"Succeeded\"\n",
      "Creating service\n",
      "Running......................\n",
      "SucceededACI service creation operation finished, operation \"Succeeded\"\n"
     ]
    }
   ],
   "source": [
    "service = Webservice.deploy_from_model(workspace=ws,\n",
    "                                       name='salary-svc',\n",
    "                                       deployment_config=aciconfig,\n",
    "                                       models=[model],\n",
    "                                       image_config=image_config)\n",
    "\n",
    "service.wait_for_deployment(show_output=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Get the Web Service URL"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "http://40.119.206.56:80/score\n"
     ]
    }
   ],
   "source": [
    "print(service.scoring_uri)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\"[185924.7967479675]\""
     ]
    }
   ],
   "source": [
    "!curl -X POST \\\n",
    "\t-H 'Content-Type':'application/json' \\\n",
    "\t-d '{\"data\":[[45]]}' \\\n",
    "\thttp://40.119.206.56:80/score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Delete Workspace and clean up resources"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "ws.delete()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
--------------------------------------------------------------------------------
/Azure/Salary.py:
--------------------------------------------------------------------------------
# Import standard Python modules
import datetime
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.externals import joblib


# Import Azure ML SDK modules
import azureml.core
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core import Experiment
from azureml.core.webservice import Webservice
from azureml.core.image import ContainerImage
from azureml.core.webservice import AciWebservice
from azureml.core.conda_dependencies import CondaDependencies

# Check Azure ML SDK version
print(azureml.core.VERSION)

# Create Azure ML Workspace
ws = Workspace.create(name='salary',
                      subscription_id='9be00a6f-5335-4d37-9847-2f7013522146',
                      resource_group='mi2',
                      create_resource_group=True,
                      location='southeastasia'
                      )

# Write configuration to local file
ws.write_config()

# Create Azure ML Experiment
exp = Experiment(workspace=ws, name='salexp')

# Start logging metrics
run = exp.start_logging()
run.log("Experiment start time", str(datetime.datetime.now()))

# Load salary dataset
sal = pd.read_csv('data/sal.csv',header=0, index_col=None)
X = sal[['x']]
y = sal['y']

# Split the train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=10)

# Train the model
lm = LinearRegression()
lm.fit(X_train,y_train)

# Freeze the model
filename = 'outputs/sal_model.pkl'
joblib.dump(lm, filename)

# Test the model
filename = 'outputs/sal_model.pkl'
loaded_model=joblib.load(filename)
y=loaded_model.predict([[21]])[0]
print(y)

# Log metrics to Azure ML Experiment
run.log('Intercept :', lm.intercept_)
run.log('Slope :', lm.coef_[0])

# End Azure ML Experiment
run.log("Experiment end time", str(datetime.datetime.now()))
run.complete()

# Get Portal URL
print(run.get_portal_url())

# Register the model
model = Model.register(model_path = "outputs/sal_model.pkl",
                       model_name = "sal_model",
                       tags = {"key": "1"},
                       description = "Salary Prediction",
                       workspace = ws)

# Define Azure ML Deployment configuration
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={"data": "Salary", "method" : "sklearn"},
                                               description='Predict Stackoverflow Salary')

# Create environment configuration file
salenv = CondaDependencies()
salenv.add_conda_package("scikit-learn")

with open("salenv.yml","w") as f:
    f.write(salenv.serialize_to_string())
with open("salenv.yml","r") as f:
    print(f.read())


# Create Azure ML Scoring file
'''
%%writefile score.py
import json
import numpy as np
import os
import pickle
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression

from azureml.core.model import Model

def init():
    global model
    # retrieve the path to the model file using the model name
    model_path = Model.get_model_path('sal_model')
    model = joblib.load(model_path)

def run(raw_data):
    data = np.array(json.loads(raw_data)['data'])
    # make prediction
    y_hat = model.predict(data)
    return json.dumps(y_hat.tolist())
'''
# Deploy the model to Azure Container Instance
# %%time
image_config = ContainerImage.image_configuration(execution_script="score.py",
                                                  runtime="python",
                                                  conda_file="salenv.yml")
# Expose web service
service = Webservice.deploy_from_model(workspace=ws,
                                       name='salary-svc',
                                       deployment_config=aciconfig,
                                       models=[model],
                                       image_config=image_config)

service.wait_for_deployment(show_output=True)

# Get the Web Service URL
print(service.scoring_uri)

# Clean up resources
ws.delete()
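
Once Webservice.deploy_from_model() finishes, the ACI endpoint printed by service.scoring_uri accepts plain HTTP POST requests carrying the same JSON payload that score.py parses in run(). The sketch below is illustrative rather than part of the repository: it assumes the deployment above succeeded, that the requests package is installed, and that SCORING_URI is replaced with your own endpoint (the IP address shown in the notebook output belongs to a deleted deployment).

import json

import requests

# Placeholder endpoint: substitute the value printed by `print(service.scoring_uri)`.
SCORING_URI = "http://<your-aci-ip>:80/score"

# score.py expects {"data": [[x1], [x2], ...]} and returns a JSON-encoded list of predictions.
payload = json.dumps({"data": [[21], [45]]})
headers = {"Content-Type": "application/json"}

response = requests.post(SCORING_URI, data=payload, headers=headers)
print(response.json())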
--------------------------------------------------------------------------------
/Azure/data/sal.csv:
--------------------------------------------------------------------------------
x,y
0,103100
1,104900
2,106800
3,108700
4,110400
5,112300
6,114200
7,116100
8,117800
9,119700
10,121600
11,123300
12,125200
13,127100
14,128900
15,130700
16,132600
17,134400
18,136300
19,138000
20,139900
--------------------------------------------------------------------------------
/Azure/outputs/sal_model.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janakiramm/azureml-tutorial/153e82ff4482468b461b812e731dbb8f5ecc3004/Azure/outputs/sal_model.pkl
--------------------------------------------------------------------------------
/Azure/salenv.yml:
--------------------------------------------------------------------------------
# Conda environment specification. The dependencies defined in this file will
# be automatically provisioned for runs with userManagedDependencies=False.

# Details about the Conda environment file format:
# https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually

name: project_environment
dependencies:
  # The python interpreter version.
  # Currently Azure ML only supports 3.5.2 and later.
- python=3.6.2

- pip:
    # Required packages for AzureML execution, history, and data preparation.
  - azureml-defaults
- scikit-learn
--------------------------------------------------------------------------------
/Azure/score.py:
--------------------------------------------------------------------------------
import json
import numpy as np
import os
import pickle
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression

from azureml.core.model import Model

def init():
    global model
    # retrieve the path to the model file using the model name
    model_path = Model.get_model_path('sal_model')
    model = joblib.load(model_path)

def run(raw_data):
    data = np.array(json.loads(raw_data)['data'])
    # make prediction
    y_hat = model.predict(data)
    return json.dumps(y_hat.tolist())
--------------------------------------------------------------------------------
/Local/data/sal.csv:
--------------------------------------------------------------------------------
x,y
0,103100
1,104900
2,106800
3,108700
4,110400
5,112300
6,114200
7,116100
8,117800
9,119700
10,121600
11,123300
12,125200
13,127100
14,128900
15,130700
16,132600
17,134400
18,136300
19,138000
20,139900
--------------------------------------------------------------------------------
/Local/infer.py:
--------------------------------------------------------------------------------
#!flask/bin/python
from flask import Flask, jsonify
from sklearn.externals import joblib


filename = 'model/sal_model.pkl'

app = Flask(__name__)

@app.route('/')
def index():
    return "Stackoverflow Salary Predictor"

@app.route('/sal/<int:x>', methods=['GET'])
def predict(x):
    loaded_model=joblib.load(filename)
    y=loaded_model.predict([[x]])[0]
    sal=jsonify({'salary': round(y,2)})
    return sal

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=80)
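
One way to exercise the Local workflow end to end (illustrative, assuming pandas, scikit-learn and Flask are installed): run `python train.py` from the Local directory to write model/sal_model.pkl, start the API with `python infer.py` (binding to port 80 may require elevated privileges), and then query it from a second terminal, for example as below.

import requests

# Ask the Flask endpoint defined in infer.py for a prediction at x = 25.
response = requests.get("http://localhost/sal/25")
print(response.json())  # a JSON object of the form {"salary": <value rounded to 2 decimals>}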
--------------------------------------------------------------------------------
/Local/model/sal_model.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janakiramm/azureml-tutorial/153e82ff4482468b461b812e731dbb8f5ecc3004/Local/model/sal_model.pkl
--------------------------------------------------------------------------------
/Local/train.py:
--------------------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

sal = pd.read_csv('data/sal.csv',header=0, index_col=None)
X = sal[['x']]
y = sal['y']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=10)

lm = LinearRegression()
lm.fit(X_train,y_train)

print('Intercept :', round(lm.intercept_,2))
print('Slope :', round(lm.coef_[0],2))

from sklearn.metrics import mean_squared_error
y_predict= lm.predict(X_test)
mse = mean_squared_error(y_predict,y_test)
print('MSE :', round(mse,2))

from sklearn.externals import joblib
filename = 'model/sal_model.pkl'
joblib.dump(lm, filename)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Getting Started with Azure ML Service

For a detailed walkthrough, refer to the [tutorial](https://thenewstack.io/build-and-deploy-a-machine-learning-model-with-azure-ml-service/) published at The New Stack.
--------------------------------------------------------------------------------
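
Both halves of the repository fit the same single-feature linear regression on data/sal.csv, so the responses recorded in Salary.ipynb can be sanity-checked offline. From the two logged predictions (x = 21 → 141760.57 and x = 45 → 185924.80) the fitted line works out to roughly y ≈ 103116.87 + 1840.18·x; the exact coefficients are printed by train.py and logged to the experiment by Salary.ipynb. A quick verification sketch, assuming scikit-learn and the repository's data file are available:

import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Re-fit with the same 75/25 split used by train.py and Salary.py.
sal = pd.read_csv("data/sal.csv", header=0, index_col=None)
X_train, _, y_train, _ = train_test_split(sal[["x"]], sal["y"],
                                          test_size=0.25, random_state=10)
lm = LinearRegression().fit(X_train, y_train)

print(lm.intercept_, lm.coef_[0])  # fitted intercept and slope
print(lm.predict([[21]])[0])       # should be close to 141760.57, as recorded in the notebook
print(lm.predict([[45]])[0])       # should be close to 185924.80, as returned by the ACI service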