├── ai ├── __init__.py ├── .gitignore ├── cloud │ ├── request.json │ ├── deploy │ │ ├── endpoint.yml │ │ ├── deployment.yml │ │ └── deploy.yml │ ├── environment │ │ ├── environment_deploy.yaml │ │ ├── environment.yaml │ │ ├── deploy │ │ │ └── Dockerfile │ │ └── train │ │ │ └── Dockerfile │ ├── job.yml │ └── dataset.yml ├── .amlignore ├── requirements.txt ├── config.yml ├── trainer.py ├── data.py ├── model.py ├── notebooks │ └── refactor.ipynb └── score.py ├── README.md ├── .gitignore ├── web ├── next-env.d.ts ├── public │ ├── favicon.ico │ ├── images │ │ ├── logo.png │ │ └── logo-white.png │ └── vercel.svg ├── postcss.config.js ├── styles │ └── globals.css ├── server.js ├── pages │ ├── _app.tsx │ ├── api │ │ └── analyze.ts │ ├── theme.tsx │ ├── train.tsx │ └── index.tsx ├── tailwind.config.js ├── util │ └── helpers.ts ├── .gitignore ├── tsconfig.json ├── components │ ├── DeviceSelector.tsx │ ├── layout │ │ ├── theme.tsx │ │ ├── navigation.tsx │ │ └── footer.tsx │ └── Video.tsx ├── package.json ├── README.md └── .eslint.js ├── roshambo.model ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── LICENSE ├── .vscode └── launch.json └── .github └── workflows ├── web-deploy.yml └── aml-training.yml /ai/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # roshambo -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | azureml_outputs 2 | cert/ 3 | *.zip -------------------------------------------------------------------------------- /ai/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .venv/ 3 | data/ 4 | mlruns/ 5 | outputs/ 6 | -------------------------------------------------------------------------------- /ai/cloud/request.json: -------------------------------------------------------------------------------- 1 | { "image": "https://aiadvocate.z5.web.core.windows.net/rock.png" } -------------------------------------------------------------------------------- /web/next-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | -------------------------------------------------------------------------------- /web/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiadvocates/roshambo/HEAD/web/public/favicon.ico -------------------------------------------------------------------------------- /web/public/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiadvocates/roshambo/HEAD/web/public/images/logo.png -------------------------------------------------------------------------------- /web/public/images/logo-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiadvocates/roshambo/HEAD/web/public/images/logo-white.png -------------------------------------------------------------------------------- /web/postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } 7 | 
-------------------------------------------------------------------------------- /web/styles/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | 4 | #video { 5 | @apply border-gray-900; 6 | } 7 | 8 | @tailwind utilities; -------------------------------------------------------------------------------- /ai/.amlignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .venv/ 3 | cloud/ 4 | data/ 5 | mlruns/ 6 | notebooks/ 7 | outputs/ 8 | .gitignore 9 | requirements.txt 10 | score.py 11 | -------------------------------------------------------------------------------- /ai/cloud/deploy/endpoint.yml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/managedOnlineEndpoint.schema.json 2 | name: roshambo-endpoint 3 | auth_mode: key -------------------------------------------------------------------------------- /web/server.js: -------------------------------------------------------------------------------- 1 | const cli = require("next/dist/cli/next-start"); 2 | console.log(`using port ${process.env.PORT || 3000}`); 3 | cli.nextStart(["-p", process.env.PORT || 3000]); 4 | -------------------------------------------------------------------------------- /web/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import "../styles/globals.css"; 2 | import type { AppProps } from "next/app"; 3 | 4 | function MyApp({ Component, pageProps }: AppProps) { 5 | return ; 6 | } 7 | 8 | export default MyApp; 9 | -------------------------------------------------------------------------------- /ai/requirements.txt: -------------------------------------------------------------------------------- 1 | --extra-index-url https://download.pytorch.org/whl/cu116 2 | torch 3 | torchvision 4 | torchaudio 5 | mlflow 6 | jsonargparse[signatures] 7 | pytorch-lightning[extra] 8 | ipykernel 9 | onnxruntime-gpu 10 | inference-schema -------------------------------------------------------------------------------- /web/tailwind.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | purge: [ 3 | './pages/**/*.tsx', 4 | './components/**/*.tsx' 5 | ], 6 | darkMode: false, // or 'media' or 'class' 7 | theme: { 8 | extend: {}, 9 | }, 10 | variants: { 11 | extend: {}, 12 | }, 13 | plugins: [], 14 | } 15 | -------------------------------------------------------------------------------- /ai/cloud/environment/environment_deploy.yaml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/environment.schema.json 2 | name: pytorch-lightning-gpu-deploy 3 | version: 4 4 | tags: 5 | framework: onnxruntime-gpu 6 | contact: Seth Juarez 7 | ort_version: 1.12.1 8 | build: 9 | path: deploy -------------------------------------------------------------------------------- /web/util/helpers.ts: -------------------------------------------------------------------------------- 1 | 2 | 3 | export const getMediaDevices = async (): Promise => { 4 | if(navigator.mediaDevices) { 5 | const items = await navigator.mediaDevices.enumerateDevices() 6 | return items.filter(device => device.kind === 'videoinput') 7 | } else { 8 | return [] 9 | } 10 | } -------------------------------------------------------------------------------- 
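Before `ai/cloud/job.yml` (below) can reference `azureml:pytorch-lightning:9` and `azureml:rpsn:1`, or the deployment spec can pull `azureml:pytorch-lightning-gpu-deploy`, those assets have to exist in the workspace. A sketch of that one-time registration with the `az ml` CLI v2 — it assumes the `ml` extension is installed and `defaults.workspace` / `defaults.group` are configured, as the GitHub workflows do:

```bash
# one-time asset registration (assumes: az extension add -n ml, plus az config defaults set)
az ml environment create --file ai/cloud/environment/environment.yaml
az ml environment create --file ai/cloud/environment/environment_deploy.yaml
az ml data create --file ai/cloud/dataset.yml
```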
/ai/cloud/environment/environment.yaml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/environment.schema.json 2 | name: pytorch-lightning 3 | version: 9 4 | tags: 5 | framework: PyTorch Lightning 6 | contact: Seth Juarez 7 | pl_version: 1.7.2 8 | pt_version: 1.12.1 9 | build: 10 | path: train -------------------------------------------------------------------------------- /roshambo.model: -------------------------------------------------------------------------------- 1 | { 2 | "title": "GPT3 Chat", 3 | "runtime": "https://ghrt-app-prod-3pqjhgdpctklbiec.proudbush-c7568808.westus.azurecontainerapps.io/", 4 | "api": "chat", 5 | "type": "multiturn", 6 | "ui": { 7 | "panel" :{ 8 | "text": "your question / comment", 9 | "label": "response" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu18.04:20220815.v1 2 | RUN python -m pip install --upgrade pip 3 | RUN pip install \ 4 | --extra-index-url https://download.pytorch.org/whl/cu116 \ 5 | azureml-defaults \ 6 | torch \ 7 | torchvision \ 8 | pandas \ 9 | inference-schema \ 10 | onnxruntime-gpu 11 | -------------------------------------------------------------------------------- /ai/cloud/environment/deploy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu18.04:20220815.v1 2 | RUN python -m pip install --upgrade pip 3 | RUN pip install \ 4 | --extra-index-url https://download.pytorch.org/whl/cu116 \ 5 | azureml-defaults \ 6 | torch \ 7 | torchvision \ 8 | pandas \ 9 | inference-schema \ 10 | onnxruntime-gpu 11 | -------------------------------------------------------------------------------- /ai/cloud/deploy/deployment.yml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/managedOnlineDeployment.schema.json 2 | name: blue 3 | endpoint_name: REPLACE 4 | app_insights_enabled: true 5 | model: azureml:roshambo-model@latest 6 | code_configuration: 7 | code: ./ 8 | scoring_script: score.py 9 | environment: azureml:pytorch-lightning-gpu-deploy@latest 10 | instance_type: Standard_NC6s_v3 11 | instance_count: 1 -------------------------------------------------------------------------------- /ai/cloud/environment/train/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu18.04:20220815.v1 2 | RUN python -m pip install --upgrade pip 3 | RUN pip install \ 4 | --extra-index-url https://download.pytorch.org/whl/cu116 \ 5 | torch \ 6 | torchvision \ 7 | torchaudio \ 8 | mlflow \ 9 | azureml-mlflow \ 10 | jsonargparse[signatures] \ 11 | pytorch-lightning[extra] \ 12 | ipykernel \ 13 | onnxruntime-gpu \ 14 | inference-schema 15 | -------------------------------------------------------------------------------- /ai/cloud/job.yml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/commandJob.schema.json 2 | experiment_name: roshambo 3 | display_name: roshambo 4 | code: ../ 5 | command: >- 6 | python trainer.py fit 7 | --config config.yml 8 | 
--trainer.default_root_dir ./outputs 9 | --data.data_dir ${{inputs.training_data}} 10 | inputs: 11 | training_data: 12 | type: uri_folder 13 | path: azureml:rpsn:1 14 | mode: ro_mount 15 | environment: azureml:pytorch-lightning:9 16 | compute: azureml:gandalf -------------------------------------------------------------------------------- /ai/cloud/dataset.yml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/data.schema.json 2 | 3 | # Supported paths include: 4 | # local: ./ 5 | # blob: https://.blob.core.windows.net// 6 | # ADLS gen2: abfss://@.dfs.core.windows.net// 7 | # Datastore: azureml://datastores//paths/ 8 | type: uri_folder 9 | name: rpsn 10 | version: 1 11 | description: Rock, Paper, Scissors, None Image dataset 12 | path: azureml://datastores/experiments/paths/roshambo -------------------------------------------------------------------------------- /web/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | 27 | # local env files 28 | .env.local 29 | .env.development.local 30 | .env.test.local 31 | .env.production.local 32 | 33 | # vercel 34 | .vercel 35 | 36 | # env 37 | .env -------------------------------------------------------------------------------- /ai/cloud/deploy/deploy.yml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/managedOnlineDeployment.schema.json 2 | name: roshambo-endpoint 3 | type: online 4 | auth_mode: key 5 | traffic: 6 | blue: 100 7 | 8 | deployments: 9 | #blue deployment 10 | - name: blue 11 | app_insights_enabled: true 12 | model: azureml:roshambo:1 13 | code_configuration: 14 | code: 15 | local_path: ../ 16 | scoring_script: score.py 17 | environment: azureml:pytorch-lightning-gpu-deploy:3 18 | instance_type: Standard_NC6s_v3 19 | scale_settings: 20 | scale_type: Manual 21 | instance_count: 1 22 | min_instances: 1 23 | max_instances: 1 -------------------------------------------------------------------------------- /web/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": [ 5 | "dom", 6 | "dom.iterable", 7 | "esnext" 8 | ], 9 | "allowJs": true, 10 | "skipLibCheck": true, 11 | "strict": true, 12 | "forceConsistentCasingInFileNames": true, 13 | "noEmit": true, 14 | "esModuleInterop": true, 15 | "module": "esnext", 16 | "moduleResolution": "node", 17 | "resolveJsonModule": true, 18 | "isolatedModules": true, 19 | "jsx": "preserve", 20 | "baseUrl": ".", 21 | "paths": { 22 | "~/*": ["./*"] 23 | } 24 | }, 25 | "include": [ 26 | "next-env.d.ts", 27 | "**/*.ts", 28 | "**/*.tsx" 29 | ], 30 | "exclude": [ 31 | "node_modules" 32 | ] 33 | } 34 | -------------------------------------------------------------------------------- /web/components/DeviceSelector.tsx: -------------------------------------------------------------------------------- 1 | import { getMediaDevices } from "~/util/helpers" 2 | import React, { useState, useEffect, useRef } from 'react'; 3 | 
4 | interface DeviceProps { 5 | onSelect(id: any): any 6 | } 7 | 8 | export const DeviceSelector = ({onSelect}: DeviceProps) => { 9 | const [devices, setDevices] = useState([]) 10 | const selectEl = useRef(null) 11 | 12 | const selectDevice = () => selectEl.current && onSelect(selectEl.current?.value); 13 | 14 | useEffect(() => { 15 | (async () => { 16 | const d = await getMediaDevices() 17 | setDevices(d) 18 | })() 19 | }) 20 | 21 | return ( 22 | 25 | ) 26 | } 27 | 28 | export default DeviceSelector -------------------------------------------------------------------------------- /ai/config.yml: -------------------------------------------------------------------------------- 1 | # pytorch_lightning==1.7.2 2 | seed_everything: true 3 | trainer: 4 | enable_checkpointing: true 5 | callbacks: 6 | - class_path: pytorch_lightning.callbacks.EarlyStopping 7 | init_args: 8 | monitor: val_loss 9 | min_delta: 0.0001 10 | patience: 5 11 | verbose: True 12 | mode: min 13 | - class_path: pytorch_lightning.callbacks.ModelCheckpoint 14 | init_args: 15 | monitor: val_acc 16 | dirpath: outputs/checkpoints 17 | filename: roshambo-{epoch:02d}-{val_acc:.2f} 18 | save_top_k: 3 19 | mode: min 20 | - class_path: pytorch_lightning.callbacks.LearningRateMonitor 21 | init_args: 22 | logging_interval: 'step' 23 | default_root_dir: outputs 24 | auto_select_gpus: true 25 | gpus: 1 26 | max_epochs: 100 27 | logger: true 28 | model: 29 | classes: 4 30 | lr: .01 31 | data: 32 | data_dir: data/images 33 | batch_size: 32 34 | train_split: 0.8 35 | -------------------------------------------------------------------------------- /web/pages/api/analyze.ts: -------------------------------------------------------------------------------- 1 | import axios from "axios"; 2 | import { RestError } from "@azure/core-http"; 3 | import type { NextApiRequest, NextApiResponse } from "next"; 4 | 5 | const inferenceApi = process.env["INFERENCE_ENDPOINT"] || ''; 6 | const inferencekey = process.env["INFERENCE_KEY"] || ''; 7 | 8 | export default async (req: NextApiRequest, res: NextApiResponse) => { 9 | try { 10 | const response = await axios.post(inferenceApi, req.body, 11 | { 12 | headers: { 13 | "Content-Type": "application/json", 14 | Authorization: `Bearer ${inferencekey}`, 15 | }, 16 | } 17 | ); 18 | res.status(200).json(response.data); 19 | } catch (error) { 20 | const e = error; 21 | res.status(200).json({ 22 | error: { 23 | code: e.code, 24 | details: e.details, 25 | message: e.message, 26 | name: e.name, 27 | statusCode: e.statusCode, 28 | }, 29 | }); 30 | } 31 | }; 32 | -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "node server.js", 9 | "export": "next build && next export" 10 | }, 11 | "dependencies": { 12 | "@azure/core-http": "^2.2.5", 13 | "axios": "^0.27.2", 14 | "next": "10.2.3", 15 | "react": "17.0.2", 16 | "react-dom": "17.0.2" 17 | }, 18 | "devDependencies": { 19 | "@types/react": "^17.0.11", 20 | "@typescript-eslint/eslint-plugin": "^4.26.1", 21 | "@typescript-eslint/parser": "^4.26.1", 22 | "autoprefixer": "^10.2.6", 23 | "eslint": "^7.28.0", 24 | "eslint-config-prettier": "^8.3.0", 25 | "eslint-plugin-jsx-a11y": "^6.4.1", 26 | "eslint-plugin-prettier": "^3.4.0", 27 | "eslint-plugin-react": "^7.24.0", 28 | "eslint-plugin-react-hooks": 
"^4.2.0", 29 | "eslint-plugin-tailwind": "^0.2.1", 30 | "postcss": "^8.3.2", 31 | "tailwindcss": "^2.1.4", 32 | "typescript": "^4.3.2" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /ai/trainer.py: -------------------------------------------------------------------------------- 1 | import mlflow 2 | import warnings 3 | from pathlib import Path 4 | from model import RoshamboModel 5 | from data import RoshamboDataModule 6 | from pytorch_lightning.utilities.cli import LightningCLI 7 | 8 | class RoshamboCLI(LightningCLI): 9 | def after_fit(self): 10 | print('Saving model!') 11 | 12 | best_model = self.trainer.checkpoint_callback.best_model_path 13 | model = RoshamboModel.load_from_checkpoint(best_model) 14 | model_dir = Path(self.trainer.default_root_dir).resolve() / 'model' 15 | model_params, file_size = model.save(model_dir, self.datamodule.classes) 16 | mlflow.log_params({ 17 | "param_size": "{:,}".format(model_params), 18 | "model_size": "{:,}".format(file_size), 19 | "model_type": model.model_type 20 | }) 21 | 22 | if __name__ == '__main__': 23 | warnings.filterwarnings("ignore") 24 | mlflow.pytorch.autolog() 25 | with mlflow.start_run() as run: 26 | cli = RoshamboCLI(RoshamboModel, RoshamboDataModule) 27 | -------------------------------------------------------------------------------- /web/public/vercel.svg: -------------------------------------------------------------------------------- 1 | 3 | 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 aiadvocates 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /web/components/layout/theme.tsx: -------------------------------------------------------------------------------- 1 | import React, { ReactNode } from 'react' 2 | import Head from 'next/head' 3 | import Navigation from './navigation' 4 | import Footer from './footer' 5 | import { useRouter } from 'next/router' 6 | 7 | type Props = { 8 | title: string 9 | children?: ReactNode 10 | 11 | } 12 | 13 | const Theme = ({ title, children }: Props) => { 14 | 15 | return ( 16 | <> 17 | 18 | { title } 19 | 20 | 21 | 22 | 23 | 24 |
25 | 26 |
27 | {children} 28 |
29 |
30 |
31 |
32 |
33 | 34 | ) 35 | } 36 | 37 | export default Theme -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Next.js: debug server-side", 6 | "type": "node-terminal", 7 | "cwd": "${workspaceFolder}/web", 8 | "request": "launch", 9 | "command": "yarn dev" 10 | }, 11 | { 12 | "name": "Next.js: debug client-side", 13 | "type": "chrome", 14 | "cwd": "${workspaceFolder}/web", 15 | "request": "launch", 16 | "url": "http://localhost:3000" 17 | }, 18 | { 19 | "name": "Next.js: debug full stack", 20 | "type": "node-terminal", 21 | "request": "launch", 22 | "command": "yarn dev", 23 | "cwd": "${workspaceFolder}/src", 24 | "serverReadyAction": { 25 | "pattern": "started server on .+, url: (https?://.+)", 26 | "uriFormat": "%s", 27 | "action": "debugWithEdge" 28 | } 29 | }, 30 | { 31 | "name": "PL Trainer", 32 | "type": "python", 33 | "request": "launch", 34 | "cwd": "${workspaceFolder}/ai", 35 | "program": "trainer.py", 36 | "args": ["fit", "--config", "config.yml"], 37 | "console": "integratedTerminal" 38 | }, 39 | { 40 | "name": "Run teh filez", 41 | "type": "python", 42 | "request": "launch", 43 | "program": "${file}", 44 | "cwd": "${fileDirname}", 45 | "console": "integratedTerminal" 46 | } 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /.github/workflows/web-deploy.yml: -------------------------------------------------------------------------------- 1 | name: roshambo.ai deployment 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | push: 7 | branches: 8 | - main 9 | paths: 10 | - 'web/**' 11 | pull_request: 12 | types: [closed] 13 | branches: 14 | - main 15 | paths: 16 | - 'web/**' 17 | 18 | env: 19 | NODE_VERSION: '16.x' 20 | APP_NAME: rocksie 21 | APP_RG: roshambo 22 | ARTIFACT: webapp-deploy.zip 23 | 24 | jobs: 25 | build-and-deploy: 26 | name: Build and Deploy 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: 'Checkout' 30 | uses: actions/checkout@master 31 | 32 | - name: 'Login via Azure CLI' 33 | uses: azure/login@v1 34 | with: 35 | creds: ${{ secrets.AZURE_WEBAPP_TOKEN }} 36 | 37 | - name: Use Node.js ${{ env.NODE_VERSION }} 38 | uses: actions/setup-node@v1 39 | with: 40 | node-version: ${{ env.NODE_VERSION }} 41 | 42 | - name: 'create package' 43 | run: | 44 | # Build site and create artifact 45 | cd web 46 | yarn install 47 | yarn build 48 | # verify contents 49 | tree -av --dirsfirst -L 2 50 | # zip folder 51 | zip -r -q ../${{ env.ARTIFACT }} . -x .git 52 | # verify zip file 53 | zip -sf ../${{ env.ARTIFACT }} 54 | cd .. 55 | 56 | - name: 'Deploy to Azure WebApp' 57 | run: | 58 | # deploy using azure CLI 59 | az webapp deploy --resource-group ${{ env.APP_RG }} --name ${{ env.APP_NAME }} --src-path ${{ env.ARTIFACT }} --type zip -------------------------------------------------------------------------------- /web/README.md: -------------------------------------------------------------------------------- 1 | This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). 2 | 3 | ## Getting Started 4 | 5 | First, run the development server: 6 | 7 | ```bash 8 | npm run dev 9 | # or 10 | yarn dev 11 | ``` 12 | 13 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. 
14 | 15 | You can start editing the page by modifying `pages/index.js`. The page auto-updates as you edit the file. 16 | 17 | [API routes](https://nextjs.org/docs/api-routes/introduction) can be accessed on [http://localhost:3000/api/hello](http://localhost:3000/api/hello). This endpoint can be edited in `pages/api/hello.js`. 18 | 19 | The `pages/api` directory is mapped to `/api/*`. Files in this directory are treated as [API routes](https://nextjs.org/docs/api-routes/introduction) instead of React pages. 20 | 21 | ## Learn More 22 | 23 | To learn more about Next.js, take a look at the following resources: 24 | 25 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. 26 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. 27 | 28 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! 29 | 30 | ## Deploy on Vercel 31 | 32 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. 33 | 34 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. 35 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.194.0/containers/python-3 3 | { 4 | "name": "Roshambo AI Container", 5 | "build": { 6 | "dockerfile": "Dockerfile", 7 | "context": "." 8 | }, 9 | 10 | // Set *default* container specific settings.json values on container create. 11 | "settings": { 12 | "python.pythonPath": "/usr/local/bin/python", 13 | "python.languageServer": "Pylance", 14 | "python.linting.enabled": true, 15 | "python.linting.pylintEnabled": true, 16 | "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8", 17 | "python.formatting.blackPath": "/usr/local/py-utils/bin/black", 18 | "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf", 19 | "python.linting.banditPath": "/usr/local/py-utils/bin/bandit", 20 | "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8", 21 | "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy", 22 | "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle", 23 | "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle", 24 | "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint" 25 | }, 26 | 27 | // Add the IDs of extensions you want installed when the container is created. 28 | "extensions": [ 29 | "ms-python.python", 30 | "ms-python.vscode-pylance" 31 | ], 32 | "runArgs": [ 33 | "--gpus=all" 34 | ] 35 | 36 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 37 | // "forwardPorts": [], 38 | 39 | // Use 'postCreateCommand' to run commands after the container is created. 40 | // "postCreateCommand": "pip3 install --user -r requirements.txt", 41 | 42 | // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 
43 | //"remoteUser": "vscode" 44 | } 45 | -------------------------------------------------------------------------------- /ai/data.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | from typing import Optional 4 | import pytorch_lightning as pl 5 | from torchvision import datasets, transforms 6 | from torch.utils.data import DataLoader, random_split 7 | 8 | class RoshamboDataModule(pl.LightningDataModule): 9 | def __init__(self, data_dir: str = "path/to/dir", 10 | batch_size: int = 32, 11 | train_split: float = .8): 12 | super().__init__() 13 | self.data_dir = data_dir 14 | self.batch_size = batch_size 15 | self.train_split = train_split 16 | 17 | def setup(self, stage: Optional[str] = None): 18 | self.transform = transforms.Compose([ 19 | transforms.Resize(256), 20 | transforms.CenterCrop(224), 21 | transforms.ToTensor(), 22 | transforms.Normalize(mean=[0.485, 0.456, 0.406], 23 | std=[0.229, 0.224, 0.225]) 24 | ]) 25 | 26 | #torch.zeros(size, 3, dtype=torch.float).scatter_(1, y.view(-1, 1), 1) 27 | 28 | self.target_transform = transforms.Compose([ 29 | transforms.Lambda(lambda y: torch.zeros(len(self.classes), dtype=torch.float).scatter_(0, torch.tensor(y), value=1)) 30 | ]) 31 | 32 | self.raw_data = datasets.ImageFolder(self.data_dir, 33 | transform=self.transform, 34 | target_transform=self.target_transform) 35 | self.classes = self.raw_data.classes 36 | 37 | sz = len(self.raw_data) 38 | train_sz = math.floor(self.train_split * sz) 39 | val_sz = sz - train_sz 40 | 41 | self.train_dataset, self.val_dataset = random_split(self.raw_data, 42 | [train_sz, val_sz]) 43 | 44 | def train_dataloader(self): 45 | return DataLoader(self.train_dataset, batch_size=self.batch_size) 46 | 47 | def val_dataloader(self): 48 | return DataLoader(self.val_dataset, batch_size=self.batch_size) 49 | -------------------------------------------------------------------------------- /web/.eslint.js: -------------------------------------------------------------------------------- 1 | // .eslintrc.js 2 | module.exports = { 3 | root: true, 4 | env: { 5 | node: true, 6 | es6: true, 7 | }, 8 | parserOptions: { ecmaVersion: 8 }, // to enable features such as async/await 9 | extends: ["eslint:recommended", "plugin:tailwind/recommended"], 10 | overrides: [ 11 | // This configuration will apply only to TypeScript files 12 | { 13 | files: ["**/*.ts", "**/*.tsx"], 14 | parser: "@typescript-eslint/parser", 15 | settings: { react: { version: "detect" } }, 16 | env: { 17 | browser: true, 18 | node: true, 19 | es6: true, 20 | }, 21 | extends: [ 22 | "plugin:tailwind/recommended", 23 | "eslint:recommended", 24 | "plugin:@typescript-eslint/recommended", // TypeScript rules 25 | "plugin:react/recommended", // React rules 26 | "plugin:react-hooks/recommended", // React hooks rules 27 | "plugin:jsx-a11y/recommended", // Accessibility rules 28 | "plugin:prettier/recommended", // Prettier plugin 29 | ], 30 | rules: { 31 | "prettier/prettier": ["error", {}, { usePrettierrc: true }], // Includes .prettierrc.js rules 32 | 33 | // We will use TypeScript's types for component props instead 34 | "react/prop-types": "off", 35 | 36 | // No need to import React when using Next.js 37 | "react/react-in-jsx-scope": "off", 38 | "react/no-unescaped-entities": "off", 39 | 40 | // This rule is not compatible with Next.js's components 41 | "jsx-a11y/anchor-is-valid": "off", 42 | "jsx-a11y/no-static-element-interactions": "off", 43 | "jsx-a11y/click-events-have-key-events": "off", 44 
| 45 | // Why would you want unused vars? 46 | "@typescript-eslint/no-unused-vars": ["error"], 47 | 48 | "@typescript-eslint/no-non-null-assertion": "off", 49 | "@typescript-eslint/no-non-null-asserted-optional-chain": "off", 50 | 51 | // I suggest this setting for requiring return types on functions only where useful 52 | "@typescript-eslint/explicit-function-return-type": [ 53 | "warn", 54 | { 55 | allowExpressions: true, 56 | allowConciseArrowFunctionExpressionsStartingWithVoid: true, 57 | }, 58 | ], 59 | }, 60 | }, 61 | ], 62 | }; 63 | -------------------------------------------------------------------------------- /web/components/Video.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | useEffect, 3 | useImperativeHandle, 4 | useRef, 5 | forwardRef, 6 | } from "react"; 7 | 8 | interface Props { 9 | device: string; 10 | onVideoSet(settings: MediaTrackSettings): void; 11 | className?: string; 12 | height?: number; 13 | width?: number; 14 | } 15 | 16 | export interface VideoRef { 17 | getFrame(): string | null; 18 | } 19 | 20 | export const Video = forwardRef( 21 | ({ device, onVideoSet, className, height, width }: Props, ref) => { 22 | 23 | const videoRef = useRef(null); 24 | 25 | useImperativeHandle(ref, () => ({ 26 | getFrame, 27 | })); 28 | 29 | const getFrame = () => { 30 | if (videoRef.current) { 31 | const m = 256; 32 | const canvas = document.createElement("canvas"); 33 | canvas.width = Math.floor( 34 | videoRef.current.videoWidth * (m / videoRef.current.videoHeight) 35 | ); 36 | canvas.height = Math.floor( 37 | videoRef.current.videoHeight * (m / videoRef.current.videoHeight) 38 | ); 39 | const ctx = canvas.getContext("2d"); 40 | if (ctx) { 41 | ctx.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height); 42 | return canvas.toDataURL(); 43 | } 44 | } 45 | return null; 46 | }; 47 | 48 | useEffect(() => { 49 | if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) { 50 | (async () => { 51 | try { 52 | let stream = await navigator.mediaDevices.getUserMedia({ 53 | video: { 54 | deviceId: { 55 | exact: device, 56 | }, 57 | }, 58 | }); 59 | const tracks = stream.getVideoTracks(); 60 | if (videoRef.current && tracks.length >= 1) { 61 | onVideoSet(tracks[0].getSettings()); 62 | videoRef.current.srcObject = stream; 63 | videoRef.current.play(); 64 | } 65 | } catch (err) { 66 | console.log(err); 67 | } 68 | })(); 69 | } 70 | }, [device]); 71 | 72 | return ( 73 | 80 | ); 81 | } 82 | ); 83 | -------------------------------------------------------------------------------- /web/pages/theme.tsx: -------------------------------------------------------------------------------- 1 | import Head from "next/head"; 2 | import Navigation from "~/components/layout/navigation" 3 | import Footer from "~/components/layout/footer" 4 | 5 | export default function Theme() { 6 | return ( 7 |
8 | 9 |
10 |
11 |
12 | image placeholder 17 |
18 |
19 |

20 | So if on advanced addition absolute received replying 21 |

22 |

23 | When, while the lovely valley teems with vapour around me, and the 24 | meridian sun strikes the waiting be females upper surface of the 25 | impenetrable foliage of my trees 26 |

27 |
28 |
29 |
30 |
31 |
32 |
33 | image placeholder 38 |
39 |
40 |

41 | So if on advanced addition absolute received replying 42 |

43 |

44 | When, while the lovely valley teems with vapour around me, and the 45 | meridian sun strikes the waiting be females upper surface of the 46 | impenetrable foliage of my trees 47 |

48 |
49 |
50 |
51 |
52 |
53 | ); 54 | } 55 | -------------------------------------------------------------------------------- /ai/model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import torch 4 | import mlflow 5 | import torch.nn as nn 6 | from typing import List 7 | from pathlib import Path 8 | import torch.optim as optim 9 | from datetime import datetime 10 | import pytorch_lightning as pl 11 | from torchvision import models 12 | import torch.nn.functional as F 13 | 14 | class RoshamboModel(pl.LightningModule): 15 | def __init__(self, classes: int, lr: float): 16 | super().__init__() 17 | 18 | self.save_hyperparameters() 19 | self.classes = classes 20 | self.lr = lr 21 | self.model_type = "resnet34" 22 | self.xfer = models.resnet34(pretrained=True) 23 | self.fc1 = nn.Linear(1000, classes) 24 | 25 | self.param_size = 0 26 | 27 | def forward(self, x): 28 | x = F.relu(self.xfer(x)) 29 | return F.softmax(self.fc1(x), dim=1) 30 | 31 | def __compute(self, batch): 32 | x, y = batch 33 | y_hat = self(x) 34 | 35 | # loss 36 | loss = F.binary_cross_entropy(y_hat, y) 37 | 38 | # accuracy 39 | _, preds = torch.max(y_hat, 1) 40 | _, truth = torch.max(y, 1) 41 | accuracy = torch.sum((preds == truth).float()).item() / len(x) 42 | 43 | return loss, accuracy 44 | 45 | def training_step(self, batch, batch_idx): 46 | loss, acc = self.__compute(batch) 47 | self.log('loss', loss, prog_bar=True) 48 | self.log('acc', acc, prog_bar=True) 49 | return loss 50 | 51 | def validation_step(self, batch, batch_idx): 52 | loss, acc = self.__compute(batch) 53 | self.log('val_loss', loss, prog_bar=True) 54 | self.log('val_acc', acc, prog_bar=True) 55 | 56 | def configure_optimizers(self): 57 | optimizer = optim.SGD(self.parameters(), lr=self.lr) 58 | scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1) 59 | return [optimizer], [scheduler] 60 | 61 | def save(self, model_dir: Path, classes: List[str]): 62 | now = datetime.now() 63 | 64 | if not model_dir.exists(): 65 | os.makedirs(str(model_dir)) 66 | 67 | self.to_onnx(model_dir / 'model.onnx', 68 | torch.rand((1,3,224,224)), 69 | export_params=True, 70 | input_names=['image'], 71 | output_names=['prediction']) 72 | 73 | file_size = os.path.getsize(str(model_dir / 'model.onnx')) 74 | 75 | param_size = sum(p.numel() for p in self.parameters()) 76 | 77 | with open(model_dir / 'meta.json', 'w') as f: 78 | f.write(json.dumps({ 79 | 'classes': classes, 80 | 'model': self.model_type, 81 | 'params': param_size, 82 | 'size': file_size, 83 | 'timestamp': datetime.now().isoformat() 84 | }, indent=4)) 85 | 86 | return (param_size, file_size) 87 | -------------------------------------------------------------------------------- /web/components/layout/navigation.tsx: -------------------------------------------------------------------------------- 1 | export const Navigation = () => { 2 | return ( 3 |
4 |
5 | 75 |
76 |
77 | ); 78 | }; 79 | 80 | export default Navigation; 81 | -------------------------------------------------------------------------------- /ai/notebooks/refactor.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 5, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import torch\n", 11 | "from torch import nn\n", 12 | "import torch.nn.functional as F\n", 13 | "from torchvision import transforms\n", 14 | "from torchvision.datasets import MNIST\n", 15 | "from torch.utils.data import DataLoader, random_split\n", 16 | "import pytorch_lightning as pl\n", 17 | "data_dir = '../data'" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 6, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "class RoshamboResnet18(pl.LightningModule):\n", 27 | " def __init__(self):\n", 28 | " super().__init__()\n", 29 | " self.model = nn.Sequential(\n", 30 | " models.resnet18(pretrained=True),\n", 31 | " nn.ReLU(),\n", 32 | " nn.Linear(1000, 256),\n", 33 | " nn.ReLU(),\n", 34 | " nn.Linear(256, len(classes)),\n", 35 | " nn.Softmax(dim=-1))\n", 36 | " \n", 37 | " def training_step(self, batch, batch_idx):\n", 38 | " loss = self.shared_step(batch)\n", 39 | " return loss\n", 40 | "\n", 41 | " def validation_step(self, batch, batch_idx):\n", 42 | " loss = self.shared_step(batch)\n", 43 | " self.log('val_loss', loss)\n", 44 | "\n", 45 | " def shared_step(self, batch):\n", 46 | " X, Y = batch\n", 47 | " # execute model\n", 48 | " outputs = model(X)\n", 49 | " # find predictions\n", 50 | " _, preds = torch.max(outputs, 1)\n", 51 | " # loss function\n", 52 | " return nn.functional.cross_entropy(outputs, Y)\n", 53 | "\n", 54 | " def configure_optimizers(self):\n", 55 | " return torch.optim.SGD(self.parameters(), lr=0.01)" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 3, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "item = train_dataset[0][0]" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": null, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [] 73 | } 74 | ], 75 | "metadata": { 76 | "kernelspec": { 77 | "display_name": "Python 3.9.12 ('.venv': venv)", 78 | "language": "python", 79 | "name": "python3" 80 | }, 81 | "language_info": { 82 | "codemirror_mode": { 83 | "name": "ipython", 84 | "version": 3 85 | }, 86 | "file_extension": ".py", 87 | "mimetype": "text/x-python", 88 | "name": "python", 89 | "nbconvert_exporter": "python", 90 | "pygments_lexer": "ipython3", 91 | "version": "3.9.12" 92 | }, 93 | "orig_nbformat": 2, 94 | "vscode": { 95 | "interpreter": { 96 | "hash": "85e382c6ec5dbec0d1089dbe27b79b8ee0e6f818b34358f492bf1fae3529bc66" 97 | } 98 | } 99 | }, 100 | "nbformat": 4, 101 | "nbformat_minor": 2 102 | } 103 | -------------------------------------------------------------------------------- /web/pages/train.tsx: -------------------------------------------------------------------------------- 1 | import Head from "next/head"; 2 | import { useState, useEffect, useRef } from "react"; 3 | import Image from "next/image"; 4 | import { Video, VideoRef } from "~/components/Video"; 5 | import DeviceSelector from "~/components/DeviceSelector"; 6 | import Theme from "~/components/layout/theme"; 7 | 8 | interface TrainingImage { 9 | key: number 10 | image: string; 11 | label: string; 12 | } 13 | 14 | export default function Train() { 15 | const [videoId, setVideoId] = useState(""); 16 
| const [settings, setSettings] = useState<MediaTrackSettings>({}); 17 | const [currentLabel, setCurrentLabel] = useState("none"); 18 | const videoRef = useRef<VideoRef>(null); 19 | const [images, setImages] = useState<TrainingImage[]>([]); 20 | 21 | const gestures = ["rock", "paper", "scissors", "none"]; 22 | 23 | const addImage = () => { 24 | if (videoRef.current) { 25 | const frame = videoRef.current?.getFrame(); 26 | if (frame) { 27 | setImages([...images, { key: images.length, image: frame, label: currentLabel }]); 28 | } 29 | } 30 | }; 31 | 32 | const removeImage = (key: number) => { 33 | setImages(images.filter((img) => img.key !== key)); 34 | } 35 | 36 | return ( 37 | 
39 |
40 | Welcome to roshambo.ai - training 41 |
42 |
43 |
44 |
45 |
46 |
47 | 48 |
49 |
50 |
59 |
60 | {gestures && 61 | gestures.map((g) => ( 62 | 63 | 73 | 74 | ))} 75 |
76 |
77 | 83 |
84 |
85 |
86 | {images && 87 | images.map((g) => ( 88 |
removeImage(g.key)}> 89 | {g.label} 90 | Key: {g.key} Label:{g.label} 91 |
92 | ))} 93 |
94 |
95 |
96 |
97 | ); 98 | } 99 | -------------------------------------------------------------------------------- /web/pages/index.tsx: -------------------------------------------------------------------------------- 1 | import Head from "next/head"; 2 | import { useState, useEffect, useRef } from "react"; 3 | import Image from "next/image"; 4 | import { Video, VideoRef } from "~/components/Video"; 5 | import DeviceSelector from "~/components/DeviceSelector"; 6 | import Theme from "~/components/layout/theme"; 7 | 8 | interface Scores { 9 | none: number; 10 | paper: number; 11 | rock: number; 12 | scissors: number; 13 | } 14 | 15 | interface Prediction { 16 | time: number; 17 | prediction: string; 18 | scores: Scores; 19 | timestamp: string; 20 | model_update: string; 21 | message: string; 22 | } 23 | 24 | export default function Home() { 25 | const [videoId, setVideoId] = useState(""); 26 | const [settings, setSettings] = useState({}); 27 | const [prediction, setPrediction] = useState(null); 28 | const image = useRef(null); 29 | const videoRef = useRef (null); 30 | 31 | const setFrame = (frame: string) => { 32 | (async () => { 33 | const options: RequestInit = { 34 | method: "POST", 35 | body: JSON.stringify({ image: frame }), 36 | headers: { 37 | "Content-Type": "application/json", 38 | }, 39 | }; 40 | 41 | image.current && (image.current.src = frame); 42 | const response = await fetch("/api/analyze", options); 43 | const pred: Prediction = await response.json(); 44 | setPrediction(pred); 45 | })(); 46 | }; 47 | 48 | const handleSubmit = () => { 49 | if (videoRef.current) { 50 | const frame = videoRef.current?.getFrame(); 51 | if (frame) { 52 | setFrame(frame); 53 | } 54 | } 55 | }; 56 | 57 | return ( 58 | 59 |
60 |
61 | Welcome to roshambo.ai 62 |
63 |
64 |
65 |
66 |
67 |
68 | 74 |
75 |
76 | 77 |
78 |
79 |
88 |
89 |
90 |
91 | current 96 |
97 |
98 | {prediction?.prediction} 99 |
100 |
    101 |
  • 102 | none: {((prediction?.scores.none ?? 0) * 100).toFixed(2)}% 103 |
  • 104 |
  • 105 | paper: {((prediction?.scores.paper ?? 0) * 100).toFixed(2)}% 106 |
  • 107 |
  • 108 | rock: {((prediction?.scores.rock ?? 0) * 100).toFixed(2)}% 109 |
  • 110 |
  • 111 | scissors:{" "} 112 | {((prediction?.scores.scissors ?? 0) * 100).toFixed(2)}% 113 |
  • 114 |
115 |
116 |
117 |
118 |
119 | 125 |
126 |
127 | ); 128 | } 129 | -------------------------------------------------------------------------------- /web/components/layout/footer.tsx: -------------------------------------------------------------------------------- 1 | export const Footer = () => { 2 | return ( 3 | 63 | ); 64 | }; 65 | 66 | export default Footer; 67 | -------------------------------------------------------------------------------- /.github/workflows/aml-training.yml: -------------------------------------------------------------------------------- 1 | name: roshambo.ai training 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | push: 7 | branches: 8 | - main 9 | paths: 10 | - 'ai/**.py' 11 | - 'ai/**.yml' 12 | - 'ai/**.yaml' 13 | 14 | env: 15 | APP_NAME: roshambo 16 | 17 | jobs: 18 | experiment: 19 | runs-on: ubuntu-latest 20 | 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v3 24 | 25 | - name: Setup Python 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: 3.9 29 | 30 | - name: add az ml extension 31 | run: | 32 | az extension add -n ml -y 33 | 34 | - name: azure login 35 | uses: azure/login@v1 36 | with: 37 | creds: ${{secrets.AZURE_TOKEN}} 38 | 39 | - name: set aml defaults 40 | run: | 41 | az config set defaults.workspace=${{secrets.AML_WORKSPACE}} 42 | az config set defaults.group=${{secrets.AML_RG}} 43 | 44 | - name: Run Job 45 | run: | 46 | az ml job create --file ./ai/cloud/job.yml --name ${APP_NAME}_${GITHUB_RUN_ID} --stream 47 | 48 | register: 49 | needs: [experiment] 50 | runs-on: ubuntu-latest 51 | steps: 52 | - name: Checkout 53 | uses: actions/checkout@v3 54 | 55 | - name: Setup Python 56 | uses: actions/setup-python@v4 57 | with: 58 | python-version: 3.9 59 | 60 | - name: add az ml extension 61 | run: | 62 | az extension add -n ml -y 63 | 64 | - name: azure login 65 | uses: azure/login@v1 66 | with: 67 | creds: ${{secrets.AZURE_TOKEN}} 68 | 69 | - name: set aml defaults 70 | run: | 71 | az config set defaults.workspace=${{secrets.AML_WORKSPACE}} 72 | az config set defaults.group=${{secrets.AML_RG}} 73 | - name: Register Model 74 | id: register 75 | run: | 76 | LAST_MODEL_VER=0 77 | MODEL_EXISTS=$(az ml model list -o tsv --query "[?name=='${APP_NAME}-model'][name]" | wc -l) 78 | if [[ MODEL_EXISTS -eq 1 ]]; then 79 | LAST_MODEL_VER=$(az ml model list -n ${APP_NAME}-model --query "[].version.to_number(@) | max(@)" | tr -d "\r") 80 | fi 81 | # create model from run output 82 | az ml model create --name roshambo-model --version $((LAST_MODEL_VER+1)) --path azureml://jobs/${APP_NAME}_${GITHUB_RUN_ID}/outputs/artifacts/outputs/model/ 83 | 84 | endpoint: 85 | runs-on: ubuntu-latest 86 | steps: 87 | - name: Checkout 88 | uses: actions/checkout@v3 89 | 90 | - name: Setup Python 91 | uses: actions/setup-python@v4 92 | with: 93 | python-version: 3.9 94 | 95 | - name: add az ml extension 96 | run: | 97 | az extension add -n ml -y 98 | 99 | - name: azure login 100 | uses: azure/login@v1 101 | with: 102 | creds: ${{secrets.AZURE_TOKEN}} 103 | 104 | - name: set aml defaults 105 | run: | 106 | az config set defaults.workspace=${{secrets.AML_WORKSPACE}} 107 | az config set defaults.group=${{secrets.AML_RG}} 108 | - id: deployments 109 | name: check endpoint 110 | run: | 111 | ENDPOINT_EXISTS=$(az ml online-endpoint list -o tsv --query "[?name=='${APP_NAME}-app'][name]" | wc -l) 112 | if [[ ENDPOINT_EXISTS -ne 1 ]]; then 113 | az ml online-endpoint create -n ${APP_NAME}-app -f ./ai/cloud/deploy/endpoint.yml 114 | else 115 | echo "endpoint exists" 116 | fi 117 | 118 | deployment: 119 | needs: 
[endpoint, register] 120 | runs-on: ubuntu-latest 121 | steps: 122 | - name: Checkout 123 | uses: actions/checkout@v3 124 | 125 | - name: Setup Python 126 | uses: actions/setup-python@v4 127 | with: 128 | python-version: 3.7 129 | 130 | - name: add az ml extension 131 | run: | 132 | az extension add -n ml -y 133 | 134 | - name: azure login 135 | uses: azure/login@v1 136 | with: 137 | creds: ${{secrets.AZURE_TOKEN}} 138 | 139 | - name: set aml defaults 140 | run: | 141 | az config set defaults.workspace=${{secrets.AML_WORKSPACE}} 142 | az config set defaults.group=${{secrets.AML_RG}} 143 | - name: Add Deployment 144 | run: | 145 | 146 | # move scoring script to deployment folder 147 | cp ./ai/score.py ./ai/cloud/deploy/score.py 148 | 149 | # create deployment 150 | az ml online-deployment create --name ${APP_NAME}-${GITHUB_RUN_ID} --endpoint ${APP_NAME}-app -f ./ai/cloud/deploy/deployment.yml 151 | 152 | # if PROD does not exist, mark this as prod and exit 153 | PROD_DEPLOYMENT=$(az ml online-endpoint show -n ${APP_NAME}-app -o tsv --query "tags.PROD_DEPLOYMENT") 154 | if [[ -z "$PROD_DEPLOYMENT" ]]; then 155 | # tag the current deployment as prod and set traffic to 100% 156 | az ml online-endpoint update --name ${APP_NAME}-app --traffic "${APP_NAME}-${GITHUB_RUN_ID}=100" --set tags.PROD_DEPLOYMENT=${APP_NAME}_${GITHUB_RUN_ID} 157 | else 158 | # modify traffic to siphon 10% to new deployment 159 | az ml online-endpoint update -n ${APP_NAME}-app --traffic "$PROD_DEPLOYMENT=90 ${APP_NAME}-${GITHUB_RUN_ID}=10" 160 | fi 161 | -------------------------------------------------------------------------------- /ai/score.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import time 5 | import base64 6 | import logging 7 | import requests 8 | import datetime 9 | import numpy as np 10 | from PIL import Image 11 | from io import BytesIO 12 | from pathlib import Path 13 | import onnxruntime as rt 14 | from torchvision import transforms 15 | from inference_schema.schema_decorators import input_schema, output_schema 16 | from inference_schema.parameter_types.standard_py_parameter_type import StandardPythonParameterType 17 | 18 | session, transform, classes, input_name, model_stamp = None, None, None, None, None 19 | logger = logging.getLogger() 20 | 21 | def init(): 22 | global session, transform, classes, input_name, logger, model_stamp 23 | logger.info('Attempting to load model artifacts') 24 | 25 | if 'AZUREML_MODEL_DIR' in os.environ: 26 | logger.info('using AZUREML_MODEL_DIR') 27 | root_dir = Path(os.environ['AZUREML_MODEL_DIR']).resolve() / 'model' 28 | else: 29 | logger.info('using local') 30 | root_dir = Path('outputs/model').absolute().resolve() 31 | 32 | logger.info(f'using model path {root_dir}') 33 | meta_file = root_dir / 'meta.json' 34 | model_file = root_dir / 'model.onnx' 35 | logger.info(f'metadata path: {meta_file}') 36 | logger.info(f'model path: {model_file}') 37 | 38 | logger.info('loading metadata') 39 | with open(meta_file, 'r') as f: 40 | model_meta = json.load(f) 41 | logger.info(f'metadata load complete: {model_meta}') 42 | 43 | classes = model_meta['classes'] 44 | model_stamp = model_meta['timestamp'] 45 | 46 | logger.info('loading model') 47 | session = rt.InferenceSession(str(model_file), 48 | providers=['CUDAExecutionProvider']) 49 | input_name = session.get_inputs()[0].name 50 | logger.info(f'model load complete (entry: {input_name})') 51 | transform = transforms.Compose([ 52 | 
transforms.Resize(256), 53 | transforms.CenterCrop(224), 54 | transforms.ToTensor(), 55 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 56 | ]) 57 | 58 | logger.info(f'transforms initialized') 59 | logger.info(f'init complete!') 60 | 61 | @input_schema('image', StandardPythonParameterType("https://aiadvocate.z5.web.core.windows.net/scissors.png")) 62 | @output_schema(StandardPythonParameterType({ 63 | 'time': StandardPythonParameterType(0.060392), 64 | 'prediction': StandardPythonParameterType("paper"), 65 | 'scores': StandardPythonParameterType({ 66 | 'none': StandardPythonParameterType(0.20599432), 67 | 'paper': StandardPythonParameterType(0.31392053), 68 | 'rock': StandardPythonParameterType(0.2621823), 69 | 'scissors': StandardPythonParameterType(0.21790285) 70 | }), 71 | 'timestamp': StandardPythonParameterType(datetime.datetime.now().isoformat()), 72 | 'model_update': StandardPythonParameterType(datetime.datetime.now().isoformat()), 73 | 'message': StandardPythonParameterType("Success!") 74 | })) 75 | def run(image): 76 | global session, transform, classes, input_name, logger, model_stamp 77 | 78 | print('starting inference clock') 79 | prev_time = time.time() 80 | 81 | # process data 82 | try: 83 | if image.startswith('http'): 84 | print(f'loading web image {image}') 85 | response = requests.get(image) 86 | img = Image.open(BytesIO(response.content)) 87 | elif image.startswith('data:image'): 88 | data = image.split(",") 89 | print(f'loading base64 image {data[0]}') 90 | b64image = base64.b64decode(data[1]) 91 | img = Image.open(BytesIO(b64image)) 92 | else: 93 | print(f'loading base64 image') 94 | b64image = base64.b64decode(image) 95 | img = Image.open(BytesIO(b64image)) 96 | 97 | v = transform(img.convert('RGB')) 98 | 99 | # predict with model 100 | print('pre-prediction') 101 | pred_onnx = session.run(None, {input_name: v.unsqueeze(0).numpy()})[0][0] 102 | print('prediction complete') 103 | 104 | predictions = {} 105 | for i in range(len(classes)): 106 | predictions[classes[i]] = float(pred_onnx[i]) 107 | 108 | print('preparing payload') 109 | payload = { 110 | 'time': float(0), 111 | 'prediction': classes[int(np.argmax(pred_onnx))], 112 | 'scores': predictions, 113 | 'timestamp': datetime.datetime.now().isoformat(), 114 | 'model_update': model_stamp, 115 | 'message': 'Success!' 
116 | } 117 | 118 | except Exception as e: 119 | predictions = {} 120 | for i in range(len(classes)): 121 | predictions[classes[i]] = float(0) 122 | 123 | print('preparing payload') 124 | payload = { 125 | 'time': float(0), 126 | 'prediction': "none", 127 | 'scores': predictions, 128 | 'timestamp': datetime.datetime.now().isoformat(), 129 | 'model_update': model_stamp, 130 | 'message': f'{e}' 131 | } 132 | 133 | current_time = time.time() 134 | print('stopping clock') 135 | inference_time = datetime.timedelta(seconds=current_time - prev_time) 136 | payload['time'] = float(inference_time.total_seconds()) 137 | 138 | print(f'payload: {json.dumps(payload)}') 139 | print('inference complete') 140 | 141 | return payload 142 | 143 | 144 | if __name__ == '__main__': 145 | init() 146 | def inf(uri, truth): 147 | print(f'---->Inference with {truth}:') 148 | o = run(uri) 149 | print(json.dumps(o, indent=4)) 150 | print(f'---->End Inference with [{truth}] => [{o["prediction"]}]') 151 | 152 | inf('https://aiadvocate.z5.web.core.windows.net/rock.png', 'rock') 153 | inf('https://aiadvocate.z5.web.core.windows.net/paper.png', 'paper') 154 | inf('https://aiadvocate.z5.web.core.windows.net/scissors.png', 'scissors') 155 | inf('bad_uri', 'Bad Uri') 156 | 157 | with open('testimage.txt', 'r') as f: 158 | img = f.read() 159 | inf(img, 'rock') 160 | 161 | 162 | with open('fullimage.txt', 'r') as f: 163 | img = f.read() 164 | inf(img, 'none') 165 | --------------------------------------------------------------------------------
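Once the `aml-training.yml` workflow has registered a model and created the managed online deployment, the endpoint accepts the same payload shape used by `ai/cloud/request.json` and by the web app's `/api/analyze` route. A minimal smoke-test sketch — the scoring URI and key below are placeholders; the real values come from `az ml online-endpoint show -n roshambo-app` and `az ml online-endpoint get-credentials -n roshambo-app`:

```bash
# placeholders — substitute your endpoint's scoring URI and key
ENDPOINT_URI="https://<your-endpoint>.<region>.inference.ml.azure.com/score"
ENDPOINT_KEY="<your-endpoint-key>"

curl -X POST "$ENDPOINT_URI" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $ENDPOINT_KEY" \
  -d @ai/cloud/request.json
```

The response follows the schema declared in `ai/score.py`: a `prediction` label, per-class `scores`, the inference `time`, and the timestamp of the model build.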