├── .gitignore
├── .gitmodules
├── python
│   ├── sqlflow_playground
│   │   ├── __init__.py
│   │   ├── k8s.py
│   │   ├── server.py
│   │   └── playground_server_design.md
│   ├── _version.py
│   ├── gen_cert.sh
│   └── setup.py
├── Vagrantfile
├── figures
│   ├── arch.dot
│   ├── arch_vm.dot
│   ├── arch.svg
│   └── arch_vm.svg
├── release.sh
├── index.html
├── README.md
├── play.sh
├── provision.bash
├── dev.md
└── start.bash

/.gitignore:
--------------------------------------------------------------------------------
*.log
*~
.vagrant

--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
[submodule "sqlflow"]
	path = sqlflow
	url = https://github.com/sql-machine-learning/sqlflow

--------------------------------------------------------------------------------
/python/sqlflow_playground/__init__.py:
--------------------------------------------------------------------------------
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

--------------------------------------------------------------------------------
/python/_version.py:
--------------------------------------------------------------------------------
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

VERSION = (0, 1, 0, 'dev')

__version__ = '.'.join(map(str, VERSION))

--------------------------------------------------------------------------------
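`_version.py` is the single source of the package version (the tuple above
joins to `0.1.0.dev`). A sketch of building and installing the package
locally, assuming the conventional setuptools workflow that `setup.py` (not
shown in this dump) implies:

```bash
# Sketch only: assumes python/setup.py reads __version__ from _version.py,
# as the layout suggests.
cd python
pip3 install wheel
python3 setup.py sdist bdist_wheel
pip3 install dist/*.whl
```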
/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/bionic64"
  config.vm.provision "shell", path: "provision.bash"

  # Enlarge the disk size from the default '10G' to '20G'.
  # This needs the vagrant-disksize plugin, which is installed in play.sh.
  config.disksize.size = '20GB'

  # Don't forward 22. Even if we did, the exposed port would only bind
  # to 127.0.0.1, not 0.0.0.0. The other ports bind to all IPs.
  config.vm.network "forwarded_port", guest: 3306, host: 3306,
    auto_correct: true
  config.vm.network "forwarded_port", guest: 50051, host: 50051,
    auto_correct: true
  # Jupyter Notebook
  config.vm.network "forwarded_port", guest: 8888, host: 8888,
    auto_correct: true
  # minikube dashboard
  config.vm.network "forwarded_port", guest: 9000, host: 9000,
    auto_correct: true
  # Argo dashboard
  config.vm.network "forwarded_port", guest: 9001, host: 9001,
    auto_correct: true

  config.vm.provider "virtualbox" do |v|
    v.memory = 8192
    v.cpus = 4
  end

  # Bind the host directory ./ into the VM.
  config.vm.synced_folder "./", "/home/vagrant/desktop"
end

--------------------------------------------------------------------------------
/figures/arch.dot:
--------------------------------------------------------------------------------
digraph G {
    node [shape=box];

    User1 [shape=oval, label="Lily"];
    User2 [shape=oval, label="Bob"];
    User3 [shape=oval, label="Eva"];

    {rank = same; User1; User2; User3}

    Browser1 [label="Web browser"];
    Browser2 [label="Web browser"];

    {rank = same; Browser1, Browser2, Client}

    Jupyter [label="Jupyter Notebook server +\n SQLFlow magic command"];
    SQLFlow [label="SQLFlow server"];
    Argo [label="Tekton on Kubernetes\n(each workflow step is a container)"];
    AI [label="AI engine\n(Alibaba PAI, KubeFlow+Kubernetes, etc.)"];
    DBMS [label="database system\n(Hive, MySQL, MaxCompute, etc.)"];

    User1 -> Browser1;
    User2 -> Browser2;
    Browser1 -> Jupyter [label="SQL/Flow program"];
    Browser2 -> Jupyter;

    Jupyter -> SQLFlow [label="SQL/Flow program"];
    SQLFlow -> Argo [label="Argo workflow"];
    Argo -> DBMS [label="submit SQL statement"];
    Argo -> AI [label="submit AI job"];
    Argo -> DBMS [label="verify data schema"];

    Client [label="sqlflow command-line client"];

    User3 -> Client;
    Client -> SQLFlow [label="SQL/Flow program"];
}

--------------------------------------------------------------------------------
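The `.dot` files are Graphviz sources for the checked-in SVG figures; with
Graphviz installed, the figures can be re-rendered like so:

```bash
# Regenerate the architecture figures from their Graphviz sources.
dot -Tsvg figures/arch.dot    -o figures/arch.svg
dot -Tsvg figures/arch_vm.dot -o figures/arch_vm.svg
```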
/release.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


echo "Stopping the VM ..."
vagrant halt
echo "Done."

echo "Finding the playground VM ..."
vm=$(VBoxManage list vms | grep "playground_default" | head -1)
if [[ ! "$vm" =~ playground_default* ]]; then
    echo "No playground virtual machine found."
    exit 1
fi
vm=$(echo $vm | awk -F"\"" '{print $2}')
echo "Found $vm ."

echo "Removing shared folders ..."
VBoxManage sharedfolder remove "$vm" --name home_vagrant_desktop
VBoxManage sharedfolder remove "$vm" --name vagrant
echo "Done."

echo "Rebinding the serial port to a file and disabling it, because it does not work on Windows ..."
VBoxManage modifyvm "$vm" --uartmode1 file /tmp/playground.log
VBoxManage modifyvm "$vm" --uart1 off
echo "Done."

echo "Exporting the VM ..."
VBoxManage export "$vm" -o SQLFlowPlayground.ova
echo "Done."

--------------------------------------------------------------------------------
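release.sh leaves an `SQLFlowPlayground.ova` appliance in the working
directory. As a usage sketch, a recipient would import it with stock
VirtualBox tooling; the imported VM's name is a placeholder here:

```bash
# Import and boot the exported appliance on the target machine.
VBoxManage import SQLFlowPlayground.ova
VBoxManage list vms                              # find the imported VM's name
VBoxManage startvm "<imported-vm-name>" --type headless
```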
/figures/arch_vm.dot:
--------------------------------------------------------------------------------
digraph G {
    node [shape=box];

    User1 [shape=oval, label="Lily"];
    User2 [shape=oval, label="Bob"];
    User3 [shape=oval, label="Eva"];

    {rank = same; User1; User2; User3}

    Browser1 [label="Web browser"];
    Browser2 [label="Web browser"];

    {rank = same; Browser1, Browser2, Client}

    subgraph cluster_vm {
        label="VM"
        subgraph cluster_container {
            label="sqlflow/sqlflow:latest";
            Jupyter [label="Jupyter Notebook server +\n SQLFlow magic command"];
            SQLFlow [label="SQLFlow server"];
            DBMS [label="MySQL"];
        }
        subgraph cluster_minikube {
            label="minikube";
            Argo [label="Argo"];
            AI [label="AI engine:\ncontainer-local run"];
        }
    }

    User1 -> Browser1;
    User2 -> Browser2;
    Browser1 -> Jupyter [label="SQL/Flow program"];
    Browser2 -> Jupyter;

    Jupyter -> SQLFlow [label="SQL/Flow program"];
    SQLFlow -> Argo [label="Argo workflow"];
    Argo -> DBMS [label="submit SQL statement"];
    Argo -> AI [label="submit AI job"];
    Argo -> DBMS [label="verify data schema"];

    Client [label="sqlflow command-line client"];

    User3 -> Client;
    Client -> SQLFlow [label="SQL/Flow program"];
}

--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
[… HTML markup lost in extraction; the page is a redirect whose visible text reads: …]
Redirecting to https://github.com/sql-machine-learning/playground

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Release SQLFlow Desktop Distribution as a VM Image

This is an experimental work to check deploying the whole
[SQLFlow](https://sqlflow.org/sqlflow) service mesh on a Windows, Linux,
or macOS desktop.

The general architecture of SQLFlow is as follows:

![](figures/arch.svg)

In this deployment, we have the Jupyter Notebook server, the SQLFlow server,
and MySQL running in a container executing the
`sqlflow/sqlflow:latest` image. Argo runs on a minikube cluster
running in the VM. The deployment is shown in the following figure:

![](figures/arch_vm.svg)

I chose this deployment plan for the following reasons:

1. We don't have a well-written local workflow engine, and for the
   moment we need to focus on the Kubernetes-native engine.
   So, we use minikube and install Argo on minikube.

1. We could install minikube directly on users' desktop computers
   running Windows, Linux, or macOS. However, writing a shell script to
   do that requires us to consider many edge cases. To have a clean
   deployment environment, I introduced a VM.

1. To make the VM manageable in a programmatic way, I used Vagrant.
   Please be aware that Vagrant is the only software users need to
   install to use SQLFlow on their desktop computers, and Vagrant
   provides official support for Windows, Linux, and macOS.

1. We could run the SQLFlow server container (`sqlflow/sqlflow:latest`)
   on minikube as well, but that would make it harder to export ports.
   Running the container directly in the VM but outside of minikube, we

   1. expose the in-container ports by adding an `EXPOSE` statement to the
      Dockerfile, and
   1. expose the Docker ports for access from outside the VM by
      adding the following code snippet to the Vagrantfile.

   ```ruby
   config.vm.network "forwarded_port", guest: 3306, host: 3306
   config.vm.network "forwarded_port", guest: 50051, host: 50051
   config.vm.network "forwarded_port", guest: 8888, host: 8888
   ```

--------------------------------------------------------------------------------
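Putting the README together: with only Vagrant (and VirtualBox) installed,
bringing the playground up from a checkout looks roughly like the following
sketch. The exact entry point is an assumption; see play.sh and dev.md for
the supported flow.

```bash
# Sketch: bring up the playground from a fresh checkout. --recursive pulls
# the sqlflow submodule declared in .gitmodules; play.sh installs the
# vagrant-disksize plugin the Vagrantfile requires before booting the VM.
git clone --recursive https://github.com/sql-machine-learning/playground
cd playground
./play.sh        # or: vagrant plugin install vagrant-disksize && vagrant up
vagrant ssh      # provision.bash copies start.bash to /root inside the VM
```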
/play.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -e

cat <[… the heredoc body and the following lines were lost in extraction;
surviving fragments: `>/dev/null; then` and `cat <` …]

--------------------------------------------------------------------------------
/python/gen_cert.sh:
--------------------------------------------------------------------------------
[… this file was mostly lost in extraction; the surviving fragments
`ca_cert.conf <` and `${target}.conf <` are heredocs that appear to write
openssl config files for the CA and for the target certificate …]

--------------------------------------------------------------------------------
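Most of `gen_cert.sh` is lost above, but the surviving `ca_cert.conf` and
`${target}.conf` fragments, together with the `key_store/...` paths used in
the design doc below, suggest a conventional openssl CA flow. A minimal
sketch of what `gen_cert.sh server` plausibly does; every path and flag here
is an assumption, not the original script:

```bash
# Hypothetical reconstruction, NOT the original gen_cert.sh: create a CA
# once, then issue a certificate for "$target" (e.g. "server", "some_client").
target=$1
mkdir -p key_store/ca "key_store/$target"
openssl genrsa -out key_store/ca/ca.key 2048
openssl req -x509 -new -nodes -key key_store/ca/ca.key -days 3650 \
    -subj "/CN=sqlflow-playground-ca" -out key_store/ca/ca.crt
openssl genrsa -out "key_store/$target/$target.key" 2048
openssl req -new -key "key_store/$target/$target.key" \
    -subj "/CN=$target" -out "key_store/$target/$target.csr"
openssl x509 -req -in "key_store/$target/$target.csr" \
    -CA key_store/ca/ca.crt -CAkey key_store/ca/ca.key -CAcreateserial \
    -days 365 -out "key_store/$target/$target.crt"
```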
/provision.bash:
--------------------------------------------------------------------------------
[… the beginning of this file was lost in extraction …]
[…] >/etc/apt/sources.list
apt-get update

echo "Installing Docker ..."
# c.f. https://dockr.ly/3cExcay
if which docker > /dev/null; then
    echo "Docker is already installed. Skip."
else
    best_install_url=$(find_fastest_docker_url)
    docker_ce_mirror=$(find_fastest_docker_ce_mirror)
    echo "Using ${best_install_url}..."
    curl -sSL "${best_install_url}" | DOWNLOAD_URL=$docker_ce_mirror bash -
    best_docker_mirror=$(find_fastest_docker_registry)
    if [[ -n "${best_docker_mirror}" ]]; then
        mkdir -p /etc/docker
        cat <<-EOF >/etc/docker/daemon.json
        {
            "graph": "/mnt/docker-data",
            "storage-driver": "overlay",
            "registry-mirrors":[ "${best_docker_mirror}" ]
        }
        EOF
    fi
    usermod -aG docker vagrant
fi
echo "Done."

echo "Installing axel ..."
if which axel > /dev/null; then
    echo "axel is already installed. Skip."
else
    $VAGRANT_SHARED_FOLDER/sqlflow/scripts/travis/install_axel.sh
fi

echo "Exporting Kubernetes environment variables ..."
# NOTE: According to https://stackoverflow.com/a/16619261/724872,
# using source here is necessary.
source $VAGRANT_SHARED_FOLDER/sqlflow/scripts/travis/export_k8s_vars.sh

echo "Installing kubectl ..."
if which kubectl > /dev/null; then
    echo "kubectl is already installed. Skip."
else
    $VAGRANT_SHARED_FOLDER/sqlflow/scripts/travis/install_kubectl.sh
fi
echo "Done."

echo "Installing minikube ..."
if which minikube > /dev/null; then
    echo "minikube is already installed. Skip."
else
    $VAGRANT_SHARED_FOLDER/sqlflow/scripts/travis/install_minikube.sh
    minikube config set WantUpdateNotification false
fi
echo "Done."

echo "Copying files ..."
# In non-develop mode, we want the user to see start.bash
# immediately after she/he logs on to the VM.
cp "$VAGRANT_SHARED_FOLDER/start.bash" "/root/"

read -r -d '\t' files <[… the rest of provision.bash was lost in extraction …]

--------------------------------------------------------------------------------
/python/sqlflow_playground/playground_server_design.md:
--------------------------------------------------------------------------------
[… the beginning of this document was lost in extraction; it resumes inside an
architecture diagram …]

```
             |                                |
Clients <--> Playground Server <--> Playground[SQLFlow Server, MySQL Server...]
```

## Supported API

The request URL path is composed of the prefix `/api/` and the API name, for
example:

```url
https://playground.sqlflow.tech/api/heart_beat
```

This service always uses `HTTPS` and only accepts authorized clients,
verified by their certificate files, so there is no dedicated API for user
authentication.

Currently supported APIs are:

| name | method | params | description |
| - | - | - | - |
| create_db | POST | {"user_id": "id"} | create a DB for the given user (JSON body) |
| heart_beat | GET | user_id=id | report a heartbeat for the given client |


## How to Use

### For Service Maintainer

The maintainer should [provide the playground cluster](../dev.md) and boot up
a `SQLFlow Playground Server`. The server should have the privilege to access
the cluster's `kubectl` command. To install the server, the maintainer can
use the commands below:

```bash
mkdir $HOME/workspace
cd $HOME/workspace
pip install sqlflow_playground
mkdir key_store
gen_cert.sh server
sqlflow_playground --port=50052 \
  --ca_crt=key_store/ca/ca.crt \
  --server_key=key_store/server/server.key \
  --server_crt=key_store/server/server.crt
```

In the above commands, we first install the sqlflow_playground package, which
carries the main cluster-operation logic. Then we use the key tool to
generate a server certificate (not necessary, of course, if you have your own
certificate files), which enables us to serve over `HTTPS`. Finally, we start
the `REST API` service at port 50052.

Our playground service uses bi-directional TLS validation, so the maintainer
needs to generate a certificate file for each trusted user. Use the command
below and send the generated `.crt` and `.key` files, together with `ca.crt`,
to the user.

```bash
gen_cert.sh some_client
```
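For illustration, an authorized call against the two APIs above would look
like the following; the host name mirrors the earlier example, the user id is
a placeholder, and the certificate paths are whatever `gen_cert.sh` produced
for the client:

```bash
# Mutual-TLS calls against the playground REST API (illustrative values).
curl --cacert ca.crt --cert some_client.crt --key some_client.key \
    "https://playground.sqlflow.tech/api/heart_beat?user_id=some_client"

curl --cacert ca.crt --cert some_client.crt --key some_client.key \
    -H "Content-Type: application/json" \
    --data '{"user_id": "some_client"}' \
    https://playground.sqlflow.tech/api/create_db
```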
### For The User

To use this service, the user must be authorized by the playground's
maintainer. In detail, the user should get the `ca.crt`, `client.key`, and
`client.crt` files from the maintainer and keep them in a safe place. The
user should also ask the maintainer for the SQLFlow server address and the
SQLFlow playground server address. Then the user installs Jupyter Notebook
and the SQLFlow plugin package and does some configuration. Finally, the user
can experience SQLFlow in their Jupyter Notebook.

```bash
pip3 install notebook sqlflow==0.15.0

cat >$HOME/.sqlflow_playground.env <
```

[… the heredoc body and a large following span (including the rest of this
document, dev.md, and the rest of the python package) were lost in
extraction; the dump resumes midway through start.bash …]

--------------------------------------------------------------------------------
/start.bash:
--------------------------------------------------------------------------------
[… the beginning of this file was lost in extraction …]
    […] >/dev/null 2>&1 &
fi
wait_or_exit "Kubernetes Dashboard" "is_pod_ready kubernetes-dashboard k8s-app=kubernetes-dashboard" "yes"

echo "Starting SQLFlow ..."
sqlflow_alive=$(is_pod_ready "default" "app=sqlflow-server")
if [[ "$sqlflow_alive" == "yes" ]]; then
    echo "Already running."
else
    kubectl apply -f $filebase/install-sqlflow.yaml
fi
wait_or_exit "SQLFlow" "is_pod_ready default app=sqlflow-server" "yes"

# Kill port forwarding if it already exists.
function stop_expose() {
    ps -elf | grep "kubectl port-forward" | grep "$1" | grep "$2" | awk '{print $4}' | xargs kill >/dev/null 2>&1
}

# Kubernetes port-forwarding:
# "$1" should be the namespace,
# "$2" should be the resource, e.g. service/argo-server,
# "$3" should be the port mapping, e.g. 8000:80.
function expose() {
    stop_expose "$2" "$3"
    echo "Exposing port for $2 at $3 ..."
    nohup kubectl port-forward -n $1 --address='0.0.0.0' $2 $3 >>port-forward-log 2>&1 &
}

# NOTE: after re-deploying SQLFlow we have to re-expose the service ports.
expose kubernetes-dashboard service/kubernetes-dashboard 9000:80
expose argo service/argo-server 9001:2746
expose default pod/sqlflow-server 8888:8888
expose default pod/sqlflow-server 3306:3306
expose default pod/sqlflow-server 50051:50051
expose default pod/sqlflow-server 50055:50055

# Get Jupyter Notebook's token; in single-user mode we disabled token checking.
# jupyter_addr=$(kubectl logs pod/sqlflow-server notebook | grep -o -E "http://127.0.0.1[^?]+\?token=.*" | head -1)
mysql_addr="mysql://root:root@tcp($(kubectl get -o jsonpath='{.status.podIP}' pod/sqlflow-server))/?maxAllowedPacket=0"

echo -e "
\033[32m
Congratulations, SQLFlow playground is up!

Access Jupyter Notebook at: http://localhost:8888
Access Kubernetes Dashboard at: http://localhost:9000
Access Argo Dashboard at: http://localhost:9001
Access SQLFlow with cli: ./sqlflow --data-source="\"$mysql_addr\""
Access SQLFlow Model Zoo at: localhost:50055

Stop minikube with: minikube stop
Stop the vagrant VM with: vagrant halt

[Dangerous]
Destroy minikube with: minikube delete && rm -rf ~/.minikube
Destroy the vagrant VM with: vagrant destroy
\033[0m
"

--------------------------------------------------------------------------------
/figures/arch.svg:
--------------------------------------------------------------------------------
[Graphviz-rendered figure: the SQLFlow architecture diagram generated from
figures/arch.dot; SVG markup omitted.]

--------------------------------------------------------------------------------
/figures/arch_vm.svg:
--------------------------------------------------------------------------------
[Graphviz-rendered figure: the VM deployment diagram generated from
figures/arch_vm.dot; SVG markup omitted.]

--------------------------------------------------------------------------------