├── image ├── aws-architect.png ├── aws-developer.png └── machine-learning-certificate.png ├── mobile_apps ├── mobile_bert │ ├── .DS_Store │ ├── assets │ │ ├── .DS_Store │ │ ├── images │ │ │ ├── icon.png │ │ │ ├── tfjs.jpg │ │ │ ├── splash.png │ │ │ ├── robot-dev.png │ │ │ └── robot-prod.png │ │ └── fonts │ │ │ └── SpaceMono-Regular.ttf │ ├── babel.config.js │ ├── .expo │ │ ├── settings.json │ │ └── packager-info.json │ ├── README.md │ ├── components │ │ ├── StyledText.js │ │ ├── __tests__ │ │ │ ├── StyledText-test.js │ │ │ └── __snapshots__ │ │ │ │ └── StyledText-test.js.snap │ │ └── TabBarIcon.js │ ├── constants │ │ ├── Layout.js │ │ └── Colors.js │ ├── navigation │ │ ├── useLinking.js │ │ └── BottomTabNavigator.js │ ├── app.json │ ├── package.json │ ├── App.js │ └── screens │ │ ├── LinksScreen.js │ │ ├── HomeScreen.js │ │ └── QnaScreen.js ├── objects_detection │ ├── assets │ │ ├── .DS_Store │ │ ├── images │ │ │ ├── icon.png │ │ │ ├── splash.png │ │ │ ├── tfjs.jpg │ │ │ ├── robot-dev.png │ │ │ └── robot-prod.png │ │ └── fonts │ │ │ └── SpaceMono-Regular.ttf │ ├── babel.config.js │ ├── components │ │ ├── StyledText.js │ │ ├── __tests__ │ │ │ ├── StyledText-test.js │ │ │ └── __snapshots__ │ │ │ │ └── StyledText-test.js.snap │ │ └── TabBarIcon.js │ ├── constants │ │ ├── Layout.js │ │ └── Colors.js │ ├── README.md │ ├── navigation │ │ ├── useLinking.js │ │ └── BottomTabNavigator.js │ ├── app.json │ ├── package.json │ ├── App.js │ └── screens │ │ ├── LinksScreen.js │ │ ├── HomeScreen.js │ │ └── CocoSsdScreen.js ├── image_classification │ ├── assets │ │ ├── .DS_Store │ │ ├── images │ │ │ ├── icon.png │ │ │ ├── tfjs.jpg │ │ │ ├── splash.png │ │ │ ├── robot-dev.png │ │ │ └── robot-prod.png │ │ └── fonts │ │ │ └── SpaceMono-Regular.ttf │ ├── babel.config.js │ ├── constants │ │ ├── Layout.js │ │ └── Colors.js │ ├── README.md │ ├── navigation │ │ ├── useLinking.js │ │ └── BottomTabNavigator.js │ ├── app.json │ ├── package.json │ ├── App.js │ └── screens │ │ ├── 
LinksScreen.js │ │ ├── HomeScreen.js │ │ └── ImageClassificationScreen.js └── expo_cli_docker │ ├── entrypoint.sh │ ├── Dockerfile │ ├── LICENSE.md │ └── README.md ├── certificates ├── Yuefeng_certificate_11_28_2017.pdf ├── AWS_Certified_Developer_Associate_Certificate.pdf └── AWS_Certified_Solutions_Architect_Associate_certificate.pdf ├── Random_Forest └── interview_attendance │ ├── images │ └── CRISP-DM_Process_Diagram.png │ └── README.md ├── deep_learning ├── finance │ └── fraud_detection │ │ └── README.md ├── unsupervised_learning │ └── dec_keras_clustering │ │ └── README.md ├── nlp │ ├── spam-classification-with-word2vec-keras │ │ └── README.md │ └── spark-nlp-docker │ │ ├── README.md │ │ └── Dockerfile ├── multi_input_transfer_learning │ └── README.md └── Multi_Layer_Perceptron │ └── breast-cancer-wisconsin │ ├── README.md │ ├── breast-cancer-wisconsin.names.txt │ ├── breast-cancer-wisconsin.csv │ └── breast-cancer-wisconsin.data ├── README.md └── recommender └── netflix-demo.ipynb /image/aws-architect.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/image/aws-architect.png -------------------------------------------------------------------------------- /image/aws-developer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/image/aws-developer.png -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/.DS_Store -------------------------------------------------------------------------------- /image/machine-learning-certificate.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/image/machine-learning-certificate.png -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/assets/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/assets/.DS_Store -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/assets/images/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/assets/images/icon.png -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/assets/images/tfjs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/assets/images/tfjs.jpg -------------------------------------------------------------------------------- /mobile_apps/objects_detection/assets/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/objects_detection/assets/.DS_Store -------------------------------------------------------------------------------- /certificates/Yuefeng_certificate_11_28_2017.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/certificates/Yuefeng_certificate_11_28_2017.pdf -------------------------------------------------------------------------------- /mobile_apps/image_classification/assets/.DS_Store: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/image_classification/assets/.DS_Store -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/assets/images/splash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/assets/images/splash.png -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/assets/images/robot-dev.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/assets/images/robot-dev.png -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/assets/images/robot-prod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/assets/images/robot-prod.png -------------------------------------------------------------------------------- /mobile_apps/objects_detection/assets/images/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/objects_detection/assets/images/icon.png -------------------------------------------------------------------------------- /mobile_apps/objects_detection/assets/images/splash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/objects_detection/assets/images/splash.png -------------------------------------------------------------------------------- /mobile_apps/objects_detection/assets/images/tfjs.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/objects_detection/assets/images/tfjs.jpg -------------------------------------------------------------------------------- /mobile_apps/image_classification/assets/images/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/image_classification/assets/images/icon.png -------------------------------------------------------------------------------- /mobile_apps/image_classification/assets/images/tfjs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/image_classification/assets/images/tfjs.jpg -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = function(api) { 2 | api.cache(true); 3 | return { 4 | presets: ['babel-preset-expo'], 5 | }; 6 | }; 7 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/assets/images/splash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/image_classification/assets/images/splash.png -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/assets/fonts/SpaceMono-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/mobile_bert/assets/fonts/SpaceMono-Regular.ttf -------------------------------------------------------------------------------- /mobile_apps/objects_detection/assets/images/robot-dev.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/objects_detection/assets/images/robot-dev.png -------------------------------------------------------------------------------- /mobile_apps/objects_detection/assets/images/robot-prod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/objects_detection/assets/images/robot-prod.png -------------------------------------------------------------------------------- /mobile_apps/objects_detection/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = function(api) { 2 | api.cache(true); 3 | return { 4 | presets: ['babel-preset-expo'], 5 | }; 6 | }; 7 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/assets/images/robot-dev.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/image_classification/assets/images/robot-dev.png -------------------------------------------------------------------------------- /mobile_apps/image_classification/assets/images/robot-prod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/image_classification/assets/images/robot-prod.png -------------------------------------------------------------------------------- /mobile_apps/image_classification/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = function(api) { 2 | api.cache(true); 3 | return { 4 | presets: ['babel-preset-expo'], 5 | }; 6 | }; 7 | 
-------------------------------------------------------------------------------- /certificates/AWS_Certified_Developer_Associate_Certificate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/certificates/AWS_Certified_Developer_Associate_Certificate.pdf -------------------------------------------------------------------------------- /mobile_apps/objects_detection/assets/fonts/SpaceMono-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/objects_detection/assets/fonts/SpaceMono-Regular.ttf -------------------------------------------------------------------------------- /mobile_apps/image_classification/assets/fonts/SpaceMono-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/mobile_apps/image_classification/assets/fonts/SpaceMono-Regular.ttf -------------------------------------------------------------------------------- /Random_Forest/interview_attendance/images/CRISP-DM_Process_Diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/Random_Forest/interview_attendance/images/CRISP-DM_Process_Diagram.png -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/.expo/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "hostType": "lan", 3 | "lanType": "ip", 4 | "dev": true, 5 | "minify": false, 6 | "urlRandomness": "gf-qgh", 7 | "https": false 8 | } 9 | -------------------------------------------------------------------------------- /certificates/AWS_Certified_Solutions_Architect_Associate_certificate.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yzzhang/machine-learning/HEAD/certificates/AWS_Certified_Solutions_Architect_Associate_certificate.pdf -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning for Natural Language Processing on Mobile Devices 2 | 3 | This Github is for the article [Deep Learning for Natural Language Processing on Mobile Devices]() in Towards Data Science by Yuefeng Zhang. 4 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/components/StyledText.js: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { Text } from 'react-native'; 3 | 4 | export function MonoText(props) { 5 | return ; 6 | } 7 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/components/StyledText.js: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { Text } from 'react-native'; 3 | 4 | export function MonoText(props) { 5 | return ; 6 | } 7 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/.expo/packager-info.json: -------------------------------------------------------------------------------- 1 | { 2 | "devToolsPort": 19002, 3 | "expoServerPort": null, 4 | "packagerPort": null, 5 | "packagerPid": null, 6 | "expoServerNgrokUrl": null, 7 | "packagerNgrokUrl": null, 8 | "ngrokPid": null 9 | } 10 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/constants/Layout.js: -------------------------------------------------------------------------------- 1 | import { 
Dimensions } from 'react-native'; 2 | 3 | const width = Dimensions.get('window').width; 4 | const height = Dimensions.get('window').height; 5 | 6 | export default { 7 | window: { 8 | width, 9 | height, 10 | }, 11 | isSmallDevice: width < 375, 12 | }; 13 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/constants/Layout.js: -------------------------------------------------------------------------------- 1 | import { Dimensions } from 'react-native'; 2 | 3 | const width = Dimensions.get('window').width; 4 | const height = Dimensions.get('window').height; 5 | 6 | export default { 7 | window: { 8 | width, 9 | height, 10 | }, 11 | isSmallDevice: width < 375, 12 | }; 13 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/constants/Layout.js: -------------------------------------------------------------------------------- 1 | import { Dimensions } from 'react-native'; 2 | 3 | const width = Dimensions.get('window').width; 4 | const height = Dimensions.get('window').height; 5 | 6 | export default { 7 | window: { 8 | width, 9 | height, 10 | }, 11 | isSmallDevice: width < 375, 12 | }; 13 | -------------------------------------------------------------------------------- /deep_learning/finance/fraud_detection/README.md: -------------------------------------------------------------------------------- 1 | # Automatic Machine Learning in Fraud Detection Using H2O AutoML 2 | 3 | This Github is for the article [Automatic Machine Learning in Fraud Detection Using H2O AutoML](https://medium.com/@zhangyuefeng1/automatic-machine-learning-in-fraud-detection-using-h2o-automl-6ba5cbf5c79b) in Towards Data Science by Yuefeng Zhang. 
4 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/components/__tests__/StyledText-test.js: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import renderer from 'react-test-renderer'; 3 | 4 | import { MonoText } from '../StyledText'; 5 | 6 | it(`renders correctly`, () => { 7 | const tree = renderer.create(Snapshot test!).toJSON(); 8 | 9 | expect(tree).toMatchSnapshot(); 10 | }); 11 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/components/__tests__/__snapshots__/StyledText-test.js.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`renders correctly 1`] = ` 4 | 14 | Snapshot test! 15 | 16 | `; 17 | -------------------------------------------------------------------------------- /deep_learning/unsupervised_learning/dec_keras_clustering/README.md: -------------------------------------------------------------------------------- 1 | # Deep Clustering for Financial Market Segmentation 2 | 3 | This Github is for the article [Deep Clustering for Financial Market Segmentation](https://medium.com/@zhangyuefeng1/deep-clustering-for-financial-market-segmentation-2a41573618cf?sk=a3f7a7e5d21f0ef47b2167ce3f0d0cb6) in Towards Data Science by Yuefeng Zhang. 
4 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/components/__tests__/StyledText-test.js: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import renderer from 'react-test-renderer'; 3 | 4 | import { MonoText } from '../StyledText'; 5 | 6 | it(`renders correctly`, () => { 7 | const tree = renderer.create(Snapshot test!).toJSON(); 8 | 9 | expect(tree).toMatchSnapshot(); 10 | }); 11 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/components/__tests__/__snapshots__/StyledText-test.js.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`renders correctly 1`] = ` 4 | 14 | Snapshot test! 15 | 16 | `; 17 | -------------------------------------------------------------------------------- /deep_learning/nlp/spam-classification-with-word2vec-keras/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning for Natural Language Processing Using word2vec-keras 2 | 3 | This Github is for the article [Deep Learning for Natural Language Processing Using word2vec-keras](https://medium.com/@zhangyuefeng1/deep-learning-for-natural-language-processing-using-word2vec-keras-d9a240c7bb9d) in Towards Data Science by Yuefeng Zhang. 
-------------------------------------------------------------------------------- /mobile_apps/mobile_bert/constants/Colors.js: -------------------------------------------------------------------------------- 1 | const tintColor = '#2f95dc'; 2 | 3 | export default { 4 | tintColor, 5 | tabIconDefault: '#ccc', 6 | tabIconSelected: tintColor, 7 | tabBar: '#fefefe', 8 | errorBackground: 'red', 9 | errorText: '#fff', 10 | warningBackground: '#EAEB5E', 11 | warningText: '#666804', 12 | noticeBackground: tintColor, 13 | noticeText: '#fff', 14 | }; 15 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/constants/Colors.js: -------------------------------------------------------------------------------- 1 | const tintColor = '#2f95dc'; 2 | 3 | export default { 4 | tintColor, 5 | tabIconDefault: '#ccc', 6 | tabIconSelected: tintColor, 7 | tabBar: '#fefefe', 8 | errorBackground: 'red', 9 | errorText: '#fff', 10 | warningBackground: '#EAEB5E', 11 | warningText: '#666804', 12 | noticeBackground: tintColor, 13 | noticeText: '#fff', 14 | }; 15 | -------------------------------------------------------------------------------- /deep_learning/multi_input_transfer_learning/README.md: -------------------------------------------------------------------------------- 1 | # Deep Multi-Input Models Transfer Learning for Image and Word Tag Recognition 2 | 3 | This Github is for the article [Deep Multi-Input Models Transfer Learning for Image and Word Tag Recognition](https://towardsdatascience.com/deep-multi-input-models-transfer-learning-for-image-and-word-tag-recognition-7ae0462253dc) in Towards Data Science by Yuefeng Zhang. 
4 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/constants/Colors.js: -------------------------------------------------------------------------------- 1 | const tintColor = '#2f95dc'; 2 | 3 | export default { 4 | tintColor, 5 | tabIconDefault: '#ccc', 6 | tabIconSelected: tintColor, 7 | tabBar: '#fefefe', 8 | errorBackground: 'red', 9 | errorText: '#fff', 10 | warningBackground: '#EAEB5E', 11 | warningText: '#666804', 12 | noticeBackground: tintColor, 13 | noticeText: '#fff', 14 | }; 15 | -------------------------------------------------------------------------------- /Random_Forest/interview_attendance/README.md: -------------------------------------------------------------------------------- 1 | # Object-Oriented Machine Learning Pipeline with mlflow for Pandas and Koalas DataFrames 2 | 3 | This Github is for the article [Object-Oriented Machine Learning Pipeline with mlflow for Pandas and Koalas DataFrames](https://towardsdatascience.com/object-oriented-machine-learning-pipeline-with-mlflow-for-pandas-and-koalas-dataframes-ef8517d39a12) in Towards Data Science by Yuefeng Zhang. -------------------------------------------------------------------------------- /mobile_apps/objects_detection/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning for Detecting Objects in an Image on Mobile Devices 2 | 3 | This Github is for the article [Deep Learning for Detecting Objects in an Image on Mobile Devices](https://medium.com/p/deep-learning-for-detecting-objects-in-an-image-on-mobile-devices-7d5b2e5621f9?source=email-80e8f2faf4bc--writer.postDistributed&sk=19e420fca7758dd3391adcf6f3df5c55) in Towards Data Science by Yuefeng Zhang. 
4 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/components/TabBarIcon.js: -------------------------------------------------------------------------------- 1 | import { Ionicons } from '@expo/vector-icons'; 2 | import * as React from 'react'; 3 | 4 | import Colors from '../constants/Colors'; 5 | 6 | export default function TabBarIcon(props) { 7 | return ( 8 | 14 | ); 15 | } 16 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/components/TabBarIcon.js: -------------------------------------------------------------------------------- 1 | import { Ionicons } from '@expo/vector-icons'; 2 | import * as React from 'react'; 3 | 4 | import Colors from '../constants/Colors'; 5 | 6 | export default function TabBarIcon(props) { 7 | return ( 8 | 14 | ); 15 | } 16 | -------------------------------------------------------------------------------- /mobile_apps/expo_cli_docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | # authenticate without showing password 6 | if [ -n "$EXPO_CLI_USERNAME" ] && [ -n "$EXPO_CLI_PASSWORD" ]; then 7 | expo-cli login --non-interactive --username $EXPO_CLI_USERNAME 8 | fi 9 | 10 | # see: https://github.com/nodejs/docker-node/blob/4a29572/12/stretch/docker-entrypoint.sh#L4-L6 11 | if [ "${1#-}" != "${1}" ] || [ -z "$(command -v "${1}")" ]; then 12 | set -- expo-cli "$@" 13 | fi 14 | 15 | exec "$@" 16 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning for Image Classification on Mobile Devices 2 | 3 | This Github is for the article [Deep Learning for Image Classification on Mobile 
Devices](https://towardsdatascience.com/deep-learning-for-image-classification-on-mobile-devices-f93efac860fd?source=email-80e8f2faf4bc-1587344316988-layerCake.autoLayerCakeWriterNotification-------------------------6d3c9f8a_2a4d_49a4_b49e_64eaa6c28eb9&sk=fdd9218d4f879cda3cc064d20701a939) in Towards Data Science by Yuefeng Zhang. 4 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/navigation/useLinking.js: -------------------------------------------------------------------------------- 1 | import { useLinking } from '@react-navigation/native'; 2 | import { Linking } from 'expo'; 3 | 4 | export default function(containerRef) { 5 | return useLinking(containerRef, { 6 | prefixes: [Linking.makeUrl('/')], 7 | config: { 8 | Root: { 9 | path: 'root', 10 | screens: { 11 | Home: 'home', 12 | Links: 'links', 13 | Settings: 'settings', 14 | }, 15 | }, 16 | }, 17 | }); 18 | } 19 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/navigation/useLinking.js: -------------------------------------------------------------------------------- 1 | import { useLinking } from '@react-navigation/native'; 2 | import { Linking } from 'expo'; 3 | 4 | export default function(containerRef) { 5 | return useLinking(containerRef, { 6 | prefixes: [Linking.makeUrl('/')], 7 | config: { 8 | Root: { 9 | path: 'root', 10 | screens: { 11 | Home: 'home', 12 | Links: 'links', 13 | Settings: 'settings', 14 | }, 15 | }, 16 | }, 17 | }); 18 | } 19 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/navigation/useLinking.js: -------------------------------------------------------------------------------- 1 | import { useLinking } from '@react-navigation/native'; 2 | import { Linking } from 'expo'; 3 | 4 | export default function(containerRef) { 5 | return useLinking(containerRef, { 6 | prefixes: [Linking.makeUrl('/')], 7 | 
config: { 8 | Root: { 9 | path: 'root', 10 | screens: { 11 | Home: 'home', 12 | Links: 'links', 13 | Settings: 'settings', 14 | }, 15 | }, 16 | }, 17 | }); 18 | } 19 | -------------------------------------------------------------------------------- /mobile_apps/expo_cli_docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG NODE_VERSION="13" 2 | 3 | FROM node:$NODE_VERSION 4 | 5 | LABEL name="Expo Cli for Docker" 6 | LABEL repository="https://github.com/yzzhang/machine-learning/tree/master/mobile_apps/expo_cli_docker" 7 | LABEL homepage="https://github.com/yzzhang/machine-learning/tree/master/mobile_apps/expo_cli_docker" 8 | LABEL maintainer="Yuefeng Zhang" 9 | 10 | EXPOSE 19000 11 | EXPOSE 19001 12 | 13 | ENV REACT_NATIVE_PACKAGER_HOSTNAME="10.0.1.198" 14 | 15 | ARG EXPO_VERSION="latest" 16 | 17 | RUN yarn global add expo-cli@$EXPO_VERSION \ 18 | && yarn cache clean 19 | 20 | COPY entrypoint.sh LICENSE.md README.md / 21 | 22 | ENTRYPOINT ["/entrypoint.sh"] 23 | CMD ["--help"] 24 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/app.json: -------------------------------------------------------------------------------- 1 | { 2 | "expo": { 3 | "name": "ObjectsDetection", 4 | "slug": "react-native-deeplearning", 5 | "platforms": [ 6 | "ios", 7 | "android", 8 | "web" 9 | ], 10 | "version": "1.0.0", 11 | "orientation": "portrait", 12 | "icon": "./assets/images/icon.png", 13 | "scheme": "myapp", 14 | "splash": { 15 | "image": "./assets/images/splash.png", 16 | "resizeMode": "contain", 17 | "backgroundColor": "#ffffff" 18 | }, 19 | "updates": { 20 | "fallbackToCacheTimeout": 0 21 | }, 22 | "assetBundlePatterns": [ 23 | "**/*" 24 | ], 25 | "ios": { 26 | "supportsTablet": true 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/app.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "expo": { 3 | "name": "ObjectsDetection", 4 | "slug": "react-native-deeplearning", 5 | "platforms": [ 6 | "ios", 7 | "android", 8 | "web" 9 | ], 10 | "version": "1.0.0", 11 | "orientation": "portrait", 12 | "icon": "./assets/images/icon.png", 13 | "scheme": "myapp", 14 | "splash": { 15 | "image": "./assets/images/splash.png", 16 | "resizeMode": "contain", 17 | "backgroundColor": "#ffffff" 18 | }, 19 | "updates": { 20 | "fallbackToCacheTimeout": 0 21 | }, 22 | "assetBundlePatterns": [ 23 | "**/*" 24 | ], 25 | "ios": { 26 | "supportsTablet": true 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/app.json: -------------------------------------------------------------------------------- 1 | { 2 | "expo": { 3 | "name": "ImageClassification", 4 | "slug": "react-native-deeplearning", 5 | "platforms": [ 6 | "ios", 7 | "android", 8 | "web" 9 | ], 10 | "version": "1.0.0", 11 | "orientation": "portrait", 12 | "icon": "./assets/images/icon.png", 13 | "scheme": "myapp", 14 | "splash": { 15 | "image": "./assets/images/splash.png", 16 | "resizeMode": "contain", 17 | "backgroundColor": "#ffffff" 18 | }, 19 | "updates": { 20 | "fallbackToCacheTimeout": 0 21 | }, 22 | "assetBundlePatterns": [ 23 | "**/*" 24 | ], 25 | "ios": { 26 | "supportsTablet": true 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /mobile_apps/expo_cli_docker/LICENSE.md: -------------------------------------------------------------------------------- 1 | # The MIT License (MIT) 2 | 3 | Copyright (c) 2020-present Yuefeng Zhang 4 | 5 | > Permission is hereby granted, free of charge, to any person obtaining a copy 6 | > of this software and associated documentation files (the "Software"), to deal 7 | > in the Software without restriction, including without limitation the rights 8 
| > to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | > copies of the Software, and to permit persons to whom the Software is 10 | > furnished to do so, subject to the following conditions: 11 | > 12 | > The above copyright notice and this permission notice shall be included in 13 | > all copies or substantial portions of the Software. 14 | > 15 | > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | > IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | > FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | > AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | > THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /deep_learning/nlp/spark-nlp-docker/README.md: -------------------------------------------------------------------------------- 1 | # Running Spark NLP in Docker Container 2 | 3 | This repository is to show how to setup a docker environment for running [Spark NLP](https://www.johnsnowlabs.com/spark-nlp) in docker container. 
It is based on the [Spark NLP Docker image](https://hub.docker.com/r/johnsnowlabs/spark-nlp-workshop) with the following modifications: 4 | * removed tutorials and related notebooks and data files 5 | * replaced Spark NLP 2.4.5 with Spark NLP 2.5.1 6 | * adjusted docker hub username 7 | * adjusted the home directory name in docker container 8 | * added the command line --volumn option to map the current host direcrory to the home directory in docker container 9 | * removed Jupyter notebook configuration 10 | 11 | ## Docker setup 12 | 13 | Method 1- Get the existing docker image for spark-nlp: 14 | 15 | ```bash 16 | docker pull zhangyuefeng123/sparknlp:1.0 17 | ``` 18 | 19 | Method 2- Build a new docker image 20 | 21 | ```bash 22 | docker build -t zhangyuefeng123/sparknlp:1.0 . 23 | ``` 24 | 25 | 2- Run the image locally with port binding 26 | 27 | ```bash 28 | docker run -it --volume $PWD:/home/yuefeng -p 8888:8888 -p 4040:4040 zhangyuefeng123/sparknlp:1.0 29 | ``` 30 | 31 | 3- Open Jupyter notebooks inside your browser by using the token printed on the console like below: 32 | 33 | ```bash 34 | http://127.0.0.1:8888/?token=d7e598479d9f3ce5b62d6e91276d9a557f68b6e3c919ddbc 35 | ``` 36 | 37 | Once the Jupyter notebook starts, we can use it as usual. 
38 | 39 | -------------------------------------------------------------------------------- /deep_learning/Multi_Layer_Perceptron/breast-cancer-wisconsin/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Deep Learning in Wisconsin Breast Cancer Diagnosis 3 | 4 | 5 | ## Install 6 | 7 | The Jupyter notebook requires **Python 2.7 or 3.6** and the following Python libraries installed: 8 | 9 | - [NumPy](http://www.numpy.org/) 10 | - [Pandas](http://pandas.pydata.org/) 11 | - [matplotlib](http://matplotlib.org/) 12 | - [scikit-learn](http://scikit-learn.org/stable/) 13 | - [Kares](https://keras.io/) 14 | - [Tensorflow](https://www.tensorflow.org/) 15 | 16 | You will also need to have software installed to run and execute a [Jupyter Notebook](http://ipython.org/notebook.html) 17 | 18 | If you do not have Python installed yet, it is highly recommended that you install the [Anaconda](http://continuum.io/downloads) distribution of Python, which already has the above packages and more included. 
The Keras and Tensorflow can be installed as follows:
| "react-dom": "~16.9.0", 34 | "react-native": "https://github.com/expo/react-native/archive/sdk-37.0.1.tar.gz", 35 | "react-native-fs": "^2.16.6", 36 | "react-native-gesture-handler": "~1.6.0", 37 | "react-native-safe-area-context": "0.7.3", 38 | "react-native-screens": "~2.2.0", 39 | "react-native-svg": "11.0.1", 40 | "react-native-web": "~0.11.7" 41 | }, 42 | "devDependencies": { 43 | "@babel/core": "^7.8.6", 44 | "babel-preset-expo": "~8.1.0", 45 | "jest-expo": "~37.0.0" 46 | }, 47 | "private": true 48 | } 49 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "main": "node_modules/expo/AppEntry.js", 3 | "scripts": { 4 | "start": "expo start", 5 | "android": "expo start --android", 6 | "ios": "expo start --ios", 7 | "web": "expo start --web", 8 | "eject": "expo eject", 9 | "test": "jest --watchAll" 10 | }, 11 | "jest": { 12 | "preset": "jest-expo" 13 | }, 14 | "dependencies": { 15 | "@expo/vector-icons": "~10.0.6", 16 | "@react-native-community/async-storage": "^1.9.0", 17 | "@react-native-community/masked-view": "0.1.6", 18 | "@react-navigation/bottom-tabs": "^5.0.0", 19 | "@react-navigation/native": "^5.0.0", 20 | "@react-navigation/stack": "^5.0.0", 21 | "@react-navigation/web": "~1.0.0-alpha.9", 22 | "@tensorflow-models/mobilenet": "^2.0.4", 23 | "@tensorflow/tfjs": "^1.7.2", 24 | "@tensorflow/tfjs-react-native": "^0.2.3", 25 | "expo": "~37.0.3", 26 | "expo-asset": "~8.1.3", 27 | "expo-camera": "^8.2.0", 28 | "expo-constants": "~9.0.0", 29 | "expo-font": "~8.1.0", 30 | "expo-gl": "^8.1.0", 31 | "expo-web-browser": "~8.1.0", 32 | "jpeg-js": "^0.3.7", 33 | "react": "~16.9.0", 34 | "react-dom": "~16.9.0", 35 | "react-native": "https://github.com/expo/react-native/archive/sdk-37.0.1.tar.gz", 36 | "react-native-fs": "^2.16.6", 37 | "react-native-gesture-handler": "~1.6.0", 38 | 
"react-native-safe-area-context": "0.7.3", 39 | "react-native-screens": "~2.2.0", 40 | "react-native-web": "~0.11.7", 41 | "expo-permissions": "~8.1.0", 42 | "expo-image-picker": "~8.1.0" 43 | }, 44 | "devDependencies": { 45 | "@babel/core": "^7.8.6", 46 | "babel-preset-expo": "~8.1.0", 47 | "jest-expo": "~37.0.0" 48 | }, 49 | "private": true 50 | } 51 | -------------------------------------------------------------------------------- /deep_learning/nlp/spark-nlp-docker/Dockerfile: -------------------------------------------------------------------------------- 1 | #Download base image ubuntu 18.04 2 | FROM ubuntu:18.04 3 | 4 | ENV NB_USER yuefeng 5 | ENV NB_UID 1000 6 | ENV HOME /home/${NB_USER} 7 | 8 | ENV PYSPARK_PYTHON=python3 9 | ENV PYSPARK_DRIVER_PYTHON=python3 10 | 11 | RUN apt-get update && apt-get install -y \ 12 | tar \ 13 | wget \ 14 | bash \ 15 | rsync \ 16 | gcc \ 17 | libfreetype6-dev \ 18 | libhdf5-serial-dev \ 19 | libpng-dev \ 20 | libzmq3-dev \ 21 | python3 \ 22 | python3-dev \ 23 | python3-pip \ 24 | unzip \ 25 | pkg-config \ 26 | software-properties-common \ 27 | graphviz 28 | 29 | RUN adduser --disabled-password \ 30 | --gecos "Default user" \ 31 | --uid ${NB_UID} \ 32 | ${NB_USER} 33 | 34 | # Install OpenJDK-8 35 | RUN apt-get update && \ 36 | apt-get install -y openjdk-8-jdk && \ 37 | apt-get install -y ant && \ 38 | apt-get clean; 39 | 40 | # Fix certificate issues 41 | RUN apt-get update && \ 42 | apt-get install ca-certificates-java && \ 43 | apt-get clean && \ 44 | update-ca-certificates -f; 45 | # Setup JAVA_HOME -- useful for docker commandline 46 | ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/ 47 | RUN export JAVA_HOME 48 | 49 | RUN echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> ~/.bashrc 50 | 51 | RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 52 | 53 | RUN pip3 install --upgrade pip 54 | RUN pip3 install --no-cache-dir notebook==5.* numpy pyspark==2.4.4 spark-nlp==2.5.1 pandas mlflow Keras 
scikit-spark scikit-learn scipy matplotlib pydot tensorflow graphviz 55 | 56 | USER root 57 | RUN chown -R ${NB_UID} ${HOME} 58 | USER ${NB_USER} 59 | 60 | WORKDIR ${HOME} 61 | 62 | # Specify the default command to run 63 | CMD ["jupyter", "notebook", "--ip", "0.0.0.0"] -------------------------------------------------------------------------------- /mobile_apps/objects_detection/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "main": "node_modules/expo/AppEntry.js", 3 | "scripts": { 4 | "start": "expo start", 5 | "android": "expo start --android", 6 | "ios": "expo start --ios", 7 | "web": "expo start --web", 8 | "eject": "expo eject", 9 | "test": "jest --watchAll" 10 | }, 11 | "jest": { 12 | "preset": "jest-expo" 13 | }, 14 | "dependencies": { 15 | "@expo/vector-icons": "~10.0.6", 16 | "@react-native-community/async-storage": "^1.9.0", 17 | "@react-native-community/masked-view": "0.1.6", 18 | "@react-navigation/bottom-tabs": "^5.0.0", 19 | "@react-navigation/native": "^5.0.0", 20 | "@react-navigation/stack": "^5.0.0", 21 | "@react-navigation/web": "~1.0.0-alpha.9", 22 | "@tensorflow-models/coco-ssd": "^2.0.3", 23 | "@tensorflow/tfjs": "^1.7.2", 24 | "@tensorflow/tfjs-react-native": "^0.2.3", 25 | "expo": "~37.0.3", 26 | "expo-asset": "~8.1.3", 27 | "expo-camera": "^8.2.0", 28 | "expo-constants": "~9.0.0", 29 | "expo-font": "~8.1.0", 30 | "expo-gl": "^8.1.0", 31 | "expo-image-picker": "~8.1.0", 32 | "expo-permissions": "~8.1.0", 33 | "expo-web-browser": "~8.1.0", 34 | "jpeg-js": "^0.3.7", 35 | "react": "~16.9.0", 36 | "react-dom": "~16.9.0", 37 | "react-native": "https://github.com/expo/react-native/archive/sdk-37.0.1.tar.gz", 38 | "react-native-fs": "^2.16.6", 39 | "react-native-gesture-handler": "~1.6.0", 40 | "react-native-safe-area-context": "0.7.3", 41 | "react-native-screens": "~2.2.0", 42 | "react-native-web": "~0.11.7", 43 | "react-native-svg": "11.0.1" 44 | }, 45 | "devDependencies": { 46 | 
"@babel/core": "^7.8.6", 47 | "babel-preset-expo": "~8.1.0", 48 | "jest-expo": "~37.0.0" 49 | }, 50 | "private": true 51 | } 52 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/navigation/BottomTabNavigator.js: -------------------------------------------------------------------------------- 1 | import { createBottomTabNavigator } from '@react-navigation/bottom-tabs'; 2 | import * as React from 'react'; 3 | 4 | import TabBarIcon from '../components/TabBarIcon'; 5 | import HomeScreen from '../screens/HomeScreen'; 6 | import QnaScreen from '../screens/QnaScreen'; 7 | import LinksScreen from '../screens/LinksScreen'; 8 | 9 | const BottomTab = createBottomTabNavigator(); 10 | const INITIAL_ROUTE_NAME = 'Home'; 11 | 12 | export default function BottomTabNavigator({ navigation, route }) { 13 | // Set the header title on the parent stack navigator depending on the 14 | // currently active tab. Learn more in the documentation: 15 | // https://reactnavigation.org/docs/en/screen-options-resolution.html 16 | navigation.setOptions({ headerTitle: getHeaderTitle(route) }); 17 | 18 | return ( 19 | 20 | , 26 | }} 27 | /> 28 | , 34 | }} 35 | /> 36 | , 42 | }} 43 | /> 44 | 45 | ); 46 | } 47 | 48 | function getHeaderTitle(route) { 49 | const routeName = route.state?.routes[route.state.index]?.name ?? 
INITIAL_ROUTE_NAME; 50 | 51 | switch (routeName) { 52 | case 'Home': 53 | return 'Introduction'; 54 | case 'Qna': 55 | return 'Qna'; 56 | case 'Links': 57 | return 'References'; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/navigation/BottomTabNavigator.js: -------------------------------------------------------------------------------- 1 | import { createBottomTabNavigator } from '@react-navigation/bottom-tabs'; 2 | import * as React from 'react'; 3 | 4 | import TabBarIcon from '../components/TabBarIcon'; 5 | import HomeScreen from '../screens/HomeScreen'; 6 | import CocoSsdScreen from '../screens/CocoSsdScreen'; 7 | import LinksScreen from '../screens/LinksScreen'; 8 | 9 | const BottomTab = createBottomTabNavigator(); 10 | const INITIAL_ROUTE_NAME = 'Home'; 11 | 12 | export default function BottomTabNavigator({ navigation, route }) { 13 | // Set the header title on the parent stack navigator depending on the 14 | // currently active tab. Learn more in the documentation: 15 | // https://reactnavigation.org/docs/en/screen-options-resolution.html 16 | navigation.setOptions({ headerTitle: getHeaderTitle(route) }); 17 | 18 | return ( 19 | 20 | , 26 | }} 27 | /> 28 | , 34 | }} 35 | /> 36 | , 42 | }} 43 | /> 44 | 45 | ); 46 | } 47 | 48 | function getHeaderTitle(route) { 49 | const routeName = route.state?.routes[route.state.index]?.name ?? 
INITIAL_ROUTE_NAME; 50 | 51 | switch (routeName) { 52 | case 'Home': 53 | return 'Introduction'; 54 | case 'CocoSsd': 55 | return 'COCO-SSD'; 56 | case 'Links': 57 | return 'References'; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/navigation/BottomTabNavigator.js: -------------------------------------------------------------------------------- 1 | import { createBottomTabNavigator } from '@react-navigation/bottom-tabs'; 2 | import * as React from 'react'; 3 | 4 | import TabBarIcon from '../components/TabBarIcon'; 5 | import HomeScreen from '../screens/HomeScreen'; 6 | import ImageClassificationScreen from '../screens/ImageClassificationScreen'; 7 | import LinksScreen from '../screens/LinksScreen'; 8 | 9 | const BottomTab = createBottomTabNavigator(); 10 | const INITIAL_ROUTE_NAME = 'Home'; 11 | 12 | export default function BottomTabNavigator({ navigation, route }) { 13 | // Set the header title on the parent stack navigator depending on the 14 | // currently active tab. Learn more in the documentation: 15 | // https://reactnavigation.org/docs/en/screen-options-resolution.html 16 | navigation.setOptions({ headerTitle: getHeaderTitle(route) }); 17 | 18 | return ( 19 | 20 | , 26 | }} 27 | /> 28 | , 34 | }} 35 | /> 36 | , 42 | }} 43 | /> 44 | 45 | ); 46 | } 47 | 48 | function getHeaderTitle(route) { 49 | const routeName = route.state?.routes[route.state.index]?.name ?? 
INITIAL_ROUTE_NAME; 50 | 51 | switch (routeName) { 52 | case 'Home': 53 | return 'Introduction'; 54 | case 'ImageClassification': 55 | return 'Image Classification'; 56 | case 'Links': 57 | return 'References'; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/App.js: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { Platform, StatusBar, StyleSheet, View } from 'react-native'; 3 | import { SplashScreen } from 'expo'; 4 | import * as Font from 'expo-font'; 5 | import { Ionicons } from '@expo/vector-icons'; 6 | import { NavigationContainer } from '@react-navigation/native'; 7 | import { createStackNavigator } from '@react-navigation/stack'; 8 | 9 | import BottomTabNavigator from './navigation/BottomTabNavigator'; 10 | import useLinking from './navigation/useLinking'; 11 | 12 | const Stack = createStackNavigator(); 13 | 14 | export default function App(props) { 15 | const [isLoadingComplete, setLoadingComplete] = React.useState(false); 16 | const [initialNavigationState, setInitialNavigationState] = React.useState(); 17 | const containerRef = React.useRef(); 18 | const { getInitialState } = useLinking(containerRef); 19 | 20 | // Load any resources or data that we need prior to rendering the app 21 | React.useEffect(() => { 22 | async function loadResourcesAndDataAsync() { 23 | try { 24 | SplashScreen.preventAutoHide(); 25 | 26 | // Load our initial navigation state 27 | setInitialNavigationState(await getInitialState()); 28 | 29 | // Load fonts 30 | await Font.loadAsync({ 31 | ...Ionicons.font, 32 | 'space-mono': require('./assets/fonts/SpaceMono-Regular.ttf'), 33 | }); 34 | } catch (e) { 35 | // We might want to provide this error information to an error reporting service 36 | console.warn(e); 37 | } finally { 38 | setLoadingComplete(true); 39 | SplashScreen.hide(); 40 | } 41 | } 42 | 43 | 
loadResourcesAndDataAsync(); 44 | }, []); 45 | 46 | if (!isLoadingComplete && !props.skipLoadingScreen) { 47 | return null; 48 | } else { 49 | return ( 50 | 51 | {Platform.OS === 'ios' && } 52 | 53 | 54 | 55 | 56 | 57 | 58 | ); 59 | } 60 | } 61 | 62 | const styles = StyleSheet.create({ 63 | container: { 64 | flex: 1, 65 | backgroundColor: '#fff', 66 | }, 67 | }); 68 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/App.js: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { Platform, StatusBar, StyleSheet, View } from 'react-native'; 3 | import { SplashScreen } from 'expo'; 4 | import * as Font from 'expo-font'; 5 | import { Ionicons } from '@expo/vector-icons'; 6 | import { NavigationContainer } from '@react-navigation/native'; 7 | import { createStackNavigator } from '@react-navigation/stack'; 8 | 9 | import BottomTabNavigator from './navigation/BottomTabNavigator'; 10 | import useLinking from './navigation/useLinking'; 11 | 12 | const Stack = createStackNavigator(); 13 | 14 | export default function App(props) { 15 | const [isLoadingComplete, setLoadingComplete] = React.useState(false); 16 | const [initialNavigationState, setInitialNavigationState] = React.useState(); 17 | const containerRef = React.useRef(); 18 | const { getInitialState } = useLinking(containerRef); 19 | 20 | // Load any resources or data that we need prior to rendering the app 21 | React.useEffect(() => { 22 | async function loadResourcesAndDataAsync() { 23 | try { 24 | SplashScreen.preventAutoHide(); 25 | 26 | // Load our initial navigation state 27 | setInitialNavigationState(await getInitialState()); 28 | 29 | // Load fonts 30 | await Font.loadAsync({ 31 | ...Ionicons.font, 32 | 'space-mono': require('./assets/fonts/SpaceMono-Regular.ttf'), 33 | }); 34 | } catch (e) { 35 | // We might want to provide this error information to an error reporting 
service 36 | console.warn(e); 37 | } finally { 38 | setLoadingComplete(true); 39 | SplashScreen.hide(); 40 | } 41 | } 42 | 43 | loadResourcesAndDataAsync(); 44 | }, []); 45 | 46 | if (!isLoadingComplete && !props.skipLoadingScreen) { 47 | return null; 48 | } else { 49 | return ( 50 | 51 | {Platform.OS === 'ios' && } 52 | 53 | 54 | 55 | 56 | 57 | 58 | ); 59 | } 60 | } 61 | 62 | const styles = StyleSheet.create({ 63 | container: { 64 | flex: 1, 65 | backgroundColor: '#fff', 66 | }, 67 | }); 68 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/App.js: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { Platform, StatusBar, StyleSheet, View } from 'react-native'; 3 | import { SplashScreen } from 'expo'; 4 | import * as Font from 'expo-font'; 5 | import { Ionicons } from '@expo/vector-icons'; 6 | import { NavigationContainer } from '@react-navigation/native'; 7 | import { createStackNavigator } from '@react-navigation/stack'; 8 | 9 | import BottomTabNavigator from './navigation/BottomTabNavigator'; 10 | import useLinking from './navigation/useLinking'; 11 | 12 | const Stack = createStackNavigator(); 13 | 14 | export default function App(props) { 15 | const [isLoadingComplete, setLoadingComplete] = React.useState(false); 16 | const [initialNavigationState, setInitialNavigationState] = React.useState(); 17 | const containerRef = React.useRef(); 18 | const { getInitialState } = useLinking(containerRef); 19 | 20 | // Load any resources or data that we need prior to rendering the app 21 | React.useEffect(() => { 22 | async function loadResourcesAndDataAsync() { 23 | try { 24 | SplashScreen.preventAutoHide(); 25 | 26 | // Load our initial navigation state 27 | setInitialNavigationState(await getInitialState()); 28 | 29 | // Load fonts 30 | await Font.loadAsync({ 31 | ...Ionicons.font, 32 | 'space-mono': 
require('./assets/fonts/SpaceMono-Regular.ttf'), 33 | }); 34 | } catch (e) { 35 | // We might want to provide this error information to an error reporting service 36 | console.warn(e); 37 | } finally { 38 | setLoadingComplete(true); 39 | SplashScreen.hide(); 40 | } 41 | } 42 | 43 | loadResourcesAndDataAsync(); 44 | }, []); 45 | 46 | if (!isLoadingComplete && !props.skipLoadingScreen) { 47 | return null; 48 | } else { 49 | return ( 50 | 51 | {Platform.OS === 'ios' && } 52 | 53 | 54 | 55 | 56 | 57 | 58 | ); 59 | } 60 | } 61 | 62 | const styles = StyleSheet.create({ 63 | container: { 64 | flex: 1, 65 | backgroundColor: '#fff', 66 | }, 67 | }); 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Selected List of Publications on Data Science 2 | 3 | ### Also see my [Web site](https://yzzhang.github.io/) for details. 4 | 5 | 6 | ### 1. Deep Learning 7 | 8 | #### 1.1 Deep Clustering for Financial Market Segmentation 9 | See publication: [A unsupervised deep learning approach for credit card customer clustering, Towards Data Science, Nov 23, 2019](https://medium.com/@zhangyuefeng1/deep-clustering-for-financial-market-segmentation-2a41573618cf?sk=a3f7a7e5d21f0ef47b2167ce3f0d0cb6) 10 | 11 | #### 1.2 Deep Multi-Input Models Transfer Learning for Image and Word Tag Recognition 12 | See publication: [A multi-models deep learning approach for image and text understanding, Towards Data Science, Nov 20, 2019](https://towardsdatascience.com/deep-multi-input-models-transfer-learning-for-image-and-word-tag-recognition-7ae0462253dc) 13 | 14 | #### 1.3 Deep Learning for Natural Language Processing Using word2vec-keras 15 | See publication: [A deep learning approach for NLP by combining Word2Vec with Keras LSTM, Towards Data Science, Nov 3, 2019](https://towardsdatascience.com/deep-learning-for-natural-language-processing-using-word2vec-keras-d9a240c7bb9d) 
#### 1.4 Deep Learning in Wisconsin Breast Cancer Diagnosis
Associate](https://github.com/yzzhang/machine-learning/blob/master/certificates/AWS_Certified_Developer_Associate_Certificate.pdf) 46 | 47 | ![AWS developer certificate](./image/aws-developer.png) 48 | -------------------------------------------------------------------------------- /mobile_apps/objects_detection/screens/LinksScreen.js: -------------------------------------------------------------------------------- 1 | import { Ionicons } from '@expo/vector-icons'; 2 | import * as WebBrowser from 'expo-web-browser'; 3 | import * as React from 'react'; 4 | import { Image, StyleSheet, Text, View } from 'react-native'; 5 | import { RectButton, ScrollView } from 'react-native-gesture-handler'; 6 | 7 | export default function LinksScreen() { 8 | return ( 9 | 10 | 11 | 12 | 20 | 21 | 22 | WebBrowser.openBrowserAsync('https://www.tensorflow.org/js')} 26 | /> 27 | 28 | WebBrowser.openBrowserAsync('https://github.com/tensorflow/tfjs-models/tree/master/coco-ssd')} 32 | /> 33 | 34 | WebBrowser.openBrowserAsync('https://docs.expo.io')} 38 | /> 39 | 40 | WebBrowser.openBrowserAsync('https://reactnative.dev/docs/getting-started')} 44 | /> 45 | 46 | ); 47 | } 48 | 49 | function OptionButton({ icon, label, onPress, isLastOption }) { 50 | return ( 51 | 52 | 53 | 54 | 55 | 56 | 57 | {label} 58 | 59 | 60 | 61 | ); 62 | } 63 | 64 | const styles = StyleSheet.create({ 65 | container: { 66 | flex: 1, 67 | backgroundColor: '#fafafa', 68 | }, 69 | welcomeContainer: { 70 | alignItems: 'center', 71 | marginTop: 10, 72 | marginBottom: 20, 73 | }, 74 | welcomeImage: { 75 | width: 100, 76 | height: 80, 77 | resizeMode: 'contain', 78 | marginTop: 3, 79 | marginLeft: -10, 80 | }, 81 | contentContainer: { 82 | paddingTop: 30, 83 | }, 84 | optionIconContainer: { 85 | marginRight: 12, 86 | }, 87 | option: { 88 | backgroundColor: '#fdfdfd', 89 | paddingHorizontal: 15, 90 | paddingVertical: 15, 91 | borderWidth: StyleSheet.hairlineWidth, 92 | borderBottomWidth: 0, 93 | borderColor: '#ededed', 94 | }, 
95 | lastOption: { 96 | borderBottomWidth: StyleSheet.hairlineWidth, 97 | }, 98 | optionText: { 99 | fontSize: 15, 100 | alignSelf: 'flex-start', 101 | marginTop: 1, 102 | }, 103 | }); 104 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/screens/LinksScreen.js: -------------------------------------------------------------------------------- 1 | import { Ionicons } from '@expo/vector-icons'; 2 | import * as WebBrowser from 'expo-web-browser'; 3 | import * as React from 'react'; 4 | import { Image, StyleSheet, Text, View } from 'react-native'; 5 | import { RectButton, ScrollView } from 'react-native-gesture-handler'; 6 | 7 | export default function LinksScreen() { 8 | return ( 9 | 10 | 11 | 12 | 20 | 21 | 22 | WebBrowser.openBrowserAsync('https://www.tensorflow.org/js')} 26 | /> 27 | 28 | WebBrowser.openBrowserAsync('https://github.com/tensorflow/tfjs-models/tree/master/mobilenet')} 32 | /> 33 | 34 | WebBrowser.openBrowserAsync('https://docs.expo.io')} 38 | /> 39 | 40 | WebBrowser.openBrowserAsync('https://reactnative.dev/docs/getting-started')} 44 | /> 45 | 46 | ); 47 | } 48 | 49 | function OptionButton({ icon, label, onPress, isLastOption }) { 50 | return ( 51 | 52 | 53 | 54 | 55 | 56 | 57 | {label} 58 | 59 | 60 | 61 | ); 62 | } 63 | 64 | const styles = StyleSheet.create({ 65 | container: { 66 | flex: 1, 67 | backgroundColor: '#fafafa', 68 | }, 69 | welcomeContainer: { 70 | alignItems: 'center', 71 | marginTop: 10, 72 | marginBottom: 20, 73 | }, 74 | welcomeImage: { 75 | width: 100, 76 | height: 80, 77 | resizeMode: 'contain', 78 | marginTop: 3, 79 | marginLeft: -10, 80 | }, 81 | contentContainer: { 82 | paddingTop: 30, 83 | }, 84 | optionIconContainer: { 85 | marginRight: 12, 86 | }, 87 | option: { 88 | backgroundColor: '#fdfdfd', 89 | paddingHorizontal: 15, 90 | paddingVertical: 15, 91 | borderWidth: StyleSheet.hairlineWidth, 92 | borderBottomWidth: 0, 93 | borderColor: '#ededed', 94 | }, 95 | 
lastOption: { 96 | borderBottomWidth: StyleSheet.hairlineWidth, 97 | }, 98 | optionText: { 99 | fontSize: 15, 100 | alignSelf: 'flex-start', 101 | marginTop: 1, 102 | }, 103 | }); 104 | -------------------------------------------------------------------------------- /mobile_apps/expo_cli_docker/README.md: -------------------------------------------------------------------------------- 1 | # Expo Cli for Docker 2 | 3 | This repository is for building a new Expo Cli docker image. It is based on the following great work: 4 | * C.V. Putten, https://github.com/byCedric/expo-cli-images 5 | * H. Majid, https://medium.com/@hmajid2301/running-expo-react-native-in-docker-ff9c4f2a4388 6 | 7 | The image to be built shall contain all necessary libraries to perform all commands of the Expo CLI. 8 | 9 | ## Supported versions 10 | 11 | image | node | expo | status 12 | --- | --- | --- | --- 13 | `zhangyuefeng123/expo-cli:3` | `13` | `3` | **latest** 14 | 15 | ## What's inside the Dokerfile? 16 | 17 | The entry point of this image forwards to the [Expo CLI][link-cli]. 18 | It automatically authenticates when both `EXPO_CLI_USERNAME` and `EXPO_CLI_PASSWORD` are defined. 19 | When these environment variables are undefined, it skips this step and forwards the command directly to Expo. 20 | 21 | As Majid pointed out, the REACT_NATIVE_PACKAGER_HOSTNAME environment variable in Dockerfile is very important because it sets which IP address Expo (cli) is running on, this is the IP Address your phone will try to connect to. If this is not set correctly, you’ll run into an error for the following reason. When you run in a Docker container you cannot connect to it because it’s trying to use the Docker IP address (one of the ones starting with 172.xxx.xxx.xxx). You can work out the correct IP address on Mac by using the ifconfig command. The IP address with en0 should be the host IP (10.0.1.198 on my Mac laptop). 22 | 23 | ## How to build a new image? 
24 | 25 | You can build a custom version of both node and expo-cli as below. 26 | It accepts both `NODE_VERSION` and `EXPO_VERSION` as build arguments. 27 | 28 | ```bash 29 | # create a node 10 and expo cli 2 image 30 | $ docker build . \ 31 | --build-arg NODE_VERSION=13 \ 32 | --build-arg EXPO_VERSION=3 \ 33 | --tag awsomeorg/expo-cli 34 | ``` 35 | 36 | ## How to run the new image? 37 | 38 | In order to run the new image in a container, you should go to your Expo project directory first and then run the following command: 39 | 40 | ```bash 41 | # perform authenticated expo commands directly 42 | $ docker run \ 43 | --tty \ 44 | --env EXPO_CLI_USERNAME=byyuefeng \ 45 | --env EXPO_CLI_PASSWORD=mypass \ 46 | awsomeorg/expo-cli publish 47 | 48 | # mount your project as volume and use the cli manually 49 | $ docker run \ 50 | --tty \ 51 | --interactive \ 52 | --workdir /code \ 53 | --volume $PWD:/code \ 54 | --env EXPO_CLI_USERNAME=byyuefeng \ 55 | --env EXPO_CLI_PASSWORD=mypass \ 56 | -p 19000:19000 -p 19001:19001 \ 57 | awsomeorg/expo-cli bash 58 | ``` 59 | 60 | ## How to start Expo project? 61 | 62 | Since the docker container workdir is mapped to the current directory (your Expo project location) on the host machine, you can start the Expo project as usual: 63 | 64 | ```bash 65 | $ expo start 66 | ``` 67 | 68 | ## License 69 | 70 | The MIT License (MIT). Please see [License File](LICENSE.md) for more information. 71 | 72 | --- --- 73 | 74 |

75 | with :heart: byCedric 76 |

77 | 78 | [link-cli]: https://docs.expo.io/versions/latest/workflow/expo-cli 79 | [link-docs]: https://docs.expo.io/versions/latest/guides/setting-up-continuous-integration 80 | -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/screens/LinksScreen.js: -------------------------------------------------------------------------------- 1 | import { Ionicons } from '@expo/vector-icons'; 2 | import * as WebBrowser from 'expo-web-browser'; 3 | import * as React from 'react'; 4 | import { Image, StyleSheet, Text, View } from 'react-native'; 5 | import { RectButton, ScrollView } from 'react-native-gesture-handler'; 6 | 7 | export default function LinksScreen() { 8 | return ( 9 | 10 | 11 | 12 | 20 | 21 | 22 | WebBrowser.openBrowserAsync('https://www.tensorflow.org/js')} 26 | /> 27 | 28 | WebBrowser.openBrowserAsync('https://arxiv.org/abs/1810.04805')} 32 | /> 33 | 34 | WebBrowser.openBrowserAsync('https://arxiv.org/abs/2004.02984')} 38 | /> 39 | 40 | WebBrowser.openBrowserAsync('https://github.com/tensorflow/tfjs-models/tree/master/qna')} 44 | /> 45 | 46 | WebBrowser.openBrowserAsync('https://docs.expo.io')} 50 | /> 51 | 52 | WebBrowser.openBrowserAsync('https://reactnative.dev/docs/getting-started')} 56 | /> 57 | 58 | ); 59 | } 60 | 61 | function OptionButton({ icon, label, onPress, isLastOption }) { 62 | return ( 63 | 64 | 65 | 66 | 67 | 68 | 69 | {label} 70 | 71 | 72 | 73 | ); 74 | } 75 | 76 | const styles = StyleSheet.create({ 77 | container: { 78 | flex: 1, 79 | backgroundColor: '#fafafa', 80 | }, 81 | welcomeContainer: { 82 | alignItems: 'center', 83 | marginTop: 10, 84 | marginBottom: 20, 85 | }, 86 | welcomeImage: { 87 | width: 100, 88 | height: 80, 89 | resizeMode: 'contain', 90 | marginTop: 3, 91 | marginLeft: -10, 92 | }, 93 | contentContainer: { 94 | paddingTop: 30, 95 | }, 96 | optionIconContainer: { 97 | marginRight: 12, 98 | }, 99 | option: { 100 | backgroundColor: '#fdfdfd', 101 | paddingHorizontal: 15, 
102 | paddingVertical: 15, 103 | borderWidth: StyleSheet.hairlineWidth, 104 | borderBottomWidth: 0, 105 | borderColor: '#ededed', 106 | }, 107 | lastOption: { 108 | borderBottomWidth: StyleSheet.hairlineWidth, 109 | }, 110 | optionText: { 111 | fontSize: 15, 112 | alignSelf: 'flex-start', 113 | marginTop: 1, 114 | }, 115 | }); 116 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/screens/HomeScreen.js: -------------------------------------------------------------------------------- 1 | import * as WebBrowser from 'expo-web-browser'; 2 | import * as React from 'react'; 3 | import { Image, Platform, StyleSheet, Text, TouchableOpacity, View } from 'react-native'; 4 | import { ScrollView } from 'react-native-gesture-handler'; 5 | 6 | import { MonoText } from '../components/StyledText'; 7 | 8 | export default function HomeScreen() { 9 | return ( 10 | 11 | 12 | 13 | 21 | 22 | 23 | 24 | 25 | This mobile application is to demonstrate how to use TensorFlow.js and a pre-trained 26 | deep convolutional neural network model MobileNet to classify images on mobile devices. 27 | It is developed in Expo and React-Native, and thus should work on both iOS and 28 | Android mobile devices. 29 | 30 | 31 | 32 | Click the image classification tab to start the demo. 33 | 34 | 35 | 36 | Click the references tab for more information. 
37 | 38 | 39 | 40 | 41 | 42 | ); 43 | } 44 | 45 | HomeScreen.navigationOptions = { 46 | header: null, 47 | }; 48 | 49 | const styles = StyleSheet.create({ 50 | container: { 51 | flex: 1, 52 | backgroundColor: '#fff', 53 | }, 54 | developmentModeText: { 55 | marginBottom: 20, 56 | color: 'rgba(0,0,0,0.4)', 57 | fontSize: 14, 58 | lineHeight: 19, 59 | textAlign: 'center', 60 | }, 61 | contentContainer: { 62 | paddingTop: 30, 63 | }, 64 | welcomeContainer: { 65 | alignItems: 'center', 66 | marginTop: 10, 67 | marginBottom: 20, 68 | }, 69 | welcomeImage: { 70 | width: 100, 71 | height: 80, 72 | resizeMode: 'contain', 73 | marginTop: 3, 74 | marginLeft: -10, 75 | }, 76 | getStartedContainer: { 77 | alignItems: 'center', 78 | marginHorizontal: 50, 79 | }, 80 | homeScreenFilename: { 81 | marginVertical: 7, 82 | }, 83 | codeHighlightText: { 84 | color: 'rgba(96,100,109, 0.8)', 85 | }, 86 | codeHighlightContainer: { 87 | backgroundColor: 'rgba(0,0,0,0.05)', 88 | borderRadius: 3, 89 | paddingHorizontal: 4, 90 | }, 91 | getStartedText: { 92 | fontSize: 17, 93 | color: 'rgba(96,100,109, 1)', 94 | lineHeight: 24, 95 | textAlign: 'center', 96 | }, 97 | tabBarInfoContainer: { 98 | position: 'absolute', 99 | bottom: 0, 100 | left: 0, 101 | right: 0, 102 | ...Platform.select({ 103 | ios: { 104 | shadowColor: 'black', 105 | shadowOffset: { width: 0, height: -3 }, 106 | shadowOpacity: 0.1, 107 | shadowRadius: 3, 108 | }, 109 | android: { 110 | elevation: 20, 111 | }, 112 | }), 113 | alignItems: 'center', 114 | backgroundColor: '#fbfbfb', 115 | paddingVertical: 20, 116 | }, 117 | tabBarInfoText: { 118 | fontSize: 17, 119 | color: 'rgba(96,100,109, 1)', 120 | textAlign: 'center', 121 | }, 122 | navigationFilename: { 123 | marginTop: 5, 124 | }, 125 | helpContainer: { 126 | marginTop: 15, 127 | alignItems: 'center', 128 | }, 129 | helpLink: { 130 | paddingVertical: 15, 131 | }, 132 | helpLinkText: { 133 | fontSize: 14, 134 | color: '#2e78b7', 135 | }, 136 | }); 137 | 
-------------------------------------------------------------------------------- /mobile_apps/objects_detection/screens/HomeScreen.js: -------------------------------------------------------------------------------- 1 | import * as WebBrowser from 'expo-web-browser'; 2 | import * as React from 'react'; 3 | import { Image, Platform, StyleSheet, Text, TouchableOpacity, View } from 'react-native'; 4 | import { ScrollView } from 'react-native-gesture-handler'; 5 | 6 | import { MonoText } from '../components/StyledText'; 7 | 8 | export default function HomeScreen() { 9 | return ( 10 | 11 | 12 | 13 | 21 | 22 | 23 | 24 | 25 | This mobile application is to demonstrate how to use TensorFlow.js and a pre-trained 26 | deep convolutional neural network model COCO-SSD to detect objects from an image on mobile devices. 27 | It is developed in Expo and React-Native, and thus should work on both iOS and 28 | Android mobile devices. 29 | 30 | 31 | 32 | Click the COCO-SSD tab to start the demo. 33 | 34 | 35 | 36 | Click the references tab for more information. 
37 | 38 | 39 | 40 | 41 | 42 | ); 43 | } 44 | 45 | HomeScreen.navigationOptions = { 46 | header: null, 47 | }; 48 | 49 | const styles = StyleSheet.create({ 50 | container: { 51 | flex: 1, 52 | backgroundColor: '#fff', 53 | }, 54 | developmentModeText: { 55 | marginBottom: 20, 56 | color: 'rgba(0,0,0,0.4)', 57 | fontSize: 14, 58 | lineHeight: 19, 59 | textAlign: 'center', 60 | }, 61 | contentContainer: { 62 | paddingTop: 30, 63 | }, 64 | welcomeContainer: { 65 | alignItems: 'center', 66 | marginTop: 10, 67 | marginBottom: 20, 68 | }, 69 | welcomeImage: { 70 | width: 100, 71 | height: 80, 72 | resizeMode: 'contain', 73 | marginTop: 3, 74 | marginLeft: -10, 75 | }, 76 | getStartedContainer: { 77 | alignItems: 'center', 78 | marginHorizontal: 50, 79 | }, 80 | homeScreenFilename: { 81 | marginVertical: 7, 82 | }, 83 | codeHighlightText: { 84 | color: 'rgba(96,100,109, 0.8)', 85 | }, 86 | codeHighlightContainer: { 87 | backgroundColor: 'rgba(0,0,0,0.05)', 88 | borderRadius: 3, 89 | paddingHorizontal: 4, 90 | }, 91 | getStartedText: { 92 | fontSize: 17, 93 | color: 'rgba(96,100,109, 1)', 94 | lineHeight: 24, 95 | textAlign: 'center', 96 | }, 97 | tabBarInfoContainer: { 98 | position: 'absolute', 99 | bottom: 0, 100 | left: 0, 101 | right: 0, 102 | ...Platform.select({ 103 | ios: { 104 | shadowColor: 'black', 105 | shadowOffset: { width: 0, height: -3 }, 106 | shadowOpacity: 0.1, 107 | shadowRadius: 3, 108 | }, 109 | android: { 110 | elevation: 20, 111 | }, 112 | }), 113 | alignItems: 'center', 114 | backgroundColor: '#fbfbfb', 115 | paddingVertical: 20, 116 | }, 117 | tabBarInfoText: { 118 | fontSize: 17, 119 | color: 'rgba(96,100,109, 1)', 120 | textAlign: 'center', 121 | }, 122 | navigationFilename: { 123 | marginTop: 5, 124 | }, 125 | helpContainer: { 126 | marginTop: 15, 127 | alignItems: 'center', 128 | }, 129 | helpLink: { 130 | paddingVertical: 15, 131 | }, 132 | helpLinkText: { 133 | fontSize: 14, 134 | color: '#2e78b7', 135 | }, 136 | }); 137 | 
-------------------------------------------------------------------------------- /mobile_apps/mobile_bert/screens/HomeScreen.js: -------------------------------------------------------------------------------- 1 | import * as WebBrowser from 'expo-web-browser'; 2 | import * as React from 'react'; 3 | import { Image, Platform, StyleSheet, Text, TouchableOpacity, View } from 'react-native'; 4 | import { ScrollView } from 'react-native-gesture-handler'; 5 | 6 | import { MonoText } from '../components/StyledText'; 7 | 8 | export default function HomeScreen() { 9 | return ( 10 | 11 | 12 | 13 | 21 | 22 | 23 | 24 | 25 | This mobile application is to demonstrate how to use TensorFlow.js and a pre-trained 26 | deep natural language processing model MobileBERT to answer question on a given related passage 27 | on mobile devices. It is developed in Expo and React-Native, and thus should work on both iOS and 28 | Android mobile devices. 29 | 30 | 31 | 32 | Click the Qna tab to start the demo. 33 | 34 | 35 | 36 | Click the references tab for more information. 
37 | 38 | 39 | 40 | 41 | 42 | ); 43 | } 44 | 45 | HomeScreen.navigationOptions = { 46 | header: null, 47 | }; 48 | 49 | const styles = StyleSheet.create({ 50 | container: { 51 | flex: 1, 52 | backgroundColor: '#fff', 53 | }, 54 | developmentModeText: { 55 | marginBottom: 20, 56 | color: 'rgba(0,0,0,0.4)', 57 | fontSize: 14, 58 | lineHeight: 19, 59 | textAlign: 'center', 60 | }, 61 | contentContainer: { 62 | paddingTop: 30, 63 | }, 64 | welcomeContainer: { 65 | alignItems: 'center', 66 | marginTop: 10, 67 | marginBottom: 20, 68 | }, 69 | welcomeImage: { 70 | width: 100, 71 | height: 80, 72 | resizeMode: 'contain', 73 | marginTop: 3, 74 | marginLeft: -10, 75 | }, 76 | getStartedContainer: { 77 | alignItems: 'center', 78 | marginHorizontal: 50, 79 | }, 80 | homeScreenFilename: { 81 | marginVertical: 7, 82 | }, 83 | codeHighlightText: { 84 | color: 'rgba(96,100,109, 0.8)', 85 | }, 86 | codeHighlightContainer: { 87 | backgroundColor: 'rgba(0,0,0,0.05)', 88 | borderRadius: 3, 89 | paddingHorizontal: 4, 90 | }, 91 | getStartedText: { 92 | fontSize: 17, 93 | color: 'rgba(96,100,109, 1)', 94 | lineHeight: 24, 95 | textAlign: 'center', 96 | }, 97 | tabBarInfoContainer: { 98 | position: 'absolute', 99 | bottom: 0, 100 | left: 0, 101 | right: 0, 102 | ...Platform.select({ 103 | ios: { 104 | shadowColor: 'black', 105 | shadowOffset: { width: 0, height: -3 }, 106 | shadowOpacity: 0.1, 107 | shadowRadius: 3, 108 | }, 109 | android: { 110 | elevation: 20, 111 | }, 112 | }), 113 | alignItems: 'center', 114 | backgroundColor: '#fbfbfb', 115 | paddingVertical: 20, 116 | }, 117 | tabBarInfoText: { 118 | fontSize: 17, 119 | color: 'rgba(96,100,109, 1)', 120 | textAlign: 'center', 121 | }, 122 | navigationFilename: { 123 | marginTop: 5, 124 | }, 125 | helpContainer: { 126 | marginTop: 15, 127 | alignItems: 'center', 128 | }, 129 | helpLink: { 130 | paddingVertical: 15, 131 | }, 132 | helpLinkText: { 133 | fontSize: 14, 134 | color: '#2e78b7', 135 | }, 136 | }); 137 | 
-------------------------------------------------------------------------------- /deep_learning/Multi_Layer_Perceptron/breast-cancer-wisconsin/breast-cancer-wisconsin.names.txt: -------------------------------------------------------------------------------- 1 | Citation Request: 2 | This breast cancer databases was obtained from the University of Wisconsin 3 | Hospitals, Madison from Dr. William H. Wolberg. If you publish results 4 | when using this database, then please include this information in your 5 | acknowledgements. Also, please cite one or more of: 6 | 7 | 1. O. L. Mangasarian and W. H. Wolberg: "Cancer diagnosis via linear 8 | programming", SIAM News, Volume 23, Number 5, September 1990, pp 1 & 18. 9 | 10 | 2. William H. Wolberg and O.L. Mangasarian: "Multisurface method of 11 | pattern separation for medical diagnosis applied to breast cytology", 12 | Proceedings of the National Academy of Sciences, U.S.A., Volume 87, 13 | December 1990, pp 9193-9196. 14 | 15 | 3. O. L. Mangasarian, R. Setiono, and W.H. Wolberg: "Pattern recognition 16 | via linear programming: Theory and application to medical diagnosis", 17 | in: "Large-scale numerical optimization", Thomas F. Coleman and Yuying 18 | Li, editors, SIAM Publications, Philadelphia 1990, pp 22-30. 19 | 20 | 4. K. P. Bennett & O. L. Mangasarian: "Robust linear programming 21 | discrimination of two linearly inseparable sets", Optimization Methods 22 | and Software 1, 1992, 23-34 (Gordon & Breach Science Publishers). 23 | 24 | 1. Title: Wisconsin Breast Cancer Database (January 8, 1991) 25 | 26 | 2. Sources: 27 | -- Dr. WIlliam H. Wolberg (physician) 28 | University of Wisconsin Hospitals 29 | Madison, Wisconsin 30 | USA 31 | -- Donor: Olvi Mangasarian (mangasarian@cs.wisc.edu) 32 | Received by David W. Aha (aha@cs.jhu.edu) 33 | -- Date: 15 July 1992 34 | 35 | 3. Past Usage: 36 | 37 | Attributes 2 through 10 have been used to represent instances. 
38 | Each instance has one of 2 possible classes: benign or malignant. 39 | 40 | 1. Wolberg,~W.~H., \& Mangasarian,~O.~L. (1990). Multisurface method of 41 | pattern separation for medical diagnosis applied to breast cytology. In 42 | {\it Proceedings of the National Academy of Sciences}, {\it 87}, 43 | 9193--9196. 44 | -- Size of data set: only 369 instances (at that point in time) 45 | -- Collected classification results: 1 trial only 46 | -- Two pairs of parallel hyperplanes were found to be consistent with 47 | 50% of the data 48 | -- Accuracy on remaining 50% of dataset: 93.5% 49 | -- Three pairs of parallel hyperplanes were found to be consistent with 50 | 67% of data 51 | -- Accuracy on remaining 33% of dataset: 95.9% 52 | 53 | 2. Zhang,~J. (1992). Selecting typical instances in instance-based 54 | learning. In {\it Proceedings of the Ninth International Machine 55 | Learning Conference} (pp. 470--479). Aberdeen, Scotland: Morgan 56 | Kaufmann. 57 | -- Size of data set: only 369 instances (at that point in time) 58 | -- Applied 4 instance-based learning algorithms 59 | -- Collected classification results averaged over 10 trials 60 | -- Best accuracy result: 61 | -- 1-nearest neighbor: 93.7% 62 | -- trained on 200 instances, tested on the other 169 63 | -- Also of interest: 64 | -- Using only typical instances: 92.2% (storing only 23.1 instances) 65 | -- trained on 200 instances, tested on the other 169 66 | 67 | 4. Relevant Information: 68 | 69 | Samples arrive periodically as Dr. Wolberg reports his clinical cases. 70 | The database therefore reflects this chronological grouping of the data. 
71 | This grouping information appears immediately below, having been removed 72 | from the data itself: 73 | 74 | Group 1: 367 instances (January 1989) 75 | Group 2: 70 instances (October 1989) 76 | Group 3: 31 instances (February 1990) 77 | Group 4: 17 instances (April 1990) 78 | Group 5: 48 instances (August 1990) 79 | Group 6: 49 instances (Updated January 1991) 80 | Group 7: 31 instances (June 1991) 81 | Group 8: 86 instances (November 1991) 82 | ----------------------------------------- 83 | Total: 699 points (as of the donated datbase on 15 July 1992) 84 | 85 | Note that the results summarized above in Past Usage refer to a dataset 86 | of size 369, while Group 1 has only 367 instances. This is because it 87 | originally contained 369 instances; 2 were removed. The following 88 | statements summarizes changes to the original Group 1's set of data: 89 | 90 | ##### Group 1 : 367 points: 200B 167M (January 1989) 91 | ##### Revised Jan 10, 1991: Replaced zero bare nuclei in 1080185 & 1187805 92 | ##### Revised Nov 22,1991: Removed 765878,4,5,9,7,10,10,10,3,8,1 no record 93 | ##### : Removed 484201,2,7,8,8,4,3,10,3,4,1 zero epithelial 94 | ##### : Changed 0 to 1 in field 6 of sample 1219406 95 | ##### : Changed 0 to 1 in field 8 of following sample: 96 | ##### : 1182404,2,3,1,1,1,2,0,1,1,1 97 | 98 | 5. Number of Instances: 699 (as of 15 July 1992) 99 | 100 | 6. Number of Attributes: 10 plus the class attribute 101 | 102 | 7. Attribute Information: (class attribute has been moved to last column) 103 | 104 | # Attribute Domain 105 | -- ----------------------------------------- 106 | 1. Sample code number id number 107 | 2. Clump Thickness 1 - 10 108 | 3. Uniformity of Cell Size 1 - 10 109 | 4. Uniformity of Cell Shape 1 - 10 110 | 5. Marginal Adhesion 1 - 10 111 | 6. Single Epithelial Cell Size 1 - 10 112 | 7. Bare Nuclei 1 - 10 113 | 8. Bland Chromatin 1 - 10 114 | 9. Normal Nucleoli 1 - 10 115 | 10. Mitoses 1 - 10 116 | 11. 
Class: (2 for benign, 4 for malignant) 117 | 118 | 8. Missing attribute values: 16 119 | 120 | There are 16 instances in Groups 1 to 6 that contain a single missing 121 | (i.e., unavailable) attribute value, now denoted by "?". 122 | 123 | 9. Class distribution: 124 | 125 | Benign: 458 (65.5%) 126 | Malignant: 241 (34.5%) 127 | -------------------------------------------------------------------------------- /mobile_apps/image_classification/screens/ImageClassificationScreen.js: -------------------------------------------------------------------------------- 1 | 2 | import React, { Component } from 'react'; 3 | import { StyleSheet, Text, View, StatusBar, ActivityIndicator, TouchableOpacity, Image } from 'react-native' 4 | import { ScrollView } from 'react-native-gesture-handler'; 5 | 6 | import * as tf from '@tensorflow/tfjs'; 7 | import '@tensorflow/tfjs-react-native'; 8 | 9 | import * as mobilenet from '@tensorflow-models/mobilenet' 10 | 11 | import Constants from 'expo-constants' 12 | import * as Permissions from 'expo-permissions' 13 | import * as jpeg from 'jpeg-js' 14 | import * as ImagePicker from 'expo-image-picker' 15 | 16 | import { fetch } from '@tensorflow/tfjs-react-native' 17 | 18 | export default class ImageClassificationScreen extends React.Component { 19 | constructor(props) { 20 | super(props); 21 | this.state = { 22 | isTfReady: false, 23 | isModelReady: false, 24 | predictions: null, 25 | image: null 26 | }; 27 | } 28 | 29 | async componentDidMount() { 30 | await tf.ready(); // preparing TensorFlow 31 | this.setState({ isTfReady: true,}); 32 | 33 | this.model = await mobilenet.load(); // preparing MobileNet model 34 | this.setState({ isModelReady: true }); 35 | 36 | this.getPermissionAsync(); // get permission for accessing camera on mobile device 37 | } 38 | 39 | getPermissionAsync = async () => { 40 | if (Constants.platform.ios) { 41 | const { status } = await Permissions.askAsync(Permissions.CAMERA_ROLL) 42 | if (status !== 'granted') { 43 | 
alert('Please grant camera roll permission for this project!') 44 | } 45 | } 46 | } 47 | 48 | imageToTensor(rawImageData) { 49 | const TO_UINT8ARRAY = true 50 | const { width, height, data } = jpeg.decode(rawImageData, TO_UINT8ARRAY) 51 | // Drop the alpha channel info for mobilenet 52 | const buffer = new Uint8Array(width * height * 3) 53 | let offset = 0 // offset into original data 54 | for (let i = 0; i < buffer.length; i += 3) { 55 | buffer[i] = data[offset] 56 | buffer[i + 1] = data[offset + 1] 57 | buffer[i + 2] = data[offset + 2] 58 | 59 | offset += 4 60 | } 61 | 62 | return tf.tensor3d(buffer, [height, width, 3]) 63 | } 64 | 65 | classifyImage = async () => { 66 | try { 67 | const imageAssetPath = Image.resolveAssetSource(this.state.image) 68 | const response = await fetch(imageAssetPath.uri, {}, { isBinary: true }) 69 | const rawImageData = await response.arrayBuffer() 70 | const imageTensor = this.imageToTensor(rawImageData) 71 | const predictions = await this.model.classify(imageTensor) 72 | this.setState({ predictions: predictions }) 73 | } catch (error) { 74 | console.log('Exception Error: ', error) 75 | } 76 | } 77 | 78 | selectImage = async () => { 79 | try { 80 | let response = await ImagePicker.launchImageLibraryAsync({ 81 | mediaTypes: ImagePicker.MediaTypeOptions.All, 82 | allowsEditing: true, 83 | aspect: [4, 3] 84 | }) 85 | 86 | if (!response.cancelled) { 87 | const source = { uri: response.uri } 88 | this.setState({ image: source }) 89 | this.classifyImage() 90 | } 91 | } catch (error) { 92 | console.log(error) 93 | } 94 | } 95 | 96 | renderPrediction = (prediction) => { 97 | return ( 98 | 99 | 100 | Prediction: {prediction.className} {', '} Probability: {prediction.probability} 101 | 102 | 103 | ) 104 | } 105 | 106 | render() { 107 | const { isTfReady, isModelReady, predictions, image } = this.state 108 | 109 | return ( 110 | 111 | 112 | 113 | 114 | 122 | 123 | 124 | 125 | 126 | TensorFlow.js ready? {isTfReady ? 
: ''} 127 | 128 | 129 | 130 | MobileNet model ready? 131 | {isModelReady ? ( 132 | 133 | ) : ( 134 | 135 | )} 136 | 137 | 138 | 141 | {image && } 142 | 143 | {isModelReady && !image && ( 144 | Tap to choose image 145 | )} 146 | 147 | 148 | {isModelReady && image && ( 149 | 150 | Predictions: {predictions ? '' : 'Predicting...'} 151 | 152 | )} 153 | {isModelReady && 154 | predictions && 155 | predictions.map(p => this.renderPrediction(p))} 156 | 157 | 158 | 159 | 160 | ) 161 | } 162 | } 163 | 164 | ImageClassificationScreen.navigationOptions = { 165 | header: null, 166 | }; 167 | 168 | const styles = StyleSheet.create({ 169 | container: { 170 | flex: 1, 171 | backgroundColor: '#171f24' 172 | }, 173 | welcomeContainer: { 174 | alignItems: 'center', 175 | marginTop: 10, 176 | marginBottom: 20, 177 | }, 178 | welcomeImage: { 179 | width: 100, 180 | height: 80, 181 | resizeMode: 'contain', 182 | marginTop: 3, 183 | marginLeft: -10, 184 | }, 185 | contentContainer: { 186 | paddingTop: 30, 187 | }, 188 | loadingContainer: { 189 | marginTop: 80, 190 | justifyContent: 'center' 191 | }, 192 | text: { 193 | color: '#ffffff', 194 | fontSize: 16 195 | }, 196 | loadingModelContainer: { 197 | flexDirection: 'row', 198 | marginTop: 10 199 | }, 200 | imageWrapper: { 201 | width: 280, 202 | height: 280, 203 | padding: 10, 204 | borderColor: '#cf667f', 205 | borderWidth: 5, 206 | borderStyle: 'dashed', 207 | marginTop: 40, 208 | marginBottom: 10, 209 | position: 'relative', 210 | justifyContent: 'center', 211 | alignItems: 'center' 212 | }, 213 | imageContainer: { 214 | width: 250, 215 | height: 250, 216 | position: 'absolute', 217 | top: 10, 218 | left: 10, 219 | bottom: 10, 220 | right: 10 221 | }, 222 | predictionWrapper: { 223 | height: 100, 224 | width: '100%', 225 | flexDirection: 'column', 226 | alignItems: 'center' 227 | }, 228 | transparentText: { 229 | color: '#ffffff', 230 | opacity: 0.7 231 | }, 232 | footer: { 233 | marginTop: 40 234 | }, 235 | poweredBy: { 236 | 
fontSize: 20, 237 | color: '#e69e34', 238 | marginBottom: 6 239 | }, 240 | tfLogo: { 241 | width: 125, 242 | height: 70 243 | } 244 | }) -------------------------------------------------------------------------------- /mobile_apps/objects_detection/screens/CocoSsdScreen.js: -------------------------------------------------------------------------------- 1 | 2 | import React, { Component } from 'react'; 3 | import { StyleSheet, Text, View, StatusBar, ActivityIndicator, TouchableOpacity, Image } from 'react-native' 4 | import { ScrollView } from 'react-native-gesture-handler'; 5 | 6 | import * as tf from '@tensorflow/tfjs'; 7 | import '@tensorflow/tfjs-react-native'; 8 | 9 | // import * as mobilenet from '@tensorflow-models/mobilenet' 10 | // see https://github.com/tensorflow/tfjs-models 11 | import * as cocossd from '@tensorflow-models/coco-ssd' 12 | 13 | import Constants from 'expo-constants' 14 | import * as Permissions from 'expo-permissions' 15 | import * as jpeg from 'jpeg-js' 16 | import * as ImagePicker from 'expo-image-picker' 17 | 18 | import { fetch } from '@tensorflow/tfjs-react-native' 19 | 20 | export default class CocoSsdScreen extends React.Component { 21 | constructor(props) { 22 | super(props); 23 | this.state = { 24 | isTfReady: false, 25 | isModelReady: false, 26 | predictions: null, 27 | image: null 28 | }; 29 | } 30 | 31 | async componentDidMount() { 32 | await tf.ready(); // preparing TensorFlow 33 | this.setState({ isTfReady: true,}); 34 | 35 | // this.model = await mobilenet.load(); // preparing MobileNet model 36 | this.model = await cocossd.load(); // preparing COCO-SSD model 37 | this.setState({ isModelReady: true }); 38 | 39 | this.getPermissionAsync(); // get permission for accessing camera on mobile device 40 | } 41 | 42 | getPermissionAsync = async () => { 43 | if (Constants.platform.ios) { 44 | const { status } = await Permissions.askAsync(Permissions.CAMERA_ROLL) 45 | if (status !== 'granted') { 46 | alert('Please grant camera roll 
permission for this project!') 47 | } 48 | } 49 | } 50 | 51 | imageToTensor(rawImageData) { 52 | const TO_UINT8ARRAY = true 53 | const { width, height, data } = jpeg.decode(rawImageData, TO_UINT8ARRAY) 54 | // Drop the alpha channel info for mobilenet 55 | const buffer = new Uint8Array(width * height * 3) 56 | let offset = 0 // offset into original data 57 | for (let i = 0; i < buffer.length; i += 3) { 58 | buffer[i] = data[offset] 59 | buffer[i + 1] = data[offset + 1] 60 | buffer[i + 2] = data[offset + 2] 61 | 62 | offset += 4 63 | } 64 | 65 | return tf.tensor3d(buffer, [height, width, 3]) 66 | } 67 | 68 | detectObjects = async () => { 69 | try { 70 | const imageAssetPath = Image.resolveAssetSource(this.state.image) 71 | 72 | const response = await fetch(imageAssetPath.uri, {}, { isBinary: true }) 73 | const rawImageData = await response.arrayBuffer() 74 | const imageTensor = this.imageToTensor(rawImageData) 75 | const predictions = await this.model.detect(imageTensor) 76 | 77 | this.setState({ predictions: predictions }) 78 | // this.setState({ image_uri: imageAssetPath.uri }) 79 | 80 | console.log('----------- predictions: ', predictions); 81 | 82 | } catch (error) { 83 | console.log('Exception Error: ', error) 84 | } 85 | } 86 | 87 | selectImage = async () => { 88 | try { 89 | let response = await ImagePicker.launchImageLibraryAsync({ 90 | mediaTypes: ImagePicker.MediaTypeOptions.All, 91 | allowsEditing: true, 92 | aspect: [4, 3] 93 | }) 94 | 95 | if (!response.cancelled) { 96 | const source = { uri: response.uri } 97 | this.setState({ image: source }) 98 | this.detectObjects() 99 | } 100 | } catch (error) { 101 | console.log(error) 102 | } 103 | } 104 | 105 | /* 106 | [{ 107 | bbox: [x, y, width, height], 108 | class: "person", 109 | score: 0.8380282521247864 110 | }, { 111 | bbox: [x, y, width, height], 112 | class: "kite", 113 | score: 0.74644153267145157 114 | }] 115 | */ 116 | renderPrediction = (prediction, index) => { 117 | const pclass = 
prediction.class; 118 | const score = prediction.score; 119 | const x = prediction.bbox[0]; 120 | const y = prediction.bbox[1]; 121 | const w = prediction.bbox[2]; 122 | const h = prediction.bbox[3]; 123 | 124 | return ( 125 | 126 | 127 | Prediction: {pclass} {', '} Probability: {score} {', '} Bbox: {x} {', '} {y} {', '} {w} {', '} {h} 128 | 129 | 130 | ) 131 | } 132 | 133 | render() { 134 | const { isTfReady, isModelReady, predictions, image } = this.state 135 | 136 | return ( 137 | 138 | 139 | 140 | 141 | 149 | 150 | 151 | 152 | 153 | TensorFlow.js ready? {isTfReady ? : ''} 154 | 155 | 156 | 157 | COCO-SSD model ready? 158 | {isModelReady ? ( 159 | 160 | ) : ( 161 | 162 | )} 163 | 164 | 165 | 168 | {image && } 169 | 170 | {isModelReady && !image && ( 171 | Tap to choose image 172 | )} 173 | 174 | 175 | {isModelReady && image && ( 176 | 177 | Predictions: {predictions ? '' : 'Detecting...'} 178 | 179 | )} 180 | 181 | {isModelReady && 182 | predictions && 183 | predictions.map((p, index) => this.renderPrediction(p, index))} 184 | 185 | 186 | 187 | 188 | ) 189 | } 190 | } 191 | 192 | CocoSsdScreen.navigationOptions = { 193 | header: null, 194 | }; 195 | 196 | const styles = StyleSheet.create({ 197 | container: { 198 | flex: 1, 199 | backgroundColor: '#171f24' 200 | }, 201 | welcomeContainer: { 202 | alignItems: 'center', 203 | marginTop: 10, 204 | marginBottom: 20, 205 | }, 206 | welcomeImage: { 207 | width: 100, 208 | height: 80, 209 | resizeMode: 'contain', 210 | marginTop: 3, 211 | marginLeft: -10, 212 | }, 213 | contentContainer: { 214 | paddingTop: 30, 215 | }, 216 | loadingContainer: { 217 | marginTop: 80, 218 | justifyContent: 'center' 219 | }, 220 | text: { 221 | color: '#ffffff', 222 | fontSize: 16 223 | }, 224 | loadingModelContainer: { 225 | flexDirection: 'row', 226 | marginTop: 10 227 | }, 228 | imageWrapper: { 229 | width: 280, 230 | height: 280, 231 | padding: 10, 232 | borderColor: '#cf667f', 233 | borderWidth: 5, 234 | borderStyle: 'dashed', 235 | 
marginTop: 40, 236 | marginBottom: 10, 237 | position: 'relative', 238 | justifyContent: 'center', 239 | alignItems: 'center' 240 | }, 241 | imageContainer: { 242 | width: 250, 243 | height: 250, 244 | position: 'absolute', 245 | top: 10, 246 | left: 10, 247 | bottom: 10, 248 | right: 10 249 | }, 250 | predictionWrapper: { 251 | height: 100, 252 | width: '100%', 253 | flexDirection: 'column', 254 | alignItems: 'center' 255 | }, 256 | transparentText: { 257 | color: '#ffffff', 258 | opacity: 0.7 259 | }, 260 | footer: { 261 | marginTop: 40 262 | }, 263 | poweredBy: { 264 | fontSize: 20, 265 | color: '#e69e34', 266 | marginBottom: 6 267 | }, 268 | tfLogo: { 269 | width: 125, 270 | height: 70 271 | } 272 | }) -------------------------------------------------------------------------------- /mobile_apps/mobile_bert/screens/QnaScreen.js: -------------------------------------------------------------------------------- 1 | 2 | import React, { Component } from 'react'; 3 | import { StyleSheet, Text, TextInput, Image, Button, View, StatusBar, ActivityIndicator, TouchableOpacity } from 'react-native' 4 | import { ScrollView } from 'react-native-gesture-handler'; 5 | 6 | import * as tf from '@tensorflow/tfjs'; 7 | import '@tensorflow/tfjs-react-native'; 8 | 9 | import * as qna from '@tensorflow-models/qna' 10 | 11 | import Constants from 'expo-constants' 12 | 13 | export default class QnaScreen extends React.Component { 14 | constructor(props) { 15 | super(props); 16 | this.state = { 17 | isTfReady: false, 18 | isModelReady: false, 19 | default_question: "Who is the CEO of Google?", 20 | default_passage: "Google LLC is an American multinational technology company that specializes in Internet-related services and products, which include online advertising technologies, search engine, cloud computing, software, and hardware. It is considered one of the Big Four technology companies, alongside Amazon, Apple, and Facebook. 
Google was founded in September 1998 by Larry Page and Sergey Brin while they were Ph.D. students at Stanford University in California. Together they own about 14 percent of its shares and control 56 percent of the stockholder voting power through supervoting stock. They incorporated Google as a California privately held company on September 4, 1998, in California. Google was then reincorporated in Delaware on October 22, 2002. An initial public offering (IPO) took place on August 19, 2004, and Google moved to its headquarters in Mountain View, California, nicknamed the Googleplex. In August 2015, Google announced plans to reorganize its various interests as a conglomerate called Alphabet Inc. Google is Alphabet's leading subsidiary and will continue to be the umbrella company for Alphabet's Internet interests. Sundar Pichai was appointed CEO of Google, replacing Larry Page who became the CEO of Alphabet.", 21 | question: ' default question', 22 | passage: ' default pessage', 23 | answers: null, 24 | }; 25 | } 26 | 27 | async componentDidMount() { 28 | await tf.ready(); // preparing TensorFlow 29 | this.setState({ isTfReady: true,}); 30 | 31 | this.model = await qna.load(); // preparing MobileBERT model qna 32 | this.setState({ isModelReady: true }); 33 | } 34 | 35 | findAnswers = async () => { 36 | try { 37 | const question = this.state.default_question; 38 | const passage = this.state.default_passage; 39 | 40 | const answers = await this.model.findAnswers(question, passage); 41 | 42 | console.log('answers: '); 43 | console.log(answers); 44 | 45 | return answers; 46 | 47 | } catch (error) { 48 | console.log('Exception Error: ', error) 49 | } 50 | } 51 | 52 | /* 53 | answers: 54 | 55 | Array [ 56 | Object { 57 | "endIndex": 1206, 58 | "score": 12.2890625, 59 | "startIndex": 1186, 60 | "text": "replacing Larry Page", 61 | }, 62 | Object { 63 | "endIndex": 1206, 64 | "score": 10.87109375, 65 | "startIndex": 1150, 66 | "text": "Pichai was appointed CEO of Google, 
replacing Larry Page", 67 | }, 68 | Object { 69 | "endIndex": 1206, 70 | "score": 9.658203125, 71 | "startIndex": 1196, 72 | "text": "Larry Page", 73 | }, 74 | Object { 75 | "endIndex": 1156, 76 | "score": 5.2802734375, 77 | "startIndex": 1150, 78 | "text": "Pichai", 79 | }, 80 | ] 81 | */ 82 | renderAnswer = (answer, index) => { 83 | const text = answer.text; 84 | const score = answer.score; 85 | const startIndex = answer.startIndex; 86 | const endIndex = answer.endIndex; 87 | 88 | return ( 89 | 90 | 91 | Answer: {text} {', '} Probability: {score} {', '} start: {startIndex} {', '} end: {endIndex} 92 | 93 | 94 | ) 95 | } 96 | 97 | render() { 98 | const { isTfReady, isModelReady, passage, question, answers } = this.state 99 | 100 | const onPress = () => { 101 | this.findAnswers().then((the_answers) => { 102 | this.setState({ answers: the_answers }); 103 | }) 104 | } 105 | 106 | return ( 107 | 108 | 109 | 110 | 111 | 119 | 120 | 121 | 122 | 123 | TensorFlow.js ready? {isTfReady ? : ''} 124 | 125 | 126 | 127 | MobileBERT model qna ready? 128 | {isModelReady ? ( 129 | 130 | ) : ( 131 | 132 | )} 133 | 134 | 135 | 136 | 137 | 138 | Passage: 139 | 140 | this.setState({passage: text})} 146 | value={this.state.passage} 147 | /> 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | Question: 159 | 160 | 161 | this.setState({question: text})} 165 | value={this.state.question} 166 | /> 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 |