├── server
│   ├── .gitignore
│   ├── __pycache__
│   │   ├── main.cpython-310.pyc
│   │   └── main.cpython-311.pyc
│   ├── requirements.txt
│   ├── .dockerignore
│   ├── README.Docker.md
│   ├── compose.yaml
│   ├── Dockerfile
│   └── main.py
├── client
│   ├── src
│   │   ├── context
│   │   │   ├── AppContext.jsx
│   │   │   ├── PermissionsContext.jsx
│   │   │   ├── FormContext.jsx
│   │   │   └── ImageContext.jsx
│   │   ├── constants
│   │   │   ├── color.js
│   │   │   └── svg.js
│   │   ├── assets
│   │   │   └── svg
│   │   │       ├── camera-icon.svg
│   │   │       ├── gallery-icon.svg
│   │   │       ├── allowgallery.svg
│   │   │       └── allowcamera.svg
│   │   ├── components
│   │   │   ├── input
│   │   │   │   ├── Slider.jsx
│   │   │   │   ├── Input.jsx
│   │   │   │   └── Checkbox.jsx
│   │   │   ├── ProgressStepBar.jsx
│   │   │   ├── Footer.jsx
│   │   │   ├── button
│   │   │   │   ├── SecondaryButton.jsx
│   │   │   │   └── PrimaryButton.jsx
│   │   │   └── common
│   │   │       └── BaseScreen.jsx
│   │   ├── helpers
│   │   │   └── scale.js
│   │   ├── navigation
│   │   │   └── MainNavigator.jsx
│   │   ├── screens
│   │   │   ├── DiagnosisScreen.jsx
│   │   │   ├── Welcome.jsx
│   │   │   ├── GrantGalleryPermissionScreen.jsx
│   │   │   ├── GrantCameraPermissionScreen.jsx
│   │   │   ├── FormScreen.jsx
│   │   │   └── ScanPhotoScreen.jsx
│   │   └── hooks
│   │       └── useGlobalStyle.js
│   ├── assets
│   │   ├── icon.png
│   │   ├── splash.png
│   │   ├── favicon.png
│   │   └── adaptive-icon.png
│   ├── .prettierrc
│   ├── eas.json
│   ├── .gitignore
│   ├── js.config.json
│   ├── babel.config.js
│   ├── metro.config.js
│   ├── App.js
│   ├── app.json
│   └── package.json
├── model
│   ├── .gitignore
│   ├── __pycache__
│   │   ├── handcrafted.cpython-311.pyc
│   │   ├── modelcrafted.cpython-311.pyc
│   │   └── imgCropBinaryMask.cpython-311.pyc
│   ├── .dockerignore
│   ├── README.Docker.md
│   ├── requirements.txt
│   ├── compose.yaml
│   ├── smote.py
│   ├── Dockerfile
│   ├── modelcrafted.py
│   ├── fusion_stage.py
│   ├── python-compile-scripts
│   │   └── pycompiler-args.py
│   ├── notebooks
│   │   ├── test_dataset.ipynb
│   │   └── handcrafted.ipynb
│   ├── img-crop-binary-mask.py
│   ├── handcrafted.py
│   └── imgCropBinaryMask.py
├── README.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
└── LICENSE
/server/.gitignore:
--------------------------------------------------------------------------------
1 | *pyc
--------------------------------------------------------------------------------
/client/src/context/AppContext.jsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/model/.gitignore:
--------------------------------------------------------------------------------
1 | __pyache__
2 | notebooks
--------------------------------------------------------------------------------
/client/assets/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/client/assets/icon.png
--------------------------------------------------------------------------------
/client/assets/splash.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/client/assets/splash.png
--------------------------------------------------------------------------------
/client/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "jsxBracketSameLine": false,
3 | "semi": true,
4 | "singleQuote": true
5 | }
6 |
--------------------------------------------------------------------------------
/client/assets/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/client/assets/favicon.png
--------------------------------------------------------------------------------
/client/assets/adaptive-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/client/assets/adaptive-icon.png
--------------------------------------------------------------------------------
/server/__pycache__/main.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/server/__pycache__/main.cpython-310.pyc
--------------------------------------------------------------------------------
/server/__pycache__/main.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/server/__pycache__/main.cpython-311.pyc
--------------------------------------------------------------------------------
/model/__pycache__/handcrafted.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/model/__pycache__/handcrafted.cpython-311.pyc
--------------------------------------------------------------------------------
/model/__pycache__/modelcrafted.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/model/__pycache__/modelcrafted.cpython-311.pyc
--------------------------------------------------------------------------------
/server/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.109.2
2 | numpy==1.26.4
3 | Pillow==10.2.0
4 | pydantic==2.6.1
5 | uvicorn==0.25.0
6 | requests==2.31.0
7 | python-multipart
--------------------------------------------------------------------------------
/model/__pycache__/imgCropBinaryMask.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Melanoma-Detector/HEAD/model/__pycache__/imgCropBinaryMask.cpython-311.pyc
--------------------------------------------------------------------------------
/client/src/constants/color.js:
--------------------------------------------------------------------------------
1 | export const colors = {
2 | primary: '#5caafe',
3 | secondary: '#46c3ff',
4 | white: '#F5F5F5',
5 | black: '#000000',
6 | gray: '#AFB4B8',
7 | };
8 |
--------------------------------------------------------------------------------
/client/eas.json:
--------------------------------------------------------------------------------
1 | {
2 | "cli": {
3 | "version": ">= 7.5.0"
4 | },
5 | "build": {
6 | "development": {
7 | "developmentClient": true,
8 | "distribution": "internal"
9 | },
10 | "preview": {
11 | "distribution": "internal"
12 | },
13 | "production": {}
14 | },
15 | "submit": {
16 | "production": {}
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/client/src/constants/svg.js:
--------------------------------------------------------------------------------
1 | import WelcomeSvg from '@assets/svg/welcome.svg';
2 | import AllowGallery from '@assets/svg/allowgallery.svg';
3 | import AllowCamera from '@assets/svg/allowcamera.svg';
4 | import CameraIcon from '@assets/svg/camera-icon.svg';
5 | import GalleryIcon from '@assets/svg/gallery-icon.svg';
6 |
7 | export { WelcomeSvg, AllowCamera, AllowGallery, CameraIcon, GalleryIcon };
8 |
--------------------------------------------------------------------------------
/client/.gitignore:
--------------------------------------------------------------------------------
1 | # Learn more https://docs.github.com/en/get-started/getting-started-with-git/ignoring-files
2 |
3 | # dependencies
4 | node_modules/
5 |
6 | # Expo
7 | .expo/
8 | dist/
9 | web-build/
10 |
11 | # Native
12 | *.orig.*
13 | *.jks
14 | *.p8
15 | *.p12
16 | *.key
17 | *.mobileprovision
18 |
19 | # Metro
20 | .metro-health-check*
21 |
22 | # debug
23 | npm-debug.*
24 | yarn-debug.*
25 | yarn-error.*
26 |
27 | # macOS
28 | .DS_Store
29 | *.pem
30 |
31 | # local env files
32 | .env*.local
33 |
34 | # typescript
35 | *.tsbuildinfo
36 |
--------------------------------------------------------------------------------
/client/js.config.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "expo/jsconfig.base",
3 | "compilerOptions": {
4 | "strict": true,
5 | "baseUrl": ".",
6 | "paths": {
7 | "@assets/*": ["src/assets/*"],
8 | "@components/*": ["src/components/*"],
9 | "@constants/*": ["src/constants/*"],
10 | "@hooks/*": ["src/hooks/*"],
11 | "@helpers/*": ["src/helpers/*"],
12 | "@screens/*": ["src/screens/*"],
13 | "@navigation/*": ["src/navigation/*"]
14 | }
15 | },
16 | "exclude": ["node_modules", "build", "dist"]
17 | }
18 |
--------------------------------------------------------------------------------
/client/src/assets/svg/camera-icon.svg:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/client/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = function (api) {
2 | api.cache(true);
3 | return {
4 | presets: ['babel-preset-expo'],
5 | plugins: [
6 | [
7 | 'module-resolver',
8 | {
9 | root: ['.'],
10 | alias: {
11 | '@assets': './src/assets',
12 | '@components': './src/components',
13 | '@constants': './src/constants',
14 | '@hooks': './src/hooks',
15 | '@helpers': './src/helpers',
16 | '@screens': './src/screens',
17 | '@navigation': './src/navigation',
18 | },
19 | },
20 | ],
21 | ],
22 | };
23 | };
24 |
--------------------------------------------------------------------------------
/client/metro.config.js:
--------------------------------------------------------------------------------
1 | /* eslint-disable no-undef */
2 | const path = require('path');
3 |
4 | const { getDefaultConfig } = require('expo/metro-config');
5 |
6 | module.exports = (() => {
7 | const config = getDefaultConfig(__dirname);
8 |
9 | const { transformer, resolver } = config;
10 |
11 | config.transformer = {
12 | ...transformer,
13 | babelTransformerPath: require.resolve('react-native-svg-transformer'),
14 | };
15 | config.resolver = {
16 | ...resolver,
17 | assetExts: resolver.assetExts.filter((ext) => ext !== 'svg'),
18 | sourceExts: [...resolver.sourceExts, 'svg'],
19 | extraNodeModules: {
20 | src: path.resolve(__dirname, 'src'),
21 | },
22 | };
23 |
24 | return config;
25 | })();
26 |
--------------------------------------------------------------------------------
/client/App.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import 'react-native-gesture-handler';
3 | import { SafeAreaProvider } from 'react-native-safe-area-context';
4 | import MainNavigator from './src/navigation/MainNavigator';
5 | import { PermissionProvider } from './src/context/PermissionsContext';
6 | import { ImageProvider } from './src/context/ImageContext';
7 | import { FormProvider } from './src/context/FormContext';
8 |
9 | export default function App() {
10 | return (
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 | );
21 | }
22 |
--------------------------------------------------------------------------------
/model/.dockerignore:
--------------------------------------------------------------------------------
1 | # Include any files or directories that you don't want to be copied to your
2 | # container here (e.g., local build artifacts, temporary files, etc.).
3 | #
4 | # For more help, visit the .dockerignore file reference guide at
5 | # https://docs.docker.com/go/build-context-dockerignore/
6 |
7 | **/.DS_Store
8 | **/__pycache__
9 | **/.venv
10 | **/.classpath
11 | **/.dockerignore
12 | **/.env
13 | **/.git
14 | **/.gitignore
15 | **/.project
16 | **/.settings
17 | **/.toolstarget
18 | **/.vs
19 | **/.vscode
20 | **/*.*proj.user
21 | **/*.dbmdl
22 | **/*.jfm
23 | **/bin
24 | **/charts
25 | **/docker-compose*
26 | **/compose*
27 | **/Dockerfile*
28 | **/node_modules
29 | **/npm-debug.log
30 | **/obj
31 | **/secrets.dev.yaml
32 | **/values.dev.yaml
33 | LICENSE
34 | README.md
35 |
--------------------------------------------------------------------------------
/server/.dockerignore:
--------------------------------------------------------------------------------
1 | # Include any files or directories that you don't want to be copied to your
2 | # container here (e.g., local build artifacts, temporary files, etc.).
3 | #
4 | # For more help, visit the .dockerignore file reference guide at
5 | # https://docs.docker.com/go/build-context-dockerignore/
6 |
7 | **/.DS_Store
8 | **/__pycache__
9 | **/.venv
10 | **/.classpath
11 | **/.dockerignore
12 | **/.env
13 | **/.git
14 | **/.gitignore
15 | **/.project
16 | **/.settings
17 | **/.toolstarget
18 | **/.vs
19 | **/.vscode
20 | **/*.*proj.user
21 | **/*.dbmdl
22 | **/*.jfm
23 | **/bin
24 | **/charts
25 | **/docker-compose*
26 | **/compose*
27 | **/Dockerfile*
28 | **/node_modules
29 | **/npm-debug.log
30 | **/obj
31 | **/secrets.dev.yaml
32 | **/values.dev.yaml
33 | LICENSE
34 | README.md
35 |
--------------------------------------------------------------------------------
/client/src/components/input/Slider.jsx:
--------------------------------------------------------------------------------
1 | import { View } from 'react-native';
2 | import DropDownPicker from 'react-native-dropdown-picker';
3 | import { useGlobalStyle } from '../../hooks/useGlobalStyle';
4 | import { useState } from 'react';
5 |
6 | const Slider = ({ items, setItems, locVal, setLocVal }) => {
7 | const [open, setOpen] = useState(false);
8 |
9 | const basicStyles = useGlobalStyle();
10 | return (
11 |
12 |
22 |
23 | );
24 | };
25 |
26 | export default Slider;
27 |
--------------------------------------------------------------------------------
/model/README.Docker.md:
--------------------------------------------------------------------------------
1 | ### Building and running your application
2 |
3 | When you're ready, start your application by running:
4 | `docker compose up --build`.
5 |
6 | Your application will be available at http://localhost:8000.
7 |
8 | ### Deploying your application to the cloud
9 |
10 | First, build your image, e.g.: `docker build -t myapp .`.
11 | If your cloud uses a different CPU architecture than your development
12 | machine (e.g., you are on a Mac M1 and your cloud provider is amd64),
13 | you'll want to build the image for that platform, e.g.:
14 | `docker build --platform=linux/amd64 -t myapp .`.
15 |
16 | Then, push it to your registry, e.g. `docker push myregistry.com/myapp`.
17 |
18 | Consult Docker's [getting started](https://docs.docker.com/go/get-started-sharing/)
19 | docs for more detail on building and pushing.
20 |
21 | ### References
22 | * [Docker's Python guide](https://docs.docker.com/language/python/)
--------------------------------------------------------------------------------
/server/README.Docker.md:
--------------------------------------------------------------------------------
1 | ### Building and running your application
2 |
3 | When you're ready, start your application by running:
4 | `docker compose up --build`.
5 |
6 | Your application will be available at http://localhost:7999.
7 |
8 | ### Deploying your application to the cloud
9 |
10 | First, build your image, e.g.: `docker build -t myapp .`.
11 | If your cloud uses a different CPU architecture than your development
12 | machine (e.g., you are on a Mac M1 and your cloud provider is amd64),
13 | you'll want to build the image for that platform, e.g.:
14 | `docker build --platform=linux/amd64 -t myapp .`.
15 |
16 | Then, push it to your registry, e.g. `docker push myregistry.com/myapp`.
17 |
18 | Consult Docker's [getting started](https://docs.docker.com/go/get-started-sharing/)
19 | docs for more detail on building and pushing.
20 |
21 | ### References
22 | * [Docker's Python guide](https://docs.docker.com/language/python/)
--------------------------------------------------------------------------------
/client/app.json:
--------------------------------------------------------------------------------
1 | {
2 | "expo": {
3 | "name": "DermAI",
4 | "slug": "DermAI",
5 | "version": "1.0.1",
6 | "orientation": "portrait",
7 | "icon": "./assets/icon.png",
8 | "userInterfaceStyle": "light",
9 | "splash": {
10 | "image": "./assets/splash.png",
11 | "resizeMode": "contain",
12 | "backgroundColor": "#ffffff"
13 | },
14 | "assetBundlePatterns": [
15 | "**/*"
16 | ],
17 | "ios": {
18 | "supportsTablet": true
19 | },
20 | "android": {
21 | "package": "com.vukig.DermAI",
22 | "versionCode": 2,
23 | "adaptiveIcon": {
24 | "foregroundImage": "./assets/adaptive-icon.png",
25 | "backgroundColor": "#ffffff"
26 | }
27 | },
28 | "web": {
29 | "favicon": "./assets/favicon.png"
30 | },
31 | "plugins": [
32 | "expo-font"
33 | ],
34 | "extra": {
35 | "eas": {
36 | "projectId": "77959661-4551-4b86-865e-4d15027f67f1"
37 | }
38 | }
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/client/src/components/ProgressStepBar.jsx:
--------------------------------------------------------------------------------
1 | import { View } from 'react-native';
2 | import PropTypes from 'prop-types';
3 | import { scale } from '../helpers/scale';
4 | import { useGlobalStyle } from '../hooks/useGlobalStyle';
5 | import { colors } from '../constants/color';
6 |
7 | export const ProgressStepBar = ({ stepSize = 5, currentStepIndex = 0 }) => {
8 | const basicStyles = useGlobalStyle();
9 |
10 | const StepCircle = ({ isPending = false }) => (
11 |
19 | );
20 | return (
21 |
22 | {Array.from({ length: stepSize }).map((item, index) => (
23 |
27 | ))}
28 |
29 | );
30 | };
31 |
32 | ProgressStepBar.propTypes = {
33 | stepSize: PropTypes.number,
34 | currentStepIndex: PropTypes.number,
35 | };
36 |
--------------------------------------------------------------------------------
/client/src/helpers/scale.js:
--------------------------------------------------------------------------------
1 | import { Dimensions } from 'react-native';
2 |
3 | // Guideline sizes are based on standard screen mobile device
4 | const guidelineBaseWidth = 428;
5 | const guidelineBaseHeight = 926;
6 |
7 | const realWidth = Dimensions.get('screen').width;
8 | const realHeight = Dimensions.get('screen').height;
9 |
10 | const innerScale =
11 | Dimensions.get('screen').scale / Dimensions.get('window').scale;
12 | const baseRatio = guidelineBaseWidth / guidelineBaseHeight;
13 | const realRatio = realWidth / realHeight;
14 |
15 | const width = Dimensions.get('window').width * innerScale;
16 | const height =
17 | (Dimensions.get('window').height * innerScale) /
18 | (realRatio > baseRatio ? realRatio / baseRatio : 1);
19 |
20 | const scale = (size) => (width / guidelineBaseWidth) * size;
21 | const scaleVertical = (size) => (height / guidelineBaseHeight) * size;
22 | const scaleModerate = (size, factor = 0.5) =>
23 | size + (scale(size) - size) * factor;
24 | const scaleImage = (width, height, targetHeight) => ({
25 | width: (targetHeight / height) * width,
26 | height: targetHeight,
27 | });
28 |
29 | export {
30 | baseRatio,
31 | width,
32 | height,
33 | scale,
34 | scaleVertical,
35 | scaleModerate,
36 | scaleImage,
37 | };
38 |
--------------------------------------------------------------------------------
/client/src/components/Footer.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Text, View, TouchableOpacity, StyleSheet } from 'react-native';
3 | import { AntDesign } from '@expo/vector-icons';
4 | import { Linking } from 'react-native';
5 |
6 | function Footer() {
7 | return (
8 |
9 |
10 | Made by the MDA team
11 |
13 | Linking.openURL(
14 | 'https://www.linkedin.com/in/vuk-ignjatovic-53152a248/',
15 | )
16 | }
17 | >
18 |
19 |
20 |
21 |
22 | );
23 | }
24 |
25 | const styles = StyleSheet.create({
26 | footerContainer: {
27 | backgroundColor: '#4a90e2',
28 | padding: 10,
29 | position: 'absolute',
30 | bottom: 0,
31 | width: '120%',
32 | },
33 | flexContainer: {
34 | flexDirection: 'row',
35 | justifyContent: 'center',
36 | alignItems: 'center',
37 | },
38 | textStyle: {
39 | color: 'white',
40 | marginRight: 10,
41 | },
42 | });
43 |
44 | export default Footer;
45 |
--------------------------------------------------------------------------------
/client/src/components/input/Input.jsx:
--------------------------------------------------------------------------------
1 | import { View, Text, TouchableOpacity, TextInput } from 'react-native';
2 | import { useGlobalStyle } from '../../hooks/useGlobalStyle';
3 | import { scaleVertical } from '../../helpers/scale';
4 | import { colors } from '../../constants/color';
5 | const Input = ({ placeholder, type, setState, value }) => {
6 | const basicStyles = useGlobalStyle();
7 |
8 | const handleTextChange = (text) => {
9 | const numericValue = text.replace(/[^0-9]/g, '');
10 | setState(numericValue);
11 | };
12 |
13 | return (
14 |
24 |
25 |
42 |
43 | );
44 | };
45 |
46 | export default Input;
47 |
--------------------------------------------------------------------------------
/client/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "client",
3 | "version": "1.0.0",
4 | "main": "node_modules/expo/AppEntry.js",
5 | "scripts": {
6 | "prettier": "prettier 'src/**/*.js'",
7 | "_comment": "This is optional, tunneling is not needed when you are on pc -Vuk Stefanovic Karadzic",
8 | "start": "npx expo start --tunnel",
9 | "android": "expo start --android",
10 | "ios": "expo start --ios",
11 | "web": "expo start --web"
12 | },
13 | "dependencies": {
14 | "@expo-google-fonts/montserrat": "^0.2.3",
15 | "@react-native-community/checkbox": "^0.5.17",
16 | "@react-navigation/native": "^6.1.9",
17 | "@react-navigation/stack": "^6.3.20",
18 | "axios": "^1.6.7",
19 | "expo": "~50.0.7",
20 | "expo-font": "~11.10.3",
21 | "expo-image": "~1.10.6",
22 | "expo-image-picker": "^14.7.1",
23 | "expo-linear-gradient": "~12.7.2",
24 | "expo-status-bar": "~1.11.1",
25 | "react": "18.2.0",
26 | "react-native": "0.73.4",
27 | "react-native-bouncy-checkbox": "^3.0.7",
28 | "react-native-dropdown-picker": "^5.4.6",
29 | "react-native-gesture-handler": "~2.14.0",
30 | "react-native-safe-area-context": "4.8.2",
31 | "react-native-svg": "14.1.0",
32 | "react-native-webview": "^13.8.1"
33 | },
34 | "devDependencies": {
35 | "@babel/core": "^7.20.0",
36 | "babel-plugin-module-resolver": "^5.0.0",
37 | "prettier": "^3.2.5",
38 | "prop-types": "^15.8.1",
39 | "react-native-svg-transformer": "^1.3.0"
40 | },
41 | "private": true
42 | }
43 |
--------------------------------------------------------------------------------
/client/src/assets/svg/gallery-icon.svg:
--------------------------------------------------------------------------------
1 |
25 |
--------------------------------------------------------------------------------
/model/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==2.1.0
2 | astunparse==1.6.3
3 | certifi==2024.2.2
4 | charset-normalizer==3.3.2
5 | flatbuffers==24.3.25
6 | gast==0.5.4
7 | google-pasta==0.2.0
8 | grpcio==1.62.1
9 | h5py==3.10.0
10 | idna==3.6
11 | joblib==1.3.2
12 | keras==3.1.1
13 | libclang==18.1.1
14 | Markdown==3.6
15 | markdown-it-py==3.0.0
16 | MarkupSafe==2.1.5
17 | mdurl==0.1.2
18 | ml-dtypes==0.3.2
19 | namex==0.0.7
20 | numpy==1.26.4
21 | nvidia-cublas-cu12==12.3.4.1
22 | nvidia-cuda-cupti-cu12==12.3.101
23 | nvidia-cuda-nvcc-cu12==12.3.107
24 | nvidia-cuda-nvrtc-cu12==12.3.107
25 | nvidia-cuda-runtime-cu12==12.3.101
26 | nvidia-cudnn-cu12==8.9.7.29
27 | nvidia-cufft-cu12==11.0.12.1
28 | nvidia-curand-cu12==10.3.4.107
29 | nvidia-cusolver-cu12==11.5.4.101
30 | nvidia-cusparse-cu12==12.2.0.103
31 | nvidia-nccl-cu12==2.19.3
32 | nvidia-nvjitlink-cu12==12.3.101
33 | opt-einsum==3.3.0
34 | optree==0.11.0
35 | packaging==24.0
36 | pandas==2.2.1
37 | protobuf==4.25.3
38 | Pygments==2.17.2
39 | python-dateutil==2.9.0.post0
40 | pytz==2024.1
41 | requests==2.31.0
42 | rich==13.7.1
43 | scikit-learn==1.4.1.post1
44 | scikit-rvm @ https://github.com/JamesRitchie/scikit-rvm/archive/master.zip#sha256=ff645f89e04965397981ea644c66599f6e0f10d955008d82c19fe3f6ae51e166
45 | scipy==1.12.0
46 | six==1.16.0
47 | tensorboard==2.16.2
48 | tensorboard-data-server==0.7.2
49 | tensorflow==2.16.1
50 | tensorflow-io-gcs-filesystem==0.36.0
51 | termcolor==2.4.0
52 | threadpoolctl==3.4.0
53 | typing_extensions==4.10.0
54 | tzdata==2024.1
55 | urllib3==2.2.1
56 | Werkzeug==3.0.1
57 | wrapt==1.16.0
58 |
--------------------------------------------------------------------------------
/client/src/components/button/SecondaryButton.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Text, TouchableOpacity, View } from 'react-native';
3 | import { colors } from '../../constants/color';
4 | import { useGlobalStyle } from '../../hooks/useGlobalStyle';
5 | import { scale, scaleVertical } from '../../helpers/scale';
6 | import PropTypes from 'prop-types';
7 |
8 | export const SecondaryButton = ({ title, icon, style, onPress }) => {
9 | const basicStyles = useGlobalStyle();
10 |
11 | return (
12 |
27 |
30 |
37 | {title}
38 |
39 | {icon}
40 |
41 |
42 | );
43 | };
44 |
45 | SecondaryButton.propTypes = {
46 | title: PropTypes.string.isRequired,
47 | style: PropTypes.object,
48 | onPress: PropTypes.func,
49 | icon: PropTypes.element,
50 | };
51 |
--------------------------------------------------------------------------------
/client/src/context/PermissionsContext.jsx:
--------------------------------------------------------------------------------
1 | import { createContext, useState } from 'react';
2 | import * as ImagePicker from 'expo-image-picker';
3 |
4 | const PermissionsContext = createContext();
5 |
6 | export const PermissionProvider = ({ children }) => {
7 | const [lastPressed, setLastPressed] = useState('none');
8 |
9 | const [permissions, setPermissions] = useState({
10 | camera: false,
11 | gallery: false,
12 | });
13 |
14 | const grantPermission = async (isCamera) => {
15 | const keyValue = isCamera ? 'camera' : 'gallery';
16 | let optionStatus;
17 | if (isCamera) {
18 | optionStatus = await ImagePicker.requestCameraPermissionsAsync();
19 | } else {
20 | optionStatus = await ImagePicker.requestMediaLibraryPermissionsAsync();
21 | }
22 | if (optionStatus.status == 'granted') {
23 | setPermissions((prev) => ({
24 | ...prev,
25 | [keyValue]: true,
26 | }));
27 | console.log('CHIPICHIPI DABA DUBI DUBI DABA');
28 | } else {
29 | setPermissions((prev) => ({
30 | ...prev,
31 | [keyValue]: false,
32 | }));
33 | }
34 | setLastPressed((prev) => ({
35 | ...prev,
36 | [keyValue]: true,
37 | }));
38 | };
39 |
40 | return (
41 |
50 | {children}
51 |
52 | );
53 | };
54 |
55 | export default PermissionsContext;
56 |
--------------------------------------------------------------------------------
/client/src/navigation/MainNavigator.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { createStackNavigator } from '@react-navigation/stack';
3 | import { NavigationContainer } from '@react-navigation/native';
4 |
5 | import WelcomeScreen from '@screens/Welcome';
6 | import GrantCameraPermissionScreen from '@screens/GrantCameraPermissionScreen';
7 | import GrantGalleryPermissionScreen from '@screens/GrantGalleryPermissionScreen';
8 | import ScanPhotoScreen from '@screens/ScanPhotoScreen';
9 | import DiagnosisScreen from '@screens/DiagnosisScreen';
10 | import FormScreen from '@screens/FormScreen';
11 |
12 | const MainStack = createStackNavigator();
13 |
14 | const MainNavigator = () => {
15 | return (
16 |
17 |
22 |
23 |
27 |
31 |
35 |
36 |
40 |
41 |
42 | );
43 | };
44 | export default MainNavigator;
45 |
--------------------------------------------------------------------------------
/client/src/components/button/PrimaryButton.jsx:
--------------------------------------------------------------------------------
1 | import { TouchableOpacity, Text, View } from 'react-native';
2 | import PropTypes from 'prop-types';
3 | import { LinearGradient } from 'expo-linear-gradient';
4 | import { colors } from '../../constants/color';
5 | import { useGlobalStyle } from '../../hooks/useGlobalStyle';
6 | import { scale, scaleVertical } from '../../helpers/scale';
7 |
8 | export const PrimaryButton = ({ title, style, onPress, icon }) => {
9 | const basicStyles = useGlobalStyle();
10 |
11 | return (
12 |
13 |
29 |
30 |
31 | {title}
32 |
33 | {icon}
34 |
35 |
36 |
37 | );
38 | };
39 |
40 | PrimaryButton.propTypes = {
41 | title: PropTypes.string.isRequired,
42 | style: PropTypes.object,
43 | onPress: PropTypes.func,
44 | icon: PropTypes.element,
45 | };
46 |
--------------------------------------------------------------------------------
/model/compose.yaml:
--------------------------------------------------------------------------------
1 | # Comments are provided throughout this file to help you get started.
2 | # If you need more help, visit the Docker Compose reference guide at
3 | # https://docs.docker.com/go/compose-spec-reference/
4 |
5 | # Here the instructions define your application as a service called "server".
6 | # This service is built from the Dockerfile in the current directory.
7 | # You can add other services your application may depend on here, such as a
8 | # database or a cache. For examples, see the Awesome Compose repository:
9 | # https://github.com/docker/awesome-compose
10 | services:
11 | server:
12 | build:
13 | context: .
14 | ports:
15 | - 8000:8000
16 |
17 | # The commented out section below is an example of how to define a PostgreSQL
18 | # database that your application can use. `depends_on` tells Docker Compose to
19 | # start the database before your application. The `db-data` volume persists the
20 | # database data between container restarts. The `db-password` secret is used
21 | # to set the database password. You must create `db/password.txt` and add
22 | # a password of your choosing to it before running `docker compose up`.
23 | # depends_on:
24 | # db:
25 | # condition: service_healthy
26 | # db:
27 | # image: postgres
28 | # restart: always
29 | # user: postgres
30 | # secrets:
31 | # - db-password
32 | # volumes:
33 | # - db-data:/var/lib/postgresql/data
34 | # environment:
35 | # - POSTGRES_DB=example
36 | # - POSTGRES_PASSWORD_FILE=/run/secrets/db-password
37 | # expose:
38 | # - 5432
39 | # healthcheck:
40 | # test: [ "CMD", "pg_isready" ]
41 | # interval: 10s
42 | # timeout: 5s
43 | # retries: 5
44 | # volumes:
45 | # db-data:
46 | # secrets:
47 | # db-password:
48 | # file: db/password.txt
49 |
50 |
--------------------------------------------------------------------------------
/server/compose.yaml:
--------------------------------------------------------------------------------
1 | # Comments are provided throughout this file to help you get started.
2 | # If you need more help, visit the Docker Compose reference guide at
3 | # https://docs.docker.com/go/compose-spec-reference/
4 |
5 | # Here the instructions define your application as a service called "server".
6 | # This service is built from the Dockerfile in the current directory.
7 | # You can add other services your application may depend on here, such as a
8 | # database or a cache. For examples, see the Awesome Compose repository:
9 | # https://github.com/docker/awesome-compose
10 | services:
11 | server:
12 | build:
13 | context: .
14 | ports:
15 | - 7999:7999
16 |
17 | # The commented out section below is an example of how to define a PostgreSQL
18 | # database that your application can use. `depends_on` tells Docker Compose to
19 | # start the database before your application. The `db-data` volume persists the
20 | # database data between container restarts. The `db-password` secret is used
21 | # to set the database password. You must create `db/password.txt` and add
22 | # a password of your choosing to it before running `docker compose up`.
23 | # depends_on:
24 | # db:
25 | # condition: service_healthy
26 | # db:
27 | # image: postgres
28 | # restart: always
29 | # user: postgres
30 | # secrets:
31 | # - db-password
32 | # volumes:
33 | # - db-data:/var/lib/postgresql/data
34 | # environment:
35 | # - POSTGRES_DB=example
36 | # - POSTGRES_PASSWORD_FILE=/run/secrets/db-password
37 | # expose:
38 | # - 5432
39 | # healthcheck:
40 | # test: [ "CMD", "pg_isready" ]
41 | # interval: 10s
42 | # timeout: 5s
43 | # retries: 5
44 | # volumes:
45 | # db-data:
46 | # secrets:
47 | # db-password:
48 | # file: db/password.txt
49 |
50 |
--------------------------------------------------------------------------------
/server/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1
2 |
3 | # Comments are provided throughout this file to help you get started.
4 | # If you need more help, visit the Dockerfile reference guide at
5 | # https://docs.docker.com/go/dockerfile-reference/
6 |
7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7
8 |
9 | ARG PYTHON_VERSION=3.11.5
10 | FROM python:${PYTHON_VERSION}-slim as base
11 |
12 | # Prevents Python from writing pyc files.
13 | ENV PYTHONDONTWRITEBYTECODE=1
14 |
15 | # Keeps Python from buffering stdout and stderr to avoid situations where
16 | # the application crashes without emitting any logs due to buffering.
17 | ENV PYTHONUNBUFFERED=1
18 |
19 | WORKDIR /app
20 |
21 | # Create a non-privileged user that the app will run under.
22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/
23 | ARG UID=10001
24 | RUN adduser \
25 | --disabled-password \
26 | --gecos "" \
27 | --home "/nonexistent" \
28 | --shell "/sbin/nologin" \
29 | --no-create-home \
30 | --uid "${UID}" \
31 | appuser
32 |
33 | # Download dependencies as a separate step to take advantage of Docker's caching.
34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into
36 | # into this layer.
37 | RUN --mount=type=cache,target=/root/.cache/pip \
38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \
39 | python -m pip install -r requirements.txt
40 |
41 | # Switch to the non-privileged user to run the application.
42 | USER appuser
43 |
44 | # Copy the source code into the container.
45 | COPY . .
46 |
47 | # Expose the port that the application listens on.
48 | EXPOSE 7999
49 |
50 | # Run the application.
51 | CMD uvicorn 'main:app' --host=0.0.0.0 --port=7999
52 |
--------------------------------------------------------------------------------
/model/smote.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from keras.preprocessing.image import ImageDataGenerator, array_to_img
4 | from imblearn.over_sampling import SMOTE
5 |
6 | patho = {'benign': 0, 'malignant': 1}
7 |
8 | imagegen = ImageDataGenerator()
9 | # Load train data from drive
10 | train_generator = imagegen.flow_from_directory("/home/vuk/Documents/ML_Data/HAM/processed/roi/", class_mode="binary", shuffle=False, batch_size=128, target_size=(224, 224), seed=42)
11 | batches = [next(train_generator) for _ in range(len(train_generator))]  # draw each batch once so images and labels stay aligned
12 | x, y = np.concatenate([b[0] for b in batches]), np.concatenate([b[1] for b in batches])
13 |
14 | # Convert color images to a vector
15 | X_train = x.reshape(1293, 224*224*3)
16 |
17 | # Apply SMOTE method to the minority class (malignant)
18 | sm = SMOTE(random_state=2)
19 | X_smote, y_smote = sm.fit_resample(X_train, y)
20 |
21 | Xsmote_img = X_smote.reshape(6700, 224, 224, 3)
22 |
23 | # This function returns the label name
24 | def get_key(val):
25 | for key, value in patho.items():
26 | if val == value:
27 | return key
28 |
29 | # Adjusted saving part
30 | for i in range(len(Xsmote_img)):
31 | # Check if the label is 'malignant'
32 | if y_smote[i] == patho['malignant']:
33 | # Define the directory for malignant images
34 | malignant_dir = '/home/vuk/Documents/ML_Data/HAM/processed/roi/malignant/'
35 |
36 | # Check if the directory exists, if not, create it
37 | if not os.path.exists(malignant_dir):
38 | os.mkdir(malignant_dir)
39 |
40 | # Convert the image array to a PIL image
41 | pil_img = array_to_img(Xsmote_img[i] * 255)
42 |
43 | # Save the image with a naming convention that reflects their index in the oversampled dataset
44 | pil_img.save(malignant_dir + 'smote_' + str(1293 + i) + '.jpg')
--------------------------------------------------------------------------------
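
A minimal, self-contained sketch (not part of the repository) of what `SMOTE.fit_resample` does on a toy imbalanced dataset, to clarify the oversampling step in smote.py above. It assumes `imbalanced-learn` and `scikit-learn` are installed; note that `imbalanced-learn` is imported by smote.py but is not pinned in model/requirements.txt.

```python
# Toy illustration of SMOTE oversampling (hypothetical data, not the HAM dataset).
import numpy as np
from collections import Counter
from imblearn.over_sampling import SMOTE

rng = np.random.default_rng(42)
# 90 "benign" samples (label 0) vs. 10 "malignant" samples (label 1), 4 features each
X = np.vstack([rng.normal(0.0, 1.0, size=(90, 4)),
               rng.normal(3.0, 1.0, size=(10, 4))])
y = np.array([0] * 90 + [1] * 10)

X_res, y_res = SMOTE(random_state=2).fit_resample(X, y)
print(Counter(y))      # Counter({0: 90, 1: 10})
print(Counter(y_res))  # Counter({0: 90, 1: 90}) -- minority class synthetically oversampled
```
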
/model/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1
2 |
3 | # Comments are provided throughout this file to help you get started.
4 | # If you need more help, visit the Dockerfile reference guide at
5 | # https://docs.docker.com/go/dockerfile-reference/
6 |
7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7
8 |
9 | ARG PYTHON_VERSION=3.11.5
10 | FROM python:${PYTHON_VERSION}-slim as base
11 |
12 | # Prevents Python from writing pyc files.
13 | ENV PYTHONDONTWRITEBYTECODE=1
14 |
15 | # Keeps Python from buffering stdout and stderr to avoid situations where
16 | # the application crashes without emitting any logs due to buffering.
17 | ENV PYTHONUNBUFFERED=1
18 |
19 | WORKDIR /app
20 |
21 | # Create a non-privileged user that the app will run under.
22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/
23 | ARG UID=10001
24 | RUN adduser \
25 | --disabled-password \
26 | --gecos "" \
27 | --home "/nonexistent" \
28 | --shell "/sbin/nologin" \
29 | --no-create-home \
30 | --uid "${UID}" \
31 | appuser
32 |
33 | # Download dependencies as a separate step to take advantage of Docker's caching.
34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into
36 | # into this layer.
37 | RUN --mount=type=cache,target=/root/.cache/pip \
38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \
39 | python -m pip install -r requirements.txt
40 |
41 | # Switch to the non-privileged user to run the application.
42 | USER appuser
43 |
44 | # Copy the source code into the container.
45 | COPY . .
46 |
47 | # Expose the port that the application listens on.
48 | EXPOSE 8000
49 |
50 | # Run the application.
51 | CMD python fusion_stage.py
52 |
--------------------------------------------------------------------------------
/client/src/screens/DiagnosisScreen.jsx:
--------------------------------------------------------------------------------
1 | import { View, Text, Image } from 'react-native';
2 | import { BaseScreen } from '../components/common/BaseScreen';
3 | import { useContext } from 'react';
4 | import PermissionsContext from '../context/PermissionsContext';
5 | import PrimaryButton from '../components/button/PrimaryButton';
6 | import SecondaryButton from '../components/button/SecondaryButton';
7 | import { Ionicons } from '@expo/vector-icons';
8 | import ImageContext from '../context/ImageContext';
9 | import { useGlobalStyle } from '../hooks/useGlobalStyle';
10 | import { scale, scaleImage } from '../helpers/scale';
11 | import Footer from '../components/Footer';
12 |
13 | const DiagnosisScreen = () => {
14 | const { imgUri } = useContext(ImageContext);
15 | const basicStyles = useGlobalStyle();
16 |
17 | const { width: scaledWidth, height: scaledHeight } = scaleImage(
18 | 100,
19 | 100,
20 | 340,
21 | );
22 |
23 | return (
24 |
25 |
26 |
27 |
40 |
41 | Your naevus is probably NOT cancerous
42 |
43 |
44 | Disclaimer: This model makes mistakes and is NOT* a replacement for
45 | a doctor. If you have serious doubts, make sure to contact your
46 | dermatologist.
47 |
48 |
49 |
50 |
51 |
52 | );
53 | };
54 |
55 | export default DiagnosisScreen;
56 |
--------------------------------------------------------------------------------
/client/src/components/input/Checkbox.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { View, Text, TouchableOpacity, StyleSheet } from 'react-native';
3 | import BouncyCheckbox from 'react-native-bouncy-checkbox';
4 | import { useGlobalStyle } from '../../hooks/useGlobalStyle';
5 | import { colors } from '../../constants/color';
6 |
7 | const Checkbox = ({ gender, setGender }) => {
8 | const basicStyles = useGlobalStyle();
9 |
10 | const handleGenderChange = (newGender) => {
11 | setGender({ male: newGender === 'male', female: newGender === 'female' });
12 | };
13 |
14 | return (
15 |
16 |
17 | Choose your gender:
18 |
19 |
20 | handleGenderChange('male')}
23 | fillColor={colors.primary}
24 | unfillColor="#FFFFFF"
25 | iconStyle={{ borderColor: colors.primary }}
26 | />
27 | handleGenderChange('male')}>
28 |
29 | Male
30 |
31 |
32 |
33 |
34 | handleGenderChange('female')}
37 | fillColor={colors.primary}
38 | unfillColor="#FFFFFF"
39 | iconStyle={{ borderColor: colors.primary }}
40 | />
41 | handleGenderChange('female')}>
42 |
43 | Female
44 |
45 |
46 |
47 |
48 | );
49 | };
50 |
51 | const styles = StyleSheet.create({
52 | row: {
53 | flexDirection: 'row',
54 | alignItems: 'center',
55 | marginBottom: 20,
56 | },
57 | });
58 |
59 | export default Checkbox;
60 |
--------------------------------------------------------------------------------
/model/modelcrafted.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import tensorflow as tf
3 | import numpy as np
4 | from keras.preprocessing.image import ImageDataGenerator
5 | import cv2
6 |
7 | # %%
8 | mobile = tf.keras.applications.MobileNetV2(weights='imagenet', include_top=False, input_shape = (224,224,3))
9 |
10 | # %%
11 | mobile.summary()
12 |
13 | # %%
14 | datagen = ImageDataGenerator(rescale=1./255)
15 | batch_size = 32
16 |
17 | # BUG: the final batch from the generator can be smaller than batch_size, so the fixed-size slice assignments below can fail when sample_count is not a multiple of batch_size
18 | def dl_extract_features(directory, sample_count):
19 | features = np.zeros(shape=(sample_count, 7, 7, 1280)) # Must be equal to the output of the convolutional base
20 | labels = np.zeros(shape=(sample_count))
21 | # Preprocess data
22 | generator = datagen.flow_from_directory(directory,
23 | target_size=(224,224),
24 | batch_size = batch_size,
25 | class_mode='binary')
26 | # Pass data through convolutional base
27 | i = 0
28 | for inputs_batch, labels_batch in generator:
29 | features_batch = mobile.predict(inputs_batch)
30 | features[i * batch_size: (i + 1) * batch_size] = features_batch
31 | labels[i * batch_size: (i + 1) * batch_size] = labels_batch
32 | i += 1
33 | if i * batch_size >= sample_count:
34 | break
35 | return features, labels
36 |
37 | #%%
38 | def dl_extract_features_from_img(img):
39 |     resized_img = cv2.resize(img, (224, 224)) / 255.0  # rescale to match the 1./255 datagen used above
40 |     features = mobile.predict(np.expand_dims(resized_img, axis=0))  # predict expects a batch dimension
41 | return features
42 | # %%
43 | numerOfImages = 10005
44 | train_dir = '/home/vuk/Documents/ML_Data/HAM/processed/roi'
45 | valid_dir = '/home/vuk/Documents/ML_Data/HAM/valid'
46 | test_dir = '/home/vuk/Documents/ML_Data/HAM/test'
47 | train_features, train_labels = dl_extract_features(train_dir, numerOfImages)  # sample count should agree with the dataset size
48 | validation_features, validation_labels = dl_extract_features(valid_dir, numerOfImages)
49 | test_features, test_labels = dl_extract_features(test_dir, numerOfImages)
50 |
51 | # test_features, test_labels = extract_features(test_dir, test_size)
--------------------------------------------------------------------------------
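
fusion_stage.py (the script the model Dockerfile runs) is not reproduced in this dump, so as a purely hypothetical illustration, here is one way the 7×7×1280 MobileNetV2 features returned by `dl_extract_features` could be flattened and fed to a simple classifier. The classifier choice and helper names are assumptions made for the sketch, not the repository's actual fusion stage; scikit-learn is pinned in model/requirements.txt.

```python
# Hypothetical sketch: training a simple head on the extracted MobileNetV2 features.
# This is NOT the repository's fusion_stage.py, which is not shown in this dump.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score


def flatten_features(features):
    """Reshape (n, 7, 7, 1280) conv-base activations into (n, 62720) vectors."""
    return features.reshape(features.shape[0], 7 * 7 * 1280)


def train_simple_head(train_features, train_labels, test_features, test_labels):
    """Fit a logistic-regression head on the flattened features and report test accuracy."""
    clf = LogisticRegression(max_iter=1000)
    clf.fit(flatten_features(train_features), train_labels)
    preds = clf.predict(flatten_features(test_features))
    return accuracy_score(test_labels, preds)
```
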
/client/src/screens/Welcome.jsx:
--------------------------------------------------------------------------------
1 | import { View, Text } from 'react-native';
2 | import { BaseScreen } from '../components/common/BaseScreen';
3 | import { WelcomeSvg } from '../constants/svg';
4 | import { useGlobalStyle } from '../hooks/useGlobalStyle';
5 | import { scale, scaleVertical } from '../helpers/scale';
6 | import { PrimaryButton } from '../components/button/PrimaryButton';
7 | import { ProgressStepBar } from '../components/ProgressStepBar';
8 |
9 | const WelcomeScreen = ({ navigation }) => {
10 | const basicStyles = useGlobalStyle();
11 |
12 | return (
13 |
14 |
15 |
16 |
22 |
32 |
33 | Welcome to CDA
34 |
35 |
45 | {
46 | 'Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit,\nsed do eiusmod tempor'
47 | }
48 |
49 | navigation.navigate('GrantCameraPermissionScreen')}
56 | />
57 |
58 |
59 |
60 |
61 |
62 | );
63 | };
64 | export default WelcomeScreen;
65 |
--------------------------------------------------------------------------------
/client/src/context/FormContext.jsx:
--------------------------------------------------------------------------------
1 | import ImageContext from './ImageContext';
2 | import axios from 'axios';
3 | import * as FileSystem from 'expo-file-system';
4 | import { createContext, useContext, useState } from 'react';
5 |
6 | const FormContext = createContext();
7 |
8 | export const FormProvider = ({ children }) => {
9 | const { image, imgUri, setImage } = useContext(ImageContext);
10 |
11 | const [age, setAge] = useState();
12 | const [gender, setGender] = useState({ male: false, female: false }); // gender is tracked as { male, female } booleans; the selected label is derived when the form is submitted
13 | const [locVal, setLocVal] = useState(null);
14 | const [location, setLocation] = useState([
15 | { label: 'Abdomen ', value: 'abdomen' },
16 | { label: 'Back', value: 'back' },
17 | { label: 'Trunk', value: 'trunk' },
18 | { label: 'Upper extremity', value: 'upper extremity' },
19 | { label: 'Lower extremity', value: 'lower extremity' },
20 | ]);
21 |
22 | const sendData = async () => {
23 | try {
24 | const formData = new FormData();
25 | const date = new Date();
26 |
27 | formData.append('age', age);
28 | formData.append('gender', gender.male ? 'male' : 'female'); // send the selected gender rather than a hard-coded value
29 | formData.append('localization', locVal);
30 | formData.append('photo', {
31 | uri: imgUri,
32 | type: 'image/jpeg',
33 | name: 'userImg.jpg',
34 | // name: date.getTime().toString() + "_USER_IMAGE" + ".jpg"
35 | });
36 |
37 | console.log(age);
38 |
39 | const res = await axios.post(
40 | 'http://192.168.1.172:8000/predict',
41 | formData,
42 | {
43 | headers: {
44 | 'Content-Type': 'multipart/form-data',
45 | },
46 | },
47 | );
48 |
49 | console.log(res.data);
50 | } catch (err) {
51 | console.log('Error sending data: ', err);
52 | }
53 | };
54 |
55 | return (
56 |
69 | {children}
70 |
71 | );
72 | };
73 |
74 | export default FormContext;
75 |
--------------------------------------------------------------------------------
/client/src/context/ImageContext.jsx:
--------------------------------------------------------------------------------
1 | import { createContext, useState } from 'react';
2 | import * as ImagePicker from 'expo-image-picker';
3 | import axios from 'axios';
4 | import PermissionsContext from '../context/PermissionsContext';
5 | import { useContext } from 'react';
6 |
7 | const ImageContext = createContext();
8 |
9 | export const ImageProvider = ({ children }) => {
10 | const [image, setImage] = useState('');
11 | const [imgUri, setImgUri] = useState('');
12 | const { permissions } = useContext(PermissionsContext);
13 |
14 | const captureImage = async (isCamera) => {
15 | const permission = isCamera ? permissions.camera : permissions.gallery;
16 |
17 | const options = {
18 | mediaTypes: ImagePicker.MediaTypeOptions.All,
19 | allowsEditing: true,
20 | aspect: [1, 1],
21 | quality: 1,
22 | base64: true,
23 | };
24 |
25 | if (permission === true) {
26 | let result;
27 | if (isCamera) {
28 | result = await ImagePicker.launchCameraAsync(options);
29 | } else {
30 | result = await ImagePicker.launchImageLibraryAsync(options);
31 | }
32 |
33 | // console.log(result);
34 |
35 | if (!result.canceled) {
36 | setImage(result.assets[0]?.base64);
37 | setImgUri(result.assets[0]?.uri);
38 | }
39 | } else {
40 | // setErrors and errorKey are not defined in this file; warn instead of throwing a ReferenceError
41 | console.warn(
42 | `Permission not granted for the ${isCamera ? 'camera' : 'gallery'}`,
43 | );
44 | }
45 | };
46 |
47 | const sendImage = async (base64Image) => {
48 | try {
49 | const formData = new FormData();
50 | formData.append('photo', { base64: base64Image });
51 |
52 | const res = await axios.post(
53 | 'http://192.168.1.172:8000/predict1',
54 | {
55 | image: formData,
56 | },
57 | {
58 | headers: {
59 | 'Content-Type': 'application/json',
60 | },
61 | },
62 | );
63 |
64 | // console.log(res);
65 | } catch (error) {
66 | console.error('Error uploading image:', error);
67 | }
68 | };
69 |
70 | return (
71 |
80 | {children}
81 |
82 | );
83 | };
84 |
85 | export default ImageContext;
86 |
--------------------------------------------------------------------------------
/client/src/screens/GrantGalleryPermissionScreen.jsx:
--------------------------------------------------------------------------------
1 | import { View, Text } from 'react-native';
2 | import { useContext } from 'react';
3 | import PermissionsContext from '../context/PermissionsContext';
4 | import { AllowGallery } from '../constants/svg';
5 | import { useGlobalStyle } from '../hooks/useGlobalStyle';
6 | import { PrimaryButton } from '../components/button/PrimaryButton';
7 | import { SecondaryButton } from '../components/button/SecondaryButton';
8 | import { scaleVertical } from '../helpers/scale';
9 | import { BaseScreen } from '../components/common/BaseScreen';
10 | import { ProgressStepBar } from '../components/ProgressStepBar';
11 |
12 | const GrantGalleryPermissionScreen = ({ navigation }) => {
13 | const basicStyles = useGlobalStyle();
14 | const { grantPermission } = useContext(PermissionsContext);
15 |
16 | return (
17 |
18 |
24 |
31 |
32 |
33 |
39 | Grant gallery access
40 |
41 |
44 |
45 | We need an image of your mole. Please grant us the permission to
46 | access your gallery in order to get the image.
47 |
48 |
49 | {
52 | grantPermission(false);
53 | navigation.navigate('ScanPhotoScreen');
54 | }}
55 | />
56 | navigation.navigate('ScanPhotoScreen')}
60 | />
61 |
62 |
63 |
64 |
65 |
66 | );
67 | };
68 |
69 | export default GrantGalleryPermissionScreen;
70 |
--------------------------------------------------------------------------------
/client/src/screens/GrantCameraPermissionScreen.jsx:
--------------------------------------------------------------------------------
1 | import { View, Text } from 'react-native';
2 | import { BaseScreen } from '../components/common/BaseScreen';
3 | import { useGlobalStyle } from '../hooks/useGlobalStyle';
4 | import { PrimaryButton } from '../components/button/PrimaryButton';
5 | import { SecondaryButton } from '../components/button/SecondaryButton';
6 | import { ProgressStepBar } from '../components/ProgressStepBar';
7 | import { scaleVertical } from '../helpers/scale';
8 | import { AllowCamera } from '../constants/svg';
9 | import PermissionsContext from '../context/PermissionsContext';
10 | import { useContext } from 'react';
11 |
12 | const GrantCameraPermissionScreen = ({ navigation }) => {
13 | const basicStyles = useGlobalStyle();
14 |
15 | const { grantPermission } = useContext(PermissionsContext);
16 |
17 | return (
18 |
19 |
25 |
32 |
33 |
34 |
40 | Allow your camera
41 |
42 |
43 |
44 | We need an image of your mole. Please grant us the permission to
45 | access your camera in order to take the image.
46 |
47 |
48 | {
51 | grantPermission(true);
52 | navigation.navigate('GrantGalleryPermissionScreen');
53 | }}
54 | />
55 |
59 | navigation.navigate('GrantGalleryPermissionScreen')
60 | }
61 | />
62 |
63 |
64 |
65 |
66 |
67 | );
68 | };
69 |
70 | export default GrantCameraPermissionScreen;
71 |
--------------------------------------------------------------------------------
/client/src/components/common/BaseScreen.jsx:
--------------------------------------------------------------------------------
1 | import { SafeAreaView } from 'react-native-safe-area-context';
2 | import { StatusBar } from 'expo-status-bar';
3 | import { StyleSheet, View, Platform, Text } from 'react-native';
4 | import { Image } from 'expo-image';
5 | import PropTypes from 'prop-types';
6 |
7 | import { scale, scaleVertical } from '@helpers/scale';
8 | import { useGlobalStyle } from '../../hooks/useGlobalStyle';
9 | import { colors } from '../../constants/color';
10 | import { useFonts } from 'expo-font';
11 | import { Montserrat_400Regular } from '@expo-google-fonts/montserrat';
12 |
13 | export const BaseScreen = ({
14 | contentStyle,
15 | backgroundImage,
16 | backgroundColor,
17 | children,
18 | hasBottomTab = false,
19 | fill = false,
20 | paddingHorizontal = 32,
21 | }) => {
22 | const basicStyles = useGlobalStyle();
23 |
24 | const [fontsLoaded] = useFonts({
25 | Montserrat_400Regular,
26 | });
27 |
28 | const BaseComponent = fill ? View : SafeAreaView;
29 |
30 | if (!fontsLoaded) {
31 | return Loading...;
32 | }
33 |
34 | return (
35 |
44 | {backgroundImage && (
45 |
50 | )}
51 |
65 | {children}
66 |
67 |
68 |
69 | );
70 | };
71 |
72 | BaseScreen.propTypes = {
73 | children: PropTypes.node,
74 | contentStyle: PropTypes.object,
75 | backgroundImage: PropTypes.object,
76 | backgroundColor: PropTypes.string,
77 | hasBottomTab: PropTypes.bool,
78 | fill: PropTypes.bool,
79 | paddingHorizontal: PropTypes.number,
80 | };
81 |
82 | const styles = StyleSheet.create({
83 | container: {
84 | flex: 1,
85 | },
86 | contentStyle: {
87 | flex: 1,
88 | width: '100%',
89 | },
90 | danger: {
91 | paddingHorizontal: scale(24),
92 | paddingVertical: scaleVertical(4),
93 | },
94 | });
95 |
--------------------------------------------------------------------------------
/server/main.py:
--------------------------------------------------------------------------------
1 | import binascii
2 | import io
3 | import base64
4 | from typing import Optional
5 |
6 | import numpy as np
7 | from fastapi import FastAPI, File, Form, Request, UploadFile
8 | from fastapi.middleware.cors import CORSMiddleware
9 | from PIL import Image
10 | from pydantic import BaseModel
11 | # pip installation required + add the version to requirements.txt
12 |
13 | app = FastAPI()
14 |
15 | # CORS errors are handled by the middleware below
16 | class PredictFormData(BaseModel):
17 |     age: int
18 |     gender: str
19 |     localization: str
20 |
21 | app.add_middleware(
22 |     CORSMiddleware,
23 |     allow_origins=["*"],
24 |     allow_credentials=True,
25 |     allow_methods=["*"],
26 |     allow_headers=["*"],
27 | )
28 |
29 | @app.get('/')
30 | def read_root():
31 |     print("yo")
32 |     return {"message": "jesam"}
33 |
34 | @app.post('/user-data')
35 | def receive_user_data(data: dict):
36 |     print("user info received!")
37 |     print(data)
38 |     return {"message": data["data"]["_parts"][0]}
39 |
40 | def resize_image(image_path, width, height):
41 |     image = Image.open(image_path)
42 |
43 |     newImage = image.resize((width, height))
44 |     newImage.save("resizedImage.jpg")
45 |
46 | @app.post("/predict")
47 | async def predict(
48 |     age: int = Form(...),
49 |     gender: str = Form(...),
50 |     localization: str = Form(...),
51 |     photo: UploadFile = File(...)):
52 |     try:
53 |         image_path = f"images/{photo.filename}"
54 |
55 |         print('age: ', age)
56 |         print('gender: ', gender)
57 |         print('localization: ', localization)
58 |         print(photo)
59 |         print('image_path: ', image_path)
60 |
61 |         with open(image_path, "wb") as file:
62 |             contents = await photo.read()
63 |             file.write(contents)
64 |
65 |         image_to_array(contents, 450, 600)
66 |
67 |         return {"file_contents": contents.decode("utf-8", "ignore")}
68 |     except Exception as e:
69 |         print("Error receiving image:", e)
70 |         return {"error": str(e)}
71 |
72 | def image_to_array(img, width, height):
73 |     image = Image.open(io.BytesIO(img))
74 |
75 |     # image_grayscale = image.convert("L")  # probably want to keep the original image color
76 |     # resized_image = image_grayscale.resize((width, height))
77 |     resized_image = image.resize((width, height))
78 |     image_array = np.array(resized_image)
79 |
80 |     # print("image_array: ", image_array)
81 |
82 |     normalized_image = image_array / 255.0  # value depends on the pixel value range
83 |     print(normalized_image)
--------------------------------------------------------------------------------
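
For reference, a minimal sketch of how a client could call the `POST /predict` endpoint defined above. It assumes the server is reachable at http://localhost:7999 (the port exposed in server/Dockerfile and server/compose.yaml) and uses a placeholder image path, `sample_mole.jpg`; `requests` is already listed in server/requirements.txt. The React Native client sends the same multipart fields from FormContext.jsx.

```python
# Minimal client sketch for the /predict endpoint in server/main.py.
# Assumes the server runs on localhost:7999 and 'sample_mole.jpg' is a placeholder path.
import requests


def send_prediction_request(image_path="sample_mole.jpg"):
    data = {
        "age": 30,                 # coerced to int by the Form(...) parameter
        "gender": "male",
        "localization": "back",
    }
    with open(image_path, "rb") as f:
        files = {"photo": ("userImg.jpg", f, "image/jpeg")}
        resp = requests.post("http://localhost:7999/predict", data=data, files=files)
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    print(send_prediction_request())
```
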
/client/src/screens/FormScreen.jsx:
--------------------------------------------------------------------------------
1 | import { View, Text, Image } from 'react-native';
2 | import { BaseScreen } from '../components/common/BaseScreen';
3 | import { useContext, useState } from 'react';
4 | import PermissionsContext from '../context/PermissionsContext';
5 | import ImageContext from '../context/ImageContext';
6 | import FormContext from '../context/FormContext';
7 |
8 | import { PrimaryButton } from '../components/button/PrimaryButton';
9 | import { SecondaryButton } from '../components/button/SecondaryButton';
10 | import { Ionicons } from '@expo/vector-icons';
11 | import { useGlobalStyle } from '../hooks/useGlobalStyle';
12 | import { scaleImage } from '../helpers/scale';
13 |
14 | import Input from '../components/input/Input';
15 | import Checkbox from '../components/input/Checkbox';
16 | import Slider from '../components/input/Slider';
17 |
18 | const FormScreen = ({ navigation }) => {
19 | const basicStyles = useGlobalStyle();
20 | const { lastPressed, setLastPressed } = useContext(PermissionsContext);
21 | const { imgUri, image } = useContext(ImageContext);
22 | const { age, setAge, gender, setGender, location, setLocation,
23 | sendData, locVal, setLocVal } =
24 | useContext(FormContext);
25 |
26 | console.log(lastPressed);
27 |
28 | const { width: scaledWidth, height: scaledHeight } = scaleImage(
29 | 100,
30 | 100,
31 | 340,
32 | );
33 |
34 | return (
35 |
36 |
37 |
50 |
51 |
58 | Enter your personal info:
59 |
60 |
61 |
67 |
68 |
74 | {
77 | sendData();
78 | navigation.navigate('DiagnosisScreen');
79 | }}
80 | />
81 |
82 |
83 |
84 |
85 |
86 | *Personal information helps our model give more accurate
87 | predictions. Your personal information is NOT being stored.
88 |
89 |
90 |
91 |
92 | );
93 | };
94 |
95 | export default FormScreen;
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Melanoma Detector 📸💻
2 |
3 | ## Project Overview
4 | This is an open-source project dedicated to helping people living in regions with a lack of dermatologists! 🚀 We've developed a Skin Cancer Detection App using React Native for the front end and TensorFlow, NumPy, and Python for the back end. The app empowers users to check if a naevus (mole) is benign or malignant.
5 |
6 | ## Features
7 | - 📷 **Camera Integration:** Capture photos directly from your phone's camera.
8 | - 🔄 **Real-time Detection:** Instantly send the photo to the TensorFlow model for analysis.
9 | - 🤖 **Machine Learning Magic:** Utilizing TensorFlow, NumPy, and Python to distinguish between benign and malignant moles.
10 |
11 | ## How It Works
12 | 1. 📱 **User Permission:** The app prompts the user for camera permissions.
13 | 2. 📸 **Capture Photo:** Users can take photos of the naevus they want to analyze.
14 | 3. 🚀 **Model Processing:** The app sends the photo to the TensorFlow model for analysis (see the request sketch just below this list).
15 | 4. 🩺 **Diagnosis Result:** The model processes the image and provides feedback on whether the naevus is benign or malignant.
16 |
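For anyone poking at step 3 directly, here is a minimal sketch of the request the app sends to the FastAPI backend in `server/main.py`. It is illustrative only: the host and port (`localhost:8000`), the file name and the example form values are assumptions, not project defaults.

```python
import requests  # pip install requests

API_URL = "http://localhost:8000"  # assumed local address of the FastAPI server

# The /predict endpoint in server/main.py expects age, gender and localization
# as form fields plus the lesion photo as a multipart file upload.
with open("naevus.jpg", "rb") as photo:
    response = requests.post(
        f"{API_URL}/predict",
        data={"age": 30, "gender": "male", "localization": "back"},
        files={"photo": ("naevus.jpg", photo, "image/jpeg")},
    )

print(response.status_code, response.json())
```
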
17 | ## Technologies Used
18 | - ⚛️ **React Native:** For the frontend development.
19 | - 🧠 **TensorFlow:** Powering the machine learning model.
20 | - 🐍 **Python:** Backend development and model training.
21 | - 📊 **NumPy:** Handling numerical operations efficiently.
22 | - 📷 **Expo:** Leveraging React Native's cross-platform capabilities
23 |
24 | ## Training Data
25 | - 📊 **Kaggle Dataset:** The model has been trained on a curated dataset from Kaggle, ensuring robust and accurate predictions.
26 |
27 | ## Future Enhancements
28 | - 🌐 **Web Deployment:** I am considering deploying the app on the web for broader accessibility.
29 | - 🌈 **Improved UX/UI:** I plan to enhance the user interface with nativewind.
30 |
31 | ## Acknowledgments
32 | A big shoutout to the open-source community and the incredible tools and libraries that made this project possible.
33 | Special thanks to my team members for contributing so much to this project! 🎉
34 |
35 | Also, a big thank you to the authors of the
36 | ### Melanoma and Nevus Skin Lesion Classification Using Handcraft and Deep Learning Feature Fusion via Mutual Information Measures
37 | research paper for sharing their CAD system.
38 |
39 | Happy Coding! 🚀👩💻👨💻
40 |
41 | ## Requirements
42 |
43 | - Android or iOS device with a camera
44 | - Internet connection for sending photos to the prediction backend
45 |
46 | ## Installation
47 |
48 | 1. Clone the repository:
49 |
50 | ```bash
51 | git clone https://github.com/VukIG/Melanoma-Detector.git
52 | ```
53 |
54 | 2. Install dependencies:
55 |
56 | ```bash
57 | cd Melanoma-Detector
58 | npm install
59 | ```
60 |
61 | 3. Run the app and scan the QR code with the Expo Go app from the Play Store:
62 |
63 | ```bash
64 | npx expo start --tunnel
65 | ```
66 | 4. Run the app on an emulator (optional, if you don't want to use the Expo app):
67 | ```bash
68 | Press w for web, a for the Android emulator (requires the Android SDK setup) or i for the iOS emulator (requires Xcode)
69 | ```
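5. (Optional) Start the prediction backend so the app has something to talk to. The snippet below is a minimal sketch using Uvicorn; the host, port and `reload` flag are assumptions rather than project defaults, and the packages imported at the top of `server/main.py` need to be installed first.

```python
# run_backend.py -- a minimal sketch (assumes fastapi and uvicorn are installed)
import uvicorn

if __name__ == "__main__":
    # "main:app" points at the FastAPI instance `app` defined in server/main.py,
    # so run this from inside the server/ directory.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
```
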
70 | ## How to Contribute
71 | Feel free to fork the repository and contribute to the development. Your suggestions and enhancements are more than welcome! 🙌
72 |
73 | If you have suggestions, found a bug, or want to improve the app, please open an issue or submit a pull request.
75 |
76 | ## License
77 |
78 | This project is licensed under the [Apache 2.0 License](LICENSE).
79 |
80 | ## Detailed explanation
81 |
82 | A detailed explanation of how this app should work (written in Serbian) can be accessed through this URL: https://docs.google.com/document/d/1NwlALtB-bNRuoXDWS3nWsnSi3bMCoCZO5Nre84uz1rA/edit?usp=sharing
83 |
--------------------------------------------------------------------------------
/client/src/hooks/useGlobalStyle.js:
--------------------------------------------------------------------------------
1 | import { StyleSheet } from 'react-native';
2 | import { colors } from '../constants/color';
3 | import { scale } from '../helpers/scale';
4 |
5 | export const useGlobalStyle = () => {
6 | return StyleSheet.create({
7 | // layout
8 | CENTER_ROW: {
9 | display: 'flex',
10 | flexDirection: 'row',
11 | justifyContent: 'center',
12 | alignItems: 'center',
13 | },
14 | CENTER_COL: {
15 | display: 'flex',
16 | flexDirection: 'column',
17 | justifyContent: 'center',
18 | alignItems: 'center',
19 | },
20 | REVERSE_COL: {
21 | display: 'flex',
22 | flexDirection: 'column-reverse',
23 | justifyContent: 'center',
24 | alignItems: 'center',
25 | },
26 | REVERSE_ROW: {
27 | display: 'flex',
28 | flexDirection: 'row-reverse',
29 | justifyContent: 'center',
30 | alignItems: 'center',
31 | },
32 | CENTER_COL_BETWEEN: {
33 | display: 'flex',
34 | flexDirection: 'column',
35 | justifyContent: 'space-between',
36 | alignItems: 'center',
37 | },
38 | CENTER_ROW_BETWEEN: {
39 | display: 'flex',
40 | flexDirection: 'row',
41 | justifyContent: 'space-between',
42 | alignItems: 'center',
43 | },
44 | START_ROW_LAYOUT: {
45 | display: 'flex',
46 | flexDirection: 'row',
47 | justifyContent: 'flex-start',
48 | alignItems: 'center',
49 | },
50 | START_COL_CENTER: {
51 | display: 'flex',
52 | flexDirection: 'column',
53 | justifyContent: 'flex-start',
54 | alignItems: 'center',
55 | },
56 | BETWEEN_ROW_START: {
57 | display: 'flex',
58 | flexDirection: 'row',
59 | justifyContent: 'space-between',
60 | alignItems: 'flex-start',
61 | },
62 | START_ROW_START: {
63 | display: 'flex',
64 | flexDirection: 'row',
65 | justifyContent: 'flex-start',
66 | alignItems: 'flex-start',
67 | },
68 | CENTER_COL_START: {
69 | display: 'flex',
70 | flexDirection: 'column',
71 | justifyContent: 'center',
72 | alignItems: 'flex-start',
73 | },
74 | CENTER_COL_END: {
75 | display: 'flex',
76 | flexDirection: 'column',
77 | justifyContent: 'flex-end',
78 | alignItems: 'center',
79 | },
80 | END_COL_END: {
81 | display: 'flex',
82 | flexDirection: 'column',
83 | justifyContent: 'flex-end',
84 | alignItems: 'flex-end',
85 | },
86 | CENTER_ROW_END: {
87 | display: 'flex',
88 | flexDirection: 'row',
89 | justifyContent: 'flex-end',
90 | alignItems: 'center',
91 | },
92 | //font
93 | FONT32: {
94 | fontSize: scale(32),
95 | fontStyle: 'normal',
96 | fontFamily: 'Montserrat_400Regular',
97 | },
98 | FONT28: {
99 | fontSize: scale(28),
100 | fontStyle: 'normal',
101 | fontFamily: 'Montserrat_400Regular',
102 | },
103 | FONT20: {
104 | fontSize: scale(20),
105 | fontStyle: 'normal',
106 | fontFamily: 'Montserrat_400Regular',
107 | },
108 | FONT16: {
109 | fontSize: scale(16),
110 | fontStyle: 'normal',
111 | fontFamily: 'Montserrat_400Regular',
112 | },
113 | FONT12: {
114 | fontSize: scale(12),
115 | fontStyle: 'normal',
116 | fontFamily: 'Montserrat_400Regular',
117 | },
118 | FONTPRIMARY: {
119 | color: colors.primary,
120 | fontFamily: 'Montserrat_400Regular',
121 | },
122 | FONTBLACK: {
123 | color: colors.black,
124 | fontFamily: 'Montserrat_400Regular',
125 | },
126 | FONTWHITE: {
127 | color: colors.white,
128 | fontFamily: 'Montserrat_400Regular',
129 | },
130 | });
131 | };
132 |
--------------------------------------------------------------------------------
/client/src/screens/ScanPhotoScreen.jsx:
--------------------------------------------------------------------------------
1 | import { View, Text, ScrollView } from 'react-native';
2 | import { BaseScreen } from '../components/common/BaseScreen';
3 | import { useGlobalStyle } from '../hooks/useGlobalStyle';
4 | import { scaleVertical } from '../helpers/scale';
5 | import { PrimaryButton } from '../components/button/PrimaryButton';
6 | import { SecondaryButton } from '../components/button/SecondaryButton';
7 | import { ProgressStepBar } from '../components/ProgressStepBar';
8 | import { WelcomeSvg } from '../constants/svg';
9 | import { Ionicons } from '@expo/vector-icons';
10 | import { useContext, useEffect, useState } from 'react';
11 | import PermissionsContext from '../context/PermissionsContext';
12 | import ImageContext from '../context/ImageContext';
13 |
14 | const ScanPhotoScreen = ({ navigation }) => {
15 | const basicStyles = useGlobalStyle();
16 |
17 | const { permissions, lastPressed, setLastPressed } =
18 | useContext(PermissionsContext);
19 |
20 | const { captureImage } = useContext(ImageContext);
21 |
22 | const promptUser = () => {
23 | let prompt;
24 |
25 | if (!permissions.camera && lastPressed === 'camera') {
26 | prompt = 'Please make sure that you granted the app camera rights';
27 | } else if (!permissions.gallery && lastPressed === 'gallery') {
28 | prompt =
29 | 'Please make sure to grant the app access to the photos of your gallery';
30 | } else {
31 | prompt =
32 | 'Upload a picture of your naevus from the gallery, or take a picture of it with your camera';
33 | }
34 | return prompt;
35 | };
36 |
37 | return (
38 |
39 |
49 |
50 |
60 |
61 | Let's start
62 |
63 |
72 | {promptUser()}
73 |
74 |
75 |
83 | }
86 | onPress={() => {
87 | captureImage(true);
88 | setLastPressed('camera');
89 | navigation.navigate('FormScreen');
90 | }}
91 | />
92 |
96 | }
97 | onPress={() => {
98 | captureImage(false);
99 | setLastPressed('gallery');
100 | navigation.navigate('FormScreen');
101 | }}
102 | />
103 |
104 |
105 |
106 |
107 | );
108 | };
109 | export default ScanPhotoScreen;
110 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct - Melanoma-Detector
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to make participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to a positive environment for our
15 | community include:
16 |
17 | * Demonstrating empathy and kindness toward other people
18 | * Being respectful of differing opinions, viewpoints, and experiences
19 | * Giving and gracefully accepting constructive feedback
20 | * Accepting responsibility and apologizing to those affected by our mistakes,
21 | and learning from the experience
22 | * Focusing on what is best not just for us as individuals, but for the
23 | overall community
24 |
25 | Examples of unacceptable behavior include:
26 |
27 | * The use of sexualized language or imagery, and sexual attention or
28 | advances
29 | * Trolling, insulting or derogatory comments, and personal or political attacks
30 | * Public or private harassment
31 | * Publishing others' private information, such as a physical or email
32 | address, without their explicit permission
33 | * Other conduct which could reasonably be considered inappropriate in a
34 | professional setting
35 |
36 | ## Our Responsibilities
37 |
38 | Project maintainers are responsible for clarifying and enforcing our standards of
39 | acceptable behavior and will take appropriate and fair corrective action in
40 | response to any behavior that they deem inappropriate,
41 | threatening, offensive, or harmful.
42 |
43 | Project maintainers have the right and responsibility to remove, edit, or reject
44 | comments, commits, code, wiki edits, issues, and other contributions that are
45 | not aligned to this Code of Conduct, and will
46 | communicate reasons for moderation decisions when appropriate.
47 |
48 | ## Scope
49 |
50 | This Code of Conduct applies within all community spaces, and also applies when
51 | an individual is officially representing the community in public spaces.
52 | Examples of representing our community include using an official e-mail address,
53 | posting via an official social media account, or acting as an appointed
54 | representative at an online or offline event.
55 |
56 | ## Enforcement
57 |
58 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
59 | reported to the community leaders responsible for enforcement at .
60 | All complaints will be reviewed and investigated promptly and fairly.
61 |
62 | All community leaders are obligated to respect the privacy and security of the
63 | reporter of any incident.
64 |
65 | ## Enforcement Guidelines
66 |
67 | Community leaders will follow these Community Impact Guidelines in determining
68 | the consequences for any action they deem in violation of this Code of Conduct:
69 |
70 | ### 1. Correction
71 |
72 | **Community Impact**: Use of inappropriate language or other behavior deemed
73 | unprofessional or unwelcome in the community.
74 |
75 | **Consequence**: A private, written warning from community leaders, providing
76 | clarity around the nature of the violation and an explanation of why the
77 | behavior was inappropriate. A public apology may be requested.
78 |
79 | ### 2. Warning
80 |
81 | **Community Impact**: A violation through a single incident or series
82 | of actions.
83 |
84 | **Consequence**: A warning with consequences for continued behavior. No
85 | interaction with the people involved, including unsolicited interaction with
86 | those enforcing the Code of Conduct, for a specified period of time. This
87 | includes avoiding interactions in community spaces as well as external channels
88 | like social media. Violating these terms may lead to a temporary or
89 | permanent ban.
90 |
91 | ### 3. Temporary Ban
92 |
93 | **Community Impact**: A serious violation of community standards, including
94 | sustained inappropriate behavior.
95 |
96 | **Consequence**: A temporary ban from any sort of interaction or public
97 | communication with the community for a specified period of time. No public or
98 | private interaction with the people involved, including unsolicited interaction
99 | with those enforcing the Code of Conduct, is allowed during this period.
100 | Violating these terms may lead to a permanent ban.
101 |
102 | ### 4. Permanent Ban
103 |
104 | **Community Impact**: Demonstrating a pattern of violation of community
105 | standards, including sustained inappropriate behavior, harassment of an
106 | individual, or aggression toward or disparagement of classes of individuals.
107 |
108 | **Consequence**: A permanent ban from any sort of public interaction within
109 | the community.
110 |
111 | ## Attribution
112 |
113 | This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org/), version
114 | [1.4](https://www.contributor-covenant.org/version/1/4/code-of-conduct/code_of_conduct.md) and
115 | [2.0](https://www.contributor-covenant.org/version/2/0/code_of_conduct/code_of_conduct.md),
116 | and was generated by [contributing-gen](https://github.com/bttger/contributing-gen).
--------------------------------------------------------------------------------
/model/fusion_stage.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import argparse
4 | import os
5 | from sklearn.feature_selection import mutual_info_classif, SelectKBest
6 | from sklearn.model_selection import GridSearchCV
7 | from sklearn.linear_model import LogisticRegression
8 | from sklearn.svm import SVC
9 | from sklearn.preprocessing import LabelEncoder
10 | from sklearn.metrics import accuracy_score, classification_report
11 | from skrvm import RVC
12 | from tensorflow.keras.preprocessing.image import ImageDataGenerator
13 |
14 | from imgCropBinaryMask import image_data_gen_preprocessing
15 | from handcrafted import extract_handcrafted
16 | from modelcrafted import dl_extract_features_from_img
17 |
18 | """Argparse input for script to eliminate errors when running on non native machines
19 |
20 | Example Usage:
21 | python fusion_stage.py -tr-dir /path/to -ts-dir /path/to -val-dir /path/to -csv /path/to
22 |
23 | """
24 | parser = argparse.ArgumentParser(description='Directory arguments for datasets')
25 | parser.add_argument('-tr-dir',
26 | '--train_dir',
27 | required=True,
28 | help='Input train dataset')
29 |
30 | parser.add_argument('-ts-dir',
31 | '--test_dir',
32 | required=True,
33 | help='Input test dataset')
34 |
35 | parser.add_argument('-val-dir',
36 | '--valid_dir',
37 | required=True,
38 | help='Input validated dataset')
39 |
40 | parser.add_argument('-csv',
41 | '--csv_file',
42 | required=True,
43 | help='Input csv dataset')
44 | args = parser.parse_args()
45 |
46 | train_dir = args.train_dir
47 | test_dir = args.test_dir
48 | valid_dir = args.valid_dir
49 | csv_file = args.csv_file
50 |
51 |
52 | """ Old hard coded values for redundancy
53 | # Specify the directory containing your training and validation images
54 | #train_dir = '/home/vuk/Documents/ML_Data/HAM/train'
55 | #test_dir = '/home/vuk/Documents/ML_Data/HAM/test'
56 | #valid_dir = '/home/vuk/Documents/ML_Data/HAM/validate'
57 | #csv_file = '/home/vuk/Documents/ML_Data/HAM/HAM10000_metadata.csv'
58 | """
59 |
60 |
61 | numberOfImages = 10005
62 | image_height = image_width = 224
63 | batch_size = 32
64 | num_epochs = 9
65 |
66 |
67 | # Load labels from CSV file
68 | labels_df = pd.read_csv(csv_file)
69 | labels_dict = dict(zip(labels_df['image_id'], labels_df['dx']))
70 |
71 | # maybe use this? dataset = tf.data.Dataset.from_tensor_slices(images)
72 | def crop_img(img):
73 | cropped_img, _ = image_data_gen_preprocessing(img)
74 | return cropped_img
75 |
76 | def bin_mask(img):
77 | _, cropped_mask, _ = image_data_gen_preprocessing(img)
78 | return cropped_mask
79 |
80 |
81 |
82 | # Define the wrapping_features function as requested
83 | def wrapping_features(img):
84 | print(img)
85 | img_bin = bin_mask(img)
86 | hf = extract_handcrafted(img, img_bin)
87 | dlf = dl_extract_features_from_img(img)
88 | # Ensure hf and dlf are 2D arrays before concatenation
89 | extracted_features = np.concatenate([hf[np.newaxis, :], dlf[np.newaxis, :]], axis=1)
90 | return extracted_features
91 |
92 | datagen = ImageDataGenerator(preprocessing_function=crop_img)
93 |
94 | # Create data generators
95 | #%%
96 | train_generator = datagen.flow_from_directory(
97 | train_dir,
98 | target_size=(image_height, image_width),
99 | batch_size=batch_size,
100 | class_mode='binary' # Set class_mode to 'categorical' if you have multiple classes
101 | )
102 |
103 | valid_generator = datagen.flow_from_directory(
104 | valid_dir,
105 | target_size=(image_height, image_width),
106 | batch_size=batch_size,
107 | class_mode='binary' # Set class_mode to 'categorical' if you have multiple classes
108 | )
109 |
110 | test_generator = datagen.flow_from_directory(
111 | test_dir,
112 | target_size=(image_height, image_width),
113 | batch_size=batch_size,
114 | class_mode='binary' # Set class_mode to 'categorical' if you have multiple classes
115 | )
116 |
117 | #%%
118 | # Extract features from images
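# NOTE: flow_from_directory generators yield (image_batch, label_batch) tuples and loop
# over the data indefinitely, so each `image` below is a whole batch rather than a single
# file, and the comprehensions have no natural stopping point unless the iteration is
# capped (e.g. at len(generator) batches).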
119 | train_features = np.array([wrapping_features(image) for image, _ in train_generator])
120 | test_features = np.array([wrapping_features(image) for image, _ in test_generator])
121 | valid_features = np.array([wrapping_features(image) for image, _ in valid_generator])
122 |
123 | # Encode labels
124 | label_encoder = LabelEncoder()
125 | train_labels = label_encoder.fit_transform([labels_dict[os.path.splitext(os.path.basename(image_path))[0]] for image_path in train_generator.filepaths])
126 | test_labels = label_encoder.transform([labels_dict[os.path.splitext(os.path.basename(image_path))[0]] for image_path in test_generator.filepaths])
127 | valid_labels = label_encoder.transform([labels_dict[os.path.splitext(os.path.basename(image_path))[0]] for image_path in valid_generator.filepaths])
128 |
129 | #%%
130 | # Select top N features based on mutual information
131 | N = 100 # Adjust N as needed
132 | selector = SelectKBest(mutual_info_classif, k=N).fit(train_features, train_labels)
133 | train_features_selected = selector.transform(train_features)
134 | test_features_selected = selector.transform(test_features)
135 | valid_features_selected = selector.transform(valid_features)
136 |
137 | #%%
138 | # Models to be tested, including RVC now
139 | models = {
140 | "LogisticRegression": LogisticRegression(max_iter=1000),
141 | "SVC": SVC(),
142 | "RVC": RVC(kernel='rbf') # Using RBF kernel as per the example
143 | }
144 |
145 | # Hyperparameter tuning setup for RVC and others
146 | param_grid = {
147 | "LogisticRegression": {"C": [0.01, 0.1, 1, 10]},
148 | "SVC": {"C": [0.1, 1, 10], "kernel": ["linear", "rbf"]},
149 | "RVC": {"alpha": [1e-6, 1e-4, 1e-2], "beta": [1e-6, 1e-4, 1e-2]}
150 | }
151 |
152 | #%%
153 | # Model selection and hyperparameter tuning
154 | best_models = {}
155 | for model_name in models:
156 | grid_search = GridSearchCV(models[model_name], param_grid[model_name], cv=5, scoring='accuracy', n_jobs=-1)
157 | grid_search.fit(train_features_selected, train_labels)
158 | best_models[model_name] = grid_search.best_estimator_
159 | print(f"{model_name} best score: {grid_search.best_score_}")
160 |
161 | # Model evaluation
162 | for model_name, model in best_models.items():
163 | predictions = model.predict(test_features_selected)
164 | accuracy = accuracy_score(test_labels, predictions)
165 | print(f"{model_name} Test Accuracy: {accuracy}")
166 | print(f"Classification Report for {model_name}:")
167 | print(classification_report(test_labels, predictions))
168 | print("---------------------------------------------------\n")
--------------------------------------------------------------------------------
/model/python-compile-scripts/pycompiler-args.py:
--------------------------------------------------------------------------------
1 | # Dep: Python Script Compiler with args
2 | # Author: Sabian Hibbs
3 | # Circa: 2024 - 16/03/2024
4 |
5 | """Compile a Python script into a standalone executable.
6 |
7 | Usage: python pycompiler-args.py -i img-crop-binary-mask.py -o /path/to/output/directory
8 | -i = Input python script to be converted to exe
9 | -o = Output exe directory + additional files
10 |
11 | react.js Example: ------------------------------------------------------------------\
12 | |
13 | import RNFS from 'react-native-fs'; |
14 | |
15 | // Function to execute the binary |
16 | const executeBinary = async () => { |
17 | const binaryPath = RNFS.DocumentDirectoryPath + '/path/to/img_crop_binary_mask'; |
18 | const outputDir = RNFS.DocumentDirectoryPath + '/output/'; |
19 | |
20 | try { |
21 | // Execute the binary with the desired flags |
22 | const result = await RNFS.execFile(binaryPath, [ |
23 | '-bd', '/path/to/base/dir', |
24 | '-sd', '/path/to/starting/dir', |
25 | '-bnd', '/path/to/benign/dir', |
26 | '-md', '/path/to/malignant/dir', |
27 | '-o', outputDir |
28 | ]); |
29 | console.log('Binary execution result:', result); |
30 | } catch (error) { |
31 | console.error('Error executing binary:', error); |
32 | } |
33 | }; |
34 | |
35 | ------------------------------------------------------------------------------------/
36 | """
37 |
38 | import os
39 | import subprocess
40 | import shutil
41 | import argparse
42 | import importlib.util
43 |
44 |
45 | def is_pyinstaller_installed():
46 | """Check if PyInstaller is installed."""
47 | try:
48 | spec = importlib.util.find_spec('PyInstaller')
49 | return spec is not None
50 | except ImportError:
51 | return False
52 |
53 |
54 | def install_pyinstaller():
55 | """Install PyInstaller using pip."""
56 | print("PyInstaller is not installed.")
57 | while True:
58 | choice = input("Do you want to install PyInstaller? [y/n]: ").lower()
59 | if choice == 'y':
60 | subprocess.run(['pip', 'install', 'pyinstaller'], check=True)
61 | print("PyInstaller installed successfully.")
62 | return True
63 | elif choice == 'n':
64 | print("PyInstaller installation canceled.")
65 | return False
66 | else:
67 | print("Invalid choice. Please enter 'y' or 'n'.")
68 |
69 |
70 | def compile_script(script_path, output_dir):
71 | """Compile the Python script into a standalone executable.
72 |
73 | Args:
74 | script_path: Path to the input Python script.
75 | output_dir: Directory to store the compiled executable.
76 | """
77 | # Set the name of the executable file based on the input script name
78 | script_name = os.path.splitext(os.path.basename(script_path))[0]
79 | executable_name = script_name.replace('-', '_')
80 |
81 | # Set the PyInstaller command and options
82 | pyinstaller_cmd = [
83 | 'pyinstaller',
84 | '--onefile',
85 | '--name', executable_name,
86 |         '--hidden-import', 'cv2',        # hidden imports take importable module names
87 |         '--hidden-import', 'skimage',
88 | '--collect-data', 'skimage.transform',
89 | '--collect-data', 'cv2',
90 | script_path
91 | ]
92 |
93 | # Run PyInstaller to create the standalone executable
94 | subprocess.run(pyinstaller_cmd, check=True)
95 |
96 | # Create the output directory if it doesn't exist
97 | os.makedirs(output_dir, exist_ok=True)
98 |
99 | # Move the executable to the output directory
100 | executable_path = os.path.join(output_dir, executable_name)
101 | if os.path.exists(executable_path):
102 | os.remove(executable_path)
103 | shutil.move(os.path.join('dist', executable_name), output_dir)
104 |
105 | # Clean up the temporary files created by PyInstaller
106 | shutil.rmtree('build')
107 | os.remove(f'{executable_name}.spec')
108 |
109 | print(f"Standalone executable created: {executable_path}")
110 |
111 |
112 | def main():
113 | """Parse command-line arguments and compile the Python script."""
114 |
115 | parser = argparse.ArgumentParser(
116 | description='Compile a Python script into a standalone executable -i input script | -o output_dir')
117 | #parser.add_argument('--help', action='store_true', help='-i input script | -o output_dir')
118 |
119 | parser.add_argument('-i',
120 | '--input_script',
121 | required=True,
122 | help='Path to the input Python script')
123 |
124 | parser.add_argument('-o',
125 | '--output_dir',
126 | default='dist',
127 | help='Directory to store the compiled executable '
128 | '(default: dist)')
129 | args = parser.parse_args()
130 |
131 | script_path = args.input_script
132 | output_dir = args.output_dir
133 |
134 | if not os.path.exists(script_path):
135 | print(f"Input script not found: {script_path}")
136 | return
137 |
138 | if not is_pyinstaller_installed():
139 | if not install_pyinstaller():
140 | return
141 |
142 | compile_script(script_path, output_dir)
143 |
144 |
145 | if __name__ == '__main__':
146 | main()
147 |
--------------------------------------------------------------------------------
/client/src/assets/svg/allowgallery.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/model/notebooks/test_dataset.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 4,
6 | "id": "ce845088-960b-4d50-8033-9b6c975b69a8",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "import os\n",
11 | "import numpy as np\n",
12 | "import pandas as pd\n",
13 | "import shutil\n",
14 | "import cv2"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 14,
20 | "id": "3d6b3cde-fcc1-4de1-91ba-3c2018dc8da5",
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "base_dir = '/home/vuk/Documents/ML_Data/CDA1'\n",
25 | "csv_loc = '/home/vuk/Documents/ML_Data/CDA1/HAM10000_metadata.csv'\n",
26 | "ham_dir ='/home/vuk/Documents/ML_Data/CDA1/HAM10000'\n",
27 | "test_dataset_dir = '/home/vuk/Melanoma-Detector/model/test_dataset'"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 21,
33 | "id": "c4e603b7-de02-4ca2-ad8c-65424bae95d9",
34 | "metadata": {},
35 | "outputs": [
36 | {
37 | "name": "stdout",
38 | "output_type": "stream",
39 | "text": [
40 | "ls: cannot access '/home/vuk/Documents/ML_Data/CDA1/HAM1000': No such file or directory\n"
41 | ]
42 | }
43 | ],
44 | "source": [
45 | "!ls -l /home/vuk/Documents/ML_Data/CDA1/HAM10000"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 28,
51 | "id": "87366a1c-9b55-4465-97f8-a11304835ee6",
52 | "metadata": {},
53 | "outputs": [
54 | {
55 | "name": "stdout",
56 | "output_type": "stream",
57 | "text": [
58 | "Directory '/home/vuk/Melanoma-Detector/model/test_dataset/benign' already exists.\n",
59 | "Directory '/home/vuk/Melanoma-Detector/model/test_dataset/malignant' already exists.\n"
60 | ]
61 | }
62 | ],
63 | "source": [
64 | "for subfolder in ['benign','malignant']:\n",
65 | " subfolder_path = os.path.join(test_dataset_dir, subfolder)\n",
66 | " if not os.path.exists(subfolder_path):\n",
67 | " os.makedirs(subfolder_path)\n",
68 | " else:\n",
69 | " print(f\"Directory '{subfolder_path}' already exists.\")"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": 36,
75 | "id": "de0b5f0e-3828-431f-a521-8929d469d218",
76 | "metadata": {},
77 | "outputs": [
78 | {
79 | "name": "stdout",
80 | "output_type": "stream",
81 | "text": [
82 | "-rw-rw-r-- 1 vuk vuk 311921 okt 6 2019 ISIC_0027419.jpg\n"
83 | ]
84 | }
85 | ],
86 | "source": [
87 | "!ls -l /home/vuk/Documents/ML_Data/CDA1/HAM10000 | grep 'ISIC_0027419'"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": 43,
93 | "id": "e549955e-875c-47a3-94b2-cc6779f11cdc",
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "with open(csv_loc) as file:\n",
98 | " # Skip the header line\n",
99 | " next(file)\n",
100 | " \n",
101 | " # Counter to track the number of images moved\n",
102 | " num_images_moved = 0\n",
103 | " \n",
104 | " # Iterate through each line in the file\n",
105 | " for line in file:\n",
106 | " # Split the line by comma\n",
107 | " split_line = line.split(',')\n",
108 | " \n",
109 | " # Extract the image file and tumor type\n",
110 | " img_file = split_line[1].strip()\n",
111 | " tumor_type = split_line[2].strip()\n",
112 | "\n",
113 | " # Determine the class directory based on the tumor type\n",
114 | " class_dir = 'benign' if tumor_type not in ['bcc', 'mel'] else 'malignant'\n",
115 | " \n",
116 | " # Construct the source file path\n",
117 | " source_file_path = os.path.join(ham_dir, img_file + '.jpg')\n",
118 | " \n",
119 | " # Construct the destination directory path\n",
120 | " destination_dir = os.path.join(test_dataset_dir, class_dir)\n",
121 | " \n",
122 | " # Create the destination directory if it doesn't exist\n",
123 | " if not os.path.exists(destination_dir):\n",
124 | " os.makedirs(destination_dir)\n",
125 | " \n",
126 | " # Construct the destination file path\n",
127 | " destination_file_path = os.path.join(destination_dir, img_file + '.jpg')\n",
128 | " \n",
129 | " # Move the image to the destination directory only if the filename is not \"ISIC_0027419\"\n",
130 | " if img_file != \"ISIC_0027419\":\n",
131 | " shutil.move(source_file_path, destination_file_path)\n",
132 | " num_images_moved += 1\n",
133 | " \n",
134 | " # Break the loop if 200 images have been moved\n",
135 | " if num_images_moved >= 200:\n",
136 | " break\n"
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": null,
142 | "id": "83979782-eeff-42a9-9db0-ef57ce009b6d",
143 | "metadata": {},
144 | "outputs": [],
145 | "source": [
146 |     "def pre_processing(img_path):\n",
147 | " image = cv2.imread(img_path)\n",
148 | " image_blurred = cv2.GaussianBlur(image, (5, 5), 1)\n",
149 | " lab_image = cv2.cvtColor(image_blurred, cv2.COLOR_BGR2LAB)\n",
150 | " gray_image = cv2.cvtColor(image_blurred, cv2.COLOR_BGR2GRAY)\n",
151 | " \n",
152 | " # To find upper threshold, we need to apply Otsu's thresholding\n",
153 | " upper_threshold, thresh_image = cv2.threshold(\n",
154 | " gray_image, thresh=0, maxval=255, type=cv2.THRESH_BINARY + cv2.THRESH_OTSU\n",
155 | " )\n",
156 | " # Calculate lower threshold\n",
157 | " lower_threshold = 0.5 * upper_threshold\n",
158 | " \n",
159 | " # Apply canny edge detection\n",
160 | " canny = cv2.Canny(gray_image, lower_threshold, upper_threshold)\n",
161 | " # Finding the non-zero points of canny\n",
162 | " pts = np.argwhere(canny > 0)\n",
163 | " \n",
164 | " # Finding the min and max points\n",
165 | " y1, x1 = pts.min(axis=0)\n",
166 | " y2, x2 = pts.max(axis=0)\n",
167 | " \n",
168 |     "    # Crop ROI from the given image\n",
169 | " output_image = image[y1:y2, x1:x2]\n",
170 | "\n",
171 | " return output_image"
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": null,
177 | "id": "0b1b63c3-7c6c-4b83-a3b0-e880348c17f9",
178 | "metadata": {},
179 | "outputs": [],
180 | "source": [
181 | "def binary_mask(image):\n",
182 |     "    IL, Ia, Ib = cv2.split(image)\n",
183 | " avg_IL = np.mean(IL)\n",
184 | " avg_Ia = np.mean(Ia)\n",
185 | " avg_Ib = np.mean(Ib)\n",
186 | "\n",
187 | " print(avg_IL,avg_Ia,avg_Ib)\n",
188 | " \n",
189 |     "    IThL = np.where(IL >= avg_IL, 1, 0).astype(np.uint8)\n",
190 |     "    ITha = np.where(Ia >= avg_Ia, 1, 0).astype(np.uint8)\n",
191 |     "    IThb = np.where(Ib >= avg_Ib, 1, 0).astype(np.uint8)\n",
192 | "\n",
193 | " Ibin = cv2.bitwise_and(IThL, ITha)\n",
194 | " Ibin = cv2.bitwise_and(Ibin, IThb)\n",
195 | " Ibin_uint8 = Ibin.astype(np.uint8)\n",
196 |     "    return Ibin_uint8"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": null,
202 | "id": "75d32630-054e-4095-a891-8ef076b1c588",
203 | "metadata": {},
204 | "outputs": [],
205 | "source": [
206 |     "def save_images(name, img, imgBin, destination_folder):\n",
207 |     "    cv2.imwrite(os.path.join(destination_folder, 'roi_' + name + '.jpg'), img)\n",
208 |     "    cv2.imwrite(os.path.join(destination_folder, 'binary_' + name + '.jpg'), imgBin)"
209 | ]
210 | },
211 | {
212 | "cell_type": "code",
213 | "execution_count": null,
214 | "id": "dd989846-7234-408a-b902-e6d7808d45e2",
215 | "metadata": {},
216 | "outputs": [],
217 | "source": [
218 |     "import glob\n",
219 |     "for image_path in glob.glob(os.path.join('test_dataset', '*', '*.jpg')):\n",
220 |     "    img = pre_processing(image_path)\n",
221 |     "    imgBin = binary_mask(cv2.cvtColor(img, cv2.COLOR_BGR2LAB))\n",
222 |     "    name = os.path.splitext(os.path.basename(image_path))[0]\n",
223 |     "    save_images(name, img, imgBin, 'test_dataset')"
224 | ]
225 | }
226 | ],
227 | "metadata": {
228 | "kernelspec": {
229 | "display_name": "Python 3 (ipykernel)",
230 | "language": "python",
231 | "name": "python3"
232 | },
233 | "language_info": {
234 | "codemirror_mode": {
235 | "name": "ipython",
236 | "version": 3
237 | },
238 | "file_extension": ".py",
239 | "mimetype": "text/x-python",
240 | "name": "python",
241 | "nbconvert_exporter": "python",
242 | "pygments_lexer": "ipython3",
243 | "version": "3.11.5"
244 | }
245 | },
246 | "nbformat": 4,
247 | "nbformat_minor": 5
248 | }
249 |
--------------------------------------------------------------------------------
/model/img-crop-binary-mask.py:
--------------------------------------------------------------------------------
1 | # This script is used to crop an image based on a
2 | # binary mask and generate an inverted version of the cropped image.
3 | # The cropped image, inverted image, and the binary mask are then
4 | # saved to the specified output directory.
5 | #
6 | # Example usage: python img-crop-binary-mask.py -i input.jpg -o output
7 | # Input: input.jpg
8 | # Output: output/original_cropped.jpg,
9 | # output/inverted_cropped.jpg,
10 | # output/binary_mask_cropped.jpg
11 | #
12 | # Dep: Computer Vision
13 | # Author: Sabian Hibbs
14 | # Circa: 2024 - 14/03/2024
15 |
16 | # The authors code was modified by removing the inverted_cropped version of an image
17 | # Using inverted images as a way to train the model has not yet been tested and
18 | # thus it will not be used. Also glob library was added to loop over the benign and
19 | # malignant folders in order to prepare the HAM dataset for model training
20 |
21 | import glob
22 | import argparse
23 | import cv2
24 | import numpy as np
25 | import os
26 | from skimage.transform import resize
27 |
28 |
29 | # Constants
30 | IMAGE_PADDING = 10 # Pixel padding around ROI before cropping.
31 | IMG_CONTRAST = 1.0 # Image contrast factor. (0.0 = no contrast, 2.0 = double contrast)
32 |
33 | # Define the directories
34 |
35 | base_dir = '/home/vuk/Documents/ML_Data/HAM/processed'
36 | starting_dir = '/home/vuk/Documents/ML_Data/HAM/train/'
37 | benign_dir = '/home/vuk/Documents/ML_Data/HAM/train/benign'
38 | malignant_dir = '/home/vuk/Documents/ML_Data/HAM/train/malignant'
39 | roi_dir = os.path.join(base_dir, 'roi')
40 | binary_dir = os.path.join(base_dir, 'binary')
41 |
42 | # Create the output directories if they don't exist
43 | os.makedirs(roi_dir, exist_ok=True)
44 | os.makedirs(binary_dir, exist_ok=True)
45 |
46 |
47 | def read_image(image_path):
48 | """
49 | Loads an image from a specified path.
50 |
51 | Args:
52 | image_path (str): The path to the image file.
53 |
54 | Returns:
55 | numpy.ndarray: The loaded image as a NumPy array.
56 | """
57 | return cv2.imread(image_path) # Read image from file.
58 |
59 |
60 | def preprocess_image(image):
61 | """
62 | Applies preprocessing steps to the image.
63 |
64 | Args:
65 | image (numpy.ndarray): The input image.
66 |
67 | Returns:
68 | numpy.ndarray: The preprocessed image.
69 | """
70 | print("Original image shape:", image.shape) # Debug print
71 | # Apply Gaussian blur to the image.
72 | image_blurred = cv2.GaussianBlur(image, (5, 5), 1)
73 | # Convert the image to the LAB color space.
74 | return cv2.cvtColor(image_blurred, cv2.COLOR_BGR2LAB)
75 |
76 | def generate_binary_mask(image):
77 | """
78 | Generates a binary mask for the image.
79 |
80 | Args:
81 | image (numpy.ndarray): The input image.
82 |
83 | Returns:
84 | numpy.ndarray: The binary mask generated from the input image.
85 | """
86 | # Convert the image to grayscale.
87 | gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
88 | # Apply Otsu's thresholding to the grayscale image.
89 |     _, binary_mask = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
90 | return binary_mask
91 |
92 |
93 | def crop_image(original_image, binary_mask, buffer=IMAGE_PADDING):
94 | """
95 | Crops the region of interest from the given image and binary mask.
96 |
97 | Args:
98 | original_image (numpy.ndarray): The original image.
99 | binary_mask (numpy.ndarray): The binary mask indicating the region of interest.
100 |         buffer (int, optional): The buffer size around the region of interest. Defaults to IMAGE_PADDING.
101 |
102 | Returns:
103 | tuple: A tuple containing the cropped original image and the cropped binary mask.
104 | """
105 | # Find the contours in the binary mask
106 | contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
107 | # If no contours are found, return None
108 | if not contours:
109 | print("No contours detected; unable to crop the image.")
110 | return None, None
111 |
112 | # Find the largest contour and compute the bounding box
113 | largest_contour = max(contours, key=cv2.contourArea)
114 | x, y, w, h = cv2.boundingRect(largest_contour)
115 |
116 | # Compute the square crop dimensions with buffer
117 | side_length = max(w, h) + 2 * buffer
118 | center_x, center_y = x + w // 2, y + h // 2
119 | x = max(center_x - side_length // 2, 0)
120 | y = max(center_y - side_length // 2, 0)
121 | # Ensure the crop dimensions do not exceed the image size
122 | if x + side_length > original_image.shape[1]:
123 | side_length = original_image.shape[1] - x
124 | if y + side_length > original_image.shape[0]:
125 | side_length = original_image.shape[0] - y
126 |
127 | # Crop the original image and the binary mask
128 | cropped_original = original_image[y:y + side_length, x:x + side_length]
129 | cropped_mask = binary_mask[y:y + side_length, x:x + side_length]
130 | return cropped_original, cropped_mask
131 |
132 |
133 | def invert_colors(image):
134 | """
135 | Inverts the colors of the given image and increases contrast.
136 |
137 | Args:
138 | image (numpy.ndarray): The input image to be processed.
139 |
140 | Returns:
141 | numpy.ndarray: The inverted image with increased contrast.
142 | """
143 | # Convert the image to the HSV color space
144 | hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
145 | hsv_image[:, :, 1] = 255 - hsv_image[:, :, 1]
146 |
147 | # Increase contrast
148 | hsv_image[:, :, 2] = np.clip(hsv_image[:, :, 2] * IMG_CONTRAST, 0, 255)
149 |
150 | # Convert the image back to the BGR color space
151 | inverted_image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
152 | return inverted_image
153 |
154 |
155 | def save_cropped_images(cropped_original, cropped_mask, i, class_name):
156 | """
157 | Saves the cropped original and the cropped binary mask images.
158 |
159 | Args:
160 | cropped_original (numpy.ndarray): The cropped original image.
161 | cropped_mask (numpy.ndarray): The cropped binary mask image.
163 | i (int): The index of the image.
164 | class_name (str): The class name of the image (benign or malignant).
165 |
166 | Returns:
167 | None
168 | """
169 | os.makedirs(os.path.join(roi_dir, class_name), exist_ok=True)
170 | os.makedirs(os.path.join(binary_dir, class_name), exist_ok=True)
171 | # Save the cropped original image
172 | cv2.imwrite(os.path.join(roi_dir,class_name, f"{i}.jpg"), cropped_original)
173 | # Save the cropped image with the binary mask applied
174 | cv2.imwrite(os.path.join(binary_dir,class_name, f"{i}.jpg"), cropped_mask)
175 |
176 | def main():
177 | """
178 | Image Cropping and Mask Generation Tool.
179 |
180 |     This tool walks the hard-coded benign/malignant training directories, crops each
181 |     image based on its binary mask, and saves the cropped image and the cropped
182 |     binary mask into class subfolders of the roi/ and binary/ output directories.
183 |
184 |     The original single-image argparse interface (-i input image, -o output directory)
185 |     is kept below, commented out.
186 |
189 | Returns:
190 | None
191 | """
192 | #parser = argparse.ArgumentParser(description="Image Cropping and Mask Generation Tool")
193 | #parser.add_argument("-i", "--input", required=True, help="Path to the input image file")
194 | #parser.add_argument("-o", "--output", default="", help="Directory to save the output images")
195 | #args = parser.parse_args()
196 |
197 | #input_path = args.input
198 | #output_path = args.output if args.output else os.path.dirname(input_path)
199 |
200 | image_paths = glob.glob(os.path.join(starting_dir, '*', '*'))
201 | for i, image_path in enumerate(image_paths):
202 | print(image_path)
203 | original_image = read_image(image_path)
204 | if original_image is None:
205 | print(f"Failed to load image: {image_path}")
206 | continue # Skip to the next image if loading failed
207 | preprocessed_image = preprocess_image(original_image)
208 | binary_mask = generate_binary_mask(preprocessed_image)
209 | cropped_original, cropped_mask = crop_image(original_image, binary_mask)
210 | if 'benign' in image_path:
211 | class_name = 'benign'
212 | elif 'malignant' in image_path:
213 | class_name = 'malignant'
214 | else:
215 | print(f"Unknown class for image: {image_path}")
216 | continue
217 | save_cropped_images(cropped_original, cropped_mask, i, class_name)
218 | if __name__ == "__main__":
219 | main()
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
2 | # Contributing to Melanoma-Detector
3 |
4 | First off, thanks for taking the time to contribute! ❤️
5 |
6 | All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
7 |
8 | > And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
9 | > - Star the project
10 | > - Tweet about it
11 | > - Refer this project in your project's readme
12 | > - Mention the project at local meetups and tell your friends/colleagues
13 |
14 |
15 | ## Table of Contents
16 |
17 | - [Code of Conduct](#code-of-conduct)
18 | - [I Have a Question](#i-have-a-question)
19 | - [I Want To Contribute](#i-want-to-contribute)
20 | - [Reporting Bugs](#reporting-bugs)
21 | - [Suggesting Enhancements](#suggesting-enhancements)
22 | - [Your First Code Contribution](#your-first-code-contribution)
23 | - [Improving The Documentation](#improving-the-documentation)
24 | - [Styleguides](#styleguides)
25 | - [Commit Messages](#commit-messages)
26 | - [Join The Project Team](#join-the-project-team)
27 |
28 |
29 | ## Code of Conduct
30 |
31 | This project and everyone participating in it is governed by the
32 | [Melanoma-Detector Code of Conduct](https://github.com/VukIG/Melanoma-Detector/blob/master/CODE_OF_CONDUCT.md).
33 | By participating, you are expected to uphold this code. Please report unacceptable behavior
34 | to .
35 |
36 |
37 | ## I Have a Question
38 |
39 | > If you want to ask a question, we assume that you have read the available [Documentation]().
40 |
41 | Before you ask a question, it is best to search for existing [Issues](https://github.com/VukIG/Melanoma-Detector/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
42 |
43 | If you then still feel the need to ask a question and need clarification, we recommend the following:
44 |
45 | - Open an [Issue](https://github.com/VukIG/Melanoma-Detector/issues/new).
46 | - Provide as much context as you can about what you're running into.
47 | - Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
48 |
49 | We will then take care of the issue as soon as possible.
50 |
51 |
65 |
66 | ## I Want To Contribute
67 |
68 | > ### Legal Notice
69 | > When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
70 |
71 | ### Reporting Bugs
72 |
73 |
74 | #### Before Submitting a Bug Report
75 |
76 | A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
77 |
78 | - Make sure that you are using the latest version.
79 | - Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
80 | - To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/VukIG/Melanoma-Detector/issues?q=label%3Abug).
81 | - Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
82 | - Collect information about the bug:
83 | - Stack trace (Traceback)
84 | - OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
85 | - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
86 | - Possibly your input and the output
87 | - Can you reliably reproduce the issue? And can you also reproduce it with older versions?
88 |
89 |
90 | #### How Do I Submit a Good Bug Report?
91 |
92 | > You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
93 |
94 |
95 | We use GitHub issues to track bugs and errors. If you run into an issue with the project:
96 |
97 | - Open an [Issue](https://github.com/VukIG/Melanoma-Detector/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
98 | - Explain the behavior you would expect and the actual behavior.
99 | - Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
100 | - Provide the information you collected in the previous section.
101 |
102 | Once it's filed:
103 |
104 | - The project team will label the issue accordingly.
105 | - A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
106 | - If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be [implemented by someone](#your-first-code-contribution).
107 |
108 |
109 |
110 |
111 | ### Suggesting Enhancements
112 |
113 | This section guides you through submitting an enhancement suggestion for Melanoma-Detector, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
114 |
115 |
116 | #### Before Submitting an Enhancement
117 |
118 | - Make sure that you are using the latest version.
119 | - Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
120 | - Perform a [search](https://github.com/VukIG/Melanoma-Detector/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
121 | - Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
122 |
123 |
124 | #### How Do I Submit a Good Enhancement Suggestion?
125 |
126 | Enhancement suggestions are tracked as [GitHub issues](https://github.com/VukIG/Melanoma-Detector/issues).
127 |
128 | - Use a **clear and descriptive title** for the issue to identify the suggestion.
129 | - Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
130 | - **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
131 | - You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
132 | - **Explain why this enhancement would be useful** to most Melanoma-Detector users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
133 |
134 |
135 |
136 | ### Your First Code Contribution
137 |
141 |
142 | ### Improving The Documentation
143 |
147 |
148 | ## Styleguides
149 | ### Commit Messages
150 |
153 |
154 | ## Join The Project Team
155 |
156 |
157 |
158 | ## Attribution
159 | This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
160 |
--------------------------------------------------------------------------------
/model/handcrafted.py:
--------------------------------------------------------------------------------
1 |
2 | # %%
3 | from skimage.feature import graycomatrix, graycoprops
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import cv2
7 | # %%
8 | def calculate_color_features(image):
9 | # Convert image to RGB color space
10 | rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
11 |
12 | # Convert image to LAB color space
13 | lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
14 |
15 | # Extract the chosen channel from RGB and LAB color spaces
16 | # For example, to choose the blue channel from RGB and the b* channel from LAB
17 | channel_rgb = rgb_image[:, :, 2] # Assuming blue channel
18 | channel_lab = lab_image[:, :, 2] # Assuming b* channel
19 |
20 | # Calculate statistical characteristics
21 | min_channel = np.min(channel_rgb)
22 | max_channel = np.max(channel_rgb)
23 | var_channel = np.var(channel_rgb)
24 | mean_channel = np.mean(channel_rgb)
25 |
26 | min_lab = np.min(channel_lab)
27 | max_lab = np.max(channel_lab)
28 | var_lab = np.var(channel_lab)
29 | mean_lab = np.mean(channel_lab)
30 |
31 | return {
32 | 'min_rgb': min_channel,
33 | 'max_rgb': max_channel,
34 | 'var_rgb': var_channel,
35 | 'mean_rgb': mean_channel,
36 | 'min_lab': min_lab,
37 | 'max_lab': max_lab,
38 | 'var_lab': var_lab,
39 | 'mean_lab': mean_lab
40 | }
41 |
42 | #%% Calculate sigma's
43 | def calculate_sigma_x_y(glcm):
44 | N_g = len(glcm)
45 | sigma_x = 0
46 | sigma_y = 0
47 |     mew_x, mew_y, _ = calculate_mew_x_y(glcm)
48 |
49 | for i in range(N_g):
50 | for j in range(N_g):
51 | sigma_x += ((i - mew_x) ** 2) * glcm[i][j]
52 | sigma_y += ((j - mew_y) ** 2) * glcm[i][j]
53 |
54 | sigma_x = np.sqrt(sigma_x)
55 | sigma_y = np.sqrt(sigma_y)
56 |
57 | return sigma_x, sigma_y
58 |
59 | #%% Calculate mew's
60 | def calculate_mew_x_y(P_d):
61 | N_g = P_d.shape[0]
62 | mew_x = 0
63 | mew_y = 0
64 | mew_xy = 0
65 | for i in range(1, N_g + 1):
66 | for j in range(1, N_g + 1):
67 | mew_x += i * P_d[i - 1, j - 1]
68 | mew_y += j * P_d[i - 1, j - 1]
69 |     mew_xy = abs(mew_x - mew_y)
70 |     return mew_x, mew_y, mew_xy
71 | #%% Calculate Px+y and Px-y
72 | 
73 | def calculate_Pxy(glcm_2d):
74 |     # Sum (P_{x+y}) and difference (P_{x-y}) distributions of the joint GLCM
75 |     # probabilities, used by the sum/difference variance and entropy features.
76 |     N_g = len(glcm_2d)
77 |     Px_plus_y = np.zeros(2 * N_g + 1)  # indexed by i + j (1-based), valid entries 2 .. 2*N_g
78 |     Px_minus_y = np.zeros(N_g)         # indexed by |i - j|, valid entries 0 .. N_g - 1
79 | 
80 |     for i in range(N_g):
81 |         for j in range(N_g):
82 |             Px_plus_y[(i + 1) + (j + 1)] += glcm_2d[i][j]
83 |             Px_minus_y[abs(i - j)] += glcm_2d[i][j]
84 | 
85 |     return Px_plus_y, Px_minus_y
86 | 
87 |
88 | # %%Entropy function
89 | def compute_entropy(p):
90 | # Ensure p is an array to handle division by zero
91 | p = np.array(p)
92 | # Avoid division by zero by adding a small constant
93 | p = p + np.finfo(float).eps
94 | return -np.sum(p * np.log10(p))
95 |
96 | #%% Compute entropy H(xy1)
97 | def compute_entropy_xy1(Px, Py, Pd, Ng):
98 | # Avoid division by zero by adding a small constant
99 | Px = Px + np.finfo(float).eps
100 | Py = Py + np.finfo(float).eps
101 |
102 | H_xy1 = 0
103 | for i in range(Ng):
104 | for j in range(Ng):
105 | H_xy1 -= Pd[i][j] * np.log10(Px[i] * Py[j])
106 |
107 | return H_xy1
108 |
109 | def compute_entropy_xy2(Px, Py, Ng):
110 |     # Avoid division by zero by adding a small constant
111 |     Px = Px + np.finfo(float).eps
112 |     Py = Py + np.finfo(float).eps
113 | 
114 |     H_xy2 = 0
115 |     for i in range(Ng):
116 |         for j in range(Ng):
117 |             H_xy2 -= Px[i] * Py[j] * np.log10(Px[i] * Py[j])
118 | 
119 |     return H_xy2
120 | def computeIMC(Hx, Hy, Hxy, Hxy1, Hxy2):
121 |
122 | # Calculate IMCorr1 and IMCorr2
123 | IMCorr1 = ( Hxy - Hxy1) / max(Hx, Hy)
124 | IMCorr2 = np.sqrt(1 - np.exp(-2 * (Hxy2 - Hxy)))
125 |
126 | # Return IMCorr1 and IMCorr2 values
127 | return IMCorr1, IMCorr2
128 | #%% Calculate entropies
129 | def calculate_entropies(Px, Py, Pd, Ng):
130 |     Hx = compute_entropy(Px)
131 |     Hy = compute_entropy(Py)
132 |     Hxy = compute_entropy(Pd)  # joint entropy of the co-occurrence probabilities
133 | 
134 |     Hxy1 = compute_entropy_xy1(Px, Py, Pd, Ng)
135 |     Hxy2 = compute_entropy_xy2(Px, Py, Ng)
136 | 
137 |     return Hx, Hy, Hxy, Hxy1, Hxy2
138 | # %%
139 | def calculate_texture_features(gray_image):
140 |     # Compute the GLCM of the (already grayscale) input image
141 |     distances = [1]  # Offset of one pixel; a distance of 0 would compare each pixel with itself
142 |     angles = [0, np.pi/4, np.pi/2, 3*np.pi/4]  # Adjust angles as needed
143 |     glcm = graycomatrix(gray_image, distances=distances, angles=angles, symmetric=True, normed=True)
144 | 
145 | 
146 |     # Average over the distance and angle axes to obtain a single joint probability matrix
147 |     glcm_2d = glcm.mean(axis=(2, 3))
148 | 
149 |     Ng = glcm_2d.shape[0]  # Number of gray levels
150 | 
151 |     Px = np.sum(glcm_2d, axis=1).flatten()  # Marginal distribution along the rows
152 |     Py = np.sum(glcm_2d, axis=0).flatten()  # Marginal distribution along the columns
153 |     Pd = glcm_2d  # Joint probabilities
154 | 
155 |     # Sum (P_{x+y}) and difference (P_{x-y}) distributions
156 |     Px_plus_y, Px_minus_y = calculate_Pxy(glcm_2d)
157 | 
158 | 
159 |     mewx, mewy, mewxy = calculate_mew_x_y(Pd, Ng)
160 |     sigma_x, sigma_y = calculate_sigma_x_y(glcm_2d, Ng)
161 |     Hx, Hy, Hxy, Hxy1, Hxy2 = calculate_entropies(Px, Py, Pd, Ng)
162 | 
163 | 
164 |     # Calculate statistical texture features
165 |     asm = graycoprops(glcm, 'ASM').flatten().mean()
166 |     contrast = graycoprops(glcm, 'contrast').flatten().mean()
167 |     correlation = graycoprops(glcm, 'correlation').flatten().mean()
168 |     variance = sigma_x ** 2  # Haralick variance of the joint distribution
169 |     idm = np.sum(glcm_2d / (1 + (np.arange(Ng)[:, None] - np.arange(Ng)[None, :]) ** 2))
170 |     entropy = -np.sum(glcm_2d * np.log(glcm_2d + 1e-15))  # Small constant avoids log(0)
171 | 
172 | 
173 |     # Sum and difference statistics over P_{x+y} and P_{x-y}
174 |     sum_variance = 0
175 |     for k in range(2, 2 * Ng + 1):
176 |         sum_variance += (k - (mewx + mewy)) ** 2 * Px_plus_y[k]
177 | 
178 |     sum_entropy = 0
179 |     for k in range(2, 2 * Ng + 1):
180 |         sum_entropy -= Px_plus_y[k] * np.log(Px_plus_y[k] + np.finfo(float).eps)
181 | 
182 |     difference_variance = 0
183 |     difference_entropy = 0
184 |     for k in range(Ng):
185 |         difference_variance += (k - mewxy) ** 2 * Px_minus_y[k]
186 |         difference_entropy -= Px_minus_y[k] * np.log(Px_minus_y[k] + np.finfo(float).eps)
187 | 
188 | 
189 |     # Information measures of correlation from the entropies computed above
190 |     imcorr1, imcorr2 = computeIMC(Hx, Hy, Hxy, Hxy1, Hxy2)
191 | 
192 | 
193 |     return {
194 |         'H(x)': Hx,
195 |         'H(y)': Hy,
196 |         'H(xy)': Hxy,
197 |         'H(xy1)': Hxy1,
198 |         'H(xy2)': Hxy2,
199 |         'Px_y': Px_minus_y,
200 |         'sigma_x': sigma_x,
201 |         'sigma_y': sigma_y,
202 |         'mew_x': mewx,
203 |         'mew_y': mewy,
204 |         'ASM': asm,
205 |         'contrast': contrast,
206 |         'correlation': correlation,
207 |         'variance': variance,
208 |         'IDM': idm,
209 |         'entropy': entropy,
210 |         'Sum_Variance': sum_variance,
211 |         'Sum_Entropy': sum_entropy,
212 |         'Difference_Variance': difference_variance,
213 |         'Difference_Entropy': difference_entropy,
214 |         'IMCorr1': imcorr1,
215 |         'IMCorr2': imcorr2,
216 |     }
217 |
218 | # %%
219 | def calculate_symmetry(bin_image):
220 | 
221 |     # Union of the mask with its horizontal mirror; the extra pixels measure asymmetry
222 |     symmetry_mask_0 = cv2.bitwise_or(bin_image, cv2.flip(bin_image, 1))
223 | 
224 |     # Union of the mask with its vertical mirror (rotating the union would not change
225 |     # its pixel count, so the 90-degree value is taken from the other flip axis)
226 |     symmetry_mask_90 = cv2.bitwise_or(bin_image, cv2.flip(bin_image, 0))
227 | 
228 |     # Symmetry value about the vertical axis (0 degrees)
229 |     symmetry_0_degrees = 1 - (cv2.countNonZero(symmetry_mask_0) / cv2.countNonZero(bin_image))
230 | 
231 |     # Symmetry value about the horizontal axis (90 degrees)
232 |     symmetry_90_degrees = 1 - (cv2.countNonZero(symmetry_mask_90) / cv2.countNonZero(bin_image))
233 | 
234 |     # Calculate the average symmetry value
235 |     average_symmetry = (symmetry_0_degrees + symmetry_90_degrees) / 2
236 | 
237 |     return average_symmetry
238 | # %%
239 | def calculate_border_irregularity(contours):
240 | # Return some default or error value if no contours are found
241 | if not contours:
242 |         return 0
243 |
244 | largest_contour = max(contours, key=cv2.contourArea)
245 | perimeter = cv2.arcLength(largest_contour, True)
246 | convex_hull = cv2.convexHull(largest_contour)
247 | hull_perimeter = cv2.arcLength(convex_hull, True)
248 |
249 | # Higher values indicate more irregularity
250 | irregularity = perimeter / hull_perimeter
251 |
252 | return irregularity
253 |
254 | # %%
255 | def extract_handcrafted(Iroi, Ibin):
256 |     gray_image = cv2.cvtColor(Iroi, cv2.COLOR_BGR2GRAY)
257 |     # Contours are taken from the binary lesion mask rather than the raw grayscale ROI
258 |     contours, _ = cv2.findContours(Ibin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
259 | 
260 |     moments = cv2.moments(contours[0])
261 | 
262 |     # Calculate the central moments
263 |     mu20 = moments['mu20']
264 |     mu02 = moments['mu02']
265 |     mu11 = moments['mu11']
266 | 
267 |     # Total lesion area summed over all detected contours
268 |     total_area = 0
269 |     for contour in contours:
270 |         total_area += cv2.contourArea(contour)
271 | 
272 | 
273 |     perimeter = cv2.arcLength(contours[0], True)
274 |     circularity = (4 * np.pi * total_area) / (perimeter ** 2)
275 |     diameter = np.sqrt(4 * total_area / np.pi)
276 |     eccentricity = np.sqrt((mu20 - mu02) ** 2 + 4 * mu11 ** 2) / (mu20 + mu02 + np.sqrt((mu20 - mu02) ** 2 + 4 * mu11 ** 2))
277 | 
278 |     # Flatten the per-group feature dictionaries into plain float arrays
279 |     color_features = np.array(list(calculate_color_features(Iroi).values()), dtype=float)
280 | 
281 |     texture_dict = calculate_texture_features(gray_image)
282 |     texture_features = np.hstack([np.atleast_1d(v).ravel() for v in texture_dict.values()])
283 | 
284 |     # Asymmetry and border descriptors from the binary mask and its contours
285 |     symmetry_value = np.array([calculate_symmetry(Ibin)])
286 | 
287 |     border_irregularity = np.array([calculate_border_irregularity(contours)])
288 | 
289 |     # Assemble the final handcrafted feature vector
290 |     handcrafted_features = np.concatenate([np.array([total_area, perimeter, circularity, diameter, eccentricity]),
291 |                                            color_features, texture_features, symmetry_value, border_irregularity])
292 |     return handcrafted_features
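293 | 
294 | # %% Usage sketch (illustrative only): shows how the extractor would be driven from a
295 | # cropped lesion ROI and its matching binary mask, e.g. the outputs of imgCropBinaryMask.py.
296 | # The file names below are placeholders, not part of the original pipeline.
297 | if __name__ == "__main__":
298 |     roi = cv2.imread('roi_example.jpg')                           # cropped lesion ROI (BGR)
299 |     mask = cv2.imread('mask_example.jpg', cv2.IMREAD_GRAYSCALE)   # matching binary mask
300 |     if roi is not None and mask is not None:
301 |         features = extract_handcrafted(roi, mask)
302 |         print('Handcrafted feature vector length:', len(features))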
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [2024] [Vuk Ignjatović]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/model/notebooks/handcrafted.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "id": "1ea0c311-7f2d-452d-bedc-2dcf6389ac1a",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "from skimage.feature import graycomatrix, graycoprops\n",
11 | "import matplotlib.pyplot as plt\n",
12 | "import numpy as np\n",
13 | "import cv2"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": null,
19 | "id": "20d42aed-059a-4fc6-af2b-60cd56189a9f",
20 | "metadata": {},
21 | "outputs": [
22 | {
23 | "name": "stdout",
24 | "output_type": "stream",
25 | "text": [
26 | "Area: 26928.0\n",
27 | "Diameter: 90470880.49274547\n",
28 | "Eccentricity: 0.2035088220995739\n"
29 | ]
30 | }
31 | ],
32 | "source": [
33 | "# Compute the moments of the contour\n",
34 | "moments = cv2.moments(contours[0])\n",
35 | "\n",
36 | "# Calculate the central moments\n",
37 | "mu20 = moments['mu20']\n",
38 | "mu02 = moments['mu02']\n",
39 | "mu11 = moments['mu11']\n",
40 | "\n",
41 | "# Calculate the diameter\n",
42 | "diameter = 12 * np.sqrt(mu20 + mu02) + 4 * np.sqrt((mu20 - mu02) ** 2 + 4 * mu11 ** 2) / np.sqrt(2)\n",
43 | "\n",
44 | "# Calculate the area\n",
45 | "area = moments['m00']\n",
46 | "\n",
47 | "# Calculate eccentricity\n",
48 | "eccentricity = np.sqrt((mu20 - mu02) ** 2 + 4 * mu11 ** 2) / (mu20 + mu02 + np.sqrt((mu20 - mu02) ** 2 + 4 * mu11 ** 2))\n",
49 | "\n",
50 | "# Print results\n",
51 | "print(\"Area:\", area)\n",
52 | "print(\"Diameter:\", diameter)\n",
53 | "print(\"Eccentricity:\", eccentricity)\n"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "9b9fd241-1383-4d16-bd97-84daaf32f6b6",
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "def calculate_color_features(image):\n",
64 | " # Convert image to RGB color space\n",
65 | " rgb_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)\n",
66 | " \n",
67 | " # Convert image to LAB color space\n",
68 | " lab_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2LAB)\n",
69 | " \n",
70 | " # Extract the chosen channel from RGB and LAB color spaces\n",
71 | " # For example, to choose the blue channel from RGB and the b* channel from LAB\n",
72 | " channel_rgb = rgb_image[:, :, 2] # Assuming blue channel\n",
73 | " channel_lab = lab_image[:, :, 2] # Assuming b* channel\n",
74 | " \n",
75 | " # Calculate statistical characteristics\n",
76 | " min_channel = np.min(channel_rgb)\n",
77 | " max_channel = np.max(channel_rgb)\n",
78 | " var_channel = np.var(channel_rgb)\n",
79 | " mean_channel = np.mean(channel_rgb)\n",
80 | " \n",
81 | " min_lab = np.min(channel_lab)\n",
82 | " max_lab = np.max(channel_lab)\n",
83 | " var_lab = np.var(channel_lab)\n",
84 | " mean_lab = np.mean(channel_lab)\n",
85 | " \n",
86 | " return {\n",
87 | " 'min_rgb': min_channel,\n",
88 | " 'max_rgb': max_channel,\n",
89 | " 'var_rgb': var_channel,\n",
90 | " 'mean_rgb': mean_channel,\n",
91 | " 'min_lab': min_lab,\n",
92 | " 'max_lab': max_lab,\n",
93 | " 'var_lab': var_lab,\n",
94 | " 'mean_lab': mean_lab\n",
95 | " }"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "id": "6ccf5cbf-b9ec-40e8-ab1c-e4cdbddfcd56",
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "\n",
106 | "def compute_entropy(p):\n",
107 | " # Ensure p is an array to handle division by zero\n",
108 | " p = np.array(p)\n",
109 | " # Avoid division by zero by adding a small constant\n",
110 | " p = p + np.finfo(float).eps\n",
111 | " return -np.sum(p * np.log2(p))\n",
112 | "\n",
113 | "def compute_mutual_information(p_xy, p_x, p_y):\n",
114 | " # Calculate entropies\n",
115 | " H_X = compute_entropy(p_x)\n",
116 | " H_Y = compute_entropy(p_y)\n",
117 | " H_XY = compute_entropy(p_xy)\n",
118 | " # Calculate mutual information\n",
119 | " return H_X + H_Y - H_XY\n",
120 | "\n",
121 | "\n",
122 | "def computeIMC(Px, Py, Pd):\n",
123 | " # Calculate entropies\n",
124 | " H_X = compute_entropy(Px)\n",
125 | " H_Y = compute_entropy(Py)\n",
126 | " H_XY = compute_entropy(Pd)\n",
127 | "\n",
128 | " # Assuming p_xy1 and p_xy2 are defined elsewhere\n",
129 | " joint_entropy_xy1 = compute_entropy(p_xy1)\n",
130 | " joint_entropy_xy2 = compute_entropy(p_xy2)\n",
131 | "\n",
132 | " # Calculate mutual information\n",
133 | " mutual_information_xy = compute_mutual_information(Pd, Px, Py)\n",
134 | "\n",
135 | " # Calculate joint entropies for scenarios 1 and 2\n",
136 | " joint_entropy_xy1 = compute_entropy(p_xy1)\n",
137 | " joint_entropy_xy2 = compute_entropy(p_xy2)\n",
138 | "\n",
139 | " # Calculate IMCorr1 and IMCorr2\n",
140 | " IMCorr1 = (mutual_information_xy - mutual_information_x_y1) / max(H_X, H_Y)\n",
141 | " IMCorr2 = np.sqrt(1 - np.exp(-2 * (joint_entropy_xy2 - joint_entropy_xy)))\n",
142 | "\n",
143 | " # Return IMCorr1 and IMCorr2 values\n",
144 | " return IMCorr1, IMCorr2\n"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": null,
150 | "id": "76534fcd-1966-4a51-a737-e2b7cc5b40a3",
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "def calculate_texture_features():\n",
155 | " # Convert image to grayscale \n",
156 | " # Compute the GLCM\n",
157 | " distances = [0] # You can adjust the distances if needed\n",
158 | " angles = [0, np.pi/4, np.pi/2, 3*np.pi/4] # Adjust angles as needed\n",
159 | " glcm = graycomatrix(gray_image, distances=distances, angles=angles, symmetric=True, normed=True)\n",
160 | "\n",
161 | "\n",
162 | " #potential bug in dimensionality reduction\n",
163 | " glcm_2d = glcm[0, 0, :, :]\n",
164 | "\n",
165 | " N = glcm_2d.shape[0] # Number of gray levels\n",
166 | " print(glcm.shape)\n",
167 | " \n",
168 | " # Calculate statistical texture features\n",
169 | " asm = graycoprops(glcm, 'ASM').flatten().mean()\n",
170 | " contrast = graycoprops(glcm, 'contrast').flatten().mean()\n",
171 | " correlation = graycoprops(glcm, 'correlation').flatten().mean()\n",
172 | " variance = graycoprops(glcm, 'ASM').flatten().var()\n",
173 | " idm = np.sum(1 / (1 + np.arange(N)[:, None] - np.arange(N)[None, :]) ** 2 * glcm)\n",
174 | " entropy = -np.sum(glcm * np.log(glcm + 1e-15)) # Avoid division by zero\n",
175 | "\n",
176 | " imcorr1, imcorr2 = computeIMC()\n",
177 | " \n",
178 | " return {\n",
179 | " 'ASM': asm,\n",
180 | " 'contrast': contrast,\n",
181 | " 'correlation': correlation,\n",
182 | " 'variance': variance,\n",
183 | " 'IDM': idm,\n",
184 | " 'entropy': entropy, #BUG\n",
185 | " 'Sum_Variance': sum_variance,\n",
186 | " 'Sum_Entropy': sum_entropy,\n",
187 | " 'Difference_Variance': difference_variance,\n",
188 | " 'Difference_Entropy': difference_entropy,\n",
189 | " 'IMCorr1': imcorr1,\n",
190 | " 'IMCorr2': imcorr2,\n",
191 | " }"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": null,
197 | "id": "2ef8fc00-31e2-4b1c-9f48-c598949309e0",
198 | "metadata": {},
199 | "outputs": [],
200 | "source": [
201 | "def calculate_symmetry(bin_image): \n",
202 | " \n",
203 | " # Calculate the symmetry mask\n",
204 | " symmetry_mask = cv2.bitwise_or(bin_image, cv2.flip(bin_img, 1))\n",
205 | " \n",
206 | " # Calculate the synthetic image A\n",
207 | " synthetic_image = cv2.bitwise_or(bin_img, symmetry_mask)\n",
208 | " \n",
209 | " # Calculate the symmetry value at 0 degrees\n",
210 | " symmetry_0_degrees = 1 - (cv2.countNonZero(synthetic_image) / cv2.countNonZero(bin_img))\n",
211 | " \n",
212 | " # Calculate the symmetry value at 90 degrees\n",
213 | " rotated_image = cv2.rotate(synthetic_image, cv2.ROTATE_90_CLOCKWISE)\n",
214 | " symmetry_90_degrees = 1 - (cv2.countNonZero(rotated_image) / cv2.countNonZero(bin_img))\n",
215 | " \n",
216 | " # Calculate the average symmetry value\n",
217 | " average_symmetry = (symmetry_0_degrees + symmetry_90_degrees) / 2\n",
218 | " \n",
219 | " return average_symmetry"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "id": "ac7a9dc5",
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "def calculate_border_irregularity(contours):\n",
230 | " # Return some default or error value if no contours are found\n",
231 | " if not contours:\n",
232 | " return 0, 0 \n",
233 | " \n",
234 | " largest_contour = max(contours, key=cv2.contourArea)\n",
235 | " perimeter = cv2.arcLength(largest_contour, True)\n",
236 | " convex_hull = cv2.convexHull(largest_contour)\n",
237 | " hull_perimeter = cv2.arcLength(convex_hull, True)\n",
238 | " \n",
239 | " # Higher values indicate more irregularity\n",
240 | " irregularity = perimeter / hull_perimeter \n",
241 | " \n",
242 | " return irregularity\n"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": null,
248 | "id": "a86c5a8d",
249 | "metadata": {},
250 | "outputs": [],
251 | "source": [
252 | "def extract_handcrafted(img):\n",
253 | " gray_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2GRAY)\n",
254 | " contours, _ = cv2.findContours(gray_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
255 | "\n",
256 | " moments = cv2.moments(contours[0])\n",
257 | "\n",
258 | " # Calculate the central moments\n",
259 | " mu20 = moments['mu20']\n",
260 | " mu02 = moments['mu02']\n",
261 | " mu11 = moments['mu11']\n",
262 | "\n",
263 | " #potential bug in the value of the area\n",
264 | " total_area = 0\n",
265 | " for contour in contours:\n",
266 | " area = cv2.contourArea(contour)\n",
267 | " total_area += area\n",
268 | " \n",
269 | "\n",
270 | " perimeter = cv2.arcLength(contours[0], True)\n",
271 | " circularity = (4 * np.pi * area) / (perimeter ** 2) \n",
272 | " diameter = np.sqrt(4 * area / np.pi)\n",
273 | " eccentricity = np.sqrt((mu20 - mu02) ** 2 + 4 * mu11 ** 2) / (mu20 + mu02 + np.sqrt((mu20 - mu02) ** 2 + 4 * mu11 ** 2))\n",
274 | "\n",
275 | " color_features = calculate_color_features(image)\n",
276 | " color_features = np.array([color_features])\n",
277 | "\n",
278 | " texture_features = calculate_texture_features()\n",
279 | " texture_features = np.array([texture_features])\n",
280 | "\n",
281 | "\n",
282 | " symmetry_value = calculate_symmetry()\n",
283 | " symmetry_value = np.array([symmetry_value])\n",
284 | "\n",
285 | " border_irregularity = calculate_border_irregularity(contours) \n",
286 | " border_irregularity = np.array([border_irregularity])\n",
287 | "\n",
288 | " handcrafted_features = np.concatenate([np.array([area, perimeter, circularity, diameter, eccentricity]),\n",
289 | " color_features, texture_features, symmetry_value, border_irregularity])\n",
290 | " return handcrafted_features"
291 | ]
292 | }
293 | ],
294 | "metadata": {
295 | "kernelspec": {
296 | "display_name": "Python 3 (ipykernel)",
297 | "language": "python",
298 | "name": "python3"
299 | },
300 | "language_info": {
301 | "codemirror_mode": {
302 | "name": "ipython",
303 | "version": 3
304 | },
305 | "file_extension": ".py",
306 | "mimetype": "text/x-python",
307 | "name": "python",
308 | "nbconvert_exporter": "python",
309 | "pygments_lexer": "ipython3",
310 | "version": "3.11.5"
311 | }
312 | },
313 | "nbformat": 4,
314 | "nbformat_minor": 5
315 | }
316 |
--------------------------------------------------------------------------------
/model/imgCropBinaryMask.py:
--------------------------------------------------------------------------------
1 | # This script is used to crop an image based on a
2 | # binary mask and generate an inverted version of the cropped image.
3 | # The cropped image, inverted image, and the binary mask are then
4 | # saved to the specified output directory.
5 | #
6 | # Example usage: python img-crop-binary-mask.py -i input.jpg -o output
7 | # Input: input.jpg
8 | # Output: output/original_cropped.jpg,
9 | # output/inverted_cropped.jpg,
10 | # output/binary_mask_cropped.jpg
11 | #
12 | # Dep: Computer Vision
13 | # Author: Sabian Hibbs
14 | # Circa: 2024 - 14/03/2024
15 |
16 | # The author's code was modified by removing the inverted_cropped version of an image.
17 | # Using inverted images to train the model has not yet been tested, so that output
18 | # is not generated here. The glob library was also added to loop over the benign and
19 | # malignant folders in order to prepare the HAM dataset for model training.
20 |
21 |
22 | """ MOST RECENT COMMIT DETAILS
23 |
24 | Commit: Improve image cropping and handle low-quality images + Added Argparse for flags.
25 |
26 | Changes:
27 | - Modified the `crop_image` function to return a boolean `is_valid` indicating if the cropped image is valid or not.
28 | - Added a check to determine if the cropped image is empty or completely white.
29 | - If the cropped image is invalid, the function returns `None` for the cropped
30 | original and cropped mask, and `False` for `is_valid`.
31 |
32 | - Updated the `save_cropped_images` function to handle invalid cropped images.
33 | - Added a new parameter `is_valid` to indicate whether the cropped image is valid or not.
34 | - If `is_valid` is `False`, the function saves the rejected image with the caption
35 | "Image not valid - Image Quality Too Low" using `cv2.putText`.
36 | - The rejected image is saved in the `output_path` directory with the prefix
37 | `{class_name}_rejected_{i}.jpg`.
38 |
39 | - Modified the `main` function to incorporate the changes in `crop_image` and `save_cropped_images`.
40 | - After calling `crop_image`, the function checks the value of `is_valid`.
41 | - If `is_valid` is `False`, it prints a message indicating that the image is not valid due to low quality.
42 | - The `save_cropped_images` function is called with the `is_valid` parameter to determine whether
43 | to save the cropped images or the rejected image with the caption.
44 |
45 | These changes improve the image cropping process by handling cases where the cropped image
46 | is empty or completely white. The script now saves the rejected images with the caption
47 | "Image not valid - Image Quality Too Low" in the `output_path` directory, while still
48 | saving the valid cropped images and binary masks as before. This helps in identifying
49 | and managing low-quality images in the dataset.
50 |
51 | """
52 |
53 | import glob
54 | import argparse
55 | import cv2
56 | import numpy as np
57 | import os
58 | from skimage.transform import resize
59 |
60 |
61 | # Constants
62 | IMAGE_PADDING = 10 # Pixel padding around ROI before cropping.
63 | IMG_CONTRAST = 1.0 # Image contrast factor. (0.0 = no contrast, 2.0 = double contrast)
64 |
65 | def image_data_gen_preprocessing(image_path):
66 |     img = read_image(image_path)
67 |     Ipro = preprocess_image(img)
68 |     Ibin = generate_binary_mask(Ipro)
69 |     Iroi, _, _ = crop_image(Ipro, Ibin)  # crop_image returns (roi, mask, is_valid)
70 | 
71 |     return Iroi
72 |
73 | def read_image(image_path):
74 | """
75 | Loads an image from a specified path.
76 |
77 | Args:
78 | image_path (str): The path to the image file.
79 |
80 | Returns:
81 | numpy.ndarray: The loaded image as a NumPy array.
82 | """
83 | return cv2.imread(image_path) # Read image from file.
84 |
85 |
86 | def preprocess_image(image):
87 | """
88 | Applies preprocessing steps to the image.
89 |
90 | Args:
91 | image (numpy.ndarray): The input image.
92 |
93 | Returns:
94 | numpy.ndarray: The preprocessed image.
95 | """
96 | print("Original image shape:", image.shape) # Debug print
97 | # Apply Gaussian blur to the image.
98 | image_blurred = cv2.GaussianBlur(image, (5, 5), 1)
99 | # Convert the image to the LAB color space.
100 | return cv2.cvtColor(image_blurred, cv2.COLOR_BGR2LAB)
101 |
102 | def generate_binary_mask(image):
103 | """
104 | Generates a binary mask for the image.
105 |
106 | Args:
107 | image (numpy.ndarray): The input image.
108 |
109 | Returns:
110 | numpy.ndarray: The binary mask generated from the input image.
111 | """
112 |
113 | # Convert the image to grayscale.
114 | gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
115 | # Apply Otsu's thresholding to the grayscale image.
116 |     _, binary_mask = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
117 | return binary_mask
118 |
119 |
120 | def crop_image(original_image, binary_mask, buffer=IMAGE_PADDING):
121 | """
122 | Crops the region of interest from the given image and binary mask.
123 |
124 | Args:
125 | original_image (numpy.ndarray): The original image.
126 | binary_mask (numpy.ndarray): The binary mask indicating the region of interest.
127 |         buffer (int, optional): The buffer size around the region of interest. Defaults to IMAGE_PADDING.
128 |
129 | Returns:
130 | tuple: A tuple containing the cropped original image, the cropped binary mask, and a boolean indicating if the cropped image is valid.
131 | """
132 | # Find the contours in the binary mask
133 | contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
134 | # If no contours are found, return None
135 | if not contours:
136 | print("No contours detected; unable to crop the image.")
137 | return None, None, False
138 |
139 | # Find the largest contour and compute the bounding box
140 | largest_contour = max(contours, key=cv2.contourArea)
141 | x, y, w, h = cv2.boundingRect(largest_contour)
142 |
143 | # Compute the square crop dimensions with buffer
144 | side_length = max(w, h) + 2 * buffer
145 | center_x, center_y = x + w // 2, y + h // 2
146 | x = max(center_x - side_length // 2, 0)
147 | y = max(center_y - side_length // 2, 0)
148 | # Ensure the crop dimensions do not exceed the image size
149 | if x + side_length > original_image.shape[1]:
150 | side_length = original_image.shape[1] - x
151 | if y + side_length > original_image.shape[0]:
152 | side_length = original_image.shape[0] - y
153 |
154 | # Crop the original image and the binary mask
155 | cropped_original = original_image[y:y + side_length, x:x + side_length]
156 | cropped_mask = binary_mask[y:y + side_length, x:x + side_length]
157 |
158 | # Check if the cropped image is empty or completely white
159 | if cropped_original.size == 0 or np.all(cropped_original >= 250):
160 | print("Cropped image is empty or completely white.")
161 | return None, None, False
162 |
163 | return cropped_original, cropped_mask, True
164 |
165 |
166 | def invert_colors(image):
167 | """
168 | Inverts the colors of the given image and increases contrast.
169 |
170 | Args:
171 | image (numpy.ndarray): The input image to be processed.
172 |
173 | Returns:
174 | numpy.ndarray: The inverted image with increased contrast.
175 | """
176 | # Convert the image to the HSV color space
177 | hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
178 | hsv_image[:, :, 1] = 255 - hsv_image[:, :, 1]
179 |
180 | # Increase contrast
181 | hsv_image[:, :, 2] = np.clip(hsv_image[:, :, 2] * IMG_CONTRAST, 0, 255)
182 |
183 | # Convert the image back to the BGR color space
184 | inverted_image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
185 | return inverted_image
186 |
187 |
188 | def save_cropped_images(cropped_original, cropped_mask, output_path, i, class_name, is_valid, output_dir):
189 | """
190 | Saves the cropped original and the cropped binary mask images.
191 |
192 | Args:
193 | cropped_original (numpy.ndarray): The cropped original image.
194 | cropped_mask (numpy.ndarray): The cropped binary mask image.
195 | output_path (str): The path where the images will be saved.
196 | i (int): The index of the image.
197 | class_name (str): The class name of the image (benign or malignant).
198 | is_valid (bool): Indicates whether the cropped image is valid or not.
199 | output_dir (str): The output directory for saving the images.
200 |
201 | Returns:
202 | None
203 | """
204 | if not is_valid:
205 | # Save the rejected image with the caption
206 | cv2.putText(cropped_original, "Image not valid - Image Quality Too Low", (10, 30),
207 | cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
208 | cv2.imwrite(os.path.join(output_dir, f"{class_name}_rejected_{i}.jpg"), cropped_original)
209 | return
210 |
211 | # Save the cropped original image
212 | cv2.imwrite(os.path.join(output_dir, f"{class_name}_cropped_{i}.jpg"), cropped_original)
213 | # Save the cropped image with the binary mask applied
214 | cv2.imwrite(os.path.join(output_dir, f"{class_name}_binary_{i}.jpg"), cropped_mask)
215 |
216 |
217 | def main():
218 | """
219 | Image Cropping and Mask Generation Tool.
220 |
221 |     This tool walks the training directories, crops each image based on a binary mask,
222 |     and saves the cropped image together with its cropped binary mask to the
223 |     specified output directory.
224 |
225 | Args:
226 | Base directory for processed data "-bd", for "--base_dir"
227 | Starting directory for training data "-sd", for "--starting_dir"
228 | Directory for benign images "-bnd", for "--benign_dir"
229 | Directory for malignant images "-md", for "--malignant_dir"
230 | Output directory for saving the images "-o", for "--output_dir"
231 |
232 | Returns:
233 |         None
234 |
235 | """
236 |
237 | parser = argparse.ArgumentParser(description="Image Cropping and Mask Generation Tool")
238 | parser.add_argument("-bd", "--base_dir", default="/home/vuk/Documents/ML_Data/HAM/processed",
239 | help="Base directory for processed data (default: /home/vuk/Documents/ML_Data/HAM/processed)")
240 |
241 | parser.add_argument("-sd", "--starting_dir", default="/home/vuk/Documents/ML_Data/HAM/train/",
242 | help="Starting directory for training data (default: /home/vuk/Documents/ML_Data/HAM/train/)")
243 |
244 | parser.add_argument("-bnd", "--benign_dir", default="/home/vuk/Documents/ML_Data/HAM/train/benign",
245 | help="Directory for benign images (default: /home/vuk/Documents/ML_Data/HAM/train/benign)")
246 |
247 | parser.add_argument("-md", "--malignant_dir", default="/home/vuk/Documents/ML_Data/HAM/train/malignant",
248 | help="Directory for malignant images (default: /home/vuk/Documents/ML_Data/HAM/train/malignant)")
249 |
250 | parser.add_argument("-o", "--output_dir", default="/path/to/output/directory",
251 | help="Output directory for saving the images (default: /path/to/output/directory)")
252 | args = parser.parse_args()
253 |
254 | base_dir = args.base_dir
255 | starting_dir = args.starting_dir
256 | benign_dir = args.benign_dir
257 | malignant_dir = args.malignant_dir
258 | output_dir = args.output_dir
259 |
260 | roi_dir = os.path.join(output_dir, 'roi')
261 | binary_dir = os.path.join(output_dir, 'binary')
262 |
263 | # Create the output directories if they don't exist
264 | os.makedirs(roi_dir, exist_ok=True)
265 | os.makedirs(binary_dir, exist_ok=True)
266 |
267 | image_paths = glob.glob(os.path.join(starting_dir, '*', '*'))
268 |
269 | for i, image_path in enumerate(image_paths):
270 | print(image_path)
271 | original_image = read_image(image_path)
272 | if original_image is None:
273 | print(f"Failed to load image: {image_path}")
274 | continue # Skip to the next image if loading failed
275 | preprocessed_image = preprocess_image(original_image)
276 | binary_mask = generate_binary_mask(preprocessed_image)
277 | cropped_original, cropped_mask, is_valid = crop_image(original_image, binary_mask)
278 |         if not is_valid:
279 |             # crop_image returns None for invalid crops, so there is nothing to save here
280 |             print(f"Image {i} not valid - Image Quality Too Low.")
281 |             continue
282 |         if 'benign' in image_path:
283 |             class_name = 'benign'
284 |         elif 'malignant' in image_path:
285 |             class_name = 'malignant'
286 |         else:
287 |             print(f"Unknown class for image: {image_path}")
288 |             continue
289 |         save_cropped_images(cropped_original, cropped_mask, roi_dir, i, class_name, is_valid, output_dir)
290 | 
291 | 
292 | if __name__ == "__main__":
293 |     main()
294 | 
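295 | # Example invocation (illustrative; the directories below are placeholders):
296 | #   python imgCropBinaryMask.py \
297 | #       -sd /path/to/HAM/train/ \
298 | #       -o  /path/to/HAM/processed
299 | # For every train/<class>/<image> this writes <class>_cropped_<i>.jpg and
300 | # <class>_binary_<i>.jpg into the output directory and skips images whose
301 | # crop is rejected as too low quality.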
--------------------------------------------------------------------------------
/client/src/assets/svg/allowcamera.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------