├── .gitignore
├── README.md
├── client
├── .gitignore
├── App.js
├── app.json
├── assets
│ ├── adaptive-icon.png
│ ├── favicon.png
│ ├── icon.png
│ └── splash.png
├── babel.config.js
├── components
│ ├── Footer.jsx
│ ├── ImageButtons.jsx
│ ├── SendButton.jsx
│ └── Welcome.jsx
├── package-lock.json
└── package.json
└── server
├── mlapi.py
├── model
├── dir_manipulation.py
├── img_manipulation.py
├── interface.py
├── model.py
├── pre_processing.py
└── saved_model.pb
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Cancer Detection App 📸💻
2 |
3 | ## Project Overview
4 | Welcome to my most exciting computer science project! 🚀 I've developed a Cancer Detection App using React Native for the frontend and TensorFlow, NumPy, and Python for the backend. The app empowers users to check if a naevus (mole) is benign or malignant.
5 |
6 | ## Features
7 | - 📷 **Camera Integration:** Capture photos directly from your phone's camera.
8 | - 🔄 **Real-time Detection:** Instantly send the photo to the TensorFlow model for analysis.
9 | - 🤖 **Machine Learning Magic:** Utilizing TensorFlow, NumPy, and Python to distinguish between benign and malignant moles.
10 |
11 | ## How It Works
12 | 1. 📱 **User Permission:** The app prompts the user for camera permissions.
13 | 2. 📸 **Capture Photo:** Users can take a photo of the naevus they want to analyze.
14 | 3. 🚀 **Model Processing:** The app sends the photo to the TensorFlow model for analysis.
15 | 4. 🩺 **Diagnosis Result:** The model processes the image and provides feedback on whether the naevus is benign or malignant.
16 |
17 | ## Technologies Used
18 | - ⚛️ **React Native:** For the frontend development.
19 | - 🧠 **TensorFlow:** Powering the machine learning model.
20 | - 🐍 **Python:** Backend development and model training.
21 | - 📊 **NumPy:** Handling numerical operations efficiently.
22 | - 📷 **Camera Permissions:** Leveraging the device's camera for photo capture.
23 |
24 | ## Training Data
25 | - 📊 **Kaggle Dataset:** The model has been trained on a curated dataset from Kaggle, ensuring robust and accurate predictions.
26 |
27 | ## Future Enhancements
28 | - 🌐 **Web Deployment:** I am considering deploying the app on the web for broader accessibility.
29 | - 🌈 **Improved UX/UI:** I plan to enhance the user interface with nativewind.
30 |
31 | ## Acknowledgments
32 | A big shoutout to the open-source community and the incredible tools and libraries that made this project possible. 🎉
33 |
34 | Happy Coding! 🚀👩‍💻👨‍💻
35 |
36 | ## Requirements
37 |
38 | - Android or iOS device with a camera
39 | - Internet connection for TensorFlow.js model updates (if applicable)
40 |
41 | ## Installation
42 |
43 | 1. Clone the repository:
44 |
45 | ```bash
46 | git clone https://github.com/VukIG/Cancer-Detection-App.git
47 | ```
48 |
49 | 2. Install dependencies:
50 |
51 | ```bash
52 | cd Cancer-Detection-App
53 | npm install
54 | ```
55 |
56 | 3. Run the app and scan the QR code with the Expo Go app from the Play Store:
57 |
58 | ```bash
59 | npx expo start --tunnel
60 | ```
61 | 4. Run the app on your emulator (optional if you don't want to use the Expo app):
62 | ```bash
63 | Press w for web, a for android emulator ( Requires the AndroidSDK setup ) or i for ios emulator ( requires xcode )
64 | ```
65 | ## How to Contribute
66 | Feel free to fork the repository and contribute to the development. Your suggestions and enhancements are more than welcome! 🙌
67 |
68 |
69 | We welcome contributions! If you have suggestions, found a bug, or want to improve the app, please open an issue or submit a pull request.
70 |
71 | ## License
72 |
73 | This project is licensed under the [MIT License](LICENSE).
74 |
--------------------------------------------------------------------------------
/client/.gitignore:
--------------------------------------------------------------------------------
1 | # Learn more https://docs.github.com/en/get-started/getting-started-with-git/ignoring-files
2 |
3 | # dependencies
4 | node_modules/
5 |
6 | # Expo
7 | .expo/
8 | dist/
9 | web-build/
10 |
11 | # Native
12 | *.orig.*
13 | *.jks
14 | *.p8
15 | *.p12
16 | *.key
17 | *.mobileprovision
18 |
19 | # Metro
20 | .metro-health-check*
21 |
22 | # debug
23 | npm-debug.*
24 | yarn-debug.*
25 | yarn-error.*
26 |
27 | # macOS
28 | .DS_Store
29 | *.pem
30 |
31 | # local env files
32 | .env*.local
33 |
34 | # typescript
35 | *.tsbuildinfo
36 |
--------------------------------------------------------------------------------
/client/App.js:
--------------------------------------------------------------------------------
1 | import { View, Image, StyleSheet } from "react-native";
2 | import Welcome from "./components/Welcome";
3 | import Footer from "./components/Footer";
4 | import SendButton from "./components/SendButton";
5 | import ImageButtons from "./components/ImageButtons";
6 | import { useState } from "react";
7 |
8 | export default function App() {
9 | const [image, setImage] = useState(null);
10 | const [prediction, setPrediction] = useState(null);
11 | console.log(image);
12 | return (
13 |
14 |
15 | {image ? (
16 |
17 |
18 | {prediction ?
19 | {prediction}:
20 |
21 | }
22 |
23 | ) : (
24 |
25 | )}
26 |
27 |
28 |
29 |
30 | );
31 | }
32 |
33 | const styles = StyleSheet.create({
34 | image: {
35 | marginTop: 200,
36 | marginLeft: 6,
37 | width: 400,
38 | height: 400,
39 | },
40 | });
41 |
--------------------------------------------------------------------------------
/client/app.json:
--------------------------------------------------------------------------------
1 | {
2 | "expo": {
3 | "name": "Cancer-Detection-App",
4 | "slug": "Cancer-Detection-App",
5 | "version": "1.0.0",
6 | "orientation": "portrait",
7 | "icon": "./assets/icon.png",
8 | "userInterfaceStyle": "light",
9 | "plugins": [
10 | [
11 | "expo-image-picker",
12 | {
13 | "photosPermission": "The app accesses your photos to let you share them with your friends.",
14 | "cameraPermission": "The app accesses your camera to let you take pictures"
15 | }
16 | ]
17 | ],
18 | "splash": {
19 | "image": "./assets/splash.png",
20 | "resizeMode": "contain",
21 | "backgroundColor": "#ffffff"
22 | },
23 | "assetBundlePatterns": ["**/*"],
24 | "ios": {
25 | "supportsTablet": true
26 | },
27 | "android": {
28 | "adaptiveIcon": {
29 | "foregroundImage": "./assets/adaptive-icon.png",
30 | "backgroundColor": "#ffffff"
31 | }
32 | },
33 | "web": {
34 | "favicon": "./assets/favicon.png"
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/client/assets/adaptive-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Cancer-Detection-App/491784eb6a4781fc13a01855894a28101356c9ff/client/assets/adaptive-icon.png
--------------------------------------------------------------------------------
/client/assets/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Cancer-Detection-App/491784eb6a4781fc13a01855894a28101356c9ff/client/assets/favicon.png
--------------------------------------------------------------------------------
/client/assets/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Cancer-Detection-App/491784eb6a4781fc13a01855894a28101356c9ff/client/assets/icon.png
--------------------------------------------------------------------------------
/client/assets/splash.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Cancer-Detection-App/491784eb6a4781fc13a01855894a28101356c9ff/client/assets/splash.png
--------------------------------------------------------------------------------
/client/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = function (api) {
2 | api.cache(true);
3 | return {
4 | presets: ["babel-preset-expo"],
5 | };
6 | };
7 |
--------------------------------------------------------------------------------
/client/components/Footer.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import { Text, View, TouchableOpacity, StyleSheet } from "react-native";
3 | import { AntDesign } from "@expo/vector-icons";
4 | import { Linking } from "react-native";
5 |
6 | function Footer() {
7 | return (
8 |
9 |
10 | Made by Vuk Ignjatovic
11 |
13 | Linking.openURL(
14 | "https://www.linkedin.com/in/vuk-ignjatovic-53152a248/",
15 | )
16 | }
17 | >
18 |
19 |
20 |
21 |
22 | );
23 | }
24 |
25 | const styles = StyleSheet.create({
26 | footerContainer: {
27 | backgroundColor: "#4a90e2",
28 | padding: 10,
29 | position: "absolute",
30 | bottom: 0,
31 | width: "100%",
32 | },
33 | flexContainer: {
34 | flexDirection: "row",
35 | justifyContent: "center",
36 | alignItems: "center",
37 | },
38 | textStyle: {
39 | color: "white",
40 | marginRight: 10,
41 | },
42 | });
43 |
44 | export default Footer;
45 |
--------------------------------------------------------------------------------
/client/components/ImageButtons.jsx:
--------------------------------------------------------------------------------
1 | import React, { useState } from "react";
2 | import { View, TouchableOpacity, Text, StyleSheet } from "react-native";
3 | import { Ionicons, MaterialIcons } from "@expo/vector-icons";
4 | import * as ImagePicker from "expo-image-picker";
5 |
6 | function ImageButtons({ image, setImage }) {
7 | const [cameraPermission, setCameraPermission] = useState(true);
8 | const [galleryPermission, setGalleryPermission] = useState(true);
9 |
10 | const grabFromLibrary = async () => {
11 | const galleryStatus = await ImagePicker.requestMediaLibraryPermissionsAsync();
12 | if (galleryStatus.status == "granted") {
13 | let result = await ImagePicker.launchImageLibraryAsync({
14 | mediaTypes: ImagePicker.MediaTypeOptions.All,
15 | allowsEditing: true,
16 | aspect: [1, 1],
17 | quality: 1,
18 | });
19 |
20 | console.log(result);
21 |
22 | if (!result.canceled && result.assets && result.assets.length > 0) {
23 | setImage(result.assets[0].uri);
24 | }
25 | } else {
26 | setGalleryPermission(false);
27 | }
28 | };
29 |
30 | const grabFromCamera = async () => {
31 | const cameraStatus = await ImagePicker.requestCameraPermissionsAsync();
32 | if (cameraStatus.status == "granted") {
33 | let result = await ImagePicker.launchCameraAsync({
34 | mediaTypes: ImagePicker.MediaTypeOptions.All,
35 | allowsEditing: true,
36 | aspect: [1, 1],
37 | quality: 1,
38 | });
39 |
40 | if (!result.canceled && result.assets && result.assets.length > 0) {
41 | setImage(result.assets[0].uri);
42 | }
43 | } else {
44 | setCameraPermission(false);
45 | }
46 | };
47 |
48 | return (
49 |
52 |
53 |
54 |
55 |
56 | {image ? "Retake a picture" : "Take a picture"}
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 | {image ? "Reupload an image" : "Upload an image"}
65 |
66 |
67 |
68 |
69 |
70 | {!cameraPermission && (
71 |
72 | {" "}
73 | You need to grant accsess to the camera{" "}
74 |
75 | )}
76 | {!galleryPermission && (
77 |
78 | {" "}
79 | You need to grant accsess to galery{" "}
80 |
81 | )}
82 |
83 | );
84 | }
85 |
86 | const styles = StyleSheet.create({
87 | buttonsWithoutImage: {
88 | position: "absolute",
89 | top: "240%",
90 | left: "37%",
91 | transform: [{ translateX: -115 }],
92 | },
93 | buttonsPosition: {
94 | left: "29%",
95 | top: "5%",
96 | display: "flex",
97 | justifyContent: "center",
98 | transform: [{ translateX: -115 }],
99 | },
100 | buttonsWrapper: {
101 | flexDirection: "row",
102 | justifyContent: "center",
103 | alignItems: "center",
104 | },
105 | button: {
106 | backgroundColor: "#4a90e2",
107 | padding: 20,
108 | borderRadius: 5,
109 | marginRight: 10,
110 | display: "flex",
111 | justifyContent: "center",
112 | },
113 | buttonContent: {
114 | flexDirection: "row",
115 | alignItems: "center",
116 | },
117 | buttonText: {
118 | color: "white",
119 | marginRight: 5,
120 | },
121 | errorText: {
122 | color: "red",
123 | },
124 | });
125 |
126 | export default ImageButtons;
127 |
--------------------------------------------------------------------------------
/client/components/SendButton.jsx:
--------------------------------------------------------------------------------
1 | import { View, StyleSheet, TouchableOpacity, Text } from "react-native";
2 | import axios from "axios";
3 |
4 | function SendButton({ image, prediction,setPrediction }) {
5 | const formData = new FormData();
6 | formData.append('image', {
7 | uri: image,
8 | type: 'image/jpeg',
9 | name: 'test.jpg',
10 | });
11 | const sendToServer = async () => {
12 | axios
13 | .post("http://192.168.1.6:8000/predict", formData)
14 | .then((response) => {
15 | setPrediction(response);
16 | })
17 | .catch((error) => {
18 | console.error(error);
19 | });
20 | };
21 | return (
22 |
23 |
24 | Scan the image
25 |
26 |
27 | );
28 | }
29 |
30 | export default SendButton;
31 |
32 | const styles = StyleSheet.create({
33 | sendWrapper: {
34 | position: "absolute",
35 | top: "10%",
36 | left: "50%",
37 | transform: [{ translateX: -130 }, { translateY: 20 }],
38 | },
39 | button: {
40 | backgroundColor: "#4a90e2",
41 | padding: 30,
42 | borderRadius: 5,
43 | marginRight: 10,
44 | display: "flex",
45 | justifyContent: "center",
46 | },
47 | buttonText: {
48 | color: "white",
49 | marginRight: 5,
50 | fontSize: 30,
51 | },
52 | });
53 |
--------------------------------------------------------------------------------
/client/components/Welcome.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import { Text, View, StyleSheet } from "react-native";
3 |
4 | function Welcome() {
5 | return (
6 |
7 | Welcome to CDA
8 |
9 | Scan your naevus with your phone's camera and check if it's benign or
10 | malignant
11 |
12 |
13 | );
14 | }
15 |
16 | const styles = StyleSheet.create({
17 | center: {
18 | justifyContent: "center",
19 | alignItems: "center",
20 | margin: 20,
21 | top: "120%",
22 | },
23 | title: {
24 | fontSize: 50,
25 | fontWeight: "bold",
26 | marginBottom: 10,
27 | },
28 | description: {
29 | fontSize: 16,
30 | textAlign: "left",
31 | maxWidth: 330,
32 | marginLeft: -30,
33 | paddingHorizontal: 20,
34 | },
35 | });
36 |
37 | export default Welcome;
38 |
--------------------------------------------------------------------------------
/client/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "cancer-detection-app",
3 | "version": "1.0.0",
4 | "main": "node_modules/expo/AppEntry.js",
5 | "scripts": {
6 | "start": "expo start",
7 | "android": "expo start --android",
8 | "ios": "expo start --ios",
9 | "web": "expo start --web"
10 | },
11 | "dependencies": {
12 | "axios": "^1.6.5",
13 | "expo": "~49.0.15",
14 | "expo-image-picker": "~14.3.2",
15 | "expo-status-bar": "~1.6.0",
16 | "react": "18.2.0",
17 | "react-native": "0.72.6"
18 | },
19 | "devDependencies": {
20 | "@babel/core": "^7.20.0"
21 | },
22 | "private": true
23 | }
24 |
--------------------------------------------------------------------------------
/server/mlapi.py:
--------------------------------------------------------------------------------
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware

from fastapi.responses import JSONResponse
from pydantic import BaseModel
import tensorflow as tf
from PIL import Image
import numpy as np

app = FastAPI()

class PatientInfo(BaseModel):
    # Path/URI of the image to classify; must be readable by this server process.
    image: str

# Allow cross-origin requests (the mobile client runs on a different host).
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# overly permissive — lock origins down before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Load the cancer-detection model only once when the app starts.
# Use tf.keras.models.load_model (as interface.py does): tf.saved_model.load
# returns a bare trackable object that does not expose the .predict() API
# this module calls in the /predict handler.
saved_model_path = '/home/vuk/Documents/ML/Cancer-Detection-AI/Cancer-detection-model'
model = tf.keras.models.load_model(saved_model_path)
29 |
@app.post('/predict')
async def predict_cancer(patient_info: PatientInfo):
    """Run the cancer-detection model on the image referenced by the request.

    Expects a JSON body matching PatientInfo (an image path/URI readable by
    the server). Returns {"prediction": 0|1} (1 = malignant, from the model's
    sigmoid output thresholded at 0.5), or a JSON error with a 400/500 status.
    """
    image_uri = patient_info.image
    image_data = process_image(image_uri)

    if image_data is None:
        return JSONResponse(content={"error": "Invalid image URI"}, status_code=400)

    try:
        # process_image already returns a (1, H, W, C) batch, so no extra
        # reshape is needed here. (The old np.array.reshape(...) call always
        # raised AttributeError: np.array is a function, not a module.)
        prediction = model.predict(image_data)
        # Sigmoid output lies in [0, 1]; threshold at 0.5 instead of int()
        # truncation, which rounded every probability below 1.0 down to 0.
        return {"prediction": int(prediction[0][0] > 0.5)}
    except Exception as e:
        return JSONResponse(content={"error": f"Error during prediction: {e}"}, status_code=500)
45 |
def process_image(image_uri: str):
    """Load an image file and prepare it as a (1, 224, 224, 3) float batch.

    Matches the training pipeline in pre_processing.py: 224x224 RGB input
    with pixel values rescaled to [0, 1]. Returns None when the file cannot
    be read or decoded, matching the caller's error handling.
    """
    try:
        img = Image.open(image_uri).convert("RGB")
        img = img.resize((224, 224))
        # PIL images have no .shape attribute — convert to a numpy array
        # first, then add the leading batch dimension.
        arr = np.asarray(img, dtype=np.float32) / 255.0
        return np.reshape(arr, (1,) + arr.shape)
    except Exception as e:
        print(f"Error processing image: {e}")
        return None
54 |
@app.get('/')
def index():
    """Smoke-test endpoint confirming the API is up."""
    return {'message': 'Amenelibockura'}
58 |
--------------------------------------------------------------------------------
/server/model/dir_manipulation.py:
--------------------------------------------------------------------------------
import pandas as pd
import os
import shutil

# Root of the raw HAM10000 download and of all derived split folders.
directory = '/home/vuk/Documents/ML Data/CDA/'
csv_location = os.path.join(directory, 'HAM10000_metadata.csv')

source_directory_1 = os.path.join(directory, 'HAM10000_images_part_1')
source_directory_2 = os.path.join(directory, 'HAM10000_images_part_2')


def ensure_dir(path):
    """Create `path` (with parents) if missing; report when it already exists."""
    if not os.path.exists(path):
        os.makedirs(path)
    else:
        print(f"Directory '{path}' already exists.")


def move_images(src, dest):
    """Move every file from `src` into `dest`."""
    for file in os.listdir(src):
        shutil.move(os.path.join(src, file), os.path.join(dest, file))


def merge_source(src, dest):
    """Fold a raw part-directory into `dest`, then delete the emptied source."""
    if os.path.exists(src) and os.listdir(src):
        move_images(src, dest)
        shutil.rmtree(src)
    else:
        print(f"Directory '{src}' does not exist or is empty.")


# Merge the two raw image part-directories into a single HAM10000 folder.
ham_directory = os.path.join(directory, 'HAM10000')
ensure_dir(ham_directory)

destination_directory = os.path.join(directory, 'HAM10000/')

merge_source(source_directory_1, destination_directory)
merge_source(source_directory_2, destination_directory)

# Create benign/malignant subfolders for each dataset split (previously three
# copy-pasted loops — one per split).
test_data = os.path.join(directory, 'test/')
train_data = os.path.join(directory, 'train/')
validate_data = os.path.join(directory, 'validate/')
for split in (test_data, train_data, validate_data):
    for subfolder in ('benign', 'malignant'):
        ensure_dir(os.path.join(split, subfolder))

main_folder = destination_directory

data = pd.read_csv(csv_location)
69 |
--------------------------------------------------------------------------------
/server/model/img_manipulation.py:
--------------------------------------------------------------------------------
import random
import shutil
import os
from dir_manipulation import test_data, train_data, validate_data, main_folder, csv_location, data

# Deterministic 80/10/10 train/validate/test split of the merged HAM10000 images.
seed = 1
random.seed(seed)
test_examples = train_examples = validation_examples = 0

# Skip the split when it has already been materialised. The old check tested
# main_folder + "train/" (inside HAM10000/), but images are actually copied
# into train_data (directory + 'train/'), so it never matched; and because
# dir_manipulation pre-creates the (empty) split folders, a bare existence
# check would always skip — so check that the folder is populated instead.
train_benign = os.path.join(train_data, 'benign')
if os.path.exists(train_benign) and os.listdir(train_benign):
    print('Image manipulation not needed')
else:
    # CSV columns: lesion_id, image_id, dx, ... — index 1 is the image file
    # stem, index 2 is the diagnosis code.
    for line in open(csv_location).readlines()[1:]:
        split_line = line.split(',')
        img_file = split_line[1]
        tumor_type = split_line[2]
        random_num = random.random()

        if random_num < 0.8:
            location = train_data
            train_examples += 1

        elif random_num < 0.9:
            location = validate_data
            validation_examples += 1

        else:
            location = test_data
            test_examples += 1

        # 'mel' (melanoma) is treated as the only malignant class here.
        if tumor_type == 'mel':
            shutil.copy(
                main_folder + img_file + ".jpg",
                location + "malignant/" + img_file + ".jpg",
            )
        else:
            shutil.copy(
                main_folder + img_file + ".jpg",
                location + "benign/" + img_file + ".jpg",
            )

print(f"Number of training examples {train_examples}")
print(f"Number of test examples {test_examples}")
print(f"Number of validation examples {validation_examples}")
--------------------------------------------------------------------------------
/server/model/interface.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import precision_score
from pre_processing import test_dataset, batch_size
from model import test_size

# Load the pre-trained model once; reused by the interactive loop below.
loaded_model = tf.keras.models.load_model('/home/vuk/Documents/ML/Cancer-Detection-AI/Cancer-detection-model')

# Evaluate the model on the held-out test split.
evaluation_results = loaded_model.evaluate(test_dataset, steps=test_size // batch_size, verbose=0)

# Index 0 is the loss; index 1 is BinaryAccuracy, the first entry of the
# METRICS list in model.py.
accuracy = evaluation_results[1]

true_labels = test_dataset.labels
# The model has a single sigmoid output (model.py: Dense(1, 'sigmoid')), so
# argmax over axis 1 always returned 0 — threshold the probability instead.
predicted_labels = (loaded_model.predict(test_dataset) > 0.5).astype(int).ravel()
# NOTE(review): test_dataset was built with shuffle=True (pre_processing.py),
# so .labels may not align with prediction order — confirm with an unshuffled
# generator before trusting this precision figure.

precision = precision_score(true_labels, predicted_labels, average='weighted')  # or 'binary' if appropriate

# Print the results
print(f"Accuracy: {accuracy}")
print(f"Precision: {precision}")
27 |
def get_image(dataset, index):
    """Return the (image, label) pair at flat position `index` in a batched dataset.

    Walks the dataset batch by batch — restarting iteration on every call —
    until the batch containing `index` is reached, then picks the element out.
    """
    target_batch, offset = divmod(index, batch_size)

    iterator = iter(dataset)
    batch = next(iterator)
    for _ in range(target_batch):
        batch = next(iterator)

    return batch[0][offset], batch[1][offset]
40 |
41 |
# Interactive loop: ask for a test-image index, predict, and display the result.
while True:
    index = int(input("Izaberi broj slike za testiranje (0 - 9999): "))
    # Keep re-prompting until the index is in range — the old code asked only
    # once more and then used the second answer unchecked.
    while index < 0 or index > 9999:
        index = int(input("Izaberite novu vrednost izmedju 0 i 9999: "))

    img, label = get_image(test_dataset, index)

    # Add the leading batch dimension expected by the model.
    img_input = np.reshape(img, (1, img.shape[0], img.shape[1], img.shape[2]))

    predictions = loaded_model.predict(img_input)
    # Single sigmoid output: argmax over one value is always 0, so threshold
    # the probability at 0.5 to pick the class.
    if predictions[0][0] > 0.5:
        predicted_label = 'kancerogeni mladez'
    else:
        predicted_label = 'dobrocudni tumor'
    plt.imshow(img, cmap=plt.cm.binary)
    plt.title(f"Model predvidja da ova slika sadrzi: {predicted_label}, ova slika sadrzi: {label}")
    plt.show()
61 |
--------------------------------------------------------------------------------
/server/model/model.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import os
from pre_processing import train_dataset, validation_dataset, img_width, img_height, batch_size

# Split sizes produced by img_manipulation.py's random 80/10/10 split.
validation_size = 988
train_size = 7993
test_size = 1034

# Canonical on-disk location of the trained model.
model_path = '/home/vuk/Documents/ML/Cancer-Detection-AI/Cancer-detection-model'

if os.path.exists(model_path):
    # Reuse the trained model. Previously this branch only printed a message,
    # and the final load pointed at a relative path that might not exist.
    loaded_model = tf.keras.models.load_model(model_path)
    print("Model loaded")
else:
    # Four Conv/MaxPool stages feeding a single-unit sigmoid binary classifier.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(img_width, img_height, 3)),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),

        tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),

        tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),

        tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),

        tf.keras.layers.Flatten(),

        tf.keras.layers.Dense(units=512, activation='relu'),
        tf.keras.layers.Dense(units=1, activation='sigmoid'),
    ])

    METRICS = [
        tf.keras.metrics.BinaryAccuracy(),
        tf.keras.metrics.Precision(),
        tf.keras.metrics.Recall(),
        tf.keras.metrics.AUC(),
    ]

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=METRICS,
    )

    model.fit(
        train_dataset,
        epochs=1,
        verbose=2,
        steps_per_epoch=train_size // batch_size,
        validation_data=validation_dataset,
        validation_steps=validation_size // batch_size,
    )

    model.save("Cancer-detection-model")
    loaded_model = tf.keras.models.load_model("Cancer-detection-model")
--------------------------------------------------------------------------------
/server/model/pre_processing.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import pandas as pd
from dir_manipulation import test_data, train_data, validate_data
import scipy

# Constants shared with model.py and interface.py.
img_width = img_height = 224
batch_size = 32
csv_file = '/home/vuk/Documents/ML Data/CDA/hmnist_28_28_L.csv'

images_dataset = pd.read_csv(csv_file)


# Augment training images on the fly to offset the class imbalance of the
# HAM10000 dataset; evaluation data only gets pixel rescaling.
train_gen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1 / 255,
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
)
test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
validation_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)


def _flow(generator, folder):
    # Identical flow settings for every split: 224x224 RGB, binary labels,
    # shuffled with a fixed seed for reproducibility.
    return generator.flow_from_directory(
        folder,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        color_mode="rgb",
        class_mode="binary",
        shuffle=True,
        seed=123,
    )


# Stream batches straight from the split directories created by
# dir_manipulation.py / img_manipulation.py.
train_dataset = _flow(train_gen, train_data)
test_dataset = _flow(test_gen, test_data)
validation_dataset = _flow(validation_gen, validate_data)
58 |
--------------------------------------------------------------------------------
/server/model/saved_model.pb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VukIG/Cancer-Detection-App/491784eb6a4781fc13a01855894a28101356c9ff/server/model/saved_model.pb
--------------------------------------------------------------------------------
/server/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.105.0
2 | matplotlib==3.8.2
3 | numpy==1.26.2
4 | pandas==2.1.4
5 | pydantic==2.5.2
6 | scikit_learn==1.3.2
7 | scipy==1.11.4
8 | tensorflow==2.15.0
9 | # Pillow provides PIL, imported by server/mlapi.py; uvicorn serves the FastAPI app.
10 | Pillow
11 | uvicorn
9 |
--------------------------------------------------------------------------------