├── Demo.avif
├── requirements.txt
├── Raspberry-pi setup
│   ├── Required libraries
│   ├── Autonomous Navigation
│   ├── Loop
│   ├── Motor Control
│   ├── rpi_connection_cam.py
│   └── rpi_controlcar_distance.py
├── Training the model
│   ├── np.txt
│   ├── Training
│   ├── Evaluate and saving the model
│   ├── Model Creation
│   └── Data Loading and Preprocessing
├── .github
│   └── ISSUE_TEMPLATE
│       └── feature_request.md
├── LICENSE
├── Neural Network setup
│   ├── train_neural_network.py
│   ├── model.py
│   └── collect_images_via_rpi.py
├── README.md
└── CODE_OF_CONDUCT.md
/Demo.avif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AbhiFutureTech/ANN-Robotics-simu/HEAD/Demo.avif
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | opencv-python
3 | tensorflow
4 | picamera
5 | RPi.GPIO
6 | 
--------------------------------------------------------------------------------
/Raspberry-pi setup/Required libraries:
--------------------------------------------------------------------------------
1 | sudo apt-get update
2 | sudo apt-get install python3-opencv
3 | pip3 install tensorflow numpy picamera
4 | 
--------------------------------------------------------------------------------
/Training the model/np.txt:
--------------------------------------------------------------------------------
1 | image_path,steering_angle,speed
2 | images/image_001.jpg,0.1,10.0
3 | images/image_002.jpg,-0.05,12.0
4 | ...
5 | 
6 | 
--------------------------------------------------------------------------------
/Training the model/Training:
--------------------------------------------------------------------------------
1 | # Training the model
2 | batch_size = 32
3 | epochs = 10
4 | 
5 | history = model.fit(
6 |     datagen.flow(X_train, y_train, batch_size=batch_size),
7 |     validation_data=(X_val, y_val),
8 |     steps_per_epoch=len(X_train) // batch_size,
9 |     epochs=epochs,
10 |     validation_steps=len(X_val) // batch_size
11 | )
12 | 
--------------------------------------------------------------------------------
/Training the model/Evaluate and saving the model:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | 
3 | # Evaluate the model
4 | val_loss = model.evaluate(X_val, y_val)
5 | print(f'Validation Loss: {val_loss}')
6 | 
7 | # Plot training history
8 | plt.plot(history.history['loss'], label='Train Loss')
9 | plt.plot(history.history['val_loss'], label='Validation Loss')
10 | plt.xlabel('Epoch')
11 | plt.ylabel('Mean Squared Error')
12 | plt.legend()
13 | plt.show()
14 | 
15 | # Save the model
16 | model.save('self_driving_model.h5')
17 | 
18 | 
--------------------------------------------------------------------------------
/Raspberry-pi setup/Autonomous Navigation:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import tensorflow as tf
4 | from picamera.array import PiRGBArray
5 | from picamera import PiCamera
6 | 
7 | # Load the pre-trained model (saved by the training step)
8 | model = tf.keras.models.load_model('self_driving_model.h5')
9 | 
10 | # Preprocessing function
11 | def preprocess_image(image):
12 |     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
13 |     image = cv2.resize(image, (200, 66))
14 |     image = image / 255.0
15 |     return np.expand_dims(image, axis=0)
16 | 
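17 | # Optional sanity check (illustrative sketch): run one prediction on a saved
18 | # frame to verify the model and preprocessing off-robot before driving.
19 | # 'test_frame.jpg' is an assumed placeholder path, not a file in this repository.
20 | if __name__ == "__main__":
21 |     test_frame = cv2.imread('test_frame.jpg')
22 |     if test_frame is not None:
23 |         angle = float(model.predict(preprocess_image(test_frame))[0][0])
24 |         print('Predicted steering angle:', angle)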
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /Training the model/Model Creation: -------------------------------------------------------------------------------- 1 | from tensorflow.keras.models import Sequential 2 | from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout 3 | 4 | def create_model(): 5 | model = Sequential([ 6 | Conv2D(24, (5, 5), strides=(2, 2), activation='relu', input_shape=(66, 200, 3)), 7 | Conv2D(36, (5, 5), strides=(2, 2), activation='relu'), 8 | Conv2D(48, (5, 5), strides=(2, 2), activation='relu'), 9 | Conv2D(64, (3, 3), activation='relu'), 10 | Conv2D(64, (3, 3), activation='relu'), 11 | Flatten(), 12 | Dense(100, activation='relu'), 13 | Dropout(0.5), 14 | Dense(50, activation='relu'), 15 | Dense(10, activation='relu'), 16 | Dense(1) # Regression output for steering angle 17 | ]) 18 | model.compile(optimizer='adam', loss='mse') 19 | return model 20 | 21 | model = create_model() 22 | model.summary() 23 | -------------------------------------------------------------------------------- /Raspberry-pi setup/Loop: -------------------------------------------------------------------------------- 1 | def main(): 2 | camera = PiCamera() 3 | camera.resolution = (640, 480) 4 | camera.framerate = 32 5 | raw_capture = PiRGBArray(camera, size=(640, 480)) 6 | 7 | for frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True): 8 | image = frame.array 9 | 10 | # Preprocess the image 11 | preprocessed_image = preprocess_image(image) 12 | 13 | # Predict the steering angle 14 | steering_angle = model.predict(preprocessed_image)[0] 15 | 16 | # Convert steering angle to motor speed 17 | left_speed = 0.5 + steering_angle 18 | right_speed = 0.5 - steering_angle 19 | 20 | # Set motor speed 21 | set_motor_speed(left_speed, right_speed) 22 | 23 | # Clear the stream for the next frame 24 | raw_capture.truncate(0) 25 | 26 | if __name__ == "__main__": 27 | try: 28 | main() 29 | except KeyboardInterrupt: 30 | GPIO.cleanup() 31 | -------------------------------------------------------------------------------- /Raspberry-pi setup/Motor Control: -------------------------------------------------------------------------------- 1 | import RPi.GPIO as GPIO 2 | 3 | # Motor control pins 4 | motor_left_forward = 17 5 | motor_left_backward = 18 6 | motor_right_forward = 22 7 | motor_right_backward = 23 8 | 9 | # Setup GPIO 10 | GPIO.setmode(GPIO.BCM) 11 | GPIO.setup(motor_left_forward, GPIO.OUT) 12 | GPIO.setup(motor_left_backward, GPIO.OUT) 13 | GPIO.setup(motor_right_forward, GPIO.OUT) 14 | GPIO.setup(motor_right_backward, GPIO.OUT) 15 | 16 | 
def set_motor_speed(left_speed, right_speed): 17 | if left_speed > 0: 18 | GPIO.output(motor_left_forward, GPIO.HIGH) 19 | GPIO.output(motor_left_backward, GPIO.LOW) 20 | else: 21 | GPIO.output(motor_left_forward, GPIO.LOW) 22 | GPIO.output(motor_left_backward, GPIO.HIGH) 23 | 24 | if right_speed > 0: 25 | GPIO.output(motor_right_forward, GPIO.HIGH) 26 | GPIO.output(motor_right_backward, GPIO.LOW) 27 | else: 28 | GPIO.output(motor_right_forward, GPIO.LOW) 29 | GPIO.output(motor_right_backward, GPIO.HIGH) 30 | 31 | # Set PWM for speed control if needed 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Abhijit Patil 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 
--------------------------------------------------------------------------------
/Raspberry-pi setup/rpi_connection_cam.py:
--------------------------------------------------------------------------------
1 | 
2 | __main_author__ = 'zhengwang'
3 | __co_author__ = 'akshay'
4 | 
5 | import io
6 | import socket
7 | import struct
8 | import time
9 | import picamera
10 | 
11 | 
12 | # create socket and connect to host 192.168.43.166 192.168.137.1
13 | client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
14 | client_socket.connect(('10.42.0.1', 8000))
15 | connection = client_socket.makefile('wb')
16 | buffe=20
17 | try:
18 |     with picamera.PiCamera() as camera:
19 |         camera.resolution = (320, 240)   # pi camera resolution
20 |         camera.framerate = 10            # 10 frames/sec
21 |         time.sleep(2)                    # give 2 secs for camera to initialize
22 |         start = time.time()
23 |         stream = io.BytesIO()
24 | 
25 |         # send jpeg format video stream
26 |         for foo in camera.capture_continuous(stream, 'jpeg', use_video_port = True):
27 |             connection.write(struct.pack('<L', stream.tell()))
28 |             connection.flush()
29 |             stream.seek(0)
30 |             connection.write(stream.read())
31 |             if time.time() - start > 600:
32 |                 break
33 |             stream.seek(0)
34 |             stream.truncate()
35 | 
36 |     connection.write(struct.pack('<L', 0))
37 | finally:
38 |     connection.close()
39 |     client_socket.close()
--------------------------------------------------------------------------------
/Raspberry-pi setup/rpi_controlcar_distance.py:
--------------------------------------------------------------------------------
[lines 1-73 not recovered from the source; the fragment below resumes inside a distance check of the form "... > 10:"]
74 |                 if data[0]=='0':
75 |                     GPIO.output(l1, False)    #forward
76 |                     GPIO.output(l2, True)
77 |                     GPIO.output(r1, False)
78 |                     GPIO.output(r2, True)
79 |                 elif data[0]=='1':            #right
80 |                     GPIO.output(l1, True)
81 |                     GPIO.output(l2, False)
82 |                     GPIO.output(r1, False)
83 |                     GPIO.output(r2, True)
84 |                 elif data[0]=='2':
85 |                     GPIO.output(l1, False)    #left
86 |                     GPIO.output(l2, True)
87 |                     GPIO.output(r1, True)
88 |                     GPIO.output(r2, False)
89 |                 else:
90 |                     GPIO.output(l1, False)
91 |                     GPIO.output(l2, False)
92 |                     GPIO.output(r1, False)
93 |                     GPIO.output(r2, False)
94 |             else:
95 |                 GPIO.output(l1, False)
96 |                 GPIO.output(l2, False)
97 |                 GPIO.output(r1, False)
98 |                 GPIO.output(r2, False)
99 | finally:
100 |     client_socket.close()
101 |     GPIO.cleanup()
102 |     client_socket.close()
103 | 
--------------------------------------------------------------------------------
/Neural Network setup/train_neural_network.py:
--------------------------------------------------------------------------------
1 | 
2 | __author__ = 'akshay'
3 | 
4 | import cv2
5 | import tensorflow as tf
6 | import numpy as np
7 | import glob
8 | import sys
9 | from sklearn.model_selection import train_test_split
10 | 
11 | print 'Loading training data...'
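# The code that follows stacks every .npz archive found in training_data/ into a
# single matrix: each image row is a flattened 120x320 grayscale region of
# interest (38,400 pixels) and each label row is a one-hot vector over the three
# drive commands (forward, right, left) recorded by collect_images_via_rpi.py.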
12 | e0 = cv2.getTickCount() 13 | 14 | # load training data 15 | image_array = np.zeros((1, 38400)) 16 | #image_array = np.zeros((1, 76800)) 17 | label_array = np.zeros((1, 3), 'float') 18 | training_data = glob.glob('training_data/*.npz') 19 | 20 | # if no data, exit 21 | if not training_data: 22 | print "No training data in directory, exit" 23 | sys.exit() 24 | 25 | #load training data 26 | for single_npz in training_data: 27 | with np.load(single_npz) as data: 28 | train_temp = data['train'] 29 | train_labels_temp = data['train_labels'] 30 | image_array = np.vstack((image_array, train_temp)) 31 | label_array = np.vstack((label_array, train_labels_temp)) 32 | 33 | X = image_array[1:, :] 34 | y = label_array[1:, :] 35 | #split training data 36 | train, test, train_labels, test_labels = train_test_split(X, y, test_size=0.1) 37 | 38 | #initialize nodes of hidden_1_layer 39 | n_nodes_hl1 = 32 40 | 41 | 42 | n_classes = 3 43 | 44 | #create placeholde for INPUT 45 | x = tf.placeholder('float', [1,38400]) 46 | #x = tf.placeholder('float', [1,76800]) 47 | y = tf.placeholder('float',[1,3]) 48 | 49 | #define neural network 50 | def neural_network_model(data): 51 | hidden_1_layer = {'weights':tf.Variable(tf.truncated_normal([38400, n_nodes_hl1],stddev=1.0)), 52 | 'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))} 53 | 54 | output_layer = {'weights':tf.Variable(tf.truncated_normal([n_nodes_hl1, n_classes],stddev=1.0)), 55 | 'biases':tf.Variable(tf.random_normal([n_classes]))} 56 | 57 | 58 | l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases']) 59 | l1 = tf.nn.relu(l1) 60 | output = tf.matmul(l1,output_layer['weights']) + output_layer['biases'] 61 | 62 | return output 63 | 64 | def train_neural_network(x): 65 | prediction = neural_network_model(x) 66 | # OLD VERSION: 67 | #cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) ) 68 | # NEW: 69 | cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) ) 70 | optimizer = tf.train.AdamOptimizer(1e-5).minimize(cost) 71 | 72 | #epochs 73 | hm_epochs = 25 74 | with tf.Session() as sess: 75 | # OLD: 76 | #sess.run(tf.initialize_all_variables()) 77 | # NEW: 78 | sess.run(tf.global_variables_initializer()) 79 | 80 | for epoch in range(hm_epochs): 81 | epoch_loss = 0 82 | for i,j in zip(train,train_labels): 83 | _, c = sess.run([optimizer, cost], feed_dict={x:[i], y:[j]}) 84 | epoch_loss += c 85 | 86 | print('Epoch', epoch, 'completed out of',hm_epochs,'loss:',epoch_loss) 87 | print (prediction,y) 88 | correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1)) 89 | 90 | accuracy = tf.reduce_mean(tf.cast(correct, 'float')) 91 | for i,j in zip(test,test_labels): 92 | print('Accuracy:',accuracy.eval({x:[i], y:[j]})) 93 | for i in test: 94 | p1=tf.argmax(prediction,1) 95 | print p1.eval({x:[i]}) 96 | print prediction.eval({x:[i]}) 97 | saver=tf.train.Saver() 98 | #Save the model after training. 99 | saver.save(sess,'/home/akshay/Downloads/savedata/project') 100 | train_neural_network(x) 101 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Neural Network Based Autonomous Navigation for Self Driving Robots 2 | 3 | 1.The Robot uses neural network to learn and predict decisions just like a human brain. 4 | 5 | 2.The model is built using Neural Network and it's trained by feeding in labelled images of tracks. 
6 | 
7 | 3. After training, the model is capable of making its own decisions. Predictions are made on the laptop, which has more memory and flexibility, while the Raspberry Pi streams the
8 | video to the laptop using the Pi camera.
9 | 
10 | 4. First, we train the model on a dataset of labelled images of the track.
11 | 
12 | 5. The Raspberry Pi streams the live feed to the laptop, and the predictions are sent back to the Raspberry Pi.
13 | 
14 | 6. The Raspberry Pi is connected to a motor driver that controls the wheels of the bot. An ultrasonic sensor ensures the robot does not collide with obstacles. Once trained, the robot runs autonomously,
15 | making its own decisions: it keeps to its path along the track and avoids collisions.
16 | 
17 | 
18 | ## A) Hardware Design
19 | 
20 | The hardware components used for this project are as follows:
21 | 
22 | 1. Raspberry Pi (any model with sufficient performance, such as Raspberry Pi 4)
23 | 2. Camera module
24 | 3. Motor driver (e.g., L298N)
25 | 4. Motors and wheels
26 | 5. Power supply
27 | 
28 | ## Required Libraries
29 | 
30 | Install the required libraries on your Raspberry Pi:
31 | 
32 | ```
33 | sudo apt-get update
34 | sudo apt-get install python3-opencv
35 | pip3 install tensorflow numpy picamera
36 | ```
37 | 
38 | ![Demo (1)](https://github.com/patilabhi20/Robotic-Tasks-via-Large-Language-Models/assets/157373320/c83189c3-d478-4657-8c9e-ae332751a466)
39 | 
40 | ## Installation
41 | 
42 | To set up the project, clone the repository and install the required dependencies:
43 | 
44 | 
45 | ```
46 | git clone https://github.com/yourusername/Neural-Network-Based-Autonomous-Navigation-for-Self-Driving-Robots.git
47 | cd Neural-Network-Based-Autonomous-Navigation-for-Self-Driving-Robots
48 | pip install -r requirements.txt
49 | 
50 | ```
51 | 
52 | Ensure you have the necessary hardware and software configurations as detailed in the documentation.
53 | 
54 | ## B) Software Design
55 | 
56 | 1. Python (2.7 for the legacy `Neural Network setup` scripts; Python 3 for the Keras pipeline)
57 | 2. TensorFlow
58 | 3. OpenCV
59 | 
60 | ## Model Architecture
61 | 
62 | The model architecture is based on a convolutional neural network (CNN) designed for real-time processing. It uses convolutional layers for feature extraction and dense layers for decision-making.
63 | 
64 | 
65 | ```
66 | from tensorflow.keras.models import Sequential
67 | from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
68 | 
69 | def create_model():
70 |     model = Sequential([
71 |         Conv2D(24, (5, 5), strides=(2, 2), activation='relu', input_shape=(66, 200, 3)),
72 |         Conv2D(36, (5, 5), strides=(2, 2), activation='relu'),
73 |         Conv2D(48, (5, 5), strides=(2, 2), activation='relu'),
74 |         Conv2D(64, (3, 3), activation='relu'),
75 |         Conv2D(64, (3, 3), activation='relu'),
76 |         Flatten(),
77 |         Dense(100, activation='relu'),
78 |         Dropout(0.5),
79 |         Dense(50, activation='relu'),
80 |         Dense(10, activation='relu'),
81 |         Dense(1)  # Regression output for steering angle
82 |     ])
83 |     model.compile(optimizer='adam', loss='mse')
84 |     return model
85 | 
86 | ```
87 | 
88 | 
89 | ## Training
90 | 
91 | The training pipeline includes data augmentation and preprocessing steps to enhance model performance; the `datagen` generator used below supplies the augmented batches. The model is trained with the mean squared error loss and the Adam optimizer.
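The `datagen` object is not defined elsewhere in this repository; a minimal sketch using Keras' `ImageDataGenerator` is shown here (the augmentation parameters are illustrative assumptions, not tuned values):

```
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Mild augmentation: small shifts, zoom, and brightness changes approximate
# camera jitter and lighting variation on the track.
datagen = ImageDataGenerator(
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.1,
    brightness_range=(0.8, 1.2)
)
```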
92 | 
93 | ```
94 | 
95 | # Training the model
96 | batch_size = 32
97 | epochs = 10
98 | 
99 | history = model.fit(
100 |     datagen.flow(X_train, y_train, batch_size=batch_size),
101 |     validation_data=(X_val, y_val),
102 |     steps_per_epoch=len(X_train) // batch_size,
103 |     epochs=epochs,
104 |     validation_steps=len(X_val) // batch_size
105 | )
106 | 
107 | ```
108 | 
109 | ## Evaluation
110 | 
111 | Evaluate the model's performance using the validation set and visualize the results:
112 | 
113 | 
114 | ```
115 | import matplotlib.pyplot as plt
116 | 
117 | # Evaluate the model
118 | val_loss = model.evaluate(X_val, y_val)
119 | print(f'Validation Loss: {val_loss}')
120 | 
121 | # Plot training history
122 | plt.plot(history.history['loss'], label='Train Loss')
123 | plt.plot(history.history['val_loss'], label='Validation Loss')
124 | plt.xlabel('Epoch')
125 | plt.ylabel('Mean Squared Error')
126 | plt.legend()
127 | plt.show()
128 | 
129 | ```
130 | 
131 | ## Examples
132 | 
133 | The snippets below are illustrative; the `recognition` and `processing` helper modules they import are not included in this repository.
134 | 
135 | 1. Object Recognition
136 | 
137 | ```
138 | from recognition.cnn_model import CNNModel
139 | 
140 | cnn_model = CNNModel()
141 | cnn_model.load_model('models/object_recognition_model.h5')
142 | result = cnn_model.predict('images/test_image.jpg')
143 | print(f"Recognized Object: {result}")
144 | ```
145 | 
146 | 2. Image Preprocessing
147 | 
148 | ```
149 | from processing.image_preprocessor import ImagePreprocessor
150 | 
151 | image_preprocessor = ImagePreprocessor()
152 | preprocessed_image = image_preprocessor.process('images/raw_image.jpg')
153 | preprocessed_image.show()
154 | 
155 | ```
156 | 
157 | Thank you for your interest in the Neural Network-Based Autonomous Navigation for Self-Driving Robots project! We hope you find it useful and engaging. Happy coding!
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 | 
3 | ## Our Pledge
4 | 
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 | 
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | . 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. 
Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /Neural Network setup/model.py: -------------------------------------------------------------------------------- 1 | __author__ = 'akshay' 2 | __co-author__ = 'zhengwang' 3 | 4 | import glob 5 | import numpy as np 6 | import cv2 7 | from pygame.locals import * 8 | import socket 9 | import time 10 | import threading 11 | import tensorflow as tf 12 | n_nodes_hl1 = 32 13 | n_classes = 3 14 | temp_prediction='0' 15 | 16 | class SendToRPI(object): 17 | def __init__(self): 18 | global temp_prediction 19 | TCP_IP = '10.42.0.1' 20 | TCP_PORT = 8001 21 | BUFFER_SIZE = 1024 # Normally 1024, but we want fast response 22 | 23 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 24 | s.bind((TCP_IP, TCP_PORT)) 25 | s.listen(1) 26 | conn, addr = s.accept() 27 | print 'Connection address:', addr 28 | while True: 29 | #Send prediction made by Neural Network to RPI 30 | conn.send(str(temp_prediction.strip('[]')))# echo 31 | time.sleep(0.2) 32 | 33 | conn.close() 34 | 35 | 36 | 37 | class MakePrediction(object): 38 | 39 | def __init__(self): 40 | 41 | self.x = tf.placeholder('float', [1,38400]) 42 | self.y = tf.placeholder('float',[1,3]) 43 | 44 | self.server_socket = socket.socket() 45 | self.server_socket.bind(('10.42.0.1', 8000)) 46 | self.server_socket.listen(0) 47 | 48 | # accept a single connection 49 | self.connection = self.server_socket.accept()[0].makefile('rb') 50 | 51 | 52 | self.send_inst = True 53 | 54 | # create labels 55 | self.k = np.zeros((3, 3), 'float') 56 | for i in range(3): 57 | self.k[i, i] = 1 58 | self.temp_label = np.zeros((1, 3), 'float') 59 | self.collect_image() 60 | 61 | 62 | 63 | 64 | def collect_image(self): 65 | 66 | saved_frame = 0 67 | total_frame = 0 68 | 69 | # collect images for training 70 | print ('Start collecting images...') 71 | e1 = cv2.getTickCount() 
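# What follows: read the MJPEG stream from the Pi in 1 KB chunks, locate each
# frame between the JPEG markers \xff\xd8 and \xff\xd9, decode it to grayscale,
# crop a 120x320 region of interest, and on every third frame feed the flattened
# pixels to the restored TensorFlow model; the predicted class index is stored
# in temp_prediction, which the SendToRPI thread forwards to the Raspberry Pi.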
72 | #image_array = np.zeros((1, 76800)) 73 | image_array = np.zeros((1, 38400)) 74 | label_array = np.zeros((1, 3), 'float') 75 | frame=1 76 | # stream video frames one by one 77 | try: 78 | global temp_prediction 79 | prediction = self.neural_network_model(self.x) 80 | sess=tf.Session() 81 | # OLD: 82 | #sess.run(tf.initialize_all_variables()) 83 | # NEW: 84 | sess.run(tf.global_variables_initializer()) 85 | saver=tf.train.Saver() 86 | #saver=tf.train.import_meta_graph('/home/akshay/Downloads/savedata/project.meta') 87 | saver.restore(sess,'/home/akshay/Downloads/savedata/project') 88 | 89 | stream_bytes = ' ' 90 | while self.send_inst: 91 | stream_bytes += self.connection.read(1024) 92 | first = stream_bytes.find('\xff\xd8') 93 | last = stream_bytes.find('\xff\xd9') 94 | if first != -1 and last != -1: 95 | jpg = stream_bytes[first:last + 2] 96 | stream_bytes = stream_bytes[last + 2:] 97 | image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.CV_LOAD_IMAGE_GRAYSCALE) 98 | 99 | # select lower half of the image 100 | roi = image[0:120, :] 101 | #roi=image 102 | # save streamed images 103 | #cv2.imwrite('training_images/frame{:>05}.jpg'.format(frame),image) 104 | 105 | #cv2.imshow('roi_image', roi) 106 | cv2.imshow('image',roi) 107 | cv2.waitKey(1) 108 | 109 | # reshape the roi image into one row array 110 | #temp_array = roi.reshape(1,76800).astype(np.float32) 111 | temp_array = roi.reshape(1,38400).astype(np.float32) 112 | if frame%3 is 0 : 113 | correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(self.y, 1)) 114 | accuracy = tf.reduce_mean(tf.cast(correct, 'float')) 115 | p1=tf.argmax(prediction,1) 116 | #save prediction to global variable 117 | temp_prediction=p1.eval({self.x:temp_array},session=sess) 118 | temp_prediction=str(temp_prediction) 119 | #print prediction.eval({x:tempimage}) 120 | 121 | frame += 1 122 | finally: 123 | self.connection.close() 124 | self.server_socket.close() 125 | #cv2.closeAllWindows() 126 | #sess.close() 127 | #ss.brea() 128 | 129 | 130 | def neural_network_model(self,data): 131 | 132 | hidden_1_layer = {'weights':tf.Variable(tf.truncated_normal([38400, n_nodes_hl1],stddev=1.0)), 133 | 'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))} 134 | output_layer = {'weights':tf.Variable(tf.truncated_normal([n_nodes_hl1, n_classes],stddev=1.0)), 135 | 'biases':tf.Variable(tf.random_normal([n_classes]))} 136 | 137 | 138 | l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases']) 139 | l1 = tf.nn.relu(l1) 140 | output = tf.matmul(l1,output_layer['weights']) + output_layer['biases'] 141 | 142 | return output 143 | 144 | 145 | 146 | class ThreadServer(object): 147 | def server_thread(): 148 | SendToRPI() 149 | def server_thread2(): 150 | MakePrediction() 151 | 152 | process1=threading.Thread(target=server_thread) 153 | process1.start() 154 | process2=threading.Thread(target=server_thread2) 155 | process2.start() 156 | ThreadServer() 157 | -------------------------------------------------------------------------------- /Neural Network setup/collect_images_via_rpi.py: -------------------------------------------------------------------------------- 1 | __main-author__ = 'zhengwang' 2 | __co-author__ = 'akshay' 3 | 4 | import numpy as np 5 | import threading 6 | import cv2 7 | import pygame 8 | from pygame.locals import * 9 | import socket 10 | import time 11 | import os 12 | import scipy.sparse 13 | 14 | pygame.init() 15 | screen=pygame.display.set_mode((70,50)) 16 | pygame.mouse.set_visible(0) 17 | temp='[0]' 18 | 19 | class 
ConnectToRPI(): 20 | def __init__(self): 21 | global temp 22 | #IP address and Port of RPI 23 | RPI_TCP_IP = '10.42.0.1' 24 | RPI_TCP_PORT = 8001 25 | BUFFER_SIZE = 1024 # Normally 1024, but we want fast response 26 | #Make connection to RPI 27 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 28 | s.bind((RPI_TCP_IP, RPI_TCP_PORT)) 29 | s.listen(1) 30 | conn, addr = s.accept() 31 | print 'Connection address:', addr 32 | while True: 33 | conn.send(str(temp)) 34 | time.sleep(0.2) 35 | 36 | class CollectData(object): 37 | 38 | def __init__(self): 39 | self.server_socket = socket.socket() 40 | self.server_socket.bind(('10.42.0.1', 8000)) 41 | self.server_socket.listen(0) 42 | 43 | # accept a single connection 44 | self.connection = self.server_socket.accept()[0].makefile('rb') 45 | self.send_inst = True 46 | 47 | # create labels 48 | self.k = np.zeros((3, 3), 'float') 49 | for i in range(3): 50 | self.k[i, i] = 1 51 | self.temp_label = np.zeros((1, 3), 'float') 52 | 53 | pygame.init() 54 | self.collect_image() 55 | 56 | def collect_image(self): 57 | 58 | saved_frame = 0 59 | total_frame = 0 60 | 61 | # collect images for training 62 | print ('Start collecting images...') 63 | e1 = cv2.getTickCount() 64 | #placeholder for images which will be flattened, which is equal to (1,(width x height)) 65 | image_array = np.zeros((1, 38400)) 66 | #image_array = np.zeros((1, 76800)) 67 | label_array = np.zeros((1, 3), 'float') 68 | 69 | # stream video frames one by one 70 | try: 71 | global temp 72 | stream_bytes = ' ' 73 | frame = 1 74 | while self.send_inst: 75 | stream_bytes += self.connection.read(1024) 76 | 77 | first = stream_bytes.find('\xff\xd8') 78 | last = stream_bytes.find('\xff\xd9') 79 | if first != -1 and last != -1: 80 | jpg = stream_bytes[first:last + 2] 81 | stream_bytes = stream_bytes[last + 2:] 82 | image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.CV_LOAD_IMAGE_GRAYSCALE) 83 | 84 | # select lower half of the image 85 | roi = image[0:120, :] 86 | #roi=image 87 | 88 | # save streamed images to check whether data collected is valid 89 | #cv2.imwrite('training_images/frame{:>05}.jpg'.format(frame),image) 90 | 91 | #cv2.imshow('roi_image', roi) 92 | cv2.imshow('r', roi) 93 | cv2.waitKey(1) 94 | 95 | # reshape the roi image into one row array 96 | temp_array = roi.reshape(1, 38400).astype(np.float32) 97 | #temp_array = roi.reshape(1, 76800).astype(np.float32) 98 | 99 | frame += 1 100 | total_frame += 1 101 | # get input from keypad 102 | for event in pygame.event.get(): 103 | #if keypad is pressed 104 | if event.type == KEYDOWN: 105 | print("code:"+str(event.key)+"Char:"+chr(event.key)) 106 | key_input = pygame.key.get_pressed() 107 | 108 | #if key 'w' is pressed, the robot moves in forward direction. 109 | if key_input[pygame.K_w]: 110 | print("Forward") 111 | saved_frame += 1 112 | image_array = np.vstack((image_array, temp_array)) 113 | label_array = np.vstack((label_array, self.k[0])) 114 | #self.ser.write(chr(1)) 115 | #save images 116 | cv2.imwrite('training_images/f/frame{:>05}.jpg'.format(frame),roi) 117 | temp='0' 118 | 119 | 120 | #if key 'd' is pressed, the robot moves in right direction. 
121 | elif key_input[pygame.K_d]: 122 | print("Right") 123 | image_array = np.vstack((image_array, temp_array)) 124 | label_array = np.vstack((label_array, self.k[1])) 125 | saved_frame += 1 126 | #self.ser.write(chr(3)) 127 | cv2.imwrite('training_images/r/frame{:>05}.jpg'.format(frame),roi) 128 | #save images 129 | temp='1' 130 | #if key 'a' is pressed, the robot moves in left direction. 131 | elif key_input[pygame.K_a]: 132 | print("Left") 133 | image_array = np.vstack((image_array, temp_array)) 134 | label_array = np.vstack((label_array, self.k[2])) 135 | saved_frame += 1 136 | #self.ser.write(chr(4)) 137 | cv2.imwrite('training_images/l/frame{:>05}.jpg'.format(frame),roi) 138 | #save images 139 | temp='2' 140 | #exit 141 | elif key_input[pygame.K_p] or key_input[pygame.K_o]: 142 | print ('exit') 143 | self.send_inst = False 144 | #self.ser.write(chr(0)) 145 | break 146 | else: 147 | temp='3' 148 | 149 | 150 | elif event.type == pygame.KEYUP: 151 | temp='3' 152 | #self.ser.write(chr(0)) 153 | 154 | # save training images and labels 155 | train = image_array[1:, :] 156 | train_labels = label_array[1:, :] 157 | 158 | 159 | 160 | # save training data as a numpy file 161 | file_name = str(int(time.time())) 162 | directory = "training_data" 163 | if not os.path.exists(directory): 164 | os.makedirs(directory) 165 | try: 166 | np.savez(directory + '/' + file_name + '.npz', train=train, train_labels=train_labels) 167 | except IOError as e: 168 | print(e) 169 | 170 | e2 = cv2.getTickCount() 171 | # calculate streaming duration 172 | time0 = (e2 - e1) / cv2.getTickFrequency() 173 | print ('Streaming duration:', time0) 174 | 175 | 176 | print(train.shape) 177 | print(train_labels.shape) 178 | print ('Total frame:', total_frame) 179 | print ('Saved frame:', saved_frame) 180 | print ('Dropped frame', total_frame - saved_frame) 181 | 182 | finally: 183 | self.connection.close() 184 | self.server_socket.close() 185 | cv2.destroyAllWindows() 186 | 187 | class RunThread(object): 188 | def server_thread(): 189 | ConnectToRPI() 190 | def server_thread2(): 191 | CollectData() 192 | 193 | d=threading.Thread(target=server_thread) 194 | d.start() 195 | v=threading.Thread(target=server_thread2) 196 | v.start() 197 | RunThread() 198 | --------------------------------------------------------------------------------