├── dfs.py
├── bfs.py
├── N_Queen.py
├── hangman_art.py
├── hangman.py
├── waterjug.py
├── waterjugbfs.py
├── MLPClassifier.py
├── hangman_words.py
└── MLPClassifier.ipynb
/dfs.py:
--------------------------------------------------------------------------------
1 | graph = {
2 |     'A' : ['B', 'C'],
3 |     'B' : ['D', 'E'],
4 |     'C' : ['F'],
5 |     'D' : [],
6 |     'E' : ['F'],
7 |     'F' : [],
8 | }
9 | visited = set()
10 | def dfs(visited, graph, node):
11 |     if node not in visited:
12 |         print(node)
13 |         visited.add(node)
14 |         for neighbour in graph[node]:
15 |             dfs(visited, graph, neighbour)
16 | print("Following is the Depth-First Search")
17 | dfs(visited, graph, 'A')
18 | 
19 | 
--------------------------------------------------------------------------------
/bfs.py:
--------------------------------------------------------------------------------
1 | graph = {
2 |     'P': ['Q', 'R', 'S'],
3 |     'Q': ['P', 'R'],
4 |     'R': ['P', 'Q', 'T'],
5 |     'T': ['R'],
6 |     'S': ['P']
7 | }
8 | visited = []
9 | queue = []
10 | def bfs(visited, graph, node):
11 |     visited.append(node)
12 |     queue.append(node)
13 |     while queue:
14 |         m = queue.pop(0)
15 |         print(m, end=" ")
16 |         for neighbour in graph[m]:
17 |             if neighbour not in visited:
18 |                 visited.append(neighbour)
19 |                 queue.append(neighbour)
20 | print("Following is the Breadth-First Search")
21 | bfs(visited, graph, 'P')
22 | 
--------------------------------------------------------------------------------
/N_Queen.py:
--------------------------------------------------------------------------------
1 | class solution:
2 |     def __init__(self):
3 |         self.MAX = 20
4 |         self.A = [0]*self.MAX
5 |     def placement(self, i, j):
6 |         for k in range(1, i):
7 |             if (self.A[k] == j) or abs(self.A[k] - j) == abs(k - i):
8 |                 return False
9 |         # No conflict with any previously placed queen
10 |         return True
11 |     def printplacedqueen(self, N):
12 |         print('Arrangement-->')
13 |         print()
14 | 
15 |         for i in range(1, N+1):
16 |             for j in range(1, N+1):
17 |                 if self.A[i] != j:
18 |                     print('\t_', end=' ')
19 |                 else:
20 |                     print('\tQ', end=' ')
21 |             print()
22 |         print()
23 | 
24 |     def N_Queens(self, i, N):
25 |         for k in range(1, N+1):
26 |             if self.placement(i, k):
27 |                 self.A[i] = k
28 |                 if i == N:
29 |                     self.printplacedqueen(N)
30 |                 else:
31 |                     self.N_Queens(i+1, N)
32 | N = int(input("Enter the number of queens: "))
33 | obj = solution()
34 | obj.N_Queens(1, N)
35 | 
--------------------------------------------------------------------------------
/hangman_art.py:
--------------------------------------------------------------------------------
1 | stages = ['''
2 |   +---+
3 |   |   |
4 |   O   |
5 |  /|\  |
6 |  / \  |
7 |       |
8 | =========
9 | ''', '''
10 |   +---+
11 |   |   |
12 |   O   |
13 |  /|\  |
14 |  /    |
15 |       |
16 | =========
17 | ''', '''
18 |   +---+
19 |   |   |
20 |   O   |
21 |  /|\  |
22 |       |
23 |       |
24 | =========
25 | ''', '''
26 |   +---+
27 |   |   |
28 |   O   |
29 |  /|   |
30 |       |
31 |       |
32 | =========''', '''
33 |   +---+
34 |   |   |
35 |   O   |
36 |   |   |
37 |       |
38 |       |
39 | =========
40 | ''', '''
41 |   +---+
42 |   |   |
43 |   O   |
44 |       |
45 |       |
46 |       |
47 | =========
48 | ''', '''
49 |   +---+
50 |   |   |
51 |       |
52 |       |
53 |       |
54 |       |
55 | =========
56 | ''']
57 | 
58 | logo = '''
59 |  _
60 | | |
61 | | |__   __ _ _ __   __ _ _ __ ___   __ _ _ __
62 | | '_ \ / _` | '_ \ / _` | '_ ` _ \ / _` | '_ \
63 | | | | | (_| | | | | (_| | | | | | | (_| | | | |
64 | |_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
65 |                     __/ |
66 |                    |___/ '''
67 | 
68 | 
69 | 
--------------------------------------------------------------------------------
/hangman.py:
--------------------------------------------------------------------------------
1 | import random
2 | 
3 | from hangman_words import word_list
4 | from hangman_art import stages, logo
5 | chosen_word = random.choice(word_list)
6 | lives = 6
7 | print(f'Psst, the solution is {chosen_word}.')
8 | print(logo)
9 | print(len(stages))
10 | display = []
11 | guessed = []
12 | for i in range(len(chosen_word)):
13 |     display.append("_")
14 | while "_" in display and lives > 0:
15 |     guess = input("Guess a letter: ").lower()
16 |     if len(guess) == 1 and guess.isalpha():
17 |         if guess in guessed:
18 |             print("You already guessed that letter")
19 |             continue
20 |         guessed.append(guess)
21 |         for i in range(len(chosen_word)):
22 |             letter = chosen_word[i]
23 |             if letter == guess:
24 |                 display[i] = letter
25 |         if guess not in chosen_word:
26 |             if lives > 0:
27 |                 lives = lives - 1
28 |             if lives == 0:
29 |                 print(stages[lives])
30 |                 print(f'the solution is {chosen_word}.')
31 |                 print("You lose")
32 |                 exit(1)
33 |         print(f"{' '.join(display)}")
34 |         print(lives)
35 |         print(stages[lives])
36 |     else:
37 |         print("Guess should be a single letter, not a word")
38 | else:
39 |     print("You have won")
40 | 
--------------------------------------------------------------------------------
/waterjug.py:
--------------------------------------------------------------------------------
1 | import math
2 | a = int(input("Enter Jug A capacity: "))
3 | b = int(input("Enter Jug B capacity: "))
4 | ai = int(input("Initially water in Jug A: "))
5 | bi = int(input("Initially water in Jug B: "))
6 | af = int(input("Final state of Jug A: "))
7 | bf = int(input("Final state of Jug B: "))
8 | if a <= 0 or b <= 0:
9 |     print("Jug capacities must be positive")
10 |     exit(1)
11 | if ai < 0 or bi < 0 or af < 0 or bf < 0:
12 |     print("Negative values are not allowed")
13 |     exit(1)
14 | def wjug(a, b, ai, bi, af, bf):
15 |     print("List of operations you can do:\n")
16 |     print("1.Fill Jug A completely")
17 |     print("2.Fill Jug B completely")
18 |     print("3.Empty Jug A completely")
19 |     print("4.Empty Jug B completely")
20 |     print("5.Pour from Jug A till Jug B is full or A becomes empty")
21 |     print("6.Pour from Jug B till Jug A is full or B becomes empty")
22 |     print("7.Pour all from Jug B to Jug A")
23 |     print("8.Pour all from Jug A to Jug B")
24 | 
25 |     while ai != af or bi != bf:
26 |         op = int(input("Enter operation type (1-8): "))
27 |         if op == 1:
28 |             ai = a
29 |         elif op == 2:
30 |             bi = b
31 |         elif op == 3:
32 |             ai = 0
33 |         elif op == 4:
34 |             bi = 0
35 |         elif op == 5:
36 |             pour_amount = min(ai, b-bi)
37 |             ai -= pour_amount
38 |             bi += pour_amount
39 |         elif op == 6:
40 |             pour_amount = min(bi, a-ai)
41 |             bi -= pour_amount
42 |             ai += pour_amount
43 |         elif op == 7:
44 |             pour_amount = min(bi, a-ai)
45 |             ai += pour_amount
46 |             bi -= pour_amount
47 |         elif op == 8:
48 |             pour_amount = min(ai, b-bi)
49 |             bi += pour_amount
50 |             ai -= pour_amount
51 |         else:
52 |             print("Invalid operation, please choose a number between 1 and 8")
53 |             continue
54 |         print(f"Jug A: {ai}, Jug B: {bi}")
55 |         if ai == af and bi == bf:
56 |             print("Final state reached: Jug A =", ai, ", Jug B =", bi)
57 |             return
58 |     print("Final state reached: Jug A =", ai, ", Jug B =", bi)
59 | 
60 | gcd = math.gcd(a, b)
61 | if (af <= a and bf <= b) and (af % gcd == bf % gcd == 0):
62 |     wjug(a, b, ai, bi, af, bf)
63 | else:
64 |     print("The final state is not achievable with the given capacities")
65 |     exit(1)
66 | 
--------------------------------------------------------------------------------
/waterjugbfs.py:
--------------------------------------------------------------------------------
1 | import math
2 | from collections import deque
3 | 
4 | ''' Input capacities and initial/final states for jugs'''
5 | a = int(input("Enter Jug A Capacity: "))
6 | b = int(input("Enter Jug B Capacity: "))
7 | ai = int(input("Initially Water in Jug A: "))
8 | bi = int(input("Initially Water in Jug B: "))
9 | af = int(input("Final State of Jug A: "))
10 | bf = int(input("Final State of Jug B: "))
11 | 
12 | # Check for negative values and whether the initial state already equals the final state
13 | if a <= 0 or b <= 0:
14 |     print("Jug capacities must be positive.")
15 |     exit(1)
16 | if ai < 0 or bi < 0 or af < 0 or bf < 0:
17 |     print("Negative values are not allowed.")
18 |     exit(1)
19 | if ai == af and bi == bf:
20 |     print(f"Initial state is already the final state: Jug A = {ai}, Jug B = {bi}")
21 |     exit()
22 | # Define the water jug solver function using BFS
23 | def bfs_wjug(a, b, ai, bi, af, bf):
24 |     visited = set()
25 |     queue = deque([(ai, bi, [])])  # (Jug A state, Jug B state, List of operations)
26 | 
27 |     while queue:
28 |         curr_ai, curr_bi, operations = queue.popleft()
29 | 
30 |         if (curr_ai, curr_bi) in visited:
31 |             continue
32 |         visited.add((curr_ai, curr_bi))
33 | 
34 |         # Check if the final state is reached
35 |         if curr_ai == af and curr_bi == bf:
36 |             for i, op in enumerate(operations):
37 |                 print(f"Step {i + 1}: {op}")
38 |             print(f"Final State Reached: Jug A = {curr_ai}, Jug B = {curr_bi}")
39 |             return
40 | 
41 |         # List of possible operations
42 |         possible_operations = [
43 |             (a, curr_bi, "Fill Jug A"),  # Fill Jug A
44 |             (curr_ai, b, "Fill Jug B"),  # Fill Jug B
45 |             (0, curr_bi, "Empty Jug A"),  # Empty Jug A
46 |             (curr_ai, 0, "Empty Jug B"),  # Empty Jug B
47 |             (curr_ai - min(curr_ai, b - curr_bi), curr_bi + min(curr_ai, b - curr_bi), "Pour from A to B"),  # Pour A to B
48 |             (curr_ai + min(curr_bi, a - curr_ai), curr_bi - min(curr_bi, a - curr_ai), "Pour from B to A"),  # Pour B to A
49 |         ]
50 | 
51 |         # Add each possible operation to the queue
52 |         for next_ai, next_bi, op in possible_operations:
53 |             if (next_ai, next_bi) not in visited:
54 |                 queue.append((next_ai, next_bi, operations + [op]))
55 | 
56 |     print("No solution found.")
57 |     return
58 | 
59 | # Check whether the final state is achievable using the GCD of the capacities
60 | gcd = math.gcd(a, b)
61 | 
62 | if (af <= a and bf <= b) and (af % gcd == bf % gcd == 0):
63 |     bfs_wjug(a, b, ai, bi, af, bf)
64 | else:
65 |     print("The final state is not achievable with the given capacities.")
66 |     exit()
67 | 
--------------------------------------------------------------------------------
/MLPClassifier.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | 
4 | # In[1]:
5 | 
6 | 
7 | import numpy as np
8 | import pandas as pd
9 | data = pd.read_csv('HR_comma_sep.csv')
10 | data.head()
11 | 
12 | 
13 | # In[2]:
14 | 
15 | 
16 | data.info()
17 | 
18 | 
19 | # In[3]:
20 | 
21 | 
22 | data['Departments'].value_counts()
23 | 
24 | 
25 | # In[4]:
26 | 
27 | 
28 | data['Departments'].unique()
29 | 
30 | 
31 | # In[5]:
32 | 
33 | 
34 | data['salary'].unique()
35 | 
36 | 
37 | # In[6]:
38 | 
39 | 
40 | from sklearn import preprocessing
41 | le = preprocessing.LabelEncoder()
42 | print(le)
43 | data['salary']=le.fit_transform(data['salary'])
44 | data['Departments']=le.fit_transform(data['Departments'])
45 | 
46 | 
47 | # In[7]:
48 | 
49 | 
50 | data['salary'].unique()
51 | 
52 | 
53 | # In[8]:
54 | 
55 | 
56 | X=data.iloc[:,:-1]
57 | X
58 | 
59 | 
60 | # In[9]:
61 | 
62 | 
63 | X=data[['satisfaction_level','last_evaluation','number_project','average_montly_hours','time_spend_company','Work_accident','promotion_last_5years','Departments','salary']]
64 | y=data['left']
65 | 
66 | 
67 | 
from sklearn.model_selection import train_test_split 68 | X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.3, random_state=42) 69 | 70 | 71 | # In[10]: 72 | 73 | 74 | X_train 75 | 76 | 77 | # In[11]: 78 | 79 | 80 | y_train 81 | 82 | 83 | # In[16]: 84 | 85 | 86 | from sklearn.neural_network import MLPClassifier 87 | clf = MLPClassifier( 88 | hidden_layer_sizes=(6,5), 89 | random_state=5, 90 | verbose=True, 91 | learning_rate_init=0.01, 92 | ) 93 | clf.fit(X_train,y_train) 94 | 95 | 96 | # In[17]: 97 | 98 | 99 | ypred=clf.predict(X_test) 100 | from sklearn.metrics import accuracy_score 101 | accuracy_score(y_test,ypred) 102 | 103 | 104 | # In[18]: 105 | 106 | 107 | X_test.shape 108 | 109 | 110 | # In[19]: 111 | 112 | 113 | n=pd.DataFrame({ 114 | 'satisfaction_level':[0.78], 115 | 'last_evaluation':[0.53], 116 | 'number_project':[2], 117 | 'average_montly_hours':[157], 118 | 'time_spend_company':[3], 119 | 'Work_accident':[0], 120 | 'promotion_last_5years':[0], 121 | 'Departments':[1], 122 | 'salary':[1] 123 | 124 | 125 | }) 126 | 127 | 128 | # In[20]: 129 | 130 | 131 | new_data=clf.predict(n) 132 | print(new_data) 133 | 134 | 135 | # In[22]: 136 | 137 | 138 | from sklearn.metrics import classification_report 139 | print(classification_report(y_test, ypred)) 140 | 141 | 142 | # In[23]: 143 | 144 | 145 | from sklearn.metrics import confusion_matrix 146 | ypred = clf.predict(X_test) 147 | print(confusion_matrix(y_test, ypred)) 148 | 149 | 150 | # In[24]: 151 | 152 | 153 | print(y_train.value_counts()) 154 | print(y_test.value_counts()) 155 | 156 | 157 | # In[ ]: 158 | 159 | 160 | 161 | 162 | -------------------------------------------------------------------------------- /hangman_words.py: -------------------------------------------------------------------------------- 1 | word_list = [ 2 | 'abruptly', 3 | 'absurd', 4 | 'abyss', 5 | 'affix', 6 | 'askew', 7 | 'avenue', 8 | 'awkward', 9 | 'axiom', 10 | 'azure', 11 | 'bagpipes', 12 | 'bandwagon', 13 | 'banjo', 14 | 'bayou', 15 | 'beekeeper', 16 | 'blitz', 17 | 'blizzard', 18 | 'boggle', 19 | 'bookworm', 20 | 'boxcar', 21 | 'boxful', 22 | 'buckaroo', 23 | 'buffalo', 24 | 'buffoon', 25 | 'buxom', 26 | 'buzzard', 27 | 'buzzing', 28 | 'buzzwords', 29 | 'caliph', 30 | 'cobweb', 31 | 'cockiness', 32 | 'croquet', 33 | 'crypt', 34 | 'curacao', 35 | 'cycle', 36 | 'daiquiri', 37 | 'dirndl', 38 | 'disavow', 39 | 'dizzying', 40 | 'duplex', 41 | 'dwarves', 42 | 'embezzle', 43 | 'equip', 44 | 'espionage', 45 | 'euouae', 46 | 'exodus', 47 | 'faking', 48 | 'fishhook', 49 | 'fixable', 50 | 'fjord', 51 | 'flapjack', 52 | 'flopping', 53 | 'fluffiness', 54 | 'flyby', 55 | 'foxglove', 56 | 'frazzled', 57 | 'frizzled', 58 | 'fuchsia', 59 | 'funny', 60 | 'gabby', 61 | 'galaxy', 62 | 'galvanize', 63 | 'gazebo', 64 | 'giaour', 65 | 'gizmo', 66 | 'glowworm', 67 | 'glyph', 68 | 'gnarly', 69 | 'gnostic', 70 | 'gossip', 71 | 'grogginess', 72 | 'haiku', 73 | 'haphazard', 74 | 'hyphen', 75 | 'iatrogenic', 76 | 'icebox', 77 | 'injury', 78 | 'ivory', 79 | 'ivy', 80 | 'jackpot', 81 | 'jaundice', 82 | 'jawbreaker', 83 | 'jaywalk', 84 | 'jazziest', 85 | 'jazzy', 86 | 'jelly', 87 | 'jigsaw', 88 | 'jinx', 89 | 'jiujitsu', 90 | 'jockey', 91 | 'jogging', 92 | 'joking', 93 | 'jovial', 94 | 'joyful', 95 | 'juicy', 96 | 'jukebox', 97 | 'jumbo', 98 | 'kayak', 99 | 'kazoo', 100 | 'keyhole', 101 | 'khaki', 102 | 'kilobyte', 103 | 'kiosk', 104 | 'kitsch', 105 | 'kiwifruit', 106 | 'klutz', 107 | 'knapsack', 108 | 'larynx', 109 | 'lengths', 110 | 'lucky', 111 | 'luxury', 112 | 
'lymph', 113 | 'marquis', 114 | 'matrix', 115 | 'megahertz', 116 | 'microwave', 117 | 'mnemonic', 118 | 'mystify', 119 | 'naphtha', 120 | 'nightclub', 121 | 'nowadays', 122 | 'numbskull', 123 | 'nymph', 124 | 'onyx', 125 | 'ovary', 126 | 'oxidize', 127 | 'oxygen', 128 | 'pajama', 129 | 'peekaboo', 130 | 'phlegm', 131 | 'pixel', 132 | 'pizazz', 133 | 'pneumonia', 134 | 'polka', 135 | 'pshaw', 136 | 'psyche', 137 | 'puppy', 138 | 'puzzling', 139 | 'quartz', 140 | 'queue', 141 | 'quips', 142 | 'quixotic', 143 | 'quiz', 144 | 'quizzes', 145 | 'quorum', 146 | 'razzmatazz', 147 | 'rhubarb', 148 | 'rhythm', 149 | 'rickshaw', 150 | 'schnapps', 151 | 'scratch', 152 | 'shiv', 153 | 'snazzy', 154 | 'sphinx', 155 | 'spritz', 156 | 'squawk', 157 | 'staff', 158 | 'strength', 159 | 'strengths', 160 | 'stretch', 161 | 'stronghold', 162 | 'stymied', 163 | 'subway', 164 | 'swivel', 165 | 'syndrome', 166 | 'thriftless', 167 | 'thumbscrew', 168 | 'topaz', 169 | 'transcript', 170 | 'transgress', 171 | 'transplant', 172 | 'triphthong', 173 | 'twelfth', 174 | 'twelfths', 175 | 'unknown', 176 | 'unworthy', 177 | 'unzip', 178 | 'uptown', 179 | 'vaporize', 180 | 'vixen', 181 | 'vodka', 182 | 'voodoo', 183 | 'vortex', 184 | 'voyeurism', 185 | 'walkway', 186 | 'waltz', 187 | 'wave', 188 | 'wavy', 189 | 'waxy', 190 | 'wellspring', 191 | 'wheezy', 192 | 'whiskey', 193 | 'whizzing', 194 | 'whomever', 195 | 'wimpy', 196 | 'witchcraft', 197 | 'wizard', 198 | 'woozy', 199 | 'wristwatch', 200 | 'wyvern', 201 | 'xylophone', 202 | 'yachtsman', 203 | 'yippee', 204 | 'yoked', 205 | 'youthful', 206 | 'yummy', 207 | 'zephyr', 208 | 'zigzag', 209 | 'zigzagging', 210 | 'zilch', 211 | 'zipper', 212 | 'zodiac', 213 | 'zombie', 214 | ] 215 | -------------------------------------------------------------------------------- /MLPClassifier.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "6bbb6e0d", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "data": { 11 | "text/html": [ 12 | "
\n", 13 | "\n", 26 | "\n", 27 | " \n", 28 | " \n", 29 | " \n", 30 | " \n", 31 | " \n", 32 | " \n", 33 | " \n", 34 | " \n", 35 | " \n", 36 | " \n", 37 | " \n", 38 | " \n", 39 | " \n", 40 | " \n", 41 | " \n", 42 | " \n", 43 | " \n", 44 | " \n", 45 | " \n", 46 | " \n", 47 | " \n", 48 | " \n", 49 | " \n", 50 | " \n", 51 | " \n", 52 | " \n", 53 | " \n", 54 | " \n", 55 | " \n", 56 | " \n", 57 | " \n", 58 | " \n", 59 | " \n", 60 | " \n", 61 | " \n", 62 | " \n", 63 | " \n", 64 | " \n", 65 | " \n", 66 | " \n", 67 | " \n", 68 | " \n", 69 | " \n", 70 | " \n", 71 | " \n", 72 | " \n", 73 | " \n", 74 | " \n", 75 | " \n", 76 | " \n", 77 | " \n", 78 | " \n", 79 | " \n", 80 | " \n", 81 | " \n", 82 | " \n", 83 | " \n", 84 | " \n", 85 | " \n", 86 | " \n", 87 | " \n", 88 | " \n", 89 | " \n", 90 | " \n", 91 | " \n", 92 | " \n", 93 | " \n", 94 | " \n", 95 | " \n", 96 | " \n", 97 | " \n", 98 | " \n", 99 | " \n", 100 | " \n", 101 | " \n", 102 | " \n", 103 | " \n", 104 | " \n", 105 | " \n", 106 | " \n", 107 | " \n", 108 | " \n", 109 | "
satisfaction_levellast_evaluationnumber_projectaverage_montly_hourstime_spend_companyWork_accidentleftpromotion_last_5yearsDepartmentssalary
00.380.5321573010saleslow
10.800.8652626010salesmedium
20.110.8872724010salesmedium
30.720.8752235010saleslow
40.370.5221593010saleslow
\n", 110 | "
" 111 | ], 112 | "text/plain": [ 113 | " satisfaction_level last_evaluation number_project average_montly_hours \\\n", 114 | "0 0.38 0.53 2 157 \n", 115 | "1 0.80 0.86 5 262 \n", 116 | "2 0.11 0.88 7 272 \n", 117 | "3 0.72 0.87 5 223 \n", 118 | "4 0.37 0.52 2 159 \n", 119 | "\n", 120 | " time_spend_company Work_accident left promotion_last_5years Departments \\\n", 121 | "0 3 0 1 0 sales \n", 122 | "1 6 0 1 0 sales \n", 123 | "2 4 0 1 0 sales \n", 124 | "3 5 0 1 0 sales \n", 125 | "4 3 0 1 0 sales \n", 126 | "\n", 127 | " salary \n", 128 | "0 low \n", 129 | "1 medium \n", 130 | "2 medium \n", 131 | "3 low \n", 132 | "4 low " 133 | ] 134 | }, 135 | "execution_count": 1, 136 | "metadata": {}, 137 | "output_type": "execute_result" 138 | } 139 | ], 140 | "source": [ 141 | "import numpy as np\n", 142 | "import pandas as pd\n", 143 | "data = pd.read_csv('HR_comma_sep.csv')\n", 144 | "data.head()" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": 2, 150 | "id": "6da0e596", 151 | "metadata": {}, 152 | "outputs": [ 153 | { 154 | "name": "stdout", 155 | "output_type": "stream", 156 | "text": [ 157 | "\n", 158 | "RangeIndex: 14999 entries, 0 to 14998\n", 159 | "Data columns (total 10 columns):\n", 160 | " # Column Non-Null Count Dtype \n", 161 | "--- ------ -------------- ----- \n", 162 | " 0 satisfaction_level 14999 non-null float64\n", 163 | " 1 last_evaluation 14999 non-null float64\n", 164 | " 2 number_project 14999 non-null int64 \n", 165 | " 3 average_montly_hours 14999 non-null int64 \n", 166 | " 4 time_spend_company 14999 non-null int64 \n", 167 | " 5 Work_accident 14999 non-null int64 \n", 168 | " 6 left 14999 non-null int64 \n", 169 | " 7 promotion_last_5years 14999 non-null int64 \n", 170 | " 8 Departments 14999 non-null object \n", 171 | " 9 salary 14999 non-null object \n", 172 | "dtypes: float64(2), int64(6), object(2)\n", 173 | "memory usage: 1.1+ MB\n" 174 | ] 175 | } 176 | ], 177 | "source": [ 178 | "data.info()" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 3, 184 | "id": "48033e0a", 185 | "metadata": {}, 186 | "outputs": [ 187 | { 188 | "data": { 189 | "text/plain": [ 190 | "sales 4140\n", 191 | "technical 2720\n", 192 | "support 2229\n", 193 | "IT 1227\n", 194 | "product_mng 902\n", 195 | "marketing 858\n", 196 | "RandD 787\n", 197 | "accounting 767\n", 198 | "hr 739\n", 199 | "management 630\n", 200 | "Name: Departments, dtype: int64" 201 | ] 202 | }, 203 | "execution_count": 3, 204 | "metadata": {}, 205 | "output_type": "execute_result" 206 | } 207 | ], 208 | "source": [ 209 | "data['Departments'].value_counts()" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 4, 215 | "id": "22e62021", 216 | "metadata": {}, 217 | "outputs": [ 218 | { 219 | "data": { 220 | "text/plain": [ 221 | "array(['sales', 'accounting', 'hr', 'technical', 'support', 'management',\n", 222 | " 'IT', 'product_mng', 'marketing', 'RandD'], dtype=object)" 223 | ] 224 | }, 225 | "execution_count": 4, 226 | "metadata": {}, 227 | "output_type": "execute_result" 228 | } 229 | ], 230 | "source": [ 231 | "data['Departments'].unique()" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": 5, 237 | "id": "ab71dd46", 238 | "metadata": {}, 239 | "outputs": [ 240 | { 241 | "data": { 242 | "text/plain": [ 243 | "array(['low', 'medium', 'high'], dtype=object)" 244 | ] 245 | }, 246 | "execution_count": 5, 247 | "metadata": {}, 248 | "output_type": "execute_result" 249 | } 250 | ], 251 | "source": [ 252 | 
"data['salary'].unique()" 253 | ] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "execution_count": 6, 258 | "id": "9e6c666e", 259 | "metadata": {}, 260 | "outputs": [ 261 | { 262 | "name": "stdout", 263 | "output_type": "stream", 264 | "text": [ 265 | "LabelEncoder()\n" 266 | ] 267 | } 268 | ], 269 | "source": [ 270 | "from sklearn import preprocessing\n", 271 | "le = preprocessing.LabelEncoder()\n", 272 | "print(le)\n", 273 | "data['salary']=le.fit_transform(data['salary'])\n", 274 | "data['Departments']=le.fit_transform(data['Departments'])\n" 275 | ] 276 | }, 277 | { 278 | "cell_type": "code", 279 | "execution_count": 7, 280 | "id": "41104ac7", 281 | "metadata": {}, 282 | "outputs": [ 283 | { 284 | "data": { 285 | "text/plain": [ 286 | "array([1, 2, 0])" 287 | ] 288 | }, 289 | "execution_count": 7, 290 | "metadata": {}, 291 | "output_type": "execute_result" 292 | } 293 | ], 294 | "source": [ 295 | "data['salary'].unique()" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": 8, 301 | "id": "1e489dbb", 302 | "metadata": {}, 303 | "outputs": [ 304 | { 305 | "data": { 306 | "text/html": [ 307 | "
\n", 308 | "\n", 321 | "\n", 322 | " \n", 323 | " \n", 324 | " \n", 325 | " \n", 326 | " \n", 327 | " \n", 328 | " \n", 329 | " \n", 330 | " \n", 331 | " \n", 332 | " \n", 333 | " \n", 334 | " \n", 335 | " \n", 336 | " \n", 337 | " \n", 338 | " \n", 339 | " \n", 340 | " \n", 341 | " \n", 342 | " \n", 343 | " \n", 344 | " \n", 345 | " \n", 346 | " \n", 347 | " \n", 348 | " \n", 349 | " \n", 350 | " \n", 351 | " \n", 352 | " \n", 353 | " \n", 354 | " \n", 355 | " \n", 356 | " \n", 357 | " \n", 358 | " \n", 359 | " \n", 360 | " \n", 361 | " \n", 362 | " \n", 363 | " \n", 364 | " \n", 365 | " \n", 366 | " \n", 367 | " \n", 368 | " \n", 369 | " \n", 370 | " \n", 371 | " \n", 372 | " \n", 373 | " \n", 374 | " \n", 375 | " \n", 376 | " \n", 377 | " \n", 378 | " \n", 379 | " \n", 380 | " \n", 381 | " \n", 382 | " \n", 383 | " \n", 384 | " \n", 385 | " \n", 386 | " \n", 387 | " \n", 388 | " \n", 389 | " \n", 390 | " \n", 391 | " \n", 392 | " \n", 393 | " \n", 394 | " \n", 395 | " \n", 396 | " \n", 397 | " \n", 398 | " \n", 399 | " \n", 400 | " \n", 401 | " \n", 402 | " \n", 403 | " \n", 404 | " \n", 405 | " \n", 406 | " \n", 407 | " \n", 408 | " \n", 409 | " \n", 410 | " \n", 411 | " \n", 412 | " \n", 413 | " \n", 414 | " \n", 415 | " \n", 416 | " \n", 417 | " \n", 418 | " \n", 419 | " \n", 420 | " \n", 421 | " \n", 422 | " \n", 423 | " \n", 424 | " \n", 425 | " \n", 426 | " \n", 427 | " \n", 428 | " \n", 429 | " \n", 430 | " \n", 431 | " \n", 432 | " \n", 433 | " \n", 434 | " \n", 435 | " \n", 436 | " \n", 437 | " \n", 438 | " \n", 439 | " \n", 440 | " \n", 441 | " \n", 442 | " \n", 443 | " \n", 444 | " \n", 445 | " \n", 446 | " \n", 447 | " \n", 448 | " \n", 449 | " \n", 450 | " \n", 451 | " \n", 452 | " \n", 453 | " \n", 454 | " \n", 455 | " \n", 456 | " \n", 457 | " \n", 458 | " \n", 459 | " \n", 460 | " \n", 461 | " \n", 462 | " \n", 463 | " \n", 464 | " \n", 465 | " \n", 466 | " \n", 467 | " \n", 468 | " \n", 469 | " \n", 470 | "
satisfaction_levellast_evaluationnumber_projectaverage_montly_hourstime_spend_companyWork_accidentleftpromotion_last_5yearsDepartments
00.380.53215730107
10.800.86526260107
20.110.88727240107
30.720.87522350107
40.370.52215930107
..............................
149940.400.57215130108
149950.370.48216030108
149960.370.53214330108
149970.110.96628040108
149980.370.52215830108
\n", 471 | "

14999 rows × 9 columns

\n", 472 | "
" 473 | ], 474 | "text/plain": [ 475 | " satisfaction_level last_evaluation number_project \\\n", 476 | "0 0.38 0.53 2 \n", 477 | "1 0.80 0.86 5 \n", 478 | "2 0.11 0.88 7 \n", 479 | "3 0.72 0.87 5 \n", 480 | "4 0.37 0.52 2 \n", 481 | "... ... ... ... \n", 482 | "14994 0.40 0.57 2 \n", 483 | "14995 0.37 0.48 2 \n", 484 | "14996 0.37 0.53 2 \n", 485 | "14997 0.11 0.96 6 \n", 486 | "14998 0.37 0.52 2 \n", 487 | "\n", 488 | " average_montly_hours time_spend_company Work_accident left \\\n", 489 | "0 157 3 0 1 \n", 490 | "1 262 6 0 1 \n", 491 | "2 272 4 0 1 \n", 492 | "3 223 5 0 1 \n", 493 | "4 159 3 0 1 \n", 494 | "... ... ... ... ... \n", 495 | "14994 151 3 0 1 \n", 496 | "14995 160 3 0 1 \n", 497 | "14996 143 3 0 1 \n", 498 | "14997 280 4 0 1 \n", 499 | "14998 158 3 0 1 \n", 500 | "\n", 501 | " promotion_last_5years Departments \n", 502 | "0 0 7 \n", 503 | "1 0 7 \n", 504 | "2 0 7 \n", 505 | "3 0 7 \n", 506 | "4 0 7 \n", 507 | "... ... ... \n", 508 | "14994 0 8 \n", 509 | "14995 0 8 \n", 510 | "14996 0 8 \n", 511 | "14997 0 8 \n", 512 | "14998 0 8 \n", 513 | "\n", 514 | "[14999 rows x 9 columns]" 515 | ] 516 | }, 517 | "execution_count": 8, 518 | "metadata": {}, 519 | "output_type": "execute_result" 520 | } 521 | ], 522 | "source": [ 523 | "X=data.iloc[:,:-1]\n", 524 | "X" 525 | ] 526 | }, 527 | { 528 | "cell_type": "code", 529 | "execution_count": 9, 530 | "id": "2787d7a6", 531 | "metadata": {}, 532 | "outputs": [], 533 | "source": [ 534 | "X=data[['satisfaction_level','last_evaluation','number_project','average_montly_hours','time_spend_company','Work_accident','promotion_last_5years','Departments','salary']]\n", 535 | "y=data['left']\n", 536 | "\n", 537 | "\n", 538 | "from sklearn.model_selection import train_test_split\n", 539 | "X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.3, random_state=42)" 540 | ] 541 | }, 542 | { 543 | "cell_type": "code", 544 | "execution_count": 10, 545 | "id": "4484d1e7", 546 | "metadata": {}, 547 | "outputs": [ 548 | { 549 | "data": { 550 | "text/html": [ 551 | "
\n", 552 | "\n", 565 | "\n", 566 | " \n", 567 | " \n", 568 | " \n", 569 | " \n", 570 | " \n", 571 | " \n", 572 | " \n", 573 | " \n", 574 | " \n", 575 | " \n", 576 | " \n", 577 | " \n", 578 | " \n", 579 | " \n", 580 | " \n", 581 | " \n", 582 | " \n", 583 | " \n", 584 | " \n", 585 | " \n", 586 | " \n", 587 | " \n", 588 | " \n", 589 | " \n", 590 | " \n", 591 | " \n", 592 | " \n", 593 | " \n", 594 | " \n", 595 | " \n", 596 | " \n", 597 | " \n", 598 | " \n", 599 | " \n", 600 | " \n", 601 | " \n", 602 | " \n", 603 | " \n", 604 | " \n", 605 | " \n", 606 | " \n", 607 | " \n", 608 | " \n", 609 | " \n", 610 | " \n", 611 | " \n", 612 | " \n", 613 | " \n", 614 | " \n", 615 | " \n", 616 | " \n", 617 | " \n", 618 | " \n", 619 | " \n", 620 | " \n", 621 | " \n", 622 | " \n", 623 | " \n", 624 | " \n", 625 | " \n", 626 | " \n", 627 | " \n", 628 | " \n", 629 | " \n", 630 | " \n", 631 | " \n", 632 | " \n", 633 | " \n", 634 | " \n", 635 | " \n", 636 | " \n", 637 | " \n", 638 | " \n", 639 | " \n", 640 | " \n", 641 | " \n", 642 | " \n", 643 | " \n", 644 | " \n", 645 | " \n", 646 | " \n", 647 | " \n", 648 | " \n", 649 | " \n", 650 | " \n", 651 | " \n", 652 | " \n", 653 | " \n", 654 | " \n", 655 | " \n", 656 | " \n", 657 | " \n", 658 | " \n", 659 | " \n", 660 | " \n", 661 | " \n", 662 | " \n", 663 | " \n", 664 | " \n", 665 | " \n", 666 | " \n", 667 | " \n", 668 | " \n", 669 | " \n", 670 | " \n", 671 | " \n", 672 | " \n", 673 | " \n", 674 | " \n", 675 | " \n", 676 | " \n", 677 | " \n", 678 | " \n", 679 | " \n", 680 | " \n", 681 | " \n", 682 | " \n", 683 | " \n", 684 | " \n", 685 | " \n", 686 | " \n", 687 | " \n", 688 | " \n", 689 | " \n", 690 | " \n", 691 | " \n", 692 | " \n", 693 | " \n", 694 | " \n", 695 | " \n", 696 | " \n", 697 | " \n", 698 | " \n", 699 | " \n", 700 | " \n", 701 | " \n", 702 | " \n", 703 | " \n", 704 | " \n", 705 | " \n", 706 | " \n", 707 | " \n", 708 | " \n", 709 | " \n", 710 | " \n", 711 | " \n", 712 | " \n", 713 | " \n", 714 | "
satisfaction_levellast_evaluationnumber_projectaverage_montly_hourstime_spend_companyWork_accidentpromotion_last_5yearsDepartmentssalary
126020.100.84725040061
48890.570.68415431042
15720.390.48215430091
133750.910.68413240002
8790.820.97526350092
..............................
51910.520.96424630081
134180.490.65423370072
53900.660.73524920082
8600.791.00421850071
72700.980.86221940071
\n", 715 | "

10499 rows × 9 columns

\n", 716 | "
" 717 | ], 718 | "text/plain": [ 719 | " satisfaction_level last_evaluation number_project \\\n", 720 | "12602 0.10 0.84 7 \n", 721 | "4889 0.57 0.68 4 \n", 722 | "1572 0.39 0.48 2 \n", 723 | "13375 0.91 0.68 4 \n", 724 | "879 0.82 0.97 5 \n", 725 | "... ... ... ... \n", 726 | "5191 0.52 0.96 4 \n", 727 | "13418 0.49 0.65 4 \n", 728 | "5390 0.66 0.73 5 \n", 729 | "860 0.79 1.00 4 \n", 730 | "7270 0.98 0.86 2 \n", 731 | "\n", 732 | " average_montly_hours time_spend_company Work_accident \\\n", 733 | "12602 250 4 0 \n", 734 | "4889 154 3 1 \n", 735 | "1572 154 3 0 \n", 736 | "13375 132 4 0 \n", 737 | "879 263 5 0 \n", 738 | "... ... ... ... \n", 739 | "5191 246 3 0 \n", 740 | "13418 233 7 0 \n", 741 | "5390 249 2 0 \n", 742 | "860 218 5 0 \n", 743 | "7270 219 4 0 \n", 744 | "\n", 745 | " promotion_last_5years Departments salary \n", 746 | "12602 0 6 1 \n", 747 | "4889 0 4 2 \n", 748 | "1572 0 9 1 \n", 749 | "13375 0 0 2 \n", 750 | "879 0 9 2 \n", 751 | "... ... ... ... \n", 752 | "5191 0 8 1 \n", 753 | "13418 0 7 2 \n", 754 | "5390 0 8 2 \n", 755 | "860 0 7 1 \n", 756 | "7270 0 7 1 \n", 757 | "\n", 758 | "[10499 rows x 9 columns]" 759 | ] 760 | }, 761 | "execution_count": 10, 762 | "metadata": {}, 763 | "output_type": "execute_result" 764 | } 765 | ], 766 | "source": [ 767 | "X_train" 768 | ] 769 | }, 770 | { 771 | "cell_type": "code", 772 | "execution_count": 11, 773 | "id": "2620323e", 774 | "metadata": {}, 775 | "outputs": [ 776 | { 777 | "data": { 778 | "text/plain": [ 779 | "12602 1\n", 780 | "4889 0\n", 781 | "1572 1\n", 782 | "13375 0\n", 783 | "879 1\n", 784 | " ..\n", 785 | "5191 0\n", 786 | "13418 0\n", 787 | "5390 0\n", 788 | "860 1\n", 789 | "7270 0\n", 790 | "Name: left, Length: 10499, dtype: int64" 791 | ] 792 | }, 793 | "execution_count": 11, 794 | "metadata": {}, 795 | "output_type": "execute_result" 796 | } 797 | ], 798 | "source": [ 799 | "y_train" 800 | ] 801 | }, 802 | { 803 | "cell_type": "code", 804 | "execution_count": 16, 805 | "id": "f0c0dbf9", 806 | "metadata": {}, 807 | "outputs": [ 808 | { 809 | "name": "stdout", 810 | "output_type": "stream", 811 | "text": [ 812 | "Iteration 1, loss = 0.61512605\n", 813 | "Iteration 2, loss = 0.57545658\n", 814 | "Iteration 3, loss = 0.55823146\n", 815 | "Iteration 4, loss = 0.53011644\n", 816 | "Iteration 5, loss = 0.50549749\n", 817 | "Iteration 6, loss = 0.48004244\n", 818 | "Iteration 7, loss = 0.47915513\n", 819 | "Iteration 8, loss = 0.46239153\n", 820 | "Iteration 9, loss = 0.47441120\n", 821 | "Iteration 10, loss = 0.46241650\n", 822 | "Iteration 11, loss = 0.45068143\n", 823 | "Iteration 12, loss = 0.45071101\n", 824 | "Iteration 13, loss = 0.45213613\n", 825 | "Iteration 14, loss = 0.46049483\n", 826 | "Iteration 15, loss = 0.45897398\n", 827 | "Iteration 16, loss = 0.46170601\n", 828 | "Iteration 17, loss = 0.45527116\n", 829 | "Iteration 18, loss = 0.44996595\n", 830 | "Iteration 19, loss = 0.44982305\n", 831 | "Iteration 20, loss = 0.45384764\n", 832 | "Iteration 21, loss = 0.46981282\n", 833 | "Iteration 22, loss = 0.45010489\n", 834 | "Iteration 23, loss = 0.46852413\n", 835 | "Iteration 24, loss = 0.45242336\n", 836 | "Iteration 25, loss = 0.45769894\n", 837 | "Iteration 26, loss = 0.45074974\n", 838 | "Iteration 27, loss = 0.44067556\n", 839 | "Iteration 28, loss = 0.43205930\n", 840 | "Iteration 29, loss = 0.41680331\n", 841 | "Iteration 30, loss = 0.40752887\n", 842 | "Iteration 31, loss = 0.39186392\n", 843 | "Iteration 32, loss = 0.37054743\n", 844 | "Iteration 33, loss = 0.35797517\n", 845 | "Iteration 34, 
loss = 0.34539661\n", 846 | "Iteration 35, loss = 0.33669847\n", 847 | "Iteration 36, loss = 0.33052333\n", 848 | "Iteration 37, loss = 0.31754074\n", 849 | "Iteration 38, loss = 0.31470388\n", 850 | "Iteration 39, loss = 0.31036267\n", 851 | "Iteration 40, loss = 0.30717479\n", 852 | "Iteration 41, loss = 0.31789566\n", 853 | "Iteration 42, loss = 0.31782866\n", 854 | "Iteration 43, loss = 0.30641517\n", 855 | "Iteration 44, loss = 0.30123487\n", 856 | "Iteration 45, loss = 0.30474254\n", 857 | "Iteration 46, loss = 0.31051473\n", 858 | "Iteration 47, loss = 0.28624198\n", 859 | "Iteration 48, loss = 0.29316190\n", 860 | "Iteration 49, loss = 0.28463864\n", 861 | "Iteration 50, loss = 0.28582673\n", 862 | "Iteration 51, loss = 0.28378548\n", 863 | "Iteration 52, loss = 0.28098729\n", 864 | "Iteration 53, loss = 0.28513299\n", 865 | "Iteration 54, loss = 0.27992380\n", 866 | "Iteration 55, loss = 0.28034603\n", 867 | "Iteration 56, loss = 0.28447776\n", 868 | "Iteration 57, loss = 0.27920526\n", 869 | "Iteration 58, loss = 0.27545140\n", 870 | "Iteration 59, loss = 0.27418263\n", 871 | "Iteration 60, loss = 0.26921345\n", 872 | "Iteration 61, loss = 0.27200225\n", 873 | "Iteration 62, loss = 0.26757142\n", 874 | "Iteration 63, loss = 0.26824884\n", 875 | "Iteration 64, loss = 0.26115995\n", 876 | "Iteration 65, loss = 0.25875549\n", 877 | "Iteration 66, loss = 0.25919820\n", 878 | "Iteration 67, loss = 0.26178076\n", 879 | "Iteration 68, loss = 0.26228984\n", 880 | "Iteration 69, loss = 0.26001505\n", 881 | "Iteration 70, loss = 0.25199503\n", 882 | "Iteration 71, loss = 0.27545142\n", 883 | "Iteration 72, loss = 0.25545200\n", 884 | "Iteration 73, loss = 0.25464872\n", 885 | "Iteration 74, loss = 0.24550959\n", 886 | "Iteration 75, loss = 0.24336325\n", 887 | "Iteration 76, loss = 0.24215966\n", 888 | "Iteration 77, loss = 0.23850104\n", 889 | "Iteration 78, loss = 0.23889253\n", 890 | "Iteration 79, loss = 0.23109586\n", 891 | "Iteration 80, loss = 0.23399638\n", 892 | "Iteration 81, loss = 0.23802679\n", 893 | "Iteration 82, loss = 0.23385322\n", 894 | "Iteration 83, loss = 0.24284518\n", 895 | "Iteration 84, loss = 0.23134733\n", 896 | "Iteration 85, loss = 0.24330075\n", 897 | "Iteration 86, loss = 0.22532877\n", 898 | "Iteration 87, loss = 0.22568393\n", 899 | "Iteration 88, loss = 0.22081075\n", 900 | "Iteration 89, loss = 0.22846698\n", 901 | "Iteration 90, loss = 0.22502850\n", 902 | "Iteration 91, loss = 0.21784989\n", 903 | "Iteration 92, loss = 0.21292884\n", 904 | "Iteration 93, loss = 0.18813462\n", 905 | "Iteration 94, loss = 0.17769639\n", 906 | "Iteration 95, loss = 0.18582654\n", 907 | "Iteration 96, loss = 0.18127325\n", 908 | "Iteration 97, loss = 0.16734828\n", 909 | "Iteration 98, loss = 0.18286465\n", 910 | "Iteration 99, loss = 0.16981846\n", 911 | "Iteration 100, loss = 0.17330641\n", 912 | "Iteration 101, loss = 0.15969499\n", 913 | "Iteration 102, loss = 0.15829448\n", 914 | "Iteration 103, loss = 0.15677615\n", 915 | "Iteration 104, loss = 0.16861506\n", 916 | "Iteration 105, loss = 0.15665635\n", 917 | "Iteration 106, loss = 0.17290740\n", 918 | "Iteration 107, loss = 0.16313185\n", 919 | "Iteration 108, loss = 0.15127830\n", 920 | "Iteration 109, loss = 0.15304460\n", 921 | "Iteration 110, loss = 0.15963898\n", 922 | "Iteration 111, loss = 0.16963601\n", 923 | "Iteration 112, loss = 0.15063887\n", 924 | "Iteration 113, loss = 0.15393529\n", 925 | "Iteration 114, loss = 0.17528787\n", 926 | "Iteration 115, loss = 0.17271381\n", 927 | "Iteration 116, loss = 
0.15426095\n", 928 | "Iteration 117, loss = 0.14971746\n", 929 | "Iteration 118, loss = 0.15078193\n", 930 | "Iteration 119, loss = 0.16200046\n", 931 | "Iteration 120, loss = 0.15242240\n", 932 | "Iteration 121, loss = 0.15700943\n", 933 | "Iteration 122, loss = 0.15836204\n", 934 | "Iteration 123, loss = 0.15847074\n", 935 | "Iteration 124, loss = 0.15069127\n", 936 | "Iteration 125, loss = 0.14876175\n", 937 | "Iteration 126, loss = 0.14993943\n", 938 | "Iteration 127, loss = 0.15168619\n", 939 | "Iteration 128, loss = 0.16245349\n", 940 | "Iteration 129, loss = 0.15119375\n", 941 | "Iteration 130, loss = 0.15576164\n", 942 | "Iteration 131, loss = 0.15837115\n", 943 | "Iteration 132, loss = 0.15242943\n", 944 | "Iteration 133, loss = 0.15235532\n", 945 | "Iteration 134, loss = 0.15423949\n", 946 | "Iteration 135, loss = 0.15300715\n", 947 | "Iteration 136, loss = 0.15038034\n", 948 | "Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.\n" 949 | ] 950 | }, 951 | { 952 | "data": { 953 | "text/html": [ 954 | "
MLPClassifier(hidden_layer_sizes=(6, 5), learning_rate_init=0.01,\n",
 955 |        "              random_state=5, verbose=True)
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
" 957 | ], 958 | "text/plain": [ 959 | "MLPClassifier(hidden_layer_sizes=(6, 5), learning_rate_init=0.01,\n", 960 | " random_state=5, verbose=True)" 961 | ] 962 | }, 963 | "execution_count": 16, 964 | "metadata": {}, 965 | "output_type": "execute_result" 966 | } 967 | ], 968 | "source": [ 969 | "from sklearn.neural_network import MLPClassifier\n", 970 | "clf = MLPClassifier(\n", 971 | " hidden_layer_sizes=(6,5),\n", 972 | " random_state=5,\n", 973 | " verbose=True,\n", 974 | " learning_rate_init=0.01,\n", 975 | " )\n", 976 | "clf.fit(X_train,y_train)" 977 | ] 978 | }, 979 | { 980 | "cell_type": "code", 981 | "execution_count": 17, 982 | "id": "a8d6f0bd", 983 | "metadata": {}, 984 | "outputs": [ 985 | { 986 | "data": { 987 | "text/plain": [ 988 | "0.9386666666666666" 989 | ] 990 | }, 991 | "execution_count": 17, 992 | "metadata": {}, 993 | "output_type": "execute_result" 994 | } 995 | ], 996 | "source": [ 997 | "ypred=clf.predict(X_test)\n", 998 | "from sklearn.metrics import accuracy_score\n", 999 | "accuracy_score(y_test,ypred)" 1000 | ] 1001 | }, 1002 | { 1003 | "cell_type": "code", 1004 | "execution_count": 18, 1005 | "id": "08e0c1f2", 1006 | "metadata": {}, 1007 | "outputs": [ 1008 | { 1009 | "data": { 1010 | "text/plain": [ 1011 | "(4500, 9)" 1012 | ] 1013 | }, 1014 | "execution_count": 18, 1015 | "metadata": {}, 1016 | "output_type": "execute_result" 1017 | } 1018 | ], 1019 | "source": [ 1020 | "X_test.shape" 1021 | ] 1022 | }, 1023 | { 1024 | "cell_type": "code", 1025 | "execution_count": 19, 1026 | "id": "2d6b1dfe", 1027 | "metadata": {}, 1028 | "outputs": [], 1029 | "source": [ 1030 | "n=pd.DataFrame({\n", 1031 | " 'satisfaction_level':[0.78],\n", 1032 | " 'last_evaluation':[0.53],\n", 1033 | " 'number_project':[2],\n", 1034 | " 'average_montly_hours':[157],\n", 1035 | " 'time_spend_company':[3],\n", 1036 | " 'Work_accident':[0],\n", 1037 | " 'promotion_last_5years':[0],\n", 1038 | " 'Departments':[1],\n", 1039 | " 'salary':[1]\n", 1040 | " \n", 1041 | " \n", 1042 | "})" 1043 | ] 1044 | }, 1045 | { 1046 | "cell_type": "code", 1047 | "execution_count": 20, 1048 | "id": "e9918395", 1049 | "metadata": {}, 1050 | "outputs": [ 1051 | { 1052 | "name": "stdout", 1053 | "output_type": "stream", 1054 | "text": [ 1055 | "[0]\n" 1056 | ] 1057 | } 1058 | ], 1059 | "source": [ 1060 | "new_data=clf.predict(n)\n", 1061 | "print(new_data)" 1062 | ] 1063 | }, 1064 | { 1065 | "cell_type": "code", 1066 | "execution_count": 22, 1067 | "id": "061b0348", 1068 | "metadata": {}, 1069 | "outputs": [ 1070 | { 1071 | "name": "stdout", 1072 | "output_type": "stream", 1073 | "text": [ 1074 | " precision recall f1-score support\n", 1075 | "\n", 1076 | " 0 0.97 0.95 0.96 3428\n", 1077 | " 1 0.84 0.91 0.88 1072\n", 1078 | "\n", 1079 | " accuracy 0.94 4500\n", 1080 | " macro avg 0.91 0.93 0.92 4500\n", 1081 | "weighted avg 0.94 0.94 0.94 4500\n", 1082 | "\n" 1083 | ] 1084 | } 1085 | ], 1086 | "source": [ 1087 | "from sklearn.metrics import classification_report\n", 1088 | "print(classification_report(y_test, ypred))" 1089 | ] 1090 | }, 1091 | { 1092 | "cell_type": "code", 1093 | "execution_count": 23, 1094 | "id": "ebad03ae", 1095 | "metadata": {}, 1096 | "outputs": [ 1097 | { 1098 | "name": "stdout", 1099 | "output_type": "stream", 1100 | "text": [ 1101 | "[[3248 180]\n", 1102 | " [ 96 976]]\n" 1103 | ] 1104 | } 1105 | ], 1106 | "source": [ 1107 | "from sklearn.metrics import confusion_matrix\n", 1108 | "ypred = clf.predict(X_test)\n", 1109 | "print(confusion_matrix(y_test, ypred))" 1110 | ] 1111 | }, 1112 | { 
1113 | "cell_type": "code", 1114 | "execution_count": 24, 1115 | "id": "7c034037", 1116 | "metadata": {}, 1117 | "outputs": [ 1118 | { 1119 | "name": "stdout", 1120 | "output_type": "stream", 1121 | "text": [ 1122 | "0 8000\n", 1123 | "1 2499\n", 1124 | "Name: left, dtype: int64\n", 1125 | "0 3428\n", 1126 | "1 1072\n", 1127 | "Name: left, dtype: int64\n" 1128 | ] 1129 | } 1130 | ], 1131 | "source": [ 1132 | "print(y_train.value_counts())\n", 1133 | "print(y_test.value_counts())" 1134 | ] 1135 | }, 1136 | { 1137 | "cell_type": "code", 1138 | "execution_count": null, 1139 | "id": "30cb511b", 1140 | "metadata": {}, 1141 | "outputs": [], 1142 | "source": [] 1143 | } 1144 | ], 1145 | "metadata": { 1146 | "kernelspec": { 1147 | "display_name": "Python 3 (ipykernel)", 1148 | "language": "python", 1149 | "name": "python3" 1150 | }, 1151 | "language_info": { 1152 | "codemirror_mode": { 1153 | "name": "ipython", 1154 | "version": 3 1155 | }, 1156 | "file_extension": ".py", 1157 | "mimetype": "text/x-python", 1158 | "name": "python", 1159 | "nbconvert_exporter": "python", 1160 | "pygments_lexer": "ipython3", 1161 | "version": "3.11.4" 1162 | } 1163 | }, 1164 | "nbformat": 4, 1165 | "nbformat_minor": 5 1166 | } 1167 | --------------------------------------------------------------------------------