├── images
├── no_keyword.png
├── results_2.png
├── output_108_0.png
├── output_110_1.png
├── output_118_0.png
├── output_63_0.png
├── output_68_0.png
├── output_70_0.png
├── output_72_0.png
├── output_77_1.png
├── output_79_1.png
├── output_92_0.png
├── output_94_0.png
├── simulated_words.png
├── location_features.png
├── feature_importance_1.png
├── feature_importance_2.png
├── test_set_performance.png
└── location_features_example.png
├── README.md
└── insurance_card_text_classification.md
/images/no_keyword.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/no_keyword.png
--------------------------------------------------------------------------------
/images/results_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/results_2.png
--------------------------------------------------------------------------------
/images/output_108_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_108_0.png
--------------------------------------------------------------------------------
/images/output_110_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_110_1.png
--------------------------------------------------------------------------------
/images/output_118_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_118_0.png
--------------------------------------------------------------------------------
/images/output_63_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_63_0.png
--------------------------------------------------------------------------------
/images/output_68_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_68_0.png
--------------------------------------------------------------------------------
/images/output_70_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_70_0.png
--------------------------------------------------------------------------------
/images/output_72_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_72_0.png
--------------------------------------------------------------------------------
/images/output_77_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_77_1.png
--------------------------------------------------------------------------------
/images/output_79_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_79_1.png
--------------------------------------------------------------------------------
/images/output_92_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_92_0.png
--------------------------------------------------------------------------------
/images/output_94_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/output_94_0.png
--------------------------------------------------------------------------------
/images/simulated_words.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/simulated_words.png
--------------------------------------------------------------------------------
/images/location_features.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/location_features.png
--------------------------------------------------------------------------------
/images/feature_importance_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/feature_importance_1.png
--------------------------------------------------------------------------------
/images/feature_importance_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/feature_importance_2.png
--------------------------------------------------------------------------------
/images/test_set_performance.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/test_set_performance.png
--------------------------------------------------------------------------------
/images/location_features_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hatemr/Classifying-Insurance-Card-Text/master/images/location_features_example.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Classifying-Insurance-Card-Text
2 | A machine learning project to identify names, Group IDs, and Member IDs from insurance cards.
3 |
4 | The code can be found in [insurance_card_text_classification.ipynb](insurance_card_text_classification.ipynb). Since `.ipynb` files often don't render on GitHub, I also made a `.md` version [here](insurance_card_text_classification.md).
5 |
6 | # Business Problem
7 | Patients scan their insurance card at the clinic, but an administrative assistant still must manually enter the insurance information into the computer. This manual data entry costs the clinic time and degrades the patient experience. If we could automatically extract information from the scanned insurance card, we could save time and avoid the frustration of manual entry.
8 |
9 | # Solution
10 | I built a text classifier to identify which words on an insurance card were 1) the member name, 2) the member ID, and 3) the group number.
11 |
12 | # Data
13 | The company that builds the scanners does not save the scanned text, because doing so would violate health privacy laws, so we had to produce insurance cards ourselves. We found about five real cards and roughly 20 generic cards online. The company extracted the text using OCR software, and I then took the resulting XML and extracted the words from each card.
14 |
15 | I then hand-labelled every word as 1) member name, 2) group ID, 3) member ID, or 4) none. The classes were heavily imbalanced; there is only one name on each card alongside a couple hundred other words. To overcome the class imbalance, I simulated new Group IDs:
16 | * letter -> random letter
17 | * digit -> random digit
18 | * punctuation -> same
19 |
20 | 
21 |
22 | The resulting simulated words looked similar, just with different digits/letters.
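As a rough sketch, the character-substitution rule above can be implemented like this (the function name is illustrative; the notebook's actual simulation follows the same per-character replacement idea):

```python
import random
import string

def simulate_group_id(real_id: str) -> str:
    """Create a synthetic Group ID by replacing each character with a
    random character of the same type; punctuation is left unchanged."""
    out = []
    for char in real_id:
        if char.isdigit():
            out.append(random.choice(string.digits))
        elif char.isupper():
            out.append(random.choice(string.ascii_uppercase))
        elif char.islower():
            out.append(random.choice(string.ascii_lowercase))
        else:  # punctuation and anything else stays the same
            out.append(char)
    return ''.join(out)

# simulate_group_id("GRP-123456")  ->  e.g. "QZT-504981"
```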
23 |
24 | I also simulated names, using a list of most-common names I found online. This balanced the classes, even though most of the data was now simulated.
25 |
26 | # Modeling
27 | I trained a random forest using a 60/40 stratified train/test split, to keep the class proportions consistent across splits. Here is the test set performance:
28 |
29 | 
30 |
31 | I also measured the feature importances to see which features were highly predictive:
32 |
33 | 
34 |
35 | The fraction of alphabetic characters was the most predictive feature, while word length was not predictive.
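For reference, here is a minimal sketch of the training setup described above, assuming scikit-learn; `X` (the engineered feature matrix) and `y` (the word labels) are illustrative names, not objects defined in this README:

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# X: DataFrame of per-word features, y: Series of labels (assumed to exist)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.6, stratify=y, random_state=0)

clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(X_train, y_train)

print(clf.score(X_test, y_test))                        # test set accuracy
print(dict(zip(X.columns, clf.feature_importances_)))   # feature importances
```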
36 |
37 | ## Take-2
38 | Next, I changed to multiclass classification to predict 1) group ID, 2) member ID, or 3) none. I also added an indicator variable for whether the word was a "keyword", such as "Member" or "Group". The results are shown below, broken down by simulated and real data:
39 |
40 | 
41 |
42 | The results look mostly good, with only a few off-diagonal errors. However, the model performs worse on the _real_ data, which is a concern. This would have to be improved later, but for a proof of concept the performance isn't bad.
43 |
44 | Now, length becomes a very important feature:
45 | 
46 |
47 | # Text-based features
48 | I engineered the following text-based features to represent each word (a sketch of the computation follows the list):
49 | 1. Length of word
50 | 2. Fraction of characters that are letters
51 | 3. Fraction of characters that are digits
52 | 4. Fraction of alphabetic characters that are uppercase
53 | 5. Fraction of alphabetic characters that are lowercase
54 | 6. Fraction of characters that are punctuation (e.g. `.`, `,`, `:`)
55 | 7. Fraction of characters that are periods
56 | 8. Fraction of characters that are dashes
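As a rough sketch, a few of these features can be computed for a single word like this (a simplified version of the notebook's `create_features` function; the helper name is illustrative):

```python
import string

def word_features(word: str) -> dict:
    """Compute simple character-composition features for one word."""
    n = len(word)
    return {
        'length': n,
        'frac_alpha': sum(c.isalpha() for c in word) / n,
        'frac_digit': sum(c.isdigit() for c in word) / n,
        'frac_upper': sum(c.isupper() for c in word) / n,
        'frac_punc': sum(c in string.punctuation for c in word) / n,
    }

# word_features("ZGP123456789") -> {'length': 12, 'frac_alpha': 0.25, 'frac_digit': 0.75, ...}
```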
57 |
58 | # Location features
59 | We can use location information as features to identify Group numbers and the other fields. For example, Group numbers often sit next to text saying "Group No.:". We encode the location features manually: the vertical location is the line number, normalized to the range [0, 1], and the horizontal location is left/middle/right, encoded as 0/0.5/1:
60 |
61 | 
62 |
63 | 
64 |
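A minimal sketch of this encoding (the helper name is illustrative; in the notebook the values are derived from hand-labelled line and side columns):

```python
def location_features(line_number, total_lines, side):
    """Encode a word's position; side is 'left', 'middle', or 'right'."""
    y_loc = 1 - line_number / total_lines                 # top of card ~ 1, bottom ~ 0
    x_loc = {'left': 0.0, 'middle': 0.5, 'right': 1.0}[side]
    return x_loc, y_loc

# e.g. a word on line 4 of 14, on the right side:
# location_features(4, 14, 'right') -> (1.0, 0.714...)
```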
65 | Instead of using a machine learning model with location as features, we use a simple rule-based algorithm (a sketch follows this list):
66 | * Iterate through the words on the card.
67 | * If a word is a keyword (e.g. _Member_ is a keyword for Member IDs), measure the distance to every other word.
68 | * Iterate through the other words, starting with the closest word.
69 | * If a word is more than 50% digits and at least four characters long, predict it as the Member ID (or Group ID). This is because IDs are typically longer than three characters and contain many digits and few letters.
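A simplified sketch of this heuristic (function and variable names are illustrative; the notebook operates on a dataframe of words and pre-computed distances instead):

```python
import numpy as np

def predict_id(words, keyword_loc):
    """words: list of (text, x, y) tuples; keyword_loc: (x, y) of the keyword.
    Returns the predicted ID word, or None if no candidate qualifies."""
    # sort candidates by distance to the keyword
    by_distance = sorted(
        words,
        key=lambda w: np.linalg.norm(np.array(w[1:]) - np.array(keyword_loc)))
    for text, x, y in by_distance:
        frac_digit = sum(c.isdigit() for c in text) / len(text)
        # mostly digits and at least four characters long -> call it the ID
        if frac_digit > 0.5 and len(text) >= 4:
            return text
    return None
```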
70 |
71 | # Results
72 | This simple algorithm correctly identified 8 of 10 Member IDs and 7 of 10 Group IDs. The failure cases are all known cases where the algorithm's assumptions did not hold:
73 | * there was no group ID to find
74 | * the group ID had fewer than 50% digits
75 | * there was no keyword on the card. Here's an example:
76 |
77 | 
78 |
79 | # Future work
80 | This project gives a compelling proof of concept for automated data entry. Indeed, some companies like Zocdoc have built models using CNNs to identify group IDs when you scan your card on your phone. Another similar offering is Textract from Amazon, which scans an image and extracts key-value pairs. The approach used here, feature engineering plus an ML model, can serve as a good solution, especially if commercial tools aren't flexible enough for this particular task. Further work should be done to engineer new features, refine the models, and deploy a model in production.
81 |
--------------------------------------------------------------------------------
/insurance_card_text_classification.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ```python
4 | import xml.etree.ElementTree as ET
5 | import os, sys
6 | import nltk  # needed for nltk.word_tokenize below
7 | #from nltk.corpus import names
8 | nltk.download('punkt')  # tokenizer data used by word_tokenize
9 | #nltk.download('names')
10 | import string
11 | import random
12 | import matplotlib.pyplot as plt
13 | import numpy as np
14 | import pandas as pd
15 | import seaborn as sn
16 | %matplotlib inline
17 | ```
18 |
19 |
20 | ```python
21 | # import data
22 | df = pd.read_excel('Card Samples/group ids not identified.xlsx', index_col=None, header=0, dtype={'word': str})
23 |
24 | # fill NAs
25 | df1 = df.fillna(0)
26 |
27 | # remove include=0
28 | df1 = df1.loc[df1.include==1]
29 | ```
30 |
31 | Split the lines into individual words.
32 |
33 |
34 | ```python
35 | df1_values = df1.values.copy()
36 | new_values = np.empty((1,12))
37 |
38 | # split rows to individual words
39 | for i in range(df1_values[:,1].shape[0]):
40 |     for word in df1_values[i,1].split():
41 |         new_row = df1_values[i,:].copy()
42 |         new_row[1] = word
43 |         new_row = new_row.reshape(1,-1)
44 |         new_values = np.append(new_values, new_row, axis=0)
45 |
46 | # delete first row
47 | new_values = new_values[1:,:]
48 |
49 | # create dataframe
50 | df2 = pd.DataFrame(columns=df1.columns.tolist(), data=new_values)
51 |
52 | # horizontal location
53 | df2 = df2.assign(x_loc=0.5*df2.middle + df2.right_side)
54 | df2.x_loc = df2.x_loc.astype(float)
55 |
56 | # vertical location
57 | df2 = df2.assign(y_loc=1-df2.line/df2.total_lines_on_card)
58 | df2.y_loc = df2.y_loc.astype(float)
59 |
60 | # delete uneeded columns
61 | df2 = df2.drop(columns=['include', 'left_side', 'middle', 'right_side', 'line', 'total_lines_on_card'])
62 | ```
63 |
64 |
65 | ```python
66 | # get location of keyword
67 | df2_values = df2.values
68 | dist_memberid = np.empty((1,2))
69 | dist_groupid = np.empty((1,2))
70 |
71 | for card in np.unique(df2_values[:,0]):
72 |     d1 = df2_values[df2_values[:,0]==card]  # rows for this card
73 |
74 |     d2 = d1[d1[:,4]==1]  # member id keyword
75 |     if d2.shape[0] == 0:
76 |         memberid_loc = np.array([[0,0]])
77 |     else:
78 |         memberid_loc = d2[0,6:8].reshape(1,-1)
79 |     memberid_loc = np.repeat(memberid_loc, d1.shape[0], axis=0)
80 |     dist_memberid = np.append(dist_memberid, memberid_loc, axis=0)
81 |
82 |     d3 = d1[d1[:,5]==1]  # group id keyword
83 |     if d3.shape[0] == 0:
84 |         groupid_loc = np.array([[0,0]])
85 |     else:
86 |         groupid_loc = d3[0,6:8].reshape(1,-1)  # x and y
87 |     groupid_loc = np.repeat(groupid_loc, d1.shape[0], axis=0)
88 |     dist_groupid = np.append(dist_groupid, groupid_loc, axis=0)
89 |
90 | dist_memberid = dist_memberid[1:].astype(float)
91 | dist_groupid = dist_groupid[1:].astype(float)
92 |
93 | # group id keyword locations
94 | df3 = df2.assign(x_loc_group_keyword=dist_groupid[:,0])
95 | df3 = df3.assign(y_loc_group_keyword=dist_groupid[:,1])
96 |
97 | # member id keyword locations
98 | df3 = df3.assign(x_loc_member_keyword=dist_memberid[:,0])
99 | df3 = df3.assign(y_loc_member_keyword=dist_memberid[:,1])
100 |
101 | # calc distances
102 | dist_group_id = np.linalg.norm(df3[['x_loc','y_loc']].values.astype(float) - df3[['x_loc_group_keyword','y_loc_group_keyword']].values.astype(float), axis=1)
103 | dist_member_id = np.linalg.norm(df3[['x_loc','y_loc']].values.astype(float) - df3[['x_loc_member_keyword','y_loc_member_keyword']].values.astype(float), axis=1)
104 |
105 | df3 = df3.assign(dist_member_id=dist_member_id)
106 | df3 = df3.assign(dist_group_id=dist_group_id)
107 |
108 | frac_digit = []
109 | for index, row in df3.iterrows():
110 |     frac_digit.append(sum([1 for char in row.word if char.isdigit()]) / len(row.word))
111 |
112 | df3 = df3.assign(frac_digit = frac_digit)
113 | df3 = df3.assign(pred_member=df3.shape[0]*[0])
114 | df3 = df3.assign(pred_group=df3.shape[0]*[0])
115 | # drop extra columns
116 | #df3 = df3.drop(columns=['x_loc','y_loc','x_loc_group_keyword','y_loc_group_keyword', 'x_loc_member_keyword','y_loc_member_keyword'])
117 | ```
118 |
119 |
120 | ```python
121 | n,m = df3.iloc[0:1,:].shape
122 | cols = df3.columns.tolist()
123 | #df4 = pd.DataFrame(data=d4, columns=cols)
124 |
125 | d0 = np.empty((1,m))
126 |
127 | for card in np.unique(df3.card):
128 |     if card>3:
129 |         pass
130 |     d1 = df3.loc[df3.card==card]
131 |
132 |     for i, row in d1.sort_values('dist_member_id').iterrows():
133 |         if row.frac_digit > 0.5 and len(row.word) >= 4:
134 |             #i_memberid_pred = i
135 |             d5 = row.values.copy().reshape(1,-1)
136 |             d5[:,-2] = 1
137 |             break
138 |     #if 'd5' not in locals():
139 |     #    print("No prediction for member id for card", card)
140 |
141 |
142 |     for i, row in d1.sort_values('dist_group_id').iterrows():
143 |         if row.frac_digit > 0.5 and len(row.word) >= 4:
144 |             i_groupid_pred = i
145 |             d10 = row.values.copy().reshape(1,-1)
146 |             d10[:,-1] = 1
147 |             break
148 |     #if 'd10' not in locals():
149 |     #    print("No prediction for group id for card", card)
150 |
151 |     # member id
152 |     d3 = d1.loc[d1.member_id==1].values
153 |     d4 = d1.loc[d1.memberid_keyword==1].values
154 |     #d5 = d2.loc[d1.index==i_memberid_pred].values.reshape(1,-1)
155 |     d6 = np.append(d3, d4, axis=0)
156 |     d6 = np.append(d6, d5, axis=0)
157 |
158 |     # group id
159 |     d8 = d1.loc[d1.group_id==1].values
160 |     d9 = d1.loc[d1.groupid_keyword==1].values
161 |     # d10 = d7.loc[d7.index==i_groupid_pred].values.reshape(1,-1)
162 |     d11 = np.append(d8, d9, axis=0)
163 |     d11 = np.append(d11, d10, axis=0)
164 |
165 |     # combine member id and group id
166 |     d12 = np.append(d6, d11, axis=0)
167 |
168 |
169 |     d0 = np.append(d0, d12, axis=0)
170 |     #break
171 |
172 | df4 = pd.DataFrame(data=d0, columns=cols)
173 | df4.drop([0], inplace=True)
174 | ```
175 |
176 |
177 | ```python
178 | #pd.DataFrame(data=d0, columns=cols).drop([0])
179 | ```
180 |
181 |
182 | ```python
183 | cards = np.unique(df4.card)
184 | i=6
185 | df4.loc[df4.card==cards[i]][['word','member_id','memberid_keyword','x_loc','y_loc', 'x_loc_member_keyword', 'y_loc_member_keyword', 'dist_member_id','frac_digit','pred_member']]
186 | ```
187 |
188 |
189 |
190 |
191 |
192 |
|    | word      | member_id | memberid_keyword | x_loc | y_loc    | x_loc_member_keyword | y_loc_member_keyword | dist_member_id | frac_digit | pred_member |
|----|-----------|-----------|------------------|-------|----------|----------------------|----------------------|----------------|------------|-------------|
| 38 | 123456789 | 1 | 0 | 0 | 0.727273 | 0 | 0.727273 | 0 | 1 | 0 |
| 39 | Member    | 0 | 1 | 0 | 0.727273 | 0 | 0.727273 | 0 | 0 | 0 |
| 40 | 123456789 | 1 | 0 | 0 | 0.727273 | 0 | 0.727273 | 0 | 1 | 1 |
| 41 | 123456    | 0 | 0 | 1 | 0.727273 | 0 | 0.727273 | 1 | 1 | 0 |
| 42 | Group     | 0 | 0 | 1 | 0.727273 | 0 | 0.727273 | 1 | 0 | 0 |
| 43 | 123456    | 0 | 0 | 1 | 0.727273 | 0 | 0.727273 | 1 | 1 | 0 |

307 | ```python
308 | filt = df4.loc[df4.pred_member==1]
309 | filt1 = filt.loc[filt.member_id==1]
310 | TP = filt1.shape[0]
311 | print(filt.member_id.sum()/filt.shape[0])
312 |
313 | ```
314 |
315 | 0.8
316 |
317 |
318 |
319 | ```python
320 | filt = df4.loc[df4.pred_group==1]
321 | filt1 = filt.loc[filt.group_id==1]
322 | TP = filt1.shape[0]
323 | print(filt.group_id.sum()/filt.shape[0])
324 | ```
325 |
326 | 0.7
327 |
328 |
329 |
330 | ```python
331 | i=8
332 | df4.loc[df4.card==cards[i]][['word','group_id','groupid_keyword','x_loc','y_loc', 'x_loc_group_keyword', 'y_loc_group_keyword', 'dist_group_id','frac_digit','pred_group']]
333 | ```
334 |
335 |
336 |
337 |
338 |
339 |
|    | word      | group_id | groupid_keyword | x_loc | y_loc    | x_loc_group_keyword | y_loc_group_keyword | dist_group_id | frac_digit | pred_group |
|----|-----------|----------|-----------------|-------|----------|---------------------|---------------------|---------------|------------|------------|
| 50 | 112345000 | 0 | 0 | 0 | 0.714286 | 1 | 0.857143 | 1.01015  | 1 | 0 |
| 51 | Member    | 0 | 0 | 0 | 0.714286 | 1 | 0.857143 | 1.01015  | 0 | 0 |
| 52 | ID        | 0 | 0 | 0 | 0.714286 | 1 | 0.857143 | 1.01015  | 0 | 0 |
| 53 | 112345000 | 0 | 0 | 0 | 0.714286 | 1 | 0.857143 | 1.01015  | 1 | 0 |
| 54 | NEOOOOOO  | 1 | 0 | 1 | 0.857143 | 1 | 0.857143 | 0        | 0 | 0 |
| 55 | Group     | 0 | 1 | 1 | 0.857143 | 1 | 0.857143 | 0        | 0 | 0 |
| 56 | 00000     | 0 | 0 | 1 | 0.142857 | 1 | 0.857143 | 0.714286 | 1 | 1 |

467 | ```python
468 | df3.loc[df3.card==cards[i]]
469 | ```
470 |
471 |
472 |
473 |
474 |
475 |
|     | card | word | member_id | group_id | memberid_keyword | groupid_keyword | x_loc | y_loc | x_loc_group_keyword | y_loc_group_keyword | x_loc_member_keyword | y_loc_member_keyword | dist_member_id | dist_group_id | frac_digit | pred_member | pred_group |
|-----|------|------|-----------|----------|------------------|-----------------|-------|-------|---------------------|---------------------|----------------------|----------------------|----------------|---------------|------------|-------------|------------|
| 282 | 24 | PacificSource | 0 | 0 | 0 | 0 | 0.0 | 0.928571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.214286 | 1.002548 | 0.00 | 0 | 0 |
| 283 | 24 | Group | 0 | 0 | 0 | 0 | 1.0 | 0.928571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.022702 | 0.071429 | 0.00 | 0 | 0 |
| 284 | 24 | Name | 0 | 0 | 0 | 0 | 1.0 | 0.928571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.022702 | 0.071429 | 0.00 | 0 | 0 |
| 285 | 24 | Here | 0 | 0 | 0 | 0 | 1.0 | 0.928571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.022702 | 0.071429 | 0.00 | 0 | 0 |
| 286 | 24 | HEALTH | 0 | 0 | 0 | 0 | 1.0 | 0.857143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.010153 | 0.000000 | 0.00 | 0 | 0 |
| 287 | 24 | PLANS | 0 | 0 | 0 | 0 | 1.0 | 0.857143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.010153 | 0.000000 | 0.00 | 0 | 0 |
| 288 | 24 | Group | 0 | 0 | 0 | 1 | 1.0 | 0.857143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.010153 | 0.000000 | 0.00 | 0 | 0 |
| 289 | 24 | #: | 0 | 0 | 0 | 0 | 1.0 | 0.857143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.010153 | 0.000000 | 0.00 | 0 | 0 |
| 290 | 24 | NEOOOOOO | 0 | 1 | 0 | 0 | 1.0 | 0.857143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.010153 | 0.000000 | 0.00 | 0 | 0 |
| 291 | 24 | Subscriber | 0 | 0 | 0 | 0 | 0.0 | 0.785714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.002548 | 0.00 | 0 | 0 |
| 292 | 24 | Name: | 0 | 0 | 0 | 0 | 0.0 | 0.785714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.002548 | 0.00 | 0 | 0 |
| 293 | 24 | John | 0 | 0 | 0 | 0 | 0.0 | 0.785714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.002548 | 0.00 | 0 | 0 |
| 294 | 24 | Smith | 0 | 0 | 0 | 0 | 0.0 | 0.785714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.002548 | 0.00 | 0 | 0 |
| 295 | 24 | Member | 0 | 0 | 1 | 0 | 0.0 | 0.714286 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.000000 | 1.010153 | 0.00 | 0 | 0 |
| 296 | 24 | ID | 0 | 0 | 1 | 0 | 0.0 | 0.714286 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.000000 | 1.010153 | 0.00 | 0 | 0 |
| 297 | 24 | 112345000 | 1 | 0 | 0 | 0 | 0.0 | 0.714286 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.000000 | 1.010153 | 1.00 | 0 | 0 |
| 298 | 24 | Network: | 0 | 0 | 0 | 0 | 0.0 | 0.642857 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.022702 | 0.00 | 0 | 0 |
| 299 | 24 | SmartHeaIth | 0 | 0 | 0 | 0 | 0.0 | 0.642857 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.022702 | 0.00 | 0 | 0 |
| 300 | 24 | (Referral | 0 | 0 | 0 | 0 | 0.0 | 0.642857 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.022702 | 0.00 | 0 | 0 |
| 301 | 24 | Required) | 0 | 0 | 0 | 0 | 0.0 | 0.642857 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.071429 | 1.022702 | 0.00 | 0 | 0 |
| 302 | 24 | Card | 0 | 0 | 0 | 0 | 0.0 | 0.571429 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.142857 | 1.040016 | 0.00 | 0 | 0 |
| 303 | 24 | Issued: | 0 | 0 | 0 | 0 | 0.0 | 0.571429 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.142857 | 1.040016 | 0.00 | 0 | 0 |
| 304 | 24 | 01/01/14 | 0 | 0 | 0 | 0 | 0.0 | 0.571429 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.142857 | 1.040016 | 0.75 | 0 | 0 |
| 305 | 24 | ID | 0 | 0 | 0 | 0 | 0.0 | 0.500000 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.214286 | 1.061862 | 0.00 | 0 | 0 |
| 306 | 24 | 00 | 0 | 0 | 0 | 0 | 0.0 | 0.428571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.285714 | 1.087968 | 1.00 | 0 | 0 |
| 307 | 24 | 01 | 0 | 0 | 0 | 0 | 0.0 | 0.357143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.357143 | 1.118034 | 1.00 | 0 | 0 |
| 308 | 24 | 02 | 0 | 0 | 0 | 0 | 0.0 | 0.285714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.428571 | 1.151751 | 1.00 | 0 | 0 |
| 309 | 24 | Member | 0 | 0 | 0 | 0 | 0.0 | 0.500000 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.214286 | 1.061862 | 0.00 | 0 | 0 |
| 310 | 24 | PCP | 0 | 0 | 0 | 0 | 0.5 | 0.500000 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.543984 | 0.614452 | 0.00 | 0 | 0 |
| 311 | 24 | John | 0 | 0 | 0 | 0 | 0.0 | 0.428571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.285714 | 1.087968 | 0.00 | 0 | 0 |
| 312 | 24 | Susie | 0 | 0 | 0 | 0 | 0.0 | 0.357143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.357143 | 1.118034 | 0.00 | 0 | 0 |
| 313 | 24 | David | 0 | 0 | 0 | 0 | 0.0 | 0.285714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.428571 | 1.151751 | 0.00 | 0 | 0 |
| 314 | 24 | D. | 0 | 0 | 0 | 0 | 0.5 | 0.428571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.575876 | 0.658539 | 0.00 | 0 | 0 |
| 315 | 24 | Jones | 0 | 0 | 0 | 0 | 0.5 | 0.428571 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.575876 | 0.658539 | 0.00 | 0 | 0 |
| 316 | 24 | D. | 0 | 0 | 0 | 0 | 0.5 | 0.357143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.614452 | 0.707107 | 0.00 | 0 | 0 |
| 317 | 24 | Jones | 0 | 0 | 0 | 0 | 0.5 | 0.357143 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.614452 | 0.707107 | 0.00 | 0 | 0 |
| 318 | 24 | D. | 0 | 0 | 0 | 0 | 0.5 | 0.285714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.658539 | 0.759296 | 0.00 | 0 | 0 |
| 319 | 24 | Jones | 0 | 0 | 0 | 0 | 0.5 | 0.285714 | 1.0 | 0.857143 | 0.0 | 0.714286 | 0.658539 | 0.759296 | 0.00 | 0 | 0 |
| 320 | 24 | Drug | 0 | 0 | 0 | 0 | 1.0 | 0.214286 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.118034 | 0.642857 | 0.00 | 0 | 0 |
| 321 | 24 | List | 0 | 0 | 0 | 0 | 1.0 | 0.214286 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.118034 | 0.642857 | 0.00 | 0 | 0 |
| 322 | 24 | RxBin | 0 | 0 | 0 | 0 | 1.0 | 0.142857 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.151751 | 0.714286 | 0.00 | 0 | 0 |
| 323 | 24 | RxGroup | 0 | 0 | 0 | 0 | 1.0 | 0.071429 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.188808 | 0.785714 | 0.00 | 0 | 0 |
| 324 | 24 | RxPCN | 0 | 0 | 0 | 0 | 1.0 | 0.000000 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.228904 | 0.857143 | 0.00 | 0 | 0 |
| 325 | 24 | XX | 0 | 0 | 0 | 0 | 1.0 | 0.214286 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.118034 | 0.642857 | 0.00 | 0 | 0 |
| 326 | 24 | 00000 | 0 | 0 | 0 | 0 | 1.0 | 0.142857 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.151751 | 0.714286 | 1.00 | 0 | 0 |
| 327 | 24 | 00000000 | 0 | 0 | 0 | 0 | 1.0 | 0.071429 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.188808 | 0.785714 | 1.00 | 0 | 0 |
| 328 | 24 | 00000 | 0 | 0 | 0 | 0 | 1.0 | 0.000000 | 1.0 | 0.857143 | 0.0 | 0.714286 | 1.228904 | 0.857143 | 1.00 | 0 | 0 |

1459 | ```python
1460 |
1461 | ```
1462 |
1463 |
1464 | ```python
1465 | df3.loc[df3.card==2][['word','member_id','memberid_keyword','x_loc','y_loc', 'x_loc_member_keyword', 'y_loc_member_keyword', 'dist_member_id','frac_digit','pred_member']]
1466 | ```
1467 |
1468 |
1469 |
1470 |
1471 |
1472 |
|    | word | member_id | memberid_keyword | x_loc | y_loc | x_loc_member_keyword | y_loc_member_keyword | dist_member_id | frac_digit | pred_member |
|----|------|-----------|------------------|-------|-------|----------------------|----------------------|----------------|------------|-------------|
| 0 | BlueCross | 0 | 0 | 0.0 | 0.9375 | 0.0 | 0.6875 | 0.250000 | 0.000000 | 0 |
| 1 | BlueShield | 0 | 0 | 0.0 | 0.9375 | 0.0 | 0.6875 | 0.250000 | 0.000000 | 0 |
| 2 | Subscriber | 0 | 0 | 0.0 | 0.8125 | 0.0 | 0.6875 | 0.125000 | 0.000000 | 0 |
| 3 | (O): | 0 | 0 | 0.0 | 0.8125 | 0.0 | 0.6875 | 0.125000 | 0.000000 | 0 |
| 4 | SMITH, | 0 | 0 | 0.0 | 0.7500 | 0.0 | 0.6875 | 0.062500 | 0.000000 | 0 |
| 5 | JOHN | 0 | 0 | 0.0 | 0.7500 | 0.0 | 0.6875 | 0.062500 | 0.000000 | 0 |
| 6 | Identification | 0 | 1 | 0.0 | 0.6875 | 0.0 | 0.6875 | 0.000000 | 0.000000 | 0 |
| 7 | Number(3.5): | 0 | 0 | 0.0 | 0.6875 | 0.0 | 0.6875 | 0.000000 | 0.166667 | 0 |
| 8 | ZGP123456789 | 1 | 0 | 0.0 | 0.6250 | 0.0 | 0.6875 | 0.062500 | 0.750000 | 0 |
| 9 | Group | 0 | 0 | 0.0 | 0.5625 | 0.0 | 0.6875 | 0.125000 | 0.000000 | 0 |
| 10 | No: | 0 | 0 | 0.0 | 0.5625 | 0.0 | 0.6875 | 0.125000 | 0.000000 | 0 |
| 11 | 123456 | 0 | 0 | 0.0 | 0.5625 | 0.0 | 0.6875 | 0.125000 | 1.000000 | 0 |
| 12 | Effective | 0 | 0 | 0.0 | 0.5000 | 0.0 | 0.6875 | 0.187500 | 0.000000 | 0 |
| 13 | 01/01/13 | 0 | 0 | 0.0 | 0.5000 | 0.0 | 0.6875 | 0.187500 | 0.750000 | 0 |
| 14 | Plan | 0 | 0 | 0.0 | 0.4375 | 0.0 | 0.6875 | 0.250000 | 0.000000 | 0 |
| 15 | Code: | 0 | 0 | 0.0 | 0.4375 | 0.0 | 0.6875 | 0.250000 | 0.000000 | 0 |
| 16 | BC | 0 | 0 | 0.0 | 0.4375 | 0.0 | 0.6875 | 0.250000 | 0.000000 | 0 |
| 17 | 400 | 0 | 0 | 0.0 | 0.4375 | 0.0 | 0.6875 | 0.250000 | 1.000000 | 0 |
| 18 | BS | 0 | 0 | 0.0 | 0.4375 | 0.0 | 0.6875 | 0.250000 | 0.000000 | 0 |
| 19 | 900 | 0 | 0 | 0.0 | 0.4375 | 0.0 | 0.6875 | 0.250000 | 1.000000 | 0 |
| 20 | Rx | 0 | 0 | 1.0 | 0.3125 | 0.0 | 0.6875 | 1.068000 | 0.000000 | 0 |
| 21 | PCN | 0 | 0 | 1.0 | 0.3125 | 0.0 | 0.6875 | 1.068000 | 0.000000 | 0 |
| 22 | OV/SPC | 0 | 0 | 1.0 | 0.2500 | 0.0 | 0.6875 | 1.091516 | 0.000000 | 0 |
| 23 | Emergency | 0 | 0 | 1.0 | 0.1875 | 0.0 | 0.6875 | 1.118034 | 0.000000 | 0 |
| 24 | Rx | 0 | 0 | 1.0 | 0.1250 | 0.0 | 0.6875 | 1.147347 | 0.000000 | 0 |
| 25 | Deductible | 0 | 0 | 1.0 | 0.1250 | 0.0 | 0.6875 | 1.147347 | 0.000000 | 0 |
| 26 | Rx | 0 | 0 | 1.0 | 0.0625 | 0.0 | 0.6875 | 1.179248 | 0.000000 | 0 |
| 27 | Copay | 0 | 0 | 1.0 | 0.0625 | 0.0 | 0.6875 | 1.179248 | 0.000000 | 0 |
| 28 | Gen | 0 | 0 | 1.0 | 0.0625 | 0.0 | 0.6875 | 1.179248 | 0.000000 | 0 |
| 29 | Rx | 0 | 0 | 1.0 | 0.0000 | 0.0 | 0.6875 | 1.213530 | 0.000000 | 0 |
| 30 | copay | 0 | 0 | 1.0 | 0.0000 | 0.0 | 0.6875 | 1.213530 | 0.000000 | 0 |
| 31 | Br | 0 | 0 | 1.0 | 0.0000 | 0.0 | 0.6875 | 1.213530 | 0.000000 | 0 |
| 32 | 11552 | 0 | 0 | 1.0 | 0.3750 | 0.0 | 0.6875 | 1.047691 | 1.000000 | 0 |
| 33 | BCIL | 0 | 0 | 1.0 | 0.3125 | 0.0 | 0.6875 | 1.068000 | 0.000000 | 0 |
| 34 | $20/$40 | 0 | 0 | 1.0 | 0.2500 | 0.0 | 0.6875 | 1.091516 | 0.571429 | 0 |
| 35 | 200 | 0 | 0 | 1.0 | 0.1875 | 0.0 | 0.6875 | 1.118034 | 1.000000 | 0 |
| 36 | 50 | 0 | 0 | 1.0 | 0.1250 | 0.0 | 0.6875 | 1.147347 | 1.000000 | 0 |
| 37 | $100/120 | 0 | 0 | 1.0 | 0.0625 | 0.0 | 0.6875 | 1.179248 | 0.750000 | 0 |
| 38 | $100/200/300 | 0 | 0 | 1.0 | 0.0000 | 0.0 | 0.6875 | 1.213530 | 0.750000 | 0 |

2016 | ```python
2017 |
2018 | ```
2019 |
2020 |
2021 | ```python
2022 |
2023 | ```
2024 |
2025 |
2026 | ```python
2027 |
2028 | ```
2029 |
2030 | > “For decades, machine learning approaches targeting Natural Language Processing problems have been based on shallow models (e.g., SVM and logistic regression) trained on very high dimensional and sparse features. In the last few years, neural networks based on dense vector representations have been producing superior results on various NLP tasks. This trend is sparked by the success of word embeddings and deep learning methods.” [1]
2031 |
2032 | We use the older, feature-based technique because:
2033 | 1. we have little data, and
2034 | 2. the data isn't really "natural language": it involves text, but it is less natural and fluid, and more structured.
2035 |
2036 | Source: [here](https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76)
2037 |
2038 | ### Upload Data
2039 |
2040 |
2041 | ```python
2042 | # deprecated
2043 | # first four sample
2044 | #tree = ET.parse('Card Samples/Scan02-15-2019 11 44 21.xml')
2045 | #root = tree.getroot()
2046 |
2047 | # second set of samples
2048 | #tree2 = ET.parse('Card Samples/Scan02-15-2019 11 44 21.xml')
2049 | #root2 = tree2.getroot()
2050 | ```
2051 |
2052 | ### Extract Text
2053 |
2054 |
2055 | ```python
2056 | # findall looks only one level down
2057 | # root[1][0][0][0][0][3].findall('w:t', ns)[0].text
2058 | ```
2059 |
2060 | This function returns the parsed tree and a dictionary of the namespaces in the XML file. The namespace map is needed to extract the text later.
2061 |
2062 |
2063 | ```python
2064 | # Example 2: https://www.programcreek.com/python/example/77333/xml.etree.ElementTree.iterparse
2065 |
2066 | # get namespaces
2067 | def xml_parse(xml_file):
2068 |     """
2069 |     Parse an XML file, returns a tree of nodes and a dict of namespaces
2070 |     :param xml_file: the input XML file
2071 |     :returns: (doc, ns_map)
2072 |     """
2073 |     root = None
2074 |     ns_map = {}  # prefix -> ns_uri
2075 |     for event, elem in ET.iterparse(xml_file, ['start-ns', 'start', 'end']):
2076 |         if event == 'start-ns':
2077 |             # elem = (prefix, ns_uri)
2078 |             ns_map[elem[0]] = elem[1]
2079 |         elif event == 'start':
2080 |             if root is None:
2081 |                 root = elem
2082 |     for prefix, uri in ns_map.items():
2083 |         ET.register_namespace(prefix, uri)
2084 |
2085 |     return (ET.ElementTree(root), ns_map)
2086 | ```
2087 |
2088 | This function extracts the text.
2089 |
2090 |
2091 | ```python
2092 | def words_from_root(file_path, xml_file):
2093 |     """
2094 |     Extract text from xml file
2095 |     Params:
2096 |         file_path: path to files
2097 |         xml_file: xml file to be parsed
2098 |     Returns:
2099 |         List of text in the xml
2100 |     """
2101 |     # create ElementTree object
2102 |     tree = ET.parse(file_path + '/' + xml_file)
2103 |     root = tree.getroot()
2104 |
2105 |     # create namespace map, for parsing
2106 |     doc, ns_map = xml_parse(file_path + '/' + xml_file)
2107 |
2108 |     # initialize output (list of text)
2109 |     words = []
2110 |
2111 |     # iterate recursively over the current element and all elements below it
2112 |     for elem in root.iter():
2113 |         # find elements with tag name "w:t"
2114 |         hts = elem.findall('w:t', ns_map)
2115 |
2116 |         # if any found, append
2117 |         if hts:
2118 |             words.append(hts[0].text)
2119 |     return words
2120 | ```
2121 |
2122 | Extract text from the four sample cards.
2123 |
2124 |
2125 | ```python
2126 | #from insurance_card_prediction import xml_parse, words_from_root
2127 | ```
2128 |
2129 |
2130 | ```python
2131 | # List of files
2132 | file_path = "Card Samples/renamed copies"
2133 | dirs = os.listdir(file_path)
2134 |
2135 | words_on_card = {}
2136 |
2137 | # Extract text
2138 | for file in dirs:
2139 | words_on_card[file] = words_from_root(file_path, file)
2140 |
2141 | # remove card6.xml. OCR is not good enough
2142 | del words_on_card['card6.xml']
2143 | ```
2144 |
2145 | ### Tokenization
2146 | * Each extracted string contains all of the words from one line of the card. Let's split them into a bag-of-words for each card.
2147 | * Link: https://www.nltk.org/book/ch03.html
2148 |
2149 | Lines of words to bag-of-words
2150 |
2151 |
2152 | ```python
2153 | bag_of_words = {}
2154 |
2155 | # turn lines-of-words into words
2156 |
2157 | # for each xml file
2158 | for key in sorted(words_on_card.keys()):
2159 |     bag = []
2160 |     # for each line in the XML
2161 |     for i, line in enumerate(words_on_card[key]):
2162 |         if i == 0:
2163 |             l = words_on_card[key][i:i+3]  # choose two nearest lines (five total)
2164 |         elif i == 1:
2165 |             l = words_on_card[key][i-1:i+3]
2166 |         else:
2167 |             l = words_on_card[key][i-2:i+3]
2168 |         l = ' '.join(l)
2169 |         #print(i, l)
2170 |
2171 |         list_of_words_on_line = nltk.word_tokenize(line)
2172 |         for word in list_of_words_on_line:
2173 |             # save word with all words on its line
2174 |             bag.append((word, l))
2175 |             #if 'Group' in word:
2176 |             #    print(l)
2177 |     bag_of_words[key] = bag
2178 | ```
2179 |
2180 | * Separating the words throws away information about the surrounding words. For example, 'Eric Davidson' turns into 'Eric' and 'Davidson', and they can never be rejoined. Similarly, the words "Delta Dental of Illinois" should stay together, but they will be separated.
2181 | * To start, I will ignore this complication and simply try to build a classifier to tell if a word is a _name_ or not.
2182 |
2183 | ## Name Classifier
2184 | 1. Combine words from all four samples into one big bag-of-words.
2185 | 2. Label words as "name" or not.
2186 | 3. Augment the dataset with extra words and names.
2187 |
2188 |
2189 | ```python
2190 | # 1. Combine words from all four samples
2191 | word_bag = []
2192 | for key in bag_of_words:
2193 |     word_bag += bag_of_words[key]
2194 | ```
2195 |
2196 | ### Target Variables
2197 | * Member name
2198 | * Group ID
2199 | * Member ID
2200 |
2201 | Label the data.
2202 |
2203 |
2204 | ```python
2205 | # Label words (name/not name)
2206 | for i, word in enumerate(word_bag):
2207 | pass #if i in ind_list_group_ids:
2208 | #print(i, word[0], word[1])
2209 | ```
2210 |
2211 |
2212 | ```python
2213 | # list of names
2214 | ind_list_names = [153,154,156,158,269,270,455,456,605,607,609,610,611,612,613,614,647,649,698,701,719,720,721, \
2215 | 722,723,724, 738,740,815,816,864,865,886,887,888,890,892,894,952,1032,1033,1034,1094,1095,1096, \
2216 | 1128,1129,1130,1190,1191,1192,1240,1241,1276,1277,1278,1326,1327,1328,1402,14031438,1439,1440, \
2217 | 1441,1462,1463,1468,1498,1500,1577,1579,1580,1641,1642,1694,1696,1749,1750,1790,1791,1845,1846, \
2218 | 1961,1962,2005,2006,2034,2041,2043,2092,2094] # Name, Member, ID
2219 | #ind_list_names_suspect = [647,649,701,719,720,721,722,723,724,740]
2220 | target_name = [1 if i in ind_list_names else 0 for i in range(len(word_bag))]
2221 |
2222 | # list of group IDs
2223 | ind_list_group_ids = [142,483,619,662,683,770,804,860,972,1029,1197,1229,1425,1762,1805,1870,1982,2034,2050,2132] # Group, Number
2224 | #ind_list_group_ids_suspect = [662,683,804,860,972]
2225 | target_group_id = [1 if i in ind_list_group_ids else 0 for i in range(len(word_bag))]
2226 |
2227 | # list of member IDs
2228 | ind_list_member_ids = [149,150,274,460,602,653,704,708,710,715,743,744,812,868,915,947,1025,1154,1155,1236,1237,1318,1405,1444, \
2229 | 1524,1525,1603,1754,1755,1788,1839,1912,1958,2003,2047,2086,2115] # Member , ID
2230 | #ind_list_member_ids_suspect = [653,704,708,701,715,743,744,812,868,915,947]
2231 | target_member_id = [1 if i in ind_list_member_ids else 0 for i in range(len(word_bag))]
2232 | ```
2233 |
2234 | The names in the dataset come as all UPPERCASE. This makes the model predict _name_ for any word with an uppercase letter. To avoid giving the model this easy tell, change the original names to the same case as the simulated names: uppercase first letter and the rest lowercase.
2235 |
2236 | Fix case of the true names.
2237 |
2238 |
2239 | ```python
2240 | # Change names from ALL UPPERCASE to Capitalize (only first letter)
2241 | word_bag_cap = []
2242 | for i, tup in enumerate(word_bag):
2243 |     if i in ind_list_names:
2244 |         name_cap = tup[0].capitalize()
2245 |         tup = (name_cap, tup[1])
2246 |         word_bag_cap.append(tup)
2247 |     else:
2248 |         word_bag_cap.append(tup)
2249 | ```
2250 |
2251 | Turn the data into a Pandas dataframe.
2252 |
2253 |
2254 | ```python
2255 | # create dataframe
2256 | df = pd.DataFrame(index=[tup[0] for tup in word_bag_cap])
2257 |
2258 | df = df.assign(target_name=target_name)
2259 | df = df.assign(target_group_id=target_group_id)
2260 | df = df.assign(target_member_id=target_member_id)
2261 | df = df.assign(words_in_line=[tup[1] for tup in word_bag_cap])
2262 | ```
2263 |
2264 |
2265 | ```python
2266 | df.head()
2267 | ```
2268 |
2269 |
2270 |
2271 |
2272 |
2273 |
|               | target_name | target_group_id | target_member_id | words_in_line |
|---------------|-------------|-----------------|------------------|---------------|
| www.aetna.com | 0 | 0 | 0 | www.aetna.com PAYER NUMBER 60054 0735 Informed... |
| PAYER         | 0 | 0 | 0 | www.aetna.com PAYER NUMBER 60054 0735 Informed... |
| NUMBER        | 0 | 0 | 0 | www.aetna.com PAYER NUMBER 60054 0735 Informed... |
| 60054         | 0 | 0 | 0 | www.aetna.com PAYER NUMBER 60054 0735 Informed... |
| 0735          | 0 | 0 | 0 | www.aetna.com PAYER NUMBER 60054 0735 Informed... |

2338 |
2339 | ```python
2340 | # number of "names"
2341 | print('There are', df.target_name.sum(), 'names out of', df.shape[0], 'words, a fraction of', round(df.target_name.sum()/df.shape[0],3))
2342 | ```
2343 |
2344 | There are 90 names out of 2204 words, a fraction of 0.041
2345 |
2346 |
2347 | # Features
2348 |
2349 |
2350 | ```python
2351 | def create_features(df):
2352 | """
2353 | Creates features from words
2354 | Args: dataframe with words as the indices
2355 | Returns: dataframe with the new features
2356 | """
2357 |
2358 | length = []
2359 | frac_alpha = []
2360 | frac_alpha_upper = []
2361 | frac_alpha_lower = []
2362 | frac_digit = []
2363 | frac_punc = []
2364 | frac_punc_dashes = []
2365 | frac_punc_periods = []
2366 | name_keywords_ind = []
2367 | groupid_keywords_ind = []
2368 | memberid_keywords_ind = []
2369 | five_or_more_digits = []
2370 |
2371 | # iterate down rows
2372 | for index, row in df.iterrows():
2373 |
2374 | leng = len(index)
2375 | length.append(leng)
2376 | frac_alpha.append(sum([1 for char in index if char.isalpha()]) / leng)
2377 | frac_alpha_upper.append(sum([1 for char in index if (char.isalpha() and char.isupper())]) / leng)
2378 | frac_alpha_lower.append(sum([1 for char in index if (char.isalpha() and char.islower())]) / leng)
2379 | frac_digit.append(sum([1 for char in index if char.isdigit()]) / leng)
2380 |
2381 | count = lambda l1,l2: sum([1 for x in l1 if x in l2])
2382 | frac_punc.append( count(index,set(string.punctuation)) / leng)
2383 | frac_punc_dashes.append( count(index,set(["-"])) / leng)
2384 | frac_punc_periods.append( count(index,set(["."])) / leng)
2385 |
2386 | words_in_line = row.words_in_line.split()
2387 | words_in_line_wo_punc = [word.translate(str.maketrans('', '', string.punctuation)) for word in words_in_line]
2388 |
2389 | name_keywords_ind.append( sum([1 for word in words_in_line_wo_punc if word.lower() in ['name','member','id']]) >= 1 )
2390 | groupid_keywords_ind.append( sum([1 for word in words_in_line_wo_punc if word.lower() in ['group', 'grp']]) >=1 )
2391 | memberid_keywords_ind.append( sum([1 for word in words_in_line_wo_punc if word.lower() in ['member', 'id']]) >=1 )
2392 |
2393 | five_or_more_digits.append(sum([1 for char in index if char.isdigit()]) >=5)
2394 |
2395 | # add simulated=0 if not there already
2396 | if 'simulated' not in df.columns:
2397 | df = df.assign(simulated = df.shape[0]*[0])
2398 |
2399 | # find length of each string
2400 | df = df.assign(length=length);
2401 |
2402 | # add new columns
2403 | df = df.assign(frac_alpha=frac_alpha)
2404 | df = df.assign(frac_alpha_upper=frac_alpha_upper)
2405 | df = df.assign(frac_alpha_lower=frac_alpha_lower)
2406 | df = df.assign(frac_digit=frac_digit)
2407 | df = df.assign(frac_punc=frac_punc)
2408 | df = df.assign(frac_punc_dashes=frac_punc_dashes)
2409 | df = df.assign(frac_punc_periods=frac_punc_periods)
2410 | df = df.assign(name_keywords_ind=name_keywords_ind)
2411 | df = df.assign(groupid_keywords_ind=groupid_keywords_ind)
2412 | df = df.assign(memberid_keywords_ind=memberid_keywords_ind)
2413 | df = df.assign(five_or_more_digits=five_or_more_digits)
2414 |
2415 | # check NLTK's corpus of names: https://www.cs.cmu.edu/Groups/AI/areas/nlp/corpora/names/0.html
2416 | # THIS IS CHEATING
2417 | #df = df.assign(in_nltk_corpus=[1 if word.capitalize() in names.words() else 0 for word in df.index.values])
2418 |
2419 | return df
2420 | ```
2421 |
2422 |
2423 | ```python
2424 | #from insurance_card_prediction import create_features
2425 | ```
2426 |
2427 |
2428 | ```python
2429 | # create features
2430 | df = create_features(df)
2431 | ```
2432 |
2433 | # Simulate Data
2434 |
2435 | 1) Names
2436 | 2) Group IDs
2437 | 3) Member IDs
2438 |
2439 | * The labels are highly imbalanced; only about 4% of the examples (90 of 2204 words) are "names." I will add more names to the dataset. Names are very easy to sample from, since we know what realistic names look like, unlike some other variables.
2440 | * Sample uniformly from the top 10 baby names of the 1960s
2441 |
2442 |
2443 | ```python
2444 | def simulate_data(df, targets=['group IDs']):
2445 | """
2446 | Simulates names by sampling uniformly from top-10 baby names from 1960s
2447 | Args:
2448 | df: dataframe
2449 | targets: list of strings of the target variables to simulate
2450 |
2451 | Returns: dataframe augmented with more names
2452 | """
2453 |
2454 | # SIMULATE NAMES
2455 | if 'names' in targets:
2456 | print('Simulating names')
2457 | # names https://www.ssa.gov/oact/babynames/decades/names1960s.html
2458 | male_names = ['Michael','David','John','James','Robert' ,'Mark','William','Richard','Thomas','Jeffrey']
2459 | female_names = ['Lisa','Mary','Susan','Karen','Kimberly','Patricia','Linda','Donna','Michelle','Cynthia']
2460 | all_names = male_names+female_names
2461 |
2462 | # generate samples
2463 | num_samples = 611
2464 | np.random.seed(102)
2465 | new_names = np.random.choice(a=all_names, size=num_samples)
2466 | new_names1 = []
2467 |
2468 | # randomly change the capitalization (UPPER, lower, Capital)
2469 | for i, name in enumerate(new_names):
2470 | j = np.random.choice(2)
2471 | if j == 0:
2472 | new_names1.append(name.lower())
2473 | elif j == 1:
2474 | new_names1.append(name.upper())
2475 | else:
2476 | new_names1.append(name)
2477 |
2478 | # dataframe with new samples
2479 | df2 = pd.DataFrame(index=new_names1)
2480 | df2 = df2.assign(target_name=num_samples*[1.])
2481 | df2 = df2.assign(target_group_id=num_samples*[0.])
2482 | df2 = df2.assign(target_member_id=num_samples*[0.])
2483 |
2484 | df = df.append(df2)
2485 |
2486 |
2487 | # SIMULATE GROUP IDS
2488 | if 'group IDs' in targets:
2489 | print('Simulating Group IDs')
2490 |
2491 | # list group IDs
2492 | grp_ids = list(df.loc[df.target_group_id==1].index)
2493 |
2494 | # bring ratio to 40% balance of group IDs
2495 | num_new_grp_ids = int((2*df.shape[0] - 5*len(grp_ids))/3)
2496 |
2497 | # for new words
2498 | new_grp_ids = []
2499 |
2500 | np.random.seed(102)
2501 | # to replace alpha character randomly
2502 | replace_word = lambda w: random.choice(string.ascii_uppercase) if w.isupper() else random.choice(string.ascii_lowercase)
2503 |
2504 | # enough to reach 40%
2505 | for i in range(int(num_new_grp_ids)):
2506 |
2507 | # randomly select Group ID to copy
2508 | grp_id_to_copy = random.choice(grp_ids)
2509 |
2510 | # copy Group ID
2511 | new_grp_ids.append(''.join([random.choice(string.digits) if char.isdigit() else replace_word(char) if char.isalpha() else char for char in grp_id_to_copy]))
2512 |
2513 | # create new dataframe
2514 | df3 = pd.DataFrame(index=new_grp_ids)
2515 | df3 = df3.assign(target_name=num_new_grp_ids*[0.])
2516 | df3 = df3.assign(target_group_id=num_new_grp_ids*[1.]) # all ones
2517 | df3 = df3.assign(target_member_id=num_new_grp_ids*[0.])
2518 | df3 = df3.assign(words_in_line=new_grp_ids) # lines by themselves (no neighbors)
2519 | df3 = df3.assign(simulated=df3.shape[0]*[1.]) # simulated=1
2520 |
2521 | # append new df to old df
2522 | df = df.append(df3)[df.columns.tolist()]
2523 |
2524 |
2525 | # SIMULATE MEMBER IDS
2526 | if 'member IDs' in targets:
2527 | print('Simulating Member IDs')
2528 | # list member IDs
2529 | member_ids = list(df.loc[df.target_member_id==1].index)
2530 |
        # bring Member IDs up to 40% of all rows (same derivation as for Group IDs)
        num_new_member_ids = int((2*df.shape[0] - 5*len(member_ids))/3)
2533 |
2534 | # for new words
2535 | new_member_ids = []
2536 |
2537 | np.random.seed(102)
2538 | # to replace alpha character randomly
2539 | replace_word = lambda w: random.choice(string.ascii_uppercase) if w.isupper() else random.choice(string.ascii_lowercase)
2540 |
2541 | # enough to reach 40%
2542 | for i in range(int(num_new_member_ids)):
2543 |
2544 | # randomly select member ID to copy
2545 | member_id_to_copy = random.choice(member_ids)
2546 |
            # copy the Member ID, replacing each character with a random one of the same type
2548 | new_member_ids.append(''.join([random.choice(string.digits) if char.isdigit() else replace_word(char) if char.isalpha() else char for char in member_id_to_copy]))
2549 |
2550 | # create new dataframe
2551 | df4 = pd.DataFrame(index=new_member_ids)
2552 | df4 = df4.assign(target_name=num_new_member_ids*[0.])
2553 | df4 = df4.assign(target_group_id=num_new_member_ids*[0.])
2554 | df4 = df4.assign(target_member_id=num_new_member_ids*[1.]) # all ones
2555 | df4 = df4.assign(words_in_line=new_member_ids) # lines by themselves (no neighbors)
2556 | df4 = df4.assign(simulated=df4.shape[0]*[1.]) # simulated=1
2557 |
2558 | # append new df to old df
2559 | df = df.append(df4)[df.columns.tolist()]
2560 |
2561 | return df
2562 | ```
2563 |
2564 | Simulate desired data.
2565 |
2566 |
2567 | ```python
2568 | #from insurance_card_prediction import simulate_data_1
2569 |
2570 | #simulate data (BOTH group IDs and member IDs)
2571 | df1 = simulate_data(df, ['group IDs','member IDs'])
2572 | #df1 = df.copy()
2573 | ```
2574 |
2575 | Simulating Group IDs
2576 | Simulating Member IDs
2577 |
2578 |
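
As a quick sanity check (a minimal sketch, assuming the `target_group_id`/`target_member_id` columns and the `df1` produced above), the class balance can be recomputed to confirm the simulation pushed the ID classes toward the intended 40% share; the Group ID fraction ends up a little lower because the Member ID rows are added afterwards.

```python
# Sketch: recompute class fractions after simulation.
n = df1.shape[0]
print('rows:', n)
print('frac Group IDs:  {:.3f}'.format(df1.target_group_id.sum() / n))
print('frac Member IDs: {:.3f}'.format(df1.target_member_id.sum() / n))
print('frac names:      {:.3f}'.format(df1.target_name.sum() / n))
```
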
2579 | Create features for the new rows.
2580 |
2581 |
2582 | ```python
2583 | # create features for new rows
2584 | df2 = create_features(df1)
2585 | ```
2586 |
2587 | # Modeling
2588 |
2589 | ## Prepare Data
2590 |
1. Standardize the numeric variables
2. Encode the categorical variables (label-encoded in the cell below; the one-hot version is left commented out)
2593 |
2594 |
2595 | ```python
2596 | # https://jorisvandenbossche.github.io/blog/2018/05/28/scikit-learn-columntransformer/
2597 | from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, LabelBinarizer
2598 | from sklearn.compose import ColumnTransformer, make_column_transformer
2599 |
numerical_columns = df2.columns[5:13].tolist()              # numeric features (includes LENGTH at column 5)
categorical_columns = df2.columns[[4,13,14,15,16]].tolist() # categorical features (LENGTH excluded)
2602 |
2603 | from sklearn.base import TransformerMixin #gives fit_transform method for free
2604 |
2605 | class MyLabelBinarizer(TransformerMixin):
2606 | def __init__(self, *args, **kwargs):
2607 | self.encoder = LabelBinarizer(*args, **kwargs)
2608 | def fit(self, x, y=0):
2609 | self.encoder.fit(x)
2610 | return self
2611 | def transform(self, x, y=0):
2612 | return self.encoder.transform(x)
2613 |
2614 | preprocess = make_column_transformer(
2615 | (StandardScaler(), numerical_columns)
2616 | #(MyLabelBinarizer(), categorical_columns)
2617 | #OneHotEncoder(categories='auto'), categorical_columns)
2618 | )
2619 |
2620 | df_cat = pd.DataFrame(index=df2.index)
2621 |
2622 | # one-hot encode categorical variables
2623 | for col in categorical_columns:
2624 | #df_temp = df2[col].astype('category')
2625 | #df_temp_2 = pd.get_dummies(df_temp, prefix=col)
2626 | #df_cat = pd.concat([df_cat, df_temp_2], axis=1)
2627 | le = LabelEncoder()
2628 | X = le.fit_transform(df2[col])
2629 | df_temp = pd.DataFrame(data=X, index=df2.index.values , columns=[col])
2630 | df_cat = pd.concat([df_cat, df_temp], axis=1)
2631 |
2632 | #for col in categorical_columns:
2633 | # df_temp
2634 |
2635 | # transform. returns numpy array
2636 | X = preprocess.fit_transform(df2)
2637 | df_num = pd.DataFrame(index=df2.index, data=X, columns=numerical_columns)
2638 |
2639 | # transform. returns numpy array
2640 | #X = preprocess.fit_transform(df2)
2641 |
2642 | # combine numerical and concatenated
2643 | df3 = pd.concat([df_num, df_cat], axis=1)
2644 |
# true label: 1 = Group ID, 0 = everything else (names, Member IDs, other text)
2646 | y = []
2647 | for index, row in df2.iterrows():
2648 | if row.target_name == 1:
2649 | y.append(0)
2650 | elif row.target_group_id == 1:
2651 | y.append(1)
2652 | elif row.target_member_id == 1:
2653 | y.append(0)
2654 | else:
2655 | y.append(0)
2656 |
2657 | # add target variable
2658 | df3 = df3.assign(y=y)
2659 | ```
2660 |
2661 | C:\Users\Emile\Anaconda3\lib\site-packages\sklearn\preprocessing\data.py:625: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
2662 | return self.partial_fit(X, y)
2663 | C:\Users\Emile\Anaconda3\lib\site-packages\sklearn\base.py:462: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by StandardScaler.
2664 | return self.fit(X, **fit_params).transform(X)
2665 |
2666 |
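
For reference, the one-hot route mentioned above (and left commented out in the cell) could be expressed as a single `ColumnTransformer`; this is only a sketch of that alternative, not the preprocessing actually used for the models below.

```python
# Sketch: scale numeric features and one-hot encode categoricals in one step.
# Not the pipeline used below, which keeps the label-encoded categoricals.
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder

preprocess_ohe = make_column_transformer(
    (StandardScaler(), numerical_columns),
    (OneHotEncoder(handle_unknown='ignore'), categorical_columns))

# X_ohe = preprocess_ohe.fit_transform(df2)  # feature matrix with one-hot categoricals
```
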
2667 | Split into training and test sets
2668 |
2669 |
2670 | ```python
2671 | from sklearn.model_selection import StratifiedShuffleSplit
2672 | from sklearn.model_selection import train_test_split
2673 | pd.options.mode.chained_assignment = None # default='warn'
2674 |
2675 | X = df3.iloc[:,:-1]
2676 | y = df3.iloc[:,-1]
2677 |
2678 | X_train, X_test, y_train, y_test = train_test_split(X , y,
2679 | stratify=y,
2680 | test_size=0.4,
2681 | random_state=102)
2682 |
2683 | X_train_simulated = pd.DataFrame(X_train.loc[:, ('simulated')].copy())
2684 | X_test_simulated = pd.DataFrame(X_test.loc[:, ('simulated')].copy())
2685 |
2686 | X_train.drop(columns=['simulated'], inplace=True)
2687 | X_test.drop(columns=['simulated'], inplace=True)
2688 |
2689 | y_train = pd.DataFrame(y_train)
2690 | y_test = pd.DataFrame(y_test)
2691 | ```
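
To confirm the stratified split preserved the label mix, a quick check (a small sketch, using the `y` column created above):

```python
# Sketch: the class proportions should be nearly identical in train and test.
print(y_train.y.value_counts(normalize=True).round(3))
print(y_test.y.value_counts(normalize=True).round(3))
```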
2692 |
2693 | ## Gradient Boosting
2694 |
2695 |
2696 | ```python
2697 | import xgboost as xgb
2698 | from xgboost import XGBClassifier
2699 | from xgboost import plot_importance
2700 | from sklearn.metrics import accuracy_score
2701 | from sklearn.metrics import confusion_matrix
2702 | ```
2703 |
2704 |
2705 | ```python
# parameters for the low-level xgb.train API (left over; the sklearn wrapper below does not use them)
param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'multi:softmax', 'num_class':3, 'random_state':102}
num_round = 2
2709 |
2710 | # initialize model
2711 | gb_clf = XGBClassifier(max_depth=3, objective='binary:logistic')
2712 |
2713 | # fit model
2714 | eval_set = [(X_test, y_test.y.values)]
2715 | gb_clf.fit(X_train, y_train.y.values, eval_metric="error", eval_set=eval_set, verbose=True, early_stopping_rounds=None)
2716 |
2717 | # make prediction
2718 | y_pred = gb_clf.predict(X_test)
2719 |
2720 | # predict probabilties
2721 | y_pred_prob = gb_clf.predict_proba(X_test)
2722 |
2723 | print("Accuracy on training set: {:.3f}".format(gb_clf.score(X_train, y_train)))
2724 | print("Accuracy on test set: {:.3f}".format(gb_clf.score(X_test, y_test)))
2725 | ```
2726 |
2727 | [0] validation_0-error:0.193669
2728 | [1] validation_0-error:0.193669
2729 | [2] validation_0-error:0.157018
2730 | [3] validation_0-error:0.143274
2731 | [4] validation_0-error:0.143274
2732 | [5] validation_0-error:0.143274
2733 | [6] validation_0-error:0.143274
2734 | [7] validation_0-error:0.154519
2735 | [8] validation_0-error:0.154519
2736 | [9] validation_0-error:0.127447
2737 | [10] validation_0-error:0.127447
2738 | [11] validation_0-error:0.127447
2739 | [12] validation_0-error:0.127447
2740 | [13] validation_0-error:0.127447
2741 | [14] validation_0-error:0.103707
2742 | [15] validation_0-error:0.103707
2743 | [16] validation_0-error:0.103707
2744 | [17] validation_0-error:0.103707
2745 | [18] validation_0-error:0.103707
2746 | [19] validation_0-error:0.103707
2747 | [20] validation_0-error:0.103707
2748 | [21] validation_0-error:0.103707
2749 | [22] validation_0-error:0.098709
2750 | [23] validation_0-error:0.098292
2751 | [24] validation_0-error:0.098292
2752 | [25] validation_0-error:0.098292
2753 | [26] validation_0-error:0.098292
2754 | [27] validation_0-error:0.098292
2755 | [28] validation_0-error:0.098292
2756 | [29] validation_0-error:0.098292
2757 | [30] validation_0-error:0.098292
2758 | [31] validation_0-error:0.098292
2759 | [32] validation_0-error:0.098709
2760 | [33] validation_0-error:0.098709
2761 | [34] validation_0-error:0.099125
2762 | [35] validation_0-error:0.099125
2763 | [36] validation_0-error:0.099542
2764 | [37] validation_0-error:0.099542
2765 | [38] validation_0-error:0.099542
2766 | [39] validation_0-error:0.099542
2767 | [40] validation_0-error:0.099542
2768 | [41] validation_0-error:0.099542
2769 | [42] validation_0-error:0.099542
2770 | [43] validation_0-error:0.099542
2771 | [44] validation_0-error:0.099542
2772 | [45] validation_0-error:0.099125
2773 | [46] validation_0-error:0.098292
2774 | [47] validation_0-error:0.098292
2775 | [48] validation_0-error:0.098292
2776 | [49] validation_0-error:0.098292
2777 | [50] validation_0-error:0.098292
2778 | [51] validation_0-error:0.098292
2779 | [52] validation_0-error:0.098292
2780 | [53] validation_0-error:0.098292
2781 | [54] validation_0-error:0.087047
2782 | [55] validation_0-error:0.087047
2783 | [56] validation_0-error:0.087047
2784 | [57] validation_0-error:0.087047
2785 | [58] validation_0-error:0.08788
2786 | [59] validation_0-error:0.08788
2787 | [60] validation_0-error:0.087047
2788 | [61] validation_0-error:0.086214
2789 | [62] validation_0-error:0.086214
2790 | [63] validation_0-error:0.086214
2791 | [64] validation_0-error:0.078301
2792 | [65] validation_0-error:0.078301
2793 | [66] validation_0-error:0.078301
2794 | [67] validation_0-error:0.078301
2795 | [68] validation_0-error:0.078301
2796 | [69] validation_0-error:0.077884
2797 | [70] validation_0-error:0.077884
2798 | [71] validation_0-error:0.077884
2799 | [72] validation_0-error:0.077884
2800 | [73] validation_0-error:0.064556
2801 | [74] validation_0-error:0.064556
2802 | [75] validation_0-error:0.064556
2803 | [76] validation_0-error:0.064556
2804 | [77] validation_0-error:0.064556
2805 | [78] validation_0-error:0.064556
2806 | [79] validation_0-error:0.064556
2807 | [80] validation_0-error:0.064556
2808 | [81] validation_0-error:0.064556
2809 | [82] validation_0-error:0.064556
2810 | [83] validation_0-error:0.064556
2811 | [84] validation_0-error:0.064556
2812 | [85] validation_0-error:0.064556
2813 | [86] validation_0-error:0.064556
2814 | [87] validation_0-error:0.062474
2815 | [88] validation_0-error:0.062474
2816 | [89] validation_0-error:0.062474
2817 | [90] validation_0-error:0.062474
2818 | [91] validation_0-error:0.062474
2819 | [92] validation_0-error:0.062474
2820 | [93] validation_0-error:0.062474
2821 | [94] validation_0-error:0.062474
2822 | [95] validation_0-error:0.062474
2823 | [96] validation_0-error:0.062474
2824 | [97] validation_0-error:0.062474
2825 | [98] validation_0-error:0.062474
2826 | [99] validation_0-error:0.062474
2827 | Accuracy on training set: 0.930
2828 | Accuracy on test set: 0.938
2829 |
2830 |
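The validation error flattens out well before 100 rounds, so early stopping is a natural refinement; a hedged sketch with the same settings as above, just with `early_stopping_rounds` turned on (not the run whose log is shown):

```python
# Sketch: stop boosting once validation error has not improved for 10 rounds.
gb_clf_es = XGBClassifier(max_depth=3, objective='binary:logistic')
gb_clf_es.fit(X_train, y_train.y.values,
              eval_metric="error",
              eval_set=[(X_test, y_test.y.values)],
              early_stopping_rounds=10,
              verbose=False)
# gb_clf_es.best_iteration then gives the round the final model would use
```
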
2831 | Plot feature importances
2832 |
2833 |
2834 | ```python
2835 | plot_importance(gb_clf);
2836 | ```
2837 |
2838 |
2839 | 
2840 |
2841 |
2842 | Combine actuals with predictions.
2843 |
2844 |
2845 | ```python
2846 | # combine actual with predicted
2847 | y_test_combined = y_test.rename(index=str, columns={"y": "y_true"}).assign(y_pred=y_pred).assign(simulated=X_test_simulated['simulated'].values.astype(int))
2848 |
2849 | y_test_combined = y_test_combined.assign(got_right=(y_test_combined.y_true == y_test_combined.y_pred).astype(int))
2850 |
2851 | y_test_combined = y_test_combined.assign(y_pred_prob_0=y_pred_prob[:,0])
2852 | y_test_combined = y_test_combined.assign(y_pred_prob_1=y_pred_prob[:,1])
2853 | #y_test_combined = y_test_combined.assign(y_pred_prob_2=y_pred_prob[:,2])
2854 |
2855 | ```
2856 |
2857 |
2858 | ```python
2859 | from insurance_card_prediction import plot_confusion_matrix
2860 | ```
2861 |
2862 | Plot the confusion matrix.
2863 |
2864 |
2865 | ```python
2866 | cm = confusion_matrix(y_test_combined.y_true.values, y_test_combined.y_pred.values)
2867 |
2868 | # plot it
2869 | plot_confusion_matrix(cm,
2870 | target_names=['Not Group ID','Group ID'],
2871 | title='Total (real and simulated)',
2872 | cmap=None,
2873 | normalize=True)
2874 |
2875 | print(cm)
2876 | ```
2877 |
2878 |
2879 | 
2880 |
2881 |
2882 | [[1724 96]
2883 | [ 54 527]]
2884 |
2885 |
2886 | Split into real and simulated.
2887 |
2888 |
2889 | ```python
2890 | # cm1 is simulated
2891 | cm1 = confusion_matrix(y_test_combined.loc[y_test_combined.simulated.values==1].y_true.values, y_test_combined.loc[y_test_combined.simulated.values==1].y_pred.values)
2892 |
# pad cm1 to 3x3 when the 'Neither' class is absent among the simulated rows
if cm1.shape == (2,2):
    newrow = np.array([[0,0]])
    cm1 = np.vstack((newrow, cm1))

    newcol = np.array([[0],[0],[0]])
    cm1 = np.hstack((newcol, cm1))
2899 |
2900 | # plot it
2901 | plot_confusion_matrix(cm1,
2902 | target_names=['Neither','Group ID', 'Member ID'],
2903 | title='Simulated',
2904 | cmap=None,
2905 | normalize=True)
2906 |
2907 | print(cm1)
2908 | ```
2909 |
2910 |
2911 | 
2912 |
2913 |
2914 | [[ 0 0 0]
2915 | [ 0 527 46]
2916 | [ 1 73 875]]
2917 |
2918 |
Now look at the real (non-simulated) examples.
2920 |
2921 |
2922 | ```python
2923 | # cm2 is real
2924 | cm2 = confusion_matrix(y_test_combined.loc[y_test_combined.simulated.values==0].y_true.values, y_test_combined.loc[y_test_combined.simulated.values==0].y_pred.values)
2925 |
2926 | #newrow = np.array([[0,0]])
2927 | #cm1 = np.vstack((newrow, cm1))
2928 |
2929 | #newcol = np.array([[0],[0],[0]])
2930 | #cm1 = np.hstack((newcol, cm1))
2931 |
2932 | # plot it
2933 | plot_confusion_matrix(cm2,
2934 | target_names=['Neither','Group ID', 'Member ID'],
2935 | title='Real',
2936 | cmap=None,
2937 | normalize=True)
2938 |
2939 | print(cm2)
2940 | ```
2941 |
2942 |
2943 | 
2944 |
2945 |
2946 | [[778 37 45]
2947 | [ 4 4 0]
2948 | [ 3 0 8]]
2949 |
2950 |
2951 |
2952 | ```python
# note: the first selection is immediately overwritten, so the table below shows
# all test rows (real and simulated) with y_true == 0 that were misclassified
a = y_test_combined.loc[y_test_combined.simulated==0]
a = y_test_combined.loc[y_test_combined.y_true==0]
a = a.loc[a.y_pred!=0]
a.loc[:,['y_true','y_pred']]
2957 | ```
2958 |
2959 |
2960 |
2961 |
2962 |
2963 |
2976 |
2977 |
2978 |
|               | y_true | y_pred |
|---------------|--------|--------|
| 30374-0800    | 0      | 2      |
| 43            | 0      | 2      |
| 35            | 0      | 2      |
| WASHINGTON    | 0      | 1      |
| 20            | 0      | 2      |
| 87726         | 0      | 2      |
| 05000         | 0      | 2      |
| 100000001     | 0      | 2      |
| 610011        | 0      | 1      |
| CHILDI        | 0      | 1      |
| 91            | 0      | 2      |
| HOSPITAL      | 0      | 1      |
| ME            | 0      | 2      |
| IL            | 0      | 2      |
| REQUIRED      | 0      | 1      |
| MO            | 0      | 2      |
| TRINET        | 0      | 1      |
| HOSPITAL      | 0      | 1      |
| ID            | 0      | 2      |
| NO            | 0      | 2      |
| 003586        | 0      | 1      |
| 76342         | 0      | 2      |
| DENTAL        | 0      | 1      |
| NA            | 0      | 2      |
| 004915        | 0      | 1      |
| CMS-H3832     | 0      | 2      |
| 00000000      | 0      | 1      |
| OF            | 0      | 2      |
| 00699999      | 0      | 1      |
| PO            | 0      | 2      |
| ...           | ...    | ...    |
| 23735125      | 0      | 1      |
| PROVIDERS     | 0      | 2      |
| GA            | 0      | 2      |
| Independent   | 0      | 2      |
| HEALTH        | 0      | 1      |
| DR            | 0      | 2      |
| 004336        | 0      | 1      |
| A000012334456 | 0      | 1      |
| HOSPITAL      | 0      | 1      |
| OH            | 0      | 2      |
| 11            | 0      | 2      |
| 01            | 0      | 2      |
| 004336        | 0      | 1      |
| 7952304120    | 0      | 2      |
| CHILD2SMITH   | 0      | 1      |
| 999999999     | 0      | 2      |
| 23735125      | 0      | 1      |
| H0432         | 0      | 1      |
| 80840         | 0      | 2      |
| RX            | 0      | 2      |
| 122222222     | 0      | 2      |
| XX            | 0      | 2      |
| 123456789     | 0      | 2      |
| 00000         | 0      | 2      |
| KANSAS        | 0      | 1      |
| WG            | 0      | 2      |
| 610342        | 0      | 1      |
| HAWAII        | 0      | 1      |
| 017010        | 0      | 1      |
| 232-1164      | 0      | 1      |

82 rows × 2 columns
3293 |
3294 |
3295 |
3296 |
3297 |
3298 | ```python
3299 | y_test_combined.loc[y_test_combined.y_true!=0].head(10)
3300 | ```
3301 |
3302 |
3303 |
3304 |
3305 |
3306 |
3319 |
3320 |
3321 |
|              | y_true | y_pred | simulated | got_right | y_pred_prob_0 | y_pred_prob_1 | y_pred_prob_2 |
|--------------|--------|--------|-----------|-----------|---------------|---------------|---------------|
| PHOAUWTIK    | 2      | 2      | 1         | 1         | 0.099076      | 0.096246      | 0.804678      |
| 987057200179 | 1      | 1      | 1         | 1         | 0.016479      | 0.885554      | 0.097966      |
| 87019363638  | 2      | 2      | 1         | 1         | 0.011182      | 0.065865      | 0.922952      |
| 891100251    | 2      | 2      | 1         | 1         | 0.005606      | 0.019466      | 0.974928      |
| 763537830378 | 1      | 1      | 1         | 1         | 0.016479      | 0.885554      | 0.097966      |
| 966337739-12 | 2      | 2      | 1         | 1         | 0.016701      | 0.052785      | 0.930514      |
| 08889719     | 1      | 1      | 1         | 1         | 0.037798      | 0.690810      | 0.271392      |
| K8535        | 2      | 1      | 1         | 0         | 0.051245      | 0.678274      | 0.270481      |
| 53755132     | 1      | 1      | 1         | 1         | 0.037798      | 0.690810      | 0.271392      |
| 377869079-31 | 2      | 2      | 1         | 1         | 0.016701      | 0.052785      | 0.930514      |
3432 |
3433 |
3434 |
3435 |
3436 |
3437 |
3438 |
3439 |
3440 | ```python
3441 | low_prob_mask = y_test_combined.iloc[:,4:7].max(axis=1) < 0.57
3442 | y_test_combined.loc[low_prob_mask][115:126]
3443 | ```
3444 |
3445 |
3446 |
3447 |
3448 |
3449 |
3462 |
3463 |
3464 |
|             | y_true | y_pred | simulated | got_right | y_pred_prob_0 | y_pred_prob_1 | y_pred_prob_2 |
|-------------|--------|--------|-----------|-----------|---------------|---------------|---------------|
| 39250       | 2      | 2      | 1         | 1         | 0.049223      | 0.381607      | 0.569170      |
| 10527       | 2      | 2      | 1         | 1         | 0.049223      | 0.381607      | 0.569170      |
| 00577       | 1      | 2      | 1         | 0         | 0.049223      | 0.381607      | 0.569170      |
| 26709       | 2      | 2      | 1         | 1         | 0.049223      | 0.381607      | 0.569170      |
| 92161       | 2      | 2      | 1         | 1         | 0.049223      | 0.381607      | 0.569170      |
| 57854       | 1      | 2      | 1         | 0         | 0.049223      | 0.381607      | 0.569170      |
| 93095       | 1      | 2      | 1         | 0         | 0.049223      | 0.381607      | 0.569170      |
| 08328       | 2      | 2      | 1         | 1         | 0.049223      | 0.381607      | 0.569170      |
| 85563       | 1      | 2      | 1         | 0         | 0.049223      | 0.381607      | 0.569170      |
| 7952304120  | 0      | 2      | 0         | 0         | 0.410342      | 0.130900      | 0.458758      |
| CHILD2SMITH | 0      | 1      | 0         | 0         | 0.225169      | 0.429728      | 0.345103      |
3585 |
3586 |
3587 |
3588 |
3589 |
3590 |
3591 |
3592 |
3593 | ```python
3594 |
3595 | ```
3596 |
3597 |
3598 | ```python
3599 | y_test_combined.iloc[:,4:7].max(axis=1).hist(bins=20);
3600 | plt.xlabel('Predicted probability of max class')
3601 | plt.ylabel('Frequency')
3602 | ```
3603 |
3604 |
3605 |
3606 |
3607 | Text(0, 0.5, 'Frequency')
3608 |
3609 |
3610 |
3611 |
3612 | 
3613 |
3614 |
3615 |
3616 | ```python
3617 | import seaborn as sns
3618 | ```
3619 |
3620 |
3621 | ```python
3622 | sns.set_style('darkgrid')
3623 | sns.distplot(y_test_combined.iloc[:,4:7].max(axis=1), norm_hist=False);
3624 | plt.xlabel('Predicted probability of max class')
3625 | plt.ylabel('Frequency in percentages')
3626 | ```
3627 |
3628 |
3629 |
3630 |
3631 | Text(0, 0.5, 'Frequency in percentages')
3632 |
3633 |
3634 |
3635 |
3636 | 
3637 |
3638 |
3639 |
3640 | ```python
3641 | sns.set_style('darkgrid')
3642 | ```
3643 |
3644 |
3668 |
## Random Forest
Used for Group ID classification.
3671 |
3672 |
3673 | ```python
3674 | import warnings
3675 | warnings.filterwarnings("ignore", category=FutureWarning)
3676 |
3677 | from sklearn.ensemble import RandomForestClassifier
3678 |
3679 | forest_clf = RandomForestClassifier(random_state=102)
3680 | forest_clf.fit(X_train, y_train.y.values)
3681 |
3682 | y_pred = forest_clf.predict(X_test)
print("%d out of %d examples were wrong."
      % ((y_test.y != y_pred).sum(), X_test.shape[0]))
3685 | ```
3686 |
187 out of 2401 examples were wrong.
3688 |
3689 |
3690 | Confusion matrix
3691 |
3692 |
3693 | ```python
3694 | from sklearn.metrics import confusion_matrix
3695 |
3696 | cm = confusion_matrix(y_test.y.values, y_pred)
3697 | print("Confusion matrix: \n",cm, "\n")
3698 |
3699 | tn, fp, fn, tp = cm.ravel()
3700 | print(' TN:',tn, '\n FP:',fp, '\n FN:',fn, '\n TP',tp)
3701 | ```
3702 |
3703 | Confusion matrix:
3704 | [[1344 97]
3705 | [ 90 870]]
3706 |
3707 | TN: 1344
3708 | FP: 97
3709 | FN: 90
3710 | TP 870
3711 |
3712 |
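The same counts give precision and recall for the Group ID class (a small sketch using the `tn`, `fp`, `fn`, `tp` values just printed):

```python
# Sketch: precision/recall/F1 for the positive (Group ID) class.
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print('Precision: {:.3f}  Recall: {:.3f}  F1: {:.3f}'.format(precision, recall, f1))
```
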
3713 | Plot the confusion matrix
3714 |
3715 |
3716 | ```python
3717 | # https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
3718 | def plot_confusion_matrix(cm,
3719 | target_names,
3720 | title='Confusion matrix',
3721 | cmap=None,
3722 | normalize=True):
3723 | """
3724 | given a sklearn confusion matrix (cm), make a nice plot
3725 |
3726 | Arguments
3727 | ---------
3728 | cm: confusion matrix from sklearn.metrics.confusion_matrix
3729 |
3730 | target_names: given classification classes such as [0, 1, 2]
3731 | the class names, for example: ['high', 'medium', 'low']
3732 |
3733 | title: the text to display at the top of the matrix
3734 |
3735 | cmap: the gradient of the values displayed from matplotlib.pyplot.cm
3736 | see http://matplotlib.org/examples/color/colormaps_reference.html
3737 | plt.get_cmap('jet') or plt.cm.Blues
3738 |
3739 | normalize: If False, plot the raw numbers
3740 | If True, plot the proportions
3741 |
3742 | Usage
3743 | -----
3744 | plot_confusion_matrix(cm = cm, # confusion matrix created by
3745 | # sklearn.metrics.confusion_matrix
3746 | normalize = True, # show proportions
3747 | target_names = y_labels_vals, # list of names of the classes
3748 | title = best_estimator_name) # title of graph
3749 |
    Citation
3751 | ---------
3752 | http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
3753 |
3754 | """
3755 | import matplotlib.pyplot as plt
3756 | import numpy as np
3757 | import itertools
3758 |
3759 | accuracy = np.trace(cm) / float(np.sum(cm))
3760 | misclass = 1 - accuracy
3761 |
3762 | if cmap is None:
3763 | cmap = plt.get_cmap('Blues')
3764 |
3765 | plt.figure(figsize=(8, 6))
3766 | plt.imshow(cm, interpolation='nearest', cmap=cmap)
3767 | plt.title(title)
3768 | plt.colorbar()
3769 |
3770 | if target_names is not None:
3771 | tick_marks = np.arange(len(target_names))
3772 | plt.xticks(tick_marks, target_names, rotation=45)
3773 | plt.yticks(tick_marks, target_names)
3774 |
3775 | if normalize:
3776 | cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
3777 |
3778 |
3779 | thresh = cm.max() / 1.5 if normalize else cm.max() / 2
3780 | for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
3781 | if normalize:
            plt.text(j, i, "{:0.2f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="black", fontsize=30)
3785 | else:
3786 | plt.text(j, i, "{:,}".format(cm[i, j]),
3787 | horizontalalignment="center",
3788 | color="white" if cm[i, j] > thresh else "black", fontsize=30)
3789 |
3790 |
3791 | plt.tight_layout()
3792 | plt.ylabel('True label')
3793 | plt.xlabel('Predicted label')
3794 | #plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
3795 | plt.show()
3796 | ```
3797 |
3798 |
3799 | ```python
3800 | # plot it
3801 | plot_confusion_matrix(cm,
3802 | target_names=['Not a Group ID','Group ID'],
3803 | title='Confusion matrix',
3804 | cmap=None,
3805 | normalize=True)
3806 |
3807 | print(cm)
3808 | ```
3809 |
3810 |
3811 | 
3812 |
3813 |
3814 | [[825 48]
3815 | [ 2 580]]
3816 |
3817 |
3818 |
3819 | ```python
3820 | # combine actual with predicted
3821 | y_test_combined = y_test.rename(index=str, columns={"y": "y_true"}).assign(y_pred=y_pred).assign(simulated=X_test_simulated['simulated_1'].values.astype(int))
3822 | #y_test_combined = y_test_combined.assign(simulated_0=X_test_simulated['simulated_1'].values)
3823 | y_test_combined = y_test_combined.assign(got_right=(y_test_combined.y_true == y_test_combined.y_pred).astype(int))
3824 |
3825 | y_test_combined.head()
3826 | ```
3827 |
3828 |
3829 |
3830 |
3831 |
3832 |
3845 |
3846 |
3847 |
|                  | y_true | y_pred | simulated | got_right |
|------------------|--------|--------|-----------|-----------|
| INSURANCE        | 0      | 0      | 0         | 1         |
| John             | 0      | 0      | 0         | 1         |
| 967462-020-14860 | 1      | 1      | 1         | 1         |
| AT               | 0      | 0      | 0         | 1         |
| 636498           | 1      | 1      | 1         | 1         |
3890 |
3891 |
3892 |
3893 |
3894 |
3895 |
3896 |
3897 |
3898 | ```python
3899 | mask = (y_test_combined.simulated==0)# & (y_test_combined.y_true==0)
3900 |
3901 | cm2 = confusion_matrix(y_test_combined[mask].y_true.values, y_test_combined[mask].y_pred.values)
3902 |
3903 | if cm2.shape == (1,1):
3904 | cm2 = np.array([[0,0],[0,cm2[0,0]]])
3905 |
3906 |
3907 | plot_confusion_matrix(cm2,
3908 | target_names=['Not a Group ID','Group ID'],
3909 | title='Confusion matrix',
3910 | cmap=None,
3911 | normalize=True);
3912 |
3913 | print('Real data')
3914 | print("Confusion matrix: \n",cm2, "\n")
3915 |
3916 | ```
3917 |
3918 |
3919 | 
3920 |
3921 |
3922 | Real data
3923 | Confusion matrix:
3924 | [[825 48]
3925 | [ 2 8]]
3926 |
3927 |
3928 |
3929 |
3963 |
3964 | Find the area under the curve.
3965 |
3966 |
3967 | ```python
3968 | """from sklearn.metrics import roc_auc_score
3969 |
3970 | y_scores = forest_clf.predict_proba(X)
3971 | print('AUC:', round(roc_auc_score(y, y_scores[:,1]),3))"""
3972 | ```
3973 |
3974 |
3975 |
3976 |
3977 | "from sklearn.metrics import roc_auc_score\n\ny_scores = forest_clf.predict_proba(X)\nprint('AUC:', round(roc_auc_score(y, y_scores[:,1]),3))"
3978 |
3979 |
3980 |
3981 | Find feature importances
3982 |
3983 |
3984 | ```python
3985 | """# feature importance
3986 | df_fi = pd.DataFrame(index = df.columns.tolist()[3:])
3987 | df_fi = df_fi.assign(importance=forest_clf.feature_importances_)
3988 | df_fi = df_fi.sort_values(by=['importance'], ascending=False)"""
3989 | ```
3990 |
3991 |
3992 |
3993 |
3994 | "# feature importance\ndf_fi = pd.DataFrame(index = df.columns.tolist()[3:])\ndf_fi = df_fi.assign(importance=forest_clf.feature_importances_)\ndf_fi = df_fi.sort_values(by=['importance'], ascending=False)"
3995 |
3996 |
3997 |
3998 |
3999 | ```python
4000 |
4001 | ```
4002 |
4003 | Plot feature importances
4004 |
4005 |
4006 | ```python
4007 | # Plot feature importances
4008 | cols = X_train.columns.values
4009 | importances = forest_clf.feature_importances_
4010 | indices = np.argsort(importances)[::-1]
4011 |
4012 | plt.figure()
4013 | plt.title("Feature importances")
4014 | plt.bar(range(X_train.shape[1]), importances[indices],
4015 | color="b", align="center")
plt.xticks(range(X_train.shape[1]), cols[indices])  # label each bar with its (sorted) feature name
plt.xlim([-1, X_train.shape[1]])
4018 | plt.xticks(rotation=45)
4019 | plt.show();
4020 | ```
4021 |
4022 |
4023 | 
4024 |
4025 |
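The bar chart becomes hard to read once the tick labels overlap; a small sketch that lists the same importances as a sorted table (assumes the fitted `forest_clf` and `X_train` from above):

```python
# Sketch: feature importances as a sorted table, so the names stay readable.
fi = pd.Series(forest_clf.feature_importances_, index=X_train.columns)
print(fi.sort_values(ascending=False).round(3))
```
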
### Plot decision boundaries
4027 |
4028 | https://scikit-learn.org/stable/auto_examples/ensemble/plot_voting_decision_regions.html
4029 |
4030 |
4031 | ```python
4032 | print(__doc__)
4033 |
4034 | from itertools import product
4035 |
4036 | import numpy as np
4037 | import matplotlib.pyplot as plt
4038 |
4039 | from sklearn import datasets
4040 | from sklearn.tree import DecisionTreeClassifier
4041 | from sklearn.neighbors import KNeighborsClassifier
4042 | from sklearn.svm import SVC
4043 | from sklearn.ensemble import VotingClassifier, RandomForestClassifier
4044 | from sklearn.linear_model import LogisticRegression
4045 |
# Use two of our own features instead of the iris example data from the original sklearn snippet
X = X_train.loc[:,['frac_alpha','frac_digit']].values
y = y_train.y.values
#iris = datasets.load_iris()
#X = iris.data[:, [0, 2]]
#y = iris.target
4052 |
4053 | # Training classifiers
4054 | clf1 = DecisionTreeClassifier(max_depth=4)
4055 | clf2 = KNeighborsClassifier(n_neighbors=7)
4056 | clf3 = LogisticRegression()
4057 | #clf3 = SVC(gamma=.1, kernel='rbf', probability=True)
4058 | eclf = RandomForestClassifier(random_state=102)
4059 | #eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
4060 | # ('svc', clf3)],
4061 | # voting='soft', weights=[2, 1, 2])
4062 |
4063 | clf1.fit(X, y)
4064 | clf2.fit(X, y)
4065 | clf3.fit(X, y)
4066 | eclf.fit(X, y)
4067 |
4068 | # Plotting decision regions
4069 | x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
4070 | y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
4071 | xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
4072 | np.arange(y_min, y_max, 0.1))
4073 |
4074 | f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
4075 |
4076 | for idx, clf, tt in zip(product([0, 1], [0, 1]),
4077 | [clf1, clf2, clf3, eclf],
4078 | ['Decision Tree (depth=4)', 'KNN (k=7)',
4079 | 'Logistic', 'Random Forest']):
4080 |
4081 | Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
4082 | Z = Z.reshape(xx.shape)
4083 |
4084 | axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
4085 | axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y,
4086 | s=20, edgecolor='k')
4087 | axarr[idx[0], idx[1]].set_title(tt)
4088 |
4089 | plt.show()
4090 | ```
4091 |
4092 | Automatically created module for IPython interactive environment
4093 |
4094 |
4095 |
4096 | 
4097 |
4098 |
4099 |
4113 |
4114 |
4115 | ```python
4116 | # AUC
4117 | from sklearn.metrics import roc_auc_score
4118 |
4119 | y_test_scores = forest_clf.predict_proba(X_test)
4120 | print('AUC:', round(roc_auc_score(y_test, y_test_scores[:,1]),3))
4121 | ```
4122 |
4123 | AUC: 0.988
4124 |
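To see the trade-off behind that AUC, the full ROC curve can be plotted; a sketch reusing the `y_test_scores` computed above:

```python
# Sketch: ROC curve for the Group ID class from the random forest scores.
from sklearn.metrics import roc_curve

fpr, tpr, _ = roc_curve(y_test.y.values, y_test_scores[:, 1])
plt.plot(fpr, tpr, label='Random forest')
plt.plot([0, 1], [0, 1], linestyle='--', label='Chance')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend()
plt.show()
```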
4125 |
4126 | Train model on simulated data and test on real data.
4127 |
4128 |
4129 | ```python
4130 | # put all real group ids in test set.
4131 | np.random.seed(103)
4132 | ind_test = np.where(np.logical_and(df3.simulated_1==0, df3.y==1))[0]
4133 | sample = [i for i in range(df3.shape[0]) if i not in ind_test]
4134 | more_indices = np.random.choice(sample, int(len(ind_test)*20), replace=False)
4135 | more_indices1 = [i for i in more_indices if df3.simulated_1[i]==0] # throw away if it's simulated
4136 | ind_test = np.concatenate((ind_test, more_indices1))
4137 |
4138 | ind_train = np.array([i for i in range(df3.shape[0]) if i not in ind_test])
4139 |
4140 | X_train = df3.iloc[ind_train,:-1].copy()
4141 | X_test = df3.iloc[ind_test,:-1].copy()
4142 |
4143 | y_train = df3.iloc[ind_train,-1].copy()
4144 | y_test = df3.iloc[ind_test,-1].copy()
4145 |
4146 | X_train_simulated = pd.DataFrame(X_train['simulated_1'].copy())
4147 | X_test_simulated = pd.DataFrame(X_test['simulated_1'].copy())
4148 |
4149 | X_train.drop(columns=['simulated_0', 'simulated_1'], inplace=True)
4150 | X_test.drop(columns=['simulated_0', 'simulated_1'], inplace=True)
4151 |
4152 | y_train = pd.DataFrame(y_train)
4153 | y_test = pd.DataFrame(y_test)
4154 | ```
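
Before fitting, it is worth verifying that the split does what the comment claims, i.e. that no real (non-simulated) Group ID remains in the training set (a small check reusing `df3` and `ind_train`):

```python
# Sketch: count real Group IDs that ended up in training; expect 0.
train_rows = df3.iloc[ind_train]
leaked = int(((train_rows.simulated_1 == 0) & (train_rows.y == 1)).sum())
print('real Group IDs left in the training set:', leaked)
```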
4155 |
4156 |
4157 | ```python
4158 | forest_clf1 = RandomForestClassifier(random_state=102)
4159 | forest_clf1.fit(X_train, y_train.y.values)
4160 |
4161 | y_pred = forest_clf1.predict(X_test)
print("%d out of %d examples were wrong."
      % ((y_test.y != y_pred).sum(), X_test.shape[0]))
4164 | ```
4165 |
15 out of 287 examples were wrong.
4167 |
4168 |
4169 |
4170 | ```python
4171 | cm3 = confusion_matrix(y_test.y.values, y_pred)
4172 |
if cm3.shape == (1,1):
    cm3 = np.array([[0,0],[0,cm3[0,0]]])
4175 |
4176 |
4177 | plot_confusion_matrix(cm3,
4178 | target_names=['Not a Group ID','Group ID'],
4179 | title='Confusion matrix',
4180 | cmap=None,
4181 | normalize=True);
4182 |
4183 | print('Test set has all the real group IDs')
4184 | print("Confusion matrix: \n",cm3, "\n")
4185 | ```
4186 |
4187 |
4188 | 
4189 |
4190 |
4191 | Test set has all the real group IDs
4192 | Confusion matrix:
4193 | [[254 11]
4194 | [ 4 18]]
4195 |
4196 |
4197 |
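Because this test set contains every real Group ID, the second row of `cm3` directly measures how many of them the model recovers; a small sketch of that recall calculation:

```python
# Sketch: recall on real Group IDs, read off the second row of cm3.
fn_real, tp_real = cm3[1, 0], cm3[1, 1]
print('Group ID recall: {:.3f}'.format(tp_real / (tp_real + fn_real)))  # 18/22
```
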
4198 |
4212 |
--------------------------------------------------------------------------------