├── .ipynb_checkpoints
├── BeautifulSoupCode-checkpoint.ipynb
├── Data Storage-checkpoint.ipynb
├── Dimensionality Reduction-checkpoint.ipynb
├── List, Dictionary, Sets-checkpoint.ipynb
├── NumPy-checkpoint.ipynb
├── Python - Strings-checkpoint.ipynb
├── Python 1 - Getting Started-checkpoint.ipynb
├── Python 2 - Conditional, loops-checkpoint.ipynb
├── Python Programming 1-checkpoint.ipynb
├── TensorFlow 1-checkpoint.ipynb
├── Tuples, Sets & Dictionary-checkpoint.ipynb
├── Understanding List-checkpoint.ipynb
├── Untitled-checkpoint.ipynb
└── Untitled1-checkpoint.ipynb
├── .py
├── 2. Understanding Strings.ipynb
├── Airbus - NumPy & Pandas.ipynb
├── Airbus - Python.ipynb
├── BeautifulSoupCode.ipynb
├── Cerner 1.ipynb
├── Cerner 2.ipynb
├── Cerner 3.ipynb
├── Data Storage.ipynb
├── Day 2 - SN.ipynb
├── Day1 - Airbus.ipynb
├── Day1.py
├── Deep Learning using Python.txt
├── Deep learning using Tensorflow
├── Dimensionality Reduction.ipynb
├── Exercises.ipynb
├── Getting started.ipynb
├── Impetus Day 1.ipynb
├── Impetus Day2.ipynb
├── Introduction to Python.ipynb
├── Java-flipkart
├── List 2 & Dictionary.ipynb
├── List, Dictionary, Sets.ipynb
├── NumPy.ipynb
├── Python - Strings.ipynb
├── Python 1 - Getting Started.ipynb
├── Python 2 - Conditional, loops.ipynb
├── Python Day 1.ipynb
├── Python Programming 1.ipynb
├── Python-1.ipynb
├── SCIO-Python-Day1.ipynb
├── Session 1.ipynb
├── Storage - List, Dictionary, Tuple, Sets.ipynb
├── TensorFlow 1.ipynb
├── Tuples, Sets & Dictionary.ipynb
├── Understanding List.ipynb
├── Untitled.ipynb
├── Untitled1.ipynb
├── XML-Parsing.ipynb
├── a
├── a.py
├── a.txt
├── advanced_features = [sdsd
├── asyncio-code.py
├── books.xml
├── classes-code.py
├── classes-more-code.py
├── classes.py
├── cloud-tag.py
├── code
├── conditional.py
├── control-code.py
├── create_xml.py
├── create_xml2.py
├── csv_reader.py
├── decorator.py
├── dict-to-dict.py
├── dict.py
├── exception.py
├── f.py
├── file
├── file.json
├── file.xml
├── file2.json
├── file_oper.py
├── file_operations.py
├── find_word.py
├── find_word_argparse.py
├── func-code.py
├── func.py
├── functional_prog.py
├── functional_prog2.py
├── functions.py
├── g
├── generators.py
├── hello.py
├── in_file
├── input.csv
├── json-parser.py
├── jsonfile.json
├── jsonreader-basic.py
├── jsonreader.py
├── kmeans-code.py
├── l2
├── list_programs.py
├── lists.py
├── ml-solution.py
├── movies.xml
├── my_code.py
├── my_mod.py
├── my_mod.pyc
├── my_mod_use.py
├── numbers.py
├── numbers.pyc
├── os-code.py
├── out_file
├── output1.html
├── panda-code.py
├── parse_books_xml.py
├── parse_movie.py
├── parse_xml.py
├── pyclass1.py
├── python-extc.c
├── quotesbot-master
├── .gitignore
├── LICENSE
├── README.md
├── quotesbot
│ ├── __init__.py
│ ├── items.py
│ ├── pipelines.py
│ ├── settings.py
│ └── spiders
│ │ ├── __init__.py
│ │ ├── toscrape-css.py
│ │ └── toscrape-xpath.py
└── scrapy.cfg
├── regularExpressions.py
├── sdsd
├── search-page.html
├── server.py
├── setup.py
├── string_programs.py
├── student.xml
├── sub-code.py
├── sub-code2.py
├── sub-code3.py
├── subprocess-code.py
├── test-sleep.py
├── test.py
├── test.xml
├── threads-code.py
├── try-except.code
├── try-except.py
├── try-except.pyc
├── update_xml.py
├── web-scrap-code.py
├── web-scrap.py
├── web-scrap.pyc
├── xl.py
├── xl_write.csv
├── xmlparser.py
├── xmlreader.py
└── xtd.xml
/.ipynb_checkpoints/BeautifulSoupCode-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/Dimensionality Reduction-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/List, Dictionary, Sets-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/NumPy-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/Python - Strings-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/Python 1 - Getting Started-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/Python Programming 1-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stdout",
10 | "output_type": "stream",
11 | "text": [
12 | "Hello World\n"
13 | ]
14 | }
15 | ],
16 | "source": [
17 | "print ('Hello World')"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 2,
23 | "metadata": {
24 | "collapsed": true
25 | },
26 | "outputs": [],
27 | "source": [
28 | "a = 10\n",
29 | "b = 10"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 3,
35 | "metadata": {},
36 | "outputs": [
37 | {
38 | "data": {
39 | "text/plain": [
40 | "True"
41 | ]
42 | },
43 | "execution_count": 3,
44 | "metadata": {},
45 | "output_type": "execute_result"
46 | }
47 | ],
48 | "source": [
49 | "a is b"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 4,
55 | "metadata": {},
56 | "outputs": [
57 | {
58 | "data": {
59 | "text/plain": [
60 | "1707204656"
61 | ]
62 | },
63 | "execution_count": 4,
64 | "metadata": {},
65 | "output_type": "execute_result"
66 | }
67 | ],
68 | "source": [
69 | "id(a)"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": 5,
75 | "metadata": {},
76 | "outputs": [
77 | {
78 | "data": {
79 | "text/plain": [
80 | "1707204656"
81 | ]
82 | },
83 | "execution_count": 5,
84 | "metadata": {},
85 | "output_type": "execute_result"
86 | }
87 | ],
88 | "source": [
89 | "id(b)"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 6,
95 | "metadata": {
96 | "collapsed": true
97 | },
98 | "outputs": [],
99 | "source": [
100 | "a = 11"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 7,
106 | "metadata": {},
107 | "outputs": [
108 | {
109 | "data": {
110 | "text/plain": [
111 | "10"
112 | ]
113 | },
114 | "execution_count": 7,
115 | "metadata": {},
116 | "output_type": "execute_result"
117 | }
118 | ],
119 | "source": [
120 | "b"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": 8,
126 | "metadata": {
127 | "collapsed": true
128 | },
129 | "outputs": [],
130 | "source": [
131 | "c = 300\n",
132 | "d = 400"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": 9,
138 | "metadata": {},
139 | "outputs": [
140 | {
141 | "data": {
142 | "text/plain": [
143 | "False"
144 | ]
145 | },
146 | "execution_count": 9,
147 | "metadata": {},
148 | "output_type": "execute_result"
149 | }
150 | ],
151 | "source": [
152 | "c is d"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": 10,
158 | "metadata": {},
159 | "outputs": [
160 | {
161 | "data": {
162 | "text/plain": [
163 | "int"
164 | ]
165 | },
166 | "execution_count": 10,
167 | "metadata": {},
168 | "output_type": "execute_result"
169 | }
170 | ],
171 | "source": [
172 | "type(a)"
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "execution_count": 11,
178 | "metadata": {
179 | "collapsed": true
180 | },
181 | "outputs": [],
182 | "source": [
183 | "# Python is dynamically typed programming language\n",
184 | "# Type of the variable is assigned based on the value assigned to it"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": 12,
190 | "metadata": {
191 | "collapsed": true
192 | },
193 | "outputs": [],
194 | "source": [
195 | "a = 7\n",
196 | "a = 'Hello World'"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": 13,
202 | "metadata": {
203 | "collapsed": true
204 | },
205 | "outputs": [],
206 | "source": [
207 | "import math"
208 | ]
209 | },
210 | {
211 | "cell_type": "code",
212 | "execution_count": 14,
213 | "metadata": {},
214 | "outputs": [
215 | {
216 | "data": {
217 | "text/plain": [
218 | "True"
219 | ]
220 | },
221 | "execution_count": 14,
222 | "metadata": {},
223 | "output_type": "execute_result"
224 | }
225 | ],
226 | "source": [
227 | "'a' in 'abcde'"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": 15,
233 | "metadata": {
234 | "collapsed": true
235 | },
236 | "outputs": [],
237 | "source": [
238 | "s = 'Hello World'"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": 16,
244 | "metadata": {},
245 | "outputs": [
246 | {
247 | "data": {
248 | "text/plain": [
249 | "'el'"
250 | ]
251 | },
252 | "execution_count": 16,
253 | "metadata": {},
254 | "output_type": "execute_result"
255 | }
256 | ],
257 | "source": [
258 | "s[1:3]"
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": 17,
264 | "metadata": {},
265 | "outputs": [
266 | {
267 | "data": {
268 | "text/plain": [
269 | "'dlroW olleH'"
270 | ]
271 | },
272 | "execution_count": 17,
273 | "metadata": {},
274 | "output_type": "execute_result"
275 | }
276 | ],
277 | "source": [
278 | "s[::-1]"
279 | ]
280 | },
281 | {
282 | "cell_type": "code",
283 | "execution_count": 18,
284 | "metadata": {},
285 | "outputs": [
286 | {
287 | "data": {
288 | "text/plain": [
289 | "'drWolH'"
290 | ]
291 | },
292 | "execution_count": 18,
293 | "metadata": {},
294 | "output_type": "execute_result"
295 | }
296 | ],
297 | "source": [
298 | "s[::-2]"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": 19,
304 | "metadata": {},
305 | "outputs": [
306 | {
307 | "data": {
308 | "text/plain": [
309 | "'Hello world'"
310 | ]
311 | },
312 | "execution_count": 19,
313 | "metadata": {},
314 | "output_type": "execute_result"
315 | }
316 | ],
317 | "source": [
318 | "s.capitalize()"
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "execution_count": 20,
324 | "metadata": {},
325 | "outputs": [
326 | {
327 | "data": {
328 | "text/plain": [
329 | "'Hello World'"
330 | ]
331 | },
332 | "execution_count": 20,
333 | "metadata": {},
334 | "output_type": "execute_result"
335 | }
336 | ],
337 | "source": [
338 | "s"
339 | ]
340 | },
341 | {
342 | "cell_type": "code",
343 | "execution_count": 21,
344 | "metadata": {
345 | "collapsed": true
346 | },
347 | "outputs": [],
348 | "source": [
349 | "# string is immutable"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": 22,
355 | "metadata": {
356 | "collapsed": true
357 | },
358 | "outputs": [],
359 | "source": [
360 | "a = 'Hello'\n",
361 | "b = 'Hello'"
362 | ]
363 | },
364 | {
365 | "cell_type": "code",
366 | "execution_count": 23,
367 | "metadata": {},
368 | "outputs": [
369 | {
370 | "data": {
371 | "text/plain": [
372 | "2557628659504"
373 | ]
374 | },
375 | "execution_count": 23,
376 | "metadata": {},
377 | "output_type": "execute_result"
378 | }
379 | ],
380 | "source": [
381 | "id(a)"
382 | ]
383 | },
384 | {
385 | "cell_type": "code",
386 | "execution_count": 24,
387 | "metadata": {},
388 | "outputs": [
389 | {
390 | "data": {
391 | "text/plain": [
392 | "2557628659504"
393 | ]
394 | },
395 | "execution_count": 24,
396 | "metadata": {},
397 | "output_type": "execute_result"
398 | }
399 | ],
400 | "source": [
401 | "id(b)"
402 | ]
403 | },
404 | {
405 | "cell_type": "code",
406 | "execution_count": 26,
407 | "metadata": {},
408 | "outputs": [
409 | {
410 | "data": {
411 | "text/plain": [
412 | "1"
413 | ]
414 | },
415 | "execution_count": 26,
416 | "metadata": {},
417 | "output_type": "execute_result"
418 | }
419 | ],
420 | "source": [
421 | "s.count('el')"
422 | ]
423 | },
424 | {
425 | "cell_type": "code",
426 | "execution_count": 27,
427 | "metadata": {},
428 | "outputs": [
429 | {
430 | "name": "stdout",
431 | "output_type": "stream",
432 | "text": [
433 | "Help on built-in function expandtabs:\n",
434 | "\n",
435 | "expandtabs(...) method of builtins.str instance\n",
436 | " S.expandtabs(tabsize=8) -> str\n",
437 | " \n",
438 | " Return a copy of S where all tab characters are expanded using spaces.\n",
439 | " If tabsize is not given, a tab size of 8 characters is assumed.\n",
440 | "\n"
441 | ]
442 | }
443 | ],
444 | "source": [
445 | "help(s.expandtabs)"
446 | ]
447 | },
448 | {
449 | "cell_type": "code",
450 | "execution_count": 31,
451 | "metadata": {},
452 | "outputs": [
453 | {
454 | "data": {
455 | "text/plain": [
456 | "'hello World'"
457 | ]
458 | },
459 | "execution_count": 31,
460 | "metadata": {},
461 | "output_type": "execute_result"
462 | }
463 | ],
464 | "source": [
465 | "s = 'hello\\tWorld'\n",
466 | "s.expandtabs(17)"
467 | ]
468 | },
469 | {
470 | "cell_type": "code",
471 | "execution_count": 32,
472 | "metadata": {
473 | "collapsed": true
474 | },
475 | "outputs": [],
476 | "source": [
477 | "s = 'hey how are you hey aa'"
478 | ]
479 | },
480 | {
481 | "cell_type": "code",
482 | "execution_count": 33,
483 | "metadata": {},
484 | "outputs": [
485 | {
486 | "data": {
487 | "text/plain": [
488 | "0"
489 | ]
490 | },
491 | "execution_count": 33,
492 | "metadata": {},
493 | "output_type": "execute_result"
494 | }
495 | ],
496 | "source": [
497 | "s.find('hey')"
498 | ]
499 | },
500 | {
501 | "cell_type": "code",
502 | "execution_count": 34,
503 | "metadata": {},
504 | "outputs": [
505 | {
506 | "data": {
507 | "text/plain": [
508 | "16"
509 | ]
510 | },
511 | "execution_count": 34,
512 | "metadata": {},
513 | "output_type": "execute_result"
514 | }
515 | ],
516 | "source": [
517 | "s.find('hey',1)"
518 | ]
519 | },
520 | {
521 | "cell_type": "code",
522 | "execution_count": 35,
523 | "metadata": {
524 | "collapsed": true
525 | },
526 | "outputs": [],
527 | "source": [
528 | "h = \"Congrats, {title} {name} for your {percent}% hike\""
529 | ]
530 | },
531 | {
532 | "cell_type": "code",
533 | "execution_count": 36,
534 | "metadata": {},
535 | "outputs": [
536 | {
537 | "data": {
538 | "text/plain": [
539 | "'Congrats, Mr Joshi for your 10% hike'"
540 | ]
541 | },
542 | "execution_count": 36,
543 | "metadata": {},
544 | "output_type": "execute_result"
545 | }
546 | ],
547 | "source": [
548 | "h.format(name='Joshi', title='Mr', percent='10')"
549 | ]
550 | },
551 | {
552 | "cell_type": "code",
553 | "execution_count": 37,
554 | "metadata": {
555 | "collapsed": true
556 | },
557 | "outputs": [],
558 | "source": [
559 | "s=\"I don't like this\""
560 | ]
561 | },
562 | {
563 | "cell_type": "code",
564 | "execution_count": 38,
565 | "metadata": {
566 | "collapsed": true
567 | },
568 | "outputs": [],
569 | "source": [
570 | "s='I don\\t like this'"
571 | ]
572 | },
573 | {
574 | "cell_type": "code",
575 | "execution_count": 39,
576 | "metadata": {},
577 | "outputs": [
578 | {
579 | "data": {
580 | "text/plain": [
581 | "'I don\\t like this'"
582 | ]
583 | },
584 | "execution_count": 39,
585 | "metadata": {},
586 | "output_type": "execute_result"
587 | }
588 | ],
589 | "source": [
590 | "s"
591 | ]
592 | },
593 | {
594 | "cell_type": "code",
595 | "execution_count": 42,
596 | "metadata": {
597 | "collapsed": true
598 | },
599 | "outputs": [],
600 | "source": [
601 | "s=\"I don\\n like this\""
602 | ]
603 | },
604 | {
605 | "cell_type": "code",
606 | "execution_count": 43,
607 | "metadata": {},
608 | "outputs": [
609 | {
610 | "data": {
611 | "text/plain": [
612 | "'I don\\n like this'"
613 | ]
614 | },
615 | "execution_count": 43,
616 | "metadata": {},
617 | "output_type": "execute_result"
618 | }
619 | ],
620 | "source": [
621 | "s"
622 | ]
623 | },
624 | {
625 | "cell_type": "code",
626 | "execution_count": 44,
627 | "metadata": {},
628 | "outputs": [
629 | {
630 | "data": {
631 | "text/plain": [
632 | "False"
633 | ]
634 | },
635 | "execution_count": 44,
636 | "metadata": {},
637 | "output_type": "execute_result"
638 | }
639 | ],
640 | "source": [
641 | "s.isupper()"
642 | ]
643 | },
644 | {
645 | "cell_type": "code",
646 | "execution_count": 45,
647 | "metadata": {},
648 | "outputs": [
649 | {
650 | "data": {
651 | "text/plain": [
652 | "'I don\\n like this'"
653 | ]
654 | },
655 | "execution_count": 45,
656 | "metadata": {},
657 | "output_type": "execute_result"
658 | }
659 | ],
660 | "source": [
661 | "s"
662 | ]
663 | },
664 | {
665 | "cell_type": "code",
666 | "execution_count": 46,
667 | "metadata": {
668 | "collapsed": true
669 | },
670 | "outputs": [],
671 | "source": [
672 | "l = ['I','like','Python']"
673 | ]
674 | },
675 | {
676 | "cell_type": "code",
677 | "execution_count": 48,
678 | "metadata": {},
679 | "outputs": [
680 | {
681 | "data": {
682 | "text/plain": [
683 | "'I like Python'"
684 | ]
685 | },
686 | "execution_count": 48,
687 | "metadata": {},
688 | "output_type": "execute_result"
689 | }
690 | ],
691 | "source": [
692 | "' '.join(l)"
693 | ]
694 | },
695 | {
696 | "cell_type": "code",
697 | "execution_count": 49,
698 | "metadata": {
699 | "collapsed": true
700 | },
701 | "outputs": [],
702 | "source": [
703 | "s = 'this is ax nice ax place'"
704 | ]
705 | },
706 | {
707 | "cell_type": "code",
708 | "execution_count": null,
709 | "metadata": {
710 | "collapsed": true
711 | },
712 | "outputs": [],
713 | "source": []
714 | }
715 | ],
716 | "metadata": {
717 | "kernelspec": {
718 | "display_name": "Python 3",
719 | "language": "python",
720 | "name": "python3"
721 | },
722 | "language_info": {
723 | "codemirror_mode": {
724 | "name": "ipython",
725 | "version": 3
726 | },
727 | "file_extension": ".py",
728 | "mimetype": "text/x-python",
729 | "name": "python",
730 | "nbconvert_exporter": "python",
731 | "pygments_lexer": "ipython3",
732 | "version": "3.5.0"
733 | }
734 | },
735 | "nbformat": 4,
736 | "nbformat_minor": 2
737 | }
738 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/TensorFlow 1-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/Untitled-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.ipynb_checkpoints/Untitled1-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [],
3 | "metadata": {},
4 | "nbformat": 4,
5 | "nbformat_minor": 2
6 | }
7 |
--------------------------------------------------------------------------------
/.py:
--------------------------------------------------------------------------------
"""Create a minimal Excel workbook with openpyxl and save it as sample.xlsx.

Demonstrates the three basic write operations: direct cell assignment,
row append, and automatic conversion of Python types (datetime).
"""
# All imports at the top (PEP 8) — the original imported datetime mid-script.
import datetime

from openpyxl import Workbook

wb = Workbook()

# Grab the active worksheet (created automatically with the workbook).
ws = wb.active

# Data can be assigned directly to cells.
ws['A1'] = 42

# Rows can also be appended.
ws.append([1, 2, 3])

# Python types will automatically be converted by openpyxl.
ws['A2'] = datetime.datetime.now()

# Save the file to the current working directory.
wb.save("sample.xlsx")
--------------------------------------------------------------------------------
/BeautifulSoupCode.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from bs4 import BeautifulSoup"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {
18 | "collapsed": true
19 | },
20 | "outputs": [],
21 | "source": [
    22 |     "html_doc = \"\"\"\n",
    23 |     "<html><head><title>The Dormouse's story</title></head>\n",
    24 |     "<body>\n",
    25 |     "<p class=\"title\"><b>The Dormouse's story</b></p>\n",
    26 |     "\n",
    27 |     "<p class=\"story\">Once upon a time there were three little sisters; and their names were\n",
    28 |     "<a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>,\n",
    29 |     "<a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and\n",
    30 |     "<a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>;\n",
    31 |     "and they lived at the bottom of a well.</p>\n",
    32 |     "\n",
    33 |     "<p class=\"story\">...</p>\n",
    34 |     "\"\"\""
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 3,
40 | "metadata": {
41 | "collapsed": true
42 | },
43 | "outputs": [],
44 | "source": [
45 | "from bs4 import BeautifulSoup\n",
46 | "soup = BeautifulSoup(html_doc, 'html.parser')"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 7,
52 | "metadata": {},
53 | "outputs": [
54 | {
55 | "name": "stdout",
56 | "output_type": "stream",
57 | "text": [
58 | "http://www.google.com\n",
59 | "http://www.google.com\n",
60 | "http://www.google.com\n"
61 | ]
62 | }
63 | ],
64 | "source": [
65 | "for a in soup.find_all('a'):\n",
66 | " a['href'] = 'http://www.google.com'\n",
67 | " print (a['href'])"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 8,
73 | "metadata": {},
74 | "outputs": [
75 | {
76 | "name": "stdout",
77 | "output_type": "stream",
78 | "text": [
79 | "Elsie\n",
80 | "Lacie\n",
81 | "Tillie\n"
82 | ]
83 | }
84 | ],
85 | "source": [
86 | "for a in soup.find_all('a'):\n",
87 | " print(a)"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": 10,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "with open(\"output1.html\", \"w\") as file:\n",
97 | " file.write(str(soup))"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": null,
103 | "metadata": {
104 | "collapsed": true
105 | },
106 | "outputs": [],
107 | "source": []
108 | }
109 | ],
110 | "metadata": {
111 | "kernelspec": {
112 | "display_name": "Python 3",
113 | "language": "python",
114 | "name": "python3"
115 | },
116 | "language_info": {
117 | "codemirror_mode": {
118 | "name": "ipython",
119 | "version": 3
120 | },
121 | "file_extension": ".py",
122 | "mimetype": "text/x-python",
123 | "name": "python",
124 | "nbconvert_exporter": "python",
125 | "pygments_lexer": "ipython3",
126 | "version": "3.5.0"
127 | }
128 | },
129 | "nbformat": 4,
130 | "nbformat_minor": 2
131 | }
132 |
--------------------------------------------------------------------------------
/Cerner 3.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "ename": "ModuleNotFoundError",
10 | "evalue": "No module named 'email.MIMEMultipart'",
11 | "output_type": "error",
12 | "traceback": [
13 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
14 | "\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
15 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0msmtplib\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0memail\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mMIMEMultipart\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mMIMEMultipart\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0memail\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mMIMEText\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mMIMEText\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0memail\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mMIMEBase\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mMIMEBase\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0memail\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mencoders\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
16 | "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'email.MIMEMultipart'"
17 | ]
18 | }
19 | ],
20 | "source": [
21 | "import smtplib\n",
    22 |     "from email.mime.multipart import MIMEMultipart\n",
    23 |     "from email.mime.text import MIMEText\n",
    24 |     "from email.mime.base import MIMEBase\n",
25 | "from email import encoders\n",
26 | "\n",
27 | "fromaddr = \"awantikdas@gmail.com\"\n",
28 | "toaddr = \"bangalorecoach@gmail.com\"\n",
29 | "\n",
30 | "msg = MIMEMultipart()\n",
31 | "\n",
32 | "msg['From'] = fromaddr\n",
33 | "msg['To'] = toaddr\n",
34 | "msg['Subject'] = \"SUBJECT OF THE EMAIL\"\n",
35 | "\n",
36 | "body = \"TEXT YOU WANT TO SEND\"\n",
37 | "\n",
38 | "msg.attach(MIMEText(body, 'plain'))\n",
39 | "\n",
40 | "filename = \"mail.py\"\n",
41 | "attachment = open(\"mail.py\", \"rb\")\n",
42 | "\n",
43 | "part = MIMEBase('application', 'octet-stream')\n",
44 | "part.set_payload((attachment).read())\n",
45 | "encoders.encode_base64(part)\n",
46 | "part.add_header('Content-Disposition', \"attachment; filename= %s\" % filename)\n",
47 | "\n",
48 | "msg.attach(part)\n",
49 | "\n",
50 | "server = smtplib.SMTP('smtp.gmail.com', 587)\n",
51 | "server.starttls()\n",
52 | "server.login(fromaddr, \"xxxxx\")\n",
53 | "text = msg.as_string()\n",
54 | "server.sendmail(fromaddr, toaddr, text)\n",
55 | "server.quit()"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "# Import smtplib for the actual sending function\n",
65 | "import smtplib\n",
66 | "\n",
67 | "# Import the email modules we'll need\n",
68 | "from email.message import EmailMessage\n",
69 | "\n",
70 | "# Open the plain text file whose name is in textfile for reading.\n",
71 | "with open(textfile) as fp:\n",
72 | " # Create a text/plain message\n",
73 | " msg = EmailMessage()\n",
74 | " msg.set_content(fp.read())\n",
75 | "\n",
76 | "# me == the sender's email address\n",
77 | "# you == the recipient's email address\n",
78 | "msg['Subject'] = 'The contents of %s' % textfile\n",
    79 |     "msg['From'] = 'awantikdas@gmail.com'\n",
    80 |     "msg['To'] = 'awantikdas@zekelabs.com'\n",
81 | "\n",
82 | "server = smtplib.SMTP('smtp.gmail.com', 587)\n",
83 | "\n",
84 | "# Send the message via our own SMTP server.\n",
85 | "s = smtplib.SMTP('localhost')\n",
86 | "s.send_message(msg)\n",
87 | "s.quit()"
88 | ]
89 | }
90 | ],
91 | "metadata": {
92 | "kernelspec": {
93 | "display_name": "Python 3",
94 | "language": "python",
95 | "name": "python3"
96 | },
97 | "language_info": {
98 | "name": ""
99 | }
100 | },
101 | "nbformat": 4,
102 | "nbformat_minor": 2
103 | }
104 |
--------------------------------------------------------------------------------
/Day1.py:
--------------------------------------------------------------------------------
"""Day 1 demo: dynamic typing, arbitrary-precision integers, and strings."""

print('hello world')

# Python is dynamically typed: a name may be rebound to a value of any type.
a = 1
b = 2

print(type(a))

a = 'hello'

print(type(a))

# Integers have arbitrary precision — this product does not overflow.
a = 9381902389183109831823091830912839012 * 7281273192738127391827318973
print(a)

a = 100
b = 20

print(a / b)
print(a ** b)

s = 'hello world'
print(s + s)
print('\n\n')
print(5 * s)      # string repetition
print(s[-1])      # last character

print(s[2:6])     # slice [start:stop)
print(s[::-1])    # reversed copy via negative step

s = 'the great hello great world'

# --- Common str methods ---
print(s.count('o'))

print(s.capitalize())

print(s.center(100))

print(s.count(' '))

print(s.endswith('d'))

print(s.startswith('t'))

print(s.find('great', 5))     # search starting at index 5

print(s.index('great', 8))    # like find(), but raises if absent

# Named-placeholder formatting with str.format().
s = 'Mr/Mrs {name}, Congrats for getting {percent}% hike'
print(s.format(name='abc', percent=2))
print(s.format(name='asamkeerth', percent=422))
--------------------------------------------------------------------------------
/Deep Learning using Python.txt:
--------------------------------------------------------------------------------
1 | 1. Machine Learning – An Introduction
2 | What is machine learning?
3 | Different machine learning approaches
4 | Supervised learning
5 | Unsupervised learning
6 | Reinforcement learning
7 | Steps Involved in machine learning systems
8 | Brief description of popular techniques/algorithms
9 | Linear regression
10 | Decision trees
11 | K-means
12 | Naïve Bayes
13 | Support vector machines
14 | The cross-entropy method
15 | Neural networks
16 | Deep learning
17 | Applications in real life
18 | A popular open source package
19 |
20 | 2. Neural Networks
21 | Why neural networks?
22 | Fundamentals
23 | Neurons and layers
24 | Different types of activation function
25 | The back-propagation algorithm
26 | Linear regression
27 | Logistic regression
28 | Back-propagation
29 | Applications in industry
30 | Signal processing
31 | Medical
32 | Autonomous car driving
33 | Business
34 | Pattern recognition
35 | Speech production
36 | Code example of a neural network for the function xor
37 |
38 | 3. Deep Learning Fundamentals
39 | What is deep learning?
40 | Fundamental concepts
41 | Feature learning
42 | Deep learning algorithms
43 | Deep learning applications
44 | Speech recognition
45 | Object recognition and classification
46 | GPU versus CPU
47 | Popular open source libraries – an introduction
48 | Theano
49 | TensorFlow
50 | Keras
51 | Sample deep neural net code using Keras
52 |
53 | 4. Unsupervised Feature Learning
54 | Autoencoders
55 | Network design
56 | Regularization techniques for autoencoders
57 | Denoising autoencoders
58 | Contractive autoencoders
59 | Sparse autoencoders
60 | Summary of autoencoders
61 | Restricted Boltzmann machines
62 | Hopfield networks and Boltzmann machines
63 | Boltzmann machine
64 | Restricted Boltzmann machine
65 | Implementation in TensorFlow
66 | Deep belief networks
67 |
68 | 5. Image Recognition
69 | Similarities between artificial and biological models
70 | Intuition and justification
71 | Convolutional layers
72 | Stride and padding in convolutional layers
73 | Pooling layers
74 | Dropout
75 | Convolutional layers in deep learning
76 | Convolutional layers in Theano
77 | A convolutional layer example with Keras to recognize digits
78 | A convolutional layer example with Keras for cifar10
79 | Pre-training
80 |
81 | 6. Recurrent Neural Networks and Language Models
82 | Recurrent neural networks
83 | RNN — how to implement and train
84 | Backpropagation through time
85 | Vanishing and exploding gradients
86 | Long short term memory
87 | Language modeling
88 | Word-based models
89 | N-grams
90 | Neural language models
91 | Character-based model
92 | Preprocessing and reading data
93 | LSTM network
94 | Training
95 | Sampling
96 | Example training
97 | Speech recognition
98 | Speech recognition pipeline
99 | Speech as input data
100 | Preprocessing
101 | Acoustic model
102 | Deep belief networks
103 | Recurrent neural networks
104 | CTC
105 | Attention-based models
106 | Decoding
107 | End-to-end models
108 |
109 | 7. Deep Learning for Board Games
110 | Early game playing AI
111 | Using the min-max algorithm to value game states
112 | Implementing a Python Tic-Tac-Toe game
113 | Learning a value function
114 | Training AI to master Go
115 | Upper confidence bounds applied to trees
116 | Deep learning in Monte Carlo Tree Search
117 | Quick recap on reinforcement learning
118 | Policy gradients for learning policy functions
119 | Policy gradients in AlphaGo
120 |
121 | 8. Deep Learning for Computer Games
122 | A supervised learning approach to games
123 | Applying genetic algorithms to playing games
124 | Q-Learning
125 | Q-function
126 | Q-learning in action
127 | Dynamic games
128 | Experience replay
129 | Epsilon greedy
130 | Atari Breakout
131 | Atari Breakout random benchmark
132 | Preprocessing the screen
133 | Creating a deep convolutional network
134 | Convergence issues in Q-learning
135 | Policy gradients versus Q-learning
136 | Actor-critic methods
137 | Baseline for variance reduction
138 | Generalized advantage estimator
139 | Asynchronous methods
140 | Model-based approaches
141 |
142 | 9. Anomaly Detection
143 | What is anomaly and outlier detection?
144 | Real-world applications of anomaly detection
145 | Popular shallow machine learning techniques
146 | Data modeling
147 | Detection modeling
148 | Anomaly detection using deep auto-encoders
149 | H2O
150 | Getting started with H2O
151 | Examples
152 | MNIST digit anomaly recognition
153 | Electrocardiogram pulse detection
154 |
155 | 10. Building a Production-Ready Intrusion Detection System
156 | What is a data product?
157 | Training
158 | Weights initialization
159 | Parallel SGD using HOGWILD!
160 | Adaptive learning
161 | Rate annealing
162 | Momentum
163 | Nesterov's acceleration
164 | Newton's method
165 | Adagrad
166 | Adadelta
167 | Distributed learning via Map/Reduce
168 | Sparkling Water
169 | Testing
170 | Model validation
171 | Labeled Data
172 | Unlabeled Data
173 | Summary of validation
174 | Hyper-parameters tuning
175 | End-to-end evaluation
176 | A/B Testing
177 | A summary of testing
178 | Deployment
179 | POJO model export
180 | Anomaly score APIs
181 | A summary of deployment
182 |
--------------------------------------------------------------------------------
/Deep learning using Tensorflow:
--------------------------------------------------------------------------------
1 | Getting Started with Deep Learning
2 | Introducing machine learning
3 | Supervised learning
4 | Unsupervised learning
5 | Reinforcement learning
6 | What is deep learning?
7 | How the human brain works
8 | Deep learning history
9 | Problems addressed
10 | Neural networks
11 | The biological neuron
12 | An artificial neuron
13 | How does an artificial neural network learn?
14 | The backpropagation algorithm
15 | Weights optimization
16 | Stochastic gradient descent
17 | Neural network architectures
18 | Multilayer perceptron
19 | DNNs architectures
20 | Convolutional Neural Networks
21 | Restricted Boltzmann Machines
22 | Autoencoders
23 | Recurrent Neural Networks
24 | Deep learning framework comparisons
25 |
26 | First Look at TensorFlow
27 | General overview
28 | What's new with TensorFlow 1.x?
29 | How does it change the way people use it?
30 | Installing and getting started with TensorFlow
31 | Installing TensorFlow on Linux
32 | Which TensorFlow to install on your platform?
33 | Requirements for running TensorFlow with GPU from NVIDIA
34 | How to install TensorFlow
35 | Installing TensorFlow with native pip
36 | Installing with virtualenv
37 | Installing TensorFlow on Windows
38 | Installation from source
39 | Install on Windows
40 | Test your TensorFlow installation
41 | Computational graphs
42 | Why a computational graph?
43 | Neural networks as computational graphs
44 | The programming model
45 | Data model
46 | Rank
47 | Shape
48 | Data types
49 | Variables
50 | Fetches
51 | Feeds
52 | TensorBoard
53 | How does TensorBoard work?
54 | Implementing a single input neuron
55 | Source code for the single input neuron
56 | Migrating to TensorFlow 1.x
57 | How to upgrade using the script
58 | Limitations
59 | Upgrading code manually
60 | Variables
61 | Functions
62 | Simplified mathematical variants
63 | Miscellaneous changes
64 |
65 | Using TensorFlow on a Feed-Forward Neural Network
66 | Introducing feed-forward neural networks
67 | Feed-forward and backpropagation
68 | Weights and biases
69 | Transfer functions
70 | Classification of handwritten digits
71 | Exploring the MNIST dataset
72 | Softmax classifier
73 | Visualization
74 | How to save and restore a TensorFlow model
75 | Saving a model
76 | Restoring a model
77 | Softmax source code
78 | Softmax loader source code
79 | Implementing a five-layer neural network
80 | Visualization
81 | Five-layer neural network source code
82 | ReLU classifier
83 | Visualization
84 | Source code for the ReLU classifier
85 | Dropout optimization
86 | Visualization
87 | Source code for dropout optimization
88 |
89 | TensorFlow on a Convolutional Neural Network
90 | Introducing CNNs
91 | CNN architecture
92 | A model for CNNs - LeNet
93 | Building your first CNN
94 | Source code for a handwritten classifier
95 | Emotion recognition with CNNs
96 | Source code for emotion classifier
97 | Testing the model on your own image
98 | Source code
99 |
100 | Optimizing TensorFlow Autoencoders
101 | Introducing autoencoders
102 | Implementing an autoencoder
103 | Source code for the autoencoder
104 | Improving autoencoder robustness
105 | Building a denoising autoencoder
106 | Source code for the denoising autoencoder
107 | Convolutional autoencoders
108 | Encoder
109 | Decoder
110 | Source code for convolutional autoencoder
111 |
112 | Recurrent Neural Networks
113 | RNNs basic concepts
114 | RNNs at work
115 | Unfolding an RNN
116 | The vanishing gradient problem
117 | LSTM networks
118 | An image classifier with RNNs
119 | Source code for RNN image classifier
120 | Bidirectional RNNs
121 | Source code for the bidirectional RNN
122 | Text prediction
123 | Dataset
124 | Perplexity
125 | PTB model
126 | Running the example
127 |
128 | GPU Computing
129 | GPGPU computing
130 | GPGPU history
131 | The CUDA architecture
132 | GPU programming model
133 | TensorFlow GPU set up
134 | Update TensorFlow
135 | TensorFlow GPU management
136 | Programming example
137 | Source code for GPU computation
138 | GPU memory management
139 | Assigning a single GPU on a multi-GPU system
140 | Source code for GPU with soft placement
141 | Using multiple GPUs
142 | Source code for multiple GPUs management
143 |
144 | Advanced TensorFlow Programming
145 | Introducing Keras
146 | Installation
147 | Building deep learning models
148 | Sentiment classification of movie reviews
149 | Source code for the Keras movie classifier
150 | Adding a convolutional layer
151 | Source code for movie classifier with convolutional layer
152 | Pretty Tensor
153 | Chaining layers
154 | Normal mode
155 | Sequential mode
156 | Branch and join
157 | Digit classifier
158 | Source code for digit classifier
159 | TFLearn
160 | TFLearn installation
161 | Titanic survival predictor
162 | Source code for titanic classifier
163 |
164 | Advanced Multimedia Programming with TensorFlow
165 | Introduction to multimedia analysis
166 | Deep learning for Scalable Object Detection
167 | Bottlenecks
168 | Using the retrained model
169 | Accelerated Linear Algebra
170 | Key strengths of TensorFlow
171 | Just-in-time compilation via XLA
172 | JIT compilation
173 | Existence and advantages of XLA
174 | Under the hood working of XLA
175 | Still experimental
176 | Supported platforms
177 | More experimental material
178 | TensorFlow and Keras
179 | What is Keras?
180 | Effects of having Keras on board
181 | Video question answering system
182 | Not runnable code!
183 | Deep learning on Android
184 | TensorFlow demo examples
185 | Getting started with Android
186 | Architecture requirements
187 | Prebuilt APK
188 | Running the demo
189 | Building with Android studio
190 | Going deeper - Building with Bazel
191 |
192 | Reinforcement Learning
193 | Basic concepts of Reinforcement Learning
194 | Q-learning algorithm
195 | Introducing the OpenAI Gym framework
196 | FrozenLake-v0 implementation problem
197 | Source code for the FrozenLake-v0 problem
198 | Q-learning with TensorFlow
199 | Source code for the Q-learning neural network
200 |
--------------------------------------------------------------------------------
/Exercises.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "data": {
10 | "text/plain": [
11 | "5"
12 | ]
13 | },
14 | "execution_count": 2,
15 | "metadata": {},
16 | "output_type": "execute_result"
17 | }
18 | ],
19 | "source": [
20 | "txt='Python is powerful and easy.'\n",
21 | "len(txt.split())"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 3,
27 | "metadata": {
28 | "collapsed": true
29 | },
30 | "outputs": [],
31 | "source": [
32 | "data = 'hello this is a nice world, welcome here'"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": 7,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "vowels = data.count('a') + data.count('e') + data.count('i') + data.count('o') + data.count('u')"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": 8,
47 | "metadata": {},
48 | "outputs": [
49 | {
50 | "data": {
51 | "text/plain": [
52 | "20"
53 | ]
54 | },
55 | "execution_count": 8,
56 | "metadata": {},
57 | "output_type": "execute_result"
58 | }
59 | ],
60 | "source": [
61 | "len(data) - data.count(' ') - vowels"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": 85,
67 | "metadata": {
68 | "collapsed": true
69 | },
70 | "outputs": [],
71 | "source": [
72 | "st = 'abccba'"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 86,
78 | "metadata": {
79 | "collapsed": true
80 | },
81 | "outputs": [],
82 | "source": [
83 | "import math"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 87,
89 | "metadata": {
90 | "collapsed": true
91 | },
92 | "outputs": [],
93 | "source": [
94 | "central = math.floor(len(st)/2)"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 88,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "central\n",
104 | "offset = 0\n",
105 | "if len(st)%2 != 0:\n",
106 | " offset = 1"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 89,
112 | "metadata": {},
113 | "outputs": [
114 | {
115 | "data": {
116 | "text/plain": [
117 | "True"
118 | ]
119 | },
120 | "execution_count": 89,
121 | "metadata": {},
122 | "output_type": "execute_result"
123 | }
124 | ],
125 | "source": [
126 | "st[:central+offset] == st[central:][::-1]"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": 90,
132 | "metadata": {},
133 | "outputs": [
134 | {
135 | "data": {
136 | "text/plain": [
137 | "'abc'"
138 | ]
139 | },
140 | "execution_count": 90,
141 | "metadata": {},
142 | "output_type": "execute_result"
143 | }
144 | ],
145 | "source": [
146 | "st[central:][::-1]"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": 91,
152 | "metadata": {},
153 | "outputs": [
154 | {
155 | "data": {
156 | "text/plain": [
157 | "'abc'"
158 | ]
159 | },
160 | "execution_count": 91,
161 | "metadata": {},
162 | "output_type": "execute_result"
163 | }
164 | ],
165 | "source": [
166 | "st[:central+offset]"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": 72,
172 | "metadata": {},
173 | "outputs": [
174 | {
175 | "data": {
176 | "text/plain": [
177 | "0"
178 | ]
179 | },
180 | "execution_count": 72,
181 | "metadata": {},
182 | "output_type": "execute_result"
183 | }
184 | ],
185 | "source": [
186 | "offset"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": 78,
192 | "metadata": {},
193 | "outputs": [
194 | {
195 | "data": {
196 | "text/plain": [
197 | "2"
198 | ]
199 | },
200 | "execution_count": 78,
201 | "metadata": {},
202 | "output_type": "execute_result"
203 | }
204 | ],
205 | "source": [
206 | "central"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": 92,
212 | "metadata": {
213 | "collapsed": true
214 | },
215 | "outputs": [],
216 | "source": [
217 | "s = 'Www.HackerRank.com'"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": 93,
223 | "metadata": {},
224 | "outputs": [
225 | {
226 | "data": {
227 | "text/plain": [
228 | "'wWW.hACKERrANK.COM'"
229 | ]
230 | },
231 | "execution_count": 93,
232 | "metadata": {},
233 | "output_type": "execute_result"
234 | }
235 | ],
236 | "source": [
237 | "s.swapcase()"
238 | ]
239 | },
240 | {
241 | "cell_type": "code",
242 | "execution_count": null,
243 | "metadata": {
244 | "collapsed": true
245 | },
246 | "outputs": [],
247 | "source": []
248 | }
249 | ],
250 | "metadata": {
251 | "kernelspec": {
252 | "display_name": "Python 3",
253 | "language": "python",
254 | "name": "python3"
255 | },
256 | "language_info": {
257 | "codemirror_mode": {
258 | "name": "ipython",
259 | "version": 3
260 | },
261 | "file_extension": ".py",
262 | "mimetype": "text/x-python",
263 | "name": "python",
264 | "nbconvert_exporter": "python",
265 | "pygments_lexer": "ipython3",
266 | "version": "3.5.0"
267 | }
268 | },
269 | "nbformat": 4,
270 | "nbformat_minor": 2
271 | }
272 |
--------------------------------------------------------------------------------
/Java-flipkart:
--------------------------------------------------------------------------------
1 | Programming Java
2 | Getting Started
3 | Base Types
4 | Classes and Objects
5 | Creating and Using Objects
6 | Defining a Class
7 | Strings, Wrappers, Arrays, and Enum Types
8 | Expressions
9 | Literals
10 | Operators
11 | Type Conversions
12 | Control Flow
13 | The If and Switch Statements
14 | Loops
15 | Explicit Control-Flow Statements
16 | Simple Input and Output
17 | An Example Program
18 | Packages and Imports
19 | Software Development
20 | Design
21 | Pseudocode
22 | Coding
23 | Documentation and Style
24 | Testing and Debugging
25 | Exercises
26 |
27 | Object-Oriented Design
28 | Goals, Principles, and Patterns
29 | Object-Oriented Design Goals
30 | Object-Oriented Design Principles
31 | Design Patterns
32 | Inheritance
33 | Extending the CreditCard Class
34 | Polymorphism and Dynamic Dispatch
35 | Inheritance Hierarchies
36 | Interfaces and Abstract Classes
37 | Interfaces in Java
38 | Multiple Inheritance for Interfaces
39 | Abstract Classes
40 | Exceptions
41 | Catching Exceptions
42 | Throwing Exceptions
43 | Java’s Exception Hierarchy
44 | Casting and Generics
45 | Casting
46 | Generics
47 | Nested Classes
48 | Exercises
49 | Design Problem: Classes for CLI based Multiuser shopping E-Kart
50 |
51 | Fundamental Data Structures
52 | Using Arrays
53 | Storing Game Entries in an Array
54 | Sorting an Array
55 | java.util Methods for Arrays and Random Numbers
56 | Simple Cryptography with Character Arrays
57 | Two-Dimensional Arrays and Positional Games
58 | Singly Linked Lists
59 | Implementing a Singly Linked List Class
60 | Circularly Linked Lists
61 | Round-Robin Scheduling
62 | Designing and Implementing a Circularly Linked List
63 | Doubly Linked Lists
64 | Implementing a Doubly Linked List Class
65 | Equivalence Testing
66 | Equivalence Testing with Arrays
67 | Equivalence Testing with Linked Lists
68 | Cloning Data Structures
69 | Cloning Arrays
70 | Cloning Linked Lists
71 | Exercises
72 | Design Problem: Storing items in E-Kart in an Array. Keeping storage functionality modular
73 | for further enhancements.
74 |
75 | Algorithm Analysis
76 | Experimental Studies
77 | Moving Beyond Experimental Analysis
78 | The Seven Functions Used in This Book
79 | Comparing Growth Rates
80 | Asymptotic Analysis
81 | The “Big-Oh” Notation
82 | Comparative Analysis
83 | Examples of Algorithm Analysis
84 | Simple Justification Techniques
85 | By Example
86 | The “Contra” Attack
87 | Induction and Loop Invariants
88 | Exercises
89 | Design Problem: Understanding current complexity of Shopping E-Kart
90 |
91 | Recursion - Fundamental of designing next level of data structures
92 | Illustrative Examples
93 | The Factorial Function
94 | Drawing an English Ruler
95 | Binary Search
96 | File Systems
97 | Analyzing Recursive Algorithms
98 | Further Examples of Recursion
99 | Linear Recursion
100 | Binary Recursion
101 | Multiple Recursion
102 | Designing Recursive Algorithms
103 | Recursion Run Amok
104 | Maximum Recursive Depth in Java
105 | Eliminating Tail Recursion
106 | Exercises
107 |
108 | Stacks, Queues, and Deques
109 | Stacks
110 | The Stack Abstract Data Type
111 | A Simple Array-Based Stack Implementation
112 | Implementing a Stack with a Singly Linked List
113 | Reversing an Array Using a Stack
114 | Matching Parentheses and HTML Tags
115 | Queues
116 | The Queue Abstract Data Type
117 | Array-Based Queue Implementation
118 | Implementing a Queue with a Singly Linked List
119 | A Circular Queue
120 | Double-Ended Queues
121 | The Deque Abstract Data Type
122 | Implementing a Deque
123 | Deques in the Java Collections Framework
124 | Exercises
125 | Design Problem: Maintaining a priority queue of unavailable items. Sending a notification to users,
126 | based on priority, when an item becomes available
127 |
128 |
129 | List and Iterator ADTs
130 | The List ADT
131 | Array Lists
132 | Dynamic Arrays
133 | Implementing a Dynamic Array
134 | Amortized Analysis of Dynamic Arrays
135 | Java’s StringBuilder class
136 | Positional Lists
137 | Positions
138 | The Positional List Abstract Data Type
139 | Doubly Linked List Implementation
140 | Iterators
141 | The Iterable Interface and Java’s For-Each Loop
142 | Implementing Iterators
143 | The Java Collections Framework
144 | List Iterators in Java
145 | Comparison to Our Positional List ADT
146 | List-Based Algorithms in the Java Collections Framework
147 | Sorting a Positional List
148 | Case Study: Maintaining Access Frequencies
149 | Using a Sorted List
150 | Using a List with the Move-to-Front Heuristic
151 | Exercises
152 | Design Problem: Changing storage functionality to list data structures & realizing benefits over array
153 |
154 | Trees
155 | General Trees
156 | Tree Definitions and Properties
157 | The Tree Abstract Data Type
158 | Computing Depth and Height
159 | Binary Trees
160 | The Binary Tree Abstract Data Type
161 | Properties of Binary Trees
162 | Implementing Trees
163 | Linked Structure for Binary Trees
164 | Array-Based Representation of a Binary Tree
165 | Linked Structure for General Trees
166 | Tree Traversal Algorithms
167 | Preorder and Postorder Traversals of General Trees
168 | Breadth-First Tree Traversal
169 | Inorder Traversal of a Binary Tree
170 | Implementing Tree Traversals in Java
171 | Applications of Tree Traversals
172 | Euler Tours
173 | Exercises
174 | Design Problem: Display all items in alphabetical order, increasing prices, expected time of delivery
175 |
176 | Priority Queues
177 | The Priority Queue Abstract Data Type
178 | Priorities
179 | The Priority Queue ADT
180 | Implementing a Priority Queue
181 | The Entry Composite
182 | Comparing Keys with Total Orders
183 | The AbstractPriorityQueue Base Class
184 | Implementing a Priority Queue with an Unsorted List
185 | Implementing a Priority Queue with a Sorted List
186 | Heaps
187 | The Heap Data Structure
188 | Implementing a Priority Queue with a Heap
189 | Analysis of a Heap-Based Priority Queue
190 | Bottom-Up Heap Construction ⋆
191 | Using the java.util.PriorityQueue Class
192 | Sorting with a Priority Queue
193 | Selection-Sort and Insertion-Sort
194 | Heap-Sort
195 | Adaptable Priority Queues
196 | Location-Aware Entries
197 | Implementing an Adaptable Priority Queue
198 | Exercises
199 |
200 | Maps, Hash Tables, and Skip Lists
201 | Maps
202 | The Map ADT
203 | Application: Counting Word Frequencies
204 | An AbstractMap Base Class
205 | A Simple Unsorted Map Implementation
206 | Hash Tables
207 | Hash Functions
208 | Collision-Handling Schemes
209 | Load Factors, Rehashing, and Efficiency
210 | Java Hash Table Implementation
211 | Sorted Maps
212 | Sorted Search Tables
213 | Two Applications of Sorted Maps
214 | Skip Lists
215 | Search and Update Operations in a Skip List
216 | Probabilistic Analysis of Skip Lists ⋆
217 | Sets, Multisets, and Multimaps
218 | The Set ADT
219 | The Multiset ADT
220 | The Multimap ADT
221 | Exercises
222 | Design Problem: Improving the current solution with Maps and checking the results.
223 |
224 | Search Trees
225 | Binary Search Trees
226 | Searching Within a Binary Search Tree
227 | Insertions and Deletions
228 | Java Implementation
229 | Performance of a Binary Search Tree
230 | Balanced Search Trees
231 | Java Framework for Balancing Search Trees
232 | AVL Trees
233 | Update Operations
234 | Java Implementation
235 | Splay Trees
236 | Splaying
237 | When to Splay
238 | Java Implementation
239 | Amortized Analysis of Splaying ⋆
240 | (2,4) Trees
241 | Multiway Search Trees
242 | (2,4)-Tree Operations
243 | Red-Black Trees
244 | Red-Black Tree Operations
245 | Java Implementation
246 | Exercises
247 |
248 | Sorting and Selection
249 | Merge-Sort
250 | Divide-and-Conquer
251 | Array-Based Implementation of Merge-Sort
252 | The Running Time of Merge-Sort
253 | Merge-Sort and Recurrence Equations ⋆
254 | Alternative Implementations of Merge-Sort
255 | Quick-Sort
256 | Randomized Quick-Sort
257 | Additional Optimizations for Quick-Sort
258 | Studying Sorting through an Algorithmic Lens
259 | Lower Bound for Sorting
260 | Linear-Time Sorting: Bucket-Sort and Radix-Sort
261 | Comparing Sorting Algorithms
262 | Selection
263 | Prune-and-Search
264 | Randomized Quick-Select
265 | Analyzing Randomized Quick-Select
266 | Exercises
267 |
268 | Text Processing
269 | Abundance of Digitized Text
270 | Notations for Character Strings
271 | Pattern-Matching Algorithms
272 | Brute Force
273 | The Boyer-Moore Algorithm
274 | The Knuth-Morris-Pratt Algorithm
275 | Tries
276 | Standard Tries
277 | Compressed Tries
278 | Suffix Tries
279 | Search Engine Indexing
280 | Text Compression and the Greedy Method
281 | The Huffman Coding Algorithm
282 | The Greedy Method
283 | Dynamic Programming
284 | Matrix Chain-Product
285 | DNA and Text Sequence Alignment
286 | Exercises
287 |
288 | Graph Algorithms
289 | Graphs
290 | The Graph ADT
291 | Data Structures for Graphs
292 | Edge List Structure
293 | Adjacency List Structure
294 | Adjacency Map Structure
295 | Adjacency Matrix Structure
296 | Java Implementation
297 | Graph Traversals
298 | Depth-First Search
299 | DFS Implementation and Extensions
300 | Breadth-First Search
301 | Transitive Closure
302 | Directed Acyclic Graphs
303 | Topological Ordering
304 | Shortest Paths
305 | Weighted Graphs
306 | Dijkstra’s Algorithm
307 | Minimum Spanning Trees
308 | Prim-Jarník Algorithm
309 | Kruskal’s Algorithm
310 | Disjoint Partitions and Union-Find Structures
311 | Exercises
312 |
313 | Memory Management and B-Trees
314 | Memory Management
315 | Stacks in the Java Virtual Machine
316 | Allocating Space in the Memory Heap
317 | Garbage Collection
318 | Memory Hierarchies and Caching
319 | Memory Systems
320 | Caching Strategies
321 | External Searching and B-Trees
322 | (a,b) Trees
323 | B-Trees
324 | External-Memory Sorting
325 | Multiway Merging
326 | Exercises
--------------------------------------------------------------------------------
/NumPy.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": []
11 | }
12 | ],
13 | "metadata": {
14 | "kernelspec": {
15 | "display_name": "Python 3",
16 | "language": "python",
17 | "name": "python3"
18 | },
19 | "language_info": {
20 | "codemirror_mode": {
21 | "name": "ipython",
22 | "version": 3
23 | },
24 | "file_extension": ".py",
25 | "mimetype": "text/x-python",
26 | "name": "python",
27 | "nbconvert_exporter": "python",
28 | "pygments_lexer": "ipython3",
29 | "version": "3.5.0"
30 | }
31 | },
32 | "nbformat": 4,
33 | "nbformat_minor": 2
34 | }
35 |
--------------------------------------------------------------------------------
/Python - Strings.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "st = 'Hello World, here I come'"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 4,
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "data": {
21 | "text/plain": [
22 | "1"
23 | ]
24 | },
25 | "execution_count": 4,
26 | "metadata": {},
27 | "output_type": "execute_result"
28 | }
29 | ],
30 | "source": [
31 | "st.count('re')"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 5,
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "data": {
41 | "text/plain": [
42 | "True"
43 | ]
44 | },
45 | "execution_count": 5,
46 | "metadata": {},
47 | "output_type": "execute_result"
48 | }
49 | ],
50 | "source": [
51 | "st.endswith('me')"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 6,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "data": {
61 | "text/plain": [
62 | "11"
63 | ]
64 | },
65 | "execution_count": 6,
66 | "metadata": {},
67 | "output_type": "execute_result"
68 | }
69 | ],
70 | "source": [
71 | "st.find(',')"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 7,
77 | "metadata": {},
78 | "outputs": [
79 | {
80 | "data": {
81 | "text/plain": [
82 | "13"
83 | ]
84 | },
85 | "execution_count": 7,
86 | "metadata": {},
87 | "output_type": "execute_result"
88 | }
89 | ],
90 | "source": [
91 | "st.find('here')"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 9,
97 | "metadata": {},
98 | "outputs": [
99 | {
100 | "data": {
101 | "text/plain": [
102 | "'Congrats Sam, for your hike of 0.5%'"
103 | ]
104 | },
105 | "execution_count": 9,
106 | "metadata": {},
107 | "output_type": "execute_result"
108 | }
109 | ],
110 | "source": [
111 | "'Congrats {name}, for your hike of {percent}%'.format(name='Sam',percent=.5)"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": 10,
117 | "metadata": {},
118 | "outputs": [
119 | {
120 | "data": {
121 | "text/plain": [
122 | "'Congrats abc, for your hike of 0.5%'"
123 | ]
124 | },
125 | "execution_count": 10,
126 | "metadata": {},
127 | "output_type": "execute_result"
128 | }
129 | ],
130 | "source": [
131 | "'Congrats {name}, for your hike of {percent}%'.format(percent=.5,name='abc')"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": 11,
137 | "metadata": {},
138 | "outputs": [
139 | {
140 | "data": {
141 | "text/plain": [
142 | "0"
143 | ]
144 | },
145 | "execution_count": 11,
146 | "metadata": {},
147 | "output_type": "execute_result"
148 | }
149 | ],
150 | "source": [
151 | "st.index('Hello')"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 12,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "data": {
161 | "text/plain": [
162 | "'Hello World, here I come'"
163 | ]
164 | },
165 | "execution_count": 12,
166 | "metadata": {},
167 | "output_type": "execute_result"
168 | }
169 | ],
170 | "source": [
171 | "st"
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": 13,
177 | "metadata": {},
178 | "outputs": [
179 | {
180 | "data": {
181 | "text/plain": [
182 | "-1"
183 | ]
184 | },
185 | "execution_count": 13,
186 | "metadata": {},
187 | "output_type": "execute_result"
188 | }
189 | ],
190 | "source": [
191 | "st.find('Hesdllo')"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": 14,
197 | "metadata": {},
198 | "outputs": [
199 | {
200 | "ename": "ValueError",
201 | "evalue": "substring not found",
202 | "output_type": "error",
203 | "traceback": [
204 | "\u001b[1;31m-----------------------------------------------------------------------\u001b[0m",
205 | "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
206 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mst\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mindex\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hedllo'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
207 | "\u001b[1;31mValueError\u001b[0m: substring not found"
208 | ]
209 | }
210 | ],
211 | "source": [
212 | "st.index('Hedllo')"
213 | ]
214 | },
215 | {
216 | "cell_type": "code",
217 | "execution_count": 15,
218 | "metadata": {},
219 | "outputs": [
220 | {
221 | "data": {
222 | "text/plain": [
223 | "False"
224 | ]
225 | },
226 | "execution_count": 15,
227 | "metadata": {},
228 | "output_type": "execute_result"
229 | }
230 | ],
231 | "source": [
232 | "st.isalnum()"
233 | ]
234 | },
235 | {
236 | "cell_type": "code",
237 | "execution_count": 16,
238 | "metadata": {
239 | "collapsed": true
240 | },
241 | "outputs": [],
242 | "source": [
243 | "s = 'abc'"
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": 17,
249 | "metadata": {},
250 | "outputs": [
251 | {
252 | "data": {
253 | "text/plain": [
254 | "True"
255 | ]
256 | },
257 | "execution_count": 17,
258 | "metadata": {},
259 | "output_type": "execute_result"
260 | }
261 | ],
262 | "source": [
263 | "s.isalnum()"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": 18,
269 | "metadata": {},
270 | "outputs": [
271 | {
272 | "data": {
273 | "text/plain": [
274 | "False"
275 | ]
276 | },
277 | "execution_count": 18,
278 | "metadata": {},
279 | "output_type": "execute_result"
280 | }
281 | ],
282 | "source": [
283 | "s.isnumeric()"
284 | ]
285 | },
286 | {
287 | "cell_type": "code",
288 | "execution_count": 19,
289 | "metadata": {
290 | "collapsed": true
291 | },
292 | "outputs": [],
293 | "source": [
294 | "d = '1234'"
295 | ]
296 | },
297 | {
298 | "cell_type": "code",
299 | "execution_count": 20,
300 | "metadata": {},
301 | "outputs": [
302 | {
303 | "data": {
304 | "text/plain": [
305 | "True"
306 | ]
307 | },
308 | "execution_count": 20,
309 | "metadata": {},
310 | "output_type": "execute_result"
311 | }
312 | ],
313 | "source": [
314 | "d.isnumeric()"
315 | ]
316 | },
317 | {
318 | "cell_type": "code",
319 | "execution_count": 21,
320 | "metadata": {
321 | "collapsed": true
322 | },
323 | "outputs": [],
324 | "source": [
325 | "a = ' this is good '"
326 | ]
327 | },
328 | {
329 | "cell_type": "code",
330 | "execution_count": 22,
331 | "metadata": {},
332 | "outputs": [
333 | {
334 | "data": {
335 | "text/plain": [
336 | "'this is good'"
337 | ]
338 | },
339 | "execution_count": 22,
340 | "metadata": {},
341 | "output_type": "execute_result"
342 | }
343 | ],
344 | "source": [
345 | "a.strip()"
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": 24,
351 | "metadata": {},
352 | "outputs": [
353 | {
354 | "data": {
355 | "text/plain": [
356 | "'thisisgood'"
357 | ]
358 | },
359 | "execution_count": 24,
360 | "metadata": {},
361 | "output_type": "execute_result"
362 | }
363 | ],
364 | "source": [
365 | "a.replace(' ','')"
366 | ]
367 | },
368 | {
369 | "cell_type": "code",
370 | "execution_count": 25,
371 | "metadata": {},
372 | "outputs": [
373 | {
374 | "data": {
375 | "text/plain": [
376 | "' THIS IS GOOD '"
377 | ]
378 | },
379 | "execution_count": 25,
380 | "metadata": {},
381 | "output_type": "execute_result"
382 | }
383 | ],
384 | "source": [
385 | "a.upper()"
386 | ]
387 | },
388 | {
389 | "cell_type": "code",
390 | "execution_count": 26,
391 | "metadata": {},
392 | "outputs": [
393 | {
394 | "data": {
395 | "text/plain": [
396 | "['this', 'is', 'good']"
397 | ]
398 | },
399 | "execution_count": 26,
400 | "metadata": {},
401 | "output_type": "execute_result"
402 | }
403 | ],
404 | "source": [
405 | "a.split()"
406 | ]
407 | },
408 | {
409 | "cell_type": "code",
410 | "execution_count": 27,
411 | "metadata": {
412 | "collapsed": true
413 | },
414 | "outputs": [],
415 | "source": [
416 | "a = 'hello how are you'"
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": 29,
422 | "metadata": {},
423 | "outputs": [
424 | {
425 | "data": {
426 | "text/plain": [
427 | "'helloyou'"
428 | ]
429 | },
430 | "execution_count": 29,
431 | "metadata": {},
432 | "output_type": "execute_result"
433 | }
434 | ],
435 | "source": [
436 | "a.split()[0] + a.split()[-1]"
437 | ]
438 | },
439 | {
440 | "cell_type": "code",
441 | "execution_count": 31,
442 | "metadata": {},
443 | "outputs": [],
444 | "source": [
445 | "d,e,f = a.partition(' ')"
446 | ]
447 | },
448 | {
449 | "cell_type": "code",
450 | "execution_count": 32,
451 | "metadata": {},
452 | "outputs": [
453 | {
454 | "data": {
455 | "text/plain": [
456 | "'hello'"
457 | ]
458 | },
459 | "execution_count": 32,
460 | "metadata": {},
461 | "output_type": "execute_result"
462 | }
463 | ],
464 | "source": [
465 | "d"
466 | ]
467 | },
468 | {
469 | "cell_type": "code",
470 | "execution_count": 33,
471 | "metadata": {},
472 | "outputs": [
473 | {
474 | "data": {
475 | "text/plain": [
476 | "' '"
477 | ]
478 | },
479 | "execution_count": 33,
480 | "metadata": {},
481 | "output_type": "execute_result"
482 | }
483 | ],
484 | "source": [
485 | "e"
486 | ]
487 | },
488 | {
489 | "cell_type": "code",
490 | "execution_count": 34,
491 | "metadata": {},
492 | "outputs": [
493 | {
494 | "data": {
495 | "text/plain": [
496 | "'how are you'"
497 | ]
498 | },
499 | "execution_count": 34,
500 | "metadata": {},
501 | "output_type": "execute_result"
502 | }
503 | ],
504 | "source": [
505 | "f"
506 | ]
507 | },
508 | {
509 | "cell_type": "code",
510 | "execution_count": 35,
511 | "metadata": {},
512 | "outputs": [
513 | {
514 | "name": "stdout",
515 | "output_type": "stream",
516 | "text": [
517 | "Help on built-in function translate:\n",
518 | "\n",
519 | "translate(...) method of builtins.str instance\n",
520 | " S.translate(table) -> str\n",
521 | " \n",
522 | " Return a copy of the string S in which each character has been mapped\n",
523 | " through the given translation table. The table must implement\n",
524 | " lookup/indexing via __getitem__, for instance a dictionary or list,\n",
525 | " mapping Unicode ordinals to Unicode ordinals, strings, or None. If\n",
526 | " this operation raises LookupError, the character is left untouched.\n",
527 | " Characters mapped to None are deleted.\n",
528 | "\n"
529 | ]
530 | }
531 | ],
532 | "source": [
533 | "help(f.translate)"
534 | ]
535 | },
536 | {
537 | "cell_type": "code",
538 | "execution_count": 36,
539 | "metadata": {},
540 | "outputs": [
541 | {
542 | "name": "stdout",
543 | "output_type": "stream",
544 | "text": [
545 | "Help on built-in function maketrans:\n",
546 | "\n",
547 | "maketrans(x, y=None, z=None, /)\n",
548 | " Return a translation table usable for str.translate().\n",
549 | " \n",
550 | " If there is only one argument, it must be a dictionary mapping Unicode\n",
551 | " ordinals (integers) or characters to Unicode ordinals, strings or None.\n",
552 | " Character keys will be then converted to ordinals.\n",
553 | " If there are two arguments, they must be strings of equal length, and\n",
554 | " in the resulting dictionary, each character in x will be mapped to the\n",
555 | " character at the same position in y. If there is a third argument, it\n",
556 | " must be a string, whose characters will be mapped to None in the result.\n",
557 | "\n"
558 | ]
559 | }
560 | ],
561 | "source": [
562 | "help(s.maketrans)"
563 | ]
564 | },
565 | {
566 | "cell_type": "code",
567 | "execution_count": 45,
568 | "metadata": {},
569 | "outputs": [],
570 | "source": [
571 | "#from string import maketrans\n",
572 | "t = s.maketrans('abc ','1239')"
573 | ]
574 | },
575 | {
576 | "cell_type": "code",
577 | "execution_count": 42,
578 | "metadata": {},
579 | "outputs": [
580 | {
581 | "data": {
582 | "text/plain": [
583 | "'how91re9you'"
584 | ]
585 | },
586 | "execution_count": 42,
587 | "metadata": {},
588 | "output_type": "execute_result"
589 | }
590 | ],
591 | "source": [
592 | "f.translate(t)"
593 | ]
594 | },
595 | {
596 | "cell_type": "code",
597 | "execution_count": 43,
598 | "metadata": {},
599 | "outputs": [
600 | {
601 | "data": {
602 | "text/plain": [
603 | "'abc'"
604 | ]
605 | },
606 | "execution_count": 43,
607 | "metadata": {},
608 | "output_type": "execute_result"
609 | }
610 | ],
611 | "source": [
612 | "s"
613 | ]
614 | },
615 | {
616 | "cell_type": "code",
617 | "execution_count": 46,
618 | "metadata": {},
619 | "outputs": [
620 | {
621 | "data": {
622 | "text/plain": [
623 | "'how are you'"
624 | ]
625 | },
626 | "execution_count": 46,
627 | "metadata": {},
628 | "output_type": "execute_result"
629 | }
630 | ],
631 | "source": [
632 | "f"
633 | ]
634 | },
635 | {
636 | "cell_type": "code",
637 | "execution_count": 47,
638 | "metadata": {},
639 | "outputs": [
640 | {
641 | "data": {
642 | "text/plain": [
643 | "True"
644 | ]
645 | },
646 | "execution_count": 47,
647 | "metadata": {},
648 | "output_type": "execute_result"
649 | }
650 | ],
651 | "source": [
652 | "s.startswith('a')"
653 | ]
654 | },
655 | {
656 | "cell_type": "code",
657 | "execution_count": 48,
658 | "metadata": {},
659 | "outputs": [
660 | {
661 | "data": {
662 | "text/plain": [
663 | "'abc'"
664 | ]
665 | },
666 | "execution_count": 48,
667 | "metadata": {},
668 | "output_type": "execute_result"
669 | }
670 | ],
671 | "source": [
672 | "s"
673 | ]
674 | },
675 | {
676 | "cell_type": "code",
677 | "execution_count": 49,
678 | "metadata": {
679 | "collapsed": true
680 | },
681 | "outputs": [],
682 | "source": [
683 | "s = ['hello','how','are','you']"
684 | ]
685 | },
686 | {
687 | "cell_type": "code",
688 | "execution_count": 51,
689 | "metadata": {},
690 | "outputs": [
691 | {
692 | "data": {
693 | "text/plain": [
694 | "'hello*how*are*you'"
695 | ]
696 | },
697 | "execution_count": 51,
698 | "metadata": {},
699 | "output_type": "execute_result"
700 | }
701 | ],
702 | "source": [
703 | "'*'.join(s)"
704 | ]
705 | },
706 | {
707 | "cell_type": "code",
708 | "execution_count": 53,
709 | "metadata": {},
710 | "outputs": [
711 | {
712 | "name": "stdout",
713 | "output_type": "stream",
714 | "text": [
715 | "Help on built-in function zfill:\n",
716 | "\n",
717 | "zfill(...) method of builtins.str instance\n",
718 | " S.zfill(width) -> str\n",
719 | " \n",
720 | " Pad a numeric string S with zeros on the left, to fill a field\n",
721 | " of the specified width. The string S is never truncated.\n",
722 | "\n"
723 | ]
724 | }
725 | ],
726 | "source": [
727 | "help(f.zfill)"
728 | ]
729 | },
730 | {
731 | "cell_type": "code",
732 | "execution_count": 55,
733 | "metadata": {},
734 | "outputs": [
735 | {
736 | "data": {
737 | "text/plain": [
738 | "'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000how are you'"
739 | ]
740 | },
741 | "execution_count": 55,
742 | "metadata": {},
743 | "output_type": "execute_result"
744 | }
745 | ],
746 | "source": [
747 | "f.zfill(100)"
748 | ]
749 | },
750 | {
751 | "cell_type": "code",
752 | "execution_count": null,
753 | "metadata": {
754 | "collapsed": true
755 | },
756 | "outputs": [],
757 | "source": []
758 | }
759 | ],
760 | "metadata": {
761 | "kernelspec": {
762 | "display_name": "Python 3",
763 | "language": "python",
764 | "name": "python3"
765 | },
766 | "language_info": {
767 | "codemirror_mode": {
768 | "name": "ipython",
769 | "version": 3
770 | },
771 | "file_extension": ".py",
772 | "mimetype": "text/x-python",
773 | "name": "python",
774 | "nbconvert_exporter": "python",
775 | "pygments_lexer": "ipython3",
776 | "version": "3.5.0"
777 | }
778 | },
779 | "nbformat": 4,
780 | "nbformat_minor": 2
781 | }
782 |
--------------------------------------------------------------------------------
/Python Programming 1.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stdout",
10 | "output_type": "stream",
11 | "text": [
12 | "Hello World\n"
13 | ]
14 | }
15 | ],
16 | "source": [
17 | "print ('Hello World')"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 2,
23 | "metadata": {
24 | "collapsed": true
25 | },
26 | "outputs": [],
27 | "source": [
28 | "a = 10\n",
29 | "b = 10"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 3,
35 | "metadata": {},
36 | "outputs": [
37 | {
38 | "data": {
39 | "text/plain": [
40 | "True"
41 | ]
42 | },
43 | "execution_count": 3,
44 | "metadata": {},
45 | "output_type": "execute_result"
46 | }
47 | ],
48 | "source": [
49 | "a is b"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 4,
55 | "metadata": {},
56 | "outputs": [
57 | {
58 | "data": {
59 | "text/plain": [
60 | "1707204656"
61 | ]
62 | },
63 | "execution_count": 4,
64 | "metadata": {},
65 | "output_type": "execute_result"
66 | }
67 | ],
68 | "source": [
69 | "id(a)"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": 5,
75 | "metadata": {},
76 | "outputs": [
77 | {
78 | "data": {
79 | "text/plain": [
80 | "1707204656"
81 | ]
82 | },
83 | "execution_count": 5,
84 | "metadata": {},
85 | "output_type": "execute_result"
86 | }
87 | ],
88 | "source": [
89 | "id(b)"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 6,
95 | "metadata": {
96 | "collapsed": true
97 | },
98 | "outputs": [],
99 | "source": [
100 | "a = 11"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 7,
106 | "metadata": {},
107 | "outputs": [
108 | {
109 | "data": {
110 | "text/plain": [
111 | "10"
112 | ]
113 | },
114 | "execution_count": 7,
115 | "metadata": {},
116 | "output_type": "execute_result"
117 | }
118 | ],
119 | "source": [
120 | "b"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": 8,
126 | "metadata": {
127 | "collapsed": true
128 | },
129 | "outputs": [],
130 | "source": [
131 | "c = 300\n",
132 | "d = 400"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": 9,
138 | "metadata": {},
139 | "outputs": [
140 | {
141 | "data": {
142 | "text/plain": [
143 | "False"
144 | ]
145 | },
146 | "execution_count": 9,
147 | "metadata": {},
148 | "output_type": "execute_result"
149 | }
150 | ],
151 | "source": [
152 | "c is d"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": 10,
158 | "metadata": {},
159 | "outputs": [
160 | {
161 | "data": {
162 | "text/plain": [
163 | "int"
164 | ]
165 | },
166 | "execution_count": 10,
167 | "metadata": {},
168 | "output_type": "execute_result"
169 | }
170 | ],
171 | "source": [
172 | "type(a)"
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "execution_count": 11,
178 | "metadata": {
179 | "collapsed": true
180 | },
181 | "outputs": [],
182 | "source": [
183 | "# Python is dynamically typed programming language\n",
184 | "# Type of the variable is assigned based on the value assigned to it"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": 12,
190 | "metadata": {
191 | "collapsed": true
192 | },
193 | "outputs": [],
194 | "source": [
195 | "a = 7\n",
196 | "a = 'Hello World'"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": 13,
202 | "metadata": {
203 | "collapsed": true
204 | },
205 | "outputs": [],
206 | "source": [
207 | "import math"
208 | ]
209 | },
210 | {
211 | "cell_type": "code",
212 | "execution_count": 14,
213 | "metadata": {},
214 | "outputs": [
215 | {
216 | "data": {
217 | "text/plain": [
218 | "True"
219 | ]
220 | },
221 | "execution_count": 14,
222 | "metadata": {},
223 | "output_type": "execute_result"
224 | }
225 | ],
226 | "source": [
227 | "'a' in 'abcde'"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": 15,
233 | "metadata": {
234 | "collapsed": true
235 | },
236 | "outputs": [],
237 | "source": [
238 | "s = 'Hello World'"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": 16,
244 | "metadata": {},
245 | "outputs": [
246 | {
247 | "data": {
248 | "text/plain": [
249 | "'el'"
250 | ]
251 | },
252 | "execution_count": 16,
253 | "metadata": {},
254 | "output_type": "execute_result"
255 | }
256 | ],
257 | "source": [
258 | "s[1:3]"
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": 17,
264 | "metadata": {},
265 | "outputs": [
266 | {
267 | "data": {
268 | "text/plain": [
269 | "'dlroW olleH'"
270 | ]
271 | },
272 | "execution_count": 17,
273 | "metadata": {},
274 | "output_type": "execute_result"
275 | }
276 | ],
277 | "source": [
278 | "s[::-1]"
279 | ]
280 | },
281 | {
282 | "cell_type": "code",
283 | "execution_count": 18,
284 | "metadata": {},
285 | "outputs": [
286 | {
287 | "data": {
288 | "text/plain": [
289 | "'drWolH'"
290 | ]
291 | },
292 | "execution_count": 18,
293 | "metadata": {},
294 | "output_type": "execute_result"
295 | }
296 | ],
297 | "source": [
298 | "s[::-2]"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": 19,
304 | "metadata": {},
305 | "outputs": [
306 | {
307 | "data": {
308 | "text/plain": [
309 | "'Hello world'"
310 | ]
311 | },
312 | "execution_count": 19,
313 | "metadata": {},
314 | "output_type": "execute_result"
315 | }
316 | ],
317 | "source": [
318 | "s.capitalize()"
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "execution_count": 20,
324 | "metadata": {},
325 | "outputs": [
326 | {
327 | "data": {
328 | "text/plain": [
329 | "'Hello World'"
330 | ]
331 | },
332 | "execution_count": 20,
333 | "metadata": {},
334 | "output_type": "execute_result"
335 | }
336 | ],
337 | "source": [
338 | "s"
339 | ]
340 | },
341 | {
342 | "cell_type": "code",
343 | "execution_count": 21,
344 | "metadata": {
345 | "collapsed": true
346 | },
347 | "outputs": [],
348 | "source": [
349 | "# string is immutable"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": 22,
355 | "metadata": {
356 | "collapsed": true
357 | },
358 | "outputs": [],
359 | "source": [
360 | "a = 'Hello'\n",
361 | "b = 'Hello'"
362 | ]
363 | },
364 | {
365 | "cell_type": "code",
366 | "execution_count": 23,
367 | "metadata": {},
368 | "outputs": [
369 | {
370 | "data": {
371 | "text/plain": [
372 | "2557628659504"
373 | ]
374 | },
375 | "execution_count": 23,
376 | "metadata": {},
377 | "output_type": "execute_result"
378 | }
379 | ],
380 | "source": [
381 | "id(a)"
382 | ]
383 | },
384 | {
385 | "cell_type": "code",
386 | "execution_count": 24,
387 | "metadata": {},
388 | "outputs": [
389 | {
390 | "data": {
391 | "text/plain": [
392 | "2557628659504"
393 | ]
394 | },
395 | "execution_count": 24,
396 | "metadata": {},
397 | "output_type": "execute_result"
398 | }
399 | ],
400 | "source": [
401 | "id(b)"
402 | ]
403 | },
404 | {
405 | "cell_type": "code",
406 | "execution_count": 26,
407 | "metadata": {},
408 | "outputs": [
409 | {
410 | "data": {
411 | "text/plain": [
412 | "1"
413 | ]
414 | },
415 | "execution_count": 26,
416 | "metadata": {},
417 | "output_type": "execute_result"
418 | }
419 | ],
420 | "source": [
421 | "s.count('el')"
422 | ]
423 | },
424 | {
425 | "cell_type": "code",
426 | "execution_count": 27,
427 | "metadata": {},
428 | "outputs": [
429 | {
430 | "name": "stdout",
431 | "output_type": "stream",
432 | "text": [
433 | "Help on built-in function expandtabs:\n",
434 | "\n",
435 | "expandtabs(...) method of builtins.str instance\n",
436 | " S.expandtabs(tabsize=8) -> str\n",
437 | " \n",
438 | " Return a copy of S where all tab characters are expanded using spaces.\n",
439 | " If tabsize is not given, a tab size of 8 characters is assumed.\n",
440 | "\n"
441 | ]
442 | }
443 | ],
444 | "source": [
445 | "help(s.expandtabs)"
446 | ]
447 | },
448 | {
449 | "cell_type": "code",
450 | "execution_count": 31,
451 | "metadata": {},
452 | "outputs": [
453 | {
454 | "data": {
455 | "text/plain": [
456 | "'hello World'"
457 | ]
458 | },
459 | "execution_count": 31,
460 | "metadata": {},
461 | "output_type": "execute_result"
462 | }
463 | ],
464 | "source": [
465 | "s = 'hello\\tWorld'\n",
466 | "s.expandtabs(17)"
467 | ]
468 | },
469 | {
470 | "cell_type": "code",
471 | "execution_count": 32,
472 | "metadata": {
473 | "collapsed": true
474 | },
475 | "outputs": [],
476 | "source": [
477 | "s = 'hey how are you hey aa'"
478 | ]
479 | },
480 | {
481 | "cell_type": "code",
482 | "execution_count": 33,
483 | "metadata": {},
484 | "outputs": [
485 | {
486 | "data": {
487 | "text/plain": [
488 | "0"
489 | ]
490 | },
491 | "execution_count": 33,
492 | "metadata": {},
493 | "output_type": "execute_result"
494 | }
495 | ],
496 | "source": [
497 | "s.find('hey')"
498 | ]
499 | },
500 | {
501 | "cell_type": "code",
502 | "execution_count": 34,
503 | "metadata": {},
504 | "outputs": [
505 | {
506 | "data": {
507 | "text/plain": [
508 | "16"
509 | ]
510 | },
511 | "execution_count": 34,
512 | "metadata": {},
513 | "output_type": "execute_result"
514 | }
515 | ],
516 | "source": [
517 | "s.find('hey',1)"
518 | ]
519 | },
520 | {
521 | "cell_type": "code",
522 | "execution_count": 35,
523 | "metadata": {
524 | "collapsed": true
525 | },
526 | "outputs": [],
527 | "source": [
528 | "h = \"Congrats, {title} {name} for your {percent}% hike\""
529 | ]
530 | },
531 | {
532 | "cell_type": "code",
533 | "execution_count": 36,
534 | "metadata": {},
535 | "outputs": [
536 | {
537 | "data": {
538 | "text/plain": [
539 | "'Congrats, Mr Joshi for your 10% hike'"
540 | ]
541 | },
542 | "execution_count": 36,
543 | "metadata": {},
544 | "output_type": "execute_result"
545 | }
546 | ],
547 | "source": [
548 | "h.format(name='Joshi', title='Mr', percent='10')"
549 | ]
550 | },
551 | {
552 | "cell_type": "code",
553 | "execution_count": 37,
554 | "metadata": {
555 | "collapsed": true
556 | },
557 | "outputs": [],
558 | "source": [
559 | "s=\"I don't like this\""
560 | ]
561 | },
562 | {
563 | "cell_type": "code",
564 | "execution_count": 38,
565 | "metadata": {
566 | "collapsed": true
567 | },
568 | "outputs": [],
569 | "source": [
570 | "s='I don\\t like this'"
571 | ]
572 | },
573 | {
574 | "cell_type": "code",
575 | "execution_count": 39,
576 | "metadata": {},
577 | "outputs": [
578 | {
579 | "data": {
580 | "text/plain": [
581 | "'I don\\t like this'"
582 | ]
583 | },
584 | "execution_count": 39,
585 | "metadata": {},
586 | "output_type": "execute_result"
587 | }
588 | ],
589 | "source": [
590 | "s"
591 | ]
592 | },
593 | {
594 | "cell_type": "code",
595 | "execution_count": 42,
596 | "metadata": {
597 | "collapsed": true
598 | },
599 | "outputs": [],
600 | "source": [
601 | "s=\"I don\\n like this\""
602 | ]
603 | },
604 | {
605 | "cell_type": "code",
606 | "execution_count": 43,
607 | "metadata": {},
608 | "outputs": [
609 | {
610 | "data": {
611 | "text/plain": [
612 | "'I don\\n like this'"
613 | ]
614 | },
615 | "execution_count": 43,
616 | "metadata": {},
617 | "output_type": "execute_result"
618 | }
619 | ],
620 | "source": [
621 | "s"
622 | ]
623 | },
624 | {
625 | "cell_type": "code",
626 | "execution_count": 44,
627 | "metadata": {},
628 | "outputs": [
629 | {
630 | "data": {
631 | "text/plain": [
632 | "False"
633 | ]
634 | },
635 | "execution_count": 44,
636 | "metadata": {},
637 | "output_type": "execute_result"
638 | }
639 | ],
640 | "source": [
641 | "s.isupper()"
642 | ]
643 | },
644 | {
645 | "cell_type": "code",
646 | "execution_count": 45,
647 | "metadata": {},
648 | "outputs": [
649 | {
650 | "data": {
651 | "text/plain": [
652 | "'I don\\n like this'"
653 | ]
654 | },
655 | "execution_count": 45,
656 | "metadata": {},
657 | "output_type": "execute_result"
658 | }
659 | ],
660 | "source": [
661 | "s"
662 | ]
663 | },
664 | {
665 | "cell_type": "code",
666 | "execution_count": 46,
667 | "metadata": {
668 | "collapsed": true
669 | },
670 | "outputs": [],
671 | "source": [
672 | "l = ['I','like','Python']"
673 | ]
674 | },
675 | {
676 | "cell_type": "code",
677 | "execution_count": 48,
678 | "metadata": {},
679 | "outputs": [
680 | {
681 | "data": {
682 | "text/plain": [
683 | "'I like Python'"
684 | ]
685 | },
686 | "execution_count": 48,
687 | "metadata": {},
688 | "output_type": "execute_result"
689 | }
690 | ],
691 | "source": [
692 | "' '.join(l)"
693 | ]
694 | },
695 | {
696 | "cell_type": "code",
697 | "execution_count": 49,
698 | "metadata": {
699 | "collapsed": true
700 | },
701 | "outputs": [],
702 | "source": [
703 | "s = 'this is ax nice ax place'"
704 | ]
705 | },
706 | {
707 | "cell_type": "code",
708 | "execution_count": 50,
709 | "metadata": {},
710 | "outputs": [
711 | {
712 | "data": {
713 | "text/plain": [
714 | "('this is ', 'ax', ' nice ax place')"
715 | ]
716 | },
717 | "execution_count": 50,
718 | "metadata": {},
719 | "output_type": "execute_result"
720 | }
721 | ],
722 | "source": [
723 | "s.partition('ax')"
724 | ]
725 | },
726 | {
727 | "cell_type": "code",
728 | "execution_count": 51,
729 | "metadata": {},
730 | "outputs": [
731 | {
732 | "data": {
733 | "text/plain": [
734 | "('this is ax nice ', 'ax', ' place')"
735 | ]
736 | },
737 | "execution_count": 51,
738 | "metadata": {},
739 | "output_type": "execute_result"
740 | }
741 | ],
742 | "source": [
743 | "s.rpartition('ax')"
744 | ]
745 | },
746 | {
747 | "cell_type": "code",
748 | "execution_count": 53,
749 | "metadata": {},
750 | "outputs": [
751 | {
752 | "name": "stdout",
753 | "output_type": "stream",
754 | "text": [
755 | "Help on built-in function split:\n",
756 | "\n",
757 | "split(...) method of builtins.str instance\n",
758 | " S.split(sep=None, maxsplit=-1) -> list of strings\n",
759 | " \n",
760 | " Return a list of the words in S, using sep as the\n",
761 | " delimiter string. If maxsplit is given, at most maxsplit\n",
762 | " splits are done. If sep is not specified or is None, any\n",
763 | " whitespace string is a separator and empty strings are\n",
764 | " removed from the result.\n",
765 | "\n"
766 | ]
767 | }
768 | ],
769 | "source": [
770 | "help(s.split)"
771 | ]
772 | },
773 | {
774 | "cell_type": "code",
775 | "execution_count": null,
776 | "metadata": {
777 | "collapsed": true
778 | },
779 | "outputs": [],
780 | "source": []
781 | }
782 | ],
783 | "metadata": {
784 | "kernelspec": {
785 | "display_name": "Python 3",
786 | "language": "python",
787 | "name": "python3"
788 | },
789 | "language_info": {
790 | "codemirror_mode": {
791 | "name": "ipython",
792 | "version": 3
793 | },
794 | "file_extension": ".py",
795 | "mimetype": "text/x-python",
796 | "name": "python",
797 | "nbconvert_exporter": "python",
798 | "pygments_lexer": "ipython3",
799 | "version": "3.5.0"
800 | }
801 | },
802 | "nbformat": 4,
803 | "nbformat_minor": 2
804 | }
805 |
--------------------------------------------------------------------------------
/TensorFlow 1.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import tensorflow as tf"
12 | ]
13 | },
14 | {
15 | "cell_type": "markdown",
16 | "metadata": {},
17 | "source": [
18 | "* The central unit of data in TensorFlow is the tensor. \n",
19 | "* A tensor consists of a set of primitive values shaped into an array of any number of dimensions. \n",
20 | "* A tensor's rank is its number of dimensions. \n",
21 | "* [\n",
22 | " [\n",
23 | " [1., 2., 3.]\n",
24 | " ], \n",
25 | " [\n",
26 | " [7., 8., 9.]\n",
27 | " ]\n",
28 | " ] # rank 3, shape [2,1,3]"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {},
34 | "source": [
35 | "## Computational Graph \n",
36 | "* A series of TensorFlow operations aranged into a graph of nodes.\n",
37 | "* Building computational graph\n",
38 | "* Running computational graph"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": 2,
44 | "metadata": {
45 | "collapsed": true
46 | },
47 | "outputs": [],
48 | "source": [
49 | "n1 = tf.constant(10.0, tf.float32)"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 3,
55 | "metadata": {
56 | "collapsed": true
57 | },
58 | "outputs": [],
59 | "source": [
60 | "n2 = tf.constant(5.0)"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 4,
66 | "metadata": {},
67 | "outputs": [
68 | {
69 | "data": {
70 | "text/plain": [
71 | ""
72 | ]
73 | },
74 | "execution_count": 4,
75 | "metadata": {},
76 | "output_type": "execute_result"
77 | }
78 | ],
79 | "source": [
80 | "n1"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": 5,
86 | "metadata": {
87 | "collapsed": true
88 | },
89 | "outputs": [],
90 | "source": [
91 | "sess = tf.Session()"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 6,
97 | "metadata": {},
98 | "outputs": [
99 | {
100 | "data": {
101 | "text/plain": [
102 | "[10.0, 5.0]"
103 | ]
104 | },
105 | "execution_count": 6,
106 | "metadata": {},
107 | "output_type": "execute_result"
108 | }
109 | ],
110 | "source": [
111 | "sess.run([n1,n2])"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": null,
117 | "metadata": {
118 | "collapsed": true
119 | },
120 | "outputs": [],
121 | "source": []
122 | }
123 | ],
124 | "metadata": {
125 | "kernelspec": {
126 | "display_name": "Python 3",
127 | "language": "python",
128 | "name": "python3"
129 | },
130 | "language_info": {
131 | "codemirror_mode": {
132 | "name": "ipython",
133 | "version": 3
134 | },
135 | "file_extension": ".py",
136 | "mimetype": "text/x-python",
137 | "name": "python",
138 | "nbconvert_exporter": "python",
139 | "pygments_lexer": "ipython3",
140 | "version": "3.5.0"
141 | }
142 | },
143 | "nbformat": 4,
144 | "nbformat_minor": 2
145 | }
146 |
--------------------------------------------------------------------------------
/Untitled.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": []
11 | }
12 | ],
13 | "metadata": {
14 | "kernelspec": {
15 | "display_name": "Python 3",
16 | "language": "python",
17 | "name": "python3"
18 | },
19 | "language_info": {
20 | "codemirror_mode": {
21 | "name": "ipython",
22 | "version": 3
23 | },
24 | "file_extension": ".py",
25 | "mimetype": "text/x-python",
26 | "name": "python",
27 | "nbconvert_exporter": "python",
28 | "pygments_lexer": "ipython3",
29 | "version": "3.5.0"
30 | }
31 | },
32 | "nbformat": 4,
33 | "nbformat_minor": 2
34 | }
35 |
--------------------------------------------------------------------------------
/Untitled1.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stdout",
10 | "output_type": "stream",
11 | "text": [
12 | "Hello World\n"
13 | ]
14 | }
15 | ],
16 | "source": [
17 | "print ('Hello World')"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 3,
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "data": {
27 | "text/plain": [
28 | "720196897275374032179360524346874191331262942291396047477297362604734300805947453614944491805199115327107862066"
29 | ]
30 | },
31 | "execution_count": 3,
32 | "metadata": {},
33 | "output_type": "execute_result"
34 | }
35 | ],
36 | "source": [
37 | "87498273498274982734982734982374982749827498274928749287498274 * 8230984092384092834092384092384092384092384982409"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": 4,
43 | "metadata": {
44 | "collapsed": true
45 | },
46 | "outputs": [],
47 | "source": [
48 | "i = 9283928392839283\n",
49 | "j = 983492839283928"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 5,
55 | "metadata": {},
56 | "outputs": [
57 | {
58 | "data": {
59 | "text/plain": [
60 | "int"
61 | ]
62 | },
63 | "execution_count": 5,
64 | "metadata": {},
65 | "output_type": "execute_result"
66 | }
67 | ],
68 | "source": [
69 | "type(i)"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": 6,
75 | "metadata": {},
76 | "outputs": [
77 | {
78 | "data": {
79 | "text/plain": [
80 | "int"
81 | ]
82 | },
83 | "execution_count": 6,
84 | "metadata": {},
85 | "output_type": "execute_result"
86 | }
87 | ],
88 | "source": [
89 | "type(j)"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 7,
95 | "metadata": {
96 | "collapsed": true
97 | },
98 | "outputs": [],
99 | "source": [
100 | "i = 'skjdksjdks'"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 8,
106 | "metadata": {},
107 | "outputs": [
108 | {
109 | "data": {
110 | "text/plain": [
111 | "'skjdksjdks'"
112 | ]
113 | },
114 | "execution_count": 8,
115 | "metadata": {},
116 | "output_type": "execute_result"
117 | }
118 | ],
119 | "source": [
120 | "i"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": 9,
126 | "metadata": {},
127 | "outputs": [
128 | {
129 | "data": {
130 | "text/plain": [
131 | "[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]"
132 | ]
133 | },
134 | "execution_count": 9,
135 | "metadata": {},
136 | "output_type": "execute_result"
137 | }
138 | ],
139 | "source": [
140 | "[ 2*i for i in range(10)]"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": 10,
146 | "metadata": {
147 | "collapsed": true
148 | },
149 | "outputs": [],
150 | "source": [
151 | "l = [('z',22323), ('a',1212), ('c',87382)]"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 11,
157 | "metadata": {},
158 | "outputs": [
159 | {
160 | "data": {
161 | "text/plain": [
162 | "[('a', 1212), ('z', 22323), ('c', 87382)]"
163 | ]
164 | },
165 | "execution_count": 11,
166 | "metadata": {},
167 | "output_type": "execute_result"
168 | }
169 | ],
170 | "source": [
171 | "sorted(l, key=lambda d: d[1])"
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": 17,
177 | "metadata": {},
178 | "outputs": [
179 | {
180 | "name": "stdout",
181 | "output_type": "stream",
182 | "text": [
183 | "Invalid\n"
184 | ]
185 | }
186 | ],
187 | "source": [
188 | "note = 500\n",
189 | "state = ''\n",
190 | "\n",
191 | "if note == 1000:\n",
192 | " print ('Invalid note')\n",
193 | "elif note == 500 and state=='new':\n",
194 | " print ('Valid')\n",
195 | "elif note == 500 and state!='new':\n",
196 | " print ('Invalid')\n",
197 | "else:\n",
198 | " print ('valid')"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": 18,
204 | "metadata": {
205 | "collapsed": true
206 | },
207 | "outputs": [],
208 | "source": [
209 | "a = 10\n",
210 | "b = 5"
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "execution_count": 21,
216 | "metadata": {},
217 | "outputs": [
218 | {
219 | "name": "stdout",
220 | "output_type": "stream",
221 | "text": [
222 | "5\n"
223 | ]
224 | }
225 | ],
226 | "source": [
227 | "if a > b:\n",
228 | " print (a-b)\n",
229 | "else:\n",
230 | " print (b-a)"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": 22,
236 | "metadata": {
237 | "collapsed": true
238 | },
239 | "outputs": [],
240 | "source": [
241 | "s = 'hello how are you'"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": 28,
247 | "metadata": {},
248 | "outputs": [
249 | {
250 | "name": "stdout",
251 | "output_type": "stream",
252 | "text": [
253 | "e\n",
254 | "o\n",
255 | "o\n",
256 | "a\n",
257 | "e\n",
258 | "o\n",
259 | "u\n"
260 | ]
261 | }
262 | ],
263 | "source": [
264 | "i = 0\n",
265 | "while i < len(s):\n",
266 | " if 'a' in s[i] or 'e' in s[i] or 'i' in s[i] or 'o' in s[i] or 'u' in s[i]:\n",
267 | " print (s[i])\n",
268 | " i = i + 1"
269 | ]
270 | },
271 | {
272 | "cell_type": "code",
273 | "execution_count": 29,
274 | "metadata": {},
275 | "outputs": [
276 | {
277 | "name": "stdout",
278 | "output_type": "stream",
279 | "text": [
280 | "hellllooo\n"
281 | ]
282 | }
283 | ],
284 | "source": [
285 | "print ('hellllooo')"
286 | ]
287 | },
288 | {
289 | "cell_type": "code",
290 | "execution_count": 30,
291 | "metadata": {},
292 | "outputs": [
293 | {
294 | "data": {
295 | "text/plain": [
296 | "238266012413048825122132443121433957892288456684832674859747591001842579975552"
297 | ]
298 | },
299 | "execution_count": 30,
300 | "metadata": {},
301 | "output_type": "execute_result"
302 | }
303 | ],
304 | "source": [
305 | "289342893472893479827498273489273498724897984798 * 823472833748274892749827349824"
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "execution_count": 31,
311 | "metadata": {
312 | "collapsed": true
313 | },
314 | "outputs": [],
315 | "source": [
316 | "l = 8327498247982374928472348729847"
317 | ]
318 | },
319 | {
320 | "cell_type": "code",
321 | "execution_count": 32,
322 | "metadata": {
323 | "collapsed": true
324 | },
325 | "outputs": [],
326 | "source": [
327 | "l = 'hdhfkfdj'"
328 | ]
329 | },
330 | {
331 | "cell_type": "code",
332 | "execution_count": null,
333 | "metadata": {
334 | "collapsed": true
335 | },
336 | "outputs": [],
337 | "source": []
338 | }
339 | ],
340 | "metadata": {
341 | "kernelspec": {
342 | "display_name": "Python 3",
343 | "language": "python",
344 | "name": "python3"
345 | },
346 | "language_info": {
347 | "codemirror_mode": {
348 | "name": "ipython",
349 | "version": 3
350 | },
351 | "file_extension": ".py",
352 | "mimetype": "text/x-python",
353 | "name": "python",
354 | "nbconvert_exporter": "python",
355 | "pygments_lexer": "ipython3",
356 | "version": "3.5.0"
357 | }
358 | },
359 | "nbformat": 4,
360 | "nbformat_minor": 2
361 | }
362 |
--------------------------------------------------------------------------------
/XML-Parsing.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from bs4 import BeautifulSoup"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 86,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "infile = open('/home/awantik/workspace/xml_parsing/books.xml')"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 87,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "contents = infile.read()"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 88,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "soup = BeautifulSoup(contents, 'xml')"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 50,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "titles = soup.find_all('title')"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 51,
51 | "metadata": {},
52 | "outputs": [
53 | {
54 | "data": {
55 | "text/plain": [
56 | "[The Cat in the Hat,\n",
57 | " Ender's Game,\n",
58 | " Prey]"
59 | ]
60 | },
61 | "execution_count": 51,
62 | "metadata": {},
63 | "output_type": "execute_result"
64 | }
65 | ],
66 | "source": [
67 | "titles"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 52,
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "authors = soup.find_all('author')"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": 53,
82 | "metadata": {},
83 | "outputs": [
84 | {
85 | "data": {
86 | "text/plain": [
87 | "[Dr. Seuss,\n",
88 | " Orson Scott Card,\n",
89 | " Michael Crichton]"
90 | ]
91 | },
92 | "execution_count": 53,
93 | "metadata": {},
94 | "output_type": "execute_result"
95 | }
96 | ],
97 | "source": [
98 | "authors"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": 54,
104 | "metadata": {},
105 | "outputs": [],
106 | "source": [
107 | "prices = soup.find_all('price')"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 55,
113 | "metadata": {},
114 | "outputs": [
115 | {
116 | "data": {
117 | "text/plain": [
118 | "[7.99, 8.99, 9.35]"
119 | ]
120 | },
121 | "execution_count": 55,
122 | "metadata": {},
123 | "output_type": "execute_result"
124 | }
125 | ],
126 | "source": [
127 | "prices"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": 56,
133 | "metadata": {},
134 | "outputs": [],
135 | "source": [
136 | "import pandas as pd\n",
137 | "df = pd.DataFrame(columns=['Name','Author','Price'])\n",
138 | "\n",
139 | "for i in range(len(titles)):\n",
140 | " d = pd.DataFrame({'Name':[titles[i].get_text()] ,'Author':[authors[i].get_text()], 'Price':[prices[i].get_text()]})\n",
141 | " df = df.append(d, ignore_index=True)"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": 57,
147 | "metadata": {},
148 | "outputs": [
149 | {
150 | "data": {
151 | "text/html": [
152 | "\n",
153 | "\n",
166 | "
\n",
167 | " \n",
168 | " \n",
169 | " | \n",
170 | " Name | \n",
171 | " Author | \n",
172 | " Price | \n",
173 | "
\n",
174 | " \n",
175 | " \n",
176 | " \n",
177 | " 0 | \n",
178 | " The Cat in the Hat | \n",
179 | " Dr. Seuss | \n",
180 | " 7.99 | \n",
181 | "
\n",
182 | " \n",
183 | " 1 | \n",
184 | " Ender's Game | \n",
185 | " Orson Scott Card | \n",
186 | " 8.99 | \n",
187 | "
\n",
188 | " \n",
189 | " 2 | \n",
190 | " Prey | \n",
191 | " Michael Crichton | \n",
192 | " 9.35 | \n",
193 | "
\n",
194 | " \n",
195 | "
\n",
196 | "
"
197 | ],
198 | "text/plain": [
199 | " Name Author Price\n",
200 | "0 The Cat in the Hat Dr. Seuss 7.99\n",
201 | "1 Ender's Game Orson Scott Card 8.99\n",
202 | "2 Prey Michael Crichton 9.35"
203 | ]
204 | },
205 | "execution_count": 57,
206 | "metadata": {},
207 | "output_type": "execute_result"
208 | }
209 | ],
210 | "source": [
211 | "df"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": 93,
217 | "metadata": {},
218 | "outputs": [],
219 | "source": [
220 | "books = soup.find_all('book')"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": 94,
226 | "metadata": {},
227 | "outputs": [
228 | {
229 | "name": "stdout",
230 | "output_type": "stream",
231 | "text": [
232 | "{'pub': 'abc'}\n",
233 | "*\n",
234 | "{'pub': 'def'}\n",
235 | "{'pub': 'jkl'}\n",
236 | "*\n"
237 | ]
238 | }
239 | ],
240 | "source": [
241 | "df = pd.DataFrame(columns=['Name','Author','Price','Discount','Currency'])\n",
242 | "\n",
243 | "for book in books:\n",
244 | " print (book.attrs)\n",
245 | " r = {}\n",
246 | " r['Name'] = [book.find('title').get_text()]\n",
247 | " r['Author'] = [book.find('author').get_text()]\n",
248 | " r['Price'] = [book.find('price').get_text()]\n",
249 | " try:\n",
250 | " r['Discount'] = [book.find('discount').get_text()]\n",
251 | " attrs = book.find('discount').attrs\n",
252 | " if 'currency' in attrs:\n",
253 | " r['Currency'] = attrs['currency']\n",
254 | " print ('*')\n",
255 | " except:\n",
256 | " pass\n",
257 | " \n",
258 | " d = pd.DataFrame(r)\n",
259 | " df = df.append(d, ignore_index=True)"
260 | ]
261 | },
262 | {
263 | "cell_type": "code",
264 | "execution_count": 80,
265 | "metadata": {},
266 | "outputs": [
267 | {
268 | "data": {
269 | "text/html": [
270 | "\n",
271 | "\n",
284 | "
\n",
285 | " \n",
286 | " \n",
287 | " | \n",
288 | " Author | \n",
289 | " Currency | \n",
290 | " Discount | \n",
291 | " Name | \n",
292 | " Price | \n",
293 | "
\n",
294 | " \n",
295 | " \n",
296 | " \n",
297 | " 0 | \n",
298 | " Dr. Seuss | \n",
299 | " eu | \n",
300 | " 10 | \n",
301 | " The Cat in the Hat | \n",
302 | " 7.99 | \n",
303 | "
\n",
304 | " \n",
305 | " 1 | \n",
306 | " Orson Scott Card | \n",
307 | " NaN | \n",
308 | " NaN | \n",
309 | " Ender's Game | \n",
310 | " 8.99 | \n",
311 | "
\n",
312 | " \n",
313 | " 2 | \n",
314 | " Michael Crichton | \n",
315 | " rs | \n",
316 | " 10 | \n",
317 | " Prey | \n",
318 | " 9.35 | \n",
319 | "
\n",
320 | " \n",
321 | "
\n",
322 | "
"
323 | ],
324 | "text/plain": [
325 | " Author Currency Discount Name Price\n",
326 | "0 Dr. Seuss eu 10 The Cat in the Hat 7.99\n",
327 | "1 Orson Scott Card NaN NaN Ender's Game 8.99\n",
328 | "2 Michael Crichton rs 10 Prey 9.35"
329 | ]
330 | },
331 | "execution_count": 80,
332 | "metadata": {},
333 | "output_type": "execute_result"
334 | }
335 | ],
336 | "source": [
337 | "df"
338 | ]
339 | },
340 | {
341 | "cell_type": "code",
342 | "execution_count": 67,
343 | "metadata": {},
344 | "outputs": [
345 | {
346 | "data": {
347 | "text/plain": [
348 | "[10, 10]"
349 | ]
350 | },
351 | "execution_count": 67,
352 | "metadata": {},
353 | "output_type": "execute_result"
354 | }
355 | ],
356 | "source": [
357 | "soup.find_all('discount')"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": null,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": []
366 | }
367 | ],
368 | "metadata": {
369 | "kernelspec": {
370 | "display_name": "Python 3",
371 | "language": "python",
372 | "name": "python3"
373 | },
374 | "language_info": {
375 | "codemirror_mode": {
376 | "name": "ipython",
377 | "version": 3
378 | },
379 | "file_extension": ".py",
380 | "mimetype": "text/x-python",
381 | "name": "python",
382 | "nbconvert_exporter": "python",
383 | "pygments_lexer": "ipython3",
384 | "version": "3.7.1"
385 | }
386 | },
387 | "nbformat": 4,
388 | "nbformat_minor": 2
389 | }
390 |
--------------------------------------------------------------------------------
/a.py:
--------------------------------------------------------------------------------
# Tiny demo script: bind two integers and print their sum.
a, b = 1, 2
print(a + b)
--------------------------------------------------------------------------------
/a.txt:
--------------------------------------------------------------------------------
1 | 1 2 3
--------------------------------------------------------------------------------
/advanced_features = [sdsd:
--------------------------------------------------------------------------------
# Feature columns selected for the house-price model, one per line for
# easy diffing when columns are added or removed.
advanced_features = [
    'bedrooms',
    'bathrooms',
    'sqft_living',
    'sqft_lot',
    'floors',
    'zipcode',
    'condition',
    'grade',
    'waterfront',
    'view',
    'sqft_above',
    'sqft_basement',
    'yr_built',
    'yr_renovated',
    'lat',
    'long',
    'sqft_living15',
    'sqft_lot15',
]
--------------------------------------------------------------------------------
/asyncio-code.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 |
async def say_after(delay, what):
    """Sleep *delay* seconds without blocking the event loop, then print *what*."""
    await asyncio.sleep(delay)
    print(what)
7 |
async def main():
    """Run two say_after coroutines concurrently and report wall-clock time."""
    # create_task schedules the coroutine on the running loop immediately,
    # so the two sleeps below overlap instead of running back-to-back.
    task1 = asyncio.create_task(
        say_after(1, 'hello'))

    task2 = asyncio.create_task(
        say_after(2, 'world'))

    print(f"started at {time.strftime('%X')}")

    # Wait until both tasks are completed (should take
    # around 2 seconds.)
    await task1
    await task2

    print(f"finished at {time.strftime('%X')}")
23 |
24 |
25 | asyncio.run(main())
26 |
--------------------------------------------------------------------------------
/books.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | The Cat in the Hat
5 | Dr. Seuss
6 | 7.99
7 | 10
8 |
9 |
10 | Ender's Game
11 | Orson Scott Card
12 | 8.99
13 |
14 |
15 | Prey
16 | Michael Crichton
17 | 9.35
18 | 10
19 |
20 |
21 |
--------------------------------------------------------------------------------
/classes-code.py:
--------------------------------------------------------------------------------
1 | #Class is user defined data-type
2 | #Creating an empty class
3 | #Class doesn't occupy memory
4 | '''
5 | class Student:
6 | pass
7 |
8 | def __init__(self):
9 | print 'Calling now'
10 |
11 | #s1 is ab object of empty class
12 | s1 = Student()
13 | s1.name = "awantik"
14 | print s1
15 | s2 = Student()
16 | print s2
17 | s2.age = 99
18 | print s1.__dict__
19 | print s2.__dict__
20 | '''
21 |
22 | class Student:
23 | pass
24 |
25 | def __init__(self,loc,age):
26 | self.loc = loc
27 | self.age = age
28 | print 'Calling now'
29 |
30 | #s1 is ab object of empty class
31 | s1 = Student('Delhi',88)
32 | print s1
33 | s2 = Student('Mumbai',99)
34 | print s2
35 | print s1.__dict__
36 | print s2.__dict__
37 |
38 |
--------------------------------------------------------------------------------
/classes-more-code.py:
--------------------------------------------------------------------------------
1 | # '''
2 | # w = ['abc','def','ghi']
3 | # l = []
4 | # for e in w:
5 | # l.append(e)
6 | # print set(l)
7 | # '''
8 |
9 | # l = [1,2,3]
10 | # m = [4,5,6]
11 | # x = 'abcd'
12 | # l.extend(x)
13 | # for i in l:
14 | # print i,
15 | '''
16 | class MyStuff:
17 |
18 | INFO = 10
19 |
20 | @staticmethod
21 | def staticfunction():
22 | print 'This is static function'
23 |
24 |
25 | MyStuff.staticfunction()
26 | print MyStuff.INFO
27 | '''
28 | '''
29 | class Student:
30 | def __init__(self,name,age):
31 | self.name = name
32 | self.age = age
33 |
34 | def __eq__(self,obj):
35 | if self.name == obj.name and self.age == obj.age:
36 | return True
37 | else:
38 | return False
39 |
40 | def __hash__(self):
41 | return hash(self.name)+hash(self.age)
42 |
43 | def __repr__(self):
44 | return self.name + ' ' + str(self.age)
45 |
46 | s1 = Student('awi',44)
47 | s2 = Student('awi',54)
48 |
49 | print s1 == s2
50 | print cmp(s1,s2)
51 |
52 |
53 | db = {s1:'awi',s2:'bwi'}
54 | print db[s1]
55 |
56 | class Info:
57 | def __init__(self,name):
58 | self.name = name
59 |
60 | def __repr__(self):
61 | return self.name
62 |
63 | def __eq__(self,obj):
64 | if self.name == obj.name:
65 | return True
66 | else:
67 | return False
68 |
69 | def __hash__(self):
70 | return hash(self.name)
71 |
72 | s = set([Info('awi'),Info('awi')])
73 | print s
74 | '''
75 | '''
76 | class Base:
77 | def __init__(self,name,age):
78 | self.__name = name
79 | self._age = age
80 |
81 | def func(self,somename):
82 | self.__name = somename
83 |
84 | b = Base('awantik',44)
85 | b.func('awi')
86 | #print b._Base__name
87 | print b._age
88 | '''
89 | '''
90 | class Base:
91 | def __init__(self,name):
92 | self.name = name
93 | print 'Base'
94 |
95 | def enjoy(self):
96 | print 'Enjoy in base'
97 |
98 |
99 | class Derived(Base):
100 | def __init__(self,name,age):
101 | self.age = age
102 | Base.__init__(self,name)
103 | print 'Derived'
104 |
105 | def enjoy(self):
106 | #Base.enjoy(self)
107 | print 'Enjoy in derived'
108 |
109 | b = Base('awa')
110 | b = Derived('awi',66)
111 | #print d.__dict__
112 | #d.enjoy()
113 | b.enjoy()
114 | '''
115 |
116 | '''
117 | from abc import ABCMeta,abstractmethod
118 |
119 | class MyABC:
120 | __metaclass__ = ABCMeta
121 |
122 | @abstractmethod
123 | def absmethod(self):
124 | pass
125 |
126 | class DerABC(MyABC):
127 | def absmethod(self):
128 | print 'Hello World'
129 |
130 | b = DerABC()
131 | b.absmethod()
132 |
133 | print type(b) == MyABC
134 | print isinstance(b,MyABC)
135 | print isinstance(b,DerABC)
136 | '''
137 | '''
138 | class Base(object):
139 | def __init__(self,name):
140 | self.name = name
141 | print 'Base'
142 |
143 | def enjoy(self):
144 | print 'Enjoy in base'
145 |
146 |
147 | class Derived(Base):
148 | def __init__(self,name,age):
149 | self.age = age
150 | super(Derived,self).__init__(name)
151 | print 'Derived'
152 |
153 | def enjoy(self):
154 | #Base.enjoy(self)
155 | print 'Enjoy in derived'
156 |
157 | #b = Base('awa')
158 | b = Derived(name='awi',age=66)
159 | #print d.__dict__
160 | #d.enjoy()
161 | b.enjoy()
162 | '''
163 |
class Base1:
    """First base of the multiple-inheritance demo; stores a name."""
    def __init__(self,name):
        self.name = name
        print 'Base1'

    def enjoy(self):
        # Overridden in Derived; also invoked explicitly by Base2.enjoy.
        print 'Enjoy in base1'
171 |
172 |
class Base2:
    """Second base of the demo; stores an age and chains into Base1.enjoy."""
    def __init__(self,age):
        self.age = age
        print 'Base2'

    def enjoy(self):
        # NOTE(review): calls Base1.enjoy directly, so this method only
        # works on instances whose class also derives from Base1.
        Base1.enjoy(self)
        print 'Enjoy in base2'
181 |
class Derived(Base1,Base2):
    """Multiple inheritance: initializes both bases with explicit calls."""
    def __init__(self,name,age):
        # Explicit base-class constructor calls (no super() chaining here).
        Base1.__init__(self,name)
        Base2.__init__(self,age)
        print 'Derived'

    def enjoy(self):
        # Delegates up the chain: Derived -> Base2 -> Base1.
        Base2.enjoy(self)
        print 'Enjoy in derived'
191 |
#b = Base('awa')
# Exercise the hierarchy: constructor prints Base1, Base2, Derived, then
# enjoy() prints the chained output base1 -> base2 -> derived.
b = Derived('awi',66)
#print d.__dict__
#d.enjoy()
b.enjoy()
--------------------------------------------------------------------------------
/classes.py:
--------------------------------------------------------------------------------
class ClassName:
    'Demo class: one class attribute, two instance attributes, one method.'
    # Class-level attribute shared by all instances (deleted later in the demo).
    var1 = 0

    def __init__(self, name, id):
        self.name = name
        self.id= id

    def method1(self):

        print 'this is method 1'
12 |
13 |
# Instantiate the demo class, call its method, read the class attribute.
obj1 = ClassName("ABC", 2)
obj1.method1()
print ClassName.var1


# Attributes can be added to and removed from an instance at runtime.
obj1.age = 4
obj1.address= 'street'
del obj1.age

# Class attributes can be deleted too.
del ClassName.var1

# Reflection helpers: hasattr / getattr / setattr / delattr.
print (hasattr(obj1, 'age'))
print getattr(obj1, 'address')
setattr(obj1, 'age', 'xyz')
delattr(obj1, 'age')
print (hasattr(obj1, 'age'))

# Built-in class metadata attributes.
print ClassName.__doc__
print ClassName.__name__
print ClassName.__bases__
print ClassName.__dict__
print ClassName.__module__
36 |
class ChildClass (ClassName):
    'Inheriting class ClassName'
    var2 = 5
    # Double-underscore name is mangled to _ChildClass__secretNum externally.
    __secretNum = 6
    def __init__(self):

        # NOTE(review): does not call ClassName.__init__, so instances
        # lack the 'name'/'id' attributes set by the parent constructor.
        print ' this is child class constructor'

    def method2(self, name):
        print name

    def method1(self):
        # Overrides ClassName.method1.
        print 'this is child class method1'
50 |
51 |
52 |
53 |
# Exercise the child class; the last line reads the name-mangled attribute.
c = ChildClass()
c.method2("XYZ")
c.method1()
print c._ChildClass__secretNum
58 |
59 |
60 |
class Vehicle :
    """Minimal class with a single demonstration method."""

    def calcSpeed(self) :

        print 'hi'
66 |
67 |
# Instantiate and call the demo method.
c= Vehicle()
c.calcSpeed()
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
--------------------------------------------------------------------------------
/cloud-tag.py:
--------------------------------------------------------------------------------
1 | from pytagcloud import create_tag_image, make_tags
2 | from pytagcloud.lang.counter import get_tag_counts
3 |
# Sample text to visualize as a tag cloud (backslash continues the literal).
YOUR_TEXT = "A tag cloud is a visual representation for text data, typically\
used to depict keyword metadata on websites, or to visualize free form text."

# Count word frequencies and build tag objects; largest font size is 120 pt.
tags = make_tags(get_tag_counts(YOUR_TEXT), maxsize=120)

# Render the tags to a 900x600 PNG using the 'Lobster' font.
create_tag_image(tags, 'cloud_large.png', size=(900, 600), fontname='Lobster')
--------------------------------------------------------------------------------
/code:
--------------------------------------------------------------------------------
1 | Let's start by importing the TensorFlow libraries for our implementation:
2 |
3 | import tensorflow as tf
4 | import numpy as np
5 | from tensorflow.examples.tutorials.mnist import input_data
6 |
7 | Set the following parameters, that indicate the number of samples to consider respectively for the training phase (128) and then the test phase (256):
8 |
9 | batch_size = 128
10 | test_size = 256
11 | We define the following parameter, the value is 28 because a MNIST image is 28 pixels in height and width:
12 |
13 | img_size = 28
14 | Regarding the number of classes, the value 10 means that we'll have one class for each of 10 digits:
15 |
16 | num_classes = 10
17 | A placeholder variable, X, is defined for the input images. The data type for this tensor is set to float32 and the shape is set to [None, img_size, img_size, 1], where None means that the tensor may hold an arbitrary number of images:
18 |
19 | X = tf.placeholder("float", [None, img_size, img_size, 1])
20 | Then we set another placeholder variable, Y, for the true labels associated with the images that were input data in the placeholder variable X.
21 |
22 | The shape of this placeholder variable is [None, num_classes] which means it may hold an arbitrary number of labels and each label is a vector of the length num_classes which is 10 in this case:
23 |
24 | Y = tf.placeholder("float", [None, num_classes])
25 | We collect the mnist data which will be copied into the data folder:
26 |
27 | mnist = input_data.read_data_sets("data/")
28 | We build the datasets for training (trX, trY) and testing the network (teX, teY):
29 |
30 | trX, trY, teX, teY = mnist.train.images,\
31 | mnist.train.labels,\
32 | mnist.test.images,\
33 | mnist.test.labels
34 | The trX and teX image sets must be reshaped according the input shape:
35 |
36 | trX = trX.reshape(-1, img_size, img_size, 1)
37 | teX = teX.reshape(-1, img_size, img_size, 1)
38 | We shall now proceed to define the network's weights.
39 |
40 | The init_weights function builds new variables in the given shape and initializes the network's weights with random values:
41 |
42 | def init_weights(shape):
43 | return tf.Variable(tf.random_normal(shape, stddev=0.01))
44 |
45 | Each neuron of the first convolutional layer is convoluted to a small subset of the input tensor, with a dimension of 3x3x1, while the value 32 is just the number of feature maps we are considering for this first layer. The weight w is then defined:
46 |
47 | w = init_weights([3, 3, 1, 32])
48 | The number of inputs is then increased to 32, which means that each neuron of the second convolutional layer is convoluted to 3x3x32 neurons of the first convolution layer. The w2 weight is:
49 |
50 | w2 = init_weights([3, 3, 32, 64])
51 | The value 64 represents the number of obtained output features.
52 |
53 | The third convolutional layer is convoluted to 3x3x64 neurons of the previous layer, while 128 are the resulting features:
54 |
55 | w3 = init_weights([3, 3, 64, 128])
56 | The fourth layer is fully-connected. It receives 128x4x4 inputs, while the output is equal to 625:
57 |
58 | w4 = init_weights([128 * 4 * 4, 625])
59 | The output layer receives 625 inputs, while the output is the number of classes:
60 |
61 | w_o = init_weights([625, num_classes])
62 | Note that these initializations are not actually done at this point; they are merely being defined in the TensorFlow graph:
63 |
64 | p_keep_conv = tf.placeholder("float")
65 | p_keep_hidden = tf.placeholder("float")
66 | It's time to define the network model. As we did for the network's weight definition, it will be a function.
67 |
68 | It receives as input, the X tensor, the weights tensors, and the dropout parameters for convolution and fully-connected layers:
69 |
70 | def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
71 | The tf.nn.conv2d() function executes the TensorFlow operation for the convolution. Note that the strides are set to 1 in all dimensions.
72 |
73 | Indeed, the first and last stride must always be 1, because the first is for the image number and the last is for the input channel. The padding parameter is set to 'SAME' which means the input image is padded with zeroes so that the size of the output is the same:
74 |
75 | conv1 = tf.nn.conv2d(X, w,strides=[1, 1, 1, 1],
76 | padding='SAME')
77 | Then we pass the conv1 layer to a relu layer. It calculates the max(x, 0) function for each input pixel x, adding some non-linearity to the formula and allows us to learn more complicated functions:
78 |
79 | conv1 = tf.nn.relu(conv1)
80 | The resulting layer is then pooled by the tf.nn.max_pool operator:
81 |
82 | conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1]
83 | ,strides=[1, 2, 2, 1],
84 | padding='SAME')
85 | It is a 2x2 max-pooling, which means that we are considering 2x2 windows and select the largest value in each window. Then we move two pixels to the next window.
86 |
87 | We try to reduce the overfitting, via the tf.nn.dropout() function, passing the conv1 layer and the p_keep_conv probability value:
88 |
89 | conv1 = tf.nn.dropout(conv1, p_keep_conv)
90 | As you can see, the next two convolutional layers, conv2, conv3, are defined in the same way as conv1:
91 |
92 | conv2 = tf.nn.conv2d(conv1, w2,
93 | strides=[1, 1, 1, 1],
94 | padding='SAME')
95 | conv2 = tf.nn.relu(conv2)
96 | conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1],
97 | strides=[1, 2, 2, 1],
98 | padding='SAME')
99 | conv2 = tf.nn.dropout(conv2, p_keep_conv)
100 |
101 | conv3=tf.nn.conv2d(conv2, w3,
102 | strides=[1, 1, 1, 1]
103 | ,padding='SAME')
104 |
105 | conv3_a = tf.nn.relu(conv3)
106 | Two fully-connected layers are added to the network. The input of the first FC_layer is the convolution layer from the previous convolution:
107 |
108 | FC_layer = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1],
109 | strides=[1, 2, 2, 1],
110 | padding='SAME')
111 |
112 | FC_layer = tf.reshape(FC_layer, [-1,w4.get_shape().as_list()[0]])
113 | A dropout function is again used to reduce the overfitting:
114 |
115 | FC_layer = tf.nn.dropout(FC_layer, p_keep_conv)
116 | The output layer receives the input as FC_layer and the w4 weight tensor. A relu and a dropout operator are respectively applied:
117 |
118 | output_layer = tf.nn.relu(tf.matmul(FC_layer, w4))
119 | output_layer = tf.nn.dropout(output_layer, p_keep_hidden)
120 | The result variable is a vector of length 10 for determining which one of the 10 classes for the input image belongs to:
121 |
122 | result = tf.matmul(output_layer, w_o)
123 | return result
124 | The cross-entropy is the performance measure we used in this classifier. The cross-entropy is a continuous function that is always positive and is equal to zero, if the predicted output exactly matches the desired output. The goal of this optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the variables of the network layers.
125 |
126 | TensorFlow has a built-in function for calculating the cross-entropy. Note that the function calculates the softmax internally so we must use the output of py_x directly:
127 |
128 | py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)
129 | Y_ = tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y)
130 | Now that we have defined the cross-entropy for each classified image, we have a measure of how well the model performs on each image individually. But to use the cross-entropy to guide the optimization of the network's variables, we need a single scalar value, so we simply take the average of the cross-entropy for all the classified images:
131 |
132 | cost = tf.reduce_mean(Y_)
133 | To minimize the evaluated cost, we must define an optimizer. In this case, we adopt the implemented RMSPropOptimizer function which is an advanced form of gradient descent.
134 |
135 | The RMSPropOptimizer function implements the RMSProp algorithm, that is an unpublished, adaptive learning rate method proposed by Geoff Hinton in Lecture 6e of his coursera class.
136 |
137 | You can find Geoff Hinton's course at https://www.coursera.org/learn/neural-networks.
138 | The RMSPropOptimizer function also divides the learning rate by an exponentially decaying average of squared gradients. Hinton suggests setting the decay parameter to 0.9, while a good default value for the learning rate is 0.001:
139 |
140 | optimizer = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
141 | Basically, the common Stochastic Gradient Descent (SGD) algorithm has a problem in that learning rates must scale with 1/T to get convergence, where T is the iteration number. RMSProp tries to get around this by automatically adjusting the step size so that the step is on the same scale as the gradients, as the average gradient gets smaller, the coefficient in the SGD update gets bigger to compensate.
142 |
143 | An interesting reference about this algorithm can be found here:
144 | http://www.cs.toronto.edu/%7Etijmen/csc321/slides/lecture_slides_lec6.pdf.
145 | Finally, we define predict_op that is the index with the largest value across dimensions from the output of the mode:
146 |
147 | predict_op = tf.argmax(py_x, 1)
148 | Note that optimization is not performed at this point. Nothing is calculated at all; we'll just add the optimizer object to the TensorFlow graph for later execution.
149 |
150 | We now come to define the network's running session: There are 55,000 images in the training set, so it takes a long time to calculate the gradient of the model using all these images. Therefore, we'll use a small batch of images in each iteration of the optimizer. If your computer crashes or becomes very slow because you run out of RAM, then you can try and lower this number, but you may then need to perform more optimization iterations.
151 |
152 | Now we can proceed to implement a TensorFlow session:
153 |
154 | with tf.Session() as sess:
155 | sess.run(tf.global_variables_initializer())
156 | for i in range(100):
157 | We get a batch of training examples, the training_batch tensor now holds a subset of images and corresponding labels:
158 |
159 | training_batch = zip(range(0, len(trX), batch_size),
160 | range(batch_size,
161 | len(trX)+1,
162 | batch_size))
163 | Put the batch into feed_dict with the proper names for placeholder variables in the graph. We run the optimizer using this batch of training data, TensorFlow assigns the variables in a feed to the placeholder variables and then runs the optimizer:
164 |
165 | for start, end in training_batch:
166 | sess.run(optimizer, feed_dict={X: trX[start:end],
167 | Y: trY[start:end],
168 | p_keep_conv: 0.8,
169 | p_keep_hidden: 0.5})
170 | At the same time, we get a shuffled batch of test samples:
171 |
172 | test_indices = np.arange(len(teX))
173 | np.random.shuffle(test_indices)
174 | test_indices = test_indices[0:test_size]
175 | For each iteration, we display the accuracy evaluated on the batch set:
176 |
177 | print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
178 | sess.run
179 | (predict_op,
180 | feed_dict={X: teX[test_indices],
181 | Y: teY[test_indices],
182 | p_keep_conv: 1.0,
183 | p_keep_hidden: 1.0})))
184 | Training a network can take several hours depending on how much computational resources it uses. The results on my machine are as follows:
185 |
186 | Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
187 | Successfully extracted to train-images-idx3-ubyte.mnist 9912422 bytes.
188 | Loading ata/train-images-idx3-ubyte.mnist
189 | Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
190 | Successfully extracted to train-labels-idx1-ubyte.mnist 28881 bytes.
191 | Loading ata/train-labels-idx1-ubyte.mnist
192 | Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
193 | Successfully extracted to t10k-images-idx3-ubyte.mnist 1648877 bytes.
194 | Loading ata/t10k-images-idx3-ubyte.mnist
195 | Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
196 | Successfully extracted to t10k-labels-idx1-ubyte.mnist 4542 bytes.
197 | Loading ata/t10k-labels-idx1-ubyte.mnist
198 | (0, 0.95703125)
199 | (1, 0.98046875)
200 | (2, 0.9921875)
201 | (3, 0.99609375)
202 | (4, 0.99609375)
203 | (5, 0.98828125)
204 | (6, 0.99609375)
205 | (7, 0.99609375)
206 | (8, 0.98828125)
207 | (9, 0.98046875)
208 | (10, 0.99609375)
209 | .
210 | (90, 1.0)
211 | (91, 0.9921875)
212 | (92, 0.9921875)
213 | (93, 0.99609375)
214 | (94, 1.0)
215 | (95, 0.98828125)
216 | (96, 0.98828125)
217 | (97, 0.99609375)
218 | (98, 1.0)
219 | (99, 0.99609375)
220 | After the training iterations, the model has an accuracy of about 99% — not bad!
--------------------------------------------------------------------------------
/conditional.py:
--------------------------------------------------------------------------------
1 | '''
2 | a = 10
3 | if a == 10:
4 | print a
5 | print 'Great World'
6 | else:
7 | print 'Not 10'
8 |
9 |
10 | '''
11 | '''
12 | data = raw_input('Enter data ')
13 | print type(data)
14 |
15 | if not data.isdigit():
16 | pass
17 | else:
18 | print int(data) + 1000
19 |
20 | '''
21 | ##Looping
22 |
23 |
# Iterate a list of ints, printing one element per line (Python 2 print).
l = [1,3,2,2,2,3,98]

for e in l:
    print e

l = ['this','is','good']

# The trailing comma keeps the words on a single line.
for c in l:
    print c,
34 |
--------------------------------------------------------------------------------
/control-code.py:
--------------------------------------------------------------------------------
# Walk a string with enumerate, printing index/character pairs (Python 2).
s = 'abcdefghijkl'

for i,c in enumerate(s):
    print i,c
    print 'hello world'

print 'great'

# Strings support positional indexing.
print s[3]
--------------------------------------------------------------------------------
/create_xml.py:
--------------------------------------------------------------------------------
import xml.etree.ElementTree as xml

def createXML(filename):
    """Write a minimal XML document, <users><user /></users>, to *filename*."""
    # Build the tree top-down: the root plus one empty child element.
    root = xml.Element("users")
    root.append(xml.Element("user"))

    # Serialize through a binary-mode handle, as ElementTree expects.
    document = xml.ElementTree(root)
    with open(filename, "wb") as out:
        document.write(out)


if __name__ == "__main__":
    createXML("test.xml")
16 |
--------------------------------------------------------------------------------
/create_xml2.py:
--------------------------------------------------------------------------------
import xml.etree.ElementTree as xml

def createXML(filename):
    """Write <users><user><id>123</id><name>Shubham</name></user></users> to *filename*."""
    root = xml.Element("users")
    user = xml.SubElement(root, "user")

    # SubElement creates and attaches in one step; .text sets the element body.
    xml.SubElement(user, "id").text = "123"
    xml.SubElement(user, "name").text = "Shubham"

    with open(filename, "wb") as out:
        xml.ElementTree(root).write(out)


if __name__ == "__main__":
    createXML("test.xml")
22 |
--------------------------------------------------------------------------------
/csv_reader.py:
--------------------------------------------------------------------------------
import csv

# Read input.csv and echo every row; csv.reader yields each row as a list of
# strings (Python 2 print statements).
with open('input.csv') as fd:
    st = csv.reader(fd,delimiter=',')
    print st
    for row in st:
        print row
8 |
9 |
--------------------------------------------------------------------------------
/decorator.py:
--------------------------------------------------------------------------------
def p_decorate(func):
    """Decorator: print the wrapped function's result, then return a marker string."""
    def func_wrapper(name):
        # BUG FIX: the original passed the hard-coded 'Mac' instead of the
        # caller's argument; forward `name` so decoration is transparent.
        print(func(name))
        return "Great Stuff - p"
    return func_wrapper

def q_decorate(func):
    """Same shape as p_decorate; used to demonstrate decorator stacking order."""
    def func_wrapper(name):
        print(func(name))
        return "Great Stuff - q"
    return func_wrapper

# Decorators apply bottom-up: get_text is wrapped by q_decorate, then p_decorate,
# so the final call returns p's marker string.
@p_decorate
@q_decorate
def get_text(name):
    return "Hello World " + name

print(get_text("John"))
--------------------------------------------------------------------------------
/dict-to-dict.py:
--------------------------------------------------------------------------------
# dicttoxml (third-party) serializes a dict to an XML byte string;
# custom_root names the top-level element ('hostel' here).
import dicttoxml

doc = dicttoxml.dicttoxml({'a':5,'c':'d'},custom_root='hostel')
print doc
--------------------------------------------------------------------------------
/dict.py:
--------------------------------------------------------------------------------
1 | #dict = {'name' : 'abc', 'school' : 'dps', 'age' : 20}
2 |
3 | # print dict['age']
4 |
5 | # print dict
6 |
7 |
8 | # dict['name'] = 'def'
9 |
10 | # print dict
11 |
12 | # dict.clear()
13 |
14 | #del(dict['name'])
15 |
16 | #print dict
17 |
18 | # dict2 = {'name' : 'def', 'age' : 7}
19 |
20 | # print len(dict2)
21 |
22 | # dict3= dict.copy()
23 | # print dict3
24 |
25 | # a= [1, 'abc', 'ghi']
26 |
27 | # dict1 = {}
28 |
29 | # dict1=dict1.fromkeys(a)
30 |
31 | # print dict1
32 |
33 | # dict2 = {'abc': [1,2]}
34 | # print dict2
35 |
# NOTE(review): 'dict' shadows the builtin dict type from here on.
dict = {'name' : 'abc', 'school' : 'dps', 'age' : 20}

# has_key and viewitems below are Python 2 only (removed in Python 3).
print dict.has_key('sdf')

print dict.items()

print str(dict)

print dict.keys()

print dict.values()

# setdefault returns the existing value for 'name'; it does not overwrite it.
print dict.setdefault('name', 'none')

print dict.viewitems()

# Values can themselves be dicts (nesting).
dict1 = {'name' : {'gr':'hi', 'hj':6}}




# NOTE(review): 'list' also shadows a builtin here.
list= dict1['name']
print list

59 |
60 |
--------------------------------------------------------------------------------
/exception.py:
--------------------------------------------------------------------------------
1 | # try :
2 |
3 | # f = open("test.txt", "w")
4 | # f.write("Hello this is exception example")
5 |
6 | # except IOError :
7 | # print "Error in writing"
8 |
9 | # else :
10 | # print "Successfully Written"
11 |
12 | # f.close()
13 |
14 |
15 | # try :
16 | # f = open("test.txt", "r")
17 | # f.write("hello")
18 |
19 | # except (IOError , ZeroDivisionError):
20 | # print "Error : cant write into the file"
21 |
22 |
23 | # else :
24 | # print "Successfully written"
25 | # f.close()
26 |
27 |
28 |
29 |
30 | # try :
31 | # f = open("test.txt", "r")
32 | # f.write("hello")
33 | # a=5
34 | # b=5/0
35 | # except (IOError , ZeroDivisionError):
36 | # print "Error : cant write into the file"
37 | # print "Trying to divide by zero"
38 |
39 |
40 | # else :
41 | # print "Successfully written"
42 | # f.close()
43 |
44 |
45 |
46 |
47 | # try :
48 | # f = open("test.txt", "r")
49 | # f.write("hello")
50 | # a=5
51 | # b=5/0
52 | # except IOError:
53 | # print "Error : cant write into the file "
54 | # print "Trying to divide by zero"
55 | # finally :
56 | # print "Successfully written"
57 | # f.close()
58 |
59 |
60 | # try :
61 | # f = open("test.txt", "r")
62 | # f.write("hello")
63 | # except IOError, argument :
64 | # print "Error : cant write into the file : ", argument
65 | # print "Trying to divide by zero"
66 | # finally :
67 | # print "Successfully written"
68 | # f.close()
69 |
70 |
71 |
72 | # try :
73 | # raise NameError("Hello this is name error exception")
74 | # except NameError:
75 | # print "Exception 1"
76 | # raise
77 |
78 |
79 |
80 | # class NError (Exception):
81 | # def _init_(self, arg):
82 | # self.arg=arg
83 |
84 | # try :
85 | # raise NError(5)
86 |
87 | # except NError , e:
88 |
89 | # print e.arg
90 |
91 |
92 | # class MyError(Exception):
93 | # def __init__(self, value):
94 | # self.value = value
95 |
96 | # try:
97 | # raise MyError("hello")
98 | # except MyError , e:
99 | # print 'My exception occurred, value:', e.value
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
--------------------------------------------------------------------------------
/f.py:
--------------------------------------------------------------------------------
import urllib
import urllib2
from bs4 import BeautifulSoup
#open search result for viewing
#import webbrowser


# Build the query string (?item=Python) for the course-search endpoint.
search = urllib.urlencode({'item':'Python'})

url = 'http://139.59.1.147/search-courses/'
full_url = url + '?' + search
# urllib2 (Python 2) performs the HTTP GET; blocks until the response arrives.
response = urllib2.urlopen(full_url)


# Save the raw HTML locally, then parse the same bytes with BeautifulSoup.
with open("search-page.html","w") as f:
    data = response.read()
    f.write(data)

soup = BeautifulSoup(data, "html.parser")
print soup.title
#webbrowser.open("search-page.html")
22 |
23 |
--------------------------------------------------------------------------------
/file:
--------------------------------------------------------------------------------
1 | this is a nice place
2 | but i m fine with better options
3 | why cant thing b good
--------------------------------------------------------------------------------
/file.json:
--------------------------------------------------------------------------------
1 | {
2 | "glossary": {
3 | "title": "example glossary",
4 | "GlossDiv": {
5 | "title": 55,
6 | "GlossList": {
7 | "GlossEntry": {
8 | "ID": "SGML",
9 | "SortAs": "SGML",
10 | "GlossTerm": "Standard Generalized Markup Language",
11 | "Acronym": "null",
12 | "Abbrev": "ISO 8879:1986",
13 | "GlossDef": {
14 | "para": "A meta-markup language, used to create markup languages such as DocBook.",
15 | "GlossSeeAlso": ["GML", "XML"]
16 | },
17 | "GlossSee": "markup"
18 | }
19 | }
20 | }
21 | }
22 | }
--------------------------------------------------------------------------------
/file.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | elements
4 | more elements
5 |
6 |
7 | element as well
8 |
9 |
10 | cool elements
11 | too more elements
12 |
13 |
14 | blue elements
15 | ultra elements
16 |
17 |
--------------------------------------------------------------------------------
/file2.json:
--------------------------------------------------------------------------------
1 | {"menu": {
2 | "id": "file",
3 | "value": "File",
4 | "popup": {
5 | "menuitem": [
6 | {"value": "New", "onclick": "CreateNewDoc()"},
7 | {"value": "Open", "onclick": "OpenDoc()"},
8 | {"value": "Close", "onclick": "CloseDoc()"}
9 | ]
10 | }
11 | }}
--------------------------------------------------------------------------------
/file_oper.py:
--------------------------------------------------------------------------------
# Copy only the words shorter than 4 characters from in_file to out_file,
# preserving the one-output-line-per-input-line structure.
# FIX: the original opened out_file without ever closing it, so the final
# buffer could be lost; both files are now managed by `with`.
with open('out_file', 'w') as wr, open('in_file') as src:
    for line in src:
        # keep words of length < 4, in their original order
        short_words = [word for word in line.split() if len(word) < 4]
        wr.write(' '.join(short_words) + '\n')
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/file_operations.py:
--------------------------------------------------------------------------------
'''
wr = open('wr_file','w')
for line in open('file'):
    l = line.split()
    l.sort()
    wr.write(' '.join(l))
    wr.write('\n')

'''
# Write the lines of 'file' into 'rev_file' in reverse order.
tot_list = []
# FIX: the original never closed 'wr'; `with` guarantees flush + close.
with open('rev_file','w') as wr:
    for line in open('file'):
        # inserting at the front reverses the order as we read
        tot_list.insert(0,line.strip())

    for elem in tot_list:
        wr.write(elem+'\n')
18 |
--------------------------------------------------------------------------------
/find_word.py:
--------------------------------------------------------------------------------
import sys

# Read all of stdin (blocks until EOF) and echo it back.
# NOTE(review): 'input' shadows the builtin of the same name.
input = sys.stdin.read()
sys.stdout.write('Received: %s'%input)
5 |
--------------------------------------------------------------------------------
/find_word_argparse.py:
--------------------------------------------------------------------------------
1 | import argparse,sys
2 |
3 | parser = argparse.ArgumentParser()
4 |
5 | parser.add_argument("--match", help="data", required=True)
6 |
7 | params = parser.parse_args()
8 |
9 | input = params.match
10 |
11 | data_input = sys.stdin.read()
12 |
13 | data_input_l = data_input.split('\n')
14 | data_l = list(filter(lambda e: input in e,data_input_l))
15 | data = '\n'.join(data_l)
16 |
17 | sys.stdout.write('Received: %s'%data)
18 |
19 |
--------------------------------------------------------------------------------
/func-code.py:
--------------------------------------------------------------------------------
1 |
# A function returning multiple comma-separated values really returns one tuple.
def func(args):
    print args
    print 'hello world'
    return args, 77

a = func('hello')   # a == ('hello', 77)
print a
9 |
10 | '''
11 | #default arguments
12 | def newFunc(arg='abc',arg1):
13 | print 'Hello, How are you ',arg, arg1
14 |
15 | newFunc(44)
16 | newFunc(55)
17 | newFunc(66,'Awi')
18 |
19 |
20 | def func(name,age,location):
21 | print name,age,location
22 |
23 | func(age=33,location='mumbai',name='awa')
24 | '''
25 | '''
26 | def func(*vargs):
27 | print vargs[1] * vargs[3]
28 |
29 | func('abc',44,'def',90)
30 | #func('xyz')
31 | '''
32 | #l = [1,2,3,4,5]
33 | '''
34 | def revpacking(name,age,location):
35 | print name,age,location
36 |
37 |
38 | l = ['awi',77,'blore']
39 | revpacking(*l)
40 | '''
41 | '''
42 | def fun(name,*subjects):
43 | print 'hello %s' %name
44 | res = ''
45 | for i in subjects:
46 | res = res + ' ' + str(i)
47 | print 'thanks for attending subjects,', res
48 |
49 | fun('abc',34,67,23)
50 | fun('def',55,66)
51 | '''
52 | '''
53 | def func(name,age,location):
54 | print name,age,location
55 |
56 | db = {'name':'abc','age':33, 'location':'bangalore'}
57 |
58 | func(**db)
59 |
60 | def kwargsfunc(**kwargs):
61 | if 'age' in kwargs:
62 | print kwargs['age']
63 |
64 | kwargsfunc(name='awantik',age=20)
65 | '''
66 | '''
67 | def kfunc(name,*marks,**location):
68 | print name
69 | print marks
70 | print location
71 |
72 | #kfunc('awantik', 33,44,55, comp='abc',age=44)
73 | kfunc('awantik', 33,44,55,34,43,56)
74 | '''
75 | '''
76 | print 'a' in 'abcd'
77 | print 'a' in ['a','b','c']
78 | db = {'a':'awantik', 'b':'bawa', 'c':'cat'}
79 | if 'a' in db:
80 | print 'yess'
81 | else:
82 | print 'not there'
83 | '''
84 | '''
85 | l = range(10,100,5)
86 | print len(l), max(l), sum(l)
87 |
88 | for d in l[5:]:
89 | print d
90 |
91 | for x in range(len(l)):
92 | l[x] -=2
93 |
94 | print l
95 |
96 | for idx,data in enumerate(l):
97 | print idx,data
98 |
99 | x = enumerate(l)
100 | print x.next()
101 | print x.next()
102 | '''
103 | '''
104 | def func():
105 | i = 0
106 | while True:
107 | i += 1
108 | yield i
109 |
110 | f = func()
111 | print f
112 | print f.next()
113 | print f.next()
114 | print f.next()
115 | print f.next()
116 | '''
117 |
118 | #lambda - Annonymous functions
119 | '''
120 | f = lambda x: x + 5
121 | g = lambda x,y: x + y
122 |
123 | print f(8)
124 | print g(9,19)
125 | l = [ ['awa',33], ['bwa',11] ]
126 |
127 | print sorted(l,cmp= lambda x,y: cmp(x[0],y[0]))
128 | '''
129 |
130 | #Function that generates function
131 |
def funGenerator(num):
    """Return a function that raises its argument to the power *num* (a closure)."""
    def _power(x):
        return x ** num
    return _power
134 |
135 | f = funGenerator(2) # lambda x: x**2
136 | g = funGenerator(3) # lambda x: x**3
137 | h = funGenerator(4) #
138 |
139 | print f(4)
140 | print g(4)
141 | print h(4)
142 |
143 |
144 | #Functional Programming
145 | '''
146 | def fun(x):
147 | return x*2
148 |
149 | l = [1,2,3,3,4,4,5,56,9]
150 |
151 | print map(float,l)
152 | print map(fun,l)
153 | print map(lambda x:x**2,l)
154 |
155 | print filter(lambda x: x%2 ==0,l)
156 |
157 | print reduce(lambda x,y: x+y,l)
158 | '''
159 | '''
160 | class Base:
161 | pass
162 |
163 | b = Base()
164 | b.name = 'awantik'
165 | b.gender = 'M'
166 |
167 | print b.__dict__
168 | print b.name
169 | '''
170 | '''
171 | class Base:
172 |
173 | address = 'abc'
174 |
175 | def __init__(self,name,gender):
176 | self.__name = name
177 | self.gender = gender
178 |
179 | def someinfo(self,location):
180 | #self.location
181 | print self
182 |
183 | @staticmethod
184 | def staticSomeInfo(name,age):
185 | Base.address = name
186 |
187 | b = Base('awantik','male')
188 | print b.__dict__
189 |
190 | b.someinfo('bangalore')
191 |
192 | print b.address
193 |
194 | Base.staticSomeInfo('awantik',44)
195 | print b._Base__name
196 |
197 | class Derived(Base):
198 | def Happy(self):
199 | print 'happy time'
200 |
201 |
202 | d = Derived('abc',87)
203 | d.Happy()
204 | d.someinfo('bangalore')
205 | '''
206 | '''
207 | class Base:
208 | def __init__(self,**kwargs):
209 | for k,v in kwargs.items():
210 | print k,v
211 | if k == 'age':
212 | self.age = v
213 | if k == 'name':
214 | self.name = v
215 |
216 | b = Base(name='Awantik',age=40)
217 | print b.__dict__
218 | b = Base(name='newguy')
219 | print b.__dict__
220 |
221 | class Derived(Base):
222 | pass
223 |
224 | d = Derived()
225 |
226 | a = []
227 | if type(a) == list:
228 | print 'List it is'
229 |
230 | print isinstance(d,Base)
231 | '''
232 |
class Person:
    """Simple value object compared field-wise by (name, age)."""
    def __init__(self,name,age):
        self.name = name
        self.age = age

    def __eq__(self,obj):
        # Equality is by value, so distinct instances with equal fields compare equal.
        return self.name == obj.name and self.age == obj.age

    def __le__(self, obj):
        # FIX: the original line was the bare 'def __le__()' — a syntax error
        # (no colon, no body, no self). Ordering here is lexicographic on
        # (name, age); adjust if a different ordering was intended.
        return (self.name, self.age) <= (obj.name, obj.age)
242 |
p1 = Person('awantik',79)
p2 = Person('awantik',79)

#p2 = p1

# Distinct objects have different ids even though their fields are equal.
print id(p1), id(p2)

# __eq__ makes this a value comparison, so 'same' prints.
if p1 == p2:
    print 'same'
252 |
253 | #print p1.__dict__
254 | #print p2.__dict__
255 |
256 |
257 |
258 |
259 |
260 |
261 |
262 |
263 |
--------------------------------------------------------------------------------
/func.py:
--------------------------------------------------------------------------------
# NOTE(review): the '*str' parameter shadows the builtin str; it collects the
# extra positional arguments as a tuple.
def func (list1, *str):




    list1.append('rt')   # mutates the caller's list in place
    print list1
    list1=[10,50]        # rebinding only changes the local name, not the caller's list
    print str
    s1=[50,70]
    print m              # relies on global 'm' being bound before func is called

    return list1

def func2 ():
    print 'hello'



func2()

m=[40,90]
s= ['hi', 23]
lis = func(s,20,30,40)
print s                  # shows the in-place append: ['hi', 23, 'rt']
print lis
func2()
--------------------------------------------------------------------------------
/functional_prog.py:
--------------------------------------------------------------------------------
1 | '''
2 | #pass function to function
3 | def my_func(f,data):
4 | return f(data)
5 |
6 | print my_func(lambda x: x*2, 5)
7 |
8 | #Sort list based on number
9 | l = [ ('awi',33), ('zwi',22), ('bqi',11) ]
10 | print sorted(l, key=lambda x: x[1])
11 | print sorted(l, cmp=lambda x,y: cmp(x[0],y[0]))
12 | '''
13 | '''
14 | l = [1,2,3,4,5,65,7,7]
15 | print map(lambda x: x*x, l)
16 |
17 | l = [ ('awi',33), ('zwi',22), ('bqi',11) ]
18 | print map( lambda x: x[0], l)
19 |
20 | db = {'a':1,'b':2,'c':3,'d':4}
21 | print db.values()
22 | print map(lambda x:x[1], db.items())
23 |
24 | s = '1 2 3 4 5'
25 | print map( int ,s.split())
26 | '''
l = [ (1,2), (4,6), (2,9), (5,9), (2,3) ]

#print filter(lambda x: x[0] > 5 or x[1] > 5, l)
#print filter(lambda x: max(x) > 5, l)

m = [1,2,3,4]
def fun(x,y):
    print x,y


# reduce folds the pair list left-to-right, summing component-wise;
# for l above the result is (14, 29).
print reduce(lambda x,y: (x[0] +y[0], x[1]+y[1]),l)
#print reduce(fun,l)
39 |
40 |
41 |
--------------------------------------------------------------------------------
/functional_prog2.py:
--------------------------------------------------------------------------------
1 | '''
2 | x = lambda y : y + y
3 |
4 | def fun(x):
5 | return x + x
6 |
7 | print x(7)
8 |
9 | y = lambda x,y: x+y
10 |
11 | print y(4,6)
12 | '''
13 |
14 | #Function which returns a function
15 | '''
16 | def funcGenerator(x):
17 | return lambda y: y**x
18 |
19 | f = funcGenerator(3) # f is a function which takes one arg & returns cube of it
20 | g = funcGenerator(4) # g is a function which takes one arg & return ^4 of it
21 |
22 | print f(5)
23 | print g(5)
24 | '''
25 | '''
26 | l = [ [4,32], [1,2], [33,1] ]
27 | print sorted(l,key=lambda x: x[1])
28 | print sorted(l, cmp=(lambda x,y:cmp(x[0],y[0])))
29 | '''
#List comprehension: doubles of the even numbers in range(5) -> [0, 4, 8]
l = [ x*2 for x in range(5) if x%2 == 0]
print l
--------------------------------------------------------------------------------
/functions.py:
--------------------------------------------------------------------------------
1 | '''
2 | #Multiple returns
3 | def func(a,b):
4 | return a + b, a * b
5 |
6 | a,b = func(5,10)
7 | print 'hello',a,'world',b
8 | print 'hello %d great %d string %s' % (a,b,'great')
9 |
10 | #Packing or Variable arguments function
11 | def vargsFunc(*args):
12 | for a in args:
13 | print a,
14 |
15 | vargsFunc(1,2,2,'hello',[1,2,2,2])
16 | '''
17 | '''
18 | #default arguments
19 | def func(age,loc='Bangalore',name='Awantik'):
20 | print age,loc,name
21 |
22 | func(87)
23 | '''
24 | '''
25 | #UnPacking
26 | l = ['awantik','zekelabs','python']
27 |
28 | def packFunc(name,company,subject):
29 | print name,company,subject
30 |
31 | packFunc(*l)
32 | '''
33 |
34 | #Key words based Arguments
35 | '''
36 | def fun(name,address,place):
37 | print name,address,place
38 |
39 | fun(place='bangalore',name='awi',address='office')
40 | '''
41 | '''
42 | db = {'name':'awi','age':90}
43 | def func(name,age):
44 | print name,age
45 |
46 | func(**db)
47 |
48 | '''
49 |
# **kwargs packs arbitrary keyword arguments into a dict.
def func(**kwargs):
    print kwargs

func(name='awi',age=40)
func(loc='bangalore',price=700)
--------------------------------------------------------------------------------
/g:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/generators.py:
--------------------------------------------------------------------------------
1 | #Function having yield returns generator
2 | #Using a generator we can iterate through iterable data structure
3 | '''
4 | def gen_func():
5 | i = 0
6 | while True:
7 | #return i
8 | yield i
9 | i += 1
10 |
11 | f = gen_func() #Return spl. object
12 |
13 | print f.next()
14 | print f.next()
15 | print f.next()
16 | print f.next()
17 |
18 | for i in range(10):
19 | print f.next()
20 | '''
21 |
22 | #list Comprehension
23 | '''
24 | l = [x*x for x in range(10)]
25 | print l
26 | '''
27 | '''
28 | import random
29 |
30 | #random.randint(0,len(m)-1)
31 |
32 | m = ['a','b','c','d','e']
33 | data = [ m[random.randint(0,len(m)-1)] for i in range(100)]
34 | print data
35 | '''
36 |
##Decorators
def awi(tag):
    """Decorator factory: print *tag* once at decoration time, leave the
    function unchanged.

    FIX: the original awi(f) returned None, so '@awi("name")' evaluated
    None(hello) and raised TypeError as soon as hello was defined. A
    parameterized decorator must return a decorator.
    """
    print(tag)
    def decorate(func):
        return func
    return decorate

@awi("name")
def hello():
    print('hello ')

hello()
51 |
52 |
53 |
--------------------------------------------------------------------------------
/hello.py:
--------------------------------------------------------------------------------
# Parenthesized form: prints identically as a Python 2 statement or a Python 3 call.
print('hello world')
--------------------------------------------------------------------------------
/in_file:
--------------------------------------------------------------------------------
1 | hello how are you
2 | this is a nice place to live
3 | but i m not sure of things
--------------------------------------------------------------------------------
/input.csv:
--------------------------------------------------------------------------------
1 | 1,awantik,55
2 | 2,awi,66
3 | 3,bwi,100
--------------------------------------------------------------------------------
/json-parser.py:
--------------------------------------------------------------------------------
#Use case of json - exchange data / info among servers
#json & xml are data exchanging mechanisms

import json

with open('file2.json') as fd:
    #x = fd.read()
    doc = json.loads(fd.read()) #Converts a string of json to dictionary

#print doc
print(type(doc))
# FIX: doc['menu'][''] raised KeyError — file2.json's "menu" object has no ''
# key. Access an existing field instead.
print(doc['menu']['id'])
13 |
14 |
--------------------------------------------------------------------------------
/jsonfile.json:
--------------------------------------------------------------------------------
1 | {"menu": {
2 | "id": "file",
3 | "value": "File",
4 | "popup": {
5 | "menuitem": [
6 | {"value": "New", "onclick": "CreateNewDoc()"},
7 | {"value": "Open", "onclick": "OpenDoc()"},
8 | {"value": "Close", "onclick": "CloseDoc()"}
9 | ]
10 | }
11 | }}
--------------------------------------------------------------------------------
/jsonreader-basic.py:
--------------------------------------------------------------------------------
import json

# Recursively print every scalar key/value in a (possibly nested) dict.
def printMenuItem(db):
    for k,v in db.items():
        if isinstance(v,dict):
            printMenuItem(v)
        else:
            print k, ' : ', v

# json.loads hook: called for every integer literal, which arrives as a string;
# this one deliberately skews each int by +1 to show the hook in action.
def parseInt(x):
    print 'Hello' + x
    return int(x)+1

# json.loads hook for the constants NaN / Infinity / -Infinity.
def parseConst(x):
    print 'Cost ' + x
    return x

with open('file.json') as fd:
    doc = json.loads(fd.read(),parse_int=parseInt,parse_constant=parseConst)

printMenuItem(doc)
--------------------------------------------------------------------------------
/jsonreader.py:
--------------------------------------------------------------------------------
import json

# Dump the parsed document; json.loads returns a dict for a JSON object.
def printMenuItem(db):
    print db



with open('file.json') as fd:
    doc = json.loads(fd.read())

printMenuItem(doc)
--------------------------------------------------------------------------------
/kmeans-code.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | from linear_algebra import squared_distance, vector_mean, distance
3 | import math, random
4 | import matplotlib.image as mpimg
5 | import matplotlib.pyplot as plt
6 |
class KMeans:
    """performs k-means clustering"""
    #Constructor
    def __init__(self, k):
        self.k = k # number of clusters
        self.means = None # means of clusters

    def classify(self, input):
        """return the index of the cluster closest to the input"""
        return min(range(self.k),
                   key=lambda i: squared_distance(input, self.means[i]))

    def train(self, inputs):
        # Lloyd-style iteration: assign points to nearest mean, recompute means.
        # NOTE(review): the 'progress < 2' guard caps this at 2 passes, so it
        # may stop before convergence — confirm that is intended.
        self.means = random.sample(inputs, self.k)
        assignments = None

        progress = 0
        while progress < 2:
            progress = progress + 1
            print "iteration : {0}".format(progress)

            print "--a--"
            # Find new assignments
            new_assignments = map(self.classify, inputs)
            print "--b--"

            # If no assignments have changed, we're done.
            if assignments == new_assignments:
                return

            # Otherwise keep the new assignments,
            assignments = new_assignments

            # ...and recompute each cluster's mean from its assigned points.
            print "--c--"
            for i in range(self.k):
                i_points = [p for p, a in zip(inputs, assignments) if a == i]
                # avoid divide-by-zero if i_points is empty
                if i_points:
                    self.means[i] = vector_mean(i_points)
47 |
def squared_clustering_errors(inputs, k):
    """finds the total squared error from k-means clustering the inputs"""
    model = KMeans(k)
    model.train(inputs)
    labels = map(model.classify, inputs)

    # total squared distance of each point to its assigned cluster mean
    return sum(squared_distance(point, model.means[label])
               for point, label in zip(inputs, labels))
57 |
def plot_squared_clustering_errors(plt):
    # Elbow plot: total squared error as a function of k.
    # NOTE(review): relies on a global 'inputs' (bound in the __main__ block);
    # calling this before that assignment raises NameError.
    ks = range(1, len(inputs) + 1)
    errors = [squared_clustering_errors(inputs, k) for k in ks]

    plt.plot(ks, errors)
    plt.xticks(ks)
    plt.xlabel("k")
    plt.ylabel("total squared error")
    plt.show()
68 |
69 | #
70 | # using clustering to recolor an image
71 | #
72 |
def recolor_image(input_file, k):
    """Reduce the image's colors to k values: cluster pixels with k-means, then
    repaint every pixel with its cluster's mean color."""
    img = mpimg.imread(input_file)

    print "1"
    # Create list/array of pixels of the images
    pixels = [pixel for row in img for pixel in row]
    print "2"

    #Init KMeans with number of clusters, calls constructors
    clusterer = KMeans(k) #k -> number of clusters
    print "3"

    #KMEANS object being passed with all pixels
    clusterer.train(pixels) # this might take a while
    print "4"
    def recolor(pixel):
        cluster = clusterer.classify(pixel) # index of the closest cluster
        return clusterer.means[cluster] # mean of the closest cluster

    # Rebuild the image row by row with the quantized colors.
    new_img = [[recolor(pixel) for pixel in row]
               for row in img]

    print "5"
    plt.imshow(new_img)
    plt.axis('off')
    plt.show()
100 |
101 | #
102 | # hierarchical clustering
103 | #
104 |
def is_leaf(cluster):
    """A cluster is a leaf when it holds exactly one element (a 1-tuple)."""
    size = len(cluster)
    return size == 1
108 |
def get_children(cluster):
    """returns the two children of this cluster if it's a merged cluster;
    raises an exception if this is a leaf cluster"""
    # a 1-tuple is a leaf (same test as is_leaf, inlined)
    if len(cluster) == 1:
        raise TypeError("a leaf cluster has no children")
    return cluster[1]
116 |
def get_values(cluster):
    """returns the value in this cluster (if it's a leaf cluster)
    or all the values in the leaf clusters below it (if it's not)"""
    # leaf: the 1-tuple already contains the value
    if len(cluster) == 1:
        return cluster
    # merged cluster: children live in cluster[1]; collect recursively
    collected = []
    for child in cluster[1]:
        collected.extend(get_values(child))
    return collected
126 |
def cluster_distance(cluster1, cluster2, distance_agg=min):
    """finds the aggregate distance between elements of cluster1
    and elements of cluster2

    distance_agg selects the linkage: min -> single-link, max -> complete-link.
    """
    return distance_agg([distance(input1, input2)
                         for input1 in get_values(cluster1)
                         for input2 in get_values(cluster2)])
133 |
def get_merge_order(cluster):
    """Merged clusters report their stored merge order (first element of the
    2-tuple); leaves sort last by reporting infinity."""
    return float('inf') if len(cluster) == 1 else cluster[0]
139 |
def bottom_up_cluster(inputs, distance_agg=min):
    """Agglomerative clustering: repeatedly merge the two closest clusters
    until only one remains, recording merge order as we go."""
    # start with every input a leaf cluster / 1-tuple
    clusters = [(input,) for input in inputs]

    while len(clusters) > 1:
        # consider every unordered pair and pick the closest
        # (indexing the pair avoids the Python-2-only tuple-parameter lambda)
        pairs = [(cluster1, cluster2)
                 for i, cluster1 in enumerate(clusters)
                 for cluster2 in clusters[:i]]
        c1, c2 = min(pairs,
                     key=lambda pair: cluster_distance(pair[0], pair[1], distance_agg))

        # drop the merged pair from the working list
        clusters = [c for c in clusters if c != c1 and c != c2]

        # merge them, tagging the merge with the number of clusters left
        merged_cluster = (len(clusters), [c1, c2])
        clusters.append(merged_cluster)

    # the single remaining cluster is the root of the hierarchy
    return clusters[0]
163 |
def generate_clusters(base_cluster, num_clusters):
    """Cut the hierarchy: repeatedly unmerge the most recently merged cluster
    until num_clusters clusters remain."""
    clusters = [base_cluster]

    while len(clusters) < num_clusters:
        # smallest merge order == most recently merged
        latest = min(clusters, key=get_merge_order)
        # replace it by its two children (i.e., unmerge it)
        clusters = [c for c in clusters if c != latest]
        clusters.extend(get_children(latest))

    return clusters
179 |
if __name__ == "__main__":

    inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]

    random.seed(0) # so you get the same results as me
    clusterer = KMeans(3)
    clusterer.train(inputs)
    print "3-means:"
    print clusterer.means
    print

    # re-seed so the 2-cluster run starts from the same random state
    random.seed(0)
    clusterer = KMeans(2)
    clusterer.train(inputs)
    print "2-means:"
    print clusterer.means
    print

    print "errors as a function of k"

    for k in range(1, len(inputs) + 1):
        print k, squared_clustering_errors(inputs, k)
    print


    print "bottom up hierarchical clustering"

    base_cluster = bottom_up_cluster(inputs)
    print base_cluster

    print
    print "three clusters, min:"
    for cluster in generate_clusters(base_cluster, 3):
        print get_values(cluster)

    print
    print "three clusters, max:"
    # complete-link variant: merge by the farthest pair distance
    base_cluster = bottom_up_cluster(inputs, max)
    for cluster in generate_clusters(base_cluster, 3):
        print get_values(cluster)

    recolor_image('Sample5.png', 2) # 2 is number of clusters
--------------------------------------------------------------------------------
/l2:
--------------------------------------------------------------------------------
1 | l2
2 | ['good', 'god', 'great']
3 |
4 | l
5 | [1, 2, 3, 3, 4, 77, 'good god']
6 | l.extend(l2)
7 | l
8 | [1, 2, 3, 3, 4, 77, 'good god', 'good', 'god', 'great']
9 |
10 |
11 |
12 | l2
13 | ['good', 'god', 'great']
14 |
15 |
16 | l
17 | [1, 2, 3, 3, 4, 77, 'good god', 'good', 'god', 'great']
18 | l.append(l2)
19 | l
20 | [1, 2, 3, 3, 4, 77, 'good god', 'good', 'god', 'great', ['good', 'god', 'great']]
21 |
22 |
23 | l[-1]
24 | ['good', 'god', 'great']
25 | l[::-1]
26 | [['good', 'god', 'great'], 'great', 'god', 'good', 'good god', 77, 4, 3, 3, 2, 1]
27 | l
28 | [1, 2, 3, 3, 4, 77, 'good god', 'good', 'god', 'great', ['good', 'god', 'great']]
29 |
30 |
31 | l
32 | [1, 2, 3, 3, 4, 77, 'good god', 'good', 'god', 'great', ['good', 'god', 'great']]
33 |
34 | * insert - Add entity at a particular position.
35 | * remove - removes by values. Only the first occurrence.Don't return anything
36 | * pop - remove by index. Returns the data
37 | * sort - Sorts data
38 |
39 | l = [1,2,3,3,7,8,-3,5]
40 | l.sort()
41 | l
42 | [-3, 1, 2, 3, 3, 5, 7, 8]
43 |
44 | l.sort(reverse=True)
45 |
46 |
47 | l
48 | [8, 7, 5, 3, 3, 2, 1, -3]
49 |
50 | * reverse - reverses the list
51 | s = 'this is my country and i love it'
52 | s[::-1]
53 | 'ti evol i dna yrtnuoc ym si siht'
54 | l = s.split()
55 | l.reverse()
56 | l
57 | ['it', 'love', 'i', 'and', 'country', 'my', 'is', 'this']
58 | ' '.join(l)
59 | 'it love i and country my is this'
60 |
61 |
62 |
63 | s = 'ssdkajskajs dfdf'
64 | len(s)
65 | 16
66 | l = [1,2,3,77,99,44]
67 | len(l)
68 | 6
69 | l = [1,2,3,4,5,5555]
70 | sum(l)
71 | 5570
72 | max(l)
73 | 5555
74 | min(l)
75 | 1
76 | l = ['hello world','good guy']
77 | len(l[0])
78 |
--------------------------------------------------------------------------------
/list_programs.py:
--------------------------------------------------------------------------------
# Python 2 practice exercises on lists and dictionaries.
# Each solved exercise is parked inside a triple-quoted string literal so
# that only one snippet at a time is enabled; as shipped, nothing executes.
# NOTE(review): print-statement / maketrans syntax means the snippets only
# run under Python 2.

# Replace every vowel with a space using a translation table.
'''
from string import maketrans
s = 'hello world this is a nice place'

mapping = maketrans('aeiou',' '*5)
print s.translate(mapping)
'''
# Count occurrences of `search`, both as a bare int and inside sublists.
'''
l = [ [1,2,3], 3, [4,2,3], [3,2,1], [4,5,6], 3, [7,8,9] ,5]

c = 0
search = 3

for k in l:
    if isinstance(k,list):
        c += k.count(search)
    if isinstance(k,int) and k == search:
        c += 1
print c
'''
'''
l = [ [1,'great',2], [9,'hello',4], 'great']
'''

#Dictionary
#1. Iterate key/value pairs; dump values and keys.
'''
db = {'a':1,'b':2,'c':3}
for k,v in db.items():
    print k,v

print db.values()
print db.keys()
'''
# Print only the even elements.
'''
l = [1,2,3,3,1,1,4]

for e in l:
    if e % 2 == 0:
        print e
'''
'''
### Find count of 3 among all sublists
s = 0
l = [ [1,2],3, [4,3,5,6], [7,8,3,9] ]
for e in l:
    if isinstance(e,list):
        s += e.count(3)
    if e == 3:
        s += 1
print s
'''
'''
###Break the list into two lists, one containing 0th element & other 1st
l = [ ['a',44], ['b',22], ['c',66]]

l1 =[]
l2 = []

for e in l:
    l1.append(e[0])
    l2.append(e[1])
print l1
print l2
'''
'''
###Create a list with max of each sublist
maxlist = []
l = [ [-1,3,1,222], [7,99,3], [55,11,74,3,99,3,3,3,3] ]
for e in l:
    maxlist.append(max(e))
print maxlist

'''
###Remove all the words starting with #
# NOTE(review): the snippet below mutates l while iterating it, so two
# consecutive '#' words would leave the second one behind; iterate a copy
# (for word in l[:]) or build a new list instead.
'''
s = "this is a #great world but i m not #sure of staying here for #long, don't know about u"
l = s.split()
for word in l:
    if word.startswith('#'):
        l.remove(word)

print l
'''
###Accessing list by index
'''
l = [1,2,3,3,3,3]
for i in range(len(l)):
    l[i] += l[i]
print l
'''
###Copy list - Deep Copy, Shallow Copy
# NOTE(review): terminology in the snippet is non-standard -- `m = l` is
# aliasing (two names, one list) and `list(l)` is a *shallow* copy; a true
# deep copy needs copy.deepcopy.
'''
l = [3,4,5]
m = l #shallow copy - l & m points to same memory
#l[0] = 999
print m

g = list(l) # deep copy - seperate copy
l[0] = 999
print g,m
'''
'''
l = [1,2,3, [2,3,4]]
m = list(l)
print m
l[-1][-1] = 'zzzz'
print m,l
'''
'''
###Deep copy for list of lists - deep copy donot copy directly
l = [ [1,2,3], [4,5,6], [7,8,9] ]

m = []
for e in l:
    m.append(list(e))

print m
l [1][1] = 'zzz'
print 'L: ', l
print 'M: ', m

'''
125 |
--------------------------------------------------------------------------------
/lists.py:
--------------------------------------------------------------------------------
# Scratch file exploring Python list operations; most experiments are kept
# commented out.  Active statements use Python 2 print syntax.
l = ['sddf', 4, 5.7, 36, 78]

#print l

m = ['sddf', 4, 5.6, 47]

# print m[0]

# m[0]=8
# print m

# if 7 not in m:
#     print "yes"

# del(m[0])
# print m

# print l[:-1]

# del(l[1:3])
# print l

# print l
# In-place concatenation of m onto l, then list repetition with *.
l+=m
print l
m = ['abc', 4, 5.6, 47]
print m*3

# print len(l)

# l[3]='third'
# l[1:5]=['our']
# print l

# l.append(789)
# print l

# print l.count(4)

# l.extend(m)
# print l
# print l
# print l.index(78, 0, 5)

# l.insert(2,'second')
# print l

# l.pop(7)
# print l

# l.remove(4)
# print l

# l.sort(reverse = True)
# print l

# n = [1,2,3.4,[5,6,8.7]]
# print n
# print n[3][2]


# Tuples are immutable sequences; repr() gives the quoted literal form.
k = (1,4,'ghj')
print k
print repr(k)
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/ml-solution.py:
--------------------------------------------------------------------------------
# Train a logistic-regression classifier on train.tsv with Spark MLlib
# (Python 2 / legacy pyspark.mllib RDD API).
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from pyspark.mllib.classification import LogisticRegressionWithSGD

from pyspark import SparkContext
from pyspark.mllib.linalg import Vectors

def check ( d ):
    # Missing values in the dataset are encoded as "?"; map them to 0.0.
    if d == "?":
        return 0.0
    else:
        return float(d)

def process( row ):
    # Turn one tokenized row into a LabeledPoint: the last column is the
    # label, columns 4+ are numeric features (earlier columns are
    # presumably metadata -- TODO confirm against the dataset schema).
    trimmed = row#.replace("\"","")
    label = int(trimmed[-1])
    features = map(check, trimmed[4:])
    return LabeledPoint(label, Vectors.dense(features))


sc = SparkContext("local[2]", "First Spark App")
rdd = sc.textFile("train.tsv")
rdd = rdd.map( lambda x: x.replace("\"",""))  # strip embedded quoting
rdd = rdd.map( lambda x: x.split("\t"))       # tokenize on tabs
res = rdd.map(process)

# numIterations is used below; maxTreeDepth looks left over from a tree model.
numIterations = 10
maxTreeDepth = 5

lrModel = LogisticRegressionWithSGD.train( res, numIterations)

# Sanity check: predict the first training point (Python 2 print statement).
datapoint = res.first()
print lrModel.predict(datapoint.features)
33 |
--------------------------------------------------------------------------------
/movies.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | DVD
7 | 1981
8 | PG
9 |
10 | 'Archaeologist and adventurer Indiana Jones
11 | is hired by the U.S. government to find the Ark of the
12 | Covenant before the Nazis.'
13 |
14 |
15 |
16 | DVD,Online
17 | 1984
18 | PG
19 | None provided.
20 |
21 |
22 | Blu-ray
23 | 1985
24 | PG
25 | Marty McFly
26 |
27 |
28 |
29 |
30 | dvd, digital
31 | 2000
32 | PG-13
33 | Two mutants come to a private academy for their kind whose resident superhero team must
34 | oppose a terrorist organization with similar powers.
35 |
36 |
37 | VHS
38 | 1992
39 | PG13
40 | NA.
41 |
42 |
43 | Online
44 | 1992
45 | R
46 | WhAtEvER I Want!!!?!
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 | DVD
55 | 1979
56 | R
57 | """""""""
58 |
59 |
60 |
61 |
62 | DVD
63 | 1986
64 | PG13
65 | Funny movie about a funny guy
66 |
67 |
68 | blue-ray
69 | 2000
70 | Unrated
71 | psychopathic Bateman
72 |
73 |
74 |
75 |
76 |
--------------------------------------------------------------------------------
/my_code.py:
--------------------------------------------------------------------------------
# Emit the fixed greeting sequence, one line per message.
for message in ('Hello World', 'Hello World', 'Some', 'Hello World',
                'Some', 'Hello World', 'Hello World'):
    print(message)
8 |
--------------------------------------------------------------------------------
/my_mod.py:
--------------------------------------------------------------------------------
def fun():
    """Return the module's canonical greeting string."""
    greeting = 'Hello World'
    return greeting
3 |
class Hello():
    """Trivial greeter that remembers a name and prints a salutation."""

    def __init__(self,name):
        # Name echoed back by sayHi.
        self.name = name

    def sayHi(self):
        # BUG FIX: the original used the Python 2 print *statement*, which is
        # a SyntaxError under Python 3.  print('...') with a single argument
        # produces identical output on both Python 2 and Python 3.
        print('Hiii ' + self.name)
--------------------------------------------------------------------------------
/my_mod.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edyoda/python/2275ac9ebdfdd04f5b5c70d1c013ee1d9cde9f0f/my_mod.pyc
--------------------------------------------------------------------------------
/my_mod_use.py:
--------------------------------------------------------------------------------
# Demo of importing a class from the local my_mod module.
#import my_mod
from my_mod import Hello
#print my_mod.fun()

# Build a greeter and trigger its greeting (output goes to stdout).
x = Hello('awantik')
x.sayHi()
7 |
--------------------------------------------------------------------------------
/numbers.py:
--------------------------------------------------------------------------------
1 |
# Tour of math and random module helpers (Python 2 print syntax).
from math import *

a= -1.25
print abs(a)  # absolute value -> 1.25
b= 25

print sqrt(b)  # 5.0

a = 6.35
print floor (a)  # round down -> 6.0

print min(10,5,6,34,7)
import random

print random.random()  # float in [0.0, 1.0)

l = ["dfg", 56, 7.8 ,"xyz"]

print random.choice(l)  # one random element of l

print random.randrange(10,50,5)  # random element of range(10, 50, 5)

random.shuffle(l)  # shuffles l in place
print l

print random.uniform(3,40)  # random float between 3 and 40

random.seed(5)  # fix the PRNG state so the next draw is reproducible

print random.random()
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/numbers.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edyoda/python/2275ac9ebdfdd04f5b5c70d1c013ee1d9cde9f0f/numbers.pyc
--------------------------------------------------------------------------------
/os-code.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import os

# Run the threading demo script in a child shell and block until it exits.
# NOTE(review): subprocess.run(["python", "threads-code.py"]) is the modern
# alternative if the exit status or output ever matters.
os.system("python threads-code.py")
5 |
--------------------------------------------------------------------------------
/out_file:
--------------------------------------------------------------------------------
1 | how are you
2 | is a to
3 | but i m not of
4 |
--------------------------------------------------------------------------------
/output1.html:
--------------------------------------------------------------------------------
1 |
2 | The Dormouse's story
3 |
4 | The Dormouse's story
5 | Once upon a time there were three little sisters; and their names were
6 | Elsie,
7 | Lacie and
8 | Tillie;
9 | and they lived at the bottom of a well.
10 | ...
11 |
--------------------------------------------------------------------------------
/panda-code.py:
--------------------------------------------------------------------------------
import pandas as pd

# Raw string so the backslashes in the Windows path are not treated as
# escape sequences.
companies = pd.read_csv(r'D:\Data\company_master_data_upto_Mar_2015_Karnataka.csv')

# print() with a single argument behaves the same on Python 2 and 3;
# the original used the Python 2 print statement.
print(companies.head())

# BUG FIX: the original file ended with stray pasted fragments
# ("25 < 36" and "5 6"); "5 6" is a SyntaxError, so both were removed.
21 |
22 |
--------------------------------------------------------------------------------
/parse_books_xml.py:
--------------------------------------------------------------------------------
from bs4 import BeautifulSoup

# Print "title by author costs $price" for every book in books.xml.

# FIX: context manager closes the file handle (the original leaked it).
with open("books.xml","r") as infile:
    contents = infile.read()

soup = BeautifulSoup(contents,'xml')
titles = soup.find_all('title')
authors = soup.find_all('author')
prices = soup.find_all('price')

# zip pairs the parallel result lists positionally, replacing the
# range(len(...)) index arithmetic.
for title, author, price in zip(titles, authors, prices):
    print(title.get_text(),"by",end=' ')
    print(author.get_text(),end=' ')
    print("costs $" + price.get_text())
12 |
--------------------------------------------------------------------------------
/parse_movie.py:
--------------------------------------------------------------------------------
import xml.etree.ElementTree as ET

# Walk movies.xml and demonstrate ElementTree iteration and XPath queries.
tree = ET.parse('movies.xml')
root = tree.getroot()

# Direct children of the root (with their attributes).
for child in root:
    print(child.tag, child.attrib)

# Every tag in the document, in document order.
print([elem.tag for elem in root.iter()])

# All movie elements anywhere in the tree.
for movie in root.iter('movie'):
    print(movie.attrib)

# Text of every description element.
for description in root.iter('description'):
    print(description.text)

# Movies released in 1992 (child <year> text match).
for movie in root.findall("./genre/decade/movie/[year='1992']"):
    print(movie.attrib)

# format elements flagged multiple='Yes' (prints the format's attributes).
for movie in root.findall("./genre/decade/movie/format/[@multiple='Yes']"):
    print(movie.attrib)

# Same match, then step up to the owning movie element.
# BUG FIX: the original path ended in '...', which is not valid
# ElementTree XPath; '/..' is the parent-step that selects the movie.
for movie in root.findall("./genre/decade/movie/format[@multiple='Yes']/.."):
    print(movie.attrib)
25 |
--------------------------------------------------------------------------------
/parse_xml.py:
--------------------------------------------------------------------------------
# FIX: xml.etree.cElementTree was removed in Python 3.9; plain ElementTree
# uses the C accelerator automatically when available.
import xml.etree.ElementTree as xml

def parseXML(file_name):
    """Parse *file_name* and print every grandchild tag/text pair.

    Expects a two-level document (root -> record elements -> leaf fields).
    Output goes to stdout; returns None.
    """
    tree = xml.ElementTree(file=file_name)
    print(tree.getroot())
    root = tree.getroot()
    print("tag=%s, attrib=%s" % (root.tag, root.attrib))

    # get the information via the children!
    print("-" * 40)
    print("Iterating over the root's children")
    print("-" * 40)
    # FIX: Element.getchildren() was removed in Python 3.9; Elements
    # support direct iteration instead.
    for user in root:
        for user_child in user:
            print("%s=%s" % (user_child.tag, user_child.text))

if __name__ == "__main__":
    parseXML("test.xml")
22 |
--------------------------------------------------------------------------------
/pyclass1.py:
--------------------------------------------------------------------------------
1 | # print 'hi'
2 | # a=5
3 | # print type(a)
4 |
5 | # a=6
6 | # b=5
7 |
8 | # a,b,c,d=7,8,6,'f'
9 | # print a
10 | # print d
11 |
12 | # print str(a)
13 |
14 | # # #+ - / * % ** //
15 |
16 | # s ='hello'
17 | # print s
18 |
19 | # # # > = < !=
20 | # g=7.9
21 | # print g/2.0
22 | # a= a+5
23 | # a+=5
24 |
25 | # # #a&=b || a= a&b
26 |
27 | # # in, not in
28 |
29 | # # a*5-7&b/9**5
30 |
31 | # a =5
32 | # b=8
33 |
34 | # if a>7 :
35 | # print 'first if statement'
36 | # if b>7 :
37 | # print 'nested if'
38 |
39 | # elif a<4 :
40 | # print 'first elif'
41 | # elif a==5:
42 | # print 'second elif'
43 | # else :
44 | # print 'first else statement'
45 | # a=10
46 | # while a>5 :
47 | # a -= 1 #a=a-1
48 | # print a
49 |
50 | # num = 10
51 |
52 | # while num >0 :
53 | # if num%2==0 :
54 | # print 'number is even ',num
55 | # num -=1
56 |
57 | # for num in range(1,10):
58 | # if num%2==0:
59 | # print 'even ', num
60 |
61 |
62 |
63 | # for num in range(3,10):
64 | # a=0
65 | # for prime in range(2,num-1):
66 | # if num%prime == 0 :
67 | # a += 1
68 | # if a==0:
69 | # print 'prime number ', num
70 |
71 | #NUMBERS:
72 | #int, float, long, complex
73 | # a=4
74 | # b=5
75 | # print long(a)
76 |
77 | # import math
78 |
79 | # a =7.6789065444
80 | # b=8
81 | # c=10
82 | # #print abs(a) #absolute value of a
83 |
84 | # print round(a,3)
85 |
86 | # import random
87 |
88 | # l=[3,5.6,'fgh']
89 | # print random.choice(l)
90 | # print random.choice(l)
91 |
92 | #print random.randrange(10,100,3)
93 |
94 | #print random.random() # random number will always be greater than or equal to 0 and always less than 1.
95 | # random.seed(15)
96 | # print random.random()
97 | # #0.57140259469
98 | # random.seed(15)
99 | # print random.random()
100 | # random.shuffle(l)
101 | # print l[2]
102 | # print random.uniform(2,10)
103 | # import math
104 | # print math.radians(180)
105 | # print math.e
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
--------------------------------------------------------------------------------
/python-extc.c:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | static PyObject* helloworld(PyObject* self)
4 | {
5 | return Py_BuildValue("s", "Hello, Python extensions!!");
6 | }
7 |
8 | static char helloworld_docs[] =
9 | "helloworld( ): Any message you want to put here!!\n";
10 |
11 | static PyMethodDef helloworld_funcs[] = {
12 | {"helloworld", (PyCFunction)helloworld,
13 | METH_NOARGS, helloworld_docs},
14 | {NULL}
15 | };
16 |
17 | void inithelloworld(void)
18 | {
19 | Py_InitModule3("helloworld", helloworld_funcs,
20 | "Extension module example!");
21 | }
22 |
--------------------------------------------------------------------------------
/quotesbot-master/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
 45 | *.cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 |
--------------------------------------------------------------------------------
/quotesbot-master/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016 Scrapy project
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/quotesbot-master/README.md:
--------------------------------------------------------------------------------
1 | # QuotesBot
2 | This is a Scrapy project to scrape quotes from famous people from http://quotes.toscrape.com ([github repo](https://github.com/scrapinghub/spidyquotes)).
3 |
4 | This project is only meant for educational purposes.
5 |
6 |
7 | ## Extracted data
8 |
9 | This project extracts quotes, combined with the respective author names and tags.
10 | The extracted data looks like this sample:
11 |
12 | {
13 | 'author': 'Douglas Adams',
14 | 'text': '“I may not have gone where I intended to go, but I think I ...”',
15 | 'tags': ['life', 'navigation']
16 | }
17 |
18 |
19 | ## Spiders
20 |
21 | This project contains two spiders and you can list them using the `list`
22 | command:
23 |
24 | $ scrapy list
25 | toscrape-css
26 | toscrape-xpath
27 |
28 | Both spiders extract the same data from the same website, but `toscrape-css`
29 | employs CSS selectors, while `toscrape-xpath` employs XPath expressions.
30 |
31 | You can learn more about the spiders by going through the
32 | [Scrapy Tutorial](http://doc.scrapy.org/en/latest/intro/tutorial.html).
33 |
34 |
35 | ## Running the spiders
36 |
37 | You can run a spider using the `scrapy crawl` command, such as:
38 |
39 | $ scrapy crawl toscrape-css
40 |
41 | If you want to save the scraped data to a file, you can pass the `-o` option:
42 |
43 | $ scrapy crawl toscrape-css -o quotes.json
44 |
--------------------------------------------------------------------------------
/quotesbot-master/quotesbot/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edyoda/python/2275ac9ebdfdd04f5b5c70d1c013ee1d9cde9f0f/quotesbot-master/quotesbot/__init__.py
--------------------------------------------------------------------------------
/quotesbot-master/quotesbot/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # http://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | import scrapy
9 |
10 |
class QuotesbotItem(scrapy.Item):
    """Scrapy item for one scraped quote; no fields are declared yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
15 |
--------------------------------------------------------------------------------
/quotesbot-master/quotesbot/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
class QuotesbotPipeline(object):
    """No-op pipeline: passes every scraped item through unchanged."""
    def process_item(self, item, spider):
        # Returning the item keeps it flowing to any later pipelines.
        return item
12 |
--------------------------------------------------------------------------------
/quotesbot-master/quotesbot/settings.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Scrapy settings for quotesbot project
4 | #
5 | # For simplicity, this file contains only settings considered important or
6 | # commonly used. You can find more settings consulting the documentation:
7 | #
8 | # http://doc.scrapy.org/en/latest/topics/settings.html
9 | # http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
10 | # http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
11 |
12 | BOT_NAME = 'quotesbot'
13 |
14 | SPIDER_MODULES = ['quotesbot.spiders']
15 | NEWSPIDER_MODULE = 'quotesbot.spiders'
16 |
17 |
18 | # Crawl responsibly by identifying yourself (and your website) on the user-agent
19 | #USER_AGENT = 'quotesbot (+http://www.yourdomain.com)'
20 |
21 | # Obey robots.txt rules
22 | ROBOTSTXT_OBEY = True
23 |
24 | # Configure maximum concurrent requests performed by Scrapy (default: 16)
25 | #CONCURRENT_REQUESTS = 32
26 |
27 | # Configure a delay for requests for the same website (default: 0)
28 | # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
29 | # See also autothrottle settings and docs
30 | #DOWNLOAD_DELAY = 3
31 | # The download delay setting will honor only one of:
32 | #CONCURRENT_REQUESTS_PER_DOMAIN = 16
33 | #CONCURRENT_REQUESTS_PER_IP = 16
34 |
35 | # Disable cookies (enabled by default)
36 | #COOKIES_ENABLED = False
37 |
38 | # Disable Telnet Console (enabled by default)
39 | #TELNETCONSOLE_ENABLED = False
40 |
41 | # Override the default request headers:
42 | #DEFAULT_REQUEST_HEADERS = {
43 | # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
44 | # 'Accept-Language': 'en',
45 | #}
46 |
47 | # Enable or disable spider middlewares
48 | # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
49 | #SPIDER_MIDDLEWARES = {
50 | # 'quotesbot.middlewares.MyCustomSpiderMiddleware': 543,
51 | #}
52 |
53 | # Enable or disable downloader middlewares
54 | # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
55 | #DOWNLOADER_MIDDLEWARES = {
56 | # 'quotesbot.middlewares.MyCustomDownloaderMiddleware': 543,
57 | #}
58 |
59 | # Enable or disable extensions
60 | # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
61 | #EXTENSIONS = {
62 | # 'scrapy.extensions.telnet.TelnetConsole': None,
63 | #}
64 |
65 | # Configure item pipelines
66 | # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
67 | #ITEM_PIPELINES = {
68 | # 'quotesbot.pipelines.SomePipeline': 300,
69 | #}
70 |
71 | # Enable and configure the AutoThrottle extension (disabled by default)
72 | # See http://doc.scrapy.org/en/latest/topics/autothrottle.html
73 | #AUTOTHROTTLE_ENABLED = True
74 | # The initial download delay
75 | #AUTOTHROTTLE_START_DELAY = 5
76 | # The maximum download delay to be set in case of high latencies
77 | #AUTOTHROTTLE_MAX_DELAY = 60
78 | # The average number of requests Scrapy should be sending in parallel to
79 | # each remote server
80 | #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
81 | # Enable showing throttling stats for every response received:
82 | #AUTOTHROTTLE_DEBUG = False
83 |
84 | # Enable and configure HTTP caching (disabled by default)
85 | # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
86 | #HTTPCACHE_ENABLED = True
87 | #HTTPCACHE_EXPIRATION_SECS = 0
88 | #HTTPCACHE_DIR = 'httpcache'
89 | #HTTPCACHE_IGNORE_HTTP_CODES = []
90 | #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
91 |
--------------------------------------------------------------------------------
/quotesbot-master/quotesbot/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/quotesbot-master/quotesbot/spiders/toscrape-css.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import scrapy
3 |
4 |
class ToScrapeCSSSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com with CSS selectors, one page at a time."""
    name = "toscrape-css"
    start_urls = [
        'http://quotes.toscrape.com/',
    ]

    def parse(self, response):
        # Emit one item per quote box on the page.
        for quote_sel in response.css("div.quote"):
            item = {
                'text': quote_sel.css("span.text::text").extract_first(),
                'author': quote_sel.css("small.author::text").extract_first(),
                'tags': quote_sel.css("div.tags > a.tag::text").extract(),
            }
            yield item

        # Follow the pagination link, if the page has one.
        href = response.css("li.next > a::attr(href)").extract_first()
        if href is not None:
            yield scrapy.Request(response.urljoin(href))
22 |
23 |
--------------------------------------------------------------------------------
/quotesbot-master/quotesbot/spiders/toscrape-xpath.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import scrapy
3 |
4 |
class ToScrapeSpiderXPath(scrapy.Spider):
    """Crawl quotes.toscrape.com with XPath expressions, page by page."""
    name = 'toscrape-xpath'
    start_urls = [
        'http://quotes.toscrape.com/',
    ]

    def parse(self, response):
        # One dict per quote container on the page.
        for box in response.xpath('//div[@class="quote"]'):
            text = box.xpath('./span[@class="text"]/text()').extract_first()
            author = box.xpath('.//small[@class="author"]/text()').extract_first()
            tags = box.xpath('.//div[@class="tags"]/a[@class="tag"]/text()').extract()
            yield {'text': text, 'author': author, 'tags': tags}

        # Chase the "next page" link until pagination runs out.
        next_href = response.xpath('//li[@class="next"]/a/@href').extract_first()
        if next_href is not None:
            yield scrapy.Request(response.urljoin(next_href))
22 |
23 |
--------------------------------------------------------------------------------
/quotesbot-master/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.org/en/latest/deploy.html
5 |
6 | [settings]
7 | default = quotesbot.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = quotesbot
12 |
--------------------------------------------------------------------------------
/regularExpressions.py:
--------------------------------------------------------------------------------
# Regular-expression warm-ups (Python 2 print syntax).
import re

s = 'h peeeeeeellloooo heeeeee'

# 'h', optional whitespace, then exactly three word characters;
# matches the leading "h pee" of s.
var = re.search (r'h\s*\w\w\w', s)

if var :
    print var.group()



# str = 'an example word:cat!!'
# match = re.search(r'word:\w\w\w', str)
# # If-statement after search() tests if it succeeded
# if match:
#     print 'found', match.group() ## 'found word:cat'
# else:
#     print 'did not find'


# re.match anchors at the start of the string, so this never matches "hello".
# str = 'an example word:cat!!'
# match1 = re.match(r"word:\w\w\w!!", "hello")
# # If-statement after search() tests if it succeeded
# if match1:
#     print 'found', match1.group() ## 'found word:cat'
# else:
#     print 'did not find'
--------------------------------------------------------------------------------
/server.py:
--------------------------------------------------------------------------------
1 | from http.server import BaseHTTPRequestHandler,HTTPServer
2 | from socketserver import ThreadingMixIn
3 | import threading
4 | import argparse
5 | import re
6 | import cgi
7 |
class LocalData(object):
    """Process-wide in-memory store mapping record id -> posted payload."""
    records = {}
10 |
class HTTPRequestHandler(BaseHTTPRequestHandler):
    """Tiny JSON record store.

    POST /api/v1/addrecord/<id>  stores the request body under <id>.
    GET  /api/v1/getrecord/<id>  returns the stored record.
    Any other path answers 403.
    """

    def do_POST(self):
        if re.search('/api/v1/addrecord/*', self.path) is not None:
            # FIX: in Python 3 self.headers is an email.message.Message;
            # use .get(), not the Python 2 getheader() the original called.
            ctype = self.headers.get('content-type', '').split(';', 1)[0].strip()
            if ctype == 'application/json':
                # FIX: cgi.parse_qs was removed in Python 3.8; its
                # replacement lives in urllib.parse.
                import urllib.parse
                length = int(self.headers.get('content-length', 0))
                body = self.rfile.read(length).decode('utf-8')
                data = urllib.parse.parse_qs(body, keep_blank_values=True)
                recordID = self.path.split('/')[-1]
                LocalData.records[recordID] = data
                print ("record %s is added successfully" % recordID)
            else:
                data = {}
            self.send_response(200)
            self.end_headers()
        else:
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        return

    def do_GET(self):
        if re.search('/api/v1/getrecord/*', self.path) is not None:
            recordID = self.path.split('/')[-1]
            print(LocalData.records.items())
            # FIX: .get() avoids the KeyError the original raised for an
            # unknown record id (it was clearly meant to fall into the
            # 400 branch instead of crashing the handler thread).
            record = LocalData.records.get(str(recordID))
            if record:
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                # FIX: wfile expects bytes in Python 3; serialize the
                # record as JSON to honor the Content-Type header.
                import json
                self.wfile.write(json.dumps(record).encode('utf-8'))
            else:
                self.send_response(400, 'Bad Request: record does not exist')
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
        else:
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        return
49 |
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request on its own thread."""
    # Allows rebinding the port immediately after a restart.
    allow_reuse_address = True

    def shutdown(self):
        # Close the listening socket, then stop the serve_forever loop.
        self.socket.close()
        HTTPServer.shutdown(self)
56 |
class SimpleHttpServer():
    """Convenience wrapper running ThreadedHTTPServer on a daemon thread."""

    def __init__(self, ip, port):
        # Bind immediately; serving starts only in start().
        self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)

    def start(self):
        # Serve on a background daemon thread so start() returns at once.
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def waitForThread(self):
        # Block the caller until the serving thread exits.
        self.server_thread.join()

    def addRecord(self, recordID, jsonEncodedRecord):
        # Seed the shared store directly, bypassing HTTP.
        LocalData.records[recordID] = jsonEncodedRecord

    def stop(self):
        # Stop serving, then wait for the serving thread to finish.
        self.server.shutdown()
        self.waitForThread()
75 |
if __name__=='__main__':
    # CLI: server.py <port> <ip> -- note port is the first positional arg.
    parser = argparse.ArgumentParser(description='HTTP Server')
    parser.add_argument('port', type=int, help='Listening port for HTTP Server')
    parser.add_argument('ip', help='HTTP Server IP')
    args = parser.parse_args()

    server = SimpleHttpServer(args.ip, args.port)
    print ('HTTP Server Running...........')
    server.start()
    # Keep the main thread alive while the daemon thread serves.
    server.waitForThread()
86 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from distutils.core import setup, Extension

# Build definition for the 'helloworld' C extension (source: hello.c).
# NOTE(review): distutils is deprecated since Python 3.10; setuptools
# provides drop-in setup/Extension replacements.
setup(name='helloworld', version='1.0', \
      ext_modules=[Extension('helloworld', ['hello.c'])])

# BUG FIX: the remainder of the original file was a shell command and prose
# pasted straight into the module, which made it a SyntaxError.  Preserved
# here as comments:
#
#   $ python setup.py install
#
# Python script:
#
#   #!/usr/bin/python
#   import helloworld
#   print helloworld.helloworld()
14 |
--------------------------------------------------------------------------------
/string_programs.py:
--------------------------------------------------------------------------------
# String-handling practice exercises (Python 2: raw_input, print statement,
# string.maketrans).  Each solution is disabled inside a triple-quoted
# string literal; nothing runs as shipped.

#Take strings from user & print back the ones which start with 'a'
'''
while True:
    data = raw_input('Enter Data ')
    if not data:
        break
    if data.startswith('a'):
        print data
'''

#Take lines from user & print back only those which contains 'the' in it
'''
while True:
    line = raw_input('Enter Data ')
    if not line:
        break
    if 'the' in line:
        print line
'''

#Check if user data is less than 10, between 10-50, greater than 50
'''
while True:
    data = raw_input('Enter Number ')
    if not data:
        break
    if not data.isdigit():
        continue
    num = int(data)
    if num < 10:
        s = 'Num {num} less than 10'
        print s.format(num=num)
    elif num < 50:
        s = 'Num {num} less than 50 greater than 10'
        print s.format(num=num)
    else:
        s = 'Num {num} greater than 50'
        print s.format(num=num)
'''

#Reverse words of given string
'''
while True:
    line = raw_input('Enter line ')
    if not line:
        break
    l = line.strip() #remove preceeding & trailing spaces
    l = l.split() #converting into list
    l.reverse()
    s = ' '.join(l) #converting list back to strip with spaces between words
    print s
'''

#Replace all the '@' with '#', all 'z' with 'x', all 'p' with 'y' & remove all ' '
#Or, a -> 1, b-> 2, c -> 3, d -> 4, & remove all '@'
'''
from string import maketrans # Import maketrans from string package
data = raw_input('Enter Data ')
mapping = maketrans('abcd','1234')
removal = '@#!'

s = data.translate(mapping, removal)
print s
'''
65 |
--------------------------------------------------------------------------------
/student.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 123
4 | Shubham
5 | 0
6 |
7 |
8 | 234
9 | Pankaj
10 | 0
11 |
12 |
13 | 345
14 | JournalDev
15 | 0
16 |
17 |
18 |
--------------------------------------------------------------------------------
/sub-code.py:
--------------------------------------------------------------------------------
# Pipeline demo: stream my_code.py's stdout into find_word.py, print result.
import subprocess

producer = subprocess.Popen(['python', 'my_code.py'],
                            stdout=subprocess.PIPE)

consumer = subprocess.Popen(['python', 'find_word.py'],
                            stdin=producer.stdout,
                            stdout=subprocess.PIPE)

# Drop our handle on the pipe so the producer can get SIGPIPE if the
# consumer exits early.
producer.stdout.close()

print(consumer.communicate()[0])
10 |
--------------------------------------------------------------------------------
/sub-code2.py:
--------------------------------------------------------------------------------
# Chain two scripts like a shell pipe: my_code.py | find_word.py
import subprocess

first = subprocess.Popen(['python', 'my_code.py'], stdout=subprocess.PIPE)

second = subprocess.Popen(
    ['python', 'find_word.py'],
    stdin=first.stdout,
    stdout=subprocess.PIPE,
)

first.stdout.close()  # allow SIGPIPE delivery to the first process

output = second.communicate()[0]
print(output)
10 |
--------------------------------------------------------------------------------
/sub-code3.py:
--------------------------------------------------------------------------------
# Pipeline with CLI arguments: my_code.py | find_word_argparse.py --match Some
import subprocess

src = subprocess.Popen(['python', 'my_code.py'], stdout=subprocess.PIPE)

flt = subprocess.Popen(
    ['python', 'find_word_argparse.py', '--match', 'Some'],
    stdin=src.stdout,
    stdout=subprocess.PIPE,
)

src.stdout.close()  # detach so src can receive SIGPIPE if flt exits early

print(flt.communicate()[0])
10 |
--------------------------------------------------------------------------------
/subprocess-code.py:
--------------------------------------------------------------------------------
# Run `ls -al`; its output goes straight to this process's stdout.
import subprocess

listing_cmd = ["ls", "-al"]
subprocess.run(listing_cmd)
4 |
--------------------------------------------------------------------------------
/test-sleep.py:
--------------------------------------------------------------------------------
# Print a heartbeat message once per second, forever (stop with Ctrl-C).
import time

while True:
    time.sleep(1)
    print('Heyyy')  # was a Python 2 print statement, which fails on Python 3
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import ctypes
3 | import os
4 |
def main():
    """Print a greeting; the ctypes/DLL experiment below is kept disabled."""
    print('Hello World')  # was a Python 2 print statement
    # d = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    # library_path = 'C:\Users\ZekeLabs\Documents\Visual Studio 2015\Projects\MathLibraryAndClient\Debug\MathLibrary.dll'
    # lib = ctypes.cdll.LoadLibrary(library_path)

    # restype = ctypes.c_int
    # argtypes = [ ctypes.c_int ]
    # fibonacci_prototype = ctypes.CFUNCTYPE(restype, *argtypes)
    # print(lib._FuncPtr)
    # fibonacci_function = fibonacci_prototype(lib.Fibonacci)

    # term = 10
    # nf = fibonacci_function(term)
    # print('The {}th Fibonacci number is {}'.format(term, nf))
21 |
if __name__ == '__main__':
    # Entry point intentionally disabled; uncomment main() to run the demo.
    pass
    #main()
25 |
--------------------------------------------------------------------------------
/test.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | elements
4 | more elements
5 |
6 |
7 | element as well
8 |
9 |
--------------------------------------------------------------------------------
/threads-code.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Spyder Editor
4 |
5 | This is a temporary script file.
6 | """
7 | import threading
8 |
# Shared results map: thread index -> True once that thread has run.
# NOTE(review): written from several threads with no lock — appears to rely
# on CPython's GIL making single-key dict assignment atomic; confirm.
db = {}
10 |
def sayHello():
    """Return the greeting used by even-indexed worker threads."""
    greeting = "Hello"
    return greeting
13 |
def sayHey():
    """Return the greeting used by odd-indexed worker threads."""
    msg = "Hey"
    return msg
16 |
def thread_function(idx, info):
    """Worker body: log its arguments, mark itself done in db, greet."""
    print('Function {name} {info}'.format(name=idx, info=info))

    # Record that this worker ran.
    db[idx] = True

    # Even ids say Hello, odd ids say Hey.
    greeting = sayHello() if idx % 2 == 0 else sayHey()
    print(greeting)
26 |
27 |
def other_thread_functions(idx, info):
    """Alternate worker: announce itself and mark completion in db."""
    print('Some other functions')
    # info is accepted for signature parity with thread_function but unused.
    db[idx] = True
31 |
if __name__ == "__main__":

    threads = []

    # Five workers running thread_function...
    for i in range(5):
        worker = threading.Thread(target=thread_function, args=(i, "Information"))
        threads.append(worker)

    # ...and three more running other_thread_functions.
    for i in range(5, 8):
        worker = threading.Thread(target=other_thread_functions, args=(i, "Information"))
        threads.append(worker)

    # Start every thread first, THEN join them all.  The original joined each
    # thread immediately after starting it, which ran the workers one at a
    # time and defeated the purpose of using threads.
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    print(db)
--------------------------------------------------------------------------------
/try-except.code:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edyoda/python/2275ac9ebdfdd04f5b5c70d1c013ee1d9cde9f0f/try-except.code
--------------------------------------------------------------------------------
/try-except.py:
--------------------------------------------------------------------------------
1 | '''
2 | def fun(l):
3 | h = 0
4 | try:
5 | #Try doing things here, if unsuccessful go to except
6 | print h
7 | print l[1]
8 | return
9 | except Exception as e:
10 | print e
11 | finally:
12 | print 'Finally here'
13 |
14 | print 'hello'
15 |
16 | fun("jsd")
17 | '''
18 | '''
19 | def func(d):
20 | try:
21 | print d.index('d')
22 | except:
23 | print 'Cant doing'
24 |
25 | func("abc")
26 | '''
27 | '''
28 | def func(d):
29 | print d.index('d')
30 |
31 | func("abc")
32 | '''
33 |
34 |
35 |
--------------------------------------------------------------------------------
/try-except.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edyoda/python/2275ac9ebdfdd04f5b5c70d1c013ee1d9cde9f0f/try-except.pyc
--------------------------------------------------------------------------------
/update_xml.py:
--------------------------------------------------------------------------------
1 | import xml.etree.ElementTree as xml
2 |
def updateXML(filename, outfile="updated_test.xml"):
    """Set every <salary> element in *filename* to '1000' and save the result.

    Args:
        filename: path of the XML document to read.
        outfile: path the updated document is written to.  Defaults to the
            name the original hard-coded, so existing callers are unchanged.
    """
    tree = xml.ElementTree(file=filename)
    root = tree.getroot()

    # iter() walks the whole subtree, so nested <salary> tags are found too.
    for salary in root.iter("salary"):
        salary.text = '1000'

    # Write via the tree we already have; rebuilding a new ElementTree from
    # root (as the original did) was redundant.
    with open(outfile, "wb") as fh:
        tree.write(fh)
14 |
if __name__ == "__main__":
    # Rewrite the salaries in student.xml; output goes to updated_test.xml.
    updateXML("student.xml")
17 |
--------------------------------------------------------------------------------
/web-scrap-code.py:
--------------------------------------------------------------------------------
# Fetch the Times of India homepage and print every <h2> element.
import requests
from bs4 import BeautifulSoup

r = requests.get('http://www.timesofindia.com')

# Name the parser explicitly; BeautifulSoup warns when it has to guess one.
soup = BeautifulSoup(r.text, 'html.parser')

for d in soup.find_all('h2'):
    print(d)  # was a Python 2 print statement
--------------------------------------------------------------------------------
/web-scrap.py:
--------------------------------------------------------------------------------
# Query a course-search endpoint, save the result page, print its title.
# Rewritten for Python 3: urllib2 no longer exists; urlencode moved to
# urllib.parse and urlopen to urllib.request.
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup
#open search result for viewing
#import webbrowser


search = urllib.parse.urlencode({'item':'Python'})

url = 'http://139.59.1.147/search-courses/'
full_url = url + '?' + search
response = urllib.request.urlopen(full_url)


with open("search-page.html","w") as f:
    # urlopen returns bytes; decode before writing to a text-mode file.
    data = response.read().decode('utf-8', errors='replace')
    f.write(data)

soup = BeautifulSoup(data, "html.parser")
print(soup.title)
#webbrowser.open("search-page.html")
23 |
--------------------------------------------------------------------------------
/web-scrap.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edyoda/python/2275ac9ebdfdd04f5b5c70d1c013ee1d9cde9f0f/web-scrap.pyc
--------------------------------------------------------------------------------
/xl.py:
--------------------------------------------------------------------------------
1 | from openpyxl import Workbook
2 |
def main():
    """Create sample.xlsx containing a few demonstration values."""
    workbook = Workbook()

    # grab the active worksheet
    sheet = workbook.active

    # Assign a value directly to a cell...
    sheet['A1'] = 42

    # ...append an entire row at once...
    sheet.append([1, 2, 3])

    # ...and store a Python object (openpyxl converts it automatically).
    import datetime
    sheet['A2'] = datetime.datetime.now()

    # Save the workbook to disk.
    workbook.save("sample.xlsx")
21 |
22 |
if __name__ == '__main__':
    # Generate sample.xlsx when run as a script.
    main()
25 |
--------------------------------------------------------------------------------
/xl_write.csv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/edyoda/python/2275ac9ebdfdd04f5b5c70d1c013ee1d9cde9f0f/xl_write.csv
--------------------------------------------------------------------------------
/xmlparser.py:
--------------------------------------------------------------------------------
1 | import xmltodict
2 |
def findKeyInfo(db, pattern):
    """Print the value of every key equal to *pattern* in a nested dict.

    Args:
        db: dict to search; values may themselves be dicts.
        pattern: key name to look for at every nesting level.
    """
    for k, v in db.items():
        if k == pattern:
            print(v)  # was a Python 2 print statement
        # Recurse so matches inside nested dicts are found too.
        if isinstance(v, dict):
            findKeyInfo(v, pattern)
9 |
10 |
# Search the parsed XML (as nested dicts) for every 'many' key.
pattern = 'many'

# Parse the document once; xmltodict returns nested dict-like objects.
with open('file.xml') as fd:
    doc = xmltodict.parse(fd.read())

findKeyInfo(doc, pattern)
--------------------------------------------------------------------------------
/xmlreader.py:
--------------------------------------------------------------------------------
# Parse test.xml with xmltodict and print selected attributes and elements.
import xmltodict

with open('test.xml') as fd:
    doc = xmltodict.parse(fd.read())  # convert XML text into nested dicts

# xmltodict exposes attributes under '@name' keys and children under tag keys.
print(doc['mydocument']['plus']['@a'])  # was a Python 2 print statement
print(doc['mydocument']['@has'])
for m in doc['mydocument']['and']['many']:
    print(m)
--------------------------------------------------------------------------------
/xtd.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | host1
4 | Linux
5 |
6 |
7 | em0
8 | 10.0.0.1
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------