├── .idea
├── TensorFlowinAction.iml
├── misc.xml
├── modules.xml
├── vcs.xml
└── workspace.xml
├── InActionB1
├── chapter5
│ ├── 5_1.ipynb
│ ├── 5_2_1.ipynb
│ ├── 5_2_1.py
│ ├── 5_4_2.py
│ ├── mnist_eval_5_5.py
│ ├── mnist_inference_5_5.py
│ ├── mnist_inference_5_5.pyc
│ ├── mnist_train_5_5.py
│ └── mnist_train_5_5.pyc
├── chapter6
│ ├── 6_4_1.py
│ ├── mnist_inference_6_4_1.py
│ ├── mnist_inference_6_4_1.pyc
│ └── mnist_train_6_4_1.py
├── chapter8
│ ├── nlModel
│ ├── sin.png
│ └── sinModel.py
├── checkpoint
├── cher2
├── cher3
└── meta_graph.json
├── InActionB2
└── chapter4
│ ├── me.jpg
│ ├── neural_100.jpg
│ ├── neural_200.jpg
│ ├── neural_300.jpg
│ ├── neural_400.jpg
│ ├── neural_500.jpg
│ ├── neural_me_100.jpg
│ ├── neural_me_200.jpg
│ ├── neural_me_300.jpg
│ ├── neural_me_400.jpg
│ ├── neural_me_500.jpg
│ ├── star.jpg
│ ├── tranImage.py
│ └── xihu.jpg
└── README.md
/.idea/TensorFlowinAction.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
242 |
243 |
244 |
245 |
246 |
247 |
248 |
249 |
250 |
251 |
252 |
253 |
254 |
255 |
256 |
257 |
258 |
259 |
260 |
261 |
262 |
263 |
264 |
265 |
266 |
267 |
268 |
269 |
270 |
271 |
272 |
273 |
274 |
275 |
276 |
277 |
278 |
279 |
280 |
281 |
282 |
283 |
284 |
285 |
286 |
287 |
288 |
289 |
290 |
291 |
292 |
293 |
294 |
295 |
296 |
297 |
298 |
299 |
300 |
301 |
302 |
303 |
304 |
305 |
306 |
307 |
308 |
309 |
310 |
311 |
312 |
313 |
314 |
315 |
316 |
317 |
318 |
319 |
320 |
321 |
322 |
323 |
324 |
325 |
326 |
327 |
328 |
329 |
330 |
331 |
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 |
340 |
341 |
342 |
343 |
344 |
345 |
346 |
347 |
348 |
349 |
350 |
351 |
352 |
353 |
354 |
355 |
356 |
357 |
358 |
359 |
360 |
361 |
362 |
363 |
364 |
365 |
366 |
367 |
368 |
369 |
370 |
371 |
372 |
373 |
374 |
375 |
376 |
377 |
378 |
379 |
380 |
381 |
382 |
383 |
384 |
385 |
386 |
387 |
388 |
389 |
390 |
391 |
392 |
393 |
394 |
395 |
396 |
397 |
398 |
399 | 1492478857409
400 |
401 |
402 | 1492478857409
403 |
404 |
405 |
406 |
407 |
408 |
409 |
410 |
411 |
412 |
413 |
414 |
415 |
416 |
417 |
418 |
419 |
420 |
421 |
422 |
423 |
424 |
425 |
426 |
427 |
428 |
429 |
430 |
431 |
432 |
433 |
434 |
435 |
436 |
437 |
438 |
439 |
440 |
441 |
442 |
443 |
444 |
445 |
446 |
447 |
448 |
449 |
450 |
451 |
452 |
453 |
454 |
455 |
456 |
457 |
458 |
459 |
460 |
461 |
462 |
463 |
464 |
465 |
466 |
467 |
468 |
469 |
470 |
471 |
472 |
473 |
474 |
475 |
476 |
477 |
478 |
479 |
480 |
481 |
482 |
483 |
484 |
485 |
486 |
487 |
488 |
489 |
490 |
491 |
492 |
493 |
494 |
495 |
496 |
497 |
498 |
499 |
500 |
501 |
502 |
503 |
504 |
505 |
506 |
507 |
508 |
509 |
510 |
511 |
512 |
513 |
514 |
515 |
516 |
517 |
518 |
519 |
520 |
521 |
522 |
523 |
524 |
525 |
526 |
527 |
528 |
529 |
530 |
531 |
532 |
533 |
534 |
535 |
536 |
537 |
538 |
539 |
540 |
541 |
542 |
543 |
544 |
545 |
546 |
547 |
548 |
549 |
550 |
551 |
552 |
553 |
554 |
555 |
556 |
557 |
558 |
559 |
560 |
561 |
562 |
563 |
564 |
565 |
566 |
567 |
568 |
569 |
570 |
571 |
572 |
573 |
574 |
575 |
576 |
577 |
578 |
579 |
580 |
581 |
582 |
583 |
584 |
585 |
586 |
587 |
588 |
589 |
590 |
591 |
592 |
593 |
594 |
595 |
596 |
597 |
598 |
599 |
600 |
601 |
602 |
603 |
604 |
605 |
606 |
607 |
608 |
609 |
610 |
611 |
612 |
613 |
614 |
615 |
616 |
617 |
618 |
619 |
620 |
621 |
622 |
623 |
624 |
625 |
626 |
627 |
628 |
629 |
630 |
631 |
632 |
633 |
634 |
635 |
636 |
637 |
638 |
639 |
640 |
641 |
642 |
643 |
644 |
645 |
646 |
647 |
648 |
649 |
650 |
651 |
652 |
653 |
654 |
655 |
656 |
657 |
658 |
659 |
660 |
661 |
662 |
663 |
664 |
665 |
666 |
667 |
668 |
669 |
670 |
671 |
672 |
673 |
674 |
675 |
676 |
677 |
678 |
679 |
680 |
681 |
682 |
683 |
684 |
685 |
686 |
687 |
688 |
689 |
690 |
691 |
692 |
693 |
694 |
--------------------------------------------------------------------------------
/InActionB1/chapter5/5_1.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 5,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [
10 | {
11 | "name": "stdout",
12 | "output_type": "stream",
13 | "text": [
14 | "Extracting /home/soso/MNIST_data/train-images-idx3-ubyte.gz\n"
15 | ]
16 | },
17 | {
18 | "name": "stdout",
19 | "output_type": "stream",
20 | "text": [
21 | "Extracting /home/soso/MNIST_data/train-labels-idx1-ubyte.gz\nExtracting /home/soso/MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting /home/soso/MNIST_data/t10k-labels-idx1-ubyte.gz\n"
22 | ]
23 | },
24 | {
25 | "name": "stdout",
26 | "output_type": "stream",
27 | "text": [
28 | " Training data size: 55000\nTraining data : [ 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0.38039219 0.37647063\n 0.3019608 0.46274513 0.2392157 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0.35294119 0.5411765\n 0.92156869 0.92156869 0.92156869 0.92156869 0.92156869 0.92156869\n 0.98431379 0.98431379 0.97254908 0.99607849 0.96078438 0.92156869\n 0.74509805 0.08235294 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.\n 0.54901963 0.98431379 0.99607849 0.99607849 0.99607849 0.99607849\n 0.99607849 0.99607849 0.99607849 0.99607849 0.99607849 0.99607849\n 0.99607849 0.99607849 0.99607849 0.99607849 0.74117649 0.09019608\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0.88627458 0.99607849 0.81568635\n 0.78039223 0.78039223 0.78039223 0.78039223 0.54509807 0.2392157\n 0.2392157 0.2392157 0.2392157 0.2392157 0.50196081 0.8705883\n 0.99607849 0.99607849 0.74117649 0.08235294 0. 0. 0.\n 0. 0. 0. 0. 0. 0.\n 0.14901961 0.32156864 0.0509804 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0.13333334 0.83529419 0.99607849 0.99607849 0.45098042 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0.32941177 0.99607849 0.99607849 0.91764712 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0.32941177 0.99607849 0.99607849 0.91764712 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 
0.\n 0.41568631 0.6156863 0.99607849 0.99607849 0.95294124 0.20000002\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0.09803922 0.45882356 0.89411771\n 0.89411771 0.89411771 0.99215692 0.99607849 0.99607849 0.99607849\n 0.99607849 0.94117653 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0.26666668 0.4666667 0.86274517\n 0.99607849 0.99607849 0.99607849 0.99607849 0.99607849 0.99607849\n 0.99607849 0.99607849 0.99607849 0.55686277 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0.14509805 0.73333335 0.99215692\n 0.99607849 0.99607849 0.99607849 0.87450987 0.80784321 0.80784321\n 0.29411766 0.26666668 0.84313732 0.99607849 0.99607849 0.45882356\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.44313729\n 0.8588236 0.99607849 0.94901967 0.89019614 0.45098042 0.34901962\n 0.12156864 0. 0. 0. 0. 0.7843138\n 0.99607849 0.9450981 0.16078432 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0.66274512 0.99607849 0.6901961 0.24313727 0. 0.\n 0. 0. 0. 0. 0. 0.18823531\n 0.90588242 0.99607849 0.91764712 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0.07058824 0.48627454 0. 0. 0.\n 0. 0. 0. 0. 0. 0.\n 0.32941177 0.99607849 0.99607849 0.65098041 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0.54509807 0.99607849 0.9333334 0.22352943 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.\n 0.82352948 0.98039222 0.99607849 0.65882355 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0.94901967 0.99607849 0.93725497 0.22352943 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.\n 0.34901962 0.98431379 0.9450981 0.33725491 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.\n 0.01960784 0.80784321 0.96470594 0.6156863 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0.01568628 0.45882356 0.27058825 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. ]\n"
29 | ]
30 | }
31 | ],
32 | "source": [
33 | "from tensorflow.examples.tutorials.mnist import input_data \n",
34 | "\n",
35 | "mnist = input_data.read_data_sets(\"/home/soso/MNIST_data\",one_hot=True)\n",
36 | "\n",
37 | "print \"Training data size:\",mnist.train.num_examples\n",
38 | "\n",
39 | "print \"Training data :\",mnist.train.images[0]"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | ""
49 | ]
50 | }
51 | ],
52 | "metadata": {
53 | "kernelspec": {
54 | "display_name": "Python 2",
55 | "language": "python",
56 | "name": "python2"
57 | },
58 | "language_info": {
59 | "codemirror_mode": {
60 | "name": "ipython",
61 | "version": 2.0
62 | },
63 | "file_extension": ".py",
64 | "mimetype": "text/x-python",
65 | "name": "python",
66 | "nbconvert_exporter": "python",
67 | "pygments_lexer": "ipython2",
68 | "version": "2.7.6"
69 | }
70 | },
71 | "nbformat": 4,
72 | "nbformat_minor": 0
73 | }
--------------------------------------------------------------------------------
/InActionB1/chapter5/5_2_1.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 3,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [
10 | {
11 | "name": "stdout",
12 | "output_type": "stream",
13 | "text": [
14 | "Extracting /home/soso/MNIST_data/train-images-idx3-ubyte.gz\n"
15 | ]
16 | },
17 | {
18 | "name": "stdout",
19 | "output_type": "stream",
20 | "text": [
21 | "Extracting /home/soso/MNIST_data/train-labels-idx1-ubyte.gz\nExtracting /home/soso/MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting /home/soso/MNIST_data/t10k-labels-idx1-ubyte.gz\n"
22 | ]
23 | },
24 | {
25 | "ename": "ValueError",
26 | "evalue": "Only call `sparse_softmax_cross_entropy_with_logits` with named arguments (labels=..., logits=..., ...)",
27 | "traceback": [
28 | "\u001b[0;31m\u001b[0m",
29 | "\u001b[0;31mValueError\u001b[0mTraceback (most recent call last)",
30 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m__name__\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'__main__'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 100\u001b[0;31m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
31 | "\u001b[0;32m/home/soso/anaconda2/lib/python2.7/site-packages/tensorflow/python/platform/app.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(main, argv)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;31m# Call the main function, passing through any arguments\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;31m# to the final program.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0m_sys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_sys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margv\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mflags_passthrough\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
32 | "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(argv)\u001b[0m\n\u001b[1;32m 95\u001b[0m mnist = input_data.read_data_sets(\n\u001b[1;32m 96\u001b[0m \"/home/soso/MNIST_data\",one_hot=True)\n\u001b[0;32m---> 97\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmnist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 98\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m__name__\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'__main__'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
33 | "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(mnist)\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n\u001b[0;32m---> 53\u001b[0;31m y,tf.argmax(y_,1))\n\u001b[0m\u001b[1;32m 54\u001b[0m \u001b[0mcross_entropy_mean\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduce_mean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcross_entropy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
34 | "\u001b[0;32m/home/soso/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/nn_ops.pyc\u001b[0m in \u001b[0;36msparse_softmax_cross_entropy_with_logits\u001b[0;34m(_sentinel, labels, logits, name)\u001b[0m\n\u001b[1;32m 1682\u001b[0m \"\"\"\n\u001b[1;32m 1683\u001b[0m _ensure_xent_args(\"sparse_softmax_cross_entropy_with_logits\", _sentinel,\n\u001b[0;32m-> 1684\u001b[0;31m labels, logits)\n\u001b[0m\u001b[1;32m 1685\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1686\u001b[0m \u001b[0;31m# TODO(pcmurray) Raise an error when the label is not an index in\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
35 | "\u001b[0;32m/home/soso/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/nn_ops.pyc\u001b[0m in \u001b[0;36m_ensure_xent_args\u001b[0;34m(name, sentinel, labels, logits)\u001b[0m\n\u001b[1;32m 1531\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0msentinel\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1532\u001b[0m raise ValueError(\"Only call `%s` with \"\n\u001b[0;32m-> 1533\u001b[0;31m \"named arguments (labels=..., logits=..., ...)\" % name)\n\u001b[0m\u001b[1;32m 1534\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlabels\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNone\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mlogits\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1535\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Both labels and logits must be provided.\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
36 | "\u001b[0;31mValueError\u001b[0m: Only call `sparse_softmax_cross_entropy_with_logits` with named arguments (labels=..., logits=..., ...)"
37 | ],
38 | "output_type": "error"
39 | }
40 | ],
41 | "source": [
42 | "import tensorflow as tf\n",
43 | "from tensorflow.examples.tutorials.mnist import input_data \n",
44 | "\n",
45 | "INPUT_NODE = 784\n",
46 | "OUTPUT_NODE = 10\n",
47 | "\n",
48 | "LAYER1_NONE = 500\n",
49 | "\n",
50 | "BATCH_SIZE = 100\n",
51 | "\n",
52 | "LEARNING_RATE_BASE = 0.8\n",
53 | "LEARNING_RATE_DACAY = 0.99\n",
54 | "REGULARIZATION_RATE = 0.0001\n",
55 | "TRAINING_STEPS = 30000\n",
56 | "MOVING_AVERAGE_DECAY = 0.99\n",
57 | "\n",
58 | "def inference(input_tensor,avg_class,weights1,biases1,weights2,biases2):\n",
59 | " if avg_class == None:\n",
60 | " layer1 = tf.nn.relu(tf.matmul(input_tensor,weights1)+biases1)\n",
61 | " return tf.matmul(layer1,weights2) + biases2\n",
62 | " else:\n",
63 | " layer1 = tf.nn.relu(tf.matmul(\n",
64 | " input_tensor,avg_class.average(weights1))+\n",
65 | " avg_class.average(biases1))\n",
66 | " return tf.matmul(layer1,avg_class.average(weights2)+\n",
67 | " avg_class.average(biases2))\n",
68 | " \n",
69 | "def train(mnist):\n",
70 | " x = tf.placeholder(tf.float32,[None,INPUT_NODE],name='x-input')\n",
71 | " y_ = tf.placeholder(tf.float32,[None,OUTPUT_NODE],name='y-input')\n",
72 | " \n",
73 | " weights1 = tf.Variable(tf.truncated_normal(\n",
74 | " [INPUT_NODE,LAYER1_NONE],stddev=0.1))\n",
75 | " biases1 = tf.Variable(tf.constant(0.1,shape=[LAYER1_NONE]))\n",
76 | " \n",
77 | " weight2 = tf.Variable(tf.truncated_normal(\n",
78 | " [LAYER1_NONE,OUTPUT_NODE],stddev=0.1))\n",
79 | " biases2 = tf.Variable(tf.constant(0.1,shape=[OUTPUT_NODE]))\n",
80 | " \n",
81 | " y = inference(x,None,weights1,biases1,weight2,biases2)\n",
82 | " \n",
83 | " global_step = tf.Variable(0,trainable=False)\n",
84 | " \n",
85 | " variables_averages = tf.train.ExponentialMovingAverage(\n",
86 | " MOVING_AVERAGE_DECAY,global_step)\n",
87 | " variables_averages_op = variables_averages.apply(\n",
88 | " tf.trainable_variables())\n",
89 | " \n",
90 | " average_y = inference(x,variables_averages,\n",
91 | " weights1,biases1,weight2,biases2)\n",
92 | " \n",
93 | " cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
94 | " y,tf.argmax(y_,1))\n",
95 | " cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
96 | " \n",
97 | " regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
98 | " regularization = regularizer(weights1) + regularizer(weight2)\n",
99 | " loss = cross_entropy_mean + regularization\n",
100 | " learning_rate = tf.train.exponential_decay(\n",
101 | " LEARNING_RATE_BASE,global_step,\n",
102 | " mnist.train.num_examples/BATCH_SIZE,LEARNING_RATE_DACAY)\n",
103 | " \n",
104 | " train_step = tf.train.GradientDescentOptimizer(\n",
105 | " learning_rate).minimize(loss,global_step=global_step)\n",
106 | " \n",
107 | " train_op = tf.group(train_step,variables_averages_op)\n",
108 | " \n",
109 | " correct_prediction = tf.equal(\n",
110 | " tf.argmax(average_y,1),tf.argmax(y_,1))\n",
111 | " accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n",
112 | " \n",
113 | " with tf.Session() as sess:\n",
114 | " tf.initialize_all_variables().run()\n",
115 | " \n",
116 | " validate_feed = {x:mnist.validation.images,\n",
117 | " y_:mnist.validation.labels}\n",
118 | " test_feed = {x:mnist.test.images,y_:mnist.test.labels}\n",
119 | " \n",
120 | " for i in range(TRAINING_STEPS):\n",
121 | " if i % 1000 == 0:\n",
122 | " validate_acc = sess.run(\n",
123 | " accuracy,feed_dict=validate_feed)\n",
124 | " print(\"After %d training step(s),v acc is %g\"\n",
125 | " % (i,validate_acc))\n",
126 | " \n",
127 | " xs,ys = mnist.train.next_batch(BATCH_SIZE)\n",
128 | " sess.run(train_op,feed_dict={x:xs,y_:ys})\n",
129 | " \n",
130 | " test_acc = sess.run(accuracy,feed_dict=test_feed)\n",
131 | " print(\"After %d training step(s),test accuray is %g\"\n",
132 | " %(TRAINING_STEPS,test_acc))\n",
133 | " \n",
134 | "\n",
135 | "def main(argv=None):\n",
136 | " mnist = input_data.read_data_sets(\n",
137 | " \"/home/soso/MNIST_data\",one_hot=True)\n",
138 | " train(mnist)\n",
139 | " \n",
140 | "if __name__ == '__main__':\n",
141 | " tf.app.run()"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": 3,
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | ""
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 3,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | ""
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | ""
169 | ]
170 | }
171 | ],
172 | "metadata": {
173 | "kernelspec": {
174 | "display_name": "Python 2",
175 | "language": "python",
176 | "name": "python2"
177 | },
178 | "language_info": {
179 | "codemirror_mode": {
180 | "name": "ipython",
181 | "version": 2.0
182 | },
183 | "file_extension": ".py",
184 | "mimetype": "text/x-python",
185 | "name": "python",
186 | "nbconvert_exporter": "python",
187 | "pygments_lexer": "ipython2",
188 | "version": "2.7.6"
189 | }
190 | },
191 | "nbformat": 4,
192 | "nbformat_minor": 0
193 | }
--------------------------------------------------------------------------------
/InActionB1/chapter5/5_2_1.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.examples.tutorials.mnist import input_data
3 |
4 | INPUT_NODE = 784
5 | OUTPUT_NODE = 10
6 |
7 | LAYER1_NONE = 500
8 |
9 | BATCH_SIZE = 100
10 |
11 | LEARNING_RATE_BASE = 0.8
12 | LEARNING_RATE_DACAY = 0.99
13 | REGULARIZATION_RATE = 0.0001
14 | TRAINING_STEPS = 30000
15 | MOVING_AVERAGE_DECAY = 0.99
16 |
17 |
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass of the two-layer (one hidden layer) MNIST network.

    Args:
        input_tensor: batch of flattened images, shape [None, INPUT_NODE].
        avg_class: an ExponentialMovingAverage whose shadow values should be
            used in place of the raw parameters, or None to use the raw
            variables directly.
        weights1, biases1: hidden-layer parameters.
        weights2, biases2: output-layer parameters.

    Returns:
        Unscaled logits, shape [None, OUTPUT_NODE] (softmax is applied by
        the loss, not here).
    """
    # `is None` is the correct identity test; `== None` may trigger rich
    # comparison on the argument object.
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    else:
        # Evaluate with the moving-average (shadow) copies of all parameters.
        layer1 = tf.nn.relu(tf.matmul(
            input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)
26 |
27 |
def train(mnist):
    """Build, train, and evaluate the two-layer MNIST network.

    Trains with gradient descent on an exponentially decaying learning rate,
    maintains an exponential moving average of all trainable variables, prints
    validation accuracy every 1000 steps, and finally prints test accuracy.

    Args:
        mnist: a DataSets object as returned by input_data.read_data_sets.
    """
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NONE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NONE]))

    weight2 = tf.Variable(tf.truncated_normal([LAYER1_NONE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Forward pass with the raw (non-averaged) parameters; drives the loss.
    y = inference(x, None, weights1, biases1, weight2, biases2)

    global_step = tf.Variable(0, trainable=False)

    # Shadow every trainable variable with its moving average; accuracy is
    # measured on the averaged parameters.
    variables_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variables_averages.apply(tf.trainable_variables())

    average_y = inference(x, variables_averages, weights1, biases1, weight2, biases2)

    # The sparse variant takes integer class ids, hence argmax over one-hot y_.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # L2-regularize the weight matrices only (biases are conventionally skipped).
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weight2)
    loss = cross_entropy_mean + regularization

    # Decay the learning rate roughly once per epoch.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DACAY)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Single op that both applies gradients and refreshes the moving averages.
    train_op = tf.group(train_step, variables_averages_op)

    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        # Fix: tf.initialize_all_variables() is deprecated; use the modern
        # initializer (consistent with mnist_train_5_5.py in this repo).
        tf.global_variables_initializer().run()

        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}

        for i in range(TRAINING_STEPS):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s),v acc is %g" % (i, validate_acc))

            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})

        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s),test accuray is %g" % (TRAINING_STEPS, test_acc))
79 |
def main(argv=None):
    """Entry point: load the MNIST dataset and run training."""
    dataset = input_data.read_data_sets("/home/soso/MNIST_data", one_hot=True)
    train(dataset)


if __name__ == '__main__':
    tf.app.run()
--------------------------------------------------------------------------------
/InActionB1/chapter5/5_4_2.py:
--------------------------------------------------------------------------------
import tensorflow as tf

# Two scalar variables plus their sum; only the graph structure matters here,
# the values are never evaluated.
first = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
second = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
total = first + second

# Export the graph's MetaGraphDef as a human-readable text proto.
exporter = tf.train.Saver()
exporter.export_meta_graph(
    "/home/soso/PycharmProjects/TensorFlowinAction/meta_graph.json", as_text=True)
--------------------------------------------------------------------------------
/InActionB1/chapter5/mnist_eval_5_5.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tensorflow as tf
3 | import mnist_inference_5_5
4 | import mnist_train_5_5
5 | import time
6 |
7 | from tensorflow.examples.tutorials.mnist import input_data
8 |
9 | EVAL_INTERVAL_SECS = 10
10 |
def evaluate(mnist):
    """Repeatedly restore the latest checkpoint and report validation accuracy.

    Polls the checkpoint directory every EVAL_INTERVAL_SECS seconds; returns
    as soon as no checkpoint is found.
    """
    with tf.Graph().as_default() as graph:
        # Placeholders and the inference graph (no regularizer at eval time).
        x = tf.placeholder(tf.float32, [None, mnist_inference_5_5.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference_5_5.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference_5_5.inference(x, None)
        hits = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

        # Map the moving-average shadow variables onto the model variables so
        # evaluation uses the averaged parameters.
        ema = tf.train.ExponentialMovingAverage(mnist_train_5_5.MOVING_AVERAGE_DECAY)
        saver = tf.train.Saver(ema.variables_to_restore())

        while True:
            with tf.Session() as sess:
                state = tf.train.get_checkpoint_state(mnist_train_5_5.MODEL_SAVE_PATH)
                if not (state and state.model_checkpoint_path):
                    print("No checkpoint")
                    return
                saver.restore(sess, state.model_checkpoint_path)
                print(state.model_checkpoint_path)
                # The step number is the suffix of the checkpoint file name.
                global_step = state.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training step(s),validation accuracy = %g " % (global_step, accuracy_score))

            time.sleep(EVAL_INTERVAL_SECS)
39 |
40 |
41 |
def main(argv=None):
    """Entry point: load MNIST and start the evaluation loop."""
    dataset = input_data.read_data_sets("/home/soso/MNIST_data", one_hot=True)
    evaluate(dataset)


if __name__ == '__main__':
    tf.app.run()
--------------------------------------------------------------------------------
/InActionB1/chapter5/mnist_inference_5_5.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import tensorflow as tf
3 |
4 | INPUT_NODE = 784
5 | OUTPUT_NODE = 10
6 | LAYER1_NODE = 500
7 |
8 |
def get_weight_variable(shape, regularizer):
    """Create (or fetch, under variable-scope reuse) a weight variable.

    If a regularizer is supplied, its penalty on the weights is added to the
    graph's 'losses' collection so the training script can sum all such
    penalties into the total loss.

    Args:
        shape: list of ints, the variable's shape.
        regularizer: callable mapping a tensor to a scalar loss, or None.

    Returns:
        The weight tf.Variable.
    """
    weights = tf.get_variable(
        "weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))

    # `is not None` instead of `!= None`: identity test avoids invoking rich
    # comparison on the regularizer object.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))

    return weights
16 |
def inference(input_tensor, regularizer):
    """Forward pass for the two-layer MNIST network.

    Args:
        input_tensor: batch of flattened images, shape [None, INPUT_NODE].
        regularizer: passed through to get_weight_variable; may be None.

    Returns:
        Unscaled logits, shape [None, OUTPUT_NODE].
    """
    # Hidden layer: affine transform followed by ReLU.
    with tf.variable_scope('layer1'):
        w = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        b = tf.get_variable(
            "biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        hidden = tf.nn.relu(tf.matmul(input_tensor, w) + b)

    # Output layer: plain affine transform; softmax is applied by the loss.
    with tf.variable_scope('layer2'):
        w = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        b = tf.get_variable(
            "biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        logits = tf.matmul(hidden, w) + b

    return logits
29 |
30 |
--------------------------------------------------------------------------------
/InActionB1/chapter5/mnist_inference_5_5.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB1/chapter5/mnist_inference_5_5.pyc
--------------------------------------------------------------------------------
/InActionB1/chapter5/mnist_train_5_5.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | import os
3 | import tensorflow as tf
4 | import mnist_inference_5_5
5 | from tensorflow.examples.tutorials.mnist import input_data
6 |
7 | BATCH_SIZE = 100
8 | LEARNING_RATE_BASE = 0.8
9 | LEARNING_RATE_DECAY = 0.99
10 | REGULARZTION_RATE = 0.0001
11 | TRAINING_STEPS = 30000
12 | MOVING_AVERAGE_DECAY = 0.99
13 |
14 | MODEL_SAVE_PATH = "/home/soso/PycharmProjects/TensorFlowinAction/"
15 | MODEL_NAME = "model_5_5.ckpt"
16 |
def train(mnist):
    """Train the network from mnist_inference_5_5 and checkpoint it periodically."""
    # Inputs and forward pass, with L2 regularization on the weights.
    x = tf.placeholder(tf.float32, [None, mnist_inference_5_5.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference_5_5.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARZTION_RATE)
    y = mnist_inference_5_5.inference(x, regularizer)

    global_step = tf.Variable(0, trainable=False)

    # Shadow every trainable variable with its exponential moving average.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())

    # Loss = mean cross entropy + accumulated regularization penalties.
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    loss = tf.reduce_mean(xent) + tf.add_n(tf.get_collection('losses'))

    # Exponentially decayed learning rate, roughly one decay step per epoch.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    descent = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Single op that both applies gradients and refreshes the averages.
    train_op = tf.group(descent, ema_op)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})

            # Report and checkpoint every 1000 steps.
            if i % 1000 == 0:
                print("After %d training step(s), loss is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
48 |
def main(argv=None):
    """Entry point for tf.app.run(): load the MNIST data and start training."""
    dataset = input_data.read_data_sets("/home/soso/MNIST_data", one_hot=True)
    train(dataset)


if __name__ == '__main__':
    tf.app.run()
55 |
--------------------------------------------------------------------------------
/InActionB1/chapter5/mnist_train_5_5.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB1/chapter5/mnist_train_5_5.pyc
--------------------------------------------------------------------------------
/InActionB1/chapter6/6_4_1.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB1/chapter6/6_4_1.py
--------------------------------------------------------------------------------
/InActionB1/chapter6/mnist_inference_6_4_1.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import tensorflow as tf
3 |
4 | INPUT_NODE = 784
5 | OUTPUT_NODE = 10
6 |
7 | IMAGE_SIZE = 28
8 | NUM_CHANNELS = 1
9 | NUM_LABELS = 10
10 |
11 | CONV1_DEEP = 32
12 | CONV1_SIZE = 5
13 |
14 | CONV2_DEEP = 64
15 | CONV2_SIZE = 5
16 |
17 | FC_SIZE = 512
18 |
19 |
20 |
def inference(input_tensor, train, regularizer):
    """Forward pass of the LeNet-5 style convolutional network.

    Args:
        input_tensor: 4-D image batch [batch, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS].
        train: Python bool; enables dropout on the first FC layer while training.
        regularizer: callable applied to the FC weight matrices, its results are
            added to the 'losses' collection; pass None to skip regularization.

    Returns:
        Unnormalized class scores (logits) of shape [batch, NUM_LABELS].
    """
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))

        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))

        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the feature map. Use -1 for the batch dimension so the graph also
    # works with an unknown batch size (the original hard-coded pool_shape[0],
    # which is None for a [None, ...] placeholder and breaks tf.reshape).
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])

    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            "weight", [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))

        fc1_biases = tf.get_variable("bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            "weight", [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))

        fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        # Bug fix: return raw logits. The original wrapped this matmul in
        # tf.nn.relu, which zeroes every negative score and cripples
        # sparse_softmax_cross_entropy_with_logits in the training script.
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
71 |
72 |
--------------------------------------------------------------------------------
/InActionB1/chapter6/mnist_inference_6_4_1.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB1/chapter6/mnist_inference_6_4_1.pyc
--------------------------------------------------------------------------------
/InActionB1/chapter6/mnist_train_6_4_1.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | import os
3 | import tensorflow as tf
4 | import mnist_inference_6_4_1
5 | from tensorflow.examples.tutorials.mnist import input_data
6 | import numpy as np
7 | from tensorflow.contrib import layers
8 |
9 | BATCH_SIZE = 100
10 | LEARNING_RATE_BASE = 0.01
11 | LEARNING_RATE_DECAY = 0.99
12 | REGULARZTION_RATE = 0.0001
13 | TRAINING_STEPS = 60000
14 | MOVING_AVERAGE_DECAY = 0.99
15 |
16 | MODEL_SAVE_PATH = "/home/soso/PycharmProjects/MODEL_SAVE/"
17 | MODEL_NAME = "model_6_4_1.ckpt"
18 |
def train(mnist):
    """Build the chapter-6 CNN graph and run the training loop.

    Feeds MNIST batches reshaped to 4-D image tensors into the LeNet-5 style
    network from mnist_inference_6_4_1, trains with SGD plus a moving average
    over trainable variables, and checkpoints every 100 steps.
    """
    # 4-D image input; the CNN needs an explicit batch dimension here.
    x = tf.placeholder(tf.float32, [BATCH_SIZE, mnist_inference_6_4_1.IMAGE_SIZE, mnist_inference_6_4_1.IMAGE_SIZE, mnist_inference_6_4_1.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference_6_4_1.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARZTION_RATE)
    # train=True enables dropout inside inference().
    y = mnist_inference_6_4_1.inference(x, True, regularizer)

    global_step = tf.Variable(0, trainable=False)

    # Moving averages over all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # Cross-entropy against the integer class of the one-hot labels, plus the
    # L2 penalties inference() put into the 'losses' collection.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    train_op = tf.group(train_step, variable_averages_op)

    saver = tf.train.Saver()

    # Bug fix: removed the stray `net = layers.conv2d(input_data, 32, [3, 3])`
    # line. It passed the imported `input_data` *module* (not a tensor) to
    # conv2d, which raises at graph-construction time, and its result was
    # never used anywhere.

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Flat 784-vectors -> [batch, 28, 28, 1] images for the CNN.
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference_6_4_1.IMAGE_SIZE, mnist_inference_6_4_1.IMAGE_SIZE, mnist_inference_6_4_1.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            if i % 100 == 0:
                print("After %d training step(s), loss is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
54 |
def main(argv=None):
    """Entry point for tf.app.run(): load the MNIST data and start training."""
    dataset = input_data.read_data_sets("/home/soso/MNIST_data", one_hot=True)
    train(dataset)


if __name__ == '__main__':
    tf.app.run()
61 |
--------------------------------------------------------------------------------
/InActionB1/chapter8/nlModel:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | #from tensorflow
4 | #def main(_):
5 | # train_data,valid_data,test_data,_ =
--------------------------------------------------------------------------------
/InActionB1/chapter8/sin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB1/chapter8/sin.png
--------------------------------------------------------------------------------
/InActionB1/chapter8/sinModel.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import matplotlib as mpl
4 | from matplotlib import pyplot as plt
5 |
6 | learn = tf.contrib.learn
7 |
8 | HIDDEN_SIZE = 30
9 | NUM_LAYERS = 2
10 | TIMESTEPS = 10
11 | TRAINING_STEPS = 10000
12 | BATCH_SIZE = 32
13 |
14 | TRAINING_EXAMPLES = 10000
15 | TESTING_EXAMPLES = 1000
16 | SAMPLE_GAP = 0.01
17 |
def lstm_model(X, y):
    """Model function for tf.contrib.learn: a stacked-LSTM sine regressor.

    Args:
        X: float32 batch of input windows (shape produced by generate_data).
        y: regression targets for each window.

    Returns:
        A (predictions, loss, train_op) triple as expected by learn.Estimator.
    """
    # Bug fix: each layer must get its OWN cell instance. The original built
    # `[lstm_cell] * NUM_LAYERS`, which makes every layer share a single cell
    # (and therefore its weights); TensorFlow >= 1.2 rejects that outright.
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(HIDDEN_SIZE, state_is_tuple=True)
         for _ in range(NUM_LAYERS)])

    output, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    output = tf.reshape(output, [-1, HIDDEN_SIZE])

    # Linear read-out (activation_fn=None) to one value per step.
    predictions = tf.contrib.layers.fully_connected(output, 1, None)

    labels = tf.reshape(y, [-1])
    predictions = tf.reshape(predictions, [-1])
    # Keyword args: mean_squared_error's first positional parameter is labels;
    # MSE is symmetric so the value is unchanged, but this documents intent.
    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer="Adagrad", learning_rate=0.1)
    return predictions, loss, train_op
32 |
33 |
34 |
# Wrap the model function in a (deprecated tf.contrib.learn) Estimator.
regressor = learn.Estimator(model_fn=lstm_model)

# The sine wave is sampled every SAMPLE_GAP units: training data covers
# [0, test_start) and evaluation covers [test_start, test_end).
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
39 |
def generate_data(seq):
    """Slice *seq* into (window, next value) supervised pairs.

    Each input is the TIMESTEPS values preceding an element and the label is
    that element itself. Inputs keep an extra axis, so the returned arrays
    have shapes [n, 1, TIMESTEPS] and [n, 1] (float32).
    """
    windows = []
    targets = []
    for start in range(len(seq) - TIMESTEPS - 1):
        windows.append([seq[start:start + TIMESTEPS]])
        targets.append([seq[start + TIMESTEPS]])
    return np.array(windows, dtype=np.float32), np.array(targets, dtype=np.float32)
48 |
49 |
# Build disjoint train/test sets from consecutive stretches of the sine wave.
train_X, train_y = generate_data(np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(np.sin(np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))

regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)

# predict() yields one scalar per test window; wrap each so the result lines
# up with test_y's [n, 1] shape.
predicted = [[pred] for pred in regressor.predict(test_X)]

rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0))
print("rmse : %f" % rmse[0])

fig = plt.figure()
# Bug fix: plt.plot returns a *list* of Line2D artists. The original passed
# those lists straight to plt.legend, which cannot map a list to a legend
# entry; unpacking the single line gives legend real artists.
plot_predicted, = plt.plot(predicted, label='predicted', color='coral', linestyle=':')
plot_test, = plt.plot(test_y, label='real_sin')
plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin'])

fig.savefig('sin.png')
68 |
--------------------------------------------------------------------------------
/InActionB1/checkpoint:
--------------------------------------------------------------------------------
1 | model_checkpoint_path: "/home/soso/PycharmProjects/TensorFlowinAction/model_5_5.ckpt-29001"
2 | all_model_checkpoint_paths: "/home/soso/PycharmProjects/TensorFlowinAction/model_5_5.ckpt-25001"
3 | all_model_checkpoint_paths: "/home/soso/PycharmProjects/TensorFlowinAction/model_5_5.ckpt-26001"
4 | all_model_checkpoint_paths: "/home/soso/PycharmProjects/TensorFlowinAction/model_5_5.ckpt-27001"
5 | all_model_checkpoint_paths: "/home/soso/PycharmProjects/TensorFlowinAction/model_5_5.ckpt-28001"
6 | all_model_checkpoint_paths: "/home/soso/PycharmProjects/TensorFlowinAction/model_5_5.ckpt-29001"
7 |
--------------------------------------------------------------------------------
/InActionB1/cher2:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [
10 | {
11 | "data": {
12 | "text/plain": [
13 | "array([ 3., 5.], dtype=float32)"
14 | ]
15 | },
16 | "execution_count": 1,
17 | "metadata": {},
18 | "output_type": "execute_result"
19 | }
20 | ],
21 | "source": [
22 | "import tensorflow as tf\n",
23 | "a = tf.constant([1.0,2.0],name=\"a\")\n",
24 | "b = tf.constant([2.0,3.0],name=\"b\")\n",
25 | "result = a+b\n",
26 | "sess = tf.Session()\n",
27 | "sess.run(result)"
28 | ]
29 | }
30 | ],
31 | "metadata": {
32 | "kernelspec": {
33 | "display_name": "Python 2",
34 | "language": "python",
35 | "name": "python2"
36 | },
37 | "language_info": {
38 | "codemirror_mode": {
39 | "name": "ipython",
40 | "version": 2.0
41 | },
42 | "file_extension": ".py",
43 | "mimetype": "text/x-python",
44 | "name": "python",
45 | "nbconvert_exporter": "python",
46 | "pygments_lexer": "ipython2",
47 | "version": "2.7.6"
48 | }
49 | },
50 | "nbformat": 4,
51 | "nbformat_minor": 0
52 | }
--------------------------------------------------------------------------------
/InActionB1/cher3:
--------------------------------------------------------------------------------
# Chapter 3: a minimal two-layer network trained with Adam on synthetic data.
# NOTE: written for Python 2 (uses print statements).
import tensorflow as tf
from numpy.random import RandomState

batch_size = 8

# Weights of a 2-3-1 fully connected network, seeded for reproducibility.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')

# Forward pass: two linear layers (no activation function).
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

# Hand-written cross-entropy; clipping keeps log() away from 0.
# NOTE(review): y is a raw linear output, not a probability — presumably the
# book's simplification; confirm before reusing.
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits()

train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

# Synthetic dataset: label is 1 exactly when x1 + x2 < 1.
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Weights before training.
    print sess.run(w1)
    print sess.run(w2)

    STEPS = 5000

    for i in range(STEPS):
        # Cycle through the dataset in fixed-size mini-batches.
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)

        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})

        if i % 1000 == 0:
            # Loss over the full dataset, reported every 1000 steps.
            total_cross_entropy = sess.run(cross_entropy,feed_dict={x: X, y_: Y})
            print("Atter %d training step(s),cross entropy on all data ""is %g " % (i, total_cross_entropy))

    # Weights after training.
    print sess.run(w1)
    print sess.run(w2)
--------------------------------------------------------------------------------
/InActionB1/meta_graph.json:
--------------------------------------------------------------------------------
1 | meta_info_def {
2 | stripped_op_list {
3 | op {
4 | name: "Add"
5 | input_arg {
6 | name: "x"
7 | type_attr: "T"
8 | }
9 | input_arg {
10 | name: "y"
11 | type_attr: "T"
12 | }
13 | output_arg {
14 | name: "z"
15 | type_attr: "T"
16 | }
17 | attr {
18 | name: "T"
19 | type: "type"
20 | allowed_values {
21 | list {
22 | type: DT_HALF
23 | type: DT_FLOAT
24 | type: DT_DOUBLE
25 | type: DT_UINT8
26 | type: DT_INT8
27 | type: DT_INT16
28 | type: DT_INT32
29 | type: DT_INT64
30 | type: DT_COMPLEX64
31 | type: DT_COMPLEX128
32 | type: DT_STRING
33 | }
34 | }
35 | }
36 | }
37 | op {
38 | name: "Assign"
39 | input_arg {
40 | name: "ref"
41 | type_attr: "T"
42 | is_ref: true
43 | }
44 | input_arg {
45 | name: "value"
46 | type_attr: "T"
47 | }
48 | output_arg {
49 | name: "output_ref"
50 | type_attr: "T"
51 | is_ref: true
52 | }
53 | attr {
54 | name: "T"
55 | type: "type"
56 | }
57 | attr {
58 | name: "validate_shape"
59 | type: "bool"
60 | default_value {
61 | b: true
62 | }
63 | }
64 | attr {
65 | name: "use_locking"
66 | type: "bool"
67 | default_value {
68 | b: true
69 | }
70 | }
71 | allows_uninitialized_input: true
72 | }
73 | op {
74 | name: "Const"
75 | output_arg {
76 | name: "output"
77 | type_attr: "dtype"
78 | }
79 | attr {
80 | name: "value"
81 | type: "tensor"
82 | }
83 | attr {
84 | name: "dtype"
85 | type: "type"
86 | }
87 | }
88 | op {
89 | name: "Identity"
90 | input_arg {
91 | name: "input"
92 | type_attr: "T"
93 | }
94 | output_arg {
95 | name: "output"
96 | type_attr: "T"
97 | }
98 | attr {
99 | name: "T"
100 | type: "type"
101 | }
102 | }
103 | op {
104 | name: "NoOp"
105 | }
106 | op {
107 | name: "RestoreV2"
108 | input_arg {
109 | name: "prefix"
110 | type: DT_STRING
111 | }
112 | input_arg {
113 | name: "tensor_names"
114 | type: DT_STRING
115 | }
116 | input_arg {
117 | name: "shape_and_slices"
118 | type: DT_STRING
119 | }
120 | output_arg {
121 | name: "tensors"
122 | type_list_attr: "dtypes"
123 | }
124 | attr {
125 | name: "dtypes"
126 | type: "list(type)"
127 | has_minimum: true
128 | minimum: 1
129 | }
130 | }
131 | op {
132 | name: "SaveV2"
133 | input_arg {
134 | name: "prefix"
135 | type: DT_STRING
136 | }
137 | input_arg {
138 | name: "tensor_names"
139 | type: DT_STRING
140 | }
141 | input_arg {
142 | name: "shape_and_slices"
143 | type: DT_STRING
144 | }
145 | input_arg {
146 | name: "tensors"
147 | type_list_attr: "dtypes"
148 | }
149 | attr {
150 | name: "dtypes"
151 | type: "list(type)"
152 | has_minimum: true
153 | minimum: 1
154 | }
155 | }
156 | op {
157 | name: "VariableV2"
158 | output_arg {
159 | name: "ref"
160 | type_attr: "dtype"
161 | is_ref: true
162 | }
163 | attr {
164 | name: "shape"
165 | type: "shape"
166 | }
167 | attr {
168 | name: "dtype"
169 | type: "type"
170 | }
171 | attr {
172 | name: "container"
173 | type: "string"
174 | default_value {
175 | s: ""
176 | }
177 | }
178 | attr {
179 | name: "shared_name"
180 | type: "string"
181 | default_value {
182 | s: ""
183 | }
184 | }
185 | is_stateful: true
186 | }
187 | }
188 | tensorflow_version: "1.0.1"
189 | tensorflow_git_version: "v1.0.0-65-g4763edf-dirty"
190 | }
191 | graph_def {
192 | node {
193 | name: "Const"
194 | op: "Const"
195 | attr {
196 | key: "_output_shapes"
197 | value {
198 | list {
199 | shape {
200 | dim {
201 | size: 1
202 | }
203 | }
204 | }
205 | }
206 | }
207 | attr {
208 | key: "dtype"
209 | value {
210 | type: DT_FLOAT
211 | }
212 | }
213 | attr {
214 | key: "value"
215 | value {
216 | tensor {
217 | dtype: DT_FLOAT
218 | tensor_shape {
219 | dim {
220 | size: 1
221 | }
222 | }
223 | float_val: 1.0
224 | }
225 | }
226 | }
227 | }
228 | node {
229 | name: "v1"
230 | op: "VariableV2"
231 | attr {
232 | key: "_output_shapes"
233 | value {
234 | list {
235 | shape {
236 | dim {
237 | size: 1
238 | }
239 | }
240 | }
241 | }
242 | }
243 | attr {
244 | key: "container"
245 | value {
246 | s: ""
247 | }
248 | }
249 | attr {
250 | key: "dtype"
251 | value {
252 | type: DT_FLOAT
253 | }
254 | }
255 | attr {
256 | key: "shape"
257 | value {
258 | shape {
259 | dim {
260 | size: 1
261 | }
262 | }
263 | }
264 | }
265 | attr {
266 | key: "shared_name"
267 | value {
268 | s: ""
269 | }
270 | }
271 | }
272 | node {
273 | name: "v1/Assign"
274 | op: "Assign"
275 | input: "v1"
276 | input: "Const"
277 | attr {
278 | key: "T"
279 | value {
280 | type: DT_FLOAT
281 | }
282 | }
283 | attr {
284 | key: "_class"
285 | value {
286 | list {
287 | s: "loc:@v1"
288 | }
289 | }
290 | }
291 | attr {
292 | key: "_output_shapes"
293 | value {
294 | list {
295 | shape {
296 | dim {
297 | size: 1
298 | }
299 | }
300 | }
301 | }
302 | }
303 | attr {
304 | key: "use_locking"
305 | value {
306 | b: true
307 | }
308 | }
309 | attr {
310 | key: "validate_shape"
311 | value {
312 | b: true
313 | }
314 | }
315 | }
316 | node {
317 | name: "v1/read"
318 | op: "Identity"
319 | input: "v1"
320 | attr {
321 | key: "T"
322 | value {
323 | type: DT_FLOAT
324 | }
325 | }
326 | attr {
327 | key: "_class"
328 | value {
329 | list {
330 | s: "loc:@v1"
331 | }
332 | }
333 | }
334 | attr {
335 | key: "_output_shapes"
336 | value {
337 | list {
338 | shape {
339 | dim {
340 | size: 1
341 | }
342 | }
343 | }
344 | }
345 | }
346 | }
347 | node {
348 | name: "Const_1"
349 | op: "Const"
350 | attr {
351 | key: "_output_shapes"
352 | value {
353 | list {
354 | shape {
355 | dim {
356 | size: 1
357 | }
358 | }
359 | }
360 | }
361 | }
362 | attr {
363 | key: "dtype"
364 | value {
365 | type: DT_FLOAT
366 | }
367 | }
368 | attr {
369 | key: "value"
370 | value {
371 | tensor {
372 | dtype: DT_FLOAT
373 | tensor_shape {
374 | dim {
375 | size: 1
376 | }
377 | }
378 | float_val: 2.0
379 | }
380 | }
381 | }
382 | }
383 | node {
384 | name: "v2"
385 | op: "VariableV2"
386 | attr {
387 | key: "_output_shapes"
388 | value {
389 | list {
390 | shape {
391 | dim {
392 | size: 1
393 | }
394 | }
395 | }
396 | }
397 | }
398 | attr {
399 | key: "container"
400 | value {
401 | s: ""
402 | }
403 | }
404 | attr {
405 | key: "dtype"
406 | value {
407 | type: DT_FLOAT
408 | }
409 | }
410 | attr {
411 | key: "shape"
412 | value {
413 | shape {
414 | dim {
415 | size: 1
416 | }
417 | }
418 | }
419 | }
420 | attr {
421 | key: "shared_name"
422 | value {
423 | s: ""
424 | }
425 | }
426 | }
427 | node {
428 | name: "v2/Assign"
429 | op: "Assign"
430 | input: "v2"
431 | input: "Const_1"
432 | attr {
433 | key: "T"
434 | value {
435 | type: DT_FLOAT
436 | }
437 | }
438 | attr {
439 | key: "_class"
440 | value {
441 | list {
442 | s: "loc:@v2"
443 | }
444 | }
445 | }
446 | attr {
447 | key: "_output_shapes"
448 | value {
449 | list {
450 | shape {
451 | dim {
452 | size: 1
453 | }
454 | }
455 | }
456 | }
457 | }
458 | attr {
459 | key: "use_locking"
460 | value {
461 | b: true
462 | }
463 | }
464 | attr {
465 | key: "validate_shape"
466 | value {
467 | b: true
468 | }
469 | }
470 | }
471 | node {
472 | name: "v2/read"
473 | op: "Identity"
474 | input: "v2"
475 | attr {
476 | key: "T"
477 | value {
478 | type: DT_FLOAT
479 | }
480 | }
481 | attr {
482 | key: "_class"
483 | value {
484 | list {
485 | s: "loc:@v2"
486 | }
487 | }
488 | }
489 | attr {
490 | key: "_output_shapes"
491 | value {
492 | list {
493 | shape {
494 | dim {
495 | size: 1
496 | }
497 | }
498 | }
499 | }
500 | }
501 | }
502 | node {
503 | name: "add"
504 | op: "Add"
505 | input: "v1/read"
506 | input: "v2/read"
507 | attr {
508 | key: "T"
509 | value {
510 | type: DT_FLOAT
511 | }
512 | }
513 | attr {
514 | key: "_output_shapes"
515 | value {
516 | list {
517 | shape {
518 | dim {
519 | size: 1
520 | }
521 | }
522 | }
523 | }
524 | }
525 | }
526 | node {
527 | name: "save/Const"
528 | op: "Const"
529 | attr {
530 | key: "_output_shapes"
531 | value {
532 | list {
533 | shape {
534 | }
535 | }
536 | }
537 | }
538 | attr {
539 | key: "dtype"
540 | value {
541 | type: DT_STRING
542 | }
543 | }
544 | attr {
545 | key: "value"
546 | value {
547 | tensor {
548 | dtype: DT_STRING
549 | tensor_shape {
550 | }
551 | string_val: "model"
552 | }
553 | }
554 | }
555 | }
556 | node {
557 | name: "save/SaveV2/tensor_names"
558 | op: "Const"
559 | attr {
560 | key: "_output_shapes"
561 | value {
562 | list {
563 | shape {
564 | dim {
565 | size: 2
566 | }
567 | }
568 | }
569 | }
570 | }
571 | attr {
572 | key: "dtype"
573 | value {
574 | type: DT_STRING
575 | }
576 | }
577 | attr {
578 | key: "value"
579 | value {
580 | tensor {
581 | dtype: DT_STRING
582 | tensor_shape {
583 | dim {
584 | size: 2
585 | }
586 | }
587 | string_val: "v1"
588 | string_val: "v2"
589 | }
590 | }
591 | }
592 | }
593 | node {
594 | name: "save/SaveV2/shape_and_slices"
595 | op: "Const"
596 | attr {
597 | key: "_output_shapes"
598 | value {
599 | list {
600 | shape {
601 | dim {
602 | size: 2
603 | }
604 | }
605 | }
606 | }
607 | }
608 | attr {
609 | key: "dtype"
610 | value {
611 | type: DT_STRING
612 | }
613 | }
614 | attr {
615 | key: "value"
616 | value {
617 | tensor {
618 | dtype: DT_STRING
619 | tensor_shape {
620 | dim {
621 | size: 2
622 | }
623 | }
624 | string_val: ""
625 | string_val: ""
626 | }
627 | }
628 | }
629 | }
630 | node {
631 | name: "save/SaveV2"
632 | op: "SaveV2"
633 | input: "save/Const"
634 | input: "save/SaveV2/tensor_names"
635 | input: "save/SaveV2/shape_and_slices"
636 | input: "v1"
637 | input: "v2"
638 | attr {
639 | key: "dtypes"
640 | value {
641 | list {
642 | type: DT_FLOAT
643 | type: DT_FLOAT
644 | }
645 | }
646 | }
647 | }
648 | node {
649 | name: "save/control_dependency"
650 | op: "Identity"
651 | input: "save/Const"
652 | input: "^save/SaveV2"
653 | attr {
654 | key: "T"
655 | value {
656 | type: DT_STRING
657 | }
658 | }
659 | attr {
660 | key: "_class"
661 | value {
662 | list {
663 | s: "loc:@save/Const"
664 | }
665 | }
666 | }
667 | attr {
668 | key: "_output_shapes"
669 | value {
670 | list {
671 | shape {
672 | }
673 | }
674 | }
675 | }
676 | }
677 | node {
678 | name: "save/RestoreV2/tensor_names"
679 | op: "Const"
680 | attr {
681 | key: "_output_shapes"
682 | value {
683 | list {
684 | shape {
685 | dim {
686 | size: 1
687 | }
688 | }
689 | }
690 | }
691 | }
692 | attr {
693 | key: "dtype"
694 | value {
695 | type: DT_STRING
696 | }
697 | }
698 | attr {
699 | key: "value"
700 | value {
701 | tensor {
702 | dtype: DT_STRING
703 | tensor_shape {
704 | dim {
705 | size: 1
706 | }
707 | }
708 | string_val: "v1"
709 | }
710 | }
711 | }
712 | }
713 | node {
714 | name: "save/RestoreV2/shape_and_slices"
715 | op: "Const"
716 | attr {
717 | key: "_output_shapes"
718 | value {
719 | list {
720 | shape {
721 | dim {
722 | size: 1
723 | }
724 | }
725 | }
726 | }
727 | }
728 | attr {
729 | key: "dtype"
730 | value {
731 | type: DT_STRING
732 | }
733 | }
734 | attr {
735 | key: "value"
736 | value {
737 | tensor {
738 | dtype: DT_STRING
739 | tensor_shape {
740 | dim {
741 | size: 1
742 | }
743 | }
744 | string_val: ""
745 | }
746 | }
747 | }
748 | }
749 | node {
750 | name: "save/RestoreV2"
751 | op: "RestoreV2"
752 | input: "save/Const"
753 | input: "save/RestoreV2/tensor_names"
754 | input: "save/RestoreV2/shape_and_slices"
755 | attr {
756 | key: "_output_shapes"
757 | value {
758 | list {
759 | shape {
760 | unknown_rank: true
761 | }
762 | }
763 | }
764 | }
765 | attr {
766 | key: "dtypes"
767 | value {
768 | list {
769 | type: DT_FLOAT
770 | }
771 | }
772 | }
773 | }
774 | node {
775 | name: "save/Assign"
776 | op: "Assign"
777 | input: "v1"
778 | input: "save/RestoreV2"
779 | attr {
780 | key: "T"
781 | value {
782 | type: DT_FLOAT
783 | }
784 | }
785 | attr {
786 | key: "_class"
787 | value {
788 | list {
789 | s: "loc:@v1"
790 | }
791 | }
792 | }
793 | attr {
794 | key: "_output_shapes"
795 | value {
796 | list {
797 | shape {
798 | dim {
799 | size: 1
800 | }
801 | }
802 | }
803 | }
804 | }
805 | attr {
806 | key: "use_locking"
807 | value {
808 | b: true
809 | }
810 | }
811 | attr {
812 | key: "validate_shape"
813 | value {
814 | b: true
815 | }
816 | }
817 | }
818 | node {
819 | name: "save/RestoreV2_1/tensor_names"
820 | op: "Const"
821 | attr {
822 | key: "_output_shapes"
823 | value {
824 | list {
825 | shape {
826 | dim {
827 | size: 1
828 | }
829 | }
830 | }
831 | }
832 | }
833 | attr {
834 | key: "dtype"
835 | value {
836 | type: DT_STRING
837 | }
838 | }
839 | attr {
840 | key: "value"
841 | value {
842 | tensor {
843 | dtype: DT_STRING
844 | tensor_shape {
845 | dim {
846 | size: 1
847 | }
848 | }
849 | string_val: "v2"
850 | }
851 | }
852 | }
853 | }
854 | node {
855 | name: "save/RestoreV2_1/shape_and_slices"
856 | op: "Const"
857 | attr {
858 | key: "_output_shapes"
859 | value {
860 | list {
861 | shape {
862 | dim {
863 | size: 1
864 | }
865 | }
866 | }
867 | }
868 | }
869 | attr {
870 | key: "dtype"
871 | value {
872 | type: DT_STRING
873 | }
874 | }
875 | attr {
876 | key: "value"
877 | value {
878 | tensor {
879 | dtype: DT_STRING
880 | tensor_shape {
881 | dim {
882 | size: 1
883 | }
884 | }
885 | string_val: ""
886 | }
887 | }
888 | }
889 | }
890 | node {
891 | name: "save/RestoreV2_1"
892 | op: "RestoreV2"
893 | input: "save/Const"
894 | input: "save/RestoreV2_1/tensor_names"
895 | input: "save/RestoreV2_1/shape_and_slices"
896 | attr {
897 | key: "_output_shapes"
898 | value {
899 | list {
900 | shape {
901 | unknown_rank: true
902 | }
903 | }
904 | }
905 | }
906 | attr {
907 | key: "dtypes"
908 | value {
909 | list {
910 | type: DT_FLOAT
911 | }
912 | }
913 | }
914 | }
915 | node {
916 | name: "save/Assign_1"
917 | op: "Assign"
918 | input: "v2"
919 | input: "save/RestoreV2_1"
920 | attr {
921 | key: "T"
922 | value {
923 | type: DT_FLOAT
924 | }
925 | }
926 | attr {
927 | key: "_class"
928 | value {
929 | list {
930 | s: "loc:@v2"
931 | }
932 | }
933 | }
934 | attr {
935 | key: "_output_shapes"
936 | value {
937 | list {
938 | shape {
939 | dim {
940 | size: 1
941 | }
942 | }
943 | }
944 | }
945 | }
946 | attr {
947 | key: "use_locking"
948 | value {
949 | b: true
950 | }
951 | }
952 | attr {
953 | key: "validate_shape"
954 | value {
955 | b: true
956 | }
957 | }
958 | }
959 | node {
960 | name: "save/restore_all"
961 | op: "NoOp"
962 | input: "^save/Assign"
963 | input: "^save/Assign_1"
964 | }
965 | versions {
966 | producer: 21
967 | }
968 | }
969 | saver_def {
970 | filename_tensor_name: "save/Const:0"
971 | save_tensor_name: "save/control_dependency:0"
972 | restore_op_name: "save/restore_all"
973 | max_to_keep: 5
974 | keep_checkpoint_every_n_hours: 10000.0
975 | version: V2
976 | }
977 | collection_def {
978 | key: "trainable_variables"
979 | value {
980 | bytes_list {
981 | value: "\n\004v1:0\022\tv1/Assign\032\tv1/read:0"
982 | value: "\n\004v2:0\022\tv2/Assign\032\tv2/read:0"
983 | }
984 | }
985 | }
986 | collection_def {
987 | key: "variables"
988 | value {
989 | bytes_list {
990 | value: "\n\004v1:0\022\tv1/Assign\032\tv1/read:0"
991 | value: "\n\004v2:0\022\tv2/Assign\032\tv2/read:0"
992 | }
993 | }
994 | }
995 |
--------------------------------------------------------------------------------
/InActionB2/chapter4/me.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/me.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_100.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_200.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_200.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_300.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_300.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_400.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_400.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_500.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_500.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_me_100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_me_100.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_me_200.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_me_200.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_me_300.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_me_300.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_me_400.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_me_400.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/neural_me_500.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/neural_me_500.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/star.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/star.jpg
--------------------------------------------------------------------------------
/InActionB2/chapter4/tranImage.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import numpy as np
3 | import tensorflow as tf
4 | import scipy.io as sio
5 |
STYLE_WEIGHT = 1        # relative weight of the style term in the total loss
CONTENT_WEIGHT = 1      # relative weight of the content term in the total loss
STYLE_LAYERS = ['relu1_2','relu2_2','relu3_2']  # VGG layers whose Gram matrices define "style"
CONTENT_LAYERS = ['relu1_2']                    # VGG layers whose activations define "content"
_vgg_params = None      # lazy-loaded cache for the VGG-19 .mat weights (see vgg_params)
11 |
def vgg_params():
    """Return the pre-trained VGG-19 weights, loading the .mat file on first use.

    The parsed MATLAB structure is cached in the module-level global
    ``_vgg_params`` so the (large) file is read from disk at most once.
    """
    global _vgg_params
    if _vgg_params is not None:
        return _vgg_params
    _vgg_params = sio.loadmat('imagenet-vgg-verydeep-19.mat')
    return _vgg_params
17 |
def vgg19(input_image):
    """Build the VGG-19 feature-extraction graph over input_image.

    Returns a dict mapping each layer name to its output tensor, with all
    convolution kernels/biases frozen as tf.constant values taken from
    vgg_params(). input_image is presumably a 4-D (batch, height, width,
    channel) tensor-like value as required by tf.nn.conv2d -- TODO confirm
    against callers (loss_function passes a 1-element list around a 3-D image).
    """
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4','pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5'
    )

    weights = vgg_params()['layers'][0]
    net = input_image
    network = {}
    for i,name in enumerate(layers):
        layer_type = name[:4]
        if layer_type == 'conv':
            # Unpack (kernels, bias) from the nested MATLAB cell-array structure.
            kernels,bias = weights[i][0][0][0][0]
            # NOTE(review): swaps the first two kernel axes -- presumably the
            # .mat file stores (width, height, in, out) while tf.nn.conv2d
            # expects (height, width, in, out). Confirm against the VGG release.
            kernels = np.transpose(kernels,(1,0,2,3))
            conv = tf.nn.conv2d(net,tf.constant(kernels),strides=(1,1,1,1),padding='SAME',name=name)
            net = tf.nn.bias_add(conv,bias.reshape(-1))
            # ReLU is applied inside the conv branch, so network['convX_Y']
            # actually stores the post-activation output; the following
            # 'reluX_Y' entry then records the same tensor again.
            net = tf.nn.relu(net)
        elif layer_type == 'pool':
            net = tf.nn.max_pool(net,ksize=(1,2,2,1),strides=(1,2,2,1),padding='SAME')
        network[name] = net

    return network
43 |
def content_loss(target_features,content_features):
    """Content loss: L2 distance between feature maps, normalized by map size."""
    shape = content_features.get_shape()
    print ('content_features.get_shape() : ')
    print (shape)
    # Static dims are (batch, height, width, channel); batch is ignored.
    dims = [d.value for d in shape]
    content_size = dims[1] * dims[2] * dims[3]
    return tf.nn.l2_loss(target_features - content_features) / content_size
50 |
def style_loss(target_features,style_features):
    """Style loss: L2 distance between the Gram matrices of two feature maps."""
    shape = target_features.get_shape()
    print ('target_features.get_shape() : ')
    print (shape)
    # Static dims are (batch, height, width, channel); batch is ignored.
    dims = [d.value for d in shape]
    size = dims[1] * dims[2] * dims[3]

    def gram(features):
        # Flatten spatial positions, then form the (channel x channel)
        # correlation matrix, scaled by the feature-map size.
        flat = tf.reshape(features,(-1,dims[3]))
        return tf.matmul(tf.transpose(flat),flat) / size

    return tf.nn.l2_loss(gram(target_features) - gram(style_features)) / size
63 |
def loss_function(style_image,content_image,target_image):
    """Total loss: weighted content terms plus weighted style terms.

    Each image is run through the (frozen) VGG-19 graph; content is matched
    on CONTENT_LAYERS and style on STYLE_LAYERS using the module weights.
    """
    # Wrap each image in a 1-element list to give it a batch dimension.
    style_features = vgg19([style_image])
    content_features = vgg19([content_image])
    target_features = vgg19([target_image])

    terms = [CONTENT_WEIGHT * content_loss(target_features[layer],content_features[layer])
             for layer in CONTENT_LAYERS]
    terms += [STYLE_WEIGHT * style_loss(target_features[layer],style_features[layer])
              for layer in STYLE_LAYERS]
    # Start from 0.0 and add in order, matching the original accumulation.
    return sum(terms, 0.0)
76 |
77 |
78 |
def stylize(style_image,content_image,learning_rate=0.1,epochs=500):
    """Run gradient-descent neural style transfer.

    Args:
        style_image / content_image: float32 numpy arrays, already
            zero-centered by the caller (pixels - 128). NOTE(review): the
            loss subtracts feature maps elementwise, so both presumably
            need identical HxWxC shapes -- confirm at the call site.
        learning_rate: Adam step size.
        epochs: number of optimization steps; a snapshot is written to
            ./neural_me_<step>.jpg every 100 steps.
    """
    # The generated image is the only trainable variable; start from noise.
    target = tf.Variable(tf.random_normal(content_image.shape),dtype=tf.float32)
    style_input = tf.constant(style_image,dtype=tf.float32)
    content_input = tf.constant(content_image, dtype=tf.float32)
    cost = loss_function(style_input,content_input,target)
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        # Fix: tf.initialize_all_variables() was deprecated in TF 0.12 and
        # later removed; global_variables_initializer is the replacement.
        sess.run(tf.global_variables_initializer())
        for i in range(epochs):
            _,loss,target_image = sess.run([train_op,cost,target])
            print("iter:%d,loss:%.9f" % (i, loss))
            if (i+1) % 100 == 0:
                # Undo the -128 centering and clamp back to valid pixel range.
                image = np.clip(target_image + 128,0,255).astype(np.uint8)
                Image.fromarray(image).save("./neural_me_%d.jpg" % (i + 1))
93 |
94 |
if __name__ == '__main__':
    def _load_centered(path):
        # Read an image and zero-center its pixels (roughly [-128, 127]).
        return np.array(Image.open(path)).astype(np.float32) - 128.0

    style = _load_centered('star.jpg')
    content = _load_centered('me.jpg')
    stylize(style, content, 0.5, 500)
103 |
--------------------------------------------------------------------------------
/InActionB2/chapter4/xihu.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xiaoyesoso/TensorFlowinAction/332ce0b33e9588365740628eb8b83cd89a40265e/InActionB2/chapter4/xihu.jpg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlowinAction
--------------------------------------------------------------------------------