├── .gitignore
├── .gitmodules
├── README.md
├── doc
│   └── ml
│       └── An overview of gradient descent optimization algorithms.mhtml
└── src
    ├── .obs
    │   └── dot_parser4tf_csv_file
    │       ├── csv_parser.py
    │       ├── dots
    │       │   ├── Asserts and boolean checks.dot
    │       │   ├── BayesFlow Stochastic Graph (contrib).dot
    │       │   ├── Building Graph.dot
    │       │   ├── Constants,Sequences,and Random Valu.dot
    │       │   ├── Control Flow.dot
    │       │   ├── Copying Graph Elements (contrib).dot
    │       │   ├── Data IO (Python functions).dot
    │       │   ├── FFmpeg (contrib).dot
    │       │   ├── Framework (contrib).dot
    │       │   ├── Higher Order Functions.dot
    │       │   ├── Histograms.dot
    │       │   ├── Images.dot
    │       │   ├── Inputs and Readers.dot
    │       │   ├── Layers (contrib).dot
    │       │   ├── Learn (contrib).dot
    │       │   ├── Losses (contrib).dot
    │       │   ├── Math.dot
    │       │   ├── Metrics (contrib).dot
    │       │   ├── Monitors (contrib).dot
    │       │   ├── Neural Network RNN Cells.dot
    │       │   ├── Neural Network.dot
    │       │   ├── Running Graphs.dot
    │       │   ├── Sparse Tensors.dot
    │       │   ├── Statistical distributions (contrib).dot
    │       │   ├── Strings.dot
    │       │   ├── Summary Operations.dot
    │       │   ├── Tensor Handle Operations.dot
    │       │   ├── Tensor Transformations.dot
    │       │   ├── TensorArray Operations.dot
    │       │   ├── Testing.dot
    │       │   ├── Training.dot
    │       │   ├── Utilities (contrib).dot
    │       │   ├── Variables.dot
    │       │   └── Wraps python functions.dot
    │       ├── tensorflow_api.csv
    │       └── tf.pdf
    ├── ComputerScience
    │   ├── Calculator_AST.py
    │   ├── FullBinaryHeap.py
    │   └── dsr.py
    ├── Controller
    │   ├── cartpole_pid.py
    │   └── pid.py
    ├── MachineLearning
    │   ├── Deep_Learning
    │   │   ├── dqn_cartpole.py
    │   │   └── pg_cartpole.ipynb
    │   ├── TensorFlow
    │   │   ├── Batcher.py
    │   │   ├── IncreaseNN.py
    │   │   ├── TensorflowLearning
    │   │   │   └── Untitled.ipynb
    │   │   ├── list_devices.py
    │   │   └── multi-gpu.py
    │   ├── algorithm
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── step by step q learning-checkpoint.ipynb
    │   │   ├── DecisionTree.py
    │   │   ├── DeepReinforcementLearning
    │   │   │   ├── ReplayBuffer.py
    │   │   │   ├── SimplePolicyGradient.py
    │   │   │   └── step by step q learning.ipynb
    │   │   ├── LDA.py
    │   │   ├── NeuralNetwork
    │   │   │   ├── nn_mnist.py
    │   │   │   └── nn_mnist_static.py
    │   │   ├── SupportVectorMachine
    │   │   │   └── SMO_Simple.ipynb
    │   │   └── tsne.py
    │   ├── preprocess.py
    │   └── sklearn
    │       └── tsne.py
    ├── MinorProjects
    │   ├── ECDICT-ultimate
    │   │   ├── ECDICT.css
    │   │   ├── ECDICT.plist
    │   │   ├── Makefile
    │   │   ├── OtherResources
    │   │   │   ├── ECDICT.xsl
    │   │   │   └── ECDICT_prefs.html
    │   │   ├── README.md
    │   │   └── csv2dict.py
    │   ├── ECDICT
    │   │   ├── ECDICT.css
    │   │   ├── ECDICT.plist
    │   │   ├── Makefile
    │   │   ├── OtherResources
    │   │   │   ├── ECDICT.xsl
    │   │   │   └── ECDICT_prefs.html
    │   │   ├── README.md
    │   │   └── csv2dict.py
    │   ├── chat
    │   │   ├── chat.py
    │   │   ├── doc
    │   │   │   └── structure.md
    │   │   ├── http.txt
    │   │   ├── udp_client.py
    │   │   └── ui.py
    │   ├── copydisk.py
    │   ├── diff.py
    │   ├── excel.py
    │   ├── media
    │   │   ├── comics
    │   │   │   └── tmp.py
    │   │   └── ffmpeg_convert2mp4.py
    │   ├── multy_copy.py
    │   ├── plz
    │   │   ├── car_velocity.py
    │   │   ├── compute_time.py
    │   │   ├── image_cutout.py
    │   │   ├── turtle.bot
    │   │   ├── turtlebot.ipynb
    │   │   └── turtlebot.py
    │   ├── sendmail.py
    │   ├── tieba_content.py
    │   ├── xyq
    │   │   └── xyq.py
    │   └── yyf
    │       └── donation.py
    ├── Web
    │   ├── Verification code.ipynb
    │   ├── html
    │   │   └── split.html
    │   └── ipgw.py
    └── probability.py
/.gitignore:
--------------------------------------------------------------------------------
1 | ECDICT.xml
2 | objects/
3 | *.py[cod]
4 |
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Packages
10 | *.egg
11 | *.egg-info
12 | dist
13 | build
14 | eggs
15 | parts
16 | bin
17 | var
18 | sdist
19 | data
20 | develop-eggs
21 | .installed.cfg
22 | lib
23 | lib64
24 | __pycache__
25 |
26 | # Installer logs
27 | pip-log.txt
28 |
29 | # Unit test / coverage reports
30 | .coverage
31 | .tox
32 | nosetests.xml
33 |
34 | # Translations
35 | *.mo
36 |
37 | # Mr Developer
38 | .mr.developer.cfg
39 | .project
40 | .pydevproject
41 |
42 | # Tensorboard
43 | .ash-laptop
44 |
45 | # Editor
46 | *~
47 | ~*
48 | *.swp
49 |
50 | # data
51 | *data
52 | data
53 | *.bz2
54 |
55 | .ipynb_checkpoints
56 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "src/Web/js/Function/jquery-qrcode"]
2 | path = src/Web/js/Function/jquery-qrcode
3 | url = https://github.com/jeromeetienne/jquery-qrcode.git
4 | [submodule "src/Web/js/Views/canvas-nest"]
5 | path = src/Web/js/Views/canvas-nest
6 | url = https://github.com/hustcc/canvas-nest.js.git
7 | [submodule "ShadowSocksShare"]
8 | path = src/Web/Flask/ShadowSocksShare-OpenShift
9 | url = https://github.com/the0demiurge/ShadowSocksShare-OpenShift.git
10 | [submodule "MATLAB"]
11 | path = src/pyMATLAB-style
12 | url = git@github.com:the0demiurge/pyMATLAB-style.git
13 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # My Python Scripts
2 | For testing and learning Python.
3 | # Contents
4 | ```
5 | src/
6 | ├── ComputerScience
7 | │   └── data_structure
8 | │       └── linked_list.py
9 | ├── Controller
10 | │   ├── cartpole_pid.py
11 | │   └── pid.py
12 | ├── DevTools
13 | │   ├── crash_on_ipy.py
14 | │   └── jupyter_hide_code.py
15 | ├── Example
16 | │   ├── argparse_example.py
17 | │   ├── ncurses_example.py
18 | │   └── qrcode_example.py
19 | ├── MachineLearning
20 | │   ├── algorithm
21 | │   │   ├── DecisionTree.py
22 | │   │   ├── DeepReinforcementLearning
23 | │   │   │   ├── ReplayBuffer.py
24 | │   │   │   ├── SimplePolicyGradient.py
25 | │   │   │   └── step by step q learning.ipynb
26 | │   │   ├── LDA.py
27 | │   │   ├── neural_network.ipynb
28 | │   │   ├── nn_mnist.py
29 | │   │   ├── nn_mnist_static.py
30 | │   │   └── tsne.py -> ../sklearn/tsne.py
31 | │   ├── Deep_Learning
32 | │   │   ├── dqn_cartpole.py
33 | │   │   └── pg_cartpole.ipynb
34 | │   ├── preprocess.py
35 | │   ├── sklearn
36 | │   │   └── tsne.py
37 | │   └── TensorFlow
38 | │       ├── Batcher.py
39 | │       ├── IncreaseNN.py
40 | │       └── multi-gpu.py
41 | ├── MinorProjects
42 | │   ├── diff.py
43 | │   ├── excel.py
44 | │   ├── media
45 | │   │   ├── comics
46 | │   │   │   └── tmp.py
47 | │   │   └── ffmpeg_convert2mp4.py
48 | │   ├── multy_copy.py
49 | │   ├── plz
50 | │   │   ├── compute_time.py
51 | │   │   ├── turtle.bot
52 | │   │   ├── turtlebot.ipynb
53 | │   │   └── turtlebot.py
54 | │   └── sendmail.py
55 | └── Web
56 |     ├── Flask
57 |     ├── html
58 |     │   └── split.html
59 |     ├── ipgw.py
60 |     └── Verification code.ipynb
61 |
62 | 18 directories, 37 files
63 |
64 | ```
65 | src/MinorProjects/plz: a calculator for computing attendance time in Prof. Zhang's lab
66 | src/MinorProjects/tf: TensorFlow learning code
67 | src/MinorProjects/sendmail.py: sending mail with Python (ops automation)
68 | The tree above was generated with `tree src/ -I 'pyc|__pycache__|js|ShadowSocksShare-OpenShift|MNIST_data'`
69 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/csv_parser.py:
--------------------------------------------------------------------------------
1 | #!/data/data/com.termux/files/usr/bin/python3
2 | """Split a sectioned TensorFlow API CSV into per-section Graphviz graphs."""
3 | import os
4 | import sys
5 |
6 | # Use the CSV path given on the command line, else the bundled listing.
7 | filename = sys.argv[-1] if len(sys.argv) > 1 else 'tensorflow_api.csv'
8 |
9 | with open(filename, 'r') as f:
10 |     data = f.readlines()
11 |
12 | # The first line holds the overall API name as "# <name>,,\n".
13 | name = data[0][2:-3]
14 |
15 | # Group the remaining rows under their "# <section>,,\n" headers.
16 | ops = dict()
17 | key = None
18 | for line in data[1:]:
19 |     if line[:2] == '# ':
20 |         key = line[2:-3]
21 |         ops[key] = list()
22 |     elif line[0] != '#' and key is not None:
23 |         ops[key].append(line.split(',')[0:2])
24 |
25 | # Write one "<section>.dot" graph per section and render it to SVG.
26 | for key in ops:
27 |     with open('%s.dot' % key, 'w') as f:
28 |         print('digraph "%s"{' % key, file=f)
29 |         print('rankdir="LR"', file=f)
30 |         print('node [shape="box"]', file=f)
31 |         # print('\t"{}" -> "{}"[color="red"];'.format(name, key))
32 |         for value in ops[key]:
33 |             print('\t\t"{}" -> "{}" -> "{}";'.format(key, value[0], value[1]), file=f)
34 |         print('}', file=f)
35 |     os.system('dot -T svg -o "%s.svg" "%s.dot"' % (key, key))
--------------------------------------------------------------------------------
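Note: the input layout csv_parser.py expects is only implied by its slicing; the sketch below is an inferred sample, not an excerpt of the bundled tensorflow_api.csv. Every line appears to end in ",,", so the [2:-3] slices strip the "# " prefix and the trailing ",,\n" from headers, while split(',')[0:2] takes the two name columns from data rows:

# A hypothetical three-column input accepted by csv_parser.py
# (file name and rows here are illustrative only).
sample = (
    '# TensorFlow API,,\n'   # data[0]: the overall API name
    '# Math,,\n'             # a "# " header opens a new section
    'abs,tf.abs,,\n'         # data rows: short name, fully qualified name
    'add,tf.add,,\n'
)
with open('sample.csv', 'w') as f:
    f.write(sample)
# "python3 csv_parser.py sample.csv" would then write Math.dot with edges
# like "Math" -> "abs" -> "tf.abs"; (matching the files under dots/) and
# shell out to Graphviz to render Math.svg.
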
/src/.obs/dot_parser4tf_csv_file/dots/Asserts and boolean checks.dot:
--------------------------------------------------------------------------------
1 | digraph "Asserts and boolean checks"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Asserts and boolean checks" -> "assert_equal" -> "tf.assert_equal";
5 | "Asserts and boolean checks" -> "assert_integer" -> "tf.assert_integer";
6 | "Asserts and boolean checks" -> "assert_less" -> "tf.assert_less";
7 | "Asserts and boolean checks" -> "assert_less_equal" -> "tf.assert_less_equal";
8 | "Asserts and boolean checks" -> "assert_negative" -> "tf.assert_negative";
9 | "Asserts and boolean checks" -> "assert_non_negative" -> "tf.assert_non_negative";
10 | "Asserts and boolean checks" -> "assert_non_positive" -> "tf.assert_non_positive";
11 | "Asserts and boolean checks" -> "assert_positive" -> "tf.assert_positive";
12 | "Asserts and boolean checks" -> "assert_proper_iterable" -> "tf.assert_proper_iterable";
13 | "Asserts and boolean checks" -> "assert_rank" -> "tf.assert_rank";
14 | "Asserts and boolean checks" -> "assert_rank_at_least" -> "tf.assert_rank_at_least";
15 | "Asserts and boolean checks" -> "assert_type" -> "tf.assert_type";
16 | "Asserts and boolean checks" -> "is_non_decreasing" -> "tf.is_non_decreasing";
17 | "Asserts and boolean checks" -> "is_numeric_tensor" -> "tf.is_numeric_tensor";
18 | "Asserts and boolean checks" -> "is_strictly_increasing" -> "tf.is_strictly_increasing";
19 | }
20 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/BayesFlow Stochastic Graph (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "BayesFlow Stochastic Graph (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "BayesFlow Stochastic Graph (contrib)" -> "DistributionTensor" -> "tf.contreib.bayesflow.stochastic_graph.DistributionTensor";
5 | "BayesFlow Stochastic Graph (contrib)" -> "get_current_value_type" -> "tf.contreib.bayesflow.stochastic_graph.get_current_value_type";
6 | "BayesFlow Stochastic Graph (contrib)" -> "get_score_function_with_baseline" -> "tf.contreib.bayesflow.stochastic_graph.get_score_function_with_baseline";
7 | "BayesFlow Stochastic Graph (contrib)" -> "MeanValue" -> "tf.contreib.bayesflow.stochastic_graph.MeanValue";
8 | "BayesFlow Stochastic Graph (contrib)" -> "NoValueTypeSetError" -> "tf.contreib.bayesflow.stochastic_graph.NoValueTypeSetError";
9 | "BayesFlow Stochastic Graph (contrib)" -> "SampleAndReshapeValue" -> "tf.contreib.bayesflow.stochastic_graph.SampleAndReshapeValue";
10 | "BayesFlow Stochastic Graph (contrib)" -> "SampleValue" -> "tf.contreib.bayesflow.stochastic_graph.SampleValue";
11 | "BayesFlow Stochastic Graph (contrib)" -> "score_function" -> "tf.contreib.bayesflow.stochastic_graph.score_function";
12 | "BayesFlow Stochastic Graph (contrib)" -> "StochasticTensor" -> "tf.contreib.bayesflow.stochastic_graph.StochasticTensor";
13 | "BayesFlow Stochastic Graph (contrib)" -> "surrogate_loss" -> "tf.contreib.bayesflow.stochastic_graph.surrogate_loss";
14 | "BayesFlow Stochastic Graph (contrib)" -> "value_type" -> "tf.contreib.bayesflow.stochastic_graph.value_type";
15 | }
16 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Building Graph.dot:
--------------------------------------------------------------------------------
1 | digraph "Building Graph"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Building Graph" -> "add_to_collection" -> "tf.add_to_collection";
5 | "Building Graph" -> "as_dtype" -> "tf.as_dtype";
6 | "Building Graph" -> "bytes" -> "tf.bytes";
7 | "Building Graph" -> "container" -> "tf.container";
8 | "Building Graph" -> "control_dependencies" -> "tf.control_dependencies";
9 | "Building Graph" -> "convert_to_tensor" -> "tf.convert_to_tensor";
10 | "Building Graph" -> "convert_to_tensor_or_indexed_slices" -> "tf.convert_to_tensor_or_indexed_slices";
11 | "Building Graph" -> "device" -> "tf.device";
12 | "Building Graph" -> "DeviceSpec" -> "tf.DeviceSpec";
13 | "Building Graph" -> "Dimension" -> "tf.Dimension";
14 | "Building Graph" -> "DType" -> "tf.DType";
15 | "Building Graph" -> "get_collection" -> "tf.get_collection";
16 | "Building Graph" -> "get_collection_ref" -> "tf.get_collection_ref";
17 | "Building Graph" -> "get_default_graph" -> "tf.get_default_graph";
18 | "Building Graph" -> "get_seed" -> "tf.get_seed";
19 | "Building Graph" -> "Graph" -> "tf.Graph";
20 | "Building Graph" -> "GraphKeys" -> "tf.GraphKeys";
21 | "Building Graph" -> "import_graph_def" -> "tf.import_graph_def";
22 | "Building Graph" -> "load_file_system_library" -> "tf.load_file_system_library";
23 | "Building Graph" -> "load_op_library" -> "tf.load_op_library";
24 | "Building Graph" -> "name_scope" -> "tf.name_scope";
25 | "Building Graph" -> "NoGradient" -> "tf.NoGradient";
26 | "Building Graph" -> "op_scope" -> "tf.op_scope";
27 | "Building Graph" -> "Operation" -> "tf.Operation";
28 | "Building Graph" -> "register_tensor_conversion_function" -> "tf.register_tensor_conversion_function";
29 | "Building Graph" -> "RegisterGradient" -> "tf.RegisterGradient";
30 | "Building Graph" -> "RegisterShape" -> "tf.RegisterShape";
31 | "Building Graph" -> "reset_default_graph" -> "tf.reset_default_graph";
32 | "Building Graph" -> "Tensor" -> "tf.Tensor";
33 | "Building Graph" -> "TensorShape" -> "tf.TensorShape";
34 | }
35 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Constants,Sequences,and Random Valu.dot:
--------------------------------------------------------------------------------
1 | digraph "Constants,Sequences,and Random Valu"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Constants,Sequences,and Random Valu" -> "constant" -> "tf.constant";
5 | "Constants,Sequences,and Random Valu" -> "fill" -> "tf.fill";
6 | "Constants,Sequences,and Random Valu" -> "linspace" -> "tf.linspace";
7 | "Constants,Sequences,and Random Valu" -> "multinomial" -> "tf.multinomial";
8 | "Constants,Sequences,and Random Valu" -> "ones" -> "tf.ones";
9 | "Constants,Sequences,and Random Valu" -> "ones_like" -> "tf.ones_like";
10 | "Constants,Sequences,and Random Valu" -> "random_crop" -> "tf.random_crop";
11 | "Constants,Sequences,and Random Valu" -> "random_gamma" -> "tf.random_gamma";
12 | "Constants,Sequences,and Random Valu" -> "random_normal" -> "tf.random_normal";
13 | "Constants,Sequences,and Random Valu" -> "random_shuffle" -> "tf.random_shuffle";
14 | "Constants,Sequences,and Random Valu" -> "random_uniform" -> "tf.random_uniform";
15 | "Constants,Sequences,and Random Valu" -> "range" -> "tf.range";
16 | "Constants,Sequences,and Random Valu" -> "set_random_seed" -> "tf.set_random_seed";
17 | "Constants,Sequences,and Random Valu" -> "truncated_normal" -> "tf.truncated_normal";
18 | "Constants,Sequences,and Random Valu" -> "zeros" -> "tf.zeros";
19 | "Constants,Sequences,and Random Valu" -> "zeros_like" -> "tf.zeros_like";
20 | }
21 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Control Flow.dot:
--------------------------------------------------------------------------------
1 | digraph "Control Flow"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Control Flow" -> "add_check_numerics_ops" -> "tf.add_check_numerics_ops";
5 | "Control Flow" -> "Assert" -> "tf.Assert";
6 | "Control Flow" -> "case" -> "tf.case";
7 | "Control Flow" -> "check_numerics" -> "tf.check_numerics";
8 | "Control Flow" -> "cond" -> "tf.cond";
9 | "Control Flow" -> "count_up_to" -> "tf.count_up_to";
10 | "Control Flow" -> "equal" -> "tf.equal";
11 | "Control Flow" -> "greater" -> "tf.greater";
12 | "Control Flow" -> "greater_equal" -> "tf.greater_equal";
13 | "Control Flow" -> "group" -> "tf.group";
14 | "Control Flow" -> "identity" -> "tf.identity";
15 | "Control Flow" -> "is_finite" -> "tf.is_finite";
16 | "Control Flow" -> "is_inf" -> "tf.is_inf";
17 | "Control Flow" -> "is_nan" -> "tf.is_nan";
18 | "Control Flow" -> "less" -> "tf.less";
19 | "Control Flow" -> "less_equal" -> "tf.less_equal";
20 | "Control Flow" -> "logical_and" -> "tf.logical_and";
21 | "Control Flow" -> "logical_not" -> "tf.logical_not";
22 | "Control Flow" -> "logical_or" -> "tf.logical_or";
23 | "Control Flow" -> "logical_xor" -> "tf.logical_xor";
24 | "Control Flow" -> "no_op" -> "tf.no_op";
25 | "Control Flow" -> "not_equal" -> "tf.not_equal";
26 | "Control Flow" -> "Print" -> "tf.Print";
27 | "Control Flow" -> "select" -> "tf.select";
28 | "Control Flow" -> "tuple" -> "tf.tuple";
29 | "Control Flow" -> "verify_tensor_all_finite" -> "tf.verify_tensor_all_finite";
30 | "Control Flow" -> "where" -> "tf.where";
31 | "Control Flow" -> "while_loop" -> "tf.while_loop";
32 | }
33 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Copying Graph Elements (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Copying Graph Elements (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Copying Graph Elements (contrib)" -> "copy_op_to_graph" -> "tf.contrib.copy_graph.copy_op_to_graph";
5 | "Copying Graph Elements (contrib)" -> "copy_variable_to_graph" -> "tf.contrib.copy_graph.copy_variable_to_graph";
6 | "Copying Graph Elements (contrib)" -> "get_copied_op" -> "tf.contrib.copy_graph.get_copied_op";
7 | }
8 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Data IO (Python functions).dot:
--------------------------------------------------------------------------------
1 | digraph "Data IO (Python functions)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Data IO (Python functions)" -> "tf_record_iterator" -> "tf.tf_record_iterator";
5 | "Data IO (Python functions)" -> "TFRecordWriter" -> "tf.TFRecordWriter";
6 | }
7 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/FFmpeg (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "FFmpeg (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "FFmpeg (contrib)" -> "decode_audio" -> "tf.contrib.ffmpeg.decode_audio";
5 | "FFmpeg (contrib)" -> "encode_audio" -> "tf.contrib.ffmpeg.encode_audio";
6 | }
7 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Framework (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Framework (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Framework (contrib)" -> "add_arg_scope" -> "tf.contrib.framework.add_arg_scope";
5 | "Framework (contrib)" -> "add_model_variable" -> "tf.contrib.framework.add_model_variable";
6 | "Framework (contrib)" -> "arg_scope" -> "tf.contrib.framework.arg_scope";
7 | "Framework (contrib)" -> "arg_scoped_arguments" -> "tf.contrib.framework.arg_scoped_arguments";
8 | "Framework (contrib)" -> "assert_global_step" -> "tf.contrib.framework.assert_global_step";
9 | "Framework (contrib)" -> "assert_or_get_global_step" -> "tf.contrib.framework.assert_or_get_global_step";
10 | "Framework (contrib)" -> "assert_same_float_dtype" -> "tf.contrib.framework.assert_same_float_dtype";
11 | "Framework (contrib)" -> "assert_scalar_int" -> "tf.contrib.framework.assert_scalar_int";
12 | "Framework (contrib)" -> "convert_to_tensor_or_sparse_tensor" -> "tf.contrib.framework.convert_to_tensor_or_sparse_tensor";
13 | "Framework (contrib)" -> "create_global_step" -> "tf.contrib.framework.create_global_step";
14 | "Framework (contrib)" -> "deprecated" -> "tf.contrib.framework.deprecated";
15 | "Framework (contrib)" -> "get_global_step" -> "tf.contrib.framework.get_global_step";
16 | "Framework (contrib)" -> "get_graph_from_inputs" -> "tf.contrib.framework.get_graph_from_inputs";
17 | "Framework (contrib)" -> "get_local_variables" -> "tf.contrib.framework.get_local_variables";
18 | "Framework (contrib)" -> "get_model_variables" -> "tf.contrib.framework.get_model_variables";
19 | "Framework (contrib)" -> "get_or_create_global_step" -> "tf.contrib.framework.get_or_create_global_step";
20 | "Framework (contrib)" -> "get_unique_variable" -> "tf.contrib.framework.get_unique_variable";
21 | "Framework (contrib)" -> "get_variables" -> "tf.contrib.framework.get_variables";
22 | "Framework (contrib)" -> "get_variables_by_name" -> "tf.contrib.framework.get_variables_by_name";
23 | "Framework (contrib)" -> "get_variables_by_suffix" -> "tf.contrib.framework.get_variables_by_suffix";
24 | "Framework (contrib)" -> "get_variables_to_restore" -> "tf.contrib.framework.get_variables_to_restore";
25 | "Framework (contrib)" -> "has_arg_scope" -> "tf.contrib.framework.has_arg_scope";
26 | "Framework (contrib)" -> "is_non_decreasing" -> "tf.contrib.framework.is_non_decreasing";
27 | "Framework (contrib)" -> "is_numeric_tensor" -> "tf.contrib.framework.is_numeric_tensor";
28 | "Framework (contrib)" -> "is_strictly_increasing" -> "tf.contrib.framework.is_strictly_increasing";
29 | "Framework (contrib)" -> "is_tensor" -> "tf.contrib.framework.is_tensor";
30 | "Framework (contrib)" -> "local_variable" -> "tf.contrib.framework.local_variable";
31 | "Framework (contrib)" -> "model_variable" -> "tf.contrib.framework.model_variable";
32 | "Framework (contrib)" -> "reduce_sum_n" -> "tf.contrib.framework.reduce_sum_n";
33 | "Framework (contrib)" -> "safe_embedding_lookup_sparse" -> "tf.contrib.framework.safe_embedding_lookup_sparse";
34 | "Framework (contrib)" -> "variable" -> "tf.contrib.framework.variable";
35 | "Framework (contrib)" -> "VariableDeviceChooser" -> "tf.contrib.framework.VariableDeviceChooser";
36 | "Framework (contrib)" -> "with_same_shape" -> "tf.contrib.framework.with_same_shape";
37 | "Framework (contrib)" -> "with_shape" -> "tf.contrib.framework.with_shape";
38 | }
39 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Higher Order Functions.dot:
--------------------------------------------------------------------------------
1 | digraph "Higher Order Functions"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Higher Order Functions" -> "foldl" -> "tf.foldl";
5 | "Higher Order Functions" -> "foldr" -> "tf.foldr";
6 | "Higher Order Functions" -> "map_fn" -> "tf.map_fn";
7 | "Higher Order Functions" -> "scan" -> "tf.scan";
8 | }
9 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Histograms.dot:
--------------------------------------------------------------------------------
1 | digraph "Histograms"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Histograms" -> "histogram_fixed_width" -> "tf.histogram_fixed_width";
5 | }
6 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Images.dot:
--------------------------------------------------------------------------------
1 | digraph "Images"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Images" -> "adjust_brightness" -> "tf.adjust_brightness";
5 | "Images" -> "adjust_contrast" -> "tf.adjust_contrast";
6 | "Images" -> "adjust_hue" -> "tf.adjust_hue";
7 | "Images" -> "adjust_saturation" -> "tf.adjust_saturation";
8 | "Images" -> "central_crop" -> "tf.central_crop";
9 | "Images" -> "convert_image_dtype" -> "tf.convert_image_dtype";
10 | "Images" -> "crop_and_resize" -> "tf.crop_and_resize";
11 | "Images" -> "crop_to_bounding_box" -> "tf.crop_to_bounding_box";
12 | "Images" -> "decode_jpeg" -> "tf.decode_jpeg";
13 | "Images" -> "decode_png" -> "tf.decode_png";
14 | "Images" -> "draw_bounding_boxes" -> "tf.draw_bounding_boxes";
15 | "Images" -> "encode_jpeg" -> "tf.encode_jpeg";
16 | "Images" -> "encode_png" -> "tf.encode_png";
17 | "Images" -> "extract_glimpse" -> "tf.extract_glimpse";
18 | "Images" -> "flip_left_right" -> "tf.flip_left_right";
19 | "Images" -> "flip_up_down" -> "tf.flip_up_down";
20 | "Images" -> "grayscale_to_rgb" -> "tf.grayscale_to_rgb";
21 | "Images" -> "hsv_to_rgb" -> "tf.hsv_to_rgb";
22 | "Images" -> "non_max_suppression" -> "tf.non_max_suppression";
23 | "Images" -> "pad_to_bounding_box" -> "tf.pad_to_bounding_box";
24 | "Images" -> "per_image_whitening" -> "tf.per_image_whitening";
25 | "Images" -> "random_brightness" -> "tf.random_brightness";
26 | "Images" -> "random_contrast" -> "tf.random_contrast";
27 | "Images" -> "random_flip_left_right" -> "tf.random_flip_left_right";
28 | "Images" -> "random_flip_up_down" -> "tf.random_flip_up_down";
29 | "Images" -> "random_hue" -> "tf.random_hue";
30 | "Images" -> "random_saturation" -> "tf.random_saturation";
31 | "Images" -> "resize_area" -> "tf.resize_area";
32 | "Images" -> "resize_bicubic" -> "tf.resize_bicubic";
33 | "Images" -> "resize_bilinear" -> "tf.resize_bilinear";
34 | "Images" -> "resize_image_with_crop_or_pad" -> "tf.resize_image_with_crop_or_pad";
35 | "Images" -> "resize_images" -> "tf.resize_images";
36 | "Images" -> "resize_nearest_neighbor" -> "tf.resize_nearest_neighbor";
37 | "Images" -> "rgb_to_grayscale" -> "tf.rgb_to_grayscale";
38 | "Images" -> "rgb_to_hsv" -> "tf.rgb_to_hsv";
39 | "Images" -> "rot90" -> "tf.rot90";
40 | "Images" -> "sample_distorted_bounding_box" -> "tf.sample_distorted_bounding_box";
41 | "Images" -> "transpose_image" -> "tf.transpose_image";
42 | }
43 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Inputs and Readers.dot:
--------------------------------------------------------------------------------
1 | digraph "Inputs and Readers"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Inputs and Readers" -> "batch" -> "tf.batch";
5 | "Inputs and Readers" -> "batch_join" -> "tf.batch_join";
6 | "Inputs and Readers" -> "decode_csv" -> "tf.decode_csv";
7 | "Inputs and Readers" -> "decode_json_example" -> "tf.decode_json_example";
8 | "Inputs and Readers" -> "decode_raw" -> "tf.decode_raw";
9 | "Inputs and Readers" -> "FIFOQueue" -> "tf.FIFOQueue";
10 | "Inputs and Readers" -> "FixedLenFeature" -> "tf.FixedLenFeature";
11 | "Inputs and Readers" -> "FixedLengthRecordReader" -> "tf.FixedLengthRecordReader";
12 | "Inputs and Readers" -> "FixedLenSequenceFeature" -> "tf.FixedLenSequenceFeature";
13 | "Inputs and Readers" -> "IdentityReader" -> "tf.IdentityReader";
14 | "Inputs and Readers" -> "input_producer" -> "tf.input_producer";
15 | "Inputs and Readers" -> "limit_epochs" -> "tf.limit_epochs";
16 | "Inputs and Readers" -> "match_filenames_once" -> "tf.match_filenames_once";
17 | "Inputs and Readers" -> "matching_files" -> "tf.matching_files";
18 | "Inputs and Readers" -> "PaddingFIFOQueue" -> "tf.PaddingFIFOQueue";
19 | "Inputs and Readers" -> "parse_example" -> "tf.parse_example";
20 | "Inputs and Readers" -> "parse_single_example" -> "tf.parse_single_example";
21 | "Inputs and Readers" -> "placeholder" -> "tf.placeholder";
22 | "Inputs and Readers" -> "placeholder_with_default" -> "tf.placeholder_with_default";
23 | "Inputs and Readers" -> "QueueBase" -> "tf.QueueBase";
24 | "Inputs and Readers" -> "RandomShuffleQueue" -> "tf.RandomShuffleQueue";
25 | "Inputs and Readers" -> "range_input_producer" -> "tf.range_input_producer";
26 | "Inputs and Readers" -> "read_file" -> "tf.read_file";
27 | "Inputs and Readers" -> "ReaderBase" -> "tf.ReaderBase";
28 | "Inputs and Readers" -> "shuffle_batch" -> "tf.shuffle_batch";
29 | "Inputs and Readers" -> "shuffle_batch_join" -> "tf.shuffle_batch_join";
30 | "Inputs and Readers" -> "size" -> "tf.size";
31 | "Inputs and Readers" -> "slice_input_producer" -> "tf.slice_input_producer";
32 | "Inputs and Readers" -> "sparse_placeholder" -> "tf.sparse_placeholder";
33 | "Inputs and Readers" -> "string_input_producer" -> "tf.string_input_producer";
34 | "Inputs and Readers" -> "TextLineReader" -> "tf.TextLineReader";
35 | "Inputs and Readers" -> "TFRecordReader" -> "tf.TFRecordReader";
36 | "Inputs and Readers" -> "VarLenFeature" -> "tf.VarLenFeature";
37 | "Inputs and Readers" -> "WholeFileReader" -> "tf.WholeFileReader";
38 | }
39 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Layers (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Layers (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Layers (contrib)" -> "apply_regularization" -> "tf.contrib.layers.apply_regularization";
5 | "Layers (contrib)" -> "avg_pool2d" -> "tf.contrib.layers.avg_pool2d";
6 | "Layers (contrib)" -> "batch_norm" -> "tf.contrib.layers.batch_norm";
7 | "Layers (contrib)" -> "convolution2d" -> "tf.contrib.layers.convolution2d";
8 | "Layers (contrib)" -> "convolution2d_in_plane" -> "tf.contrib.layers.convolution2d_in_plane";
9 | "Layers (contrib)" -> "convolution2d_transpose" -> "tf.contrib.layers.convolution2d_transpose";
10 | "Layers (contrib)" -> "flatten" -> "tf.contrib.layers.flatten";
11 | "Layers (contrib)" -> "fully_connected" -> "tf.contrib.layers.fully_connected";
12 | "Layers (contrib)" -> "l1_regularizer" -> "tf.contrib.layers.l1_regularizer";
13 | "Layers (contrib)" -> "l2_regularizer" -> "tf.contrib.layers.l2_regularizer";
14 | "Layers (contrib)" -> "max_pool2d" -> "tf.contrib.layers.max_pool2d";
15 | "Layers (contrib)" -> "one_hot_encoding" -> "tf.contrib.layers.one_hot_encoding";
16 | "Layers (contrib)" -> "optimize_loss" -> "tf.contrib.layers.optimize_loss";
17 | "Layers (contrib)" -> "repeat" -> "tf.contrib.layers.repeat";
18 | "Layers (contrib)" -> "separable_convolution2d" -> "tf.contrib.layers.separable_convolution2d";
19 | "Layers (contrib)" -> "stack" -> "tf.contrib.layers.stack";
20 | "Layers (contrib)" -> "sum_regularizer" -> "tf.contrib.layers.sum_regularizer";
21 | "Layers (contrib)" -> "summarize_activation" -> "tf.contrib.layers.summarize_activation";
22 | "Layers (contrib)" -> "summarize_activations" -> "tf.contrib.layers.summarize_activations";
23 | "Layers (contrib)" -> "summarize_collection" -> "tf.contrib.layers.summarize_collection";
24 | "Layers (contrib)" -> "summarize_tensor" -> "tf.contrib.layers.summarize_tensor";
25 | "Layers (contrib)" -> "summarize_tensors" -> "tf.contrib.layers.summarize_tensors";
26 | "Layers (contrib)" -> "unit_norm" -> "tf.contrib.layers.unit_norm";
27 | "Layers (contrib)" -> "variance_scaling_initializer" -> "tf.contrib.layers.variance_scaling_initializer";
28 | "Layers (contrib)" -> "xavier_initializer" -> "tf.contrib.layers.xavier_initializer";
29 | "Layers (contrib)" -> "xavier_initializer_conv2d" -> "tf.contrib.layers.xavier_initializer_conv2d";
30 | }
31 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Learn (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Learn (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Learn (contrib)" -> "BaseEstimator" -> "tf.contrib.learn.BaseEstimator";
5 | "Learn (contrib)" -> "DNNClassifier" -> "tf.contrib.learn.DNNClassifier";
6 | "Learn (contrib)" -> "DNNRegressor" -> "tf.contrib.learn.DNNRegressor";
7 | "Learn (contrib)" -> "Estimator" -> "tf.contrib.learn.Estimator";
8 | "Learn (contrib)" -> "evaluate" -> "tf.contrib.learn.evaluate";
9 | "Learn (contrib)" -> "extract_dask_data" -> "tf.contrib.learn.extract_dask_data";
10 | "Learn (contrib)" -> "extract_dask_labels" -> "tf.contrib.learn.extract_dask_labels";
11 | "Learn (contrib)" -> "extract_pandas_data" -> "tf.contrib.learn.extract_pandas_data";
12 | "Learn (contrib)" -> "extract_pandas_labels" -> "tf.contrib.learn.extract_pandas_labels";
13 | "Learn (contrib)" -> "extract_pandas_matrix" -> "tf.contrib.learn.extract_pandas_matrix";
14 | "Learn (contrib)" -> "infer" -> "tf.contrib.learn.infer";
15 | "Learn (contrib)" -> "LinearClassifier" -> "tf.contrib.learn.LinearClassifier";
16 | "Learn (contrib)" -> "LinearRegressor" -> "tf.contrib.learn.LinearRegressor";
17 | "Learn (contrib)" -> "ModeKeys" -> "tf.contrib.learn.ModeKeys";
18 | "Learn (contrib)" -> "NanLossDuringTrainingError" -> "tf.contrib.learn.NanLossDuringTrainingError";
19 | "Learn (contrib)" -> "read_batch_examples" -> "tf.contrib.learn.read_batch_examples";
20 | "Learn (contrib)" -> "read_batch_features" -> "tf.contrib.learn.read_batch_features";
21 | "Learn (contrib)" -> "read_batch_record_features" -> "tf.contrib.learn.read_batch_record_features";
22 | "Learn (contrib)" -> "run_feeds" -> "tf.contrib.learn.run_feeds";
23 | "Learn (contrib)" -> "run_n" -> "tf.contrib.learn.run_n";
24 | "Learn (contrib)" -> "RunConfig" -> "tf.contrib.learn.RunConfig";
25 | "Learn (contrib)" -> "TensorFlowClassifier" -> "tf.contrib.learn.TensorFlowClassifier";
26 | "Learn (contrib)" -> "TensorFlowDNNClassifier" -> "tf.contrib.learn.TensorFlowDNNClassifier";
27 | "Learn (contrib)" -> "TensorFlowDNNRegressor" -> "tf.contrib.learn.TensorFlowDNNRegressor";
28 | "Learn (contrib)" -> "TensorFlowEstimator" -> "tf.contrib.learn.TensorFlowEstimator";
29 | "Learn (contrib)" -> "TensorFlowLinearClassifier" -> "tf.contrib.learn.TensorFlowLinearClassifier";
30 | "Learn (contrib)" -> "TensorFlowLinearRegressor" -> "tf.contrib.learn.TensorFlowLinearRegressor";
31 | "Learn (contrib)" -> "TensorFlowRegressor" -> "tf.contrib.learn.TensorFlowRegressor";
32 | "Learn (contrib)" -> "TensorFlowRNNClassifier" -> "tf.contrib.learn.TensorFlowRNNClassifier";
33 | "Learn (contrib)" -> "TensorFlowRNNRegressor" -> "tf.contrib.learn.TensorFlowRNNRegressor";
34 | "Learn (contrib)" -> "train" -> "tf.contrib.learn.train";
35 | }
36 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Losses (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Losses (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Losses (contrib)" -> "absolute_difference" -> "tf.contrib.losses.absolute_difference";
5 | "Losses (contrib)" -> "add_loss" -> "tf.contrib.losses.add_loss";
6 | "Losses (contrib)" -> "cosine_distance" -> "tf.contrib.losses.cosine_distance";
7 | "Losses (contrib)" -> "get_losses" -> "tf.contrib.losses.get_losses";
8 | "Losses (contrib)" -> "get_regularization_losses" -> "tf.contrib.losses.get_regularization_losses";
9 | "Losses (contrib)" -> "get_total_loss" -> "tf.contrib.losses.get_total_loss";
10 | "Losses (contrib)" -> "log_loss" -> "tf.contrib.losses.log_loss";
11 | "Losses (contrib)" -> "sigmoid_cross_entropy" -> "tf.contrib.losses.sigmoid_cross_entropy";
12 | "Losses (contrib)" -> "softmax_cross_entropy" -> "tf.contrib.losses.softmax_cross_entropy";
13 | "Losses (contrib)" -> "sum_of_pairwise_squares" -> "tf.contrib.losses.sum_of_pairwise_squares";
14 | "Losses (contrib)" -> "sum_of_squares" -> "tf.contrib.losses.sum_of_squares";
15 | }
16 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Math.dot:
--------------------------------------------------------------------------------
1 | digraph "Math"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Math" -> "abs" -> "tf.abs";
5 | "Math" -> "accumulate_n" -> "tf.accumulate_n";
6 | "Math" -> "acos" -> "tf.acos";
7 | "Math" -> "add" -> "tf.add";
8 | "Math" -> "add_n" -> "tf.add_n";
9 | "Math" -> "argmax" -> "tf.argmax";
10 | "Math" -> "argmin" -> "tf.argmin";
11 | "Math" -> "asin" -> "tf.asin";
12 | "Math" -> "atan" -> "tf.atan";
13 | "Math" -> "batch_cholesky" -> "tf.batch_cholesky";
14 | "Math" -> "batch_cholesky_solve" -> "tf.batch_cholesky_solve";
15 | "Math" -> "batch_fft" -> "tf.batch_fft";
16 | "Math" -> "batch_fft2d" -> "tf.batch_fft2d";
17 | "Math" -> "batch_fft3d" -> "tf.batch_fft3d";
18 | "Math" -> "batch_ifft" -> "tf.batch_ifft";
19 | "Math" -> "batch_ifft2d" -> "tf.batch_ifft2d";
20 | "Math" -> "batch_ifft3d" -> "tf.batch_ifft3d";
21 | "Math" -> "batch_matmul" -> "tf.batch_matmul";
22 | "Math" -> "batch_matrix_band_part" -> "tf.batch_matrix_band_part";
23 | "Math" -> "batch_matrix_determinant" -> "tf.batch_matrix_determinant";
24 | "Math" -> "batch_matrix_diag" -> "tf.batch_matrix_diag";
25 | "Math" -> "batch_matrix_diag_part" -> "tf.batch_matrix_diag_part";
26 | "Math" -> "batch_matrix_inverse" -> "tf.batch_matrix_inverse";
27 | "Math" -> "batch_matrix_set_diag" -> "tf.batch_matrix_set_diag";
28 | "Math" -> "batch_matrix_solve" -> "tf.batch_matrix_solve";
29 | "Math" -> "batch_matrix_solve_ls" -> "tf.batch_matrix_solve_ls";
30 | "Math" -> "batch_matrix_transpose" -> "tf.batch_matrix_transpose";
31 | "Math" -> "batch_matrix_triangular_solve" -> "tf.batch_matrix_triangular_solve";
32 | "Math" -> "batch_self_adjoint_eig" -> "tf.batch_self_adjoint_eig";
33 | "Math" -> "ceil" -> "tf.ceil";
34 | "Math" -> "cholesky" -> "tf.cholesky";
35 | "Math" -> "cholesky_solve" -> "tf.cholesky_solve";
36 | "Math" -> "complex" -> "tf.complex";
37 | "Math" -> "complex_abs" -> "tf.complex_abs";
38 | "Math" -> "conj" -> "tf.conj";
39 | "Math" -> "cos" -> "tf.cos";
40 | "Math" -> "cross" -> "tf.cross";
41 | "Math" -> "cumprod" -> "tf.cumprod";
42 | "Math" -> "cumsum" -> "tf.cumsum";
43 | "Math" -> "diag" -> "tf.diag";
44 | "Math" -> "diag_part" -> "tf.diag_part";
45 | "Math" -> "digamma" -> "tf.digamma";
46 | "Math" -> "div" -> "tf.div";
47 | "Math" -> "edit_distance" -> "tf.edit_distance";
48 | "Math" -> "erf" -> "tf.erf";
49 | "Math" -> "erfc" -> "tf.erfc";
50 | "Math" -> "exp" -> "tf.exp";
51 | "Math" -> "fft" -> "tf.fft";
52 | "Math" -> "fft2d" -> "tf.fft2d";
53 | "Math" -> "fft3d" -> "tf.fft3d";
54 | "Math" -> "floor" -> "tf.floor";
55 | "Math" -> "floordiv" -> "tf.floordiv";
56 | "Math" -> "ifft" -> "tf.ifft";
57 | "Math" -> "ifft2d" -> "tf.ifft2d";
58 | "Math" -> "ifft3d" -> "tf.ifft3d";
59 | "Math" -> "igamma" -> "tf.igamma";
60 | "Math" -> "igammac" -> "tf.igammac";
61 | "Math" -> "imag" -> "tf.imag";
62 | "Math" -> "inv" -> "tf.inv";
63 | "Math" -> "invert_permutation" -> "tf.invert_permutation";
64 | "Math" -> "lbeta" -> "tf.lbeta";
65 | "Math" -> "lgamma" -> "tf.lgamma";
66 | "Math" -> "listdiff" -> "tf.listdiff";
67 | "Math" -> "log" -> "tf.log";
68 | "Math" -> "matmul" -> "tf.matmul";
69 | "Math" -> "matrix_determinant" -> "tf.matrix_determinant";
70 | "Math" -> "matrix_inverse" -> "tf.matrix_inverse";
71 | "Math" -> "matrix_solve" -> "tf.matrix_solve";
72 | "Math" -> "matrix_solve_ls" -> "tf.matrix_solve_ls";
73 | "Math" -> "matrix_triangular_solve" -> "tf.matrix_triangular_solve";
74 | "Math" -> "maximum" -> "tf.maximum";
75 | "Math" -> "minimum" -> "tf.minimum";
76 | "Math" -> "mod" -> "tf.mod";
77 | "Math" -> "mul" -> "tf.mul";
78 | "Math" -> "neg" -> "tf.neg";
79 | "Math" -> "polygamma" -> "tf.polygamma";
80 | "Math" -> "pow" -> "tf.pow";
81 | "Math" -> "real" -> "tf.real";
82 | "Math" -> "reduce_all" -> "tf.reduce_all";
83 | "Math" -> "reduce_any" -> "tf.reduce_any";
84 | "Math" -> "reduce_max" -> "tf.reduce_max";
85 | "Math" -> "reduce_mean" -> "tf.reduce_mean";
86 | "Math" -> "reduce_min" -> "tf.reduce_min";
87 | "Math" -> "reduce_prod" -> "tf.reduce_prod";
88 | "Math" -> "reduce_sum" -> "tf.reduce_sum";
89 | "Math" -> "round" -> "tf.round";
90 | "Math" -> "rsqrt" -> "tf.rsqrt";
91 | "Math" -> "scalar_mul" -> "tf.scalar_mul";
92 | "Math" -> "segment_max" -> "tf.segment_max";
93 | "Math" -> "segment_mean" -> "tf.segment_mean";
94 | "Math" -> "segment_min" -> "tf.segment_min";
95 | "Math" -> "segment_prod" -> "tf.segment_prod";
96 | "Math" -> "segment_sum" -> "tf.segment_sum";
97 | "Math" -> "self_adjoint_eig" -> "tf.self_adjoint_eig";
98 | "Math" -> "sign" -> "tf.sign";
99 | "Math" -> "sin" -> "tf.sin";
100 | "Math" -> "sparse_segment_mean" -> "tf.sparse_segment_mean";
101 | "Math" -> "sparse_segment_sqrt_n" -> "tf.sparse_segment_sqrt_n";
102 | "Math" -> "sparse_segment_sqrt_n_grad" -> "tf.sparse_segment_sqrt_n_grad";
103 | "Math" -> "sparse_segment_sum" -> "tf.sparse_segment_sum";
104 | "Math" -> "sqrt" -> "tf.sqrt";
105 | "Math" -> "square" -> "tf.square";
106 | "Math" -> "squared_difference" -> "tf.squared_difference";
107 | "Math" -> "sub" -> "tf.sub";
108 | "Math" -> "tan" -> "tf.tan";
109 | "Math" -> "trace" -> "tf.trace";
110 | "Math" -> "transpose" -> "tf.transpose";
111 | "Math" -> "truediv" -> "tf.truediv";
112 | "Math" -> "unique" -> "tf.unique";
113 | "Math" -> "unsorted_segment_sum" -> "tf.unsorted_segment_sum";
114 | "Math" -> "where" -> "tf.where";
115 | "Math" -> "zeta" -> "tf.zeta";
116 | }
117 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Metrics (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Metrics (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Metrics (contrib)" -> "accuracy" -> "tf.contrib.metrics.accuracy";
5 | "Metrics (contrib)" -> "aggregate_metric_map" -> "tf.contrib.metrics.aggregate_metric_map";
6 | "Metrics (contrib)" -> "aggregate_metrics" -> "tf.contrib.metrics.aggregate_metrics";
7 | "Metrics (contrib)" -> "auc_using_histogram" -> "tf.contrib.metrics.auc_using_histogram";
8 | "Metrics (contrib)" -> "confusion_matrix" -> "tf.contrib.metrics.confusion_matrix";
9 | "Metrics (contrib)" -> "set_difference" -> "tf.contrib.metrics.set_difference";
10 | "Metrics (contrib)" -> "set_intersection" -> "tf.contrib.metrics.set_intersection";
11 | "Metrics (contrib)" -> "set_size" -> "tf.contrib.metrics.set_size";
12 | "Metrics (contrib)" -> "set_union" -> "tf.contrib.metrics.set_union";
13 | "Metrics (contrib)" -> "streaming_accuracy" -> "tf.contrib.metrics.streaming_accuracy";
14 | "Metrics (contrib)" -> "streaming_auc" -> "tf.contrib.metrics.streaming_auc";
15 | "Metrics (contrib)" -> "streaming_mean" -> "tf.contrib.metrics.streaming_mean";
16 | "Metrics (contrib)" -> "streaming_mean_absolute_error" -> "tf.contrib.metrics.streaming_mean_absolute_error";
17 | "Metrics (contrib)" -> "streaming_mean_cosine_distance" -> "tf.contrib.metrics.streaming_mean_cosine_distance";
18 | "Metrics (contrib)" -> "streaming_mean_iou" -> "tf.contrib.metrics.streaming_mean_iou";
19 | "Metrics (contrib)" -> "streaming_mean_relative_error" -> "tf.contrib.metrics.streaming_mean_relative_error";
20 | "Metrics (contrib)" -> "streaming_mean_squared_error" -> "tf.contrib.metrics.streaming_mean_squared_error";
21 | "Metrics (contrib)" -> "streaming_percentage_less" -> "tf.contrib.metrics.streaming_percentage_less";
22 | "Metrics (contrib)" -> "streaming_precision" -> "tf.contrib.metrics.streaming_precision";
23 | "Metrics (contrib)" -> "streaming_recall" -> "tf.contrib.metrics.streaming_recall";
24 | "Metrics (contrib)" -> "streaming_recall_at_k" -> "tf.contrib.metrics.streaming_recall_at_k";
25 | "Metrics (contrib)" -> "streaming_root_mean_squared_error" -> "tf.contrib.metrics.streaming_root_mean_squared_error";
26 | "Metrics (contrib)" -> "streaming_sparse_precision_at_k" -> "tf.contrib.metrics.streaming_sparse_precision_at_k";
27 | "Metrics (contrib)" -> "streaming_sparse_recall_at_k" -> "tf.contrib.metrics.streaming_sparse_recall_at_k";
28 | }
29 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Monitors (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Monitors (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Monitors (contrib)" -> "BaseMonitor" -> "tf.contrib.monitors.BaseMonitor";
5 | "Monitors (contrib)" -> "CaptureVariable" -> "tf.contrib.monitors.CaptureVariable";
6 | "Monitors (contrib)" -> "CheckpointSaver" -> "tf.contrib.monitors.CheckpointSaver";
7 | "Monitors (contrib)" -> "EveryN" -> "tf.contrib.monitors.EveryN";
8 | "Monitors (contrib)" -> "ExportMonitor" -> "tf.contrib.monitors.ExportMonitor";
9 | "Monitors (contrib)" -> "get_default_monitors" -> "tf.contrib.monitors.get_default_monitors";
10 | "Monitors (contrib)" -> "GraphDump" -> "tf.contrib.monitors.GraphDump";
11 | "Monitors (contrib)" -> "LoggingTrainable" -> "tf.contrib.monitors.LoggingTrainable";
12 | "Monitors (contrib)" -> "NanLoss" -> "tf.contrib.monitors.NanLoss";
13 | "Monitors (contrib)" -> "PrintTensor" -> "tf.contrib.monitors.PrintTensor";
14 | "Monitors (contrib)" -> "StepCounter" -> "tf.contrib.monitors.StepCounter";
15 | "Monitors (contrib)" -> "StopAtStep" -> "tf.contrib.monitors.StopAtStep";
16 | "Monitors (contrib)" -> "SummarySaver" -> "tf.contrib.monitors.SummarySaver";
17 | "Monitors (contrib)" -> "SummaryWriterCache" -> "tf.contrib.monitors.SummaryWriterCache";
18 | "Monitors (contrib)" -> "ValidationMonitor" -> "tf.contrib.monitors.ValidationMonitor";
19 | }
20 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Neural Network RNN Cells.dot:
--------------------------------------------------------------------------------
1 | digraph "Neural Network RNN Cells"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Neural Network RNN Cells" -> "BasicLSTMCell" -> "tf.BasicLSTMCell";
5 | "Neural Network RNN Cells" -> "BasicRNNCell" -> "tf.BasicRNNCell";
6 | "Neural Network RNN Cells" -> "DropoutWrapper" -> "tf.DropoutWrapper";
7 | "Neural Network RNN Cells" -> "EmbeddingWrapper" -> "tf.EmbeddingWrapper";
8 | "Neural Network RNN Cells" -> "GRUCell" -> "tf.GRUCell";
9 | "Neural Network RNN Cells" -> "InputProjectionWrapper" -> "tf.InputProjectionWrapper";
10 | "Neural Network RNN Cells" -> "LSTMCell" -> "tf.LSTMCell";
11 | "Neural Network RNN Cells" -> "LSTMStateTuple" -> "tf.LSTMStateTuple";
12 | "Neural Network RNN Cells" -> "MultiRNNCell" -> "tf.MultiRNNCell";
13 | "Neural Network RNN Cells" -> "OutputProjectionWrapper" -> "tf.OutputProjectionWrapper";
14 | "Neural Network RNN Cells" -> "RNNCell" -> "tf.RNNCell";
15 | }
16 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Neural Network.dot:
--------------------------------------------------------------------------------
1 | digraph "Neural Network"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Neural Network" -> "atrous_conv2d" -> "tf.atrous_conv2d";
5 | "Neural Network" -> "avg_pool" -> "tf.avg_pool";
6 | "Neural Network" -> "avg_pool3d" -> "tf.avg_pool3d";
7 | "Neural Network" -> "batch_normalization" -> "tf.batch_normalization";
8 | "Neural Network" -> "bias_add" -> "tf.bias_add";
9 | "Neural Network" -> "bidirectional_rnn" -> "tf.bidirectional_rnn";
10 | "Neural Network" -> "compute_accidental_hits" -> "tf.compute_accidental_hits";
11 | "Neural Network" -> "conv2d" -> "tf.conv2d";
12 | "Neural Network" -> "conv2d_transpose" -> "tf.conv2d_transpose";
13 | "Neural Network" -> "conv3d" -> "tf.conv3d";
14 | "Neural Network" -> "ctc_beam_search_decoder" -> "tf.ctc_beam_search_decoder";
15 | "Neural Network" -> "ctc_greedy_decoder" -> "tf.ctc_greedy_decoder";
16 | "Neural Network" -> "ctc_loss" -> "tf.ctc_loss";
17 | "Neural Network" -> "depthwise_conv2d" -> "tf.depthwise_conv2d";
18 | "Neural Network" -> "depthwise_conv2d_native" -> "tf.depthwise_conv2d_native";
19 | "Neural Network" -> "dilation2d" -> "tf.dilation2d";
20 | "Neural Network" -> "dropout" -> "tf.dropout";
21 | "Neural Network" -> "dynamic_rnn" -> "tf.dynamic_rnn";
22 | "Neural Network" -> "elu" -> "tf.elu";
23 | "Neural Network" -> "embedding_lookup" -> "tf.embedding_lookup";
24 | "Neural Network" -> "embedding_lookup_sparse" -> "tf.embedding_lookup_sparse";
25 | "Neural Network" -> "erosion2d" -> "tf.erosion2d";
26 | "Neural Network" -> "fixed_unigram_candidate_sampler" -> "tf.fixed_unigram_candidate_sampler";
27 | "Neural Network" -> "in_top_k" -> "tf.in_top_k";
28 | "Neural Network" -> "l2_loss" -> "tf.l2_loss";
29 | "Neural Network" -> "l2_normalize" -> "tf.l2_normalize";
30 | "Neural Network" -> "learned_unigram_candidate_sampler" -> "tf.learned_unigram_candidate_sampler";
31 | "Neural Network" -> "local_response_normalization" -> "tf.local_response_normalization";
32 | "Neural Network" -> "log_softmax" -> "tf.log_softmax";
33 | "Neural Network" -> "log_uniform_candidate_sampler" -> "tf.log_uniform_candidate_sampler";
34 | "Neural Network" -> "max_pool" -> "tf.max_pool";
35 | "Neural Network" -> "max_pool3d" -> "tf.max_pool3d";
36 | "Neural Network" -> "max_pool_with_argmax" -> "tf.max_pool_with_argmax";
37 | "Neural Network" -> "moments" -> "tf.moments";
38 | "Neural Network" -> "nce_loss" -> "tf.nce_loss";
39 | "Neural Network" -> "normalize_moments" -> "tf.normalize_moments";
40 | "Neural Network" -> "relu" -> "tf.relu";
41 | "Neural Network" -> "relu6" -> "tf.relu6";
42 | "Neural Network" -> "rnn" -> "tf.rnn";
43 | "Neural Network" -> "sampled_softmax_loss" -> "tf.sampled_softmax_loss";
44 | "Neural Network" -> "separable_conv2d" -> "tf.separable_conv2d";
45 | "Neural Network" -> "sigmoid" -> "tf.sigmoid";
46 | "Neural Network" -> "sigmoid_cross_entropy_with_logits" -> "tf.sigmoid_cross_entropy_with_logits";
47 | "Neural Network" -> "softmax" -> "tf.softmax";
48 | "Neural Network" -> "softmax_cross_entropy_with_logits" -> "tf.softmax_cross_entropy_with_logits";
49 | "Neural Network" -> "softplus" -> "tf.softplus";
50 | "Neural Network" -> "softsign" -> "tf.softsign";
51 | "Neural Network" -> "sparse_softmax_cross_entropy_with_logits" -> "tf.sparse_softmax_cross_entropy_with_logits";
52 | "Neural Network" -> "state_saving_rnn" -> "tf.state_saving_rnn";
53 | "Neural Network" -> "sufficient_statistics" -> "tf.sufficient_statistics";
54 | "Neural Network" -> "tanh" -> "tf.tanh";
55 | "Neural Network" -> "top_k" -> "tf.top_k";
56 | "Neural Network" -> "uniform_candidate_sampler" -> "tf.uniform_candidate_sampler";
57 | "Neural Network" -> "weighted_cross_entropy_with_logits" -> "tf.weighted_cross_entropy_with_logits";
58 | }
59 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Running Graphs.dot:
--------------------------------------------------------------------------------
1 | digraph "Running Graphs"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Running Graphs" -> "AbortedError" -> "tf.AbortedError";
5 | "Running Graphs" -> "AlreadyExistsError" -> "tf.AlreadyExistsError";
6 | "Running Graphs" -> "CancelledError" -> "tf.CancelledError";
7 | "Running Graphs" -> "DataLossError" -> "tf.DataLossError";
8 | "Running Graphs" -> "DeadlineExceededError" -> "tf.DeadlineExceededError";
9 | "Running Graphs" -> "FailedPreconditionError" -> "tf.FailedPreconditionError";
10 | "Running Graphs" -> "get_default_session" -> "tf.get_default_session";
11 | "Running Graphs" -> "InteractiveSession" -> "tf.InteractiveSession";
12 | "Running Graphs" -> "InternalError" -> "tf.InternalError";
13 | "Running Graphs" -> "InvalidArgumentError" -> "tf.InvalidArgumentError";
14 | "Running Graphs" -> "NotFoundError" -> "tf.NotFoundError";
15 | "Running Graphs" -> "OpError" -> "tf.OpError";
16 | "Running Graphs" -> "OutOfRangeError" -> "tf.OutOfRangeError";
17 | "Running Graphs" -> "PermissionDeniedError" -> "tf.PermissionDeniedError";
18 | "Running Graphs" -> "ResourceExhaustedError" -> "tf.ResourceExhaustedError";
19 | "Running Graphs" -> "Session" -> "tf.Session";
20 | "Running Graphs" -> "UnauthenticatedError" -> "tf.UnauthenticatedError";
21 | "Running Graphs" -> "UnavailableError" -> "tf.UnavailableError";
22 | "Running Graphs" -> "UnimplementedError" -> "tf.UnimplementedError";
23 | "Running Graphs" -> "UnknownError" -> "tf.UnknownError";
24 | }
25 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Sparse Tensors.dot:
--------------------------------------------------------------------------------
1 | digraph "Sparse Tensors"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Sparse Tensors" -> "shape" -> "tf.shape";
5 | "Sparse Tensors" -> "sparse_add" -> "tf.sparse_add";
6 | "Sparse Tensors" -> "sparse_concat" -> "tf.sparse_concat";
7 | "Sparse Tensors" -> "sparse_fill_empty_rows" -> "tf.sparse_fill_empty_rows";
8 | "Sparse Tensors" -> "sparse_maximum" -> "tf.sparse_maximum";
9 | "Sparse Tensors" -> "sparse_merge" -> "tf.sparse_merge";
10 | "Sparse Tensors" -> "sparse_minimum" -> "tf.sparse_minimum";
11 | "Sparse Tensors" -> "sparse_reduce_sum" -> "tf.sparse_reduce_sum";
12 | "Sparse Tensors" -> "sparse_reorder" -> "tf.sparse_reorder";
13 | "Sparse Tensors" -> "sparse_reset_shape" -> "tf.sparse_reset_shape";
14 | "Sparse Tensors" -> "sparse_reshape" -> "tf.sparse_reshape";
15 | "Sparse Tensors" -> "sparse_retain" -> "tf.sparse_retain";
16 | "Sparse Tensors" -> "sparse_softmax" -> "tf.sparse_softmax";
17 | "Sparse Tensors" -> "sparse_split" -> "tf.sparse_split";
18 | "Sparse Tensors" -> "sparse_tensor_dense_matmul" -> "tf.sparse_tensor_dense_matmul";
19 | "Sparse Tensors" -> "sparse_tensor_to_dense" -> "tf.sparse_tensor_to_dense";
20 | "Sparse Tensors" -> "sparse_to_dense" -> "tf.sparse_to_dense";
21 | "Sparse Tensors" -> "sparse_to_indicator" -> "tf.sparse_to_indicator";
22 | "Sparse Tensors" -> "SparseTensor" -> "tf.SparseTensor";
23 | "Sparse Tensors" -> "SparseTensorValue" -> "tf.SparseTensorValue";
24 | }
25 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Statistical distributions (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Statistical distributions (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Statistical distributions (contrib)" -> "BaseDistribution" -> "tf.contrib.distributions.BaseDistribution";
5 | "Statistical distributions (contrib)" -> "batch_matrix_diag_transform" -> "tf.contrib.distributions.batch_matrix_diag_transform";
6 | "Statistical distributions (contrib)" -> "Bernoulli" -> "tf.contrib.distributions.Bernoulli";
7 | "Statistical distributions (contrib)" -> "Beta" -> "tf.contrib.distributions.Beta";
8 | "Statistical distributions (contrib)" -> "Categorical" -> "tf.contrib.distributions.Categorical";
9 | "Statistical distributions (contrib)" -> "Chi2" -> "tf.contrib.distributions.Chi2";
10 | "Statistical distributions (contrib)" -> "Dirichlet" -> "tf.contrib.distributions.Dirichlet";
11 | "Statistical distributions (contrib)" -> "DirichletMultinomial" -> "tf.contrib.distributions.DirichletMultinomial";
12 | "Statistical distributions (contrib)" -> "Distribution" -> "tf.contrib.distributions.Distribution";
13 | "Statistical distributions (contrib)" -> "Exponential" -> "tf.contrib.distributions.Exponential";
14 | "Statistical distributions (contrib)" -> "Gamma" -> "tf.contrib.distributions.Gamma";
15 | "Statistical distributions (contrib)" -> "InverseGamma" -> "tf.contrib.distributions.InverseGamma";
16 | "Statistical distributions (contrib)" -> "kl" -> "tf.contrib.distributions.kl";
17 | "Statistical distributions (contrib)" -> "Laplace" -> "tf.contrib.distributions.Laplace";
18 | "Statistical distributions (contrib)" -> "MultivariateNormalCholesky" -> "tf.contrib.distributions.MultivariateNormalCholesky";
19 | "Statistical distributions (contrib)" -> "MultivariateNormalDiag" -> "tf.contrib.distributions.MultivariateNormalDiag";
20 | "Statistical distributions (contrib)" -> "MultivariateNormalFull" -> "tf.contrib.distributions.MultivariateNormalFull";
21 | "Statistical distributions (contrib)" -> "Normal" -> "tf.contrib.distributions.Normal";
22 | "Statistical distributions (contrib)" -> "normal_congugates_known_sigma_predictive" -> "tf.contrib.distributions.normal_congugates_known_sigma_predictive";
23 | "Statistical distributions (contrib)" -> "normal_conjugates_known_sigma_posterior" -> "tf.contrib.distributions.normal_conjugates_known_sigma_posterior";
24 | "Statistical distributions (contrib)" -> "RegisterKL" -> "tf.contrib.distributions.RegisterKL";
25 | "Statistical distributions (contrib)" -> "StudentT" -> "tf.contrib.distributions.StudentT";
26 | "Statistical distributions (contrib)" -> "TransformedDistribution" -> "tf.contrib.distributions.TransformedDistribution";
27 | "Statistical distributions (contrib)" -> "Uniform" -> "tf.contrib.distributions.Uniform";
28 | }
29 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Strings.dot:
--------------------------------------------------------------------------------
1 | digraph "Strings"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Strings" -> "as_string" -> "tf.as_string";
5 | "Strings" -> "reduce_join" -> "tf.reduce_join";
6 | "Strings" -> "string_join" -> "tf.string_join";
7 | "Strings" -> "string_to_hash_bucket" -> "tf.string_to_hash_bucket";
8 | "Strings" -> "string_to_hash_bucket_fast" -> "tf.string_to_hash_bucket_fast";
9 | "Strings" -> "string_to_hash_bucket_strong" -> "tf.string_to_hash_bucket_strong";
10 | }
11 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Summary Operations.dot:
--------------------------------------------------------------------------------
1 | digraph "Summary Operations"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Summary Operations" -> "tensor_summary" -> "tf.tensor_summary";
5 | }
6 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Tensor Handle Operations.dot:
--------------------------------------------------------------------------------
1 | digraph "Tensor Handle Operations"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Tensor Handle Operations" -> "delete_session_tensor" -> "tf.delete_session_tensor";
5 | "Tensor Handle Operations" -> "get_session_handle" -> "tf.get_session_handle";
6 | "Tensor Handle Operations" -> "get_session_tensor" -> "tf.get_session_tensor";
7 | }
8 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Tensor Transformations.dot:
--------------------------------------------------------------------------------
1 | digraph "Tensor Transformations"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Tensor Transformations" -> "batch_to_space" -> "tf.batch_to_space";
5 | "Tensor Transformations" -> "bitcast" -> "tf.bitcast";
6 | "Tensor Transformations" -> "boolean_mask" -> "tf.boolean_mask";
7 | "Tensor Transformations" -> "cast" -> "tf.cast";
8 | "Tensor Transformations" -> "concat" -> "tf.concat";
9 | "Tensor Transformations" -> "depth_to_space" -> "tf.depth_to_space";
10 | "Tensor Transformations" -> "dynamic_partition" -> "tf.dynamic_partition";
11 | "Tensor Transformations" -> "dynamic_stitch" -> "tf.dynamic_stitch";
12 | "Tensor Transformations" -> "expand_dims" -> "tf.expand_dims";
13 | "Tensor Transformations" -> "extract_image_patches" -> "tf.extract_image_patches";
14 | "Tensor Transformations" -> "gather" -> "tf.gather";
15 | "Tensor Transformations" -> "gather_nd" -> "tf.gather_nd";
16 | "Tensor Transformations" -> "meshgrid" -> "tf.meshgrid";
17 | "Tensor Transformations" -> "one_hot" -> "tf.one_hot";
18 | "Tensor Transformations" -> "pack" -> "tf.pack";
19 | "Tensor Transformations" -> "pad" -> "tf.pad";
20 | "Tensor Transformations" -> "rank" -> "tf.rank";
21 | "Tensor Transformations" -> "reshape" -> "tf.reshape";
22 | "Tensor Transformations" -> "reverse" -> "tf.reverse";
23 | "Tensor Transformations" -> "reverse_sequence" -> "tf.reverse_sequence";
24 | "Tensor Transformations" -> "saturate_cast" -> "tf.saturate_cast";
25 | "Tensor Transformations" -> "shape" -> "tf.shape";
26 | "Tensor Transformations" -> "shape_n" -> "tf.shape_n";
27 | "Tensor Transformations" -> "size" -> "tf.size";
28 | "Tensor Transformations" -> "slice" -> "tf.slice";
29 | "Tensor Transformations" -> "space_to_batch" -> "tf.space_to_batch";
30 | "Tensor Transformations" -> "space_to_depth" -> "tf.space_to_depth";
31 | "Tensor Transformations" -> "split" -> "tf.split";
32 | "Tensor Transformations" -> "squeeze" -> "tf.squeeze";
33 | "Tensor Transformations" -> "string_to_number" -> "tf.string_to_number";
34 | "Tensor Transformations" -> "tile" -> "tf.tile";
35 | "Tensor Transformations" -> "to_bfloat16" -> "tf.to_bfloat16";
36 | "Tensor Transformations" -> "to_double" -> "tf.to_double";
37 | "Tensor Transformations" -> "to_float" -> "tf.to_float";
38 | "Tensor Transformations" -> "to_int32" -> "tf.to_int32";
39 | "Tensor Transformations" -> "to_int64" -> "tf.to_int64";
40 | "Tensor Transformations" -> "transpose" -> "tf.transpose";
41 | "Tensor Transformations" -> "unique_with_counts" -> "tf.unique_with_counts";
42 | "Tensor Transformations" -> "unpack" -> "tf.unpack";
43 | }
44 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/TensorArray Operations.dot:
--------------------------------------------------------------------------------
1 | digraph "TensorArray Operations"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "TensorArray Operations" -> "concat" -> "tf.concat";
5 | "TensorArray Operations" -> "pack" -> "tf.pack";
6 | "TensorArray Operations" -> "split" -> "tf.split";
7 | "TensorArray Operations" -> "TensorArray" -> "tf.TensorArray";
8 | "TensorArray Operations" -> "unpack" -> "tf.unpack";
9 | }
10 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Testing.dot:
--------------------------------------------------------------------------------
1 | digraph "Testing"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Testing" -> "assert_equal_graph_def" -> "tf.assert_equal_graph_def";
5 | "Testing" -> "compute_gradient" -> "tf.compute_gradient";
6 | "Testing" -> "compute_gradient_error" -> "tf.compute_gradient_error";
7 | "Testing" -> "get_temp_dir" -> "tf.get_temp_dir";
8 | "Testing" -> "is_built_with_cuda" -> "tf.is_built_with_cuda";
9 | "Testing" -> "main" -> "tf.main";
10 | }
11 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Training.dot:
--------------------------------------------------------------------------------
1 | digraph "Training"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Training" -> "AdadeltaOptimizer" -> "tf.AdadeltaOptimizer";
5 | "Training" -> "AdagradOptimizer" -> "tf.AdagradOptimizer";
6 | "Training" -> "AdamOptimizer" -> "tf.AdamOptimizer";
7 | "Training" -> "add_queue_runner" -> "tf.add_queue_runner";
8 | "Training" -> "AggregationMethod" -> "tf.AggregationMethod";
9 | "Training" -> "audio_summary" -> "tf.audio_summary";
10 | "Training" -> "clip_by_average_norm" -> "tf.clip_by_average_norm";
11 | "Training" -> "clip_by_global_norm" -> "tf.clip_by_global_norm";
12 | "Training" -> "clip_by_norm" -> "tf.clip_by_norm";
13 | "Training" -> "clip_by_value" -> "tf.clip_by_value";
14 | "Training" -> "ClusterSpec" -> "tf.ClusterSpec";
15 | "Training" -> "Coordinator" -> "tf.Coordinator";
16 | "Training" -> "do_quantize_training_on_graphdef" -> "tf.do_quantize_training_on_graphdef";
17 | "Training" -> "exponential_decay" -> "tf.exponential_decay";
18 | "Training" -> "ExponentialMovingAverage" -> "tf.ExponentialMovingAverage";
19 | "Training" -> "FtrlOptimizer" -> "tf.FtrlOptimizer";
20 | "Training" -> "generate_checkpoint_state_proto" -> "tf.generate_checkpoint_state_proto";
21 | "Training" -> "global_norm" -> "tf.global_norm";
22 | "Training" -> "global_step" -> "tf.global_step";
23 | "Training" -> "GradientDescentOptimizer" -> "tf.GradientDescentOptimizer";
24 | "Training" -> "gradients" -> "tf.gradients";
25 | "Training" -> "histogram_summary" -> "tf.histogram_summary";
26 | "Training" -> "image_summary" -> "tf.image_summary";
27 | "Training" -> "LooperThread" -> "tf.LooperThread";
28 | "Training" -> "merge_all_summaries" -> "tf.merge_all_summaries";
29 | "Training" -> "merge_summary" -> "tf.merge_summary";
30 | "Training" -> "MomentumOptimizer" -> "tf.MomentumOptimizer";
31 | "Training" -> "Optimizer" -> "tf.Optimizer";
32 | "Training" -> "QueueRunner" -> "tf.QueueRunner";
33 | "Training" -> "replica_device_setter" -> "tf.replica_device_setter";
34 | "Training" -> "RMSPropOptimizer" -> "tf.RMSPropOptimizer";
35 | "Training" -> "scalar_summary" -> "tf.scalar_summary";
36 | "Training" -> "Server" -> "tf.Server";
37 | "Training" -> "SessionManager" -> "tf.SessionManager";
38 | "Training" -> "start_queue_runners" -> "tf.start_queue_runners";
39 | "Training" -> "stop_gradient" -> "tf.stop_gradient";
40 | "Training" -> "summary_iterator" -> "tf.summary_iterator";
41 | "Training" -> "SummaryWriter" -> "tf.SummaryWriter";
42 | "Training" -> "Supervisor" -> "tf.Supervisor";
43 | "Training" -> "write_graph" -> "tf.write_graph";
44 | "Training" -> "zero_fraction" -> "tf.zero_fraction";
45 | }
46 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Utilities (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Utilities (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Utilities (contrib)" -> "constant_value" -> "tf.contrib.util.constant_value";
5 | "Utilities (contrib)" -> "make_ndarray" -> "tf.contrib.util.make_ndarray";
6 | "Utilities (contrib)" -> "make_tensor_proto" -> "tf.contrib.util.make_tensor_proto";
7 | "Utilities (contrib)" -> "ops_used_by_graph_def" -> "tf.contrib.util.ops_used_by_graph_def";
8 | "Utilities (contrib)" -> "stripped_op_list_for_graph" -> "tf.contrib.util.stripped_op_list_for_graph";
9 | }
10 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Variables.dot:
--------------------------------------------------------------------------------
1 | digraph "Variables"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Variables" -> "all_variables" -> "tf.all_variables";
5 | "Variables" -> "assert_variables_initialized" -> "tf.assert_variables_initialized";
6 | "Variables" -> "assign" -> "tf.assign";
7 | "Variables" -> "assign_add" -> "tf.assign_add";
8 | "Variables" -> "assign_sub" -> "tf.assign_sub";
9 | "Variables" -> "constant_initializer" -> "tf.constant_initializer";
10 | "Variables" -> "count_up_to" -> "tf.count_up_to";
11 | "Variables" -> "device" -> "tf.device";
12 | "Variables" -> "export_meta_graph" -> "tf.export_meta_graph";
13 | "Variables" -> "get_checkpoint_state" -> "tf.get_checkpoint_state";
14 | "Variables" -> "get_variable" -> "tf.get_variable";
15 | "Variables" -> "get_variable_scope" -> "tf.get_variable_scope";
16 | "Variables" -> "import_meta_graph" -> "tf.import_meta_graph";
17 | "Variables" -> "IndexedSlices" -> "tf.IndexedSlices";
18 | "Variables" -> "initialize_all_variables" -> "tf.initialize_all_variables";
19 | "Variables" -> "initialize_local_variables" -> "tf.initialize_local_variables";
20 | "Variables" -> "initialize_variables" -> "tf.initialize_variables";
21 | "Variables" -> "is_variable_initialized" -> "tf.is_variable_initialized";
22 | "Variables" -> "latest_checkpoint" -> "tf.latest_checkpoint";
23 | "Variables" -> "local_variables" -> "tf.local_variables";
24 | "Variables" -> "make_template" -> "tf.make_template";
25 | "Variables" -> "min_max_variable_partitioner" -> "tf.min_max_variable_partitioner";
26 | "Variables" -> "moving_average_variables" -> "tf.moving_average_variables";
27 | "Variables" -> "no_regularizer" -> "tf.no_regularizer";
28 | "Variables" -> "ones_initializer" -> "tf.ones_initializer";
29 | "Variables" -> "random_normal_initializer" -> "tf.random_normal_initializer";
30 | "Variables" -> "random_uniform_initializer" -> "tf.random_uniform_initializer";
31 | "Variables" -> "report_uninitialized_variables" -> "tf.report_uninitialized_variables";
32 | "Variables" -> "Saver" -> "tf.Saver";
33 | "Variables" -> "scatter_add" -> "tf.scatter_add";
34 | "Variables" -> "scatter_sub" -> "tf.scatter_sub";
35 | "Variables" -> "scatter_update" -> "tf.scatter_update";
36 | "Variables" -> "sparse_mask" -> "tf.sparse_mask";
37 | "Variables" -> "trainable_variables" -> "tf.trainable_variables";
38 | "Variables" -> "truncated_normal_initializer" -> "tf.truncated_normal_initializer";
39 | "Variables" -> "uniform_unit_scaling_initializer" -> "tf.uniform_unit_scaling_initializer";
40 | "Variables" -> "update_checkpoint_state" -> "tf.update_checkpoint_state";
41 | "Variables" -> "Variable" -> "tf.Variable";
42 | "Variables" -> "variable_axis_size_partitioner" -> "tf.variable_axis_size_partitioner";
43 | "Variables" -> "variable_op_scope" -> "tf.variable_op_scope";
44 | "Variables" -> "variable_scope" -> "tf.variable_scope";
45 | "Variables" -> "VariableScope" -> "tf.VariableScope";
46 | "Variables" -> "zeros_initializer" -> "tf.zeros_initializer";
47 | }
48 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Wraps python functions.dot:
--------------------------------------------------------------------------------
1 | digraph "Wraps python functions"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Wraps python functions" -> "py_func" -> "tf.py_func";
5 | }
6 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/tf.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/the0demiurge/Python-Scripts/02bedbcd7909b0b25a79ba086cdb0b3a87e1652a/src/.obs/dot_parser4tf_csv_file/tf.pdf
--------------------------------------------------------------------------------
/src/ComputerScience/Calculator_AST.py:
--------------------------------------------------------------------------------
1 | """Calculator, worte in AST in python
2 | """
3 | # This could be implemented as string-based big-number calculation
4 | from itertools import accumulate
5 |
6 | def ADD(x, y):
7 | return str(eval(x) + eval(y))
8 |
9 |
10 | def SUB(x, y):
11 | return str(eval(x) - eval(y))
12 |
13 |
14 | def POW(x, y):
15 | return str(eval(x) ** eval(y))
16 |
17 |
18 | def MUL(x, y):
19 | return str(eval(x) * eval(y))
20 |
21 |
22 | def DIV(x, y):
23 | return str(eval(x) / eval(y))
24 |
25 |
26 | operations = {
27 | # 'op': [priority, function]
28 | '+': [1, ADD],
29 | '-': [1, SUB],
30 | '*': [2, MUL],
31 | '/': [2, DIV],
32 | '^': [3, POW]
33 | }
34 |
35 |
36 | class TreeNode(object):
37 | __slots__ = ('val', 'left', 'right')
38 |
39 | def __init__(self, val=None, left=None, right=None):
40 | self.val, self.left, self.right = val, left, right
41 |
42 | def __repr__(self):
43 | left, right = ' ', ' '
44 | if self.left is not None:
45 | left = repr(self.left) + ' <- '
46 | if self.right is not None:
47 | right = ' -> ' + repr(self.right)
48 | return '(' + left + repr(self.val) + right + ')'
49 |
50 |
51 | def split_atomic(string):
52 | """
53 | @brief Splits expression to atoms.
54 |
55 | @param string The string expression
56 |
57 | @return list[tuple("TYPE", 'value')]
58 | """
59 |     # strip outer parens only when the leading '(' actually matches the trailing ')'
60 |     while string.startswith('(') and string.endswith(')') and all(d != 0 for d in accumulate({'(': 1, ')': -1}.get(c, 0) for c in string[:-1])):
61 |         string = string[1:-1]
62 | result = list()
63 | if not string:
64 | return result
65 |     head, tail = 0, 0  # head and tail delimit the slice holding a number token
66 |
67 |     numeric = False  # whether the previous atom was numeric; this decides how '+'/'-' are parsed
68 | while tail < len(string):
69 |         # '+'/'-' following a number act as binary operators
70 |         if string[tail] in {'+', '-'} and numeric:
71 |             numeric = False
72 |             result.append(('SYMB', string[tail]))
73 |         # digits, or '+'/'-' acting as a number's sign
74 |         elif (string[tail].isdecimal() or string[tail] == '.') or (string[tail] in {'+', '-'} and not numeric):
75 | numeric = True
76 | head = tail
77 | tail += 1
78 |             while tail < len(string) and (string[tail].isdecimal() or string[tail] == '.'):
79 | tail += 1
80 | num = string[head:tail]
81 | if num in {'+', '-'}:
82 | num += '1'
83 | result.append(('NUM', num))
84 | tail -= 1
85 |         # remaining operators and parentheses
86 |         elif string[tail] in {'*', '/', '^', '(', ')'}:
87 |             symb = 'PAREN' if string[tail] in {'(', ')'} else 'SYMB'
88 |             if numeric and string[tail] == '(':  # implicit multiplication, e.g. 2(3+4)
89 |                 result.append(('SYMB', '*'))
90 |             numeric = string[tail] == ')'
91 | result.append((symb, string[tail]))
92 | tail += 1
93 | return result
94 |
95 |
96 | def ast(atoms):
97 | """
98 | @brief Receives atoms and return an Abstract Syntax Tree
99 | for a calculator
100 |
101 |     @param atoms The atoms produced by split_atomic
102 |
103 | @return AST Tree Node
104 | """
105 |     if len(atoms) == 0:
106 | return
107 |     # Strip outer parens only while the first '(' actually matches the final ')'
108 |     while atoms and atoms[0][1] == '(' and atoms[-1][1] == ')' and all(d != 0 for d in accumulate({'(': 1, ')': -1}.get(t, 0) for _, t in atoms[:-1])):
109 |         atoms = atoms[1:-1]
110 |
111 | parens = {
112 | '(': 1,
113 | ')': -1
114 | }
115 | paren_amounts = 0
116 | min_prior = 10
117 | min_prior_index = None
118 |
119 | # find the operator out of parentheses with smallest priority
120 | for i, data in enumerate(atoms):
121 | TYPE, TOKEN = data
122 |         # track whether we are inside parentheses
123 |         if TYPE == 'PAREN':
124 |             paren_amounts += parens[TOKEN]
125 |
126 |         elif TYPE == 'SYMB' and paren_amounts == 0:
127 | if operations[TOKEN][0] < min_prior:
128 | min_prior_index, min_prior = i, operations[TOKEN][0]
129 |
130 | if min_prior_index is None:
131 | return TreeNode(atoms[0][1])
132 |
133 | left = ast(atoms[:min_prior_index])
134 | right = ast(atoms[min_prior_index + 1:])
135 | return TreeNode(atoms[min_prior_index][1], left, right)
136 |
137 |
138 | def solve(ast_root, DEBUG=False):
139 | """Traverse the AST and do calculation
140 | """
141 | if ast_root is None:
142 | raise ValueError('AST cannot be None')
143 | if ast_root.left is ast_root.right is None:
144 | return ast_root.val
145 | else:
146 | if DEBUG:
147 | left = solve(ast_root.left, DEBUG)
148 | right = solve(ast_root.right, DEBUG)
149 | result = operations[ast_root.val][1](left, right)
150 | print('Calculating:', ast_root.val, ast_root.left, '|', ast_root.right, '=>', left, ast_root.val, right, '=', result)
151 | return result
152 | else:
153 | return operations[ast_root.val][1](solve(ast_root.left), solve(ast_root.right))
154 |
155 |
156 | def calculator(string): return solve(ast(split_atomic(string)))
157 |
158 |
159 | def main():
160 | DEBUG = False
161 | prompt = ['Expression > ', 'DEBUG > ']
162 | try:
163 |         while True:
164 | string = input(prompt[DEBUG])
165 | if string.upper().strip() == 'DEBUG':
166 | DEBUG = not DEBUG
167 | continue
168 |
169 | if DEBUG:
170 | print('Atoms :', list(zip(*split_atomic(string)))[1])
171 | print('Tree :', repr(ast(split_atomic(string))))
172 | # print('PyResult :', eval(string.replace('^', '**')))
173 | print('MyResult :', solve(ast(split_atomic(string)), DEBUG))
174 | else:
175 | print('Result:', calculator(string))
176 | except (EOFError, KeyboardInterrupt):
177 | exit()
178 |
179 |
180 | if __name__ == '__main__':
181 | main()
182 |
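
A few spot checks of the pipeline above (split_atomic -> ast -> solve); the module name `Calculator_AST` is taken from the file name and assumed importable:

    from Calculator_AST import calculator

    print(calculator('1+2*3'))    # '7'   -- '*' binds tighter than '+'
    print(calculator('(1+2)*3'))  # '9'   -- parentheses override priority
    print(calculator('-3+5'))     # '2'   -- leading '-' parsed as a sign
    print(calculator('2^3^2'))    # '512' -- equal-priority '^' associates to the right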
--------------------------------------------------------------------------------
/src/ComputerScience/FullBinaryHeap.py:
--------------------------------------------------------------------------------
1 | from dsr import BinaryTreeNode
2 |
3 |
4 | class FullBinaryTreeHeap(object):
5 | def __init__(self, data=None, is_largest=True):
6 | self.__is_largest = is_largest
7 | if is_largest:
8 | self.__judgement = lambda x, y: x < y
9 | else:
10 | self.__judgement = lambda x, y: x > y
11 | if data is None:
12 | self.__heap = list()
13 | else:
14 | self.__heap = [i for i in data]
15 | self.heapify()
16 |
17 | def shift_down(self, i):
18 | child = 2 * i + 1
19 | T = self.__heap[i]
20 | while child < len(self.__heap):
21 | if child + 1 < len(self.__heap) and self.__judgement(
22 | self.__heap[child],
23 | self.__heap[child + 1]):
24 | child += 1
25 | if self.__judgement(T, self.__heap[child]):
26 | self.__heap[i] = self.__heap[child]
27 | i = child
28 | child = 2 * i + 1
29 | else:
30 | break
31 | self.__heap[i] = T
32 |
33 | def shift_up(self, i):
34 | parent = (i + 1) // 2 - 1
35 | T = self.__heap[i]
36 | while parent >= 0:
37 | if self.__judgement(self.__heap[parent], T):
38 | self.__heap[i] = self.__heap[parent]
39 | i = parent
40 | parent = (i + 1) // 2 - 1
41 | else:
42 | break
43 | self.__heap[i] = T
44 |
45 | def push(self, data):
46 | self.__heap.append(data)
47 | self.shift_up(len(self.__heap) - 1)
48 |
49 | def pop(self):
50 | if len(self.__heap) == 0:
51 |             raise IndexError('Not enough data to pop')
52 | ret = self.__heap[0]
53 | self.__heap[0] = self.__heap[-1]
54 | self.__heap.pop()
55 | if len(self.__heap) > 0:
56 | self.shift_down(0)
57 | return ret
58 |
59 | def replace(self, i, data):
60 | self.__heap[i] = data
61 | self.shift_up(i)
62 |
63 | def heapify(self):
64 | for i in range(len(self.__heap) // 2 - 1, -1, -1):
65 | self.shift_down(i)
66 |
67 | @property
68 | def data(self):
69 | return self.__heap.copy()
70 |
71 | @property
72 | def tree(self):
73 | return self.__tree()
74 |
75 | def __tree(self, i=None):
76 | if not self.__heap:
77 | return BinaryTreeNode()
78 | if i is None:
79 | i = 0
80 | root = BinaryTreeNode(self.__heap[i])
81 | if 2 * i + 1 < len(self.__heap):
82 | root.left = self.__tree(2 * i + 1)
83 | if 2 * i + 2 < len(self.__heap):
84 | root.right = self.__tree(2 * i + 2)
85 | return root
86 |
87 | def copy(self):
88 | return FullBinaryTreeHeap(self.__heap, self.__is_largest)
89 |
90 | def __repr__(self):
91 | return repr(self.tree)
92 |
93 | def __len__(self):
94 | return self.__heap.__len__()
95 |
96 | def __iter__(self):
97 | iterator = self.copy()
98 | while len(iterator) > 0:
99 | yield iterator.pop()
100 |
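
A quick demonstration of the heap above (a max-heap by default); note that `__iter__` drains a copy, leaving the original intact. This assumes dsr.py is on the path, since the module imports BinaryTreeNode from it:

    from FullBinaryHeap import FullBinaryTreeHeap

    heap = FullBinaryTreeHeap([5, 1, 9, 3, 7])
    heap.push(8)
    print(heap.pop())   # 9 -- largest element first
    print(list(heap))   # [8, 7, 5, 3, 1] -- descending drain of a copy
    print(len(heap))    # 5 -- the heap itself is untouched by iteration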
--------------------------------------------------------------------------------
/src/ComputerScience/dsr.py:
--------------------------------------------------------------------------------
1 | from reprlib import recursive_repr
2 |
3 |
4 | class BinaryTreeNode(object):
5 |
6 | def __init__(self, value=None, left=None, right=None):
7 | self.left, self.right = left, right
8 | self.value = value
9 |
10 | @recursive_repr(fillvalue='...')
11 | def __repr__(self):
12 | if self.left is self.right is None:
13 | return '(' + repr(self.value) + ')'
14 |
15 | result = list()
16 | repr_left, repr_right = map(
17 | lambda x: repr(x).split('\n') if x is not None else list(),
18 | (self.left, self.right))
19 | repr_self = '(' + repr(self.value) + ')'
20 |
21 | len_left, len_right = map(
22 | lambda x: len(x[0]) if len(x) > 0 else 0,
23 | (repr_left, repr_right))
24 |
25 | height_left, height_right, len_self = map(
26 | len,
27 | (repr_left, repr_right, repr_self))
28 |
29 | diff_height = height_left - height_right
30 | left_bar = ' ' if self.left is None else '/'
31 | right_bar = ' ' if self.right is None else '\\'
32 |
33 | if diff_height > 0:
34 | repr_right.extend([' ' * len_right] * diff_height)
35 | elif diff_height < 0:
36 | repr_left.extend([' ' * len_left] * (-diff_height))
37 |
38 | result.append(' ' * (len_left + 1) + repr_self + ' ' * (len_right + 1))
39 | result.append(' ' * (len_left) + left_bar + ' ' * len_self + right_bar + ' ' * len_right)
40 | result.extend(map(
41 | lambda x: (' ' * (len_self + 2)).join(x),
42 | zip(repr_left, repr_right)))
43 |
44 | return '\n'.join(result)
45 |
46 |
47 | class TreeNode(object):
48 | __slots__ = ("value", "children")
49 |
50 | def __init__(self, value=None, children=None):
51 | self.value = value
52 | self.children = list() if children is None else children
53 |
54 | @recursive_repr(fillvalue='...')
55 | def __repr__(self):
56 | root = '(' + repr(self.value) + ')'
57 | if not self.children:
58 | return root
59 | result = [root]
60 | for child in self.children[:-1]:
61 | result.append(self.__build_child_repr(child, end=False))
62 | result.append(self.__build_child_repr(self.children[-1], end=True))
63 | return '\n'.join(result)
64 |
65 | @staticmethod
66 | def __build_child_repr(child, end=False):
67 | child_repr = repr(child).split('\n')
68 | fork, space = (' └── ', ' ') if end else (' ├── ', ' │ ')
69 | return '\n'.join([fork + child_repr[0]] + list(map(lambda x: space + x, child_repr[1:])))
70 |
71 |
72 | def main():
73 | print('BinaryTreeNode:')
74 | a = BinaryTreeNode(100)
75 | b = BinaryTreeNode(2)
76 | c = BinaryTreeNode(0, a, b)
77 | d = BinaryTreeNode('a', c, c)
78 | a.right = d
79 | a.left = d
80 |
81 | print(d)
82 | print('TreeNode:')
83 | root = TreeNode('tree', [
84 | TreeNode('types', [TreeNode(str), TreeNode(int)]),
85 | TreeNode('values', [TreeNode(1), TreeNode(3.1415926), TreeNode(True)]),
86 | TreeNode('empty'),
87 | 2.718281828,
88 | 'Not TreeNode'
89 | ])
90 | print(root)
91 |
92 |
93 | if __name__ == '__main__':
94 | main()
95 |
--------------------------------------------------------------------------------
/src/Controller/cartpole_pid.py:
--------------------------------------------------------------------------------
1 | import gym
2 | import pid
3 | import numpy as np
4 | import multiprocessing
5 | from matplotlib import pyplot
6 |
7 |
8 | def run(Kp=5, Ki=.1, Kd=.1, rho=0.5, n_iter=900, close=True, params=(0, -1, -1, -1)):
9 |     # Runs one episode with the given PID gains and state-weighting params
10 | # INIT
11 | controller = pid.PID(Kp=Kp, Ki=Ki, Kd=Kd)
12 | us = list()
13 | es = list()
14 | env = gym.make('CartPole-v0')
15 | env.reset()
16 | env.render(close=close)
17 |
18 | def decide_step(u):
19 |         # Only two discrete actions exist, so simply discretize the control signal.
20 | if u > 0:
21 | return 1
22 | elif u < 0:
23 | return 0
24 | else:
25 | return np.random.randint(0, 2)
26 |
27 |     # There are only two actions, `0` and `1`; the first one is chosen randomly
28 | step = decide_step(0)
29 |
30 | for i in range(n_iter):
31 |         # Inject disturbances at timesteps 20 and 60 ('_' avoids shadowing the loop variable i)
32 |         if i == 20:
33 |             for _ in range(4):
34 |                 env.step(0)
35 |         if i == 60:
36 |             for _ in range(4):
37 |                 env.step(1)
38 | state = env.step(step)
39 |         # The setpoint is all-zero, so the error is -(weighted sum of the state)
40 | e = - (sum(map(lambda x: x[0] * x[1], zip(params, state[0]))))
41 |         # Compute the control output
42 | u = controller.step(e)
43 | step = decide_step(u)
44 | us.append(u)
45 | es.append(e)
46 | env.render(close=close)
47 |
48 | loss = sum(map(lambda x: x ** 2, us)) + rho * sum(map(lambda x: x ** 2, es))
49 | return loss, us, es
50 |
51 |
52 | def cem(func, mean, variance, n=100, ratio=.2, max_iter=1000, min_var=0.001):
53 | """Cross-Entropy Method Optimizer, using Gaussian distribution approximation. Minimize loss returned from func
54 | Args:
55 | func receives parameters and returns loss only
56 | mean and variance: mean and variance of the init gaussian
57 | n: size of tries per iterate
58 | ratio: re-sampling referance ratio
59 | max_iter, min_var: stop condition
60 |
61 | Returns:
62 | param: best parameters
63 | mean: mean of parameters
64 | variance: variance of parameters
65 | traj: min-loss curve durning optimizing
66 | """
67 |
68 | # Check input
69 | n_ref = int(n * ratio)
70 | n_params = len(tuple(zip(mean, variance)))
71 | traj = list()
72 | assert n > 0, 'n not > 0'
73 |     assert 0 < n_ref < n, 'n_ref out of bounds'
74 | assert min_var > 0, 'min_var not > 0'
75 | assert min(variance) > 0, 'variance not > 0'
76 |
77 | pool = multiprocessing.Pool()
78 | for _ in range(max_iter):
79 | params = np.random.randn(n, n_params)
80 | for i, p in enumerate(zip(mean, variance)):
81 | params[:, i] = params[:, i] * np.sqrt(p[1]) + p[0]
82 | loss = pool.map(func, params)
83 | sorted_params, sorted_loss = zip(*sorted(zip(params, loss), key=lambda x: x[-1]))
84 | traj.append(sorted_loss[0])
85 | params_ref = np.array(sorted_params[:n_ref])
86 | mean = np.mean(params_ref, axis=0)
87 | variance = np.var(params_ref, axis=0)
88 | if min(variance) < min_var:
89 | break
90 | return list(params_ref[0]), mean, variance, traj
91 |
92 |
93 | def run_(x, rho=.5, close=True):
94 |     # Wrapper so multiprocessing.Pool can map run() over parameter vectors
95 | return run(x[0], x[1], x[2], close=close, rho=rho)[0]
96 |
97 |
98 | def run0(x):
99 | return run_(x, rho=0)
100 |
101 |
102 | def run_25(x):
103 | return run_(x, rho=.25)
104 |
105 |
106 | def run_5(x):
107 | return run_(x, rho=.5)
108 |
109 |
110 | def run_75(x):
111 | return run_(x, rho=.75)
112 |
113 |
114 | def run1(x):
115 | return run_(x, rho=1)
116 |
117 |
118 | def main():
119 | data = dict()
120 | for func, rho in zip([run0, run_25, run_5, run_75, run1], [0, .25, 0.5, .75, 1]):
121 | best, mean, variance, traj = cem(func, [10, 0, 0], [60, 40, 40])
122 | loss, us, es = run(mean[0], mean[1], mean[2], rho, n_iter=500, close=False)
123 | data[rho] = {
124 | 'traj': traj,
125 | 'us': us,
126 | 'es': es,
127 | 'best': best,
128 | 'mean': mean,
129 | 'variance': variance,
130 | }
131 |
132 | # Print information
133 | for rho in data:
134 | print('''rho = {}
135 | Best params: {}
136 | Mean of params: {}
137 | Var of params: {}
138 | Var of u: {}
139 | Mean of u: {}
140 | Var of e: {}
141 | Mean of e: {}
142 | \n'''.format(
143 | rho,
144 | data[rho]['best'],
145 | data[rho]['mean'],
146 | data[rho]['variance'],
147 | np.var(data[rho]['us']),
148 | np.mean(data[rho]['us']),
149 | np.var(data[rho]['es']),
150 | np.mean(data[rho]['es']),
151 | ))
152 |
153 |     # Plot optimization trajectory
154 | pyplot.figure()
155 | for rho in data:
156 | pyplot.plot(data[rho]['traj'], '*-', label='$\\rho={}$'.format(rho))
157 | pyplot.xlabel('Optimizing Iteration')
158 | pyplot.ylabel('Min Loss')
159 | pyplot.legend()
160 |
161 | # Plot us
162 | pyplot.figure()
163 | for rho in data:
164 | plot_data = data[rho]['us']
165 | # moving_average = list(map(lambda x: x[-1] / 2 + x[-2] / 4 + (x[-3] + x[-4]) / 8, zip(plot_data, plot_data[1:], plot_data[2:], plot_data[3:])))
166 | pyplot.plot(plot_data[:100], label='$\\rho={}$'.format(rho))
167 | pyplot.xlabel('$t$')
168 |     pyplot.ylabel('Control input')
169 | pyplot.legend()
170 |
171 | # Plot es
172 | pyplot.figure()
173 | for rho in data:
174 | plot_data = data[rho]['es']
175 | # moving_average = list(map(lambda x: x[-1] / 2 + x[-2] / 4 + (x[-3] + x[-4]) / 8, zip(plot_data, plot_data[1:], plot_data[2:], plot_data[3:])))
176 | pyplot.plot(plot_data[:100], label='$\\rho={}$'.format(rho))
177 | pyplot.xlabel('$t$')
178 | pyplot.ylabel('Deviation')
179 | pyplot.legend()
180 | pyplot.show()
181 |
182 |
183 | if __name__ == '__main__':
184 | main()
185 |
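
The cem() optimizer above is generic: it only needs a picklable function mapping a parameter vector to a scalar loss. A minimal sanity check on a toy quadratic (module name `cartpole_pid` assumed from the file name; the optimum at (3, -1, 2) is made up for illustration):

    import numpy as np
    from cartpole_pid import cem

    def quadratic_loss(params):
        # squared distance to the known optimum (3, -1, 2)
        return float(np.sum((np.asarray(params) - np.array([3.0, -1.0, 2.0])) ** 2))

    if __name__ == '__main__':  # guard needed because cem() uses multiprocessing
        best, mean, variance, traj = cem(quadratic_loss, mean=[0, 0, 0], variance=[10, 10, 10])
        print(best)      # should approach [3, -1, 2]
        print(traj[:5])  # per-iteration minimum loss, typically shrinking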
--------------------------------------------------------------------------------
/src/Controller/pid.py:
--------------------------------------------------------------------------------
1 | class PID(object):
2 |
3 | def __init__(self, Kp=1, Ki=.1, Kd=0, T_sample=1):
4 | self.Kp = Kp
5 | self.Ki = Ki
6 | self.Kd = Kd
7 | self.T_sample = T_sample
8 |
9 | self.u_prev = 0
10 | self.e_prev = 0
11 | self.e_prev_prev = 0
12 |
13 | def step(self, e):
14 | p = self.Kp * (e - self.e_prev)
15 | i = self.Ki * self.T_sample * e
16 | d = self.Kd * (e - 2 * self.e_prev + self.e_prev_prev)
17 | u = (self.u_prev + p + i + d)
18 | self.u_prev, self.e_prev, self.e_prev_prev = u, e, self.e_prev
19 | return u
20 |
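
step() implements the incremental (velocity-form) discrete PID law, u_k = u_{k-1} + Kp*(e_k - e_{k-1}) + Ki*T*e_k + Kd*(e_k - 2*e_{k-1} + e_{k-2}), so the controller accumulates its own output instead of keeping an explicit integral term. A minimal usage sketch (the setpoint and toy plant here are hypothetical, not part of the file):

    from pid import PID

    controller = PID(Kp=2.0, Ki=0.5, Kd=0.1, T_sample=0.1)
    setpoint, measurement = 1.0, 0.0
    for _ in range(5):
        u = controller.step(setpoint - measurement)  # feed the tracking error
        measurement += 0.1 * u                       # toy first-order plant response
        print(round(u, 3), round(measurement, 3))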
--------------------------------------------------------------------------------
/src/MachineLearning/Deep_Learning/dqn_cartpole.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 | import gym
4 | import tensorflow as tf
5 | from collections import deque
6 |
7 |
8 | ENV_NAME = 'CartPole-v0'
9 | EPISODE = 10000 # Episode limitation
10 | STEP = 300 # Step limitation in an episode
11 |
12 |
13 | class DQN(object):
14 |
15 | """A DQN testing and learning class"""
16 |
17 | def __init__(self, env, mem=1000, phimem=4, skip_frame=0):
18 | self.env = env
19 | self.mem = mem
20 | self.phimem = phimem
21 | self.phi_state = deque()
22 | self.skip_frame = skip_frame
23 | self.state = self.env.reset()
24 | self.sess = tf.InteractiveSession()
25 | self.experience_pool = deque()
26 | self.n_state = self.env.observation_space.shape[0] * self.phimem
27 | self.n_act = self.env.action_space.n
28 | self._create_net(40)
29 |
30 | def _create_net(self, hidden_size=20):
31 | # Creating Q net
32 | self._xq = tf.placeholder(tf.float32, shape=[None, self.n_state], name='x')
33 | self._y_q = tf.placeholder(tf.float32, shape=[None, self.n_act])
34 | self._weightsq = [
35 | tf.Variable(tf.random_normal([self.n_state, hidden_size], stddev=0.1)),
36 | tf.Variable(tf.random_normal([hidden_size, self.n_act], stddev=0.1))]
37 | self._biasesq = [
38 | tf.Variable(tf.constant(0.1, shape=[hidden_size])),
39 | tf.Variable(tf.constant(0.1, shape=[self.n_act]))]
40 |
41 | self.lq = tf.nn.relu(tf.add(tf.matmul(self._xq, self._weightsq[0]), self._biasesq[0]))
42 | self.q = tf.add(tf.matmul(self.lq, self._weightsq[1]), self._biasesq[1])
43 |
44 | # Creating Q* net
45 | self._x = tf.placeholder(tf.float32, shape=[None, self.n_state], name='x')
46 | self._y_q_ = tf.placeholder(tf.float32, shape=[None, self.n_act])
47 | self._weightsq_ = [
48 | tf.Variable(tf.random_normal([self.n_state, hidden_size], stddev=0.1)),
49 | tf.Variable(tf.random_normal([hidden_size, self.n_act], stddev=0.1))]
50 | self._biasesq_ = [
51 | tf.Variable(tf.constant(0.1, shape=[hidden_size])),
52 | tf.Variable(tf.constant(0.1, shape=[self.n_act]))]
53 |
54 | self.lq_ = tf.nn.relu(tf.add(tf.matmul(self._x, self._weightsq_[0]), self._biasesq_[0]))
55 | self.q_ = tf.add(tf.matmul(self.lq_, self._weightsq_[1]), self._biasesq_[1])
56 |
57 |         # Loss
58 | self.y = tf.placeholder(tf.float32, [None, self.n_act])
59 | self.loss = tf.square(self.y - self.q)
60 | self.optimizer = tf.train.RMSPropOptimizer(0.01,
61 | momentum=0.1,
62 | use_locking=True,
63 | centered=True).minimize(self.loss)
64 |
65 | # Initialize
66 | tf.global_variables_initializer().run()
67 | self.sync_theta()
68 |
69 |     def sync_theta(self):
70 |         # Copy Q-net parameters into the target net with tf.assign; plain Python rebinding would only alias the references
71 |         for i in range(2):
72 |             self.sess.run([self._biasesq_[i].assign(self._biasesq[i]), self._weightsq_[i].assign(self._weightsq[i])])
73 |
74 | def phi(self, state=None, reset=False):
75 | if reset:
76 | self.phi_state = deque()
77 | return
78 |         if state is None:
79 | state = list(self.state)
80 | if np.shape(self.phi_state)[0] < self.phimem:
81 | self.phi_state = deque([state] * (self.phimem - np.shape(self.phi_state)[0]))
82 | else:
83 | self.phi_state.appendleft(state)
84 | self.phi_state.pop()
85 | ret = []
86 | for i in self.phi_state:
87 | ret.extend(i)
88 | return np.mat(ret)
89 |
90 | def play(self, epsilon=0.05):
91 | if np.random.rand() <= epsilon:
92 | act = np.random.randint(0, self.n_act)
93 | else:
94 | state = self.phi()
95 | result = list(self.q.eval(feed_dict={self._xq: state}))
96 | act = result.index(max(result))
97 | return self.env.step(act), act
98 |
99 | def train(self, gamma=0.9, EPISODE=5000, STEP=5000, minibatch=32, C=10):
100 | c = 0
101 | epsilon = 1
102 | for episode in range(EPISODE):
103 | self.state = self.env.reset()
104 | self.phi(reset=True)
105 | state = self.phi()
106 | total_reward = 0
107 | total_state = []
108 |
109 | for t in range(STEP):
110 | (self.state, reward, done, _), act = self.play(epsilon)
111 | #reward = 0 if done else 0.1
112 | total_reward += reward
113 |
114 | state1 = self.phi()
115 | total_state.append([state, reward, act, state1])
116 | print(act, end=' ')
117 | self.env.render()
118 | if len(self.experience_pool) > self.mem:
119 |                     epsilon = epsilon - 0.001 if epsilon > 0.05 else 0.05  # decay epsilon down to its 0.05 floor
120 | while len(self.experience_pool) > self.mem:
121 | self.experience_pool.pop()
122 | state = state1
123 |
124 | if done:
125 | for index, content in enumerate(total_state):
126 | total_state[index][1] = total_reward
127 | total_state[-1][1] = 0
128 | self.experience_pool.extendleft(total_state)
129 | print(epsilon)
130 | for i in range(200):
131 |                         # Replay the sampled experience
132 |                         if len(self.experience_pool) >= self.mem:
133 |                             training_set = random.sample(self.experience_pool, minibatch)
134 |                             phi0 = np.vstack([i[0] for i in training_set])
135 |                             phi1 = np.vstack([i[-1] for i in training_set])
136 | r = np.mat([i[1] for i in training_set]).T
137 | y = r + gamma * self.q_.eval(feed_dict={self._x: phi1})
138 | self.optimizer.run(feed_dict={self._xq: phi0, self.y: y})
139 | c += 1
140 | if c > C:
141 | self.sync_theta()
142 | break
143 |
144 | def main():
145 | env = gym.make(ENV_NAME)
146 | agent = DQN(env)
147 | print(agent._xq.get_shape())
148 | agent.train()
149 | print(len(agent.phi_state))
150 | print(len(agent.phi()))
151 | for i in range(1000):
152 | agent.play(epsilon=0)
153 | return agent
154 |
155 |
156 | if __name__ == '__main__':
157 | agent = main()
158 |
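
For reference, train() above regresses every action's Q-value toward r + gamma*Q'(s'); the standard DQN target instead updates only the taken action, using the max over next-state actions. A sketch of that variant in plain numpy (a hypothetical helper, not part of the file):

    import numpy as np

    def dqn_targets(q_eval, q_next, acts, rewards, gamma=0.9):
        # q_eval: (batch, n_act) current-net Q(s, .); q_next: (batch, n_act) target-net Q(s', .)
        y = q_eval.copy()
        y[np.arange(len(acts)), acts] = rewards + gamma * q_next.max(axis=1)
        return y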
--------------------------------------------------------------------------------
/src/MachineLearning/Deep_Learning/pg_cartpole.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Policy Gradient for CartPole-v0"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {
14 | "collapsed": true
15 | },
16 | "outputs": [],
17 | "source": [
18 | "from pylab import *\n",
19 | "import gym"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": null,
25 | "metadata": {
26 | "collapsed": true
27 | },
28 | "outputs": [],
29 | "source": [
30 | "ENV_NAME = 'CartPole-v0'\n",
31 | "EPISODE = 10000 # Episode limitation\n",
32 | "STEP = 300 # Step limitation in an episode"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "collapsed": true
40 | },
41 | "outputs": [],
42 | "source": [
43 | "env = gym.make(ENVNAME)\n",
44 | "env.reset()"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {
51 | "collapsed": true
52 | },
53 | "outputs": [],
54 | "source": []
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {
60 | "collapsed": true
61 | },
62 | "outputs": [],
63 | "source": []
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": null,
68 | "metadata": {
69 | "collapsed": true
70 | },
71 | "outputs": [],
72 | "source": []
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": null,
77 | "metadata": {
78 | "collapsed": true
79 | },
80 | "outputs": [],
81 | "source": []
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {
87 | "collapsed": true
88 | },
89 | "outputs": [],
90 | "source": []
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {
96 | "collapsed": true
97 | },
98 | "outputs": [],
99 | "source": []
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {
105 | "collapsed": true
106 | },
107 | "outputs": [],
108 | "source": []
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {
114 | "collapsed": true
115 | },
116 | "outputs": [],
117 | "source": []
118 | }
119 | ],
120 | "metadata": {
121 | "kernelspec": {
122 | "display_name": "Python [conda root]",
123 | "language": "python",
124 | "name": "conda-root-py"
125 | },
126 | "language_info": {
127 | "codemirror_mode": {
128 | "name": "ipython",
129 | "version": 3
130 | },
131 | "file_extension": ".py",
132 | "mimetype": "text/x-python",
133 | "name": "python",
134 | "nbconvert_exporter": "python",
135 | "pygments_lexer": "ipython3",
136 | "version": "3.5.3"
137 | }
138 | },
139 | "nbformat": 4,
140 | "nbformat_minor": 2
141 | }
142 |
--------------------------------------------------------------------------------
/src/MachineLearning/TensorFlow/Batcher.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import pandas as pd
4 | import numpy as np
5 | from sklearn.model_selection import train_test_split
6 | from sklearn.utils import shuffle
7 |
8 | __author__ = 'the0demiurge'
9 |
10 |
11 | class Batcher(object):
12 |
13 | '''create a batcher with the same api as tensorflow
14 | usage:
15 | data = Batcher(X, Y)
16 | batch_xs, batch_ys = data.next_batch(100)
17 | '''
18 | _batch_position = 0
19 |
20 | def __init__(
21 | self,
22 | X,
23 | Y,
24 | train_size=None,
25 | test_size=None,
26 | random_state=None,
27 | to_shuffle=True):
28 | '''
29 | Args:
30 |         X, Y: array-like, numpy.ndarray, or pandas.DataFrame; training
31 |             inputs and targets
32 | train_size : float, int, or None (default is None)
33 | If float, should be between 0.0 and 1.0 and represent the
34 | proportion of the dataset to include in the train split. If
35 | int, represents the absolute number of train samples. If None,
36 | the value is automatically set to the complement of the test size.
37 | test_size : float, int, or None (default is None)
38 | If float, should be between 0.0 and 1.0 and represent the
39 | proportion of the dataset to include in the test split. If
40 | int, represents the absolute number of test samples. If None,
41 | the value is automatically set to the complement of the train size.
42 | If train size is also None, test size is set to 0.25.
43 | random_state : int or RandomState
44 | Pseudo-random number generator state used for random sampling.
45 | to_shuffle: shuffle the data initially
46 | '''
47 | X = pd.DataFrame(X)
48 | Y = pd.DataFrame(Y)
49 |
50 | if X.shape[0] != Y.shape[0]:
51 |             raise ValueError('X and Y have different numbers of samples!')
52 |
53 | if to_shuffle:
54 | X, Y = shuffle(X, Y)
55 |
56 | if random_state is None:
57 | random_state = np.random.randint(0, 4294967295)
58 |
59 | (self.X_train,
60 | self.X_test,
61 | self.Y_train,
62 | self.Y_test) = train_test_split(
63 | X,
64 | Y,
65 | train_size=train_size,
66 | test_size=test_size,
67 | random_state=random_state)
68 |
69 | self.test_size = self.X_test.shape[0]
70 | self.train_size = self.X_train.shape[0]
71 |
72 | def next_batch(self, batch_size=100):
73 | '''returns next_batch data with batch_size
74 | Args:
75 | batch_size: size per batch
76 |
77 | Returns:
78 | batch_xs, batch_ys: numpy.ndarray
79 | '''
80 | assert int(batch_size) > 0, 'batch_size {} is not > 0'.format(
81 | batch_size)
82 | batch_xs = self.next_xbatch(batch_size)
83 | batch_ys = self.this_ybatch()
84 | return batch_xs, batch_ys
85 |
86 | def this_xbatch(self):
87 | batch_index = np.array(range(
88 | self._batch_position,
89 | self._batch_position + self._batch_size))
90 | batch_index %= self.train_size
91 | batch_xs = self.X_train.iloc[batch_index, :]
92 | return batch_xs.values
93 |
94 | def this_ybatch(self):
95 | batch_index = np.array(range(
96 | self._batch_position,
97 | self._batch_position + self._batch_size))
98 | batch_index %= self.train_size
99 | batch_ys = self.Y_train.iloc[batch_index, :]
100 | return batch_ys.values
101 |
102 | def next_xbatch(self, batch_size=100):
103 | assert int(batch_size) > 0, 'batch_size {} is not > 0'.format(
104 | batch_size)
105 | self._batch_size = batch_size
106 | self._batch_position += batch_size
107 | self._batch_position %= self.train_size
108 |
109 | batch_index = np.array(range(
110 | self._batch_position,
111 | self._batch_position + batch_size))
112 | batch_index %= self.train_size
113 | batch_xs = self.X_train.iloc[batch_index, :]
114 | return batch_xs.values
115 |
116 | def next_ybatch(self, batch_size=100):
117 | assert int(batch_size) > 0, 'batch_size {} is not > 0'.format(
118 | batch_size)
119 | self._batch_size = batch_size
120 | self._batch_position += batch_size
121 | self._batch_position %= self.train_size
122 |
123 | batch_index = np.array(range(
124 | self._batch_position,
125 | self._batch_position + batch_size))
126 | batch_index %= self.train_size
127 | batch_ys = self.Y_train.iloc[batch_index, :]
128 | return batch_ys.values
129 |
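
A runnable toy check of the Batcher above (module name `Batcher` assumed from the file name):

    import numpy as np
    from Batcher import Batcher

    X = np.arange(20).reshape(10, 2)
    Y = np.arange(10).reshape(10, 1)
    data = Batcher(X, Y, test_size=0.2, random_state=0)
    batch_xs, batch_ys = data.next_batch(4)  # cycles through the train split
    print(batch_xs.shape, batch_ys.shape)    # (4, 2) (4, 1)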
--------------------------------------------------------------------------------
/src/MachineLearning/TensorFlow/IncreaseNN.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import tensorflow as tf
5 | from tensorflow.examples.tutorials.mnist import input_data
6 |
7 |
8 | class IncreaseNN(object):
9 |
10 | def __init__(self, shape, X=None, Y=None, log_dir='/tmp/tf_charlesxu'):
11 |         if X is not None and Y is not None:
12 | shape.insert(0, X.shape[1])
13 | shape.append(Y.shape[1])
14 | tf.reset_default_graph()
15 | self.__shape = shape
16 | if not os.path.isdir(log_dir):
17 | os.makedirs(log_dir)
18 | self.log_dir = log_dir + '/' + self.curtime
19 | self.sess = tf.InteractiveSession()
20 |         self.net = self.inference(self.shape)
21 | tf.global_variables_initializer().run()
22 | self.__step = 0
23 |
24 | def _init_placeholders(self, shape):
25 | with tf.name_scope('input'):
26 | placeholders = {
27 | 'x': tf.placeholder(tf.float32, [None, shape[0]]),
28 | 'y': tf.placeholder(tf.float32, [None, shape[-1]])
29 | }
30 | return placeholders
31 |
32 | def _init_variables(self, shape):
33 |
34 | def init_weight_variable(shape):
35 | """Create a weight variable with appropriate initialization."""
36 | initial = tf.truncated_normal(shape, stddev=0.1)
37 | return tf.Variable(initial)
38 |
39 | def init_bias_variable(shape):
40 | """Create a bias variable with appropriate initialization."""
41 | initial = tf.constant(0.1, shape=shape)
42 | return tf.Variable(initial)
43 |
44 | variables = {
45 | 'weights': [init_weight_variable([a, b]) for a, b in zip(shape[:-1], shape[1:])],
46 | 'biases': [init_bias_variable([a]) for a in shape[1:]]
47 | }
48 | for index, (w, b) in enumerate(zip(variables['weights'], variables['biases'])):
49 | self._variable_summaries(w, 'weight_%d' % index)
50 | self._variable_summaries(b, 'bias_%d' % index)
51 | return variables
52 |
53 | def _init_layer(self, name, x, w, b, activation=tf.nn.relu):
54 | with tf.name_scope(name):
55 | z = tf.matmul(x, w) + b
56 | tf.summary.histogram('z', z)
57 | layer = activation(z)
58 | tf.summary.histogram('layer', layer)
59 | return layer
60 |
61 | def _init_layers(self, placeholders, variables):
62 | layers = [placeholders['x']]
63 | for index, (w, b) in enumerate(zip(variables['weights'][:-1], variables['biases'][:-1])):
64 | layer = self._init_layer('layer_%d' % index, layers[-1], w, b)
65 | layers.append(layer)
66 | layer = self._init_layer('last_layer', layers[-1], variables['weights'][-1], variables['biases'][-1], tf.identity)
67 | layers.append(layer)
68 | return layers
69 |
70 | def _init_loss(self, labels, logits):
71 | with tf.name_scope('cross_entropy'):
72 | diff = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
73 | with tf.name_scope('total'):
74 | cross_entropy = tf.reduce_mean(diff)
75 |             tf.summary.scalar('cross_entropy', cross_entropy)
76 | return cross_entropy
77 |
78 | def _init_accuracy(self, labels, logits):
79 | with tf.name_scope('accuracy'):
80 | with tf.name_scope('correct_prediction'):
81 | correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
82 | with tf.name_scope('accuracy'):
83 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
84 | tf.summary.scalar('accuracy', accuracy)
85 | return accuracy
86 |
87 |     def _init_train_ops(self, loss, learning_rate=0.01):
88 |         with tf.name_scope('train'):
89 |             train_step = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
90 | return train_step
91 |
92 |     def inference(self, shape):
93 |         inferences = dict()
94 |         inferences['placeholders'] = self._init_placeholders(shape)
95 |         inferences['variables'] = self._init_variables(shape)
96 |         inferences['layers'] = self._init_layers(inferences['placeholders'], inferences['variables'])
97 |         inferences['cross_entropy'] = self._init_loss(labels=inferences['placeholders']['y'], logits=inferences['layers'][-1])
98 |         inferences['accuracy'] = self._init_accuracy(labels=inferences['placeholders']['y'], logits=inferences['layers'][-1])
99 |         inferences['train_step'] = self._init_train_ops(inferences['cross_entropy'])
100 |         inferences['merged'] = tf.summary.merge_all()
101 |         inferences['summary'] = {
102 |             'train_writer': tf.summary.FileWriter(self.log_dir + '/train', self.sess.graph),
103 |             'test_writer': tf.summary.FileWriter(self.log_dir + '/test', self.sess.graph)}
104 |         return inferences
105 |
106 | def _feed_dict(self, data, net):
107 | xs, ys = data.next_batch(100)
108 | feed_dict = {
109 | net['placeholders']['x']: xs,
110 | net['placeholders']['y']: ys
111 | }
112 | return feed_dict
113 |
114 | def fit(self, data_train, data_test, epoches=1000):
115 | net = self.net
116 | for epoch in range(epoches):
117 | # testing
118 | if epoch % 1 == 0:
119 | run_metadata = tf.RunMetadata()
120 | summary, test_accuracy = self.sess.run(
121 | [net['merged'], net['accuracy']],
122 | feed_dict=self._feed_dict(data_test, net),
123 | run_metadata=run_metadata)
124 | net['summary']['test_writer'].add_run_metadata(run_metadata, 'step%06d' % self.__step)
125 | net['summary']['test_writer'].add_summary(summary, self.__step)
126 | print(self.__step, test_accuracy, end='\n')
127 |
128 | # training
129 | run_metadata = tf.RunMetadata()
130 | summary, _ = self.sess.run(
131 | [net['merged'], net['train_step']],
132 | feed_dict=self._feed_dict(data_train, net),
133 | run_metadata=run_metadata)
134 | net['summary']['train_writer'].add_run_metadata(run_metadata, 'step%06d' % self.__step)
135 | net['summary']['train_writer'].add_summary(summary, self.__step)
136 | self.__step += 1
137 | self.__step += 100
138 |
139 | def increase(self, shape):
140 | old_variables = {
141 | 'weights': [var.eval() for var in self.net['variables']['weights']],
142 | 'biases': [var.eval() for var in self.net['variables']['biases']]
143 | }
144 | self.sess.close()
145 | tf.reset_default_graph()
146 | self.sess = tf.InteractiveSession()
147 |         inferences = dict()
148 |         inferences['placeholders'] = self._init_placeholders(shape)
149 |
150 | variables = {
151 | 'weights': [self._increase_variable(
152 | [a, b], old_variables['weights'][index] if index < len(old_variables['weights']) else None
153 | ) for index, (a, b) in enumerate(zip(shape[:-1], shape[1:]))],
154 | 'biases': [self._increase_variable(
155 | [a], old_variables['biases'][index] if index < len(old_variables['biases']) else None
156 | ) for index, a in enumerate(shape[1:])]
157 | }
158 |
159 | for index, (w, b) in enumerate(zip(variables['weights'], variables['biases'])):
160 | self._variable_summaries(w, 'weight_%d' % index)
161 | self._variable_summaries(b, 'bias_%d' % index)
162 |
163 |         inferences['variables'] = variables
164 |         inferences['layers'] = self._init_layers(inferences['placeholders'], inferences['variables'])
165 |         inferences['cross_entropy'] = self._init_loss(labels=inferences['placeholders']['y'], logits=inferences['layers'][-1])
166 |         inferences['accuracy'] = self._init_accuracy(labels=inferences['placeholders']['y'], logits=inferences['layers'][-1])
167 |         inferences['train_step'] = self._init_train_ops(inferences['cross_entropy'])
168 |         inferences['merged'] = tf.summary.merge_all()
169 |         inferences['summary'] = {
170 |             'train_writer': tf.summary.FileWriter(self.log_dir + '/train', self.sess.graph),
171 |             'test_writer': tf.summary.FileWriter(self.log_dir + '/test', self.sess.graph)}
172 |         self.net = inferences
173 |         tf.global_variables_initializer().run()
174 |         return inferences
175 |
176 | def predict(self, data_x):
177 | return self.net['layers'][-1].eval(session=self.sess, feed_dict={self.net['placeholders']['x']: data_x})
178 |
179 | def _variable_summaries(self, variable, name='var'):
180 | """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
181 | with tf.name_scope(name):
182 | mean = tf.reduce_mean(variable)
183 | tf.summary.scalar('mean', mean)
184 | with tf.name_scope('stddev'):
185 | stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
186 | tf.summary.scalar('stddev', stddev)
187 | tf.summary.scalar('max', tf.reduce_max(variable))
188 | tf.summary.scalar('min', tf.reduce_min(variable))
189 | tf.summary.histogram('histogram', variable)
190 |
191 | def _increase_variable(self, shape, from_variable=None):
192 | if from_variable is None:
193 | to_values = np.random.randn(*shape) / 10
194 | var = tf.Variable(to_values, dtype=tf.float32)
195 | tf.variables_initializer([var]).run()
196 | return var
197 |
198 | if isinstance(from_variable, tf.Variable):
199 | try:
200 | from_variable.eval(session=self.sess)
201 | except tf.errors.FailedPreconditionError:
202 | tf.variables_initializer([from_variable]).run()
203 | from_shape = from_variable.get_shape().as_list()
204 | from_values = from_variable.eval(session=self.sess)
205 | elif isinstance(from_variable, np.ndarray):
206 | from_shape = from_variable.shape
207 | from_values = from_variable
208 | else:
209 |             raise TypeError('Unrecognised type %s' % str(type(from_variable)))
210 | to_values = np.random.randn(*shape) / 10
211 | transfer_shape = [min(dim) for dim in zip(from_shape, shape)]
212 |
213 | if len(from_shape) == 1:
214 | to_values += 0.1
215 | to_values[:transfer_shape[0]] = from_values[:transfer_shape[0]]
216 | else:
217 | to_values[..., :transfer_shape[-2], :transfer_shape[-1]] = from_values[..., :transfer_shape[-2], :transfer_shape[-1]]
218 | var = tf.Variable(to_values, dtype=tf.float32)
219 | tf.variables_initializer([var]).run()
220 | return var
221 |
222 | @property
223 | def curtime(self):
224 | cur_time = time.strftime('%Y-%m-%d_%X', time.localtime(time.time()))
225 | return cur_time
226 |
227 | @property
228 | def shape(self):
229 | return self.__shape
230 |
231 |
232 | def original(data):
233 | network = IncreaseNN([784, 20, 10], log_dir='/tmp/tf_charlesxu/original_wide')
234 | network.fit(data.train, data.test, epoches=100)
235 | for hidden in range(20):
236 | network.increase([784, 20, 10])
237 | network.fit(data.train, data.test, epoches=100)
238 |
239 | structure = [784] + [30] * 19 + [10]
240 | network = IncreaseNN(structure, log_dir='/tmp/tf_charlesxu/original_deep')
241 | network.fit(data.train, data.test, epoches=100)
242 | for hidden in range(20):
243 | network.increase(structure)
244 | network.fit(data.train, data.test, epoches=100)
245 |
246 |
247 | def widen(data):
248 | network = IncreaseNN([784, 1, 10], log_dir='/tmp/tf_charlesxu/widen')
249 | network.fit(data.train, data.test, epoches=100)
250 | for hidden in range(20):
251 | network.increase([784, hidden + 2, 10])
252 | network.fit(data.train, data.test, epoches=100)
253 |
254 |
255 | def deepen(data):
256 | structure = [784, 30, 10]
257 | network = IncreaseNN(structure, log_dir='/tmp/tf_charlesxu/deepen')
258 | network.fit(data.train, data.test, epoches=100)
259 | for hidden in range(20):
260 | structure.insert(1, 30)
261 | network.increase(structure)
262 | network.fit(data.train, data.test, epoches=100)
263 |
264 |
265 | def main():
266 | data_path = '/home/charlesxu/Workspace/data/MNIST_data/'
267 | data = input_data.read_data_sets(data_path, one_hot=True)
268 |
269 | original(data)
270 | widen(data)
271 | deepen(data)
272 |
273 |
274 | if __name__ == '__main__':
275 | main()
277 |
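The increase()/_increase_variable machinery above grows a network while preserving learned weights: the overlapping slice of each old weight matrix is copied into a freshly initialized larger one. The value-level effect, shown with plain numpy:

    import numpy as np

    old = np.arange(6.0).reshape(2, 3)   # old (2, 3) weight matrix
    new = np.random.randn(4, 5) / 10     # new, larger (4, 5) initialization
    new[..., :2, :3] = old               # transfer the overlapping block
    print(new[:2, :3])                   # == old; the remaining entries stay random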
--------------------------------------------------------------------------------
/src/MachineLearning/TensorFlow/TensorflowLearning/Untitled.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 13,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "sess = tf.InteractiveSession()"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 43,
24 | "metadata": {},
25 | "outputs": [
26 | {
27 | "name": "stdout",
28 | "output_type": "stream",
29 | "text": [
30 | "[[ 1.41028273 1.76107073 2.1326015 3.25804257]\n",
31 | " [-0.29294467 1.32900047 3.68154383 3.18772244]\n",
32 | " [-0.45015633 1.11059964 3.40281439 3.56811571]\n",
33 | " [ 0.69193411 1.46399367 3.12278247 4.01428461]\n",
34 | " [ 1.87994528 1.89346516 1.92173779 2.68355751]\n",
35 | " [ 2.22133255 3.30972195 3.50474811 4.51246405]\n",
36 | " [ 1.25771391 0.31267393 2.96506882 4.30967855]]\n",
37 | "mean= [ 0.69804859 1.60123134 3.57658982 4.10836506] \n",
38 | "var= [ 1.01954579 1.23900151 0.58759832 0.73256236]\n"
39 | ]
40 | }
41 | ],
42 | "source": [
43 | "x = tf.constant([[1,2,3,4]]*7, tf.float32)\n",
44 | "y = tf.truncated_normal(x.shape)\n",
45 | "z = x + y\n",
46 | "mean, var = tf.nn.moments(z, 0)\n",
47 | "print(z.eval())\n",
48 | "print('mean=', mean.eval(), '\\nvar=',var.eval())"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": 44,
54 | "metadata": {},
55 | "outputs": [
56 | {
57 | "name": "stdout",
58 | "output_type": "stream",
59 | "text": [
60 | "[[ 1.53355587 0.38661456 0.10948229 1.430058 ]\n",
61 | " [-0.27069056 0.80823851 -0.27680206 -1.31970453]\n",
62 | " [ 0.63089526 -1.77756488 1.54642105 0.50779915]\n",
63 | " [ 0.92064953 -0.56752062 0.82397366 0.3272028 ]\n",
64 | " [-1.43046248 -0.67386436 -1.86051989 0.67843962]\n",
65 | " [-0.28913945 1.45824242 -0.56958675 -0.08079386]\n",
66 | " [-1.09480834 0.36585569 0.22702885 -1.54300475]]\n",
67 | "mean= [ -2.55448480e-08 1.70298989e-07 -3.40597985e-08 -2.04358784e-07] \n",
68 | "var= [ 0.99983579 0.99980336 0.99982071 0.99983197]\n"
69 | ]
70 | }
71 | ],
72 | "source": [
73 | "w = tf.nn.batch_normalization(z,mean,var,0,1,1e-4)\n",
74 | "mean, var = tf.nn.moments(w, 0)\n",
75 | "print(w.eval())\n",
76 | "print('mean=', mean.eval(), '\\nvar=',var.eval())"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": []
85 | }
86 | ],
87 | "metadata": {
88 | "kernelspec": {
89 | "display_name": "Python [conda root]",
90 | "language": "python",
91 | "name": "conda-root-py"
92 | },
93 | "language_info": {
94 | "codemirror_mode": {
95 | "name": "ipython",
96 | "version": 3
97 | },
98 | "file_extension": ".py",
99 | "mimetype": "text/x-python",
100 | "name": "python",
101 | "nbconvert_exporter": "python",
102 | "pygments_lexer": "ipython3",
103 | "version": "3.5.4"
104 | }
105 | },
106 | "nbformat": 4,
107 | "nbformat_minor": 2
108 | }
109 |
--------------------------------------------------------------------------------
/src/MachineLearning/TensorFlow/list_devices.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from tensorflow.python.client import device_lib
3 | print(device_lib.list_local_devices())
4 |
5 |
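If only a subset of GPUs should show up, the standard CUDA_VISIBLE_DEVICES environment variable can be set before TensorFlow is imported; a small sketch (the '0' is just an example device id):

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'    # expose only the first GPU
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())      # now lists the CPU and gpu:0 only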
--------------------------------------------------------------------------------
/src/MachineLearning/TensorFlow/multi-gpu.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import tensorflow as tf
3 | from tensorflow.examples.tutorials import mnist
4 |
5 | data = mnist.input_data.read_data_sets('MNIST_data/', one_hot=True)
6 | # making layers
7 | "we finally reached the goal that talk with each other with English.. . haha"
8 |
9 |
10 | def relu_layer(bef, size):
11 | w = tf.Variable(tf.random_normal(size, stddev=.1, dtype=tf.float16))
12 | b = tf.Variable(.1 * tf.ones(size[-1], dtype=tf.float16))
13 | return tf.nn.relu(tf.add(tf.matmul(bef, w), b))
14 |
15 |
16 | def linear_layer(bef, size):
17 | w = tf.Variable(tf.random_normal(size, stddev=.1, dtype=tf.float16))
18 | b = tf.Variable(tf.zeros(size[-1], dtype=tf.float16))
19 | return tf.add(tf.matmul(bef, w), b)
20 |
21 |
22 | # note: tf.Graph().as_default() returns a context manager; calling it outside
23 | # a 'with' block is a no-op, so all ops below are built in the default graph
24 |
25 | # define the layers (weights and biases) and pin them to specific CPUs/GPUs
26 | with tf.device('/cpu:0'):
27 | x = tf.placeholder(tf.float16, [None, 784])
28 | y_ = tf.placeholder(tf.float16, [None, 10])
29 |
30 | # the first layer uses the most computing resources
31 | with tf.device('/gpu:0'):
32 | l1 = relu_layer(x, [784, 500])
33 |
34 | # assign gpu:0 and gpu:1 to one network because they can communicate with each other; DO NOT use /gpu:1 and /gpu:2
35 | with tf.device('/gpu:1'):
36 | # l2 = relu_layer(l1, [500, 800])
37 | l3 = relu_layer(l1, [500, 200])
38 | l4 = linear_layer(l3, [200, 10])
39 | y = tf.nn.softmax(l4)
40 |
41 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=l4, labels=y_))
42 |
43 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
44 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float16))
45 |
46 | step = tf.train.RMSPropOptimizer(7e-6).minimize(cost)
47 |
48 | sess = tf.InteractiveSession()
49 | sess.run(tf.global_variables_initializer())
50 |
51 | for i in range(20000):
52 | batch = data.train.next_batch(50)
53 | if i % 100 == 0:
54 | train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1]})
55 | print("step %d, training accuracy %g" % (i, train_accuracy))
56 | step.run(feed_dict={x: batch[0], y_: batch[1]})
57 |
58 | print("test accuracy %g" % accuracy.eval(feed_dict={x: data.test.images, y_: mnist.test.labels}))
59 |
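To confirm the ops actually land on the devices requested above, TF1's session config can log placements; a sketch that would replace the InteractiveSession line (both flags are standard TF1 options):

# log_device_placement prints each op's device at session creation;
# allow_soft_placement falls back to CPU when a GPU kernel is unavailable
sess = tf.Session(config=tf.ConfigProto(
    log_device_placement=True,
    allow_soft_placement=True))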
--------------------------------------------------------------------------------
/src/MachineLearning/algorithm/DecisionTree.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class DT(object):
5 | """Decision Tree"""
6 |
7 | def __init__(self, arg):
8 | super(DT, self).__init__()
9 | self.arg = arg
10 |
--------------------------------------------------------------------------------
/src/MachineLearning/algorithm/DeepReinforcementLearning/ReplayBuffer.py:
--------------------------------------------------------------------------------
1 | import random
2 | from collections import deque
3 |
4 |
5 | class ReplayBuffer(object):
6 | """ReplayBuffer for DRL
7 |     Accepts tuples of (state, act, reward, next_state)"""
8 |
9 |     def __init__(self, maxlen, data=()):  # immutable default avoids the shared mutable-default pitfall
10 | self._maxlen = maxlen
11 | self._buffer = deque(data, maxlen=maxlen)
12 |
13 | def __len__(self):
14 | return len(self._buffer)
15 |
16 | def sample(self, batch_size=32):
17 | if len(self._buffer) <= batch_size:
18 | return list(self._buffer)
19 | else:
20 | return random.sample(self._buffer, batch_size)
21 |
22 | def append(self, data):
23 | self._buffer.appendleft(data)
24 |
25 | def extend(self, data_list):
26 | self._buffer.extendleft(data_list)
27 |
28 | def clear(self):
29 | self._buffer.clear()
30 |
31 | @property
32 | def content(self):
33 | return list(self._buffer)
34 |
35 | @property
36 | def maxlen(self):
37 | return self._maxlen
38 |
39 | def __repr__(self):
40 | return self._buffer.__repr__()
41 |
42 | def __str__(self):
43 | return self._buffer.__str__()
44 |
45 |
46 | def test_buffer():
47 | data = [(1, 2, 3)] * 5 + [(2, 3, 4)] * 3
48 | buffer = ReplayBuffer(7, data)
49 | print(buffer.content)
50 | buffer.append((3, 4, 5))
51 | print(buffer.content)
52 | buffer.extend([(6, 4, 2)] * 2)
53 | print(buffer.content, len(buffer))
54 | print(buffer.sample(3), len(buffer))
55 | print(buffer.sample(7))
56 | print(buffer.sample(8))
57 | buffer.clear()
58 | print(buffer.content, len(buffer))
59 |
60 |
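Because the buffer appends on the left, a bounded deque evicts from the right, i.e. the oldest transitions fall out first. A tiny stdlib-only check of that semantics:

from collections import deque

d = deque([1, 2, 3], maxlen=3)
d.appendleft(0)                  # newest goes to the front...
assert list(d) == [0, 1, 2]      # ...and 3, the rightmost/oldest item, was evicted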
--------------------------------------------------------------------------------
/src/MachineLearning/algorithm/DeepReinforcementLearning/SimplePolicyGradient.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import gym
3 | import tensorflow as tf
4 | import numpy as np
5 | from tensorflow.contrib import slim
6 | from ReplayBuffer import ReplayBuffer
7 |
8 | env = gym.make('CartPole-v0')
9 | env.reset()
10 |
11 | buf = ReplayBuffer(5000)
12 | x = tf.placeholder(tf.float32, [None, 4], name='x')
13 | a = tf.placeholder(tf.float32, [None, 2], name='a')
14 | r = tf.placeholder(tf.float32, [None, 1], name='r')
15 |
16 |
17 | def build_net(x, a, r):
18 | h1 = slim.layers.fully_connected(x, 20)
19 | h2 = slim.layers.fully_connected(h1, 12)
20 | h3 = slim.layers.fully_connected(h2, 2, activation_fn=tf.identity)
21 | y = tf.nn.softmax(h3)
22 | logp = tf.log(y)
23 |
24 | # good_probabilities = tf.reduce_sum(tf.multiply(y, a), reduction_indices=[1])
25 | # # maximize the log probability
26 | # log_probabilities = tf.log(good_probabilities)
27 | # loss = -tf.reduce_sum(log_probabilities)
28 |
29 | loss = -tf.reduce_sum(
30 | tf.reduce_sum(
31 | tf.multiply(tf.multiply(r, logp), a),
32 | reduction_indices=[1]))
33 | return y, logp, h3, loss
34 |
35 |
36 | y, logp, h, loss = build_net(x, a, r)
37 |
38 | optimizer = tf.train.RMSPropOptimizer(0.001).minimize(loss)
39 |
40 | sess = tf.InteractiveSession()
41 | tf.global_variables_initializer().run()
42 | buf.clear()
43 |
44 |
45 | def get_act(feed_dict):
46 | act = sess.run(y, feed_dict=feed_dict)
47 | if np.random.uniform() <= act[0][0]:
48 | return np.array([1, 0])
49 | else:
50 | return np.array([0, 1])
51 |
52 |
53 | render_close = True
54 | for i in range(40000):
55 | if i > 4000:
56 | render_close = False
57 | s = env.reset()
58 | total_reward = 0
59 | done = False
60 |
61 | bbuf = list()
62 | while not done:
63 |         # repeat trials to collect data
64 | act = get_act({x: np.reshape(s, [1, 4])})
65 | s_next, reward, done, _ = env.step(act[1])
66 | total_reward += reward
67 | bbuf.append([s, act, total_reward, s_next])
68 | env.render(close=render_close)
69 | s = s_next
70 |     else:  # while-else: runs once the episode finishes without a break
71 | for j in bbuf:
72 | j[2] = total_reward
73 | buf.extend(bbuf)
74 |     if i % 150 == 140:
75 |         for _ in range(4000):  # '_' instead of 'i' so the episode counter isn't shadowed
76 |             # train the network on the collected data
77 | st, at, rt, sn = zip(*buf.sample())
78 | rt = np.reshape(np.array(rt), [-1, 1])
79 |             # center and rescale rt
80 | rt_norm = (rt - min(rt)) / (np.sqrt(np.var(rt)) + 1)
81 | feed_dict = {
82 | x: np.array(st),
83 | a: np.array(at),
84 | r: rt_norm
85 | }
86 | loss_runned, y_runned, _ = sess.run([loss, y, optimizer], feed_dict=feed_dict)
87 | print('loss: ', loss_runned, 'y', y_runned[0])
88 |
89 |
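The surrogate loss above is the REINFORCE objective -sum(r * log pi * a) restricted to the taken (one-hot) actions. For a softmax head its gradient w.r.t. the logits has a closed form, -r * (a - pi); a toy numpy check with made-up numbers:

import numpy as np

pi = np.array([0.7, 0.3])   # softmax output for one state (hypothetical)
a = np.array([1.0, 0.0])    # one-hot action that was taken
r = 2.0                     # normalized return
# d(-r * log pi[action]) / d logits = -r * (a - pi)
grad_logits = -r * (a - pi)
print(grad_logits)          # [-0.6  0.6]; gradient descent then pushes the taken action's logit up when r > 0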
--------------------------------------------------------------------------------
/src/MachineLearning/algorithm/LDA.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from sklearn.datasets import load_breast_cancer
3 | from tsne import *
4 | import pandas as pd
5 | from pylab import *
6 | import seaborn as sns
7 | from functools import reduce
8 | from sklearn.preprocessing import MinMaxScaler
9 | from sklearn.model_selection import train_test_split
10 | data = load_breast_cancer()
11 |
12 | x, y, label_names = data['data'], data['target'], data['target_names']
13 | scaler = MinMaxScaler()
14 | scaler.fit(x)
15 | x = scaler.transform(x)
16 |
17 | tsne(x, y, label_names=label_names)
18 |
19 | trainx, testx, trainy, testy = train_test_split(x, reshape(y, [-1, 1]), test_size=0.1)
20 |
21 |
22 | def variance(x, u):
23 | x = reshape(x, [-1, 1])
24 | return (x - u).dot(x - u).T
25 |
26 |
27 | def lda2(x, y):
28 | x0, x1 = mat(x[y.T[0] == 0]), mat(x[y.T[0] == 1])
29 | u0, u1 = x0.mean(axis=0), x1.mean(axis=0)
30 |
31 | sigma0, sigma1 = reduce(lambda x, y: x + y, [variance(i, u0) for i in x0]), reduce(lambda x, y: x + y, [variance(i, u1) for i in x1])
32 |
33 | w = (u0 - u1).dot(pinv(sigma0 / x0.shape[0] + sigma1 / x1.shape[0]))
34 | return w
35 |
36 |
37 | def transform(w, x):
38 | return w.dot(x.T).T
39 |
40 |
41 | def predict(reduced, threshold, classes=None):
42 | if isinstance(threshold, (int, float)):
43 | threshold = [threshold]
44 | if classes is None:
45 | classes = range(len(threshold))
46 | threshold.sort()
47 | predicted = zeros(reduced.shape)
48 | for i, j, c in zip(threshold[:-1], threshold[1:], classes[1:-1]):
49 |         predicted[(reduced >= i) & (reduced < j)] = c  # '&', not 'and': elementwise on arrays
50 |     predicted[reduced < threshold[0]] = classes[0]
51 |     predicted[reduced >= threshold[-1]] = classes[-1]
52 | return predicted
53 |
54 |
55 | def plot_prediction(reduced, predicted, target, threshold, label_names=None):
56 | colors = 'rgbcmyk'
57 | marks = '+xo.*'
58 | classes = set(list(target))
59 | if label_names is None:
60 | label_names = [None] * len(classes)
61 | for i in classes:
62 |         mask = target == i
63 |         plot(reduced[mask], predicted[mask], '{}{}'.format(colors[i], marks[i]), label=label_names[i])
64 | plot([threshold, threshold], [-.5, 1.5], 'grey', label='Classify boundary')
65 |     xlabel('Result after dimension reduction')
66 | ylabel('Predicted')
67 | legend()
68 |
69 |
70 | def plot_prediction2(transformed, predicted, y, label_names, threshold):
71 | plot([threshold, threshold], [-50, 50], 'grey', label='Classify boundary')
72 |     t = pd.DataFrame(transformed, columns=['Dimension Reduction Result After LDA Transform'])
73 | p = pd.DataFrame([label_names[int(i[0])] for i in predicted], columns=['prediction'])
74 | lab = pd.DataFrame([label_names[int(i[0])] for i in y], columns=['label'])
75 | data_t = pd.concat([t, p, lab], axis=1)
76 |     sns.swarmplot(y='label', x='Dimension Reduction Result After LDA Transform', hue='prediction', data=data_t)
77 |
78 |
79 | w = lda2(trainx, trainy)
80 | threshold = -0.061
81 |
82 | traint = transform(w, trainx)
83 | trainp = predict(traint, threshold, [0, 1])
84 | trainerr = mean(abs(reshape(trainp, [1, -1]) - reshape(trainy, [1, -1])))
85 |
86 | testt = transform(w, testx)
87 | testp = predict(testt, threshold, [0, 1])
88 | testerr = mean(abs(reshape(testp, [1, -1]) - reshape(testy, [1, -1])))
89 |
90 |
91 | figure()
92 | plot_prediction2(
93 | np.vstack([traint, testt]),
94 | np.vstack([mat(trainp) + 4, mat(testp) + 6]),
95 | np.vstack([trainy, testy + 2]),
96 | list(map(lambda x: 'training set ' + x, label_names)) +
97 | list(map(lambda x: 'testing set ' + x, label_names)) +
98 | list(map(lambda x: 'train prediction ' + x, label_names)) +
99 | list(map(lambda x: 'test prediction ' + x, label_names)),
100 | threshold)
101 | print('train err:', trainerr,
102 | '\ntest err:', testerr)
103 | show()
104 | # figure()
105 | # plot_prediction(transformed, predicted, y, label_names)
106 |
107 |
108 | # \documentclass{article}
109 | # \usepackage{amsmath}
110 | # \usepackage{amssymb}
111 | # \usepackage{ctex}
112 | # \begin{document}
113 | # LDA:
114 | # $\omega = S^{-1}_\omega(\mu_0 - \mu_1)$\\
115 | # $S^{-1}_\omega = V\Sigma^{-1}U^T$\\
116 | # computed via singular value decomposition; in practice $S^{-1}_\omega$ can be taken directly as pinv (the pseudo-inverse)
117 | # $S_\omega = \Sigma_0 + \Sigma_1 $
118 |
119 | # \end{document}
120 |
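As a cross-check of the hand-rolled direction, sklearn ships the same two-class LDA; a sketch (assumes the variables of this script are in scope; coefficients match only up to scale and sign):

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

clf = LinearDiscriminantAnalysis()
clf.fit(np.asarray(trainx), np.asarray(trainy).ravel())
print('sklearn direction:', clf.coef_)   # proportional to w above
print('hand-rolled w:    ', w)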
--------------------------------------------------------------------------------
/src/MachineLearning/algorithm/NeuralNetwork/nn_mnist.py:
--------------------------------------------------------------------------------
1 | """这个代码还有小问题,不过不想整了
2 | 问题1:里面不该用numpy array,而是应该用mat,否则计算结果有些地方会很令人困惑
3 | 问题2:反向传播好像有地方写的不对,如果有隐含层就会不收敛
4 | 问题3:梯度消失,一直没有解决
5 | """
6 | import pdb
7 | from pylab import *
8 | from tensorflow.examples.tutorials import mnist
9 |
10 | data = mnist.input_data.read_data_sets('MNIST_data', one_hot=True)
11 |
12 |
13 | def show_pic(image_data, index):
14 | if len(image_data.shape) == 2:
15 | imshow(reshape(image_data[index, :], [28, 28]))
16 | elif len(image_data.shape) == 1:
17 | imshow(reshape(image_data, [28, 28]))
18 | elif len(image_data.shape) == 3:
19 | imshow(image_data[index, :, :])
20 |
21 |
22 | class mnist_net(object):
23 |
24 | def __init__(self, shape, data):
25 | self.data = data
26 | self.shape = shape
27 | self.weights = [0.1 * randn(a, b) for a, b in zip(shape[:-1], shape[1:])]
28 | self.biases = [zeros([1, a]) for a in shape[1:]]
29 |
30 | def sigmoid(self, z, derivative=False):
31 | sig = 1 / (1 + exp(-z))
32 | if not derivative:
33 | return sig
34 | else:
35 | return sig * (1 - sig)
36 |
37 | def fp(self, inputs):
38 | ai = inputs
39 | self.z = []
40 | self.a = []
41 | for w, b in zip(self.weights, self.biases):
42 | self.a.append(ai)
43 | zi = ai.dot(w) + b
44 | self.z.append(zi)
45 | ai = self.sigmoid(zi)
46 | return ai
47 |
48 | def bp_step(self, batch=1, learning_rate=0.01):
49 | self.delta = []
50 | inputs, t = self.data.train.next_batch(batch)
51 | y = self.fp(inputs)
52 |         d = -(y - t) * y * (1 - y)  # sigmoid'(z_last) = y*(1-y), since y = sigmoid(z_last)
53 | d = ones([1, batch]).dot(d) / batch
54 |
55 | for w, b, z, a in zip(self.weights[::-1], self.biases[::-1], self.z[::-1], self.a[::-1]):
56 | self.delta.insert(0, d)
57 |             d = d.dot(w.T) * a * (1 - a)  # a is already a sigmoid output, so sigmoid'(z) = a*(1-a)
58 | d = ones([1, batch]).dot(d) / batch
59 |
60 | self.gradw = []
61 | self.gradb = []
62 | for a, d in zip(self.a, self.delta):
63 | self.gradw.append(ones([1, batch]).dot(a).T.dot(d) / batch)
64 | self.gradb.append(d)
65 |
66 | for i, (gw, gb) in enumerate(zip(self.gradw, self.gradb)):
67 | self.weights[i] += learning_rate * gw
68 | self.biases[i] += learning_rate * gb
69 |
70 | def training(self, times=150, batch=1, learning_rate=0.1):
71 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), argmax(data.test.labels, 1))
72 | accuracy = mean(correct_prediction, axis=0)
73 | print(accuracy)
74 | for i in range(times):
75 | self.bp_step(batch, learning_rate)
76 | if i % 50 == 0:
77 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), argmax(data.test.labels, 1))
78 | accuracy_test = mean(correct_prediction, axis=0)
79 | correct_prediction = equal(argmax(self.fp(data.train.images), 1), argmax(data.train.labels, 1))
80 | accuracy_train = mean(correct_prediction, axis=0)
81 | print('%2.1f'%(i*100/times), accuracy_train, accuracy_test)
82 |
83 |
84 | net = mnist_net([784, 15, 10], data)
85 | net.training()
86 | for i in net.gradw:
87 | print(i)
88 | print(i.shape)
89 |
90 | def s(data):
91 | for i in data:
92 | print(i.shape)
93 |
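A finite-difference check is the usual way to corner the backprop bug mentioned in the docstring; a sketch (a hypothetical helper, not part of the repo; bp_step draws its own batch, so for an exact comparison it would need refactoring to accept one):

def grad_check(net, i=0, j=0, eps=1e-5):
    x, t = net.data.train.next_batch(1)
    old = net.weights[0][i, j]
    net.weights[0][i, j] = old + eps
    loss_plus = ((net.fp(x) - t) ** 2).sum() / 2
    net.weights[0][i, j] = old - eps
    loss_minus = ((net.fp(x) - t) ** 2).sum() / 2
    net.weights[0][i, j] = old
    # the training code ascends +gradw, so compare against -net.gradw[0][i, j]
    # (up to the batch-averaging done inside bp_step)
    return (loss_plus - loss_minus) / (2 * eps)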
--------------------------------------------------------------------------------
/src/MachineLearning/algorithm/NeuralNetwork/nn_mnist_static.py:
--------------------------------------------------------------------------------
1 | import pdb
2 | from pylab import *
3 | from tensorflow.examples.tutorials import mnist
4 |
5 | data = mnist.input_data.read_data_sets('MNIST_data', one_hot=True)
6 |
7 |
8 | def show_pic(image_data, index):
9 | if len(image_data.shape) == 2:
10 | imshow(reshape(image_data[index, :], [28, 28]))
11 | elif len(image_data.shape) == 1:
12 | imshow(reshape(image_data, [28, 28]))
13 | elif len(image_data.shape) == 3:
14 | imshow(image_data[index, :, :])
15 |
16 |
17 | class mnist_net(object):
18 |
19 | def __init__(self, shape, data):
20 | self.data = data
21 | self.W0 = np.mat(np.random.randn(784, 15) * .1)
22 | self.W1 = np.mat(np.random.randn(15, 10) * .1)
23 | self.b0 = np.mat(np.zeros([1, 15]))
24 | self.b1 = np.mat(np.zeros([1, 10]))
25 |
26 | def sigmoid(self, z, derivative=False):
27 | sig = 1 / (1 + exp(-z))
28 | if not derivative:
29 | return sig
30 | return np.multiply(sig, (1 - sig))
31 |
32 | def fp(self, inputs):
33 | self.l0 = np.mat(inputs)
34 | self.z1 = inputs.dot(self.W0) + self.b0
35 | self.l1 = self.sigmoid(self.z1)
36 | self.z2 = self.l1.dot(self.W1) + self.b1
37 | self.l2 = self.sigmoid(self.z2)
38 | return self.l2
39 |
40 | def bp_step(self, batch=1, learning_rate=1, lr_ampl=1):
41 | inputs, t = self.data.train.next_batch(batch)
42 | y = self.fp(inputs)
43 | self.d2 = np.multiply(-(y - t), self.sigmoid(self.z2, derivative=True)).mean(axis=0)
44 |
45 | self.d1 = np.multiply(self.d2.dot(self.W1.T).mean(axis=0), self.sigmoid(self.z1, derivative=True)).mean(axis=0)
46 |
47 | self.gradw1 = self.l1.mean(axis=0).T.dot(self.d2)
48 | self.gradw0 = self.l0.mean(axis=0).T.dot(self.d1)
49 | self.gradb1 = self.d2
50 | self.gradb0 = self.d1
51 |
52 | self.W1 += self.gradw1 * learning_rate
53 | self.W0 += self.gradw0 * learning_rate * lr_ampl
54 | self.b1 += self.gradb1 * learning_rate
55 | self.b0 += self.gradb0 * learning_rate
56 |
57 | def training(self, times=35000, batch=100, learning_rate=3):
58 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), np.mat(argmax(data.test.labels, 1)).T)
59 | accuracy = correct_prediction.mean()
60 | print(accuracy)
61 | for i in range(times):
62 | self.bp_step(batch, learning_rate)
63 | if i % 500 == 0:
64 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), np.mat(argmax(data.test.labels, 1)).T)
65 | accuracy_test = correct_prediction.mean()
66 | print('%2.1f'%(i*100/times), accuracy_test)
67 |
68 |
69 | net = mnist_net([784, 15, 10], data)
70 | net.training()
71 | for i in [net.gradw0, net.gradw1, net.W0, net.W1]:
72 | print(i)
73 | print(i.shape)
74 |
75 | def s(data):
76 | for i in data:
77 | print(i.shape)
78 |
--------------------------------------------------------------------------------
/src/MachineLearning/algorithm/tsne.py:
--------------------------------------------------------------------------------
1 | ../sklearn/tsne.py
--------------------------------------------------------------------------------
/src/MachineLearning/preprocess.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import numpy as np
4 |
5 |
6 | class Scaler(object):
7 | """Scaler for dataset
8 | Methods:
9 | refit: re-obtain min/max/mean/var with given data
10 |         trans: standardize the data
11 |         itrans: inverse-transform data from standardized back to original
12 |
13 | Usage:
14 | # loading sklearn datasets
15 |         from sklearn.datasets import load_boston
16 | dataset = load_boston()
17 | X, Y = dataset['data'], dataset['target']
18 |
19 | # using scaler to fit the variance, mean, and minmax
20 | scaler = Scaler(X, Y, method='minmax', attrs=[-1, 1])
21 | X, Y = scaler.trans(X, Y)
22 |
23 | # or just transform x or y only
24 | target = scaler.trans(Y=Y)
25 |
26 | # or inverse
27 | predicted_Y = scaler.itrans(Y=predicted)
28 | """
29 |
30 | def __init__(self, X=None, Y=None, method='minmax', attrs=None, axis=0):
31 | """
32 | methods: minmax, standard
33 | attrs:
34 |             In minmax: target [min, max] after scaling, default [0, 1];
35 |             In standard: target [mean, variance], default [0, 1]
36 | """
37 | self._check_None(X, Y)
38 | self._method = method
39 | self._attrs = attrs
40 | self._axis = axis
41 | self._statistics = dict()
42 |
43 | self._trans_dict = {
44 | 'minmax': self._min_max_scaler,
45 | 'standard': self._standard_scaler}
46 |
47 | self.refit(X, Y)
48 |
49 | def refit(self, X=None, Y=None):
50 |         # refit the data statistics, including minmax, mean, var
51 | self._check_None(X, Y)
52 | self._obtain_statistics(X, 'X')
53 | self._obtain_statistics(Y, 'Y')
54 |
55 | def trans(self, X=None, Y=None, inv=False):
56 | self._check_None(X, Y)
57 | if X is None:
58 | return self._trans_dict[self._method](Y, 'Y', inv)
59 | elif Y is None:
60 | return self._trans_dict[self._method](X, 'X', inv)
61 | else:
62 | return self._trans_dict[self._method](X, 'X', inv), self._trans_dict[self._method](Y, 'Y', inv)
63 |
64 | def itrans(self, X=None, Y=None):
65 | return self.trans(X, Y, inv=True)
66 |
67 | def _obtain_statistics(self, data, name):
68 | """Obtain minmax, mean and variance"""
69 | if data is not None:
70 | if not hasattr(data, 'shape'):
71 |                 raise ValueError("'{}' has no attribute '{}'".format(name, 'shape'))
72 |
73 | self._statistics[name] = dict()
74 | self._statistics[name]['shape'] = data.shape
75 |
76 |             # check the data's dimension to decide which axis to use
77 | if len(data.shape) == 1:
78 | axis = 0
79 | elif len(data.shape) == 2:
80 | axis = self._axis
81 | else:
82 | raise ValueError("Dimention of '{}' is not 1 or 2, given {} with shape {}".format(
83 | name,
84 | len(data.shape).
85 | data.shape))
86 |
87 | # record the real axis and obtain statistics
88 | self._statistics[name]['axis'] = axis
89 |
90 | self._statistics[name]['minmax'] = (
91 | np.min(data, axis=axis),
92 | np.max(data, axis=axis))
93 |
94 | self._statistics[name]['norm'] = (
95 | np.mean(data, axis=axis),
96 | np.var(data, axis=axis))
97 |
98 | def _min_max_scaler(self, data, name, inv=False):
99 | """MinMax Scaler
100 | inv: inverse transform
101 | """
102 |         if self._attrs is None:
103 |             self._attrs = [0, 1]
104 |         self._attrs.sort()
105 |         lo, hi = self._attrs  # target range; previously fitted but never applied
106 |         data_min, data_max = self._statistics[name]['minmax']
107 |
108 |         if not inv:
109 |             return (data - data_min) / (data_max - data_min) * (hi - lo) + lo
110 |         else:
111 |             return (data - lo) / (hi - lo) * (data_max - data_min) + data_min
112 |
113 | def _standard_scaler(self, data, name, inv=False):
114 | """Standard Scaler, force the mean and variance to given number
115 | inv: inverse transform
116 | """
117 |         if self._attrs is None:
118 |             self._attrs = [0, 1]
119 |         mean_t, var_t = self._attrs  # target mean/variance; previously ignored
120 |         data_mean, data_var = self._statistics[name]['norm']
121 |
122 |         if not inv:
123 |             return (data - data_mean) / data_var ** 0.5 * var_t ** 0.5 + mean_t
124 |         else:
125 |             return (data - mean_t) / var_t ** 0.5 * data_var ** 0.5 + data_mean
126 |
127 | @property
128 | def statistics(self):
129 | return self._statistics
130 |
131 | def _check_None(self, X, Y):
132 | if X is None and Y is None:
133 | raise ValueError('X and Y cannot both be None')
134 |
135 | def __repr__(self):
136 | return '<{} Scaler with {} fitted, attrs={}>'.format(self._method, tuple(self._statistics.keys()), self._attrs)
137 |
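A quick round-trip sanity check of the scaler (random data; with the target-range fix above, both methods should reconstruct the input exactly):

if __name__ == '__main__':
    X = np.random.rand(100, 5) * 10 - 3
    for method in ('minmax', 'standard'):
        scaler = Scaler(X, method=method)
        Xs = scaler.trans(X)
        assert np.allclose(scaler.itrans(X=Xs), X), method
    print('round-trip OK')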
--------------------------------------------------------------------------------
/src/MachineLearning/sklearn/tsne.py:
--------------------------------------------------------------------------------
1 | from pylab import *
2 | from sklearn.manifold import TSNE
3 | from sklearn.decomposition import PCA
4 |
5 |
6 | def plot_embedding(X, y, plot_title=None, unbalanced=True, target_names=None):
7 | x_min, x_max = X[:, 0].min(), X[:, 0].max()
8 | y_min, y_max = X[:, 1].min(), X[:, 1].max()
9 | figure()
10 | for i in range(X.shape[0]):
11 | text(
12 | X[i, 0],
13 | X[i, 1],
14 | str(y[i]),
15 | color=cm.Set1(1 - y[i] / 7),
16 | fontdict={
17 | 'weight': 'bold',
18 | 'size': 4 if y[i] == 0 and unbalanced else 9})
19 |     if plot_title is not None:  # 'title' here is pylab's function, so test the argument
20 | title(plot_title)
21 |
22 | factor = 0.1
23 | margin_x, margin_y = factor * (x_max - x_min), factor * (y_max - y_min)
24 | axis([
25 | x_min - margin_x,
26 | x_max + margin_x,
27 | y_min - margin_y,
28 | y_max + margin_y])
29 |
30 | figure()
31 |     seen = set()  # classes already given a legend entry
32 |     for i in range(X.shape[0]):
33 |         name = None
34 |         if target_names is not None:
35 |             if y[i] not in seen:
36 |                 seen.add(y[i])
37 | name = target_names[y[i]]
38 | plot(
39 | X[i, 0],
40 | X[i, 1],
41 | '.',
42 | c=cm.Set1(1 - y[i] / 7),
43 | markersize=2 if y[i] == 0 and unbalanced else 4,
44 | label=name)
45 |     if plot_title is not None:
46 | title(plot_title)
47 | legend()
48 |
49 |
50 | def tsne(data_test, data_label, title=None, unbalanced=False, method='tsne', label_names=None, one_hot=False):
51 | if not one_hot:
52 | label_n = data_label
53 | else:
54 | label_n = argmax(data_label, axis=1)
55 |
56 | models = {
57 | 'tsne': TSNE(n_iter=5000),
58 | 'pca': PCA()}
59 |
60 | model = models[method.lower()]
61 | tsne_transformed = model.fit_transform(data_test, label_n)
62 |
63 | plot_embedding(tsne_transformed, label_n, title if title else 't-sne projection', unbalanced, label_names)
64 | return tsne_transformed, label_n
65 |
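A usage sketch mirroring how LDA.py calls this helper (breast-cancer data, two classes; nothing beyond sklearn's bundled datasets is assumed):

if __name__ == '__main__':
    from sklearn.datasets import load_breast_cancer
    d = load_breast_cancer()
    tsne(d['data'], d['target'], title='breast cancer t-SNE', label_names=list(d['target_names']))
    show()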
--------------------------------------------------------------------------------
/src/MinorProjects/ECDICT-ultimate/ECDICT.css:
--------------------------------------------------------------------------------
1 | @charset "UTF-8";
2 | @namespace d url(http://www.apple.com/DTDs/DictionaryService-1.0.rng);
3 |
4 | h1 {
5 | font-size: 150%;
6 | display: inline;
7 | }
8 |
9 | span.phonetic,
10 | span.trans_name {
11 | color: gray;
12 | }
13 |
14 | div.definition {
15 | color: cadetblue;
16 | display: block;
17 | margin: 1em;
18 |
19 | }
20 |
21 | div.translation {
22 | display: block;
23 | margin: 1em;
24 | }
25 |
26 | div.tags {
27 | font-size: 80%;
28 | color: gray;
29 | }
30 |
--------------------------------------------------------------------------------
/src/MinorProjects/ECDICT-ultimate/ECDICT.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |