├── .gitignore
├── .gitmodules
├── README.md
├── doc
│   └── ml
│       └── An overview of gradient descent optimization algorithms.mhtml
└── src
    ├── .obs
    │   └── dot_parser4tf_csv_file
    │       ├── csv_parser.py
    │       ├── dots
    │       │   ├── Asserts and boolean checks.dot
    │       │   ├── BayesFlow Stochastic Graph (contrib).dot
    │       │   ├── Building Graph.dot
    │       │   ├── Constants,Sequences,and Random Valu.dot
    │       │   ├── Control Flow.dot
    │       │   ├── Copying Graph Elements (contrib).dot
    │       │   ├── Data IO (Python functions).dot
    │       │   ├── FFmpeg (contrib).dot
    │       │   ├── Framework (contrib).dot
    │       │   ├── Higher Order Functions.dot
    │       │   ├── Histograms.dot
    │       │   ├── Images.dot
    │       │   ├── Inputs and Readers.dot
    │       │   ├── Layers (contrib).dot
    │       │   ├── Learn (contrib).dot
    │       │   ├── Losses (contrib).dot
    │       │   ├── Math.dot
    │       │   ├── Metrics (contrib).dot
    │       │   ├── Monitors (contrib).dot
    │       │   ├── Neural Network RNN Cells.dot
    │       │   ├── Neural Network.dot
    │       │   ├── Running Graphs.dot
    │       │   ├── Sparse Tensors.dot
    │       │   ├── Statistical distributions (contrib).dot
    │       │   ├── Strings.dot
    │       │   ├── Summary Operations.dot
    │       │   ├── Tensor Handle Operations.dot
    │       │   ├── Tensor Transformations.dot
    │       │   ├── TensorArray Operations.dot
    │       │   ├── Testing.dot
    │       │   ├── Training.dot
    │       │   ├── Utilities (contrib).dot
    │       │   ├── Variables.dot
    │       │   └── Wraps python functions.dot
    │       ├── tensorflow_api.csv
    │       └── tf.pdf
    ├── ComputerScience
    │   ├── Calculator_AST.py
    │   ├── FullBinaryHeap.py
    │   └── dsr.py
    ├── Controller
    │   ├── cartpole_pid.py
    │   └── pid.py
    ├── MachineLearning
    │   ├── Deep_Learning
    │   │   ├── dqn_cartpole.py
    │   │   └── pg_cartpole.ipynb
    │   ├── TensorFlow
    │   │   ├── Batcher.py
    │   │   ├── IncreaseNN.py
    │   │   ├── TensorflowLearning
    │   │   │   └── Untitled.ipynb
    │   │   ├── list_devices.py
    │   │   └── multi-gpu.py
    │   ├── algorithm
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── step by step q learning-checkpoint.ipynb
    │   │   ├── DecisionTree.py
    │   │   ├── DeepReinforcementLearning
    │   │   │   ├── ReplayBuffer.py
    │   │   │   ├── SimplePolicyGradient.py
    │   │   │   └── step by step q learning.ipynb
    │   │   ├── LDA.py
    │   │   ├── NeuralNetwork
    │   │   │   ├── nn_mnist.py
    │   │   │   └── nn_mnist_static.py
    │   │   ├── SupportVectorMachine
    │   │   │   └── SMO_Simple.ipynb
    │   │   └── tsne.py
    │   ├── preprocess.py
    │   └── sklearn
    │       └── tsne.py
    ├── MinorProjects
    │   ├── ECDICT-ultimate
    │   │   ├── ECDICT.css
    │   │   ├── ECDICT.plist
    │   │   ├── Makefile
    │   │   ├── OtherResources
    │   │   │   ├── ECDICT.xsl
    │   │   │   └── ECDICT_prefs.html
    │   │   ├── README.md
    │   │   └── csv2dict.py
    │   ├── ECDICT
    │   │   ├── ECDICT.css
    │   │   ├── ECDICT.plist
    │   │   ├── Makefile
    │   │   ├── OtherResources
    │   │   │   ├── ECDICT.xsl
    │   │   │   └── ECDICT_prefs.html
    │   │   ├── README.md
    │   │   └── csv2dict.py
    │   ├── chat
    │   │   ├── chat.py
    │   │   ├── doc
    │   │   │   └── structure.md
    │   │   ├── http.txt
    │   │   ├── udp_client.py
    │   │   └── ui.py
    │   ├── copydisk.py
    │   ├── diff.py
    │   ├── excel.py
    │   ├── media
    │   │   ├── comics
    │   │   │   └── tmp.py
    │   │   └── ffmpeg_convert2mp4.py
    │   ├── multy_copy.py
    │   ├── plz
    │   │   ├── car_velocity.py
    │   │   ├── compute_time.py
    │   │   ├── image_cutout.py
    │   │   ├── turtle.bot
    │   │   ├── turtlebot.ipynb
    │   │   └── turtlebot.py
    │   ├── sendmail.py
    │   ├── tieba_content.py
    │   ├── xyq
    │   │   └── xyq.py
    │   └── yyf
    │       └── donation.py
    ├── Web
    │   ├── Verification code.ipynb
    │   ├── html
    │   │   └── split.html
    │   └── ipgw.py
    └── probability.py

/.gitignore:
--------------------------------------------------------------------------------
1 | ECDICT.xml
2 | objects/
3 | *.py[cod]
4 |
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Packages
10 | *.egg
11 | *.egg-info
12 | dist
13 | build
14 | eggs
15 | parts
16 | bin
17 | var
18 | sdist
19 | data
20 | develop-eggs
21 | .installed.cfg
22 | lib
23 | lib64
24 | __pycache__
25 |
26 | # Installer logs
27 | pip-log.txt
28 |
29 | # Unit test / coverage reports
30 | .coverage
31 | .tox
32 | nosetests.xml
33 |
34 | # Translations
35 | *.mo
36 |
37 | # Mr Developer
38 | .mr.developer.cfg
39 | .project
40 | .pydevproject
41 |
42 | # Tensorboard
43 | .ash-laptop
44 |
45 | # Editor
46 | *~
47 | ~*
48 | .swp
49 |
50 | # data
51 | *data
52 | data
53 | .bz2
54 |
55 | .ipynb_checkpoints
56 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "src/Web/js/Function/jquery-qrcode"]
2 |     path = src/Web/js/Function/jquery-qrcode
3 |     url = https://github.com/jeromeetienne/jquery-qrcode.git
4 | [submodule "src/Web/js/Views/canvas-nest"]
5 |     path = src/Web/js/Views/canvas-nest
6 |     url = https://github.com/hustcc/canvas-nest.js.git
7 | [submodule "ShadowSocksShare"]
8 |     path = src/Web/Flask/ShadowSocksShare-OpenShift
9 |     url = https://github.com/the0demiurge/ShadowSocksShare-OpenShift.git
10 | [submodule "MATLAB"]
11 |     path = src/pyMATLAB-style
12 |     url = git@github.com:the0demiurge/pyMATLAB-style.git
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # My Python Scripts
2 | Scripts for Python testing and learning.
3 | # Contents
4 | ```
5 | src/
6 | ├── ComputerScience
7 | │   └── data_structure
8 | │       └── linked_list.py
9 | ├── Controller
10 | │   ├── cartpole_pid.py
11 | │   └── pid.py
12 | ├── DevTools
13 | │   ├── crash_on_ipy.py
14 | │   └── jupyter_hide_code.py
15 | ├── Example
16 | │   ├── argparse_example.py
17 | │   ├── ncurses_example.py
18 | │   └── qrcode_example.py
19 | ├── MachineLearning
20 | │   ├── algorithm
21 | │   │   ├── DecisionTree.py
22 | │   │   ├── DeepReinforcementLearning
23 | │   │   │   ├── ReplayBuffer.py
24 | │   │   │   ├── SimplePolicyGradient.py
25 | │   │   │   └── step by step q learning.ipynb
26 | │   │   ├── LDA.py
27 | │   │   ├── neural_network.ipynb
28 | │   │   ├── nn_mnist.py
29 | │   │   ├── nn_mnist_static.py
30 | │   │   └── tsne.py -> ../sklearn/tsne.py
31 | │   ├── Deep_Learning
32 | │   │   ├── dqn_cartpole.py
33 | │   │   └── pg_cartpole.ipynb
34 | │   ├── preprocess.py
35 | │   ├── sklearn
36 | │   │   └── tsne.py
37 | │   └── TensorFlow
38 | │       ├── Batcher.py
39 | │       ├── IncreaseNN.py
40 | │       └── multi-gpu.py
41 | ├── MinorProjects
42 | │   ├── diff.py
43 | │   ├── excel.py
44 | │   ├── media
45 | │   │   ├── comics
46 | │   │   │   └── tmp.py
47 | │   │   └── ffmpeg_convert2mp4.py
48 | │   ├── multy_copy.py
49 | │   ├── plz
50 | │   │   ├── compute_time.py
51 | │   │   ├── turtle.bot
52 | │   │   ├── turtlebot.ipynb
53 | │   │   └── turtlebot.py
54 | │   └── sendmail.py
55 | └── Web
56 |     ├── Flask
57 |     ├── html
58 |     │   └── split.html
59 |     ├── ipgw.py
60 |     └── Verification code.ipynb
61 |
62 | 18 directories, 37 files
63 |
64 | ```
65 | src/MinorProjects/plz: attendance-time calculator for Prof. Zhang's lab
66 | src/MinorProjects/tf: TensorFlow learning code
67 | src/MinorProjects/sendmail.py: sending mail with Python (ops automation)
68 | `tree src/ -I 'pyc|__pycache__|js|ShadowSocksShare-OpenShift|MNIST_data'`
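69 |
70 | The API graphs under `src/.obs/dot_parser4tf_csv_file` come from `csv_parser.py`,
71 | which splits `tensorflow_api.csv` into one Graphviz `.dot` file per category and
72 | shells out to `dot` for SVG rendering. A minimal driver sketch (assumes Python 3
73 | and Graphviz's `dot` on PATH; the script reads the CSV from its working
74 | directory):
75 |
76 | ```python
77 | import subprocess
78 |
79 | # Writes "<Category>.dot" and "<Category>.svg" next to the CSV.
80 | subprocess.run(
81 |     ["python3", "csv_parser.py", "tensorflow_api.csv"],
82 |     cwd="src/.obs/dot_parser4tf_csv_file",
83 |     check=True,
84 | )
85 | ```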
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/csv_parser.py:
--------------------------------------------------------------------------------
1 | #!/data/data/com.termux/files/usr/bin/python3
2 | """Split a TensorFlow API listing (CSV) into one Graphviz .dot file per
3 | category and render each to SVG (requires the Graphviz `dot` binary)."""
4 | import os
5 | import sys
6 |
7 | # Default to tensorflow_api.csv when no argument is given.
8 | filename = sys.argv[-1]
9 | if len(sys.argv) == 1:
10 |     filename = 'tensorflow_api.csv'
11 |
12 | with open(filename, 'r') as f:
13 |     data = f.readlines()
14 |
15 | # The first line carries the listing title, e.g. "# <title>,,".
16 | name = data[0][2:-3]
17 |
18 | # Collect {category: [(op name, qualified name), ...]}: "# "-prefixed
19 | # lines open a new category, other non-comment lines are data rows.
20 | ops = dict()
21 | key = None  # guards against data rows that precede the first category
22 | for i in data[1:]:
23 |     if i[:2] == "# ":
24 |         key = i[2:-3]
25 |         ops[key] = list()
26 |     elif i[0] != '#' and key is not None:
27 |         ops[key].append(i.split(',')[0:2])
28 |
29 | for key in ops:
30 |     with open('%s.dot' % key, 'w') as f:
31 |         print('digraph "%s"{' % key, file=f)
32 |         print('rankdir="LR"', file=f)
33 |         print('node [shape="box"]', file=f)
34 |         # print('\t"{}" -> "{}"[color="red"];'.format(name, key))
35 |         for value in ops[key]:
36 |             print('\t\t"{}" -> "{}" -> "{}";'.format(key, value[0], value[1]), file=f)
37 |         print('}', file=f)
38 |     os.system('dot -T svg -o "%s.svg" "%s.dot"' % (key, key))
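39 |
40 | # Input sketch (inferred from the parsing above, not copied from
41 | # tensorflow_api.csv itself): a title line, "# <Category>" section
42 | # headers, then rows whose first two comma-separated fields become
43 | # the graph nodes:
44 | #
45 | #   # TensorFlow Python API,,
46 | #   # Math,,
47 | #   abs,tf.abs
48 | #   add,tf.add
49 |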
"tf.contreib.bayesflow.stochastic_graph.StochasticTensor"; 13 | "BayesFlow Stochastic Graph (contrib)" -> "surrogate_loss" -> "tf.contreib.bayesflow.stochastic_graph.surrogate_loss"; 14 | "BayesFlow Stochastic Graph (contrib)" -> "value_type" -> "tf.contreib.bayesflow.stochastic_graph.value_type"; 15 | } 16 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Building Graph.dot: -------------------------------------------------------------------------------- 1 | digraph "Building Graph"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Building Graph" -> "add_to_collection" -> "tf.add_to_collection"; 5 | "Building Graph" -> "as_dtype" -> "tf.as_dtype"; 6 | "Building Graph" -> "bytes" -> "tf.bytes"; 7 | "Building Graph" -> "container" -> "tf.container"; 8 | "Building Graph" -> "control_dependencies" -> "tf.control_dependencies"; 9 | "Building Graph" -> "convert_to_tensor" -> "tf.convert_to_tensor"; 10 | "Building Graph" -> "convert_to_tensor_or_indexed_slices" -> "tf.convert_to_tensor_or_indexed_slices"; 11 | "Building Graph" -> "device" -> "tf.device"; 12 | "Building Graph" -> "DeviceSpec" -> "tf.DeviceSpec"; 13 | "Building Graph" -> "Dimension" -> "tf.Dimension"; 14 | "Building Graph" -> "DType" -> "tf.DType"; 15 | "Building Graph" -> "get_collection" -> "tf.get_collection"; 16 | "Building Graph" -> "get_collection_ref" -> "tf.get_collection_ref"; 17 | "Building Graph" -> "get_default_graph" -> "tf.get_default_graph"; 18 | "Building Graph" -> "get_seed" -> "tf.get_seed"; 19 | "Building Graph" -> "Graph" -> "tf.Graph"; 20 | "Building Graph" -> "GraphKeys" -> "tf.GraphKeys"; 21 | "Building Graph" -> "import_graph_def" -> "tf.import_graph_def"; 22 | "Building Graph" -> "load_file_system_library" -> "tf.load_file_system_library"; 23 | "Building Graph" -> "load_op_library" -> "tf.load_op_library"; 24 | "Building Graph" -> "name_scope" -> "tf.name_scope"; 25 | "Building Graph" -> "NoGradient" -> "tf.NoGradient"; 26 | "Building Graph" -> "op_scope" -> "tf.op_scope"; 27 | "Building Graph" -> "Operation" -> "tf.Operation"; 28 | "Building Graph" -> "register_tensor_conversion_function" -> "tf.register_tensor_conversion_function"; 29 | "Building Graph" -> "RegisterGradient" -> "tf.RegisterGradient"; 30 | "Building Graph" -> "RegisterShape" -> "tf.RegisterShape"; 31 | "Building Graph" -> "reset_default_graph" -> "tf.reset_default_graph"; 32 | "Building Graph" -> "Tensor" -> "tf.Tensor"; 33 | "Building Graph" -> "TensorShape" -> "tf.TensorShape"; 34 | } 35 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Constants,Sequences,and Random Valu.dot: -------------------------------------------------------------------------------- 1 | digraph "Constants,Sequences,and Random Valu"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Constants,Sequences,and Random Valu" -> "constant" -> "tf.constant"; 5 | "Constants,Sequences,and Random Valu" -> "fill" -> "tf.fill"; 6 | "Constants,Sequences,and Random Valu" -> "linspace" -> "tf.linspace"; 7 | "Constants,Sequences,and Random Valu" -> "multinomial" -> "tf.multinomial"; 8 | "Constants,Sequences,and Random Valu" -> "ones" -> "tf.ones"; 9 | "Constants,Sequences,and Random Valu" -> "ones_like" -> "tf.ones_like"; 10 | "Constants,Sequences,and Random Valu" -> "random_crop" -> "tf.random_crop"; 11 | "Constants,Sequences,and Random Valu" -> "random_gamma" -> "tf.random_gamma"; 12 | "Constants,Sequences,and Random Valu" -> "random_normal" -> 
"tf.random_normal"; 13 | "Constants,Sequences,and Random Valu" -> "random_shuffle" -> "tf.random_shuffle"; 14 | "Constants,Sequences,and Random Valu" -> "random_uniform" -> "tf.random_uniform"; 15 | "Constants,Sequences,and Random Valu" -> "range" -> "tf.range"; 16 | "Constants,Sequences,and Random Valu" -> "set_random_seed" -> "tf.set_random_seed"; 17 | "Constants,Sequences,and Random Valu" -> "truncated_normal" -> "tf.truncated_normal"; 18 | "Constants,Sequences,and Random Valu" -> "zeros" -> "tf.zeros"; 19 | "Constants,Sequences,and Random Valu" -> "zeros_like" -> "tf.zeros_like"; 20 | } 21 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Control Flow.dot: -------------------------------------------------------------------------------- 1 | digraph "Control Flow"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Control Flow" -> "add_check_numerics_ops" -> "tf.add_check_numerics_ops"; 5 | "Control Flow" -> "Assert" -> "tf.Assert"; 6 | "Control Flow" -> "case" -> "tf.case"; 7 | "Control Flow" -> "check_numerics" -> "tf.check_numerics"; 8 | "Control Flow" -> "cond" -> "tf.cond"; 9 | "Control Flow" -> "count_up_to" -> "tf.count_up_to"; 10 | "Control Flow" -> "equal" -> "tf.equal"; 11 | "Control Flow" -> "greater" -> "tf.greater"; 12 | "Control Flow" -> "greater_equal" -> "tf.greater_equal"; 13 | "Control Flow" -> "group" -> "tf.group"; 14 | "Control Flow" -> "identity" -> "tf.identity"; 15 | "Control Flow" -> "is_finite" -> "tf.is_finite"; 16 | "Control Flow" -> "is_inf" -> "tf.is_inf"; 17 | "Control Flow" -> "is_nan" -> "tf.is_nan"; 18 | "Control Flow" -> "less" -> "tf.less"; 19 | "Control Flow" -> "less_equal" -> "tf.less_equal"; 20 | "Control Flow" -> "logical_and" -> "tf.logical_and"; 21 | "Control Flow" -> "logical_not" -> "tf.logical_not"; 22 | "Control Flow" -> "logical_or" -> "tf.logical_or"; 23 | "Control Flow" -> "logical_xor" -> "tf.logical_xor"; 24 | "Control Flow" -> "no_op" -> "tf.no_op"; 25 | "Control Flow" -> "not_equal" -> "tf.not_equal"; 26 | "Control Flow" -> "Print" -> "tf.Print"; 27 | "Control Flow" -> "select" -> "tf.select"; 28 | "Control Flow" -> "tuple" -> "tf.tuple"; 29 | "Control Flow" -> "verify_tensor_all_finite" -> "tf.verify_tensor_all_finite"; 30 | "Control Flow" -> "where" -> "tf.where"; 31 | "Control Flow" -> "while_loop" -> "tf.while_loop"; 32 | } 33 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Copying Graph Elements (contrib).dot: -------------------------------------------------------------------------------- 1 | digraph "Copying Graph Elements (contrib)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Copying Graph Elements (contrib)" -> "copy_op_to_graph" -> "tf.contrib.copy_graph.copy_op_to_graph"; 5 | "Copying Graph Elements (contrib)" -> "copy_variable_to_graph" -> "tf.contrib.copy_graph.copy_variable_to_graph"; 6 | "Copying Graph Elements (contrib)" -> "get_copied_op" -> "tf.contrib.copy_graph.get_copied_op"; 7 | } 8 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Data IO (Python functions).dot: -------------------------------------------------------------------------------- 1 | digraph "Data IO (Python functions)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Data IO (Python functions)" -> "tf_record_iterator" -> "tf.tf_record_iterator"; 5 | "Data IO (Python functions)" -> "TFRecordWriter" -> "tf.TFRecordWriter"; 6 | } 7 | 
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/FFmpeg (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "FFmpeg (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "FFmpeg (contrib)" -> "decode_audio" -> "tf.contrib.ffmpeg.decode_audio";
5 | "FFmpeg (contrib)" -> "encode_audio" -> "tf.contrib.ffmpeg.encode_audio";
6 | }
7 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Framework (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Framework (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Framework (contrib)" -> "add_arg_scope" -> "tf.contrib.framework.add_arg_scope";
5 | "Framework (contrib)" -> "add_model_variable" -> "tf.contrib.framework.add_model_variable";
6 | "Framework (contrib)" -> "arg_scope" -> "tf.contrib.framework.arg_scope";
7 | "Framework (contrib)" -> "arg_scoped_arguments" -> "tf.contrib.framework.arg_scoped_arguments";
8 | "Framework (contrib)" -> "assert_global_step" -> "tf.contrib.framework.assert_global_step";
9 | "Framework (contrib)" -> "assert_or_get_global_step" -> "tf.contrib.framework.assert_or_get_global_step";
10 | "Framework (contrib)" -> "assert_same_float_dtype" -> "tf.contrib.framework.assert_same_float_dtype";
11 | "Framework (contrib)" -> "assert_scalar_int" -> "tf.contrib.framework.assert_scalar_int";
12 | "Framework (contrib)" -> "convert_to_tensor_or_sparse_tensor" -> "tf.contrib.framework.convert_to_tensor_or_sparse_tensor";
13 | "Framework (contrib)" -> "create_global_step" -> "tf.contrib.framework.create_global_step";
14 | "Framework (contrib)" -> "deprecated" -> "tf.contrib.framework.deprecated";
15 | "Framework (contrib)" -> "get_global_step" -> "tf.contrib.framework.get_global_step";
16 | "Framework (contrib)" -> "get_graph_from_inputs" -> "tf.contrib.framework.get_graph_from_inputs";
17 | "Framework (contrib)" -> "get_local_variables" -> "tf.contrib.framework.get_local_variables";
18 | "Framework (contrib)" -> "get_model_variables" -> "tf.contrib.framework.get_model_variables";
19 | "Framework (contrib)" -> "get_or_create_global_step" -> "tf.contrib.framework.get_or_create_global_step";
20 | "Framework (contrib)" -> "get_unique_variable" -> "tf.contrib.framework.get_unique_variable";
21 | "Framework (contrib)" -> "get_variables" -> "tf.contrib.framework.get_variables";
22 | "Framework (contrib)" -> "get_variables_by_name" -> "tf.contrib.framework.get_variables_by_name";
23 | "Framework (contrib)" -> "get_variables_by_suffix" -> "tf.contrib.framework.get_variables_by_suffix";
24 | "Framework (contrib)" -> "get_variables_to_restore" -> "tf.contrib.framework.get_variables_to_restore";
25 | "Framework (contrib)" -> "has_arg_scope" -> "tf.contrib.framework.has_arg_scope";
26 | "Framework (contrib)" -> "is_non_decreasing" -> "tf.contrib.framework.is_non_decreasing";
27 | "Framework (contrib)" -> "is_numeric_tensor" -> "tf.contrib.framework.is_numeric_tensor";
28 | "Framework (contrib)" -> "is_strictly_increasing" -> "tf.contrib.framework.is_strictly_increasing";
29 | "Framework (contrib)" -> "is_tensor" -> "tf.contrib.framework.is_tensor";
30 | "Framework (contrib)" -> "local_variable" -> "tf.contrib.framework.local_variable";
31 | "Framework (contrib)" -> "model_variable" -> "tf.contrib.framework.model_variable";
"tf.contrib.framework.reduce_sum_n"; 33 | "Framework (contrib)" -> "safe_embedding_lookup_sparse" -> "tf.contrib.framework.safe_embedding_lookup_sparse"; 34 | "Framework (contrib)" -> "variable" -> "tf.contrib.framework.variable"; 35 | "Framework (contrib)" -> "VariableDeviceChooser" -> "tf.contrib.framework.VariableDeviceChooser"; 36 | "Framework (contrib)" -> "with_same_shape" -> "tf.contrib.framework.with_same_shape"; 37 | "Framework (contrib)" -> "with_shape" -> "tf.contrib.framework.with_shape"; 38 | } 39 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Higher Order Functions.dot: -------------------------------------------------------------------------------- 1 | digraph "Higher Order Functions"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Higher Order Functions" -> "foldl" -> "tf.foldl"; 5 | "Higher Order Functions" -> "foldr" -> "tf.foldr"; 6 | "Higher Order Functions" -> "map_fn" -> "tf.map_fn"; 7 | "Higher Order Functions" -> "scan" -> "tf.scan"; 8 | } 9 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Histograms.dot: -------------------------------------------------------------------------------- 1 | digraph "Histograms"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Histograms" -> "histogram_fixed_width" -> "tf.histogram_fixed_width"; 5 | } 6 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Images.dot: -------------------------------------------------------------------------------- 1 | digraph "Images"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Images" -> "adjust_brightness" -> "tf.adjust_brightness"; 5 | "Images" -> "adjust_contrast" -> "tf.adjust_contrast"; 6 | "Images" -> "adjust_hue" -> "tf.adjust_hue"; 7 | "Images" -> "adjust_saturation" -> "tf.adjust_saturation"; 8 | "Images" -> "central_crop" -> "tf.central_crop"; 9 | "Images" -> "convert_image_dtype" -> "tf.convert_image_dtype"; 10 | "Images" -> "crop_and_resize" -> "tf.crop_and_resize"; 11 | "Images" -> "crop_to_bounding_box" -> "tf.crop_to_bounding_box"; 12 | "Images" -> "decode_jpeg" -> "tf.decode_jpeg"; 13 | "Images" -> "decode_png" -> "tf.decode_png"; 14 | "Images" -> "draw_bounding_boxes" -> "tf.draw_bounding_boxes"; 15 | "Images" -> "encode_jpeg" -> "tf.encode_jpeg"; 16 | "Images" -> "encode_png" -> "tf.encode_png"; 17 | "Images" -> "extract_glimpse" -> "tf.extract_glimpse"; 18 | "Images" -> "flip_left_right" -> "tf.flip_left_right"; 19 | "Images" -> "flip_up_down" -> "tf.flip_up_down"; 20 | "Images" -> "grayscale_to_rgb" -> "tf.grayscale_to_rgb"; 21 | "Images" -> "hsv_to_rgb" -> "tf.hsv_to_rgb"; 22 | "Images" -> "non_max_suppression" -> "tf.non_max_suppression"; 23 | "Images" -> "pad_to_bounding_box" -> "tf.pad_to_bounding_box"; 24 | "Images" -> "per_image_whitening" -> "tf.per_image_whitening"; 25 | "Images" -> "random_brightness" -> "tf.random_brightness"; 26 | "Images" -> "random_contrast" -> "tf.random_contrast"; 27 | "Images" -> "random_flip_left_right" -> "tf.random_flip_left_right"; 28 | "Images" -> "random_flip_up_down" -> "tf.random_flip_up_down"; 29 | "Images" -> "random_hue" -> "tf.random_hue"; 30 | "Images" -> "random_saturation" -> "tf.random_saturation"; 31 | "Images" -> "resize_area" -> "tf.resize_area"; 32 | "Images" -> "resize_bicubic" -> "tf.resize_bicubic"; 33 | "Images" -> "resize_bilinear" -> "tf.resize_bilinear"; 34 | "Images" -> "resize_image_with_crop_or_pad" -> 
"tf.resize_image_with_crop_or_pad"; 35 | "Images" -> "resize_images" -> "tf.resize_images"; 36 | "Images" -> "resize_nearest_neighbor" -> "tf.resize_nearest_neighbor"; 37 | "Images" -> "rgb_to_grayscale" -> "tf.rgb_to_grayscale"; 38 | "Images" -> "rgb_to_hsv" -> "tf.rgb_to_hsv"; 39 | "Images" -> "rot90" -> "tf.rot90"; 40 | "Images" -> "sample_distorted_bounding_box" -> "tf.sample_distorted_bounding_box"; 41 | "Images" -> "transpose_image" -> "tf.transpose_image"; 42 | } 43 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Inputs and Readers.dot: -------------------------------------------------------------------------------- 1 | digraph "Inputs and Readers"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Inputs and Readers" -> "batch" -> "tf.batch"; 5 | "Inputs and Readers" -> "batch_join" -> "tf.batch_join"; 6 | "Inputs and Readers" -> "decode_csv" -> "tf.decode_csv"; 7 | "Inputs and Readers" -> "decode_json_example" -> "tf.decode_json_example"; 8 | "Inputs and Readers" -> "decode_raw" -> "tf.decode_raw"; 9 | "Inputs and Readers" -> "FIFOQueue" -> "tf.FIFOQueue"; 10 | "Inputs and Readers" -> "FixedLenFeature" -> "tf.FixedLenFeature"; 11 | "Inputs and Readers" -> "FixedLengthRecordReader" -> "tf.FixedLengthRecordReader"; 12 | "Inputs and Readers" -> "FixedLenSequenceFeature" -> "tf.FixedLenSequenceFeature"; 13 | "Inputs and Readers" -> "IdentityReader" -> "tf.IdentityReader"; 14 | "Inputs and Readers" -> "input_producer" -> "tf.input_producer"; 15 | "Inputs and Readers" -> "limit_epochs" -> "tf.limit_epochs"; 16 | "Inputs and Readers" -> "match_filenames_once" -> "tf.match_filenames_once"; 17 | "Inputs and Readers" -> "matching_files" -> "tf.matching_files"; 18 | "Inputs and Readers" -> "PaddingFIFOQueue" -> "tf.PaddingFIFOQueue"; 19 | "Inputs and Readers" -> "parse_example" -> "tf.parse_example"; 20 | "Inputs and Readers" -> "parse_single_example" -> "tf.parse_single_example"; 21 | "Inputs and Readers" -> "placeholder" -> "tf.placeholder"; 22 | "Inputs and Readers" -> "placeholder_with_default" -> "tf.placeholder_with_default"; 23 | "Inputs and Readers" -> "QueueBase" -> "tf.QueueBase"; 24 | "Inputs and Readers" -> "RandomShuffleQueue" -> "tf.RandomShuffleQueue"; 25 | "Inputs and Readers" -> "range_input_producer" -> "tf.range_input_producer"; 26 | "Inputs and Readers" -> "read_file" -> "tf.read_file"; 27 | "Inputs and Readers" -> "ReaderBase" -> "tf.ReaderBase"; 28 | "Inputs and Readers" -> "shuffle_batch" -> "tf.shuffle_batch"; 29 | "Inputs and Readers" -> "shuffle_batch_join" -> "tf.shuffle_batch_join"; 30 | "Inputs and Readers" -> "size" -> "tf.size"; 31 | "Inputs and Readers" -> "slice_input_producer" -> "tf.slice_input_producer"; 32 | "Inputs and Readers" -> "sparse_placeholder" -> "tf.sparse_placeholder"; 33 | "Inputs and Readers" -> "string_input_producer" -> "tf.string_input_producer"; 34 | "Inputs and Readers" -> "TextLineReader" -> "tf.TextLineReader"; 35 | "Inputs and Readers" -> "TFRecordReader" -> "tf.TFRecordReader"; 36 | "Inputs and Readers" -> "VarLenFeature" -> "tf.VarLenFeature"; 37 | "Inputs and Readers" -> "WholeFileReader" -> "tf.WholeFileReader"; 38 | } 39 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Layers (contrib).dot: -------------------------------------------------------------------------------- 1 | digraph "Layers (contrib)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Layers (contrib)" -> "apply_regularization" -> 
"tf.contrib.layers.apply_regularization"; 5 | "Layers (contrib)" -> "avg_pool2d" -> "tf.contrib.layers.avg_pool2d"; 6 | "Layers (contrib)" -> "batch_norm" -> "tf.contrib.layers.batch_norm"; 7 | "Layers (contrib)" -> "convolution2d" -> "tf.contrib.layers.convolution2d"; 8 | "Layers (contrib)" -> "convolution2d_in_plane" -> "tf.contrib.layers.convolution2d_in_plane"; 9 | "Layers (contrib)" -> "convolution2d_transpose" -> "tf.contrib.layers.convolution2d_transpose"; 10 | "Layers (contrib)" -> "flatten" -> "tf.contrib.layers.flatten"; 11 | "Layers (contrib)" -> "fully_connected" -> "tf.contrib.layers.fully_connected"; 12 | "Layers (contrib)" -> "l1_regularizer" -> "tf.contrib.layers.l1_regularizer"; 13 | "Layers (contrib)" -> "l2_regularizer" -> "tf.contrib.layers.l2_regularizer"; 14 | "Layers (contrib)" -> "max_pool2d" -> "tf.contrib.layers.max_pool2d"; 15 | "Layers (contrib)" -> "one_hot_encoding" -> "tf.contrib.layers.one_hot_encoding"; 16 | "Layers (contrib)" -> "optimize_loss" -> "tf.contrib.layers.optimize_loss"; 17 | "Layers (contrib)" -> "repeat" -> "tf.contrib.layers.repeat"; 18 | "Layers (contrib)" -> "separable_convolution2d" -> "tf.contrib.layers.separable_convolution2d"; 19 | "Layers (contrib)" -> "stack" -> "tf.contrib.layers.stack"; 20 | "Layers (contrib)" -> "sum_regularizer" -> "tf.contrib.layers.sum_regularizer"; 21 | "Layers (contrib)" -> "summarize_activation" -> "tf.contrib.layers.summarize_activation"; 22 | "Layers (contrib)" -> "summarize_activations" -> "tf.contrib.layers.summarize_activations"; 23 | "Layers (contrib)" -> "summarize_collection" -> "tf.contrib.layers.summarize_collection"; 24 | "Layers (contrib)" -> "summarize_tensor" -> "tf.contrib.layers.summarize_tensor"; 25 | "Layers (contrib)" -> "summarize_tensors" -> "tf.contrib.layers.summarize_tensors"; 26 | "Layers (contrib)" -> "unit_norm" -> "tf.contrib.layers.unit_norm"; 27 | "Layers (contrib)" -> "variance_scaling_initializer" -> "tf.contrib.layers.variance_scaling_initializer"; 28 | "Layers (contrib)" -> "xavier_initializer" -> "tf.contrib.layers.xavier_initializer"; 29 | "Layers (contrib)" -> "xavier_initializer_conv2d" -> "tf.contrib.layers.xavier_initializer_conv2d"; 30 | } 31 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Learn (contrib).dot: -------------------------------------------------------------------------------- 1 | digraph "Learn (contrib)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Learn (contrib)" -> "BaseEstimator" -> "tf.contrib.learn.BaseEstimator"; 5 | "Learn (contrib)" -> "DNNClassifier" -> "tf.contrib.learn.DNNClassifier"; 6 | "Learn (contrib)" -> "DNNRegressor" -> "tf.contrib.learn.DNNRegressor"; 7 | "Learn (contrib)" -> "Estimator" -> "tf.contrib.learn.Estimator"; 8 | "Learn (contrib)" -> "evaluate" -> "tf.contrib.learn.evaluate"; 9 | "Learn (contrib)" -> "extract_dask_data" -> "tf.contrib.learn.extract_dask_data"; 10 | "Learn (contrib)" -> "extract_dask_labels" -> "tf.contrib.learn.extract_dask_labels"; 11 | "Learn (contrib)" -> "extract_pandas_data" -> "tf.contrib.learn.extract_pandas_data"; 12 | "Learn (contrib)" -> "extract_pandas_labels" -> "tf.contrib.learn.extract_pandas_labels"; 13 | "Learn (contrib)" -> "extract_pandas_matrix" -> "tf.contrib.learn.extract_pandas_matrix"; 14 | "Learn (contrib)" -> "infer" -> "tf.contrib.learn.infer"; 15 | "Learn (contrib)" -> "LinearClassifier" -> "tf.contrib.learn.LinearClassifier"; 16 | "Learn (contrib)" -> "LinearRegressor" -> "tf.contrib.learn.LinearRegressor"; 
17 | "Learn (contrib)" -> "ModeKeys" -> "tf.contrib.learn.ModeKeys"; 18 | "Learn (contrib)" -> "NanLossDuringTrainingError" -> "tf.contrib.learn.NanLossDuringTrainingError"; 19 | "Learn (contrib)" -> "read_batch_examples" -> "tf.contrib.learn.read_batch_examples"; 20 | "Learn (contrib)" -> "read_batch_features" -> "tf.contrib.learn.read_batch_features"; 21 | "Learn (contrib)" -> "read_batch_record_features" -> "tf.contrib.learn.read_batch_record_features"; 22 | "Learn (contrib)" -> "run_feeds" -> "tf.contrib.learn.run_feeds"; 23 | "Learn (contrib)" -> "run_n" -> "tf.contrib.learn.run_n"; 24 | "Learn (contrib)" -> "RunConfig" -> "tf.contrib.learn.RunConfig"; 25 | "Learn (contrib)" -> "TensorFlowClassifier" -> "tf.contrib.learn.TensorFlowClassifier"; 26 | "Learn (contrib)" -> "TensorFlowDNNClassifier" -> "tf.contrib.learn.TensorFlowDNNClassifier"; 27 | "Learn (contrib)" -> "TensorFlowDNNRegressor" -> "tf.contrib.learn.TensorFlowDNNRegressor"; 28 | "Learn (contrib)" -> "TensorFlowEstimator" -> "tf.contrib.learn.TensorFlowEstimator"; 29 | "Learn (contrib)" -> "TensorFlowLinearClassifier" -> "tf.contrib.learn.TensorFlowLinearClassifier"; 30 | "Learn (contrib)" -> "TensorFlowLinearRegressor" -> "tf.contrib.learn.TensorFlowLinearRegressor"; 31 | "Learn (contrib)" -> "TensorFlowRegressor" -> "tf.contrib.learn.TensorFlowRegressor"; 32 | "Learn (contrib)" -> "TensorFlowRNNClassifier" -> "tf.contrib.learn.TensorFlowRNNClassifier"; 33 | "Learn (contrib)" -> "TensorFlowRNNRegressor" -> "tf.contrib.learn.TensorFlowRNNRegressor"; 34 | "Learn (contrib)" -> "train" -> "tf.contrib.learn.train"; 35 | } 36 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Losses (contrib).dot: -------------------------------------------------------------------------------- 1 | digraph "Losses (contrib)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Losses (contrib)" -> "absolute_difference" -> "tf.contrib.losses.absolute_difference"; 5 | "Losses (contrib)" -> "add_loss" -> "tf.contrib.losses.add_loss"; 6 | "Losses (contrib)" -> "cosine_distance" -> "tf.contrib.losses.cosine_distance"; 7 | "Losses (contrib)" -> "get_losses" -> "tf.contrib.losses.get_losses"; 8 | "Losses (contrib)" -> "get_regularization_losses" -> "tf.contrib.losses.get_regularization_losses"; 9 | "Losses (contrib)" -> "get_total_loss" -> "tf.contrib.losses.get_total_loss"; 10 | "Losses (contrib)" -> "log_loss" -> "tf.contrib.losses.log_loss"; 11 | "Losses (contrib)" -> "sigmoid_cross_entropy" -> "tf.contrib.losses.sigmoid_cross_entropy"; 12 | "Losses (contrib)" -> "softmax_cross_entropy" -> "tf.contrib.losses.softmax_cross_entropy"; 13 | "Losses (contrib)" -> "sum_of_pairwise_squares" -> "tf.contrib.losses.sum_of_pairwise_squares"; 14 | "Losses (contrib)" -> "sum_of_squares" -> "tf.contrib.losses.sum_of_squares"; 15 | } 16 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Math.dot: -------------------------------------------------------------------------------- 1 | digraph "Math"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Math" -> "abs" -> "tf.abs"; 5 | "Math" -> "accumulate_n" -> "tf.accumulate_n"; 6 | "Math" -> "acos" -> "tf.acos"; 7 | "Math" -> "add" -> "tf.add"; 8 | "Math" -> "add_n" -> "tf.add_n"; 9 | "Math" -> "argmax" -> "tf.argmax"; 10 | "Math" -> "argmin" -> "tf.argmin"; 11 | "Math" -> "asin" -> "tf.asin"; 12 | "Math" -> "atan" -> "tf.atan"; 13 | "Math" -> "batch_cholesky" -> "tf.batch_cholesky"; 14 | "Math" 
-> "batch_cholesky_solve" -> "tf.batch_cholesky_solve"; 15 | "Math" -> "batch_fft" -> "tf.batch_fft"; 16 | "Math" -> "batch_fft2d" -> "tf.batch_fft2d"; 17 | "Math" -> "batch_fft3d" -> "tf.batch_fft3d"; 18 | "Math" -> "batch_ifft" -> "tf.batch_ifft"; 19 | "Math" -> "batch_ifft2d" -> "tf.batch_ifft2d"; 20 | "Math" -> "batch_ifft3d" -> "tf.batch_ifft3d"; 21 | "Math" -> "batch_matmul" -> "tf.batch_matmul"; 22 | "Math" -> "batch_matrix_band_part" -> "tf.batch_matrix_band_part"; 23 | "Math" -> "batch_matrix_determinant" -> "tf.batch_matrix_determinant"; 24 | "Math" -> "batch_matrix_diag" -> "tf.batch_matrix_diag"; 25 | "Math" -> "batch_matrix_diag_part" -> "tf.batch_matrix_diag_part"; 26 | "Math" -> "batch_matrix_inverse" -> "tf.batch_matrix_inverse"; 27 | "Math" -> "batch_matrix_set_diag" -> "tf.batch_matrix_set_diag"; 28 | "Math" -> "batch_matrix_solve" -> "tf.batch_matrix_solve"; 29 | "Math" -> "batch_matrix_solve_ls" -> "tf.batch_matrix_solve_ls"; 30 | "Math" -> "batch_matrix_transpose" -> "tf.batch_matrix_transpose"; 31 | "Math" -> "batch_matrix_triangular_solve" -> "tf.batch_matrix_triangular_solve"; 32 | "Math" -> "batch_self_adjoint_eig" -> "tf.batch_self_adjoint_eig"; 33 | "Math" -> "ceil" -> "tf.ceil"; 34 | "Math" -> "cholesky" -> "tf.cholesky"; 35 | "Math" -> "cholesky_solve" -> "tf.cholesky_solve"; 36 | "Math" -> "complex" -> "tf.complex"; 37 | "Math" -> "complex_abs" -> "tf.complex_abs"; 38 | "Math" -> "conj" -> "tf.conj"; 39 | "Math" -> "cos" -> "tf.cos"; 40 | "Math" -> "cross" -> "tf.cross"; 41 | "Math" -> "cumprod" -> "tf.cumprod"; 42 | "Math" -> "cumsum" -> "tf.cumsum"; 43 | "Math" -> "diag" -> "tf.diag"; 44 | "Math" -> "diag_part" -> "tf.diag_part"; 45 | "Math" -> "digamma" -> "tf.digamma"; 46 | "Math" -> "div" -> "tf.div"; 47 | "Math" -> "edit_distance" -> "tf.edit_distance"; 48 | "Math" -> "erf" -> "tf.erf"; 49 | "Math" -> "erfc" -> "tf.erfc"; 50 | "Math" -> "exp" -> "tf.exp"; 51 | "Math" -> "fft" -> "tf.fft"; 52 | "Math" -> "fft2d" -> "tf.fft2d"; 53 | "Math" -> "fft3d" -> "tf.fft3d"; 54 | "Math" -> "floor" -> "tf.floor"; 55 | "Math" -> "floordiv" -> "tf.floordiv"; 56 | "Math" -> "ifft" -> "tf.ifft"; 57 | "Math" -> "ifft2d" -> "tf.ifft2d"; 58 | "Math" -> "ifft3d" -> "tf.ifft3d"; 59 | "Math" -> "igamma" -> "tf.igamma"; 60 | "Math" -> "igammac" -> "tf.igammac"; 61 | "Math" -> "imag" -> "tf.imag"; 62 | "Math" -> "inv" -> "tf.inv"; 63 | "Math" -> "invert_permutation" -> "tf.invert_permutation"; 64 | "Math" -> "lbeta" -> "tf.lbeta"; 65 | "Math" -> "lgamma" -> "tf.lgamma"; 66 | "Math" -> "listdiff" -> "tf.listdiff"; 67 | "Math" -> "log" -> "tf.log"; 68 | "Math" -> "matmul" -> "tf.matmul"; 69 | "Math" -> "matrix_determinant" -> "tf.matrix_determinant"; 70 | "Math" -> "matrix_inverse" -> "tf.matrix_inverse"; 71 | "Math" -> "matrix_solve" -> "tf.matrix_solve"; 72 | "Math" -> "matrix_solve_ls" -> "tf.matrix_solve_ls"; 73 | "Math" -> "matrix_triangular_solve" -> "tf.matrix_triangular_solve"; 74 | "Math" -> "maximum" -> "tf.maximum"; 75 | "Math" -> "minimum" -> "tf.minimum"; 76 | "Math" -> "mod" -> "tf.mod"; 77 | "Math" -> "mul" -> "tf.mul"; 78 | "Math" -> "neg" -> "tf.neg"; 79 | "Math" -> "polygamma" -> "tf.polygamma"; 80 | "Math" -> "pow" -> "tf.pow"; 81 | "Math" -> "real" -> "tf.real"; 82 | "Math" -> "reduce_all" -> "tf.reduce_all"; 83 | "Math" -> "reduce_any" -> "tf.reduce_any"; 84 | "Math" -> "reduce_max" -> "tf.reduce_max"; 85 | "Math" -> "reduce_mean" -> "tf.reduce_mean"; 86 | "Math" -> "reduce_min" -> "tf.reduce_min"; 87 | "Math" -> "reduce_prod" -> "tf.reduce_prod"; 88 | "Math" 
-> "reduce_sum" -> "tf.reduce_sum"; 89 | "Math" -> "round" -> "tf.round"; 90 | "Math" -> "rsqrt" -> "tf.rsqrt"; 91 | "Math" -> "scalar_mul" -> "tf.scalar_mul"; 92 | "Math" -> "segment_max" -> "tf.segment_max"; 93 | "Math" -> "segment_mean" -> "tf.segment_mean"; 94 | "Math" -> "segment_min" -> "tf.segment_min"; 95 | "Math" -> "segment_prod" -> "tf.segment_prod"; 96 | "Math" -> "segment_sum" -> "tf.segment_sum"; 97 | "Math" -> "self_adjoint_eig" -> "tf.self_adjoint_eig"; 98 | "Math" -> "sign" -> "tf.sign"; 99 | "Math" -> "sin" -> "tf.sin"; 100 | "Math" -> "sparse_segment_mean" -> "tf.sparse_segment_mean"; 101 | "Math" -> "sparse_segment_sqrt_n" -> "tf.sparse_segment_sqrt_n"; 102 | "Math" -> "sparse_segment_sqrt_n_grad" -> "tf.sparse_segment_sqrt_n_grad"; 103 | "Math" -> "sparse_segment_sum" -> "tf.sparse_segment_sum"; 104 | "Math" -> "sqrt" -> "tf.sqrt"; 105 | "Math" -> "square" -> "tf.square"; 106 | "Math" -> "squared_difference" -> "tf.squared_difference"; 107 | "Math" -> "sub" -> "tf.sub"; 108 | "Math" -> "tan" -> "tf.tan"; 109 | "Math" -> "trace" -> "tf.trace"; 110 | "Math" -> "transpose" -> "tf.transpose"; 111 | "Math" -> "truediv" -> "tf.truediv"; 112 | "Math" -> "unique" -> "tf.unique"; 113 | "Math" -> "unsorted_segment_sum" -> "tf.unsorted_segment_sum"; 114 | "Math" -> "where" -> "tf.where"; 115 | "Math" -> "zeta" -> "tf.zeta"; 116 | } 117 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Metrics (contrib).dot: -------------------------------------------------------------------------------- 1 | digraph "Metrics (contrib)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Metrics (contrib)" -> "accuracy" -> "tf.contrib.metrics.accuracy"; 5 | "Metrics (contrib)" -> "aggregate_metric_map" -> "tf.contrib.metrics.aggregate_metric_map"; 6 | "Metrics (contrib)" -> "aggregate_metrics" -> "tf.contrib.metrics.aggregate_metrics"; 7 | "Metrics (contrib)" -> "auc_using_histogram" -> "tf.contrib.metrics.auc_using_histogram"; 8 | "Metrics (contrib)" -> "confusion_matrix" -> "tf.contrib.metrics.confusion_matrix"; 9 | "Metrics (contrib)" -> "set_difference" -> "tf.contrib.metrics.set_difference"; 10 | "Metrics (contrib)" -> "set_intersection" -> "tf.contrib.metrics.set_intersection"; 11 | "Metrics (contrib)" -> "set_size" -> "tf.contrib.metrics.set_size"; 12 | "Metrics (contrib)" -> "set_union" -> "tf.contrib.metrics.set_union"; 13 | "Metrics (contrib)" -> "streaming_accuracy" -> "tf.contrib.metrics.streaming_accuracy"; 14 | "Metrics (contrib)" -> "streaming_auc" -> "tf.contrib.metrics.streaming_auc"; 15 | "Metrics (contrib)" -> "streaming_mean" -> "tf.contrib.metrics.streaming_mean"; 16 | "Metrics (contrib)" -> "streaming_mean_absolute_error" -> "tf.contrib.metrics.streaming_mean_absolute_error"; 17 | "Metrics (contrib)" -> "streaming_mean_cosine_distance" -> "tf.contrib.metrics.streaming_mean_cosine_distance"; 18 | "Metrics (contrib)" -> "streaming_mean_iou" -> "tf.contrib.metrics.streaming_mean_iou"; 19 | "Metrics (contrib)" -> "streaming_mean_relative_error" -> "tf.contrib.metrics.streaming_mean_relative_error"; 20 | "Metrics (contrib)" -> "streaming_mean_squared_error" -> "tf.contrib.metrics.streaming_mean_squared_error"; 21 | "Metrics (contrib)" -> "streaming_percentage_less" -> "tf.contrib.metrics.streaming_percentage_less"; 22 | "Metrics (contrib)" -> "streaming_precision" -> "tf.contrib.metrics.streaming_precision"; 23 | "Metrics (contrib)" -> "streaming_recall" -> "tf.contrib.metrics.streaming_recall"; 24 | "Metrics 
(contrib)" -> "streaming_recall_at_k" -> "tf.contrib.metrics.streaming_recall_at_k"; 25 | "Metrics (contrib)" -> "streaming_root_mean_squared_error" -> "tf.contrib.metrics.streaming_root_mean_squared_error"; 26 | "Metrics (contrib)" -> "streaming_sparse_precision_at_k" -> "tf.contrib.metrics.streaming_sparse_precision_at_k"; 27 | "Metrics (contrib)" -> "streaming_sparse_recall_at_k" -> "tf.contrib.metrics.streaming_sparse_recall_at_k"; 28 | } 29 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Monitors (contrib).dot: -------------------------------------------------------------------------------- 1 | digraph "Monitors (contrib)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Monitors (contrib)" -> "BaseMonitor" -> "tf.contrib.monitors.BaseMonitor"; 5 | "Monitors (contrib)" -> "CaptureVariable" -> "tf.contrib.monitors.CaptureVariable"; 6 | "Monitors (contrib)" -> "CheckpointSaver" -> "tf.contrib.monitors.CheckpointSaver"; 7 | "Monitors (contrib)" -> "EveryN" -> "tf.contrib.monitors.EveryN"; 8 | "Monitors (contrib)" -> "ExportMonitor" -> "tf.contrib.monitors.ExportMonitor"; 9 | "Monitors (contrib)" -> "get_default_monitors" -> "tf.contrib.monitors.get_default_monitors"; 10 | "Monitors (contrib)" -> "GraphDump" -> "tf.contrib.monitors.GraphDump"; 11 | "Monitors (contrib)" -> "LoggingTrainable" -> "tf.contrib.monitors.LoggingTrainable"; 12 | "Monitors (contrib)" -> "NanLoss" -> "tf.contrib.monitors.NanLoss"; 13 | "Monitors (contrib)" -> "PrintTensor" -> "tf.contrib.monitors.PrintTensor"; 14 | "Monitors (contrib)" -> "StepCounter" -> "tf.contrib.monitors.StepCounter"; 15 | "Monitors (contrib)" -> "StopAtStep" -> "tf.contrib.monitors.StopAtStep"; 16 | "Monitors (contrib)" -> "SummarySaver" -> "tf.contrib.monitors.SummarySaver"; 17 | "Monitors (contrib)" -> "SummaryWriterCache" -> "tf.contrib.monitors.SummaryWriterCache"; 18 | "Monitors (contrib)" -> "ValidationMonitor" -> "tf.contrib.monitors.ValidationMonitor"; 19 | } 20 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Neural Network RNN Cells.dot: -------------------------------------------------------------------------------- 1 | digraph "Neural Network RNN Cells"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Neural Network RNN Cells" -> "BasicLSTMCell" -> "tf.BasicLSTMCell"; 5 | "Neural Network RNN Cells" -> "BasicRNNCell" -> "tf.BasicRNNCell"; 6 | "Neural Network RNN Cells" -> "DropoutWrapper" -> "tf.DropoutWrapper"; 7 | "Neural Network RNN Cells" -> "EmbeddingWrapper" -> "tf.EmbeddingWrapper"; 8 | "Neural Network RNN Cells" -> "GRUCell" -> "tf.GRUCell"; 9 | "Neural Network RNN Cells" -> "InputProjectionWrapper" -> "tf.InputProjectionWrapper"; 10 | "Neural Network RNN Cells" -> "LSTMCell" -> "tf.LSTMCell"; 11 | "Neural Network RNN Cells" -> "LSTMStateTuple" -> "tf.LSTMStateTuple"; 12 | "Neural Network RNN Cells" -> "MultiRNNCell" -> "tf.MultiRNNCell"; 13 | "Neural Network RNN Cells" -> "OutputProjectionWrapper" -> "tf.OutputProjectionWrapper"; 14 | "Neural Network RNN Cells" -> "RNNCell" -> "tf.RNNCell"; 15 | } 16 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Neural Network.dot: -------------------------------------------------------------------------------- 1 | digraph "Neural Network"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Neural Network" -> "atrous_conv2d" -> "tf.atrous_conv2d"; 5 | "Neural Network" -> "avg_pool" -> 
"tf.avg_pool"; 6 | "Neural Network" -> "avg_pool3d" -> "tf.avg_pool3d"; 7 | "Neural Network" -> "batch_normalization" -> "tf.batch_normalization"; 8 | "Neural Network" -> "bias_add" -> "tf.bias_add"; 9 | "Neural Network" -> "bidirectional_rnn" -> "tf.bidirectional_rnn"; 10 | "Neural Network" -> "compute_accidental_hits" -> "tf.compute_accidental_hits"; 11 | "Neural Network" -> "conv2d" -> "tf.conv2d"; 12 | "Neural Network" -> "conv2d_transpose" -> "tf.conv2d_transpose"; 13 | "Neural Network" -> "conv3d" -> "tf.conv3d"; 14 | "Neural Network" -> "ctc_beam_search_decoder" -> "tf.ctc_beam_search_decoder"; 15 | "Neural Network" -> "ctc_greedy_decoder" -> "tf.ctc_greedy_decoder"; 16 | "Neural Network" -> "ctc_loss" -> "tf.ctc_loss"; 17 | "Neural Network" -> "depthwise_conv2d" -> "tf.depthwise_conv2d"; 18 | "Neural Network" -> "depthwise_conv2d_native" -> "tf.depthwise_conv2d_native"; 19 | "Neural Network" -> "dilation2d" -> "tf.dilation2d"; 20 | "Neural Network" -> "dropout" -> "tf.dropout"; 21 | "Neural Network" -> "dynamic_rnn" -> "tf.dynamic_rnn"; 22 | "Neural Network" -> "elu" -> "tf.elu"; 23 | "Neural Network" -> "embedding_lookup" -> "tf.embedding_lookup"; 24 | "Neural Network" -> "embedding_lookup_sparse" -> "tf.embedding_lookup_sparse"; 25 | "Neural Network" -> "erosion2d" -> "tf.erosion2d"; 26 | "Neural Network" -> "fixed_unigram_candidate_sampler" -> "tf.fixed_unigram_candidate_sampler"; 27 | "Neural Network" -> "in_top_k" -> "tf.in_top_k"; 28 | "Neural Network" -> "l2_loss" -> "tf.l2_loss"; 29 | "Neural Network" -> "l2_normalize" -> "tf.l2_normalize"; 30 | "Neural Network" -> "learned_unigram_candidate_sampler" -> "tf.learned_unigram_candidate_sampler"; 31 | "Neural Network" -> "local_response_normalization" -> "tf.local_response_normalization"; 32 | "Neural Network" -> "log_softmax" -> "tf.log_softmax"; 33 | "Neural Network" -> "log_uniform_candidate_sampler" -> "tf.log_uniform_candidate_sampler"; 34 | "Neural Network" -> "max_pool" -> "tf.max_pool"; 35 | "Neural Network" -> "max_pool3d" -> "tf.max_pool3d"; 36 | "Neural Network" -> "max_pool_with_argmax" -> "tf.max_pool_with_argmax"; 37 | "Neural Network" -> "moments" -> "tf.moments"; 38 | "Neural Network" -> "nce_loss" -> "tf.nce_loss"; 39 | "Neural Network" -> "normalize_moments" -> "tf.normalize_moments"; 40 | "Neural Network" -> "relu" -> "tf.relu"; 41 | "Neural Network" -> "relu6" -> "tf.relu6"; 42 | "Neural Network" -> "rnn" -> "tf.rnn"; 43 | "Neural Network" -> "sampled_softmax_loss" -> "tf.sampled_softmax_loss"; 44 | "Neural Network" -> "separable_conv2d" -> "tf.separable_conv2d"; 45 | "Neural Network" -> "sigmoid" -> "tf.sigmoid"; 46 | "Neural Network" -> "sigmoid_cross_entropy_with_logits" -> "tf.sigmoid_cross_entropy_with_logits"; 47 | "Neural Network" -> "softmax" -> "tf.softmax"; 48 | "Neural Network" -> "softmax_cross_entropy_with_logits" -> "tf.softmax_cross_entropy_with_logits"; 49 | "Neural Network" -> "softplus" -> "tf.softplus"; 50 | "Neural Network" -> "softsign" -> "tf.softsign"; 51 | "Neural Network" -> "sparse_softmax_cross_entropy_with_logits" -> "tf.sparse_softmax_cross_entropy_with_logits"; 52 | "Neural Network" -> "state_saving_rnn" -> "tf.state_saving_rnn"; 53 | "Neural Network" -> "sufficient_statistics" -> "tf.sufficient_statistics"; 54 | "Neural Network" -> "tanh" -> "tf.tanh"; 55 | "Neural Network" -> "top_k" -> "tf.top_k"; 56 | "Neural Network" -> "uniform_candidate_sampler" -> "tf.uniform_candidate_sampler"; 57 | "Neural Network" -> "weighted_cross_entropy_with_logits" -> 
"tf.weighted_cross_entropy_with_logits"; 58 | } 59 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Running Graphs.dot: -------------------------------------------------------------------------------- 1 | digraph "Running Graphs"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Running Graphs" -> "AbortedError" -> "tf.AbortedError"; 5 | "Running Graphs" -> "AlreadyExistsError" -> "tf.AlreadyExistsError"; 6 | "Running Graphs" -> "CancelledError" -> "tf.CancelledError"; 7 | "Running Graphs" -> "DataLossError" -> "tf.DataLossError"; 8 | "Running Graphs" -> "DeadlineExceededError" -> "tf.DeadlineExceededError"; 9 | "Running Graphs" -> "FailedPreconditionError" -> "tf.FailedPreconditionError"; 10 | "Running Graphs" -> "get_default_session" -> "tf.get_default_session"; 11 | "Running Graphs" -> "InteractiveSession" -> "tf.InteractiveSession"; 12 | "Running Graphs" -> "InternalError" -> "tf.InternalError"; 13 | "Running Graphs" -> "InvalidArgumentError" -> "tf.InvalidArgumentError"; 14 | "Running Graphs" -> "NotFoundError" -> "tf.NotFoundError"; 15 | "Running Graphs" -> "OpError" -> "tf.OpError"; 16 | "Running Graphs" -> "OutOfRangeError" -> "tf.OutOfRangeError"; 17 | "Running Graphs" -> "PermissionDeniedError" -> "tf.PermissionDeniedError"; 18 | "Running Graphs" -> "ResourceExhaustedError" -> "tf.ResourceExhaustedError"; 19 | "Running Graphs" -> "Session" -> "tf.Session"; 20 | "Running Graphs" -> "UnauthenticatedError" -> "tf.UnauthenticatedError"; 21 | "Running Graphs" -> "UnavailableError" -> "tf.UnavailableError"; 22 | "Running Graphs" -> "UnimplementedError" -> "tf.UnimplementedError"; 23 | "Running Graphs" -> "UnknownError" -> "tf.UnknownError"; 24 | } 25 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Sparse Tensors.dot: -------------------------------------------------------------------------------- 1 | digraph "Sparse Tensors"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Sparse Tensors" -> "shape" -> "tf.shape"; 5 | "Sparse Tensors" -> "sparse_add" -> "tf.sparse_add"; 6 | "Sparse Tensors" -> "sparse_concat" -> "tf.sparse_concat"; 7 | "Sparse Tensors" -> "sparse_fill_empty_rows" -> "tf.sparse_fill_empty_rows"; 8 | "Sparse Tensors" -> "sparse_maximum" -> "tf.sparse_maximum"; 9 | "Sparse Tensors" -> "sparse_merge" -> "tf.sparse_merge"; 10 | "Sparse Tensors" -> "sparse_minimum" -> "tf.sparse_minimum"; 11 | "Sparse Tensors" -> "sparse_reduce_sum" -> "tf.sparse_reduce_sum"; 12 | "Sparse Tensors" -> "sparse_reorder" -> "tf.sparse_reorder"; 13 | "Sparse Tensors" -> "sparse_reset_shape" -> "tf.sparse_reset_shape"; 14 | "Sparse Tensors" -> "sparse_reshape" -> "tf.sparse_reshape"; 15 | "Sparse Tensors" -> "sparse_retain" -> "tf.sparse_retain"; 16 | "Sparse Tensors" -> "sparse_softmax" -> "tf.sparse_softmax"; 17 | "Sparse Tensors" -> "sparse_split" -> "tf.sparse_split"; 18 | "Sparse Tensors" -> "sparse_tensor_dense_matmul" -> "tf.sparse_tensor_dense_matmul"; 19 | "Sparse Tensors" -> "sparse_tensor_to_dense" -> "tf.sparse_tensor_to_dense"; 20 | "Sparse Tensors" -> "sparse_to_dense" -> "tf.sparse_to_dense"; 21 | "Sparse Tensors" -> "sparse_to_indicator" -> "tf.sparse_to_indicator"; 22 | "Sparse Tensors" -> "SparseTensor" -> "tf.SparseTensor"; 23 | "Sparse Tensors" -> "SparseTensorValue" -> "tf.SparseTensorValue"; 24 | } 25 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Statistical 
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Statistical distributions (contrib).dot:
--------------------------------------------------------------------------------
1 | digraph "Statistical distributions (contrib)"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Statistical distributions (contrib)" -> "BaseDistribution" -> "tf.contrib.distributions.BaseDistribution";
5 | "Statistical distributions (contrib)" -> "batch_matrix_diag_transform" -> "tf.contrib.distributions.batch_matrix_diag_transform";
6 | "Statistical distributions (contrib)" -> "Bernoulli" -> "tf.contrib.distributions.Bernoulli";
7 | "Statistical distributions (contrib)" -> "Beta" -> "tf.contrib.distributions.Beta";
8 | "Statistical distributions (contrib)" -> "Categorical" -> "tf.contrib.distributions.Categorical";
9 | "Statistical distributions (contrib)" -> "Chi2" -> "tf.contrib.distributions.Chi2";
10 | "Statistical distributions (contrib)" -> "Dirichlet" -> "tf.contrib.distributions.Dirichlet";
11 | "Statistical distributions (contrib)" -> "DirichletMultinomial" -> "tf.contrib.distributions.DirichletMultinomial";
12 | "Statistical distributions (contrib)" -> "Distribution" -> "tf.contrib.distributions.Distribution";
13 | "Statistical distributions (contrib)" -> "Exponential" -> "tf.contrib.distributions.Exponential";
14 | "Statistical distributions (contrib)" -> "Gamma" -> "tf.contrib.distributions.Gamma";
15 | "Statistical distributions (contrib)" -> "InverseGamma" -> "tf.contrib.distributions.InverseGamma";
16 | "Statistical distributions (contrib)" -> "kl" -> "tf.contrib.distributions.kl";
17 | "Statistical distributions (contrib)" -> "Laplace" -> "tf.contrib.distributions.Laplace";
18 | "Statistical distributions (contrib)" -> "MultivariateNormalCholesky" -> "tf.contrib.distributions.MultivariateNormalCholesky";
19 | "Statistical distributions (contrib)" -> "MultivariateNormalDiag" -> "tf.contrib.distributions.MultivariateNormalDiag";
20 | "Statistical distributions (contrib)" -> "MultivariateNormalFull" -> "tf.contrib.distributions.MultivariateNormalFull";
21 | "Statistical distributions (contrib)" -> "Normal" -> "tf.contrib.distributions.Normal";
22 | "Statistical distributions (contrib)" -> "normal_congugates_known_sigma_predictive" -> "tf.contrib.distributions.normal_congugates_known_sigma_predictive";
23 | "Statistical distributions (contrib)" -> "normal_conjugates_known_sigma_posterior" -> "tf.contrib.distributions.normal_conjugates_known_sigma_posterior";
24 | "Statistical distributions (contrib)" -> "RegisterKL" -> "tf.contrib.distributions.RegisterKL";
25 | "Statistical distributions (contrib)" -> "StudentT" -> "tf.contrib.distributions.StudentT";
26 | "Statistical distributions (contrib)" -> "TransformedDistribution" -> "tf.contrib.distributions.TransformedDistribution";
27 | "Statistical distributions (contrib)" -> "Uniform" -> "tf.contrib.distributions.Uniform";
28 | }
29 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Strings.dot:
--------------------------------------------------------------------------------
1 | digraph "Strings"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Strings" -> "as_string" -> "tf.as_string";
5 | "Strings" -> "reduce_join" -> "tf.reduce_join";
6 | "Strings" -> "string_join" -> "tf.string_join";
7 | "Strings" -> "string_to_hash_bucket" -> "tf.string_to_hash_bucket";
8 | "Strings" -> "string_to_hash_bucket_fast" -> "tf.string_to_hash_bucket_fast";
9 | "Strings" -> "string_to_hash_bucket_strong" -> "tf.string_to_hash_bucket_strong";
10 | }
11 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Summary Operations.dot:
--------------------------------------------------------------------------------
1 | digraph "Summary Operations"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Summary Operations" -> "tensor_summary" -> "tf.tensor_summary";
5 | }
6 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Tensor Handle Operations.dot:
--------------------------------------------------------------------------------
1 | digraph "Tensor Handle Operations"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Tensor Handle Operations" -> "delete_session_tensor" -> "tf.delete_session_tensor";
5 | "Tensor Handle Operations" -> "get_session_handle" -> "tf.get_session_handle";
6 | "Tensor Handle Operations" -> "get_session_tensor" -> "tf.get_session_tensor";
7 | }
8 |
--------------------------------------------------------------------------------
/src/.obs/dot_parser4tf_csv_file/dots/Tensor Transformations.dot:
--------------------------------------------------------------------------------
1 | digraph "Tensor Transformations"{
2 | rankdir="LR"
3 | node [shape="box"]
4 | "Tensor Transformations" -> "batch_to_space" -> "tf.batch_to_space";
5 | "Tensor Transformations" -> "bitcast" -> "tf.bitcast";
6 | "Tensor Transformations" -> "boolean_mask" -> "tf.boolean_mask";
7 | "Tensor Transformations" -> "cast" -> "tf.cast";
8 | "Tensor Transformations" -> "concat" -> "tf.concat";
9 | "Tensor Transformations" -> "depth_to_space" -> "tf.depth_to_space";
10 | "Tensor Transformations" -> "dynamic_partition" -> "tf.dynamic_partition";
11 | "Tensor Transformations" -> "dynamic_stitch" -> "tf.dynamic_stitch";
12 | "Tensor Transformations" -> "expand_dims" -> "tf.expand_dims";
13 | "Tensor Transformations" -> "extract_image_patches" -> "tf.extract_image_patches";
14 | "Tensor Transformations" -> "gather" -> "tf.gather";
15 | "Tensor Transformations" -> "gather_nd" -> "tf.gather_nd";
16 | "Tensor Transformations" -> "meshgrid" -> "tf.meshgrid";
17 | "Tensor Transformations" -> "one_hot" -> "tf.one_hot";
18 | "Tensor Transformations" -> "pack" -> "tf.pack";
19 | "Tensor Transformations" -> "pad" -> "tf.pad";
20 | "Tensor Transformations" -> "rank" -> "tf.rank";
21 | "Tensor Transformations" -> "reshape" -> "tf.reshape";
22 | "Tensor Transformations" -> "reverse" -> "tf.reverse";
23 | "Tensor Transformations" -> "reverse_sequence" -> "tf.reverse_sequence";
24 | "Tensor Transformations" -> "saturate_cast" -> "tf.saturate_cast";
25 | "Tensor Transformations" -> "shape" -> "tf.shape";
26 | "Tensor Transformations" -> "shape_n" -> "tf.shape_n";
27 | "Tensor Transformations" -> "size" -> "tf.size";
28 | "Tensor Transformations" -> "slice" -> "tf.slice";
29 | "Tensor Transformations" -> "space_to_batch" -> "tf.space_to_batch";
30 | "Tensor Transformations" -> "space_to_depth" -> "tf.space_to_depth";
31 | "Tensor Transformations" -> "split" -> "tf.split";
32 | "Tensor Transformations" -> "squeeze" -> "tf.squeeze";
33 | "Tensor Transformations" -> "string_to_number" -> "tf.string_to_number";
34 | "Tensor Transformations" -> "tile" -> "tf.tile";
35 | "Tensor Transformations" -> "to_bfloat16" -> "tf.to_bfloat16";
36 | "Tensor Transformations" -> "to_double" -> "tf.to_double";
37 | "Tensor Transformations" -> "to_float" -> "tf.to_float";
38 | "Tensor Transformations" -> "to_int32" -> "tf.to_int32";
"tf.to_int64"; 40 | "Tensor Transformations" -> "transpose" -> "tf.transpose"; 41 | "Tensor Transformations" -> "unique_with_counts" -> "tf.unique_with_counts"; 42 | "Tensor Transformations" -> "unpack" -> "tf.unpack"; 43 | } 44 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/TensorArray Operations.dot: -------------------------------------------------------------------------------- 1 | digraph "TensorArray Operations"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "TensorArray Operations" -> "concat" -> "tf.concat"; 5 | "TensorArray Operations" -> "pack" -> "tf.pack"; 6 | "TensorArray Operations" -> "split" -> "tf.split"; 7 | "TensorArray Operations" -> "TensorArray" -> "tf.TensorArray"; 8 | "TensorArray Operations" -> "unpack" -> "tf.unpack"; 9 | } 10 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Testing.dot: -------------------------------------------------------------------------------- 1 | digraph "Testing"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Testing" -> "assert_equal_graph_def" -> "tf.assert_equal_graph_def"; 5 | "Testing" -> "compute_gradient" -> "tf.compute_gradient"; 6 | "Testing" -> "compute_gradient_error" -> "tf.compute_gradient_error"; 7 | "Testing" -> "get_temp_dir" -> "tf.get_temp_dir"; 8 | "Testing" -> "is_built_with_cuda" -> "tf.is_built_with_cuda"; 9 | "Testing" -> "main" -> "tf.main"; 10 | } 11 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Training.dot: -------------------------------------------------------------------------------- 1 | digraph "Training"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Training" -> "AdadeltaOptimizer" -> "tf.AdadeltaOptimizer"; 5 | "Training" -> "AdagradOptimizer" -> "tf.AdagradOptimizer"; 6 | "Training" -> "AdamOptimizer" -> "tf.AdamOptimizer"; 7 | "Training" -> "add_queue_runner" -> "tf.add_queue_runner"; 8 | "Training" -> "AggregationMethod" -> "tf.AggregationMethod"; 9 | "Training" -> "audio_summary" -> "tf.audio_summary"; 10 | "Training" -> "clip_by_average_norm" -> "tf.clip_by_average_norm"; 11 | "Training" -> "clip_by_global_norm" -> "tf.clip_by_global_norm"; 12 | "Training" -> "clip_by_norm" -> "tf.clip_by_norm"; 13 | "Training" -> "clip_by_value" -> "tf.clip_by_value"; 14 | "Training" -> "ClusterSpec" -> "tf.ClusterSpec"; 15 | "Training" -> "Coordinator" -> "tf.Coordinator"; 16 | "Training" -> "do_quantize_training_on_graphdef" -> "tf.do_quantize_training_on_graphdef"; 17 | "Training" -> "exponential_decay" -> "tf.exponential_decay"; 18 | "Training" -> "ExponentialMovingAverage" -> "tf.ExponentialMovingAverage"; 19 | "Training" -> "FtrlOptimizer" -> "tf.FtrlOptimizer"; 20 | "Training" -> "generate_checkpoint_state_proto" -> "tf.generate_checkpoint_state_proto"; 21 | "Training" -> "global_norm" -> "tf.global_norm"; 22 | "Training" -> "global_step" -> "tf.global_step"; 23 | "Training" -> "GradientDescentOptimizer" -> "tf.GradientDescentOptimizer"; 24 | "Training" -> "gradients" -> "tf.gradients"; 25 | "Training" -> "histogram_summary" -> "tf.histogram_summary"; 26 | "Training" -> "image_summary" -> "tf.image_summary"; 27 | "Training" -> "LooperThread" -> "tf.LooperThread"; 28 | "Training" -> "merge_all_summaries" -> "tf.merge_all_summaries"; 29 | "Training" -> "merge_summary" -> "tf.merge_summary"; 30 | "Training" -> "MomentumOptimizer" -> "tf.MomentumOptimizer"; 31 | "Training" -> "Optimizer" -> 
"tf.Optimizer"; 32 | "Training" -> "QueueRunner" -> "tf.QueueRunner"; 33 | "Training" -> "replica_device_setter" -> "tf.replica_device_setter"; 34 | "Training" -> "RMSPropOptimizer" -> "tf.RMSPropOptimizer"; 35 | "Training" -> "scalar_summary" -> "tf.scalar_summary"; 36 | "Training" -> "Server" -> "tf.Server"; 37 | "Training" -> "SessionManager" -> "tf.SessionManager"; 38 | "Training" -> "start_queue_runners" -> "tf.start_queue_runners"; 39 | "Training" -> "stop_gradient" -> "tf.stop_gradient"; 40 | "Training" -> "summary_iterator" -> "tf.summary_iterator"; 41 | "Training" -> "SummaryWriter" -> "tf.SummaryWriter"; 42 | "Training" -> "Supervisor" -> "tf.Supervisor"; 43 | "Training" -> "write_graph" -> "tf.write_graph"; 44 | "Training" -> "zero_fraction" -> "tf.zero_fraction"; 45 | } 46 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Utilities (contrib).dot: -------------------------------------------------------------------------------- 1 | digraph "Utilities (contrib)"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Utilities (contrib)" -> "constant_value" -> "tf.contrib.util.constant_value"; 5 | "Utilities (contrib)" -> "make_ndarray" -> "tf.contrib.util.make_ndarray"; 6 | "Utilities (contrib)" -> "make_tensor_proto" -> "tf.contrib.util.make_tensor_proto"; 7 | "Utilities (contrib)" -> "ops_used_by_graph_def" -> "tf.contrib.util.ops_used_by_graph_def"; 8 | "Utilities (contrib)" -> "stripped_op_list_for_graph" -> "tf.contrib.util.stripped_op_list_for_graph"; 9 | } 10 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Variables.dot: -------------------------------------------------------------------------------- 1 | digraph "Variables"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Variables" -> "all_variables" -> "tf.all_variables"; 5 | "Variables" -> "assert_variables_initialized" -> "tf.assert_variables_initialized"; 6 | "Variables" -> "assign" -> "tf.assign"; 7 | "Variables" -> "assign_add" -> "tf.assign_add"; 8 | "Variables" -> "assign_sub" -> "tf.assign_sub"; 9 | "Variables" -> "constant_initializer" -> "tf.constant_initializer"; 10 | "Variables" -> "count_up_to" -> "tf.count_up_to"; 11 | "Variables" -> "device" -> "tf.device"; 12 | "Variables" -> "export_meta_graph" -> "tf.export_meta_graph"; 13 | "Variables" -> "get_checkpoint_state" -> "tf.get_checkpoint_state"; 14 | "Variables" -> "get_variable" -> "tf.get_variable"; 15 | "Variables" -> "get_variable_scope" -> "tf.get_variable_scope"; 16 | "Variables" -> "import_meta_graph" -> "tf.import_meta_graph"; 17 | "Variables" -> "IndexedSlices" -> "tf.IndexedSlices"; 18 | "Variables" -> "initialize_all_variables" -> "tf.initialize_all_variables"; 19 | "Variables" -> "initialize_local_variables" -> "tf.initialize_local_variables"; 20 | "Variables" -> "initialize_variables" -> "tf.initialize_variables"; 21 | "Variables" -> "is_variable_initialized" -> "tf.is_variable_initialized"; 22 | "Variables" -> "latest_checkpoint" -> "tf.latest_checkpoint"; 23 | "Variables" -> "local_variables" -> "tf.local_variables"; 24 | "Variables" -> "make_template" -> "tf.make_template"; 25 | "Variables" -> "min_max_variable_partitioner" -> "tf.min_max_variable_partitioner"; 26 | "Variables" -> "moving_average_variables" -> "tf.moving_average_variables"; 27 | "Variables" -> "no_regularizer" -> "tf.no_regularizer"; 28 | "Variables" -> "ones_initializer" -> "tf.ones_initializer"; 29 | "Variables" -> "random_normal_initializer" 
-> "tf.random_normal_initializer"; 30 | "Variables" -> "random_uniform_initializer" -> "tf.random_uniform_initializer"; 31 | "Variables" -> "report_uninitialized_variables" -> "tf.report_uninitialized_variables"; 32 | "Variables" -> "Saver" -> "tf.Saver"; 33 | "Variables" -> "scatter_add" -> "tf.scatter_add"; 34 | "Variables" -> "scatter_sub" -> "tf.scatter_sub"; 35 | "Variables" -> "scatter_update" -> "tf.scatter_update"; 36 | "Variables" -> "sparse_mask" -> "tf.sparse_mask"; 37 | "Variables" -> "trainable_variables" -> "tf.trainable_variables"; 38 | "Variables" -> "truncated_normal_initializer" -> "tf.truncated_normal_initializer"; 39 | "Variables" -> "uniform_unit_scaling_initializer" -> "tf.uniform_unit_scaling_initializer"; 40 | "Variables" -> "update_checkpoint_state" -> "tf.update_checkpoint_state"; 41 | "Variables" -> "Variable" -> "tf.Variable"; 42 | "Variables" -> "variable_axis_size_partitioner" -> "tf.variable_axis_size_partitioner"; 43 | "Variables" -> "variable_op_scope" -> "tf.variable_op_scope"; 44 | "Variables" -> "variable_scope" -> "tf.variable_scope"; 45 | "Variables" -> "VariableScope" -> "tf.VariableScope"; 46 | "Variables" -> "zeros_initializer" -> "tf.zeros_initializer"; 47 | } 48 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/dots/Wraps python functions.dot: -------------------------------------------------------------------------------- 1 | digraph "Wraps python functions"{ 2 | rankdir="LR" 3 | node [shape="box"] 4 | "Wraps python functions" -> "py_func" -> "tf.py_func"; 5 | } 6 | -------------------------------------------------------------------------------- /src/.obs/dot_parser4tf_csv_file/tf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/the0demiurge/Python-Scripts/02bedbcd7909b0b25a79ba086cdb0b3a87e1652a/src/.obs/dot_parser4tf_csv_file/tf.pdf -------------------------------------------------------------------------------- /src/ComputerScience/Calculator_AST.py: -------------------------------------------------------------------------------- 1 | """Calculator, worte in AST in python 2 | """ 3 | # This could be implemented as string-based big-number calcaulation 4 | 5 | 6 | def ADD(x, y): 7 | return str(eval(x) + eval(y)) 8 | 9 | 10 | def SUB(x, y): 11 | return str(eval(x) - eval(y)) 12 | 13 | 14 | def POW(x, y): 15 | return str(eval(x) ** eval(y)) 16 | 17 | 18 | def MUL(x, y): 19 | return str(eval(x) * eval(y)) 20 | 21 | 22 | def DIV(x, y): 23 | return str(eval(x) / eval(y)) 24 | 25 | 26 | operations = { 27 | # 'op': [priority, function] 28 | '+': [1, ADD], 29 | '-': [1, SUB], 30 | '*': [2, MUL], 31 | '/': [2, DIV], 32 | '^': [3, POW] 33 | } 34 | 35 | 36 | class TreeNode(object): 37 | __slots__ = ('val', 'left', 'right') 38 | 39 | def __init__(self, val=None, left=None, right=None): 40 | self.val, self.left, self.right = val, left, right 41 | 42 | def __repr__(self): 43 | left, right = ' ', ' ' 44 | if self.left is not None: 45 | left = repr(self.left) + ' <- ' 46 | if self.right is not None: 47 | right = ' -> ' + repr(self.right) 48 | return '(' + left + repr(self.val) + right + ')' 49 | 50 | 51 | def split_atomic(string): 52 | """ 53 | @brief Splits expression to atoms. 
54 | 55 | @param string The string expression 56 | 57 | @return list[tuple("TYPE", 'value')] 58 | """ 59 | # removing useless parens 60 | while string.startswith('(') and string.endswith(')'): 61 | string = string[1:-1] 62 | result = list() 63 | if not string: 64 | return result 65 | head, tail = 0, 0 # head and tail are used to slice numbers out 66 | 67 | numeric = False # Stores the previously scanned atom type; it decides how the next atom is interpreted 68 | while tail < len(string): 69 | # if '+' '-' act as operators 70 | if string[tail] in {'+', '-'} and numeric: 71 | numeric = False 72 | result.append(('SYMB', string[tail])) 73 | # elif digits, or '+' '-' acting as a number's sign 74 | elif (string[tail].isdecimal() or string[tail] == '.') or (string[tail] in {'+', '-'} and not numeric): 75 | numeric = True 76 | head = tail 77 | tail += 1 78 | while tail < len(string) and (string[tail].isdecimal() or string[tail] == '.'): 79 | tail += 1 80 | num = string[head:tail] 81 | if num in {'+', '-'}: 82 | num += '1' 83 | result.append(('NUM', num)) 84 | tail -= 1 85 | # elif operators or parentheses 86 | elif string[tail] in {'*', '/', '^', '(', ')'}: 87 | symb = 'PAREN' if string[tail] in {'(', ')'} else 'SYMB' 88 | if numeric and string[tail] == '(': 89 | result.append(('SYMB', '*')) 90 | numeric = True if string[tail] == ')' else False 91 | result.append((symb, string[tail])) 92 | tail += 1 93 | return result 94 | 95 | 96 | def ast(atoms): 97 | """ 98 | @brief Receives atoms and returns an Abstract Syntax Tree 99 | for a calculator 100 | 101 | @param atoms The atoms split by split_atomic 102 | 103 | @return AST Tree Node 104 | """ 105 | if len(atoms) == 0: 106 | return 107 | # Remove useless parens 108 | if atoms[0][1] == '(' and atoms[-1][1] == ')': 109 | atoms = atoms[1:-1] 110 | 111 | parens = { 112 | '(': 1, 113 | ')': -1 114 | } 115 | paren_amounts = 0 116 | min_prior = 10 117 | min_prior_index = None 118 | 119 | # find the operator outside parentheses with the smallest priority 120 | for i, data in enumerate(atoms): 121 | TYPE, TOKEN = data 122 | # judge whether we are inside parentheses or not 123 | if TYPE == 'PAREN': 124 | paren_amounts += parens[TOKEN] 125 | 126 | elif TYPE == 'SYMB' and paren_amounts == 0: 127 | if operations[TOKEN][0] < min_prior: 128 | min_prior_index, min_prior = i, operations[TOKEN][0] 129 | 130 | if min_prior_index is None: 131 | return TreeNode(atoms[0][1]) 132 | 133 | left = ast(atoms[:min_prior_index]) 134 | right = ast(atoms[min_prior_index + 1:]) 135 | return TreeNode(atoms[min_prior_index][1], left, right) 136 | 137 | 138 | def solve(ast_root, DEBUG=False): 139 | """Traverse the AST and do the calculation 140 | """ 141 | if ast_root is None: 142 | raise ValueError('AST cannot be None') 143 | if ast_root.left is ast_root.right is None: 144 | return ast_root.val 145 | else: 146 | if DEBUG: 147 | left = solve(ast_root.left, DEBUG) 148 | right = solve(ast_root.right, DEBUG) 149 | result = operations[ast_root.val][1](left, right) 150 | print('Calculating:', ast_root.val, ast_root.left, '|', ast_root.right, '=>', left, ast_root.val, right, '=', result) 151 | return result 152 | else: 153 | return operations[ast_root.val][1](solve(ast_root.left), solve(ast_root.right)) 154 | 155 | 156 | def calculator(string): return solve(ast(split_atomic(string))) 157 | 158 | 159 | def main(): 160 | DEBUG = False 161 | prompt = ['Expression > ', 'DEBUG > '] 162 | try: 163 | while 1: 164 | string = input(prompt[DEBUG]) 165 | if string.upper().strip() == 'DEBUG': 166 | DEBUG = not DEBUG 167 | continue 168 | 169 | if DEBUG: 170 | print('Atoms :', list(zip(*split_atomic(string)))[1]) 171 | print('Tree :', repr(ast(split_atomic(string)))) 172 | # print('PyResult :', eval(string.replace('^', '**'))) 173 | print('MyResult :', solve(ast(split_atomic(string)), DEBUG)) 174 | else: 175 | print('Result:', calculator(string)) 176 | except (EOFError, KeyboardInterrupt): 177 | exit() 178 | 179 | 180 | if __name__ == '__main__': 181 | main() 182 |
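A quick sanity check of the split_atomic -> ast -> solve pipeline above (the expression is illustrative; note that solve returns strings, since the operator functions work on string operands):

    from Calculator_AST import split_atomic, ast, solve

    atoms = split_atomic('(1+2)*3^2')
    # [('PAREN', '('), ('NUM', '1'), ('SYMB', '+'), ('NUM', '2'), ('PAREN', ')'),
    #  ('SYMB', '*'), ('NUM', '3'), ('SYMB', '^'), ('NUM', '2')]
    tree = ast(atoms)   # '*' has the lowest priority outside parentheses, so it becomes the root
    print(solve(tree))  # '27'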
-------------------------------------------------------------------------------- /src/ComputerScience/FullBinaryHeap.py: -------------------------------------------------------------------------------- 1 | from dsr import BinaryTreeNode 2 | 3 | 4 | class FullBinaryTreeHeap(object): 5 | def __init__(self, data=None, is_largest=True): 6 | self.__is_largest = is_largest 7 | if is_largest: 8 | self.__judgement = lambda x, y: x < y 9 | else: 10 | self.__judgement = lambda x, y: x > y 11 | if data is None: 12 | self.__heap = list() 13 | else: 14 | self.__heap = [i for i in data] 15 | self.heapify() 16 | 17 | def shift_down(self, i): 18 | child = 2 * i + 1 19 | T = self.__heap[i] 20 | while child < len(self.__heap): 21 | if child + 1 < len(self.__heap) and self.__judgement( 22 | self.__heap[child], 23 | self.__heap[child + 1]): 24 | child += 1 25 | if self.__judgement(T, self.__heap[child]): 26 | self.__heap[i] = self.__heap[child] 27 | i = child 28 | child = 2 * i + 1 29 | else: 30 | break 31 | self.__heap[i] = T 32 | 33 | def shift_up(self, i): 34 | parent = (i + 1) // 2 - 1 35 | T = self.__heap[i] 36 | while parent >= 0: 37 | if self.__judgement(self.__heap[parent], T): 38 | self.__heap[i] = self.__heap[parent] 39 | i = parent 40 | parent = (i + 1) // 2 - 1 41 | else: 42 | break 43 | self.__heap[i] = T 44 | 45 | def push(self, data): 46 | self.__heap.append(data) 47 | self.shift_up(len(self.__heap) - 1) 48 | 49 | def pop(self): 50 | if len(self.__heap) == 0: 51 | raise IndexError('Not enough data to pop') 52 | ret = self.__heap[0] 53 | self.__heap[0] = self.__heap[-1] 54 | self.__heap.pop() 55 | if len(self.__heap) > 0: 56 | self.shift_down(0) 57 | return ret 58 | 59 | def replace(self, i, data): 60 | self.__heap[i] = data 61 | self.shift_up(i) 62 | 63 | def heapify(self): 64 | for i in range(len(self.__heap) // 2 - 1, -1, -1): 65 | self.shift_down(i) 66 | 67 | @property 68 | def data(self): 69 | return self.__heap.copy() 70 | 71 | @property 72 | def tree(self): 73 | return self.__tree() 74 | 75 | def __tree(self, i=None): 76 | if not self.__heap: 77 | return BinaryTreeNode() 78 | if i is None: 79 | i = 0 80 | root = BinaryTreeNode(self.__heap[i]) 81 | if 2 * i + 1 < len(self.__heap): 82 | root.left = self.__tree(2 * i + 1) 83 | if 2 * i + 2 < len(self.__heap): 84 | root.right = self.__tree(2 * i + 2) 85 | return root 86 | 87 | def copy(self): 88 | return FullBinaryTreeHeap(self.__heap, self.__is_largest) 89 | 90 | def __repr__(self): 91 | return repr(self.tree) 92 | 93 | def __len__(self): 94 | return self.__heap.__len__() 95 | 96 | def __iter__(self): 97 | iterator = self.copy() 98 | while len(iterator) > 0: 99 | yield iterator.pop() 100 |
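A brief illustration of the heap above. Because __iter__ pops from a copy, iterating yields the elements in sorted order (descending for the default is_largest=True) while leaving the original heap intact:

    from FullBinaryHeap import FullBinaryTreeHeap

    heap = FullBinaryTreeHeap([3, 1, 4, 1, 5, 9, 2, 6])
    heap.push(7)
    print(list(heap))  # [9, 7, 6, 5, 4, 3, 2, 1, 1]
    print(len(heap))   # 9 -- the original heap is untouched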
-------------------------------------------------------------------------------- /src/ComputerScience/dsr.py: -------------------------------------------------------------------------------- 1 | from reprlib import recursive_repr 2 | 3 | 4 | class BinaryTreeNode(object): 5 | 6 | def __init__(self, value=None, left=None, right=None): 7 | self.left, self.right = left, right 8 | self.value = value 9 | 10 | @recursive_repr(fillvalue='...') 11 | def __repr__(self): 12 | if self.left is self.right is None: 13 | return '(' + repr(self.value) + ')' 14 | 15 | result = list() 16 | repr_left, repr_right = map( 17 | lambda x: repr(x).split('\n') if x is not None else list(), 18 | (self.left, self.right)) 19 | repr_self = '(' + repr(self.value) + ')' 20 | 21 | len_left, len_right = map( 22 | lambda x: len(x[0]) if len(x) > 0 else 0, 23 | (repr_left, repr_right)) 24 | 25 | height_left, height_right, len_self = map( 26 | len, 27 | (repr_left, repr_right, repr_self)) 28 | 29 | diff_height = height_left - height_right 30 | left_bar = ' ' if self.left is None else '/' 31 | right_bar = ' ' if self.right is None else '\\' 32 | 33 | if diff_height > 0: 34 | repr_right.extend([' ' * len_right] * diff_height) 35 | elif diff_height < 0: 36 | repr_left.extend([' ' * len_left] * (-diff_height)) 37 | 38 | result.append(' ' * (len_left + 1) + repr_self + ' ' * (len_right + 1)) 39 | result.append(' ' * (len_left) + left_bar + ' ' * len_self + right_bar + ' ' * len_right) 40 | result.extend(map( 41 | lambda x: (' ' * (len_self + 2)).join(x), 42 | zip(repr_left, repr_right))) 43 | 44 | return '\n'.join(result) 45 | 46 | 47 | class TreeNode(object): 48 | __slots__ = ("value", "children") 49 | 50 | def __init__(self, value=None, children=None): 51 | self.value = value 52 | self.children = list() if children is None else children 53 | 54 | @recursive_repr(fillvalue='...') 55 | def __repr__(self): 56 | root = '(' + repr(self.value) + ')' 57 | if not self.children: 58 | return root 59 | result = [root] 60 | for child in self.children[:-1]: 61 | result.append(self.__build_child_repr(child, end=False)) 62 | result.append(self.__build_child_repr(self.children[-1], end=True)) 63 | return '\n'.join(result) 64 | 65 | @staticmethod 66 | def __build_child_repr(child, end=False): 67 | child_repr = repr(child).split('\n') 68 | fork, space = (' └── ', '  ') if end else (' ├── ', ' │   ') 69 | return '\n'.join([fork + child_repr[0]] + list(map(lambda x: space + x, child_repr[1:]))) 70 | 71 | 72 | def main(): 73 | print('BinaryTreeNode:') 74 | a = BinaryTreeNode(100) 75 | b = BinaryTreeNode(2) 76 | c = BinaryTreeNode(0, a, b) 77 | d = BinaryTreeNode('a', c, c) 78 | a.right = d 79 | a.left = d 80 | 81 | print(d) 82 | print('TreeNode:') 83 | root = TreeNode('tree', [ 84 | TreeNode('types', [TreeNode(str), TreeNode(int)]), 85 | TreeNode('values', [TreeNode(1), TreeNode(3.1415926), TreeNode(True)]), 86 | TreeNode('empty'), 87 | 2.718281828, 88 | 'Not TreeNode' 89 | ]) 90 | print(root) 91 | 92 | 93 | if __name__ == '__main__': 94 | main() 95 | -------------------------------------------------------------------------------- /src/Controller/cartpole_pid.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import pid 3 | import numpy as np 4 | import multiprocessing 5 | from matplotlib import pyplot 6 | 7 | 8 | def run(Kp=5, Ki=.1, Kd=.1, rho=0.5, n_iter=900, close=True, params=[0, -1, -1, -1]): 9 | # Runs one episode with the given parameters 10 | # INIT 11 | controller = pid.PID(Kp=Kp, Ki=Ki, Kd=Kd) 12 | us = list() 13 | es = list() 14 | env = gym.make('CartPole-v0') 15 | env.reset() 16 | env.render(close=close) 17 | 18 | def decide_step(u): 19 | # Because there are only 2 actions, simply discretize the control signal. 20 | if u > 0: 21 | return 1 22 | elif u < 0: 23 | return 0 24 | else: 25 | return np.random.randint(0, 2) 26 | 27 | # There are only 2 actions, `0` and `1`.
Choose the first action randomly 28 | step = decide_step(0) 29 | 30 | for i in range(n_iter): 31 | # Add an interference at timestep 20 32 | if i == 20: 33 | for _ in range(4): 34 | env.step(0) 35 | if i == 60: 36 | for _ in range(4): 37 | env.step(1) 38 | state = env.step(step) 39 | # The reference input is all-zero, consequently the error is -(sum(...)) 40 | e = - (sum(map(lambda x: x[0] * x[1], zip(params, state[0])))) 41 | # Get the control signal 42 | u = controller.step(e) 43 | step = decide_step(u) 44 | us.append(u) 45 | es.append(e) 46 | env.render(close=close) 47 | 48 | loss = sum(map(lambda x: x ** 2, us)) + rho * sum(map(lambda x: x ** 2, es)) 49 | return loss, us, es 50 | 51 | 52 | def cem(func, mean, variance, n=100, ratio=.2, max_iter=1000, min_var=0.001): 53 | """Cross-Entropy Method optimizer, using a Gaussian distribution approximation. Minimizes the loss returned by func 54 | Args: 55 | func: receives parameters and returns the loss only 56 | mean and variance: mean and variance of the initial Gaussian 57 | n: number of samples per iteration 58 | ratio: elite fraction used for re-sampling 59 | max_iter, min_var: stop conditions 60 | 61 | Returns: 62 | param: best parameters 63 | mean: mean of parameters 64 | variance: variance of parameters 65 | traj: min-loss curve during optimization 66 | """ 67 | 68 | # Check input 69 | n_ref = int(n * ratio) 70 | n_params = len(tuple(zip(mean, variance))) 71 | traj = list() 72 | assert n > 0, 'n not > 0' 73 | assert 0 < n_ref < n, 'n_ref out-bounded' 74 | assert min_var > 0, 'min_var not > 0' 75 | assert min(variance) > 0, 'variance not > 0' 76 | 77 | pool = multiprocessing.Pool() 78 | for _ in range(max_iter): 79 | params = np.random.randn(n, n_params) 80 | for i, p in enumerate(zip(mean, variance)): 81 | params[:, i] = params[:, i] * np.sqrt(p[1]) + p[0] 82 | loss = pool.map(func, params) 83 | sorted_params, sorted_loss = zip(*sorted(zip(params, loss), key=lambda x: x[-1])) 84 | traj.append(sorted_loss[0]) 85 | params_ref = np.array(sorted_params[:n_ref]) 86 | mean = np.mean(params_ref, axis=0) 87 | variance = np.var(params_ref, axis=0) 88 | if min(variance) < min_var: 89 | break 90 | return list(params_ref[0]), mean, variance, traj 91 | 92 |
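# (Illustrative usage sketch, not part of the original file.) cem minimizes any
# picklable scalar-loss callable; multiprocessing.Pool cannot pickle lambdas, so
# the objective must be a top-level function, e.g.:
#
#     def quadratic(p):
#         return (p[0] - 3) ** 2 + (p[1] + 1) ** 2
#
#     best, mean, variance, traj = cem(quadratic, mean=[0, 0], variance=[10, 10])
#
# mean converges towards [3, -1] as the sampling variance collapses.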
93 | def run_(x, rho=.5, close=True): 94 | # Wrapper designed for use with multiprocessing subprocesses (top-level, hence picklable) 95 | return run(x[0], x[1], x[2], close=close, rho=rho)[0] 96 | 97 | 98 | def run0(x): 99 | return run_(x, rho=0) 100 | 101 | 102 | def run_25(x): 103 | return run_(x, rho=.25) 104 | 105 | 106 | def run_5(x): 107 | return run_(x, rho=.5) 108 | 109 | 110 | def run_75(x): 111 | return run_(x, rho=.75) 112 | 113 | 114 | def run1(x): 115 | return run_(x, rho=1) 116 | 117 | 118 | def main(): 119 | data = dict() 120 | for func, rho in zip([run0, run_25, run_5, run_75, run1], [0, .25, 0.5, .75, 1]): 121 | best, mean, variance, traj = cem(func, [10, 0, 0], [60, 40, 40]) 122 | loss, us, es = run(mean[0], mean[1], mean[2], rho, n_iter=500, close=False) 123 | data[rho] = { 124 | 'traj': traj, 125 | 'us': us, 126 | 'es': es, 127 | 'best': best, 128 | 'mean': mean, 129 | 'variance': variance, 130 | } 131 | 132 | # Print information 133 | for rho in data: 134 | print('''rho = {} 135 | Best params: {} 136 | Mean of params: {} 137 | Var of params: {} 138 | Var of u: {} 139 | Mean of u: {} 140 | Var of e: {} 141 | Mean of e: {} 142 | \n'''.format( 143 | rho, 144 | data[rho]['best'], 145 | data[rho]['mean'], 146 | data[rho]['variance'], 147 | np.var(data[rho]['us']), 148 | np.mean(data[rho]['us']), 149 | np.var(data[rho]['es']), 150 | np.mean(data[rho]['es']), 151 | )) 152 | 153 | # Plot optimization trajectory 154 | pyplot.figure() 155 | for rho in data: 156 | pyplot.plot(data[rho]['traj'], '*-', label='$\\rho={}$'.format(rho)) 157 | pyplot.xlabel('Optimizing Iteration') 158 | pyplot.ylabel('Min Loss') 159 | pyplot.legend() 160 | 161 | # Plot us 162 | pyplot.figure() 163 | for rho in data: 164 | plot_data = data[rho]['us'] 165 | # moving_average = list(map(lambda x: x[-1] / 2 + x[-2] / 4 + (x[-3] + x[-4]) / 8, zip(plot_data, plot_data[1:], plot_data[2:], plot_data[3:]))) 166 | pyplot.plot(plot_data[:100], label='$\\rho={}$'.format(rho)) 167 | pyplot.xlabel('$t$') 168 | pyplot.ylabel('Input amount') 169 | pyplot.legend() 170 | 171 | # Plot es 172 | pyplot.figure() 173 | for rho in data: 174 | plot_data = data[rho]['es'] 175 | # moving_average = list(map(lambda x: x[-1] / 2 + x[-2] / 4 + (x[-3] + x[-4]) / 8, zip(plot_data, plot_data[1:], plot_data[2:], plot_data[3:]))) 176 | pyplot.plot(plot_data[:100], label='$\\rho={}$'.format(rho)) 177 | pyplot.xlabel('$t$') 178 | pyplot.ylabel('Deviation') 179 | pyplot.legend() 180 | pyplot.show() 181 | 182 | 183 | if __name__ == '__main__': 184 | main() 185 | -------------------------------------------------------------------------------- /src/Controller/pid.py: -------------------------------------------------------------------------------- 1 | class PID(object): 2 | 3 | def __init__(self, Kp=1, Ki=.1, Kd=0, T_sample=1): 4 | self.Kp = Kp 5 | self.Ki = Ki 6 | self.Kd = Kd 7 | self.T_sample = T_sample 8 | 9 | self.u_prev = 0 10 | self.e_prev = 0 11 | self.e_prev_prev = 0 12 | 13 | def step(self, e): 14 | p = self.Kp * (e - self.e_prev) 15 | i = self.Ki * self.T_sample * e 16 | d = self.Kd * (e - 2 * self.e_prev + self.e_prev_prev) 17 | u = (self.u_prev + p + i + d) 18 | self.u_prev, self.e_prev, self.e_prev_prev = u, e, self.e_prev 19 | return u 20 |
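pid.py above implements the incremental (velocity-form) discrete PID law u_k = u_{k-1} + Kp*(e_k - e_{k-1}) + Ki*T*e_k + Kd*(e_k - 2*e_{k-1} + e_{k-2}), accumulating the integral term implicitly in u. A minimal check with a constant error of 1 (after the initial proportional kick, the output grows by Ki*T*e = 0.1 per step):

    from pid import PID

    controller = PID(Kp=1, Ki=0.1, Kd=0, T_sample=1)
    print([round(controller.step(1.0), 2) for _ in range(4)])  # [1.1, 1.2, 1.3, 1.4]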
-------------------------------------------------------------------------------- /src/MachineLearning/Deep_Learning/dqn_cartpole.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | import gym 4 | import tensorflow as tf 5 | from collections import deque 6 | 7 | 8 | ENV_NAME = 'CartPole-v0' 9 | EPISODE = 10000 # Episode limitation 10 | STEP = 300 # Step limitation in an episode 11 | 12 | 13 | class DQN(object): 14 | 15 | """A DQN testing and learning class""" 16 | 17 | def __init__(self, env, mem=1000, phimem=4, skip_frame=0): 18 | self.env = env 19 | self.mem = mem 20 | self.phimem = phimem 21 | self.phi_state = deque() 22 | self.skip_frame = skip_frame 23 | self.state = self.env.reset() 24 | self.sess = tf.InteractiveSession() 25 | self.experience_pool = deque() 26 | self.n_state = self.env.observation_space.shape[0] * self.phimem 27 | self.n_act = self.env.action_space.n 28 | self._create_net(40) 29 | 30 | def _create_net(self, hidden_size=20): 31 | # Creating the Q net 32 | self._xq = tf.placeholder(tf.float32, shape=[None, self.n_state], name='x') 33 | self._y_q = tf.placeholder(tf.float32, shape=[None, self.n_act]) 34 | self._weightsq = [ 35 | tf.Variable(tf.random_normal([self.n_state, hidden_size], stddev=0.1)), 36 | tf.Variable(tf.random_normal([hidden_size, self.n_act], stddev=0.1))] 37 | self._biasesq = [ 38 | tf.Variable(tf.constant(0.1, shape=[hidden_size])), 39 | tf.Variable(tf.constant(0.1, shape=[self.n_act]))] 40 | 41 | self.lq = tf.nn.relu(tf.add(tf.matmul(self._xq, self._weightsq[0]), self._biasesq[0])) 42 | self.q = tf.add(tf.matmul(self.lq, self._weightsq[1]), self._biasesq[1]) 43 | 44 | # Creating the Q* (target) net 45 | self._x = tf.placeholder(tf.float32, shape=[None, self.n_state], name='x') 46 | self._y_q_ = tf.placeholder(tf.float32, shape=[None, self.n_act]) 47 | self._weightsq_ = [ 48 | tf.Variable(tf.random_normal([self.n_state, hidden_size], stddev=0.1)), 49 | tf.Variable(tf.random_normal([hidden_size, self.n_act], stddev=0.1))] 50 | self._biasesq_ = [ 51 | tf.Variable(tf.constant(0.1, shape=[hidden_size])), 52 | tf.Variable(tf.constant(0.1, shape=[self.n_act]))] 53 | 54 | self.lq_ = tf.nn.relu(tf.add(tf.matmul(self._x, self._weightsq_[0]), self._biasesq_[0])) 55 | self.q_ = tf.add(tf.matmul(self.lq_, self._weightsq_[1]), self._biasesq_[1]) 56 | 57 | # Loss 58 | self.y = tf.placeholder(tf.float32, [None, self.n_act]) 59 | self.loss = tf.square(self.y - self.q) 60 | self.optimizer = tf.train.RMSPropOptimizer(0.01, 61 | momentum=0.1, 62 | use_locking=True, 63 | centered=True).minimize(self.loss) 64 | 65 | # Initialize 66 | tf.global_variables_initializer().run() 67 | self.sync_theta() 68 | 69 | def sync_theta(self): 70 | for i in range(2): 71 | self.sess.run(tf.assign(self._biasesq_[i], self._biasesq[i])) 72 | self.sess.run(tf.assign(self._weightsq_[i], self._weightsq[i])) 73 | 74 | def phi(self, state=None, reset=False): 75 | if reset: 76 | self.phi_state = deque() 77 | return 78 | if not state: 79 | state = list(self.state) 80 | if np.shape(self.phi_state)[0] < self.phimem: 81 | self.phi_state = deque([state] * (self.phimem - np.shape(self.phi_state)[0])) 82 | else: 83 | self.phi_state.appendleft(state) 84 | self.phi_state.pop() 85 | ret = [] 86 | for i in self.phi_state: 87 | ret.extend(i) 88 | return np.mat(ret) 89 | 90 | def play(self, epsilon=0.05): 91 | if np.random.rand() <= epsilon: 92 | act = np.random.randint(0, self.n_act) 93 | else: 94 | state = self.phi() 95 | result = list(self.q.eval(feed_dict={self._xq: state})[0]) 96 | act = result.index(max(result)) 97 | return self.env.step(act), act 98 | 99 | def train(self, gamma=0.9, EPISODE=5000, STEP=5000, minibatch=32, C=10): 100 | c = 0 101 | epsilon = 1 102 | for episode in range(EPISODE): 103 | self.state = self.env.reset() 104 | self.phi(reset=True) 105 | state = self.phi() 106 | total_reward = 0 107 | total_state = [] 108 | 109 | for t in range(STEP): 110 | (self.state, reward, done, _), act = self.play(epsilon) 111 | # reward = 0 if done else 0.1 112 | total_reward += reward 113 | 114 | state1 = self.phi() 115 | total_state.append([state, reward, act, state1]) 116 | print(act, end=' ') 117 | self.env.render() 118 | if len(self.experience_pool) > self.mem: 119 | epsilon = epsilon - 0.001 if epsilon > 0.95 else 0.05 120 | while len(self.experience_pool) > self.mem: 121 | self.experience_pool.pop() 122 | state = state1 123 | 124 | if done: 125 | for index, content in enumerate(total_state): 126 | total_state[index][1] = total_reward 127 | total_state[-1][1] = 0 128 | self.experience_pool.extendleft(total_state) 129 | print(epsilon) 130 | for i in range(200): 131 | # Replay the experience 132 | if len(self.experience_pool) >= self.mem: 133 | training_set = random.sample(self.experience_pool, minibatch) 134 | phi0 = np.vstack([i[0] for i in training_set]) 135 | phi1 = np.vstack([i[-1] for i in training_set]) 136 | r = np.mat([i[1] for i in training_set]).T 137 | y = r + gamma * self.q_.eval(feed_dict={self._x: phi1}) 138 | self.optimizer.run(feed_dict={self._xq: phi0, self.y: y}) 139 | c += 1 140 | if c > C: 141 | self.sync_theta() 142 | break 143 | 144 | def main(): 145 | env = gym.make(ENV_NAME) 146 | agent = DQN(env) 147 | print(agent._xq.get_shape()) 148 | agent.train() 149 | print(len(agent.phi_state)) 150 | print(len(agent.phi())) 151 | for i in range(1000): 152 | agent.play(epsilon=0) 153 | return agent 154 | 155 | 156 | if __name__ == '__main__': 157 | agent = main() 158 |
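One caveat in train() above: the regression target y = r + gamma * Q'(s') is broadcast over every action's output, whereas standard DQN updates only the entry of the action actually taken, using the max over the target network and zeroing the bootstrap on terminal states. A hedged numpy sketch of the usual per-action target construction (the names are chosen to mirror the class above and are not part of the original file):

    import numpy as np

    def dqn_targets(q_values, q_target_next, actions, rewards, dones, gamma=0.9):
        # q_values: (B, n_act) current predictions; q_target_next: (B, n_act)
        # target-net outputs for the next states; actions: (B,) taken-action indices
        y = q_values.copy()  # leave the targets of non-taken actions unchanged
        bellman = rewards + gamma * q_target_next.max(axis=1) * (1.0 - dones)
        y[np.arange(len(actions)), actions] = bellman
        return y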
-------------------------------------------------------------------------------- /src/MachineLearning/Deep_Learning/pg_cartpole.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Policy Gradient for CartPole-v0" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": { 14 | "collapsed": true 15 | }, 16 | "outputs": [], 17 | "source": [ 18 | "from pylab import *\n", 19 | "import gym" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": { 26 | "collapsed": true 27 | }, 28 | "outputs": [], 29 | "source": [ 30 | "ENV_NAME = 'CartPole-v0'\n", 31 | "EPISODE = 10000 # Episode limitation\n", 32 | "STEP = 300 # Step limitation in an episode" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": { 39 | "collapsed": true 40 | }, 41 | "outputs": [], 42 | "source": [ 43 | "env = gym.make(ENV_NAME)\n", 44 | "env.reset()" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "metadata": { 51 | "collapsed": true 52 | }, 53 | "outputs": [], 54 | "source": [] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": { 60 | "collapsed": true 61 | }, 62 | "outputs": [], 63 | "source": [] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": { 69 | "collapsed": true 70 | }, 71 | "outputs": [], 72 | "source": [] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "metadata": { 78 | "collapsed": true 79 | }, 80 | "outputs": [], 81 | "source": [] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": { 87 | "collapsed": true 88 | }, 89 | "outputs": [], 90 | "source": [] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": { 96 | "collapsed": true 97 | }, 98 | "outputs": [], 99 | "source": [] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": { 105 | "collapsed": true 106 | }, 107 | "outputs": [], 108 | "source": [] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "metadata": { 114 | "collapsed": true 115 | }, 116 | "outputs": [], 117 | "source": [] 118 | } 119 | ], 120 | "metadata": { 121 | "kernelspec": { 122 | "display_name": "Python [conda root]", 123 | "language": "python", 124 | "name": "conda-root-py" 125 | }, 126 | "language_info": { 127 | "codemirror_mode": { 128 | "name": "ipython", 129 | "version": 3 130 | }, 131 | "file_extension": ".py", 132 | "mimetype": "text/x-python", 133 | "name": "python", 134 | "nbconvert_exporter": "python", 135 | "pygments_lexer": "ipython3", 136 | "version": "3.5.3" 137 | } 138 | }, 139 | "nbformat": 4, 140 | "nbformat_minor": 2 141 | } 142 | -------------------------------------------------------------------------------- /src/MachineLearning/TensorFlow/Batcher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | import pandas as pd 4 | import numpy as np 5 | from sklearn.model_selection import train_test_split 6 | from
sklearn.utils import shuffle 7 | 8 | __author__ = 'the0demiurge' 9 | 10 | 11 | class Batcher(object): 12 | 13 | '''create a batcher with the same api as tensorflow 14 | usage: 15 | data = Batcher(X, Y) 16 | batch_xs, batch_ys = data.next_batch(100) 17 | ''' 18 | _batch_position = 0 19 | 20 | def __init__( 21 | self, 22 | X, 23 | Y, 24 | train_size=None, 25 | test_size=None, 26 | random_state=None, 27 | to_shuffle=True): 28 | ''' 29 | Args: 30 | X, Y: either array or ndarray or pandas.core.DataFrame, training 31 | inputs and targets 32 | train_size : float, int, or None (default is None) 33 | If float, should be between 0.0 and 1.0 and represent the 34 | proportion of the dataset to include in the train split. If 35 | int, represents the absolute number of train samples. If None, 36 | the value is automatically set to the complement of the test size. 37 | test_size : float, int, or None (default is None) 38 | If float, should be between 0.0 and 1.0 and represent the 39 | proportion of the dataset to include in the test split. If 40 | int, represents the absolute number of test samples. If None, 41 | the value is automatically set to the complement of the train size. 42 | If train size is also None, test size is set to 0.25. 43 | random_state : int or RandomState 44 | Pseudo-random number generator state used for random sampling. 45 | to_shuffle: shuffle the data initially 46 | ''' 47 | X = pd.DataFrame(X) 48 | Y = pd.DataFrame(Y) 49 | 50 | if X.shape[0] != Y.shape[0]: 51 | raise ValueError('Amount of X and Y are not equal!') 52 | 53 | if to_shuffle: 54 | X, Y = shuffle(X, Y) 55 | 56 | if random_state is None: 57 | random_state = np.random.randint(0, 4294967295) 58 | 59 | (self.X_train, 60 | self.X_test, 61 | self.Y_train, 62 | self.Y_test) = train_test_split( 63 | X, 64 | Y, 65 | train_size=train_size, 66 | test_size=test_size, 67 | random_state=random_state) 68 | 69 | self.test_size = self.X_test.shape[0] 70 | self.train_size = self.X_train.shape[0] 71 | 72 | def next_batch(self, batch_size=100): 73 | '''returns next_batch data with batch_size 74 | Args: 75 | batch_size: size per batch 76 | 77 | Returns: 78 | batch_xs, batch_ys: numpy.ndarray 79 | ''' 80 | assert int(batch_size) > 0, 'batch_size {} is not > 0'.format( 81 | batch_size) 82 | batch_xs = self.next_xbatch(batch_size) 83 | batch_ys = self.this_ybatch() 84 | return batch_xs, batch_ys 85 | 86 | def this_xbatch(self): 87 | batch_index = np.array(range( 88 | self._batch_position, 89 | self._batch_position + self._batch_size)) 90 | batch_index %= self.train_size 91 | batch_xs = self.X_train.iloc[batch_index, :] 92 | return batch_xs.values 93 | 94 | def this_ybatch(self): 95 | batch_index = np.array(range( 96 | self._batch_position, 97 | self._batch_position + self._batch_size)) 98 | batch_index %= self.train_size 99 | batch_ys = self.Y_train.iloc[batch_index, :] 100 | return batch_ys.values 101 | 102 | def next_xbatch(self, batch_size=100): 103 | assert int(batch_size) > 0, 'batch_size {} is not > 0'.format( 104 | batch_size) 105 | self._batch_size = batch_size 106 | self._batch_position += batch_size 107 | self._batch_position %= self.train_size 108 | 109 | batch_index = np.array(range( 110 | self._batch_position, 111 | self._batch_position + batch_size)) 112 | batch_index %= self.train_size 113 | batch_xs = self.X_train.iloc[batch_index, :] 114 | return batch_xs.values 115 | 116 | def next_ybatch(self, batch_size=100): 117 | assert int(batch_size) > 0, 'batch_size {} is not > 0'.format( 118 | batch_size) 119 | self._batch_size = 
batch_size 120 | self._batch_position += batch_size 121 | self._batch_position %= self.train_size 122 | 123 | batch_index = np.array(range( 124 | self._batch_position, 125 | self._batch_position + batch_size)) 126 | batch_index %= self.train_size 127 | batch_ys = self.Y_train.iloc[batch_index, :] 128 | return batch_ys.values 129 | -------------------------------------------------------------------------------- /src/MachineLearning/TensorFlow/IncreaseNN.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import numpy as np 4 | import tensorflow as tf 5 | from tensorflow.examples.tutorials.mnist import input_data 6 | 7 | 8 | class IncreaseNN(object): 9 | 10 | def __init__(self, shape, X=None, Y=None, log_dir='/tmp/tf_charlesxu'): 11 | if X and Y: 12 | shape.insert(0, X.shape[1]) 13 | shape.append(Y.shape[1]) 14 | tf.reset_default_graph() 15 | self.__shape = shape 16 | if not os.path.isdir(log_dir): 17 | os.makedirs(log_dir) 18 | self.log_dir = log_dir + '/' + self.curtime 19 | self.sess = tf.InteractiveSession() 20 | self.net = self.interferece(self.shape) 21 | tf.global_variables_initializer().run() 22 | self.__step = 0 23 | 24 | def _init_placeholders(self, shape): 25 | with tf.name_scope('input'): 26 | placeholders = { 27 | 'x': tf.placeholder(tf.float32, [None, shape[0]]), 28 | 'y': tf.placeholder(tf.float32, [None, shape[-1]]) 29 | } 30 | return placeholders 31 | 32 | def _init_variables(self, shape): 33 | 34 | def init_weight_variable(shape): 35 | """Create a weight variable with appropriate initialization.""" 36 | initial = tf.truncated_normal(shape, stddev=0.1) 37 | return tf.Variable(initial) 38 | 39 | def init_bias_variable(shape): 40 | """Create a bias variable with appropriate initialization.""" 41 | initial = tf.constant(0.1, shape=shape) 42 | return tf.Variable(initial) 43 | 44 | variables = { 45 | 'weights': [init_weight_variable([a, b]) for a, b in zip(shape[:-1], shape[1:])], 46 | 'biases': [init_bias_variable([a]) for a in shape[1:]] 47 | } 48 | for index, (w, b) in enumerate(zip(variables['weights'], variables['biases'])): 49 | self._variable_summaries(w, 'weight_%d' % index) 50 | self._variable_summaries(b, 'bias_%d' % index) 51 | return variables 52 | 53 | def _init_layer(self, name, x, w, b, activation=tf.nn.relu): 54 | with tf.name_scope(name): 55 | z = tf.matmul(x, w) + b 56 | tf.summary.histogram('z', z) 57 | layer = activation(z) 58 | tf.summary.histogram('layer', layer) 59 | return layer 60 | 61 | def _init_layers(self, placeholders, variables): 62 | layers = [placeholders['x']] 63 | for index, (w, b) in enumerate(zip(variables['weights'][:-1], variables['biases'][:-1])): 64 | layer = self._init_layer('layer_%d' % index, layers[-1], w, b) 65 | layers.append(layer) 66 | layer = self._init_layer('last_layer', layers[-1], variables['weights'][-1], variables['biases'][-1], tf.identity) 67 | layers.append(layer) 68 | return layers 69 | 70 | def _init_loss(self, labels, logits): 71 | with tf.name_scope('cross_entropy'): 72 | diff = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits) 73 | with tf.name_scope('total'): 74 | cross_entropy = tf.reduce_mean(diff) 75 | tf.summary.scalar('cross entropy', cross_entropy) 76 | return cross_entropy 77 | 78 | def _init_accuracy(self, labels, logits): 79 | with tf.name_scope('accuracy'): 80 | with tf.name_scope('correct_prediction'): 81 | correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)) 82 | with tf.name_scope('accuracy'): 83 
| accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 84 | tf.summary.scalar('accuracy', accuracy) 85 | return accuracy 86 | 87 | def _init_train_ops(self, lost, learning_rate=0.01): 88 | with tf.name_scope('train'): 89 | train_step = tf.train.RMSPropOptimizer(learning_rate).minimize(lost) 90 | return train_step 91 | 92 | def interferece(self, shape): 93 | interfereces = dict() 94 | interfereces['placeholders'] = self._init_placeholders(shape) 95 | interfereces['variables'] = self._init_variables(shape) 96 | interfereces['layers'] = self._init_layers(interfereces['placeholders'], interfereces['variables']) 97 | interfereces['cross_entropy'] = self._init_loss(labels=interfereces['placeholders']['y'], logits=interfereces['layers'][-1]) 98 | interfereces['accuracy'] = self._init_accuracy(labels=interfereces['placeholders']['y'], logits=interfereces['layers'][-1]) 99 | interfereces['train_step'] = self._init_train_ops(interfereces['cross_entropy']) 100 | interfereces['merged'] = tf.summary.merge_all() 101 | interfereces['summary'] = { 102 | 'train_writer': tf.summary.FileWriter(self.log_dir + '/train', self.sess.graph), 103 | 'test_writer': tf.summary.FileWriter(self.log_dir + '/test', self.sess.graph)} 104 | return interfereces 105 | 106 | def _feed_dict(self, data, net): 107 | xs, ys = data.next_batch(100) 108 | feed_dict = { 109 | net['placeholders']['x']: xs, 110 | net['placeholders']['y']: ys 111 | } 112 | return feed_dict 113 | 114 | def fit(self, data_train, data_test, epoches=1000): 115 | net = self.net 116 | for epoch in range(epoches): 117 | # testing 118 | if epoch % 1 == 0: 119 | run_metadata = tf.RunMetadata() 120 | summary, test_accuracy = self.sess.run( 121 | [net['merged'], net['accuracy']], 122 | feed_dict=self._feed_dict(data_test, net), 123 | run_metadata=run_metadata) 124 | net['summary']['test_writer'].add_run_metadata(run_metadata, 'step%06d' % self.__step) 125 | net['summary']['test_writer'].add_summary(summary, self.__step) 126 | print(self.__step, test_accuracy, end='\n') 127 | 128 | # training 129 | run_metadata = tf.RunMetadata() 130 | summary, _ = self.sess.run( 131 | [net['merged'], net['train_step']], 132 | feed_dict=self._feed_dict(data_train, net), 133 | run_metadata=run_metadata) 134 | net['summary']['train_writer'].add_run_metadata(run_metadata, 'step%06d' % self.__step) 135 | net['summary']['train_writer'].add_summary(summary, self.__step) 136 | self.__step += 1 137 | self.__step += 100 138 | 139 | def increase(self, shape): 140 | old_variables = { 141 | 'weights': [var.eval() for var in self.net['variables']['weights']], 142 | 'biases': [var.eval() for var in self.net['variables']['biases']] 143 | } 144 | self.sess.close() 145 | tf.reset_default_graph() 146 | self.sess = tf.InteractiveSession() 147 | interfereces = dict() 148 | interfereces['placeholders'] = self._init_placeholders(shape) 149 | 150 | variables = { 151 | 'weights': [self._increase_variable( 152 | [a, b], old_variables['weights'][index] if index < len(old_variables['weights']) else None 153 | ) for index, (a, b) in enumerate(zip(shape[:-1], shape[1:]))], 154 | 'biases': [self._increase_variable( 155 | [a], old_variables['biases'][index] if index < len(old_variables['biases']) else None 156 | ) for index, a in enumerate(shape[1:])] 157 | } 158 | 159 | for index, (w, b) in enumerate(zip(variables['weights'], variables['biases'])): 160 | self._variable_summaries(w, 'weight_%d' % index) 161 | self._variable_summaries(b, 'bias_%d' % index) 162 | 163 | interfereces['variables'] = 
variables 164 | interfereces['layers'] = self._init_layers(interfereces['placeholders'], interfereces['variables']) 165 | interfereces['cross_entropy'] = self._init_loss(labels=interfereces['placeholders']['y'], logits=interfereces['layers'][-1]) 166 | interfereces['accuracy'] = self._init_accuracy(labels=interfereces['placeholders']['y'], logits=interfereces['layers'][-1]) 167 | interfereces['train_step'] = self._init_train_ops(interfereces['cross_entropy']) 168 | interfereces['merged'] = tf.summary.merge_all() 169 | interfereces['summary'] = { 170 | 'train_writer': tf.summary.FileWriter(self.log_dir + '/train', self.sess.graph), 171 | 'test_writer': tf.summary.FileWriter(self.log_dir + '/test', self.sess.graph)} 172 | self.net = interfereces 173 | tf.global_variables_initializer().run() 174 | return interfereces 175 | 176 | def predict(self, data_x): 177 | return self.net['layers'][-1].eval(session=self.sess, feed_dict={self.net['placeholders']['x']: data_x}) 178 | 179 | def _variable_summaries(self, variable, name='var'): 180 | """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" 181 | with tf.name_scope(name): 182 | mean = tf.reduce_mean(variable) 183 | tf.summary.scalar('mean', mean) 184 | with tf.name_scope('stddev'): 185 | stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean))) 186 | tf.summary.scalar('stddev', stddev) 187 | tf.summary.scalar('max', tf.reduce_max(variable)) 188 | tf.summary.scalar('min', tf.reduce_min(variable)) 189 | tf.summary.histogram('histogram', variable) 190 | 191 | def _increase_variable(self, shape, from_variable=None): 192 | if from_variable is None: 193 | to_values = np.random.randn(*shape) / 10 194 | var = tf.Variable(to_values, dtype=tf.float32) 195 | tf.variables_initializer([var]).run() 196 | return var 197 | 198 | if isinstance(from_variable, tf.Variable): 199 | try: 200 | from_variable.eval(session=self.sess) 201 | except tf.errors.FailedPreconditionError: 202 | tf.variables_initializer([from_variable]).run() 203 | from_shape = from_variable.get_shape().as_list() 204 | from_values = from_variable.eval(session=self.sess) 205 | elif isinstance(from_variable, np.ndarray): 206 | from_shape = from_variable.shape 207 | from_values = from_variable 208 | else: 209 | raise Exception('Not recognised type %s' % str(type(from_variable))) 210 | to_values = np.random.randn(*shape) / 10 211 | transfer_shape = [min(dim) for dim in zip(from_shape, shape)] 212 | 213 | if len(from_shape) == 1: 214 | to_values += 0.1 215 | to_values[:transfer_shape[0]] = from_values[:transfer_shape[0]] 216 | else: 217 | to_values[..., :transfer_shape[-2], :transfer_shape[-1]] = from_values[..., :transfer_shape[-2], :transfer_shape[-1]] 218 | var = tf.Variable(to_values, dtype=tf.float32) 219 | tf.variables_initializer([var]).run() 220 | return var 221 | 222 | @property 223 | def curtime(self): 224 | cur_time = time.strftime('%Y-%m-%d_%X', time.localtime(time.time())) 225 | return cur_time 226 | 227 | @property 228 | def shape(self): 229 | return self.__shape 230 | 231 | 232 | def original(data): 233 | network = IncreaseNN([784, 20, 10], log_dir='/tmp/tf_charlesxu/original_wide') 234 | network.fit(data.train, data.test, epoches=100) 235 | for hidden in range(20): 236 | network.increase([784, 20, 10]) 237 | network.fit(data.train, data.test, epoches=100) 238 | 239 | structure = [784] + [30] * 19 + [10] 240 | network = IncreaseNN(structure, log_dir='/tmp/tf_charlesxu/original_deep') 241 | network.fit(data.train, data.test, epoches=100) 242 | for hidden in 
range(20): 243 | network.increase(structure) 244 | network.fit(data.train, data.test, epoches=100) 245 | 246 | 247 | def widen(data): 248 | network = IncreaseNN([784, 1, 10], log_dir='/tmp/tf_charlesxu/widen') 249 | network.fit(data.train, data.test, epoches=100) 250 | for hidden in range(20): 251 | network.increase([784, hidden + 2, 10]) 252 | network.fit(data.train, data.test, epoches=100) 253 | 254 | 255 | def deepen(data): 256 | structure = [784, 30, 10] 257 | network = IncreaseNN(structure, log_dir='/tmp/tf_charlesxu/deepen') 258 | network.fit(data.train, data.test, epoches=100) 259 | for hidden in range(20): 260 | structure.insert(1, 30) 261 | network.increase(structure) 262 | network.fit(data.train, data.test, epoches=100) 263 | 264 | 265 | def main(): 266 | data_path = '/home/charlesxu/Workspace/data/MNIST_data/' 267 | data = input_data.read_data_sets(data_path, one_hot=True) 268 | 269 | original(data) 270 | widen(data) 271 | deepen(data) 272 | 273 | 274 | if __name__ == '__main__': 275 | main() 276 | () 277 | -------------------------------------------------------------------------------- /src/MachineLearning/TensorFlow/TensorflowLearning/Untitled.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import tensorflow as tf" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 13, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "sess = tf.InteractiveSession()" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 43, 24 | "metadata": {}, 25 | "outputs": [ 26 | { 27 | "name": "stdout", 28 | "output_type": "stream", 29 | "text": [ 30 | "[[ 1.41028273 1.76107073 2.1326015 3.25804257]\n", 31 | " [-0.29294467 1.32900047 3.68154383 3.18772244]\n", 32 | " [-0.45015633 1.11059964 3.40281439 3.56811571]\n", 33 | " [ 0.69193411 1.46399367 3.12278247 4.01428461]\n", 34 | " [ 1.87994528 1.89346516 1.92173779 2.68355751]\n", 35 | " [ 2.22133255 3.30972195 3.50474811 4.51246405]\n", 36 | " [ 1.25771391 0.31267393 2.96506882 4.30967855]]\n", 37 | "mean= [ 0.69804859 1.60123134 3.57658982 4.10836506] \n", 38 | "var= [ 1.01954579 1.23900151 0.58759832 0.73256236]\n" 39 | ] 40 | } 41 | ], 42 | "source": [ 43 | "x = tf.constant([[1,2,3,4]]*7, tf.float32)\n", 44 | "y = tf.truncated_normal(x.shape)\n", 45 | "z = x + y\n", 46 | "mean, var = tf.nn.moments(z, 0)\n", 47 | "print(z.eval())\n", 48 | "print('mean=', mean.eval(), '\\nvar=',var.eval())" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 44, 54 | "metadata": {}, 55 | "outputs": [ 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "[[ 1.53355587 0.38661456 0.10948229 1.430058 ]\n", 61 | " [-0.27069056 0.80823851 -0.27680206 -1.31970453]\n", 62 | " [ 0.63089526 -1.77756488 1.54642105 0.50779915]\n", 63 | " [ 0.92064953 -0.56752062 0.82397366 0.3272028 ]\n", 64 | " [-1.43046248 -0.67386436 -1.86051989 0.67843962]\n", 65 | " [-0.28913945 1.45824242 -0.56958675 -0.08079386]\n", 66 | " [-1.09480834 0.36585569 0.22702885 -1.54300475]]\n", 67 | "mean= [ -2.55448480e-08 1.70298989e-07 -3.40597985e-08 -2.04358784e-07] \n", 68 | "var= [ 0.99983579 0.99980336 0.99982071 0.99983197]\n" 69 | ] 70 | } 71 | ], 72 | "source": [ 73 | "w = tf.nn.batch_normalization(z,mean,var,0,1,1e-4)\n", 74 | "mean, var = tf.nn.moments(w, 0)\n", 75 | "print(w.eval())\n", 76 | "print('mean=', mean.eval(), 
'\\nvar=',var.eval())" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [] 85 | } 86 | ], 87 | "metadata": { 88 | "kernelspec": { 89 | "display_name": "Python [conda root]", 90 | "language": "python", 91 | "name": "conda-root-py" 92 | }, 93 | "language_info": { 94 | "codemirror_mode": { 95 | "name": "ipython", 96 | "version": 3 97 | }, 98 | "file_extension": ".py", 99 | "mimetype": "text/x-python", 100 | "name": "python", 101 | "nbconvert_exporter": "python", 102 | "pygments_lexer": "ipython3", 103 | "version": "3.5.4" 104 | } 105 | }, 106 | "nbformat": 4, 107 | "nbformat_minor": 2 108 | } 109 | -------------------------------------------------------------------------------- /src/MachineLearning/TensorFlow/list_devices.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from tensorflow.python.client import device_lib 3 | print(device_lib.list_local_devices()) 4 | 5 | -------------------------------------------------------------------------------- /src/MachineLearning/TensorFlow/multi-gpu.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import tensorflow as tf 3 | from tensorflow.examples.tutorials import mnist 4 | 5 | data = mnist.input_data.read_data_sets('MNIST_data/', one_hot=True) 6 | # making layers 7 | # We finally reached the goal of talking with each other in English... haha 8 | 9 | 10 | def relu_layer(bef, size): 11 | w = tf.Variable(tf.random_normal(size, stddev=.1, dtype=tf.float16)) 12 | b = tf.Variable(.1 * tf.ones(size[-1], dtype=tf.float16)) 13 | return tf.nn.relu(tf.add(tf.matmul(bef, w), b)) 14 | 15 | 16 | def linear_layer(bef, size): 17 | w = tf.Variable(tf.random_normal(size, stddev=.1, dtype=tf.float16)) 18 | b = tf.Variable(tf.zeros(size[-1], dtype=tf.float16)) 19 | return tf.add(tf.matmul(bef, w), b) 20 | 21 | 22 | graph = tf.Graph() 23 | graph.as_default() 24 | 25 | # defining layers including weights and biases, and assigning them to cpus and gpus 26 | with tf.device('/cpu:0'): 27 | x = tf.placeholder(tf.float16, [None, 784]) 28 | y_ = tf.placeholder(tf.float16, [None, 10]) 29 | 30 | # the first layer uses the most computing resources 31 | with tf.device('/gpu:0'): 32 | l1 = relu_layer(x, [784, 500]) 33 | 34 | # assigning gpu:0 and gpu:1 to one network, because they can communicate with each other; DO NOT USE /gpu:1 and /gpu:2 35 | with tf.device('/gpu:1'): 36 | # l2 = relu_layer(l1, [500, 800]) 37 | l3 = relu_layer(l1, [500, 200]) 38 | l4 = linear_layer(l3, [200, 10]) 39 | y = tf.nn.softmax(l4) 40 | 41 | cost = tf.nn.softmax_cross_entropy_with_logits(logits=l4, labels=y_) 42 | 43 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) 44 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float16)) 45 | 46 | step = tf.train.RMSPropOptimizer(7e-6).minimize(cost) 47 | 48 | sess = tf.InteractiveSession() 49 | sess.run(tf.global_variables_initializer()) 50 | 51 | for i in range(20000): 52 | batch = data.train.next_batch(50) 53 | if i % 100 == 0: 54 | train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1]}) 55 | print("step %d, training accuracy %g" % (i, train_accuracy)) 56 | step.run(feed_dict={x: batch[0], y_: batch[1]}) 57 | 58 | print("test accuracy %g" % accuracy.eval(feed_dict={x: data.test.images, y_: data.test.labels})) 59 |
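A small companion note to multi-gpu.py: with hard device placement, the script fails outright on machines that lack a /gpu:1; TF1's soft placement option keeps it runnable by falling back to an available device. A sketch using standard TF1 session options (not part of the original file):

    import tensorflow as tf

    config = tf.ConfigProto(
        allow_soft_placement=True,   # fall back to an existing device if needed
        log_device_placement=True)   # log where each op actually runs
    sess = tf.InteractiveSession(config=config)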
-------------------------------------------------------------------------------- /src/MachineLearning/algorithm/DecisionTree.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class DT(object): 5 | """Decision Tree""" 6 | 7 | def __init__(self, arg): 8 | super(DT, self).__init__() 9 | self.arg = arg 10 | -------------------------------------------------------------------------------- /src/MachineLearning/algorithm/DeepReinforcementLearning/ReplayBuffer.py: -------------------------------------------------------------------------------- 1 | import random 2 | from collections import deque 3 | 4 | 5 | class ReplayBuffer(object): 6 | """ReplayBuffer for DRL 7 | Accepts tuples of (state, act, reward, next_state)""" 8 | 9 | def __init__(self, maxlen, data=list()): 10 | self._maxlen = maxlen 11 | self._buffer = deque(data, maxlen=maxlen) 12 | 13 | def __len__(self): 14 | return len(self._buffer) 15 | 16 | def sample(self, batch_size=32): 17 | if len(self._buffer) <= batch_size: 18 | return list(self._buffer) 19 | else: 20 | return random.sample(self._buffer, batch_size) 21 | 22 | def append(self, data): 23 | self._buffer.appendleft(data) 24 | 25 | def extend(self, data_list): 26 | self._buffer.extendleft(data_list) 27 | 28 | def clear(self): 29 | self._buffer.clear() 30 | 31 | @property 32 | def content(self): 33 | return list(self._buffer) 34 | 35 | @property 36 | def maxlen(self): 37 | return self._maxlen 38 | 39 | def __repr__(self): 40 | return self._buffer.__repr__() 41 | 42 | def __str__(self): 43 | return self._buffer.__str__() 44 | 45 | 46 | def test_buffer(): 47 | data = [(1, 2, 3)] * 5 + [(2, 3, 4)] * 3 48 | buffer = ReplayBuffer(7, data) 49 | print(buffer.content) 50 | buffer.append((3, 4, 5)) 51 | print(buffer.content) 52 | buffer.extend([(6, 4, 2)] * 2) 53 | print(buffer.content, len(buffer)) 54 | print(buffer.sample(3), len(buffer)) 55 | print(buffer.sample(7)) 56 | print(buffer.sample(8)) 57 | buffer.clear() 58 | print(buffer.content, len(buffer)) 59 | 60 | -------------------------------------------------------------------------------- /src/MachineLearning/algorithm/DeepReinforcementLearning/SimplePolicyGradient.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import gym 3 | import tensorflow as tf 4 | import numpy as np 5 | from tensorflow.contrib import slim 6 | from ReplayBuffer import ReplayBuffer 7 | 8 | env = gym.make('CartPole-v0') 9 | env.reset() 10 | 11 | buf = ReplayBuffer(5000) 12 | x = tf.placeholder(tf.float32, [None, 4], name='x') 13 | a = tf.placeholder(tf.float32, [None, 2], name='a') 14 | r = tf.placeholder(tf.float32, [None, 1], name='r') 15 | 16 | 17 | def build_net(x, a, r): 18 | h1 = slim.layers.fully_connected(x, 20) 19 | h2 = slim.layers.fully_connected(h1, 12) 20 | h3 = slim.layers.fully_connected(h2, 2, activation_fn=tf.identity) 21 | y = tf.nn.softmax(h3) 22 | logp = tf.log(y) 23 | 24 | # good_probabilities = tf.reduce_sum(tf.multiply(y, a), reduction_indices=[1]) 25 | # # maximize the log probability 26 | # log_probabilities = tf.log(good_probabilities) 27 | # loss = -tf.reduce_sum(log_probabilities) 28 | 29 | loss = -tf.reduce_sum( 30 | tf.reduce_sum( 31 | tf.multiply(tf.multiply(r, logp), a), 32 | reduction_indices=[1])) 33 | return y, logp, h3, loss 34 | 35 | 36 | y, logp, h, loss = build_net(x, a, r) 37 | 38 | optimizer = tf.train.RMSPropOptimizer(0.001).minimize(loss) 39 | 40 | sess = tf.InteractiveSession() 41 | tf.global_variables_initializer().run() 42 | buf.clear() 43 | 44 | 45 | def
get_act(feed_dict): 46 | act = sess.run(y, feed_dict=feed_dict)  # the policy's action probabilities 47 | if np.random.uniform() <= act[0][0]: 48 | return np.array([1, 0]) 49 | else: 50 | return np.array([0, 1]) 51 | 52 | 53 | render_close = True 54 | for i in range(40000): 55 | if i > 4000: 56 | render_close = False 57 | s = env.reset() 58 | total_reward = 0 59 | done = False 60 | 61 | bbuf = list() 62 | while not done: 63 | # collect data by running repeated episodes 64 | act = get_act({x: np.reshape(s, [1, 4])}) 65 | s_next, reward, done, _ = env.step(act[1]) 66 | total_reward += reward 67 | bbuf.append([s, act, total_reward, s_next]) 68 | env.render(close=render_close) 69 | s = s_next 70 | else:  # while/else: runs once the episode terminates 71 | for j in bbuf: 72 | j[2] = total_reward 73 | buf.extend(bbuf) 74 | if i % 150 == 140: 75 | for _ in range(4000): 76 | # train the network on the collected data 77 | st, at, rt, sn = zip(*buf.sample()) 78 | rt = np.reshape(np.array(rt), [-1, 1]) 79 | # shift and scale rt (rough centering/normalization of the returns) 80 | rt_norm = (rt - min(rt)) / (np.sqrt(np.var(rt)) + 1) 81 | feed_dict = { 82 | x: np.array(st), 83 | a: np.array(at), 84 | r: rt_norm 85 | } 86 | loss_runned, y_runned, _ = sess.run([loss, y, optimizer], feed_dict=feed_dict) 87 | print('loss: ', loss_runned, 'y', y_runned[0]) 88 | 89 | -------------------------------------------------------------------------------- /src/MachineLearning/algorithm/LDA.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from sklearn.datasets import load_breast_cancer 3 | from tsne import * 4 | import pandas as pd 5 | from pylab import * 6 | import seaborn as sns 7 | from functools import reduce 8 | from sklearn.preprocessing import MinMaxScaler 9 | from sklearn.model_selection import train_test_split 10 | data = load_breast_cancer() 11 | 12 | x, y, label_names = data['data'], data['target'], data['target_names'] 13 | scaler = MinMaxScaler() 14 | scaler.fit(x) 15 | x = scaler.transform(x) 16 | 17 | tsne(x, y, label_names=label_names) 18 | 19 | trainx, testx, trainy, testy = train_test_split(x, reshape(y, [-1, 1]), test_size=0.1) 20 | 21 | 22 | def variance(x, u): 23 | x = reshape(x, [-1, 1]) 24 | return (x - u).dot(x - u).T 25 | 26 | 27 | def lda2(x, y): 28 | x0, x1 = mat(x[y.T[0] == 0]), mat(x[y.T[0] == 1]) 29 | u0, u1 = x0.mean(axis=0), x1.mean(axis=0) 30 | 31 | sigma0, sigma1 = reduce(lambda x, y: x + y, [variance(i, u0) for i in x0]), reduce(lambda x, y: x + y, [variance(i, u1) for i in x1]) 32 | 33 | w = (u0 - u1).dot(pinv(sigma0 / x0.shape[0] + sigma1 / x1.shape[0])) 34 | return w 35 | 36 | 37 | def transform(w, x): 38 | return w.dot(x.T).T 39 | 40 | 41 | def predict(reduced, threshold, classes=None): 42 | if isinstance(threshold, (int, float)): 43 | threshold = [threshold] 44 | if classes is None: 45 | classes = range(len(threshold)) 46 | threshold.sort() 47 | predicted = zeros(reduced.shape) 48 | for i, j, c in zip(threshold[:-1], threshold[1:], classes[1:-1]): 49 | predicted[(reduced >= i) & (reduced < j)] = c  # elementwise mask; 'and' raises on arrays 50 | predicted[reduced < threshold[0]] = classes[0] 51 | predicted[reduced >= threshold[-1]] = classes[-1] 52 | return predicted 53 | 54 | 55 | def plot_prediction(reduced, predicted, target, threshold, label_names=None): 56 | colors = 'rgbcmyk' 57 | marks = '+xo.*' 58 | classes = set(list(target)) 59 | if label_names is None: 60 | label_names = [None] * len(classes) 61 | for i in classes: 62 | subscription = target == i 63 | plot(reduced[subscription], predicted[subscription], '{}{}'.format(colors[i], marks[i]), label=label_names[i]) 64 | plot([threshold, threshold], [-.5, 1.5], 'grey', label='Classify boundary') 65 | 
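# axis labels below: x is the 1-D LDA projection, y is the predicted class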
xlabel('Result after dimension reduction') 66 | ylabel('Predicted') 67 | legend() 68 | 69 | 70 | def plot_prediction2(transformed, predicted, y, label_names, threshold): 71 | plot([threshold, threshold], [-50, 50], 'grey', label='Classify boundary') 72 | t = pd.DataFrame(transformed, columns=['Dimension Reduction Result After LDA Transform']) 73 | p = pd.DataFrame([label_names[int(i[0])] for i in predicted], columns=['prediction']) 74 | lab = pd.DataFrame([label_names[int(i[0])] for i in y], columns=['label']) 75 | data_t = pd.concat([t, p, lab], axis=1) 76 | sns.swarmplot(y='label', x='Dimension Reduction Result After LDA Transform', hue='prediction', data=data_t) 77 | 78 | 79 | w = lda2(trainx, trainy) 80 | threshold = -0.061 81 | 82 | traint = transform(w, trainx) 83 | trainp = predict(traint, threshold, [0, 1]) 84 | trainerr = mean(abs(reshape(trainp, [1, -1]) - reshape(trainy, [1, -1]))) 85 | 86 | testt = transform(w, testx) 87 | testp = predict(testt, threshold, [0, 1]) 88 | testerr = mean(abs(reshape(testp, [1, -1]) - reshape(testy, [1, -1]))) 89 | 90 | 91 | figure() 92 | plot_prediction2( 93 | np.vstack([traint, testt]), 94 | np.vstack([mat(trainp) + 4, mat(testp) + 6]), 95 | np.vstack([trainy, testy + 2]), 96 | list(map(lambda x: 'training set ' + x, label_names)) + 97 | list(map(lambda x: 'testing set ' + x, label_names)) + 98 | list(map(lambda x: 'train prediction ' + x, label_names)) + 99 | list(map(lambda x: 'test prediction ' + x, label_names)), 100 | threshold) 101 | print('train err:', trainerr, 102 | '\ntest err:', testerr) 103 | show() 104 | # figure() 105 | # plot_prediction(transformed, predicted, y, label_names) 106 | 107 | 108 | # \documentclass{article} 109 | # \usepackage{amsmath} 110 | # \usepackage{amssymb} 111 | # \usepackage{ctex} 112 | # \begin{document} 113 | # LDA: 114 | # $\omega = S^{-1}_\omega(\mu_0 - \mu_1)$\\ 115 | # $S^{-1}_\omega = V\Sigma^{-1}U^T$\\ 116 | # Via singular value decomposition; in practice $S^{-1}_\omega$ can be computed directly with pinv (pseudo-inverse)\\ 117 | # $S_\omega = \Sigma_0 + \Sigma_1 $ 118 | 119 | # \end{document} 120 | -------------------------------------------------------------------------------- /src/MachineLearning/algorithm/NeuralNetwork/nn_mnist.py: -------------------------------------------------------------------------------- 1 | """This code still has small issues that I haven't bothered to fix: 2 | Issue 1: plain numpy arrays shouldn't be used here, np.mat should be used instead, otherwise some results are confusing 3 | Issue 2: the backpropagation seems wrong somewhere; with a hidden layer it does not converge 4 | Issue 3: vanishing gradients, never solved 5 | """ 6 | import pdb 7 | from pylab import * 8 | from tensorflow.examples.tutorials import mnist 9 | 10 | data = mnist.input_data.read_data_sets('MNIST_data', one_hot=True) 11 | 12 | 13 | def show_pic(image_data, index): 14 | if len(image_data.shape) == 2: 15 | imshow(reshape(image_data[index, :], [28, 28])) 16 | elif len(image_data.shape) == 1: 17 | imshow(reshape(image_data, [28, 28])) 18 | elif len(image_data.shape) == 3: 19 | imshow(image_data[index, :, :]) 20 | 21 | 22 | class mnist_net(object): 23 | 24 | def __init__(self, shape, data): 25 | self.data = data 26 | self.shape = shape 27 | self.weights = [0.1 * randn(a, b) for a, b in zip(shape[:-1], shape[1:])] 28 | self.biases = [zeros([1, a]) for a in shape[1:]] 29 | 30 | def sigmoid(self, z, derivative=False): 31 | sig = 1 / (1 + exp(-z)) 32 | if not derivative: 33 | return sig 34 | else: 35 | return sig * (1 - sig) 36 | 37 | def fp(self, inputs): 38 | ai = inputs 39 | self.z = [] 40 | self.a = [] 41 | for w, b in zip(self.weights, self.biases): 42 | self.a.append(ai) 43 | zi = ai.dot(w) + b 44 | self.z.append(zi) 45 | ai = self.sigmoid(zi) 46 | return ai 47 | 48 | def 
bp_step(self, batch=1, learning_rate=0.01): 49 | self.delta = [] 50 | inputs, t = self.data.train.next_batch(batch) 51 | y = self.fp(inputs) 52 | d = -(y - t) * self.sigmoid(y, derivative=True) 53 | d = ones([1, batch]).dot(d) / batch 54 | 55 | for w, b, z, a in zip(self.weights[::-1], self.biases[::-1], self.z[::-1], self.a[::-1]): 56 | self.delta.insert(0, d) 57 | d = d.dot(w.T) * self.sigmoid(a, derivative=True) 58 | d = ones([1, batch]).dot(d) / batch 59 | 60 | self.gradw = [] 61 | self.gradb = [] 62 | for a, d in zip(self.a, self.delta): 63 | self.gradw.append(ones([1, batch]).dot(a).T.dot(d) / batch) 64 | self.gradb.append(d) 65 | 66 | for i, (gw, gb) in enumerate(zip(self.gradw, self.gradb)): 67 | self.weights[i] += learning_rate * gw 68 | self.biases[i] += learning_rate * gb 69 | 70 | def training(self, times=150, batch=1, learning_rate=0.1): 71 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), argmax(data.test.labels, 1)) 72 | accuracy = mean(correct_prediction, axis=0) 73 | print(accuracy) 74 | for i in range(times): 75 | self.bp_step(batch, learning_rate) 76 | if i % 50 == 0: 77 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), argmax(data.test.labels, 1)) 78 | accuracy_test = mean(correct_prediction, axis=0) 79 | correct_prediction = equal(argmax(self.fp(data.train.images), 1), argmax(data.train.labels, 1)) 80 | accuracy_train = mean(correct_prediction, axis=0) 81 | print('%2.1f'%(i*100/times), accuracy_train, accuracy_test) 82 | 83 | 84 | net = mnist_net([784, 15, 10], data) 85 | net.training() 86 | for i in net.gradw: 87 | print(i) 88 | print(i.shape) 89 | 90 | def s(data): 91 | for i in data: 92 | print(i.shape) 93 | -------------------------------------------------------------------------------- /src/MachineLearning/algorithm/NeuralNetwork/nn_mnist_static.py: -------------------------------------------------------------------------------- 1 | import pdb 2 | from pylab import * 3 | from tensorflow.examples.tutorials import mnist 4 | 5 | data = mnist.input_data.read_data_sets('MNIST_data', one_hot=True) 6 | 7 | 8 | def show_pic(image_data, index): 9 | if len(image_data.shape) == 2: 10 | imshow(reshape(image_data[index, :], [28, 28])) 11 | elif len(image_data.shape) == 1: 12 | imshow(reshape(image_data, [28, 28])) 13 | elif len(image_data.shape) == 3: 14 | imshow(image_data[index, :, :]) 15 | 16 | 17 | class mnist_net(object): 18 | 19 | def __init__(self, shape, data): 20 | self.data = data 21 | self.W0 = np.mat(np.random.randn(784, 15) * .1) 22 | self.W1 = np.mat(np.random.randn(15, 10) * .1) 23 | self.b0 = np.mat(np.zeros([1, 15])) 24 | self.b1 = np.mat(np.zeros([1, 10])) 25 | 26 | def sigmoid(self, z, derivative=False): 27 | sig = 1 / (1 + exp(-z)) 28 | if not derivative: 29 | return sig 30 | return np.multiply(sig, (1 - sig)) 31 | 32 | def fp(self, inputs): 33 | self.l0 = np.mat(inputs) 34 | self.z1 = inputs.dot(self.W0) + self.b0 35 | self.l1 = self.sigmoid(self.z1) 36 | self.z2 = self.l1.dot(self.W1) + self.b1 37 | self.l2 = self.sigmoid(self.z2) 38 | return self.l2 39 | 40 | def bp_step(self, batch=1, learning_rate=1, lr_ampl=1): 41 | inputs, t = self.data.train.next_batch(batch) 42 | y = self.fp(inputs) 43 | self.d2 = np.multiply(-(y - t), self.sigmoid(self.z2, derivative=True)).mean(axis=0) 44 | 45 | self.d1 = np.multiply(self.d2.dot(self.W1.T).mean(axis=0), self.sigmoid(self.z1, derivative=True)).mean(axis=0) 46 | 47 | self.gradw1 = self.l1.mean(axis=0).T.dot(self.d2) 48 | self.gradw0 = self.l0.mean(axis=0).T.dot(self.d1) 49 | 
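# the bias gradients below are just the batch-averaged deltas of each layer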
self.gradb1 = self.d2 50 | self.gradb0 = self.d1 51 | 52 | self.W1 += self.gradw1 * learning_rate 53 | self.W0 += self.gradw0 * learning_rate * lr_ampl 54 | self.b1 += self.gradb1 * learning_rate 55 | self.b0 += self.gradb0 * learning_rate 56 | 57 | def training(self, times=35000, batch=100, learning_rate=3): 58 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), np.mat(argmax(data.test.labels, 1)).T) 59 | accuracy = correct_prediction.mean() 60 | print(accuracy) 61 | for i in range(times): 62 | self.bp_step(batch, learning_rate) 63 | if i % 500 == 0: 64 | correct_prediction = equal(argmax(self.fp(data.test.images), 1), np.mat(argmax(data.test.labels, 1)).T) 65 | accuracy_test = correct_prediction.mean() 66 | print('%2.1f' % (i * 100 / times), accuracy_test) 67 | 68 | 69 | net = mnist_net([784, 15, 10], data) 70 | net.training() 71 | for i in [net.gradw0, net.gradw1, net.W0, net.W1]: 72 | print(i) 73 | print(i.shape) 74 | 75 | def s(data): 76 | for i in data: 77 | print(i.shape) 78 | -------------------------------------------------------------------------------- /src/MachineLearning/algorithm/tsne.py: -------------------------------------------------------------------------------- 1 | ../sklearn/tsne.py -------------------------------------------------------------------------------- /src/MachineLearning/preprocess.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import numpy as np 4 | 5 | 6 | class Scaler(object): 7 | """Scaler for datasets 8 | Methods: 9 | refit: re-obtain min/max/mean/var from given data 10 | trans: standardize the data 11 | itrans: inverse-transform data from standardized back to original 12 | 13 | Usage: 14 | # loading sklearn datasets 15 | from sklearn.datasets import load_boston 16 | dataset = load_boston() 17 | X, Y = dataset['data'], dataset['target'] 18 | 19 | # using scaler to fit the variance, mean, and minmax 20 | scaler = Scaler(X, Y, method='minmax', attrs=[-1, 1]) 21 | X, Y = scaler.trans(X, Y) 22 | 23 | # or just transform x or y only 24 | target = scaler.trans(Y=Y) 25 | 26 | # or inverse 27 | predicted_Y = scaler.itrans(Y=predicted) 28 | """ 29 | 30 | def __init__(self, X=None, Y=None, method='minmax', attrs=None, axis=0): 31 | """ 32 | methods: minmax, standard 33 | attrs: 34 | In minmax: min and max, [0, 1]; 35 | In standard: mean and variance, [0, 1] 36 | """ 37 | self._check_None(X, Y) 38 | self._method = method 39 | self._attrs = attrs 40 | self._axis = axis 41 | self._statistics = dict() 42 | 43 | self._trans_dict = { 44 | 'minmax': self._min_max_scaler, 45 | 'standard': self._standard_scaler} 46 | 47 | self.refit(X, Y) 48 | 49 | def refit(self, X=None, Y=None): 50 | # refit the data statistics (minmax, mean, var) from the given data 51 | self._check_None(X, Y) 52 | self._obtain_statistics(X, 'X') 53 | self._obtain_statistics(Y, 'Y') 54 | 55 | def trans(self, X=None, Y=None, inv=False): 56 | self._check_None(X, Y) 57 | if X is None: 58 | return self._trans_dict[self._method](Y, 'Y', inv) 59 | elif Y is None: 60 | return self._trans_dict[self._method](X, 'X', inv) 61 | else: 62 | return self._trans_dict[self._method](X, 'X', inv), self._trans_dict[self._method](Y, 'Y', inv) 63 | 64 | def itrans(self, X=None, Y=None): 65 | return self.trans(X, Y, inv=True) 66 | 67 | def _obtain_statistics(self, data, name): 68 | """Obtain minmax, mean and variance""" 69 | if data is not None: 70 | if not hasattr(data, 'shape'): 71 | raise ValueError("'{}' has no attribute '{}'".format(name, 'shape')) 72 | 73 | 
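# cache this array's statistics under its name ('X' or 'Y')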
self._statistics[name] = dict() 74 | self._statistics[name]['shape'] = data.shape 75 | 76 | # determine the data's dimensionality and decide which axis to use 77 | if len(data.shape) == 1: 78 | axis = 0 79 | elif len(data.shape) == 2: 80 | axis = self._axis 81 | else: 82 | raise ValueError("Dimension of '{}' is not 1 or 2, given {} with shape {}".format( 83 | name, 84 | len(data.shape), 85 | data.shape)) 86 | 87 | # record the real axis and obtain statistics 88 | self._statistics[name]['axis'] = axis 89 | 90 | self._statistics[name]['minmax'] = ( 91 | np.min(data, axis=axis), 92 | np.max(data, axis=axis)) 93 | 94 | self._statistics[name]['norm'] = ( 95 | np.mean(data, axis=axis), 96 | np.var(data, axis=axis)) 97 | 98 | def _min_max_scaler(self, data, name, inv=False): 99 | """MinMax scaler, mapping the data range to [attrs[0], attrs[1]] 100 | inv: inverse transform 101 | """ 102 | if self._attrs is None: 103 | self._attrs = [0, 1] 104 | self._attrs.sort() 105 | lo, hi = self._attrs 106 | data_min, data_max = self._statistics[name]['minmax'] 107 | 108 | if not inv: 109 | return (data - data_min) / (data_max - data_min) * (hi - lo) + lo 110 | else: 111 | return (data - lo) / (hi - lo) * (data_max - data_min) + data_min 112 | 113 | def _standard_scaler(self, data, name, inv=False): 114 | """Standard scaler, forcing the mean and variance to attrs[0] and attrs[1] 115 | inv: inverse transform 116 | """ 117 | if self._attrs is None: 118 | self._attrs = [0, 1] 119 | target_mean, target_var = self._attrs 120 | data_mean, data_var = self._statistics[name]['norm'] 121 | 122 | if not inv: 123 | return (data - data_mean) / data_var ** 0.5 * target_var ** 0.5 + target_mean 124 | else: 125 | return (data - target_mean) / target_var ** 0.5 * data_var ** 0.5 + data_mean 126 | 127 | @property 128 | def statistics(self): 129 | return self._statistics 130 | 131 | def _check_None(self, X, Y): 132 | if X is None and Y is None: 133 | raise ValueError('X and Y cannot both be None') 134 | 135 | def __repr__(self): 136 | return '<{} Scaler with {} fitted, attrs={}>'.format(self._method, tuple(self._statistics.keys()), self._attrs) 137 | -------------------------------------------------------------------------------- /src/MachineLearning/sklearn/tsne.py: -------------------------------------------------------------------------------- 1 | from pylab import * 2 | from sklearn.manifold import TSNE 3 | from sklearn.decomposition import PCA 4 | 5 | 6 | def plot_embedding(X, y, plot_title=None, unbalanced=True, target_names=None): 7 | x_min, x_max = X[:, 0].min(), X[:, 0].max() 8 | y_min, y_max = X[:, 1].min(), X[:, 1].max() 9 | figure() 10 | for i in range(X.shape[0]): 11 | text( 12 | X[i, 0], 13 | X[i, 1], 14 | str(y[i]), 15 | color=cm.Set1(1 - y[i] / 7), 16 | fontdict={ 17 | 'weight': 'bold', 18 | 'size': 4 if y[i] == 0 and unbalanced else 9}) 19 | if plot_title is not None: 20 | title(plot_title) 21 | 22 | factor = 0.1 23 | margin_x, margin_y = factor * (x_max - x_min), factor * (y_max - y_min) 24 | axis([ 25 | x_min - margin_x, 26 | x_max + margin_x, 27 | y_min - margin_y, 28 | y_max + margin_y]) 29 | 30 | figure() 31 | name_dict = set()  # labels already added to the legend (a set, despite the name) 32 | for i in range(X.shape[0]): 33 | name = None 34 | if target_names is not None: 35 | if y[i] not in name_dict: 36 | name_dict.add(y[i]) 37 | name = target_names[y[i]] 38 | plot( 39 | X[i, 0], 40 | X[i, 1], 41 | '.', 42 | c=cm.Set1(1 - y[i] / 7), 43 | markersize=2 if y[i] == 0 and unbalanced else 4, 44 | label=name) 45 | if plot_title is not None: 46 | title(plot_title) 47 | legend() 48 | 49 | 50 | def tsne(data_test, data_label, title=None, unbalanced=False, method='tsne', label_names=None, one_hot=False): 51 | if not one_hot: 52 | label_n = data_label 53 | else: 54 | label_n = argmax(data_label, axis=1) 55 | 56 | 
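# both TSNE and PCA expose fit_transform, so either can serve as the embedding model below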
models = { 57 | 'tsne': TSNE(n_iter=5000), 58 | 'pca': PCA()} 59 | 60 | model = models[method.lower()] 61 | tsne_transformed = model.fit_transform(data_test, label_n) 62 | 63 | plot_embedding(tsne_transformed, label_n, title if title else 't-sne projection', unbalanced, label_names) 64 | return tsne_transformed, label_n 65 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT-ultimate/ECDICT.css: -------------------------------------------------------------------------------- 1 | @charset "UTF-8"; 2 | @namespace d url(http://www.apple.com/DTDs/DictionaryService-1.0.rng); 3 | 4 | h1 { 5 | font-size: 150%; 6 | display: inline; 7 | } 8 | 9 | span.phonetic, 10 | span.trans_name { 11 | color: gray; 12 | } 13 | 14 | div.definition { 15 | color: cadetblue; 16 | display: block; 17 | margin: 1em; 18 | 19 | } 20 | 21 | div.translation { 22 | display: block; 23 | margin: 1em; 24 | } 25 | 26 | div.tags { 27 | font-size: 80%; 28 | color: gray; 29 | } 30 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT-ultimate/ECDICT.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | Chinese 7 | CFBundleIdentifier 8 | com.apple.dictionary.concise-bing 9 | CFBundleName 10 | 简明英汉必应版 11 | CFBundleShortVersionString 12 | 1.0 13 | DCSDictionaryCopyright 14 | Copyright © 2017 Linwei under MIT License. https://github.com/skywind3000/ECDICT-ultimate/releases 15 | DCSDictionaryManufacturerName 16 | Charles Xu 17 | DCSDictionaryPrefsHTML 18 | ECDICT_prefs.html 19 | DCSDictionaryXSL 20 | ECDICT.xsl 21 | DCSDictionaryDefaultPrefs 22 | 23 | display-defitition 24 | 1 25 | display-tags 26 | 1 27 | display-trans_name 28 | 1 29 | version 30 | 1 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT-ultimate/Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # Makefile 3 | # 4 | # 5 | # 6 | 7 | ########################### 8 | 9 | # You need to edit these values. 10 | 11 | DICT_NAME = "简明英汉必应版" 12 | DICT_SRC_PATH = ECDICT.xml 13 | CSS_PATH = ECDICT.css 14 | PLIST_PATH = ECDICT.plist 15 | 16 | DICT_BUILD_OPTS = 17 | # Suppress adding supplementary key. 18 | # DICT_BUILD_OPTS = -s 0 # Suppress adding supplementary key. 19 | 20 | ########################### 21 | 22 | # The DICT_BUILD_TOOL_DIR value is used also in "build_dict.sh" script. 23 | # You need to set it when you invoke the script directly. 24 | 25 | DICT_BUILD_TOOL_DIR = "/Users/charles/Desktop/Dictionary Development Kit" 26 | DICT_BUILD_TOOL_BIN = "$(DICT_BUILD_TOOL_DIR)/bin" 27 | 28 | ########################### 29 | 30 | DICT_DEV_KIT_OBJ_DIR = ./objects 31 | export DICT_DEV_KIT_OBJ_DIR 32 | 33 | DESTINATION_FOLDER = ~/Library/Dictionaries 34 | RM = /bin/rm 35 | 36 | ########################### 37 | 38 | all: 39 | "$(DICT_BUILD_TOOL_BIN)/build_dict.sh" $(DICT_BUILD_OPTS) $(DICT_NAME) $(DICT_SRC_PATH) $(CSS_PATH) $(PLIST_PATH) 40 | echo "Done." 41 | 42 | 43 | install: 44 | echo "Installing into $(DESTINATION_FOLDER)". 45 | mkdir -p $(DESTINATION_FOLDER) 46 | ditto --noextattr --norsrc $(DICT_DEV_KIT_OBJ_DIR)/$(DICT_NAME).dictionary $(DESTINATION_FOLDER)/$(DICT_NAME).dictionary 47 | touch $(DESTINATION_FOLDER) 48 | echo "Done." 49 | echo "To test the new dictionary, try Dictionary.app." 
50 | 51 | clean: 52 | $(RM) -rf $(DICT_DEV_KIT_OBJ_DIR) 53 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT-ultimate/OtherResources/ECDICT.xsl: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT-ultimate/OtherResources/ECDICT_prefs.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 | 12 |
13 |
14 |
15 | This dictionary is made by Charles Xu 16 |
17 | https://github.com/the0demiurge/Python-Scripts/tree/master/src/MinorProjects/ECDICT-ultimate 18 |
19 |
20 |
21 | 显示:
22 | 英文释义 23 | 人名释义 24 | 标签
25 |
26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT-ultimate/README.md: -------------------------------------------------------------------------------- 1 | # ECDICT-ultimate 2 | 3 | Difference from ECDICT: an added option to hide translations of names. 4 | 5 | ## Build instruction 6 | 7 | Prerequisites: 8 | - python3 9 | - Apple's Dictionary Development Kit 10 | - GNU make 11 | 12 | 1. Download the Dictionary Development Kit from the Apple developer website and decompress it. 13 | 2. Download the csv file from the [ECDICT-ultimate](https://github.com/skywind3000/ECDICT-ultimate/releases) project. 14 | 3. Edit the Dictionary Development Kit path in the Makefile. 15 | 4. Run `python3 csv2dict.py <csv file>` 16 | 5. Run `make;make install` -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT-ultimate/csv2dict.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import csv 3 | import sys 4 | from xml.sax.saxutils import escape, quoteattr, unescape 5 | 6 | if len(sys.argv) != 2: 7 | print('usage: python3', __file__, '<csv file>') 8 | exit() 9 | 10 | template = ''' {index_list}

{word}

{contents}
''' 11 | phonetic_temp = '| {} |' 12 | definition_temp = '
{}
' 13 | translation_temp = '
{}
' 14 | tags_temp = '
{}
' 15 | index_temp = '' 16 | head = ''' 17 | 19 | ''' 20 | tail = '''''' 21 | file_path = sys.argv[-1] 22 | out_path = 'ECDICT.xml' 23 | data = csv.reader(open(file_path, 'r').readlines()[1:]) 24 | 25 | 26 | def gen_entry(entry_id, word, phonetic, definition, translation, tags, exchange): 27 | contents = list() 28 | for data, temp in ( 29 | (phonetic, phonetic_temp), 30 | (translation, translation_temp), 31 | (definition, definition_temp), 32 | (tags, tags_temp), 33 | ): 34 | if data: 35 | contents.append(temp.format(data)) 36 | return template.format(id=entry_id, word_=quoteattr(word), word=word, contents=''.join(contents), index_list=exchange) 37 | 38 | 39 | def name_escape(word, string): 40 | if ')人名' in string: 41 | return '{}'.format(string), 1 42 | else: 43 | return string, 0 44 | 45 | 46 | def format_trans(word, string): 47 | contents = string.strip().split(r'\n') 48 | contents = [name_escape(word, escape(unescape(i))) for i in contents] 49 | contents.sort(key=lambda x: x[1]) 50 | return '
'.join(next(zip(*contents))) 51 | 52 | 53 | def process_exchange(word, exchange): 54 | if not exchange: 55 | return '' 56 | exchange = set([i.split(':', 1)[1] for i in exchange.split('/') if not i.startswith('1')]) 57 | if word in exchange: 58 | exchange.remove(word) 59 | return ''.join(index_temp.format(quoteattr(i)) for i in exchange if i) 60 | 61 | 62 | f = open(out_path, 'w') 63 | f.write(head) 64 | for i, entry in enumerate(data): 65 | [ 66 | word, 67 | phonetic, 68 | definition, 69 | translation, 70 | _, 71 | _, 72 | _, 73 | tag, 74 | _, 75 | _, 76 | exchange, 77 | _, 78 | _ 79 | ] = entry 80 | entry_id = hex(i)[2:] 81 | print( 82 | gen_entry( 83 | entry_id, 84 | escape(unescape(word)), 85 | escape(unescape(phonetic)), 86 | format_trans(word, definition), 87 | format_trans(word, translation), 88 | escape(unescape(tag.replace(' ', ','))), 89 | process_exchange(word, exchange), 90 | ), 91 | file=f 92 | ) 93 | 94 | f.write(tail) 95 | f.close() 96 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT/ECDICT.css: -------------------------------------------------------------------------------- 1 | @charset "UTF-8"; 2 | @namespace d url(http://www.apple.com/DTDs/DictionaryService-1.0.rng); 3 | 4 | h1 { 5 | font-size: 150%; 6 | display: inline; 7 | } 8 | 9 | div.tags { 10 | font-size: 80%; 11 | color: gray; 12 | } 13 | 14 | span.phonetic { 15 | color: gray; 16 | } 17 | 18 | div.definition { 19 | color: cadetblue; 20 | display: block; 21 | margin: 1em; 22 | 23 | } 24 | 25 | div.translation { 26 | display: block; 27 | margin: 1em; 28 | } 29 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT/ECDICT.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | Chinese 7 | CFBundleIdentifier 8 | com.apple.dictionary.ECDICT 9 | CFBundleName 10 | 简明英汉字典增强版 11 | CFBundleShortVersionString 12 | 1.0 13 | DCSDictionaryCopyright 14 | Copyright © 2017 Linwei under MIT License. https://github.com/skywind3000/ECDICT/releases 15 | DCSDictionaryManufacturerName 16 | Charles Xu 17 | DCSDictionaryPrefsHTML 18 | ECDICT_prefs.html 19 | DCSDictionaryXSL 20 | ECDICT.xsl 21 | DCSDictionaryDefaultPrefs 22 | 23 | display-defitition 24 | 1 25 | display-tags 26 | 1 27 | version 28 | 1 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT/Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # Makefile 3 | # 4 | # 5 | # 6 | 7 | ########################### 8 | 9 | # You need to edit these values. 10 | 11 | DICT_NAME = "简明英汉字典增强版" 12 | DICT_SRC_PATH = ECDICT.xml 13 | CSS_PATH = ECDICT.css 14 | PLIST_PATH = ECDICT.plist 15 | 16 | DICT_BUILD_OPTS = 17 | # Suppress adding supplementary key. 18 | # DICT_BUILD_OPTS = -s 0 # Suppress adding supplementary key. 19 | 20 | ########################### 21 | 22 | # The DICT_BUILD_TOOL_DIR value is used also in "build_dict.sh" script. 23 | # You need to set it when you invoke the script directly. 
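# A hypothetical invocation (not in the original Makefile): a command-line
# assignment overrides the value set below, e.g.
#   make DICT_BUILD_TOOL_DIR="/path/to/Dictionary Development Kit"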
24 | 25 | DICT_BUILD_TOOL_DIR = "/Users/charles/Desktop/Dictionary Development Kit" 26 | DICT_BUILD_TOOL_BIN = "$(DICT_BUILD_TOOL_DIR)/bin" 27 | 28 | ########################### 29 | 30 | DICT_DEV_KIT_OBJ_DIR = ./objects 31 | export DICT_DEV_KIT_OBJ_DIR 32 | 33 | DESTINATION_FOLDER = ~/Library/Dictionaries 34 | RM = /bin/rm 35 | 36 | ########################### 37 | 38 | all: 39 | "$(DICT_BUILD_TOOL_BIN)/build_dict.sh" $(DICT_BUILD_OPTS) $(DICT_NAME) $(DICT_SRC_PATH) $(CSS_PATH) $(PLIST_PATH) 40 | echo "Done." 41 | 42 | 43 | install: 44 | echo "Installing into $(DESTINATION_FOLDER)". 45 | mkdir -p $(DESTINATION_FOLDER) 46 | ditto --noextattr --norsrc $(DICT_DEV_KIT_OBJ_DIR)/$(DICT_NAME).dictionary $(DESTINATION_FOLDER)/$(DICT_NAME).dictionary 47 | touch $(DESTINATION_FOLDER) 48 | echo "Done." 49 | echo "To test the new dictionary, try Dictionary.app." 50 | 51 | clean: 52 | $(RM) -rf $(DICT_DEV_KIT_OBJ_DIR) 53 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT/OtherResources/ECDICT.xsl: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT/OtherResources/ECDICT_prefs.html: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
15 | 16 |
17 |
18 | This dictionary is made by Charles Xu (https://github.com/the0demiurge/) 19 |
20 |
21 |
22 | 显示:
23 | 英文释义 24 | 标签
25 |
26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT/README.md: -------------------------------------------------------------------------------- 1 | # ECDICT 2 | ## Build instruction 3 | 4 | Prerequisites: 5 | - python3 6 | - Apple's Dictionary Development Kit 7 | - gnu make 8 | 9 | 1. Download Dictionary Development Kit from apple developer website and decompress. 10 | 2. Download csv file from [ECDICT](https://github.com/skywind3000/ECDICT/releases) project. 11 | 3. Edit Dictionary Development Kit path in Makefile. 12 | 4. run `python3 csv2dict.py ` 13 | 5. run `make;make install` 14 | -------------------------------------------------------------------------------- /src/MinorProjects/ECDICT/csv2dict.py: -------------------------------------------------------------------------------- 1 | ../ECDICT-ultimate/csv2dict.py -------------------------------------------------------------------------------- /src/MinorProjects/chat/chat.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, unicode_literals, division 2 | import socket 3 | import argparse 4 | import threading 5 | import time 6 | import select 7 | import queue 8 | 9 | try: 10 | import Cryptodome 11 | from Cryptodome.Cipher import AES 12 | from Cryptodome.PublicKey import RSA 13 | from Cryptodome.Random import get_random_bytes 14 | except ImportError as e: 15 | print(e) 16 | print('You should install python library {} first: {}'.format( 17 | 'pycryptodomex', 'https://pypi.org/project/pycryptodomex/')) 18 | exit() 19 | 20 | 21 | def get_host_ip(address=('8.8.8.8', 80)): 22 | try: 23 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 24 | sock.connect(address) 25 | ip = sock.getsockname()[0] 26 | finally: 27 | sock.close() 28 | return ip 29 | 30 | 31 | class UDPServer(threading.Thread): 32 | """docstring for Server""" 33 | 34 | def __init__(self, 35 | address=('', 54321), 36 | queue=None, 37 | sock_mode=(socket.AF_INET, socket.SOCK_DGRAM), 38 | show_msg=print, 39 | buffer_size=4096): 40 | super(UDPServer, self).__init__(name='UDPServer') 41 | if address[0] not in ('127.0.0.1', '0.0.0.0', '::', ''): 42 | address = get_host_ip(), address[1] 43 | self.address = address 44 | self.queue = queue 45 | self.sock_mode = sock_mode 46 | self.show_msg = show_msg 47 | self.buffer_size = buffer_size 48 | self.sock = socket.socket(*self.sock_mode) 49 | self.sock.setblocking(False) 50 | 51 | self.running_status = True 52 | 53 | self.bind() 54 | 55 | def __del__(self): 56 | self.stop() 57 | 58 | def stop(self): 59 | self.running_status = False 60 | self.sock.close() 61 | self.show_msg('Server Stopped') 62 | 63 | def bind(self): 64 | try: 65 | self.sock.bind(self.address) 66 | self.show_msg('Server started at {}:{}'.format(*self.address)) 67 | except OSError as e: 68 | self.show_msg(e) 69 | return e 70 | return True 71 | 72 | def run(self): 73 | try: 74 | while self.running_status: 75 | try: 76 | data, addr = self.recv() 77 | except OSError as e: 78 | self.show_msg(e) 79 | if data is not None: 80 | self.enqueue(addr, data.decode('utf-8')) 81 | except (KeyboardInterrupt, EOFError): 82 | pass 83 | 84 | def recv(self): 85 | ready = select.select((self.sock,), (), (), 0.1) 86 | data, addr = None, () 87 | if ready[0]: 88 | data, addr = self.sock.recvfrom(self.buffer_size) 89 | return data, addr 90 | 91 | def enqueue(self, *data): 92 | if not self.queue.full(): 93 | self.queue.put(data) 94 | else: 95 | 
self.show_msg('Queue full') 96 | 97 | 98 | class UDPClient(object): 99 | def __init__(self, 100 | port=12345, 101 | sock_mode=(socket.AF_INET, socket.SOCK_DGRAM), 102 | show_msg=print): 103 | self.port = port 104 | self.sock_mode = sock_mode 105 | self.show_msg = show_msg 106 | self.sock = socket.socket(*sock_mode) 107 | 108 | self.running_status = True 109 | 110 | self.bind() 111 | 112 | def sendto(self, address, msg): 113 | self.sock.sendto(bytes(msg, 'utf-8'), address) 114 | 115 | def bind(self): 116 | try: 117 | self.sock.bind(('127.0.0.1', self.port)) 118 | self.show_msg('Client started at port {}'.format(self.port)) 119 | except OSError as e: 120 | self.show_msg(e) 121 | return e 122 | return True 123 | 124 | def stop(self): 125 | self.running_status = False 126 | self.sock.close() 127 | self.show_msg('Client Stopped') 128 | 129 | def __del__(self): 130 | self.stop() 131 | 132 | 133 | def print_loop(address): 134 | client = UDPClient() 135 | running_status = True 136 | 137 | try: 138 | while running_status: 139 | time.sleep(0.01) 140 | msg = input('> ') 141 | client.sendto(address, msg) 142 | except (KeyboardInterrupt, EOFError): 143 | running_status = False 144 | finally: 145 | client.stop() 146 | 147 | 148 | def parse_args(): 149 | parser = argparse.ArgumentParser(description='Python p2p chat by Charles Xu') 150 | parser.add_argument('-a', '--address', default='127.0.0.1', help='target IP address') 151 | parser.add_argument('-p', '--port', default=8899, type=int, help='remote port') 152 | args = parser.parse_args() 153 | address = (args.address, args.port) 154 | sock_mode = (socket.AF_INET, socket.SOCK_DGRAM) 155 | return address, sock_mode 156 | 157 | 158 | def main(): 159 | message_queue = queue.Queue() 160 | address, sock_mode = parse_args() 161 | print('Connect to:') 162 | print(address, sock_mode, sep='\n', end='\n\n') 163 | thread = UDPServer(address=address, sock_mode=sock_mode, queue=message_queue) 164 | thread.start() 165 | print_loop(address) 166 | thread.stop() 167 | while not message_queue.empty(): 168 | print(message_queue.get()) 169 | print('bye') 170 | 171 | 172 | if __name__ == '__main__': 173 | main() 174 | -------------------------------------------------------------------------------- /src/MinorProjects/chat/doc/structure.md: -------------------------------------------------------------------------------- 1 | # P2P Protocol 2 | 3 | ## Connection Method 4 | 5 | socket, UDP(/TCP) 6 | 7 | ## Client Request 8 | 9 | ```python 10 | ( 11 | ('username', 'ip', port, request, 'request_data'), 12 | 'Markdown message', 13 | (('binary object name', 'binary object data'), (...)) 14 | ) 15 | ``` 16 | 17 | Where request contains: 18 | 19 | - message 20 | - hello : `(('available time', int), ('erase after noconnection', int))`; if always available, set it to -1; if not available, set it to 0 21 | - RSA pubkey 22 | - AES key 23 | - erase history 24 | - erase keys 25 | - erase all 26 | - heart beat 27 | 28 | ## Server handling 29 | - Add a timestamp to the first tuple 30 | - Use `ast.literal_eval` to parse the client request 31 | 32 | ## Encryption, Decryption 33 | 34 | Use ECDHE, RSA, AES, PSK/SRP 35 | 36 | # Saved Files 37 | 38 | Config files must be encrypted with one's own RSA public key, and the private key should be stored securely, so configs cannot be read without the keys 39 | 40 | ## Contacts 41 | 42 | ```python 43 | { 44 | 'contact name':[ 45 | pubkey, 46 | ('host', port), 47 | aes_key 48 | ] 49 | } 50 | 51 | ``` 52 | ## Messages 53 | ```python 54 | { 55 | ('contact name', 'host', port):[ 56 | 57 | ] 58 | 
} 59 | ``` 60 | 61 | 62 | ## Client and Server configs 63 | ```python 64 | { 65 | 'item': value 66 | } 67 | ``` 68 | 69 | # Group Chat Protocol 70 | 71 | Broadcast messages to the whole network by traversing the graph; maintain a cluster-wide shared contact list. -------------------------------------------------------------------------------- /src/MinorProjects/chat/http.txt: -------------------------------------------------------------------------------- 1 | GET / HTTP/1.1 2 | Host: localhost:8081 3 | Connection: keep-alive 4 | Upgrade-Insecure-Requests: 1 5 | User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36 6 | DNT: 1 7 | Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8 8 | Accept-Encoding: gzip, deflate, br 9 | Accept-Language: en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7 10 | 11 | 12 | 13 | GET https://www.baidu.com/ HTTP/1.1 14 | Host: www.baidu.com 15 | Connection: keep-alive 16 | Upgrade-Insecure-Requests: 1 17 | User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36 18 | Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8 19 | Referer: http://www.baidu.com/ 20 | Accept-Encoding: gzip, deflate, sdch, br 21 | Accept-Language: zh-CN,zh;q=0.8,en;q=0.6 22 | Cookie: BAIDUID=04E4001F34EA74AD4601512DD3C41A7B:FG=1; BIDUPSID=04E4001F34EA74AD4601512DD3C41A7B; PSTM=1470329258; MCITY=-343%3A340%3A; BDUSS=nF0MVFiMTVLcUh-Q2MxQ0M3STZGQUZ4N2hBa1FFRkIzUDI3QlBCZjg5cFdOd1pZQVFBQUFBJCQAAAAAAAAAAAEAAADpLvgG0KGyvLrcyfrG-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFaq3ldWqt5XN; H_PS_PSSID=1447_18240_21105_21386_21454_21409_21554; BD_UPN=12314753; sug=3; sugstore=0; ORIGIN=0; bdime=0; H_PS_645EC=7e2ad3QHl181NSPbFbd7PRUCE1LlufzxrcFmwYin0E6b%2BW8bbTMKHZbDP0g; BDSVRTM=0 -------------------------------------------------------------------------------- /src/MinorProjects/chat/udp_client.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | 5 | if len(sys.argv) != 4: 6 | print('Usage: python udp_client.py host port message') 7 | exit() 8 | 9 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 10 | sock.bind(('localhost', 10000)) 11 | sock.sendto(bytes(sys.argv[3], 'utf-8'), (sys.argv[1], int(sys.argv[2]))) 12 | sock.close() 13 | 14 | -------------------------------------------------------------------------------- /src/MinorProjects/chat/ui.py: -------------------------------------------------------------------------------- 1 | import curses 2 | import locale 3 | from curses import textpad 4 | from math import ceil 5 | from ast import literal_eval as loadstr 6 | 7 | 8 | locale.setlocale(locale.LC_ALL, '') 9 | code = locale.getpreferredencoding() 10 | 11 | 12 | class Reigon(object): 13 | def __init__(self, shape, border=True): 14 | """Draw a region with a border; can judge whether a coordinate lies in this region 15 | shape: (upper-left y, upper-left x, lower-right y, lower-right x) 16 | """ 17 | assert len(shape) == 4, 'Shape must be 4-tuple' 18 | self.update(shape) 19 | if border: 20 | self.border = 1 21 | else: 22 | self.border = 0 23 | 24 | def update(self, shape): 25 | self.shape = shape 26 | self.luy, self.lux, self.rby, self.rbx = shape 27 | self.height, self.width = self.rby - self.luy, self.rbx - self.lux 28 | 29 | def is_in_window(self, y, x): 30 | return self.luy <= y <= self.rby and self.lux <= x <= self.rbx 31 | 32 | def 
is_in_content(self, y, x): 33 | return self.luy + self.border <= y <= self.rby - self.border and self.lux + self.border <= x <= self.rbx - self.border 34 | 35 | @staticmethod 36 | def coord2newwin(shape): 37 | luy, lux, rby, rbx = shape 38 | height, width = rby - luy, rbx - lux 39 | return height, width, luy, lux 40 | 41 | 42 | class RectAngle(Reigon): 43 | def __init__(self, shape, parent_window): 44 | """Rectangle drawn directly on a parent window 45 | shape: (upper-left y, upper-left x, lower-right y, lower-right x) 46 | """ 47 | super(RectAngle, self).__init__(shape, border=True) 48 | self.parent_window = parent_window 49 | self.draw() 50 | 51 | def draw(self): 52 | textpad.rectangle(self.parent_window, *self.shape)  # rectangle() takes (win, uly, ulx, lry, lrx), i.e. the shape itself 53 | 54 | 55 | class SubWindow(Reigon): 56 | def __init__(self, shape, border=True): 57 | """SubWindow 58 | shape: (upper-left y, upper-left x, lower-right y, lower-right x) 59 | """ 60 | super(SubWindow, self).__init__(shape, border) 61 | self.window = curses.newwin(*self.coord2newwin(self.shape)) 62 | if border: 63 | self.window.border() 64 | 65 | def resize(self, shape): 66 | self.update(shape) 67 | 68 | 69 | class Textbox(object): 70 | def __init__(self, shape, parent_window): 71 | self.parent_window = parent_window 72 | self.border_reigon, self.input_reigon = shape, (shape[0] + 1, shape[1] + 1, shape[2] - 1, shape[3] - 1) 73 | self.border_rectangle = RectAngle(self.border_reigon, parent_window) 74 | self.window = parent_window.derwin(*Reigon.coord2newwin(self.input_reigon)) 75 | self.textbox = textpad.Textbox(self.window) 76 | 77 | def update(self, shape): 78 | self.border_reigon = shape 79 | self.input_reigon = (shape[0]+1, shape[1]+1, shape[2]-1, shape[3]-1) 80 | self.border_rectangle.update(self.border_reigon) 81 | 82 | def resize(self, shape): 83 | self.update(shape) 84 | # resize self.window 85 | 86 | 87 | class ContactWindow(SubWindow): 88 | def __init__(self, contact_width=20, config_path=None): 89 | super(ContactWindow, self).__init__() 90 | 91 | def add_contact(self, ip, port): 92 | pass 93 | 94 | def delete_contact(self, index): 95 | pass 96 | 97 | def choose_contact(self, index): 98 | pass 99 | 100 | def save_contact(self): 101 | pass 102 | 103 | def load_contact(self): 104 | pass 105 | 106 | def refresh(self): 107 | pass 108 | 109 | 110 | class MessageWindow(SubWindow): 111 | def __init__(self, contact_width=20, config_path=None): 112 | pass 113 | 114 | def show(self, messages, index): 115 | pass 116 | 117 | def input(self): 118 | pass 119 | 120 | def send(self): 121 | pass 122 | 123 | def scroll(self, down=True): 124 | pass 125 | 126 | 127 | class ChatWindow(object): 128 | def __init__(self): 129 | self.screen = curses.initscr() 130 | self.contact_window = ContactWindow() 131 | self.message_window = MessageWindow() 132 | self.screen.refresh() 133 | 134 | 135 | def handle_quit(*args, **kwargs): 136 | curses.endwin() 137 | exit() 138 | 139 | 140 | def handle_mouse(screen, win, *args, **kwargs): 141 | data = curses.getmouse() 142 | win.window.clear() 143 | win.window.border() 144 | win.window.addstr(5, 5, str(data)) 145 | if data[-1] == curses.BUTTON1_CLICKED: 146 | win.window.addstr(7, 6, str(win.is_in_window(data[1], data[2]))) 147 | win.window.addstr(8, 6, str(win.is_in_content(data[1], data[2]))) 148 | win.window.refresh() 149 | 150 | 151 | def handle_resize(screen, win, *args, **kwargs): 152 | y, x = screen.getmaxyx() 153 | win.window.resize(20, 40) 154 | screen.clear() 155 | win.window.clear() 156 | win.window.addstr(5, 5, str((y, x))) 157 | win.window.border() 158 | screen.refresh() 159 | win.window.refresh() 160 | if y < 10 or x < 30: 161 | screen.clear()
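# wipe both the root screen and the subwindow before painting the size warning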
162 | win.window.clear() 163 | screen.addstr(y//2-1, 0, 'Screen Too Small!'.center(x)[:x]) 164 | 165 | 166 | handlers = { 167 | ord('q'): handle_quit, 168 | curses.KEY_MOUSE: handle_mouse, 169 | curses.KEY_RESIZE: handle_resize, 170 | curses.BUTTON1_CLICKED: None, 171 | curses.REPORT_MOUSE_POSITION: None, 172 | curses.A_BLINK: None, 173 | } 174 | 175 | 176 | def init(): 177 | screen = curses.initscr() 178 | curses.curs_set(False) # hide cursor 179 | screen.keypad(1) 180 | curses.mousemask(curses.ALL_MOUSE_EVENTS) 181 | curses.noecho() 182 | win = SubWindow(shape=(0, 0, 20, 40)) 183 | screen.refresh() 184 | win.window.refresh() 185 | 186 | # textbox = Textbox(shape=(1, 15, 38, 17), parent_window=win.window) 187 | return screen, win 188 | 189 | 190 | def ui_loop(): 191 | screen, win = init() 192 | while True: 193 | event = screen.getch() 194 | if handlers.get(event) is not None:  # guard: some mapped events intentionally have no handler 195 | handlers[event](screen=screen, win=win) 196 | 197 | 198 | try: 199 | ui_loop() 200 | except KeyboardInterrupt: 201 | handle_quit() 202 | curses.endwin() 203 | 204 | 205 | exit() 206 | # ui 207 | # 208 | # 209 | # 210 | import curses 211 | import locale 212 | from curses import textpad 213 | from math import ceil 214 | 215 | locale.setlocale(locale.LC_ALL, '') 216 | code = locale.getpreferredencoding() 217 | 218 | msgs = list() 219 | stdscr = curses.initscr() 220 | # curses.noecho() 221 | curses.cbreak() 222 | 223 | len_contact = 20 224 | contacts = curses.newwin(curses.LINES, len_contact, 0, 0) 225 | messages = curses.newwin(curses.LINES, curses.COLS - len_contact, 0, len_contact) 226 | 227 | 228 | attrs = {'curses.A_BLINK': curses.A_BLINK, 229 | 'curses.A_BOLD': curses.A_BOLD, 230 | 'curses.A_DIM': curses.A_DIM, 231 | 'curses.A_REVERSE': curses.A_REVERSE, 232 | 'curses.A_STANDOUT': curses.A_STANDOUT, 233 | 'curses.A_UNDERLINE': curses.A_UNDERLINE, } 234 | for i, attr in enumerate(attrs): 235 | contacts.addstr(i+2, 1, attr, attrs[attr]) 236 | contacts.addstr(1, 1, '测试') 237 | contacts.border() 238 | messages.border() 239 | textpad.rectangle(messages, curses.LINES-4, 1, curses.LINES-2, curses.COLS-len_contact-2) 240 | stdscr.refresh() 241 | contacts.refresh() 242 | messages.refresh() 243 | try: 244 | for index in range(100): 245 | msglen = curses.COLS-len_contact-4 246 | msgheight = curses.LINES - 5 247 | data = messages.getstr(curses.LINES - 3, 2, msglen).decode('utf-8') 248 | data = [['> ', ' '][i != 0] + data[msglen * i:msglen*(i+1)] for i in range(ceil(len(data)/msglen))] 249 | msgs.extend(data) 250 | 251 | for j, info in zip(range(msgheight), msgs[-(msgheight):]): 252 | messages.addstr(j+1, 1, ' ' * (curses.COLS - len_contact - 2)) 253 | messages.addstr(j+1, 1, info) 254 | 255 | messages.addstr(curses.LINES-3, 2, ' ' * msglen) 256 | messages.border() 257 | contacts.border() 258 | textpad.rectangle(messages, curses.LINES-4, 1, curses.LINES-2, curses.COLS-len_contact-2) 259 | messages.refresh() 260 | contacts.refresh() 261 | messages.getch() 262 | curses.getmouse() 263 | except KeyboardInterrupt: 264 | curses.endwin() 265 | # window resize 266 | # 267 | # 268 | # 269 | import curses 270 | from curses import textpad 271 | 272 | 273 | screen = curses.initscr() 274 | curses.noecho() 275 | # curses.cbreak() 276 | screen.keypad(1) 277 | subwin = curses.newwin(25, 25, 1, 10) 278 | subwin2 = curses.newwin(25, 25, 1, 10+25) 279 | # screen.getch() 280 | # curses.endwin() 281 | # for ki in dir(subwin): 282 | # print(ki, getattr(subwin, ki), sep='\t') 283 | # exit() 284 | # subwin.border() 285 | # subwin2.border() 286 | 
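# repaint the parent screen first, then the subwindows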
screen.refresh() 287 | subwin.refresh() 288 | subwin2.refresh() 289 | box = textpad.Textbox(subwin, insert_mode=True) 290 | while True: 291 | y, x = screen.getmaxyx() 292 | resize = curses.is_term_resized(y, x) 293 | key = screen.getch() 294 | if key == ord('q'): 295 | break 296 | 297 | elif key == ord('i'): 298 | string = box.edit() 299 | subwin2.addstr(0, 0, str(string)) 300 | subwin.refresh() 301 | 302 | 303 | curses.endwin() 304 | -------------------------------------------------------------------------------- /src/MinorProjects/copydisk.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | '''Usage: python {filename} 3 | Interactive tool that uses dd to copy one memory card's contents to another; designed for users unfamiliar with Linux and the dd command. 4 | '''.format(filename=__file__) 5 | import os 6 | 7 | 8 | def run(cmd): 9 | print('$', cmd) 10 | status = os.system(cmd) 11 | if status != 0: 12 | print('命令出错:', status) 13 | exit() 14 | 15 | 16 | print('请选择操作:\n\t[1] 将镜像文件写入存储卡(危险操作)\n\t[2] 从存储卡制作镜像') 17 | method = input('请输入操作标号 [1/2]:') 18 | if method not in {'1', '2'}: 19 | print('输入错误') 20 | exit() 21 | 22 | input('不插任何U盘或存储卡,准备好后按回车,将显示所有设备') 23 | 24 | run('lsblk') 25 | input('插入U盘或存储卡,准备好后按回车,将显示所有设备') 26 | run('lsblk') 27 | 28 | sd = input('输入多出来的设备标号名称,比如"sda":') 29 | 30 | file = input('输入镜像文件名:') 31 | 32 | if method == '1': 33 | input('\n警告:将会把数据写入到 /dev/{} ,会将设备原来的数据全部删除。\n继续按回车,取消按Ctrl+C'.format(sd)) 34 | if not os.path.exists(file): 35 | print('该文件不存在:', file, ',操作取消') 36 | exit() 37 | run('sudo umount /dev/{}*'.format(sd)) 38 | run('sudo dd if="{file}" of=/dev/{sd}'.format(file=file, sd=sd)) 39 | elif method == '2': 40 | if os.path.exists(file): 41 | print(file, '已存在,操作取消') 42 | exit() 43 | run('sudo umount /dev/{}*'.format(sd)) 44 | run('sudo dd if=/dev/{sd} of="{file}"'.format(file=file, sd=sd)) 45 | 46 | print('成功') 47 | -------------------------------------------------------------------------------- /src/MinorProjects/diff.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import os 3 | import re 4 | import sys 5 | from functools import reduce 6 | 7 | 8 | INSERTED = '#' 9 | 10 | if len(sys.argv) < 2: 11 | print('Usage: python3', __file__, 'FILE_1 [FILE_2 FILE_3, ...]') 12 | exit() 13 | 14 | files = list() 15 | for filename in sys.argv[1:]: 16 | with open(filename, 'r') as f: 17 | files.append(f.readlines()) 18 | 19 | file_len = len(files) 20 | result = list() 21 | 22 | max_lines, min_lines = len(max(files, key=len)), len(min(files, key=len))  # currently unused; zip() below stops at the shortest file 23 | 24 | def str_comp(a, b, inserted=INSERTED): 25 | result = a if a == b else inserted 26 | return result 27 | 28 | for lines in zip(*files): 29 | this_line = list() 30 | for character in zip(*lines): 31 | this_line.append(reduce(str_comp, character)) 32 | result.append(''.join(this_line)) 33 | 34 | for line in result: 35 | print(line[:-1].strip()) 36 | 37 | -------------------------------------------------------------------------------- /src/MinorProjects/excel.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Merge the Excel files in the current folder into one, 3 | # sort by the "学号" (student ID) column, and write result.xlsx 4 | 5 | import os 6 | import pandas as pd 7 | 8 | data = list() 9 | if os.path.exists('result.xlsx'): 10 | os.remove('result.xlsx') 11 | 12 | for i in os.listdir(): 13 | if i[0] != '.' 
and i.split('.')[-1][:3] == 'xls': 14 | print(i) 15 | data.append(pd.read_excel(i, sheetname='Sheet1')) 16 | 17 | c = pd.concat(data) 18 | 19 | # If sorting is not needed, just delete the next line 20 | c.sort_values('学号', inplace=True) 21 | 22 | c.to_excel('result.xlsx', index=False) 23 | -------------------------------------------------------------------------------- /src/MinorProjects/media/comics/tmp.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | path = '.' 5 | 6 | 7 | def rename_nums(path): 8 | for index, name in enumerate(os.listdir(path)): 9 | if not os.path.isdir('%s/%s' % (path, name)): 10 | shutil.move('%s/%s' % (path, name), '%s/%d.%s' % (path, index, name.split('.')[-1])) 11 | 12 | 13 | comics = { 14 | 'zip': 'cbz', 15 | 'rar': 'cbr' 16 | } 17 | 18 | 19 | def rename_cbz_cbr(path): 20 | for index, name in enumerate(os.listdir(path)): 21 | if name.split('.')[-1] in comics.keys(): 22 | shutil.move('%s/%s' % (path, name), '%s/%s.%s' % (path, name.split('.')[0], comics[name.split('.')[-1]])) 23 | -------------------------------------------------------------------------------- /src/MinorProjects/media/ffmpeg_convert2mp4.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | path_in, path_out = sys.argv[1:3] 4 | 5 | for dirs in os.listdir(path_in): # 1,2,3 6 | for file_in in os.listdir('{}/{}'.format(path_in, dirs)): 7 | os.makedirs('{}/{}'.format(path_out, dirs), exist_ok=True) 8 | os.system('ffmpeg -i {0}/{2}/{3} -vcodec libx264 -preset fast -crf 25 -y -acodec libmp3lame -ab 128k {1}/{2}/{4}.mp4 -threads 4'.format( 9 | path_in, path_out, dirs, file_in, file_in[3: 6])) 10 | -------------------------------------------------------------------------------- /src/MinorProjects/multy_copy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ A Python script that copies a random sample of stock data files from a portable hard drive. 
3 | """ 4 | import os 5 | import random 6 | import shutil 7 | 8 | DATA_PATH = '/media/ash/Seagate Backup Plus Drive' 9 | TARGET_PATH = '/media/ash/39D1F4D861FCE21C' 10 | SAMPLE_RATIO = 0.095 11 | dirs = ['%d' % i for i in range(2009, 2014)] 12 | 13 | for path in dirs: 14 | print(path, '\n') 15 | pathin = DATA_PATH + '/' + path 16 | file_list = os.listdir(pathin) 17 | full_num = len(file_list) 18 | sample_num = int(full_num * SAMPLE_RATIO) 19 | to_copy = random.sample(file_list, sample_num) 20 | 21 | pathout = TARGET_PATH + '/' + path 22 | if not os.path.isdir(pathout): 23 | os.mkdir(pathout) 24 | for index, file in enumerate(to_copy): 25 | shutil.copyfile(pathin + '/' + file, pathout + '/' + file) 26 | print('copying %2.2f' % (100 * index / sample_num), end='%\r') 27 | -------------------------------------------------------------------------------- /src/MinorProjects/plz/car_velocity.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import seaborn as sns 3 | from pylab import * 4 | import random 5 | 6 | 7 | sns.set_style('whitegrid') 8 | v1 = 1 9 | v2 = 2 10 | v3 = 1.5 11 | 12 | nums_v1 = 50 13 | nums_v2 = 30 14 | nums_v3 = 55 15 | 16 | transit = 10 17 | 18 | nums = list() 19 | 20 | v, num = (v1, nums_v1) 21 | for j in range(num): 22 | nums.append(v + random.gauss(0, .01 * v)) 23 | pass 24 | 25 | for j in range(int(transit * (v2 - v1))): 26 | nums.append(v1 + (v2 - v1) / abs(int(transit * (v2 - v1))) * j + random.gauss(0, .02 * v)) 27 | 28 | v, num = (v2, nums_v2) 29 | for j in range(num): 30 | nums.append(v + random.gauss(0, .01 * v)) 31 | pass 32 | 33 | for j in range(abs(int(transit * (v3 - v2)))): 34 | nums.append(v2 + (v3 - v2) / abs(int(transit * (v3 - v2))) * j + random.gauss(0, .02 * v)) 35 | 36 | v, num = (v3, nums_v3) 37 | for j in range(num): 38 | nums.append(v + random.gauss(0, .01 * v)) 39 | pass 40 | 41 | 42 | 43 | 44 | plot(nums) 45 | -------------------------------------------------------------------------------- /src/MinorProjects/plz/compute_time.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Thu Aug 25 11:24:15 2016 5 | 6 | use main() 7 | @author: 老徐 8 | """ 9 | import argparse 10 | from datetime import datetime 11 | from datetime import timedelta 12 | from pandas import read_excel 13 | from numpy import array 14 | from numpy import nan 15 | 16 | 17 | parser = argparse.ArgumentParser(description='为小胖写的自动计算出勤时间的小程序') 18 | parser.add_argument('-f', '--file-path', action='store', dest='filename', help='文件路径') 19 | parser.add_argument('-s', '--sheet-name', action='store', dest='sheetname', default='17.18.19', help='表名,默认为"17.18.19"') 20 | parser.add_argument('-c', '--column-number', action='store', dest='column', default='3', help='第几栏,默认为"3"') 21 | 22 | def str2time(str): 23 | try: 24 | return datetime.strptime(str, '%H:%M') 25 | except: 26 | #print('err with time as', str) 27 | return datetime.strptime('0', '%H') 28 | 29 | 30 | def main(): 31 | args = parser.parse_args() 32 | if not args.filename: 33 | print('没有输入文件路径,请使用 -h 参数查看帮助') 34 | exit(1) 35 | filename = args.filename 36 | sheetname = args.sheetname 37 | column = args.column 38 | xls_column = {'1':'A:N', '2':'P:AC', '3':'AE:AR'} 39 | info_column = {'1': 'B,J', '2': 'Q,Y', '3': 'AF,AN'} 40 | try: 41 | data = read_excel(filename, sheetname=sheetname, parse_cols=xls_column[column], skiprows=10) 42 | except: 43 | sheetname = 0 44 | data = 
read_excel(filename, sheetname=sheetname, parse_cols=xls_column[column], skiprows=10) 45 | info = read_excel(filename, sheetname=sheetname, parse_cols=info_column[column]).ix[:1, :].values 46 | print(' 姓名 : %s\n记录时间: %s' % (info[0, 1], info[1, 0])) 47 | data = data.replace(to_replace={'旷工' : nan}) 48 | data = array(data) 49 | data = data[:,[1,3,6,8,10,12]] 50 | time_of_a_day = timedelta(0) 51 | for _ in range(len(data)): 52 | for __ in range(3): 53 | if (data[_, __*2] is not nan) and (data[_, __*2+1] is not nan): 54 | time_of_a_day += str2time(data[_, __*2+1]) - str2time(data[_, __*2]) 55 | result = time_of_a_day 56 | sec = result.total_seconds() 57 | s = sec % 3600 58 | hour = int((sec - s) / 3600) 59 | minutes = s / 60 60 | print('出勤时间: {0:d}小时{1:.0f}分钟'.format(hour, minutes)) 61 | print('for my lover, 小胖儿~, 13自动化老徐') 62 | return result 63 | 64 | 65 | if __name__ == "__main__": 66 | main() 67 | -------------------------------------------------------------------------------- /src/MinorProjects/plz/image_cutout.py: -------------------------------------------------------------------------------- 1 | from pylab import * 2 | import os 3 | from PIL import Image 4 | from skimage import morphology 5 | from functools import reduce 6 | 7 | 8 | def change_bg(orig, background=None, 9 | threshold_yellow=[(80, 145), (65, 145), (0, 50)], 10 | threshold_black=[(0, 30), (0, 30), (0, 30)], 11 | threshold_face=[(42, 90), (20, 70), (0, 42)], 12 | show_pic=False, 13 | ): 14 | thresholds = [threshold_yellow, threshold_black, threshold_face] 15 | if background is None: 16 | background = orig * 0 17 | background = array(Image.fromarray(background).resize(orig.shape[:2][::-1])) 18 | masks = [orig.copy() for i in range(len(thresholds))] 19 | for mask_i, threshold in enumerate(thresholds): 20 | for i, (th_lo, th_hi) in enumerate(threshold): 21 | masks[mask_i][:, :, i] = (th_lo <= masks[mask_i][:, :, i]).astype(int) * (masks[mask_i][:, :, i] <= th_hi).astype(int) 22 | mask = masks[mask_i][:, :, 0] * masks[mask_i][:, :, 1] * masks[mask_i][:, :, 2] 23 | masks[mask_i][:, :, 0], masks[mask_i][:, :, 1], masks[mask_i][:, :, 2] = [mask for i in range(3)] 24 | mask = 1-reduce(lambda x, y: x * y, [1-w for w in masks]) 25 | mask_orig = mask.copy() 26 | mask = morphology.dilation(mask, ones([10, 3, 1])) 27 | mask = morphology.opening(mask, ones([18, 12, 1])) 28 | mask = morphology.dilation(mask, ones([3, 6, 1])) 29 | 30 | result = (orig * mask + background * (1 - mask)) 31 | if show_pic: 32 | subplot(221) 33 | imshow(orig) 34 | title('orig') 35 | subplot(222) 36 | imshow(mask*255) 37 | title('mask') 38 | subplot(223) 39 | imshow(mask_orig*255) 40 | title('mask_orig') 41 | subplot(224) 42 | imshow(result) 43 | title('result') 44 | show() 45 | return result, mask, mask_orig 46 | 47 | 48 | if __name__ == '__main__': 49 | pics = tuple(filter(lambda x: x.endswith('.jpg'), os.listdir('original'))) 50 | background = tuple(filter(lambda x: x.split('.')[-1].lower() in {'jpg', 'jpeg', 'png', 'bmp'}, os.listdir('background'))) 51 | 52 | for i in ['result', 'mask', 'denoised_mask']: 53 | if not os.path.isdir(i): 54 | os.mkdir(i) 55 | bg_names = 0 56 | for bg in background: 57 | bg_names += 1 58 | pic_names = 0 59 | for pic in pics: 60 | pic_names += 1 61 | print('processing:', bg, pic) 62 | p = imread(os.path.join('original', pic)) 63 | bgp = imread(os.path.join('background', bg)) 64 | result, denoised_mask, mask = change_bg(p, bgp) 65 | name = 'bg_{}_pic_{}.jpg'.format(bg_names, pic_names) 66 | 67 | imsave(os.path.join('result', name), 
result, format='jpeg') 68 | imsave(os.path.join('mask', name), mask * 255, format='jpeg') 69 | imsave(os.path.join('denoised_mask', name), denoised_mask * 255, format='jpeg') 70 | -------------------------------------------------------------------------------- /src/MinorProjects/plz/turtle.bot: -------------------------------------------------------------------------------- 1 | ------ 2 | c 3 | red 4 | a 5 | a 6 | a 7 | a 8 | a 9 | a 10 | a 11 | d 12 | d 13 | d 14 | d 15 | d 16 | w 17 | w 18 | w 19 | a 20 | a 21 | a 22 | a 23 | a 24 | a 25 | a 26 | a 27 | a 28 | a 29 | a 30 | a 31 | w 32 | w 33 | w 34 | w 35 | d 36 | d 37 | d 38 | d 39 | d 40 | d 41 | d 42 | d 43 | d 44 | d 45 | d 46 | d 47 | d 48 | d 49 | d 50 | d 51 | d 52 | d 53 | d 54 | d 55 | d 56 | d 57 | d 58 | d 59 | d 60 | d 61 | d 62 | d 63 | d 64 | d 65 | d 66 | d 67 | d 68 | d 69 | d 70 | d 71 | s 72 | s 73 | s 74 | s 75 | s 76 | s 77 | w 78 | w 79 | w 80 | a 81 | a 82 | a 83 | a 84 | a 85 | a 86 | a 87 | a 88 | w 89 | w 90 | a 91 | a 92 | a 93 | a 94 | a 95 | a 96 | a 97 | a 98 | a 99 | a 100 | a 101 | a 102 | a 103 | a 104 | w 105 | d 106 | d 107 | d 108 | d 109 | w 110 | w 111 | d 112 | w 113 | s 114 | szs 115 | s 116 | s 117 | s 118 | s 119 | s 120 | s 121 | s 122 | s 123 | s 124 | s 125 | s 126 | s 127 | ss 128 | s 129 | s 130 | s 131 | s 132 | s 133 | s 134 | s 135 | s 136 | s 137 | s 138 | s 139 | s 140 | s 141 | s 142 | s 143 | s 144 | s 145 | s 146 | s 147 | s 148 | s 149 | s 150 | a 151 | a 152 | a 153 | a 154 | a 155 | a 156 | a 157 | a 158 | a 159 | a 160 | a 161 | a 162 | s 163 | s 164 | s 165 | s 166 | s 167 | s 168 | s 169 | s 170 | s 171 | s 172 | ss 173 | s 174 | s 175 | s 176 | s 177 | s 178 | s 179 | ss 180 | s 181 | s 182 | s 183 | s 184 | s 185 | s 186 | s 187 | s 188 | s 189 | s 190 | s 191 | s 192 | s 193 | s 194 | s 195 | s 196 | 197 | s 198 | s 199 | s 200 | s 201 | s 202 | s 203 | s 204 | s 205 | s 206 | s 207 | s 208 | s 209 | sz 210 | s 211 | s 212 | ss 213 | s 214 | s 215 | s 216 | s 217 | s 218 | s 219 | w 220 | w 221 | s 222 | s 223 | s 224 | w 225 | w 226 | s 227 | d 228 | d 229 | d 230 | d 231 | d 232 | d 233 | d 234 | d 235 | d 236 | d 237 | d 238 | d 239 | d 240 | d 241 | d 242 | d 243 | s 244 | s 245 | s 246 | s 247 | s 248 | s 249 | s 250 | w 251 | w 252 | w 253 | w 254 | w 255 | d 256 | d 257 | d 258 | d 259 | d 260 | w 261 | ww 262 | w 263 | w 264 | w 265 | w 266 | w 267 | s 268 | w 269 | w 270 | sa 271 | a 272 | a 273 | a 274 | w 275 | w 276 | w 277 | a 278 | a 279 | a 280 | a 281 | s 282 | s 283 | s 284 | s 285 | s 286 | s 287 | s 288 | s 289 | s 290 | w 291 | w 292 | a 293 | a 294 | a 295 | a 296 | a 297 | a 298 | a 299 | s 300 | s 301 | s 302 | s 303 | s 304 | s 305 | s 306 | s 307 | s 308 | s 309 | s 310 | a 311 | a 312 | a 313 | a 314 | a 315 | a 316 | a 317 | a 318 | a 319 | a 320 | a 321 | a 322 | a 323 | w 324 | s 325 | s 326 | s 327 | s 328 | s 329 | s 330 | w 331 | w 332 | w 333 | w 334 | w 335 | w 336 | w 337 | w 338 | w 339 | ww 340 | w 341 | w 342 | w 343 | w 344 | w 345 | a 346 | a 347 | a 348 | a 349 | a 350 | a 351 | a 352 | a 353 | a 354 | a 355 | a 356 | a 357 | a 358 | a 359 | a 360 | a 361 | a 362 | a 363 | a 364 | a 365 | a 366 | a 367 | a 368 | a 369 | a 370 | d 371 | d 372 | d 373 | d 374 | d 375 | d 376 | d 377 | d 378 | d 379 | d 380 | d 381 | d 382 | d 383 | d 384 | d 385 | d 386 | ds 387 | s 388 | s 389 | s 390 | s 391 | w 392 | w 393 | w 394 | w 395 | ww 396 | w 397 | w 398 | w 399 | w 400 | w 401 | w 402 | w 403 | w 404 | w 405 | w 406 | w 407 | w 408 | w 409 
| w 410 | w 411 | s 412 | w 413 | a 414 | a 415 | a 416 | a 417 | w 418 | w 419 | w 420 | w 421 | w 422 | w 423 | w 424 | a 425 | a 426 | a 427 | a 428 | a 429 | a 430 | a 431 | a 432 | a 433 | a 434 | a 435 | a 436 | a 437 | a 438 | a 439 | a 440 | a 441 | a 442 | a 443 | a 444 | a 445 | a 446 | a 447 | a 448 | d 449 | s 450 | s 451 | s 452 | d 453 | dw 454 | w 455 | w 456 | was 457 | s 458 | s 459 | a 460 | a 461 | a 462 | a 463 | s 464 | s 465 | s 466 | s 467 | s 468 | s 469 | s 470 | s 471 | s 472 | s 473 | s 474 | s 475 | s 476 | ss 477 | s 478 | s 479 | w 480 | w 481 | w 482 | w 483 | w 484 | w 485 | w 486 | w 487 | w 488 | w 489 | w 490 | w 491 | w 492 | wa 493 | a 494 | a 495 | a 496 | a 497 | a 498 | a 499 | a 500 | a 501 | a 502 | a 503 | a 504 | a 505 | a 506 | a 507 | a 508 | a 509 | s 510 | s 511 | s 512 | s 513 | s 514 | s 515 | s 516 | a 517 | a 518 | a 519 | a 520 | a 521 | a 522 | w 523 | w 524 | ww 525 | w 526 | s 527 | s 528 | s 529 | s 530 | s 531 | s 532 | s 533 | s 534 | s 535 | s 536 | s 537 | s 538 | s 539 | s 540 | s 541 | sa 542 | a 543 | a 544 | a 545 | a 546 | a 547 | a 548 | a 549 | a 550 | a 551 | a 552 | d 553 | d 554 | d 555 | d 556 | d 557 | d 558 | d 559 | d 560 | d 561 | d 562 | d 563 | d 564 | d 565 | s 566 | s 567 | a 568 | a 569 | w 570 | w 571 | ww 572 | w 573 | w 574 | w 575 | w 576 | a 577 | a 578 | a 579 | aa 580 | aa 581 | a 582 | a 583 | a 584 | a 585 | a 586 | a 587 | a 588 | a 589 | a 590 | a 591 | a 592 | a 593 | s 594 | s 595 | w 596 | w 597 | w 598 | w 599 | a 600 | a 601 | s 602 | s 603 | s 604 | s 605 | s 606 | s 607 | ss 608 | s 609 | s 610 | s 611 | s 612 | s 613 | a 614 | as 615 | s 616 | s 617 | a 618 | a 619 | a 620 | a 621 | a 622 | a 623 | as 624 | wwd 625 | d 626 | d 627 | d 628 | d 629 | d 630 | d 631 | s 632 | sw 633 | w 634 | w 635 | w 636 | w 637 | w 638 | s 639 | s 640 | s 641 | s 642 | s 643 | s 644 | s 645 | s 646 | s 647 | s 648 | d 649 | d 650 | d 651 | d 652 | d 653 | d 654 | d 655 | d 656 | d 657 | w 658 | w 659 | w 660 | w 661 | w 662 | w 663 | w 664 | a 665 | a 666 | a 667 | a 668 | a 669 | a 670 | a 671 | a 672 | a 673 | a 674 | a 675 | a 676 | a 677 | s 678 | s 679 | s 680 | s 681 | s 682 | s 683 | s 684 | s 685 | s 686 | s 687 | s 688 | s 689 | w 690 | w 691 | s 692 | s 693 | d 694 | d 695 | d 696 | d 697 | a 698 | a 699 | a 700 | a 701 | a 702 | a 703 | a 704 | a 705 | a 706 | a 707 | a 708 | a 709 | a 710 | a 711 | a 712 | a 713 | a 714 | a 715 | a 716 | a 717 | a 718 | a 719 | a 720 | a 721 | aw 722 | w 723 | w 724 | wa 725 | a 726 | d 727 | d 728 | d 729 | d 730 | d 731 | d 732 | d 733 | d 734 | d 735 | d 736 | d 737 | d 738 | d 739 | d 740 | w 741 | ws 742 | s 743 | s 744 | s 745 | w 746 | w 747 | w 748 | w 749 | w 750 | w 751 | w 752 | w 753 | w 754 | w 755 | a 756 | a 757 | a 758 | a 759 | a 760 | a 761 | a 762 | a 763 | a 764 | a 765 | a 766 | a 767 | a 768 | a 769 | a 770 | a 771 | a 772 | a 773 | a 774 | a 775 | a 776 | d 777 | d 778 | d 779 | d 780 | s 781 | s 782 | s 783 | s 784 | s 785 | s 786 | s 787 | s 788 | s 789 | s 790 | s 791 | s 792 | s 793 | a 794 | a 795 | a 796 | a 797 | a 798 | a 799 | w 800 | ws 801 | s 802 | d 803 | d 804 | d 805 | d 806 | d 807 | d 808 | d 809 | d 810 | d 811 | d 812 | d 813 | d 814 | d 815 | d 816 | d 817 | d 818 | d 819 | d 820 | d 821 | d 822 | d 823 | d 824 | d 825 | d 826 | d 827 | d 828 | d 829 | d 830 | w 831 | ws 832 | s 833 | s 834 | s 835 | s 836 | s 837 | s 838 | s 839 | s 840 | s 841 | w 842 | w 843 | w 844 | ww 845 | w 846 | s 847 | s 848 | s 849 | s 850 | s 
851 | s 852 | s 853 | s 854 | w 855 | w 856 | w 857 | w 858 | w 859 | w 860 | w 861 | w 862 | w 863 | w 864 | w 865 | s 866 | s 867 | a 868 | a 869 | a 870 | a 871 | a 872 | a 873 | w 874 | w 875 | g 876 | 210 100 877 | c 878 | blue 879 | l 880 | 4 881 | w 882 | w 883 | w 884 | w 885 | w 886 | w 887 | s 888 | s 889 | s 890 | s 891 | s 892 | s 893 | h 894 | p 895 | 4 896 | l 897 | 205 205 898 | s 899 | s 900 | s 901 | s 902 | s 903 | g 904 | 210 100 905 | p 906 | 4 907 | l 908 | 210 105 909 | s 910 | s 911 | s 912 | s 913 | g 914 | 210 100 915 | c 916 | blue 917 | p 918 | 4 919 | l 920 | 210 205 921 | l 922 | 230 230 923 | s 924 | l 925 | 250 250 926 | s 927 | l 928 | 250 270 929 | r 930 | c 931 | green 932 | w 933 | w 934 | w 935 | w 936 | w 937 | w 938 | a 939 | a 940 | a 941 | a 942 | a 943 | a 944 | a 945 | a 946 | a 947 | w 948 | w 949 | d 950 | d 951 | d 952 | d 953 | s 954 | s 955 | s 956 | s 957 | s 958 | d 959 | d 960 | d 961 | d 962 | d 963 | d 964 | d 965 | d 966 | d 967 | w 968 | w 969 | w 970 | w 971 | w 972 | w 973 | w 974 | w 975 | w 976 | w 977 | w 978 | w 979 | w 980 | w 981 | w 982 | w 983 | w 984 | w 985 | w 986 | s 987 | s 988 | s 989 | s 990 | s 991 | s 992 | s 993 | s 994 | s 995 | s 996 | s 997 | s 998 | s 999 | s 1000 | s 1001 | s 1002 | s 1003 | s 1004 | s 1005 | s 1006 | s 1007 | s 1008 | d 1009 | d 1010 | d 1011 | d 1012 | d 1013 | d 1014 | w 1015 | w 1016 | w 1017 | w 1018 | w 1019 | w 1020 | w 1021 | w 1022 | w 1023 | a 1024 | a 1025 | a 1026 | a 1027 | a 1028 | a 1029 | a 1030 | a 1031 | a 1032 | a 1033 | aw 1034 | ws 1035 | s 1036 | s 1037 | w 1038 | w 1039 | w 1040 | w 1041 | w 1042 | w 1043 | w 1044 | wd 1045 | d 1046 | d 1047 | d 1048 | d 1049 | d 1050 | d 1051 | d 1052 | d 1053 | s 1054 | s 1055 | s 1056 | s 1057 | s 1058 | s 1059 | s 1060 | s 1061 | s 1062 | s 1063 | s 1064 | d 1065 | d 1066 | d 1067 | d 1068 | d 1069 | d 1070 | d 1071 | w 1072 | w 1073 | w 1074 | w 1075 | w 1076 | w 1077 | w 1078 | w 1079 | w 1080 | w 1081 | w 1082 | w 1083 | w 1084 | a 1085 | a 1086 | a 1087 | a 1088 | a 1089 | a 1090 | a 1091 | a 1092 | a 1093 | a 1094 | a 1095 | a 1096 | a 1097 | a 1098 | a 1099 | a 1100 | a 1101 | a 1102 | a 1103 | a 1104 | a 1105 | a 1106 | a 1107 | a 1108 | a 1109 | a 1110 | a 1111 | w 1112 | w 1113 | w 1114 | ww 1115 | w 1116 | w 1117 | w 1118 | w 1119 | s 1120 | s 1121 | s 1122 | s 1123 | a 1124 | w 1125 | w 1126 | wd 1127 | d 1128 | d 1129 | d 1130 | d 1131 | w 1132 | w 1133 | wd 1134 | d 1135 | d 1136 | w 1137 | w 1138 | w 1139 | d 1140 | ds 1141 | s 1142 | s 1143 | s 1144 | s 1145 | s 1146 | s 1147 | s 1148 | s 1149 | d 1150 | d 1151 | d 1152 | w 1153 | w 1154 | w 1155 | w 1156 | w 1157 | w 1158 | w 1159 | w 1160 | w 1161 | w 1162 | w 1163 | w 1164 | w 1165 | w 1166 | d 1167 | d 1168 | d 1169 | d 1170 | d 1171 | d 1172 | w 1173 | w 1174 | w 1175 | w 1176 | w 1177 | w 1178 | w 1179 | a 1180 | a 1181 | a 1182 | a 1183 | a 1184 | a 1185 | a 1186 | a 1187 | a 1188 | a 1189 | a 1190 | a 1191 | a 1192 | a 1193 | s 1194 | s 1195 | s 1196 | s 1197 | s 1198 | w 1199 | w 1200 | s 1201 | s 1202 | s 1203 | s 1204 | s 1205 | s 1206 | s 1207 | s 1208 | s 1209 | s 1210 | s 1211 | s 1212 | s 1213 | s 1214 | s 1215 | s 1216 | s 1217 | s 1218 | s 1219 | s 1220 | s 1221 | s 1222 | s 1223 | s 1224 | s 1225 | s 1226 | s 1227 | s 1228 | s 1229 | s 1230 | s 1231 | s 1232 | s 1233 | s 1234 | s 1235 | s 1236 | s 1237 | s 1238 | s 1239 | s 1240 | s 1241 | s 1242 | s 1243 | s 1244 | s 1245 | s 1246 | s 1247 | s 1248 | s 1249 | s 1250 | sad 1251 | d 1252 | d 1253 | d 
1254 | d 1255 | d 1256 | d 1257 | w 1258 | w 1259 | w 1260 | w 1261 | s 1262 | s 1263 | s 1264 | s 1265 | a 1266 | a 1267 | a 1268 | a 1269 | a 1270 | a 1271 | a 1272 | a 1273 | a 1274 | a 1275 | a 1276 | a 1277 | a 1278 | a 1279 | a 1280 | a 1281 | s 1282 | 1283 | d 1284 | d 1285 | d 1286 | d 1287 | d 1288 | d 1289 | d 1290 | s 1291 | s 1292 | s 1293 | s 1294 | sd 1295 | d 1296 | ddd 1297 | s 1298 | s 1299 | s 1300 | s 1301 | s 1302 | s 1303 | s 1304 | d 1305 | d 1306 | d 1307 | d 1308 | d 1309 | d 1310 | d 1311 | d 1312 | sww 1313 | w 1314 | w 1315 | d 1316 | d 1317 | s 1318 | s 1319 | s 1320 | s 1321 | s 1322 | s 1323 | s 1324 | s 1325 | s 1326 | s 1327 | s 1328 | as 1329 | s 1330 | d 1331 | d 1332 | s 1333 | s 1334 | s 1335 | s 1336 | s 1337 | d 1338 | d 1339 | d 1340 | d 1341 | d 1342 | d 1343 | d 1344 | d 1345 | d 1346 | d 1347 | d 1348 | w 1349 | w 1350 | w 1351 | w 1352 | w 1353 | w 1354 | w 1355 | w 1356 | w 1357 | w 1358 | w 1359 | w 1360 | w 1361 | w 1362 | w 1363 | w 1364 | w 1365 | w 1366 | w 1367 | w 1368 | w 1369 | w 1370 | w 1371 | w 1372 | w 1373 | w 1374 | d 1375 | d 1376 | d 1377 | d 1378 | d 1379 | d 1380 | d 1381 | d 1382 | d 1383 | d 1384 | a 1385 | a 1386 | a 1387 | a 1388 | a 1389 | a 1390 | a 1391 | w 1392 | w 1393 | a 1394 | a 1395 | a 1396 | w 1397 | w 1398 | wd 1399 | d 1400 | d 1401 | d 1402 | d 1403 | d 1404 | d 1405 | d 1406 | d 1407 | d 1408 | d 1409 | d 1410 | d 1411 | d 1412 | d 1413 | d 1414 | d 1415 | a 1416 | a 1417 | a 1418 | a 1419 | a 1420 | a 1421 | as 1422 | s 1423 | s 1424 | s 1425 | s 1426 | s 1427 | s 1428 | s 1429 | s 1430 | s 1431 | s 1432 | s 1433 | w 1434 | w 1435 | w 1436 | s 1437 | s 1438 | 1439 | s 1440 | ss 1441 | s 1442 | s 1443 | s 1444 | w 1445 | w 1446 | w 1447 | w 1448 | w 1449 | w 1450 | w 1451 | ------ 1452 | o 1453 | ------ 1454 | -------------------------------------------------------------------------------- /src/MinorProjects/plz/turtlebot.py: -------------------------------------------------------------------------------- 1 | 2 | # coding: utf-8 3 | 4 | # In[14]: 5 | 6 | 7 | import sys 8 | import os 9 | import turtle 10 | import tty 11 | import termios 12 | 13 | t = turtle.Pen() 14 | 15 | 16 | # In[18]: 17 | 18 | 19 | # turtle bot 模拟器 20 | # 只有三种动作,前进、左转、右转,转弯半径恒定为 r, 前进距离恒定为 l 21 | r = 15 # 转弯半径 22 | rl = 5 # 转弯时走的路程 23 | l = 5 # 前进时走的路程 24 | 25 | 26 | # In[16]: 27 | 28 | 29 | # 画盒子 30 | ld = (5, 5) 31 | ru = (205, 205) 32 | # 开始画 33 | t.color("") 34 | t.goto(ld) 35 | t.color("grey") 36 | t.pensize(4) 37 | t.goto(ld[0], ru[1]) 38 | t.goto(*ru) 39 | t.goto(ru[0], ld[1]) 40 | t.goto(*ld) 41 | 42 | 43 | # In[11]: 44 | 45 | 46 | def left(): 47 | t.circle(r, rl) 48 | 49 | 50 | def right(): 51 | t.circle(-r, rl) 52 | 53 | 54 | def forward(): 55 | t.forward(l) 56 | 57 | 58 | def undo(): 59 | t.undo() 60 | 61 | 62 | def save(name='turtle.eps'): 63 | t.color("") 64 | t.goto(-500, -500) 65 | ts = turtle.getscreen() 66 | ts.getcanvas().postscript(file=name) 67 | 68 | 69 | def reset(): 70 | t.color("") 71 | t.goto(0, 0) 72 | t.color("black") 73 | t.pensize(1) 74 | t.seth(0) 75 | 76 | 77 | def pensize(s=None): 78 | try: 79 | if s is None: 80 | s = input('pen size(输入数字):') 81 | print(s, file=open(path, 'a')) 82 | t.pensize(float(s)) 83 | except: 84 | print('画笔粗细输入错误') 85 | 86 | 87 | def color(c=None): 88 | try: 89 | if c is None: 90 | c = input('color(什么都不写的话颜色为透明,其他颜色用英文即可):') 91 | print(c, file=open(path, 'a')) 92 | t.color(c) 93 | except: 94 | print('颜色错误') 95 | 96 | 97 | def show_help(): 98 | help_info = '''用法: 99 | 
支持解析脚本(直接把动作写到文本文档里即可):python turtle.py 100 | 101 | 机器人运动指令: 102 | w 103 | asd 分别为方向键,前后左右; 104 | 105 | r:reset, 回到原来位置 106 | g:go to, 指定的坐标位置,颜色什么的全部重置 107 | z:set heading 设置头部朝向 108 | l:draw line 从当前位置到指定坐标画一条线 109 | 110 | 属性设置指令: 111 | t:set turtle bot, 设置运动半径和距离 112 | c:color, 改变颜色 113 | p:pen size, 改变画笔粗细 114 | 115 | 其他: 116 | o:save, 保存图片 117 | q:quit, 退出 118 | h:help, 显示帮助''' 119 | print(help_info) 120 | print("方格坐标为左下角", ld, "右上角", ru) 121 | 122 | 123 | def goto(position=None): 124 | try: 125 | t.color("") 126 | if not position: 127 | position = input("请输入坐标,横纵坐标用空格分隔:") 128 | print(position, file=open(path, 'a')) 129 | position = [int(i) for i in position.split()] 130 | t.goto(*position) 131 | t.color("black") 132 | t.pensize(1) 133 | except: 134 | print('坐标输入错误!') 135 | 136 | 137 | def draw_line(position=None): 138 | try: 139 | if not position: 140 | position = input("请输入坐标,横纵坐标用空格分隔:") 141 | print(position, file=open(path, 'a')) 142 | position = [int(i) for i in position.split()] 143 | t.goto(*position) 144 | except: 145 | print('坐标输入错误!') 146 | 147 | 148 | def set_turtle(s=None): 149 | global r 150 | global l 151 | try: 152 | t.color("") 153 | if not s: 154 | s = input("请输入半径和直线距离,用空格分隔:") 155 | print(s, file=open(path, 'a')) 156 | s = [float(i) for i in s.split()] 157 | [r, l] = s 158 | except: 159 | print('r\\l输入错误!') 160 | 161 | 162 | def set_heading(s=None): 163 | try: 164 | if not s: 165 | s = input("请输入头部的角度:") 166 | print(s, file=open(path, 'a')) 167 | s = float(s) 168 | t.seth(s) 169 | except: 170 | print('r\\l输入错误!') 171 | 172 | 173 | def nop(): 174 | pass 175 | 176 | 177 | act = { 178 | 'w': forward, 179 | 's': undo, 180 | 'r': reset, 181 | 'a': left, 182 | 'd': right, 183 | 'c': color, 184 | 'p': pensize, 185 | 'o': save, 186 | 'q': exit, 187 | 'l': draw_line, 188 | 'h': show_help, 189 | 'g': goto, 190 | 't': set_turtle, 191 | 'z': set_heading, 192 | '------': nop, 193 | } 194 | reset() 195 | path = '' 196 | 197 | 198 | # In[12]: 199 | 200 | 201 | # 如果输入脚本则读取脚本 202 | read_data = False 203 | if len(sys.argv) > 1: 204 | path = sys.argv[1] 205 | data = open(sys.argv[1]).readlines() 206 | for i, line in enumerate(data): 207 | try: 208 | if read_data: 209 | read_data = False 210 | act[data[i - 1].strip()](line.strip()) 211 | elif line.strip() in list('gcptlz'): 212 | read_data = True 213 | else: 214 | act[line.strip()]() 215 | except: 216 | print('输入错误', line, i) 217 | 218 | 219 | # In[57]: 220 | 221 | 222 | act['h']() 223 | warns = 'yes' 224 | 225 | if not path: 226 | print('输入历史自动保存,成为脚本') 227 | path = input('请输入脚本文件名:') 228 | if os.path.exists(path): 229 | warns = input('警告!该文件已存在!继续将在该文件之后append内容!继续请输入“yes”') 230 | else: 231 | print("可以继续输入指令,输入的指令将继续保存在该文件,与之前的内容使用分隔线分隔") 232 | 233 | if warns == 'yes': 234 | print('------', file=open(path, 'a')) 235 | while True: 236 | print("当前坐标:", t.pos(), "角度", t.heading()) 237 | fd = sys.stdin.fileno() 238 | old_settings = termios.tcgetattr(fd) 239 | try: 240 | tty.setraw(fd) 241 | info = sys.stdin.read(1) 242 | finally: 243 | termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) 244 | 245 | print(info, file=open(path, 'a')) 246 | try: 247 | act[info]() 248 | except: 249 | print('输入错误!') 250 | act['h']() 251 | -------------------------------------------------------------------------------- /src/MinorProjects/sendmail.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: UTF-8 -*- 3 | 4 | import smtplib 5 | from email.mime.text import MIMEText 6 | from 
email.mime.image import MIMEImage 7 | from email.mime.multipart import MIMEMultipart 8 | from email.header import Header 9 | from email.utils import formataddr 10 | 11 | 12 | sender = { 13 | 'address': 'sender@domain.tld', 14 | 'nickname': 'My Name', 15 | 'password': 'password', 16 | 'smtp_server': 'smtp.domain.tld', 17 | 'smtp_port': 465, } 18 | 19 | receivers = [('Your Name', 'receiver@domain.tld')] 20 | 21 | mail = { 22 | 'content': 'Email Content', 23 | 'content_html': '
<html><body>HTML Email Content</body></html>
', 24 | 'subject': 'Email subject', 25 | 'attachments': [('filename', 'string_or_bytes_like_data')], 26 | 'images': [{ 27 | 'Content-ID': 'image1', 28 | 'data': '''bytes which is read from file, i.e. open("pic.png", "rb").read(), 29 | and use html type mail with '''}], 30 | } 31 | 32 | 33 | def sendmail(sender, receivers, mail): 34 | message = MIMEMultipart() 35 | message['From'] = formataddr((Header(sender['nickname'], 'utf-8').encode(), sender['address'])) 36 | message['To'] = ','.join(map(lambda x: formataddr((Header(x[0], 'utf-8').encode(), x[1])), receivers)) 37 | message['Subject'] = Header(mail['subject'], 'utf-8').encode() 38 | 39 | if 'content' in mail: 40 | message.attach(MIMEText(mail['content'], 'plain', 'utf-8')) 41 | 42 | if 'content_html' in mail: 43 | message.attach(MIMEText(mail['content_html'], 'html', 'utf-8')) 44 | 45 | if 'attachments' in mail: 46 | for attachment in mail['attachments']: 47 | att = MIMEText(attachment[1], 'base64', 'utf-8') 48 | att["Content-Type"] = "application/octet-stream" 49 | att["Content-Disposition"] = 'attachment; filename="{}"'.format(attachment[0]) 50 | message.attach(att) 51 | 52 | if 'images' in mail: 53 | for image in mail['images']: 54 | img = MIMEImage(image['data']) 55 | img.add_header('Content-ID', '<{}>'.format(image['Content-ID'])) 56 | message.attach(img) 57 | 58 | server = smtplib.SMTP_SSL(sender['smtp_server'], sender['smtp_port']) 59 | server.login(sender['address'], sender['password']) 60 | server.sendmail(sender['address'], list(zip(*receivers))[1], message.as_string()) 61 | server.quit() 62 | 63 | 64 | if __name__ == '__main__': 65 | sendmail(sender, receivers, mail) 66 | -------------------------------------------------------------------------------- /src/MinorProjects/tieba_content.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import bs4 4 | import requests 5 | 6 | 7 | def extract_content(url): 8 | url = url.strip() 9 | html = requests.get(url, params={'see_lz': 1}).text 10 | soup = bs4.BeautifulSoup(html, 'html.parser') 11 | contents = soup.find_all('div', attrs={'class': 'p_content'}) 12 | # contents = contents[1:] 13 | return soup.title.text, '\n'.join(map(str, contents)) 14 | 15 | 16 | def crawl(url_list): 17 | head = str(bs4.BeautifulSoup(requests.get(url_list[0]).text, 'html.parser').head) 18 | result_list = [head] 19 | result_list.append('') 20 | for url in url_list: 21 | print(url, end='\t') 22 | title, content = extract_content(url) 23 | print(title) 24 | result_list.append('
<h1>{}</h1>
'.format(title)) 25 | result_list.append(content) 26 | result_list.append('') 27 | result = '\n'.join(result_list) 28 | return result 29 | 30 | 31 | def main(): 32 | path = 'url_list.txt' 33 | if not os.path.exists(path): 34 | os.system(' '.join(['vim', path])) 35 | urls = [i for i in open(path).readlines() if len(i) > 5] 36 | content = crawl(urls) 37 | open('tieba.html', 'w').write(content) 38 | 39 | 40 | if __name__ == '__main__': 41 | main() 42 | -------------------------------------------------------------------------------- /src/MinorProjects/xyq/xyq.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import subprocess 4 | from ast import literal_eval 5 | 6 | 7 | def run(cmd, check=True): 8 | print('$', cmd) 9 | # return 10 | status = os.system(cmd) 11 | if check and status != 0: 12 | print('命令出错:', status) 13 | exit() 14 | 15 | 16 | def get_info(info='TYPE'): 17 | data = subprocess.check_output(['lsblk', '-o', f'PATH,{info}']).decode().strip().split('\n') 18 | result = dict() 19 | for line in data[1:]: 20 | k, v, *_ = line.split(' ', 1) + [''] 21 | result[k.strip()] = v.strip() 22 | return result 23 | 24 | 25 | def get_all_info(): 26 | result = dict() 27 | for info in 'TYPE,PATH,FSTYPE,FSSIZE,FSUSED,FSUSE%,HOTPLUG,MODEL,SIZE,STATE'.split(','): 28 | result[info] = get_info(info) 29 | return result 30 | 31 | 32 | result = get_all_info() 33 | 34 | 35 | def show_dev_info(dev): 36 | return f"{dev}\t{result['MODEL'][dev]}\t{result['SIZE'][dev]}" 37 | 38 | 39 | def select_disk(message='输入选择的存储卡:'): 40 | global result 41 | result = get_all_info() 42 | disks = sorted({k for k, v in result['TYPE'].items() if v == 'disk' and result['HOTPLUG'][k] == '1'}) 43 | if len(disks) == 1: 44 | return disks[0] 45 | elif len(disks) == 0: 46 | print("没有找到内存卡或u盘设备") 47 | exit() 48 | return None 49 | else: 50 | while True: 51 | for i, p in enumerate(disks): 52 | print(f"[{i}]: \t{show_dev_info(p)}") 53 | n = (input(message).strip()) 54 | try: 55 | n = literal_eval(n) 56 | except SyntaxError: 57 | pass 58 | if isinstance(n, int) and 0 <= n < len(disks): 59 | return disks[n] 60 | elif isinstance(n, str) and n in disks: 61 | return n 62 | 63 | 64 | print('请选择操作:\n\t[1] 从存储卡制作镜像\n\t[2] 将镜像文件写入存储卡(危险操作)\n\t[3] 将数据从来源存储卡复制到目标存储卡(危险操作)') 65 | method = input('请输入操作标号 [1/2/3]:') 66 | if method not in {'1', '2', '3'}: 67 | print('输入错误') 68 | exit() 69 | 70 | input('插入U盘或存储卡,准备好后按回车') 71 | 72 | if method == '1': 73 | sd = select_disk('选择来源存储卡:') 74 | print(f'选择了设备:{show_dev_info(sd)}') 75 | file = input('输入镜像文件名:') 76 | if file.strip().startswith('/dev'): 77 | print("镜像文件不能是设备") 78 | exit() 79 | if os.path.exists(file): 80 | print(file, '已存在,操作取消') 81 | exit() 82 | run(f'sudo umount {sd}*', False) 83 | run(f'sudo dd if={sd} of="{file}"') 84 | elif method == '2': 85 | sd = select_disk('选择目标存储卡:') 86 | print(f'选择了设备:{show_dev_info(sd)}') 87 | file = input('输入镜像文件名:') 88 | if file.strip().startswith('/dev'): 89 | print("镜像文件不能是设备") 90 | exit() 91 | prompt = input(f'\n警告:将会把数据从{file}写入到 {sd} ,会将设备原来的数据全部删除。\n继续输入"yes"后按回车,取消按回车:') 92 | if not os.path.exists(file): 93 | print('该文件不存在:', file, ',操作取消') 94 | exit() 95 | if prompt != 'yes': 96 | print("操作取消") 97 | exit() 98 | run(f'sudo umount {sd}*', False) 99 | run(f'sudo dd if="{file}" of={sd}') 100 | elif method == '3': 101 | sd_from = select_disk("选择来源内存卡:") 102 | print(f'选择了来源设备:{show_dev_info(sd_from)}') 103 | sd_to = select_disk("选择目标存储卡:") 104 | print(f'选择了目标设备:{show_dev_info(sd_to)}') 105 | 
if sd_from == sd_to: 106 | print(f"来源设备{sd_from}不能与目标设备{sd_to}相同") 107 | exit() 108 | prompt = input(f'\n警告:将会把数据从{sd_from}写入到 {sd_to} ,会将设备原来的数据全部删除。\n继续输入"yes"后按回车,取消按回车:') 109 | if prompt != 'yes': 110 | print("操作取消") 111 | exit() 112 | run(f'sudo umount {sd_from}*', False) 113 | run(f'sudo umount {sd_to}*', False) 114 | run(f'sudo dd if={sd_from} of={sd_to}') 115 | 116 | print('成功') 117 | -------------------------------------------------------------------------------- /src/MinorProjects/yyf/donation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import pandas as pd 3 | import math 4 | import sys 5 | 6 | if len(sys.argv) != 3: 7 | print('用法:\npython3', __file__, '主表路径', '分表路径') 8 | exit() 9 | 10 | master_table_name, partial_table_name = sys.argv[-2:] 11 | 12 | header_master = ["已筹金额(元)\n含转账", "参与捐赠人次(人次)", "已筹金额(元)\n含转账及线下录入", "参与捐赠人次(人次).1", "已筹金额(元)\n不含转账及线下录入", "参与捐赠人次(人次).2", ] 13 | header_partial = ['筹款量', '捐赠次数', '筹款量.1', '捐赠次数.1', '筹款量.2', '捐赠次数.2', ] 14 | 15 | master_table = pd.read_excel(master_table_name, convert_float=False, sheet_name=None, skiprows=0, parse_dates=True, na_values='').popitem()[-1].applymap(lambda x: x.replace(' ', ' ') if type(x) == str else x).dropna(subset=['备案编号']) 16 | partial_table = pd.read_excel(partial_table_name, convert_float=False, sheet_name=None, skiprows=1).popitem()[-1].applymap(lambda x: x.replace(' ', ' ') if type(x) == str else x).drop(['发起机构.1', '项目名称.1', '发起机构.2', '项目名称.2'], 1) 17 | master_table = master_table.applymap(lambda x: x.replace(' ', ' ') if type(x) == str else x) 18 | master_table['目标金额(元)'].fillna('', inplace=True) 19 | partial_table = partial_table.applymap(lambda x: x.replace(' ', ' ') if type(x) == str else x) 20 | print('原始主表:', master_table.shape) 21 | print('原始分表:', partial_table.shape) 22 | 23 | 24 | def convert_return_to_newline(table): 25 | new_columns = [i.replace('\r', '\n').replace('\n\n', '\n') for i in table.columns] 26 | table.columns = new_columns 27 | return table 28 | 29 | 30 | def drop_na_cols(table): 31 | table = table.dropna(axis=1, thresh=math.ceil(len(table) / 2)) 32 | return table 33 | 34 | 35 | def split_index(table, index): 36 | positive = table[index].dropna(axis=1, how='all') 37 | negative = table[index.apply(lambda x: not x)].dropna(axis=1, how='all') 38 | return positive, negative 39 | 40 | 41 | master_table = convert_return_to_newline(drop_na_cols(master_table)) 42 | partial_table = drop_na_cols(partial_table) 43 | # merge 44 | result = pd.merge(left=master_table, right=partial_table, left_on=['慈善组织', '项目名称'], right_on=['发起机构', '项目名称'], how='outer', validate='one_to_one') 45 | print('按照项目名称与组织合并后总数:', result.shape) 46 | # merge with cannot match 47 | abnormal, correct = split_index(result, result.isnull()[['慈善组织', '项目名称', '发起机构']].any(axis=1)) 48 | abnormal_partial, abnormal_master = split_index(abnormal, abnormal.isnull()['序号']) 49 | result_name = pd.merge(abnormal_master, abnormal_partial, on='项目名称', how='outer') 50 | print('匹配:', correct.shape, '未匹配', abnormal.shape, '->', '未匹配主表', abnormal_master.shape, '未匹配副表', abnormal_partial.shape) 51 | 52 | # merge with cannot match of project name 53 | print('仅按照项目名称匹配合并后总数:', result_name.shape) 54 | abnormal_name, correct_name = split_index(result_name, result_name.isnull()[['慈善组织', '项目名称', '发起机构']].any(axis=1)) 55 | abnormal_name_partial, abnormal_name_master = split_index(abnormal_name, abnormal_name.isnull()['序号']) 56 | result_org = pd.merge(abnormal_name_master, 
abnormal_name_partial, left_on='慈善组织', right_on='发起机构', how='outer') 57 | print('匹配:', correct_name.shape, '未匹配', abnormal_name.shape, '->', '未匹配主表', abnormal_name_master.shape, '未匹配副表', abnormal_name_partial.shape) 58 | 59 | print('仅按照组织匹配合并后总数:', result_org.shape) 60 | abnormal_org, correct_org = split_index(result_org, result_org.isnull()[['慈善组织', '项目名称_x', '项目名称_y', '发起机构']].any(axis=1)) 61 | correct_org = result_org 62 | print('匹配:', correct_org.shape, '未匹配', abnormal_org.shape) 63 | 64 | correct_org.drop('项目名称_y', 1, inplace=True) 65 | correct_org.columns = correct.columns 66 | correct_name.columns = correct.columns 67 | data = pd.concat([correct, correct_name, correct_org]) 68 | 69 | data[header_master] = data[header_partial] 70 | save = data[master_table.columns] 71 | try: 72 | save = save.sort_values(header_master[0], ascending=False) 73 | except KeyError as e: 74 | print('!!!!!!!!!!排序失败,找不到表头:', e) 75 | save.drop(columns='序号', inplace=True) 76 | # save.to_excel('matched.xlsx', index=False) 77 | save.to_excel('matched_without_cols.xlsx', index=False, header=False) 78 | print('最终表格', save.shape, '=', [correct.shape, correct_name.shape, correct_org.shape], '最终未匹配:', abnormal_org.shape) 79 | 80 | if len(abnormal_org): 81 | abnormal_org.to_excel('没对应上.xlsx', index=False) 82 | print('没对应上的数量', abnormal_org.shape) 83 | -------------------------------------------------------------------------------- /src/Web/html/split.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | div+css实现frameset效果 6 | 13 | 14 | 15 |
<!-- (lines 16-29) markup stripped during export: the page uses div+css to imitate a frameset layout; the surviving text shows a top bar ("顶部", top) and a main content area ("内容", content); the <style> block (lines 6-13) and the remaining tags were lost. -->
30 | 31 | 32 | -------------------------------------------------------------------------------- /src/Web/ipgw.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import requests 3 | import time 4 | import re 5 | import os 6 | import bs4 7 | 8 | help = '''东北大学IP网关登陆器 9 | 用法: 10 | {name} query 查询状态 11 | {name} login username password 登陆网关 12 | {name} logout username password 退出网关 13 | '''.format(name=os.path.split(__file__)[-1]) 14 | 15 | 16 | def connect(payload={}, url='http://ipgw.neu.edu.cn/srun_portal_pc.php'): 17 | not_connected = True 18 | while not_connected: 19 | try: 20 | response = requests.post(url, data=payload) 21 | not_connected = False 22 | except requests.exceptions.ConnectionError: 23 | ...  # keep retrying until the request goes through 24 | return response 25 | 26 | 27 | def query(): 28 | # query the current online status 29 | payload = { 30 | 'action': 'get_online_info', 31 | } 32 | r = connect(payload, 'http://ipgw.neu.edu.cn/include/auth_action.php') 33 | return r.content.decode('utf-8') 34 | 35 | 36 | def login(username='', password=''): 37 | # log in; known server response codes: 38 | # E2531 user does not exist 39 | # E2553 wrong password 40 | # E2606 account disabled 41 | # E2616 account in arrears 42 | # E2620 already online 43 | 44 | # a page whose text starts with '1. 请' means the login succeeded 45 | payload = { 46 | 'action': 'login', 47 | 'ac_id': '1', 48 | 'user_ip': '', 49 | 'nas_ip': '', 50 | 'user_mac': '', 51 | 'url': '', 52 | 'username': '{}'.format(username), 53 | 'password': '{}'.format(password), 54 | 'save_me': '0'} 55 | r = connect(payload) 56 | soup = bs4.BeautifulSoup(r.content.decode('utf-8'), 'html.parser') 57 | if re.findall('5分钟', str(soup.p)):  # the gateway asks to wait 5 minutes ("5分钟") before retrying 58 | return 'sleep', str(soup.p) 59 | if str(soup.p)[3:7] == '1. 请': 60 | return 'True', str(soup.p) 61 | else: 62 | return str(soup.p)[3:8], str(soup.p) 63 | 64 | 65 | def logout(username='', password=''): 66 | # disconnect from the gateway 67 | payload = { 68 | 'action': 'logout', 69 | 'ajax': '1', 70 | 'username': '{}'.format(username), 71 | 'password': '{}'.format(password)} 72 | r = connect(payload) 73 | return r.text 74 | 75 | if __name__ == '__main__': 76 | import fire 77 | fire.Fire() 78 | -------------------------------------------------------------------------------- /src/probability.py: -------------------------------------------------------------------------------- 1 | import math 2 | import random 3 | from functools import wraps 4 | 5 | 6 | def binsearch_solve(function, lo, hi, e=0.001):  # find a root of function on [lo, hi] by bisection 7 | while hi - lo >= e: 8 | mid = (lo + hi) / 2 9 | result = function(mid) 10 | if result * function(hi) <= 0: 11 | lo = mid 12 | elif result * function(lo) <= 0: 13 | hi = mid 14 | else: 15 | lo, hi = (lo + mid) / 2, (hi + mid) / 2 16 | return (lo + hi) / 2 17 | 18 | 19 | def integrate(function, start, end, step=0.001):  # midpoint-rule numerical integration 20 | result = 0 21 | step = abs(step) 22 | flag = 1 23 | if start > end: 24 | start, end = end, start 25 | flag = -1 26 | elif start == end: 27 | return 0 28 | lo = start 29 | hi = start + step 30 | while hi <= end: 31 | column = function((lo + hi) / 2) * step 32 | if not math.isnan(column): 33 | result += column 34 | lo, hi = hi, hi + step 35 | result = result * flag 36 | return result 37 | 38 | 39 | def iterfunc(function):  # decorator: map over iterables, and turn bad inputs or numeric errors into NaN 40 | @wraps(function) 41 | def iterfunc_wrapper(x, *args, **kwargs): 42 | if '__iter__' in dir(x): 43 | return [function(x_i, *args, **kwargs) for x_i in x.__iter__()] 44 | elif x is None or (isinstance(x, float) and math.isnan(x)): 45 | return math.nan 46 | else: 47 | try: 48 | return function(x, *args, **kwargs) 49 | except (ZeroDivisionError, ValueError): 50 | return math.nan 51 | return iterfunc_wrapper 52 | 53 | def pdf2cdf(start, end):  # decorator factory: turn a PDF supported on [start, end] into a numerical CDF 54 | def
pdf2cdf_wrapper(PDF): 55 | @wraps(PDF) 56 | @iterfunc 57 | def CDF(x): 58 | if x < start: 59 | return 0 60 | elif x > end: 61 | return 1 62 | else: 63 | y = integrate(PDF, start, x)  # numerical CDF: integrate the PDF from start up to x 64 | return y 65 | return CDF 66 | return pdf2cdf_wrapper 67 | 68 | 69 | def cdf2pdf(step=0.001):  # decorator factory: differentiate a CDF numerically (central difference) 70 | def cdf2pdf_wrapper(CDF): 71 | @wraps(CDF) 72 | @iterfunc 73 | def PDF(x): 74 | if '__iter__' in dir(x):  # also handled by @iterfunc; kept as a harmless double check 75 | return [CDF(x_i) for x_i in x.__iter__()] 76 | y = (CDF(x + step / 2) - CDF(x - step / 2)) / step 77 | return y 78 | return PDF 79 | return cdf2pdf_wrapper 80 | 81 | 82 | def reverse_function(lo=-100, hi=100):  # decorator factory: numerically invert a monotonic function on [lo, hi] 83 | def reverse_function_wrapper(function): 84 | @wraps(function) 85 | @iterfunc 86 | def reversed_function(y): 87 | result = binsearch_solve(lambda x: function(x) - y, lo, hi) 88 | return result 89 | return reversed_function 90 | return reverse_function_wrapper 91 | 92 | 93 | def sampler(CDF, lo=-100, hi=100): 94 | y = random.random()  # inverse-transform sampling: draw u ~ U(0, 1) 95 | x = reverse_function(lo, hi)(CDF)(y)  # then solve CDF(x) = u on [lo, hi] 96 | return x 97 | 98 | 99 | def integrate_sample(function, CDF, n):  # Monte Carlo estimate of the integral of function, sampling from CDF 100 | result = 0 101 | for i in range(n): 102 | x = sampler(CDF) 103 | result += function(x) / (cdf2pdf()(CDF)(x) * n)  # importance weight: divide by the density, not the CDF 104 | return result 105 | --------------------------------------------------------------------------------
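
A minimal usage sketch for src/probability.py (illustrative, not part of the repo; it assumes probability.py is importable from the working directory, and the names exp_pdf and exp_cdf are invented here). It wraps a truncated exponential PDF into a numerical CDF with pdf2cdf, evaluates it, and draws a sample by inverse-transform sampling:

import math
from probability import pdf2cdf, sampler, integrate

def exp_pdf(x):
    # Exp(1) density truncated to [0, 10], normalized so it integrates to 1 on that interval
    return math.exp(-x) / (1 - math.exp(-10))

exp_cdf = pdf2cdf(0, 10)(exp_pdf)  # numerical CDF built by integrating the PDF

print(exp_cdf(1.0))             # ~0.632, i.e. P(X <= 1)
print(sampler(exp_cdf, 0, 10))  # one random draw, via bisection on CDF(x) = u
print(integrate(lambda x: x * math.exp(-x), 0, 10))  # ~1.0, the mean of Exp(1)

Every exp_cdf call re-runs the midpoint integration, and sampler bisects over those calls, so the module trades speed for simplicity.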