├── README.md ├── data └── Iris.csv ├── example ├── DeepFM.py ├── load_nn_iris.py ├── logistic_regression_multiclass.py ├── lr_quadratic.py ├── nn_iris.py └── wide_deep_titanic.py ├── tf_serving ├── __init__.py ├── exporter │ ├── __init__.py │ └── exporter.py ├── serving │ ├── __init__.py │ ├── proto │ │ ├── __init__.py │ │ ├── serving.proto │ │ ├── serving_pb2.py │ │ └── serving_pb2_grpc.py │ └── serving.py └── tf_serving.py └── tinyframework ├── __init__.py ├── __pycache__ ├── __init__.cpython-37.pyc └── tinyframework.cpython-37.pyc ├── core ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── core.cpython-37.pyc │ ├── graph.cpython-37.pyc │ └── node.cpython-37.pyc ├── core.py ├── graph.py └── node.py ├── dist ├── __init__.py ├── dist.py ├── proto │ ├── __init__.py │ ├── comm.proto │ ├── comm_pb2.py │ ├── comm_pb2_grpc.py │ ├── parameter_server.proto │ ├── parameter_server_pb2.py │ └── parameter_server_pb2_grpc.py └── ps │ ├── __init__.py │ └── ps.py ├── layer ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ └── layer.cpython-37.pyc └── layer.py ├── ops ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── loss.cpython-37.pyc │ ├── metrics.cpython-37.pyc │ └── ops.cpython-37.pyc ├── loss.py ├── metrics.py └── ops.py ├── optimizer ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ └── optimizer.cpython-37.pyc └── optimizer.py ├── tinyframework.py ├── trainer ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── local_trainer.cpython-37.pyc │ ├── saver.cpython-37.pyc │ └── trainer.cpython-37.pyc ├── local_trainer.py ├── saver.py └── trainer.py └── utils ├── __init__.py ├── __pycache__ ├── __init__.cpython-37.pyc ├── draw.cpython-37.pyc └── utils.cpython-37.pyc ├── draw.py └── utils.py /README.md: -------------------------------------------------------------------------------- 1 | # tinyframework 2 | 3 | ### Introduction 4 | A deep learning computational-graph framework built with pure NumPy. 5 | 6 | Note: to view the code, please switch to the master branch. 7 | 8 | ### Dev Tools: 9 | - pycharm 10 | 11 | ### Dependencies 12 | - python: >= 3.7 13 | 14 | ### Features 15 | - Based on a computational graph; can be used to build common machine learning models. 16 | - Support automatic gradient computation. 17 | - Support common optimization methods (such as GD, Momentum, Adagrad, RMSprop, Adam, etc.) 18 | - Support common evaluation methods (such as Accuracy, Precision, AUC, F1_score, etc.) 19 | - Support model save and load 20 | - Support drawing the computational graph with pyecharts 21 | - Support model serving via gRPC 22 | - Support model export 23 | - Support distributed training.
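### Quick start
A condensed sketch adapted from `example/logistic_regression_multiclass.py` (the scripts in `example/` are complete and runnable); the sample feature and label values below are illustrative:

```python
import numpy as np
import tinyframework as tf

# declare graph nodes: a 4-d input, a 3-class one-hot label, trainable weights and bias
x = tf.Variable(dim=(4, 1), init=False, trainable=False)
one_hot = tf.Variable(dim=(3, 1), init=False, trainable=False)
w = tf.Variable(dim=(3, 4), init=True, trainable=True)
b = tf.Variable(dim=(3, 1), init=True, trainable=True)

linear = tf.ops.Add(tf.ops.MatMul(w, x), b)       # building the graph computes nothing yet
predict = tf.ops.SoftMax(linear)
loss = tf.ops.loss.CrossEntropy(linear, one_hot)
optimizer = tf.optimizer.GradientDescent(tf.default_graph, loss, 0.02)

# one training step on a single sample
x.set_value(np.mat([5.1, 3.5, 1.4, 0.2]).T)
one_hot.set_value(np.mat([1.0, 0.0, 0.0]).T)
optimizer.one_step()   # forward + backward pass, accumulates gradients
optimizer.update()     # applies the accumulated gradients

predict.forward()
print(predict.value)   # class probabilities
```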
24 | 25 | ### logs: 26 | To be added 27 | -------------------------------------------------------------------------------- /data/Iris.csv: -------------------------------------------------------------------------------- 1 | Id,SepalLengthCm,SepalWidthCm,PetalLengthCm,PetalWidthCm,Species 2 | 1,5.1,3.5,1.4,0.2,Iris-setosa 3 | 2,4.9,3.0,1.4,0.2,Iris-setosa 4 | 3,4.7,3.2,1.3,0.2,Iris-setosa 5 | 4,4.6,3.1,1.5,0.2,Iris-setosa 6 | 5,5.0,3.6,1.4,0.2,Iris-setosa 7 | 6,5.4,3.9,1.7,0.4,Iris-setosa 8 | 7,4.6,3.4,1.4,0.3,Iris-setosa 9 | 8,5.0,3.4,1.5,0.2,Iris-setosa 10 | 9,4.4,2.9,1.4,0.2,Iris-setosa 11 | 10,4.9,3.1,1.5,0.1,Iris-setosa 12 | 11,5.4,3.7,1.5,0.2,Iris-setosa 13 | 12,4.8,3.4,1.6,0.2,Iris-setosa 14 | 13,4.8,3.0,1.4,0.1,Iris-setosa 15 | 14,4.3,3.0,1.1,0.1,Iris-setosa 16 | 15,5.8,4.0,1.2,0.2,Iris-setosa 17 | 16,5.7,4.4,1.5,0.4,Iris-setosa 18 | 17,5.4,3.9,1.3,0.4,Iris-setosa 19 | 18,5.1,3.5,1.4,0.3,Iris-setosa 20 | 19,5.7,3.8,1.7,0.3,Iris-setosa 21 | 20,5.1,3.8,1.5,0.3,Iris-setosa 22 | 21,5.4,3.4,1.7,0.2,Iris-setosa 23 | 22,5.1,3.7,1.5,0.4,Iris-setosa 24 | 23,4.6,3.6,1.0,0.2,Iris-setosa 25 | 24,5.1,3.3,1.7,0.5,Iris-setosa 26 | 25,4.8,3.4,1.9,0.2,Iris-setosa 27 | 26,5.0,3.0,1.6,0.2,Iris-setosa 28 | 27,5.0,3.4,1.6,0.4,Iris-setosa 29 | 28,5.2,3.5,1.5,0.2,Iris-setosa 30 | 29,5.2,3.4,1.4,0.2,Iris-setosa 31 | 30,4.7,3.2,1.6,0.2,Iris-setosa 32 | 31,4.8,3.1,1.6,0.2,Iris-setosa 33 | 32,5.4,3.4,1.5,0.4,Iris-setosa 34 | 33,5.2,4.1,1.5,0.1,Iris-setosa 35 | 34,5.5,4.2,1.4,0.2,Iris-setosa 36 | 35,4.9,3.1,1.5,0.1,Iris-setosa 37 | 36,5.0,3.2,1.2,0.2,Iris-setosa 38 | 37,5.5,3.5,1.3,0.2,Iris-setosa 39 | 38,4.9,3.1,1.5,0.1,Iris-setosa 40 | 39,4.4,3.0,1.3,0.2,Iris-setosa 41 | 40,5.1,3.4,1.5,0.2,Iris-setosa 42 | 41,5.0,3.5,1.3,0.3,Iris-setosa 43 | 42,4.5,2.3,1.3,0.3,Iris-setosa 44 | 43,4.4,3.2,1.3,0.2,Iris-setosa 45 | 44,5.0,3.5,1.6,0.6,Iris-setosa 46 | 45,5.1,3.8,1.9,0.4,Iris-setosa 47 | 46,4.8,3.0,1.4,0.3,Iris-setosa 48 | 47,5.1,3.8,1.6,0.2,Iris-setosa 49 | 48,4.6,3.2,1.4,0.2,Iris-setosa 50 | 49,5.3,3.7,1.5,0.2,Iris-setosa 51 | 50,5.0,3.3,1.4,0.2,Iris-setosa 52 | 51,7.0,3.2,4.7,1.4,Iris-versicolor 53 | 52,6.4,3.2,4.5,1.5,Iris-versicolor 54 | 53,6.9,3.1,4.9,1.5,Iris-versicolor 55 | 54,5.5,2.3,4.0,1.3,Iris-versicolor 56 | 55,6.5,2.8,4.6,1.5,Iris-versicolor 57 | 56,5.7,2.8,4.5,1.3,Iris-versicolor 58 | 57,6.3,3.3,4.7,1.6,Iris-versicolor 59 | 58,4.9,2.4,3.3,1.0,Iris-versicolor 60 | 59,6.6,2.9,4.6,1.3,Iris-versicolor 61 | 60,5.2,2.7,3.9,1.4,Iris-versicolor 62 | 61,5.0,2.0,3.5,1.0,Iris-versicolor 63 | 62,5.9,3.0,4.2,1.5,Iris-versicolor 64 | 63,6.0,2.2,4.0,1.0,Iris-versicolor 65 | 64,6.1,2.9,4.7,1.4,Iris-versicolor 66 | 65,5.6,2.9,3.6,1.3,Iris-versicolor 67 | 66,6.7,3.1,4.4,1.4,Iris-versicolor 68 | 67,5.6,3.0,4.5,1.5,Iris-versicolor 69 | 68,5.8,2.7,4.1,1.0,Iris-versicolor 70 | 69,6.2,2.2,4.5,1.5,Iris-versicolor 71 | 70,5.6,2.5,3.9,1.1,Iris-versicolor 72 | 71,5.9,3.2,4.8,1.8,Iris-versicolor 73 | 72,6.1,2.8,4.0,1.3,Iris-versicolor 74 | 73,6.3,2.5,4.9,1.5,Iris-versicolor 75 | 74,6.1,2.8,4.7,1.2,Iris-versicolor 76 | 75,6.4,2.9,4.3,1.3,Iris-versicolor 77 | 76,6.6,3.0,4.4,1.4,Iris-versicolor 78 | 77,6.8,2.8,4.8,1.4,Iris-versicolor 79 | 78,6.7,3.0,5.0,1.7,Iris-versicolor 80 | 79,6.0,2.9,4.5,1.5,Iris-versicolor 81 | 80,5.7,2.6,3.5,1.0,Iris-versicolor 82 | 81,5.5,2.4,3.8,1.1,Iris-versicolor 83 | 82,5.5,2.4,3.7,1.0,Iris-versicolor 84 | 83,5.8,2.7,3.9,1.2,Iris-versicolor 85 | 84,6.0,2.7,5.1,1.6,Iris-versicolor 86 | 85,5.4,3.0,4.5,1.5,Iris-versicolor 87 | 86,6.0,3.4,4.5,1.6,Iris-versicolor 88 | 87,6.7,3.1,4.7,1.5,Iris-versicolor 89 
| 88,6.3,2.3,4.4,1.3,Iris-versicolor 90 | 89,5.6,3.0,4.1,1.3,Iris-versicolor 91 | 90,5.5,2.5,4.0,1.3,Iris-versicolor 92 | 91,5.5,2.6,4.4,1.2,Iris-versicolor 93 | 92,6.1,3.0,4.6,1.4,Iris-versicolor 94 | 93,5.8,2.6,4.0,1.2,Iris-versicolor 95 | 94,5.0,2.3,3.3,1.0,Iris-versicolor 96 | 95,5.6,2.7,4.2,1.3,Iris-versicolor 97 | 96,5.7,3.0,4.2,1.2,Iris-versicolor 98 | 97,5.7,2.9,4.2,1.3,Iris-versicolor 99 | 98,6.2,2.9,4.3,1.3,Iris-versicolor 100 | 99,5.1,2.5,3.0,1.1,Iris-versicolor 101 | 100,5.7,2.8,4.1,1.3,Iris-versicolor 102 | 101,6.3,3.3,6.0,2.5,Iris-virginica 103 | 102,5.8,2.7,5.1,1.9,Iris-virginica 104 | 103,7.1,3.0,5.9,2.1,Iris-virginica 105 | 104,6.3,2.9,5.6,1.8,Iris-virginica 106 | 105,6.5,3.0,5.8,2.2,Iris-virginica 107 | 106,7.6,3.0,6.6,2.1,Iris-virginica 108 | 107,4.9,2.5,4.5,1.7,Iris-virginica 109 | 108,7.3,2.9,6.3,1.8,Iris-virginica 110 | 109,6.7,2.5,5.8,1.8,Iris-virginica 111 | 110,7.2,3.6,6.1,2.5,Iris-virginica 112 | 111,6.5,3.2,5.1,2.0,Iris-virginica 113 | 112,6.4,2.7,5.3,1.9,Iris-virginica 114 | 113,6.8,3.0,5.5,2.1,Iris-virginica 115 | 114,5.7,2.5,5.0,2.0,Iris-virginica 116 | 115,5.8,2.8,5.1,2.4,Iris-virginica 117 | 116,6.4,3.2,5.3,2.3,Iris-virginica 118 | 117,6.5,3.0,5.5,1.8,Iris-virginica 119 | 118,7.7,3.8,6.7,2.2,Iris-virginica 120 | 119,7.7,2.6,6.9,2.3,Iris-virginica 121 | 120,6.0,2.2,5.0,1.5,Iris-virginica 122 | 121,6.9,3.2,5.7,2.3,Iris-virginica 123 | 122,5.6,2.8,4.9,2.0,Iris-virginica 124 | 123,7.7,2.8,6.7,2.0,Iris-virginica 125 | 124,6.3,2.7,4.9,1.8,Iris-virginica 126 | 125,6.7,3.3,5.7,2.1,Iris-virginica 127 | 126,7.2,3.2,6.0,1.8,Iris-virginica 128 | 127,6.2,2.8,4.8,1.8,Iris-virginica 129 | 128,6.1,3.0,4.9,1.8,Iris-virginica 130 | 129,6.4,2.8,5.6,2.1,Iris-virginica 131 | 130,7.2,3.0,5.8,1.6,Iris-virginica 132 | 131,7.4,2.8,6.1,1.9,Iris-virginica 133 | 132,7.9,3.8,6.4,2.0,Iris-virginica 134 | 133,6.4,2.8,5.6,2.2,Iris-virginica 135 | 134,6.3,2.8,5.1,1.5,Iris-virginica 136 | 135,6.1,2.6,5.6,1.4,Iris-virginica 137 | 136,7.7,3.0,6.1,2.3,Iris-virginica 138 | 137,6.3,3.4,5.6,2.4,Iris-virginica 139 | 138,6.4,3.1,5.5,1.8,Iris-virginica 140 | 139,6.0,3.0,4.8,1.8,Iris-virginica 141 | 140,6.9,3.1,5.4,2.1,Iris-virginica 142 | 141,6.7,3.1,5.6,2.4,Iris-virginica 143 | 142,6.9,3.1,5.1,2.3,Iris-virginica 144 | 143,5.8,2.7,5.1,1.9,Iris-virginica 145 | 144,6.8,3.2,5.9,2.3,Iris-virginica 146 | 145,6.7,3.3,5.7,2.5,Iris-virginica 147 | 146,6.7,3.0,5.2,2.3,Iris-virginica 148 | 147,6.3,2.5,5.0,1.9,Iris-virginica 149 | 148,6.5,3.0,5.2,2.0,Iris-virginica 150 | 149,6.2,3.4,5.4,2.3,Iris-virginica 151 | 150,5.9,3.0,5.1,1.8,Iris-virginica 152 | -------------------------------------------------------------------------------- /example/DeepFM.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import tinyframework as tf 4 | from sklearn.preprocessing import LabelEncoder, OneHotEncoder 5 | 6 | # read data. 
remove useless columns 7 | # |PassengerId |Survived | Pclass |Name |Sex |Age |SibSp |Parch |Ticket |Fare |Cabin |Embarked 8 | data = pd.read_csv("../data/titanic.csv").drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1) 9 | 10 | le = LabelEncoder() 11 | ohe = OneHotEncoder(sparse=False) 12 | 13 | Pclass = ohe.fit_transform(le.fit_transform(data["Pclass"].fillna(0)).reshape(-1, 1)) # 3 one-hot columns 14 | Sex = ohe.fit_transform(le.fit_transform(data["Sex"].fillna("")).reshape(-1, 1)) 15 | Embarked = ohe.fit_transform(le.fit_transform(data["Embarked"].fillna("")).reshape(-1, 1)) 16 | 17 | # concat columns 18 | features = np.concatenate([Pclass, 19 | Sex, 20 | data[["Age"]].fillna(0), 21 | data[["SibSp"]].fillna(0), 22 | data[["Parch"]].fillna(0), 23 | data[["Fare"]].fillna(0), 24 | Embarked], axis=1) 25 | 26 | labels = data["Survived"].values*2-1 27 | feat_dim = features.shape[1] 28 | 29 | emb_size = 2 30 | # first-order input and weights 31 | x = tf.core.Variable(dim=(feat_dim, 1), init=False, trainable=False) 32 | w = tf.core.Variable(dim=(1, feat_dim), init=True, trainable=True) 33 | 34 | x_Pclass = tf.core.Variable(dim=(Pclass.shape[1], 1), init=False, trainable=False) 35 | x_Sex = tf.core.Variable(dim=(Sex.shape[1], 1), init=False, trainable=False) 36 | x_Embarked = tf.core.Variable(dim=(Embarked.shape[1], 1), init=False, trainable=False) 37 | 38 | emb_weight_Pclass = tf.core.Variable(dim=(emb_size, Pclass.shape[1]), init=True, trainable=True) 39 | emb_weight_Sex = tf.core.Variable(dim=(emb_size, Sex.shape[1]), init=True, trainable=True) 40 | emb_weight_Embark = tf.core.Variable(dim=(emb_size, Embarked.shape[1]), init=True, trainable=True) 41 | 42 | emb_Pclass = tf.ops.MatMul(emb_weight_Pclass, x_Pclass) # emb_size*1 43 | emb_Sex = tf.ops.MatMul(emb_weight_Sex, x_Sex) # emb_size*1 44 | emb_Embarked = tf.ops.MatMul(emb_weight_Embark, x_Embarked) # emb_size*1 45 | 46 | bias = tf.core.Variable(dim=(1, 1), init=True, trainable=True) 47 | 48 | emb_feat = tf.ops.Concat(emb_Pclass, emb_Sex, emb_Embarked) # (3*emb_size)*1 column: Concat flattens its inputs 49 | 50 | # FM part 51 | fm = tf.ops.Add(tf.ops.MatMul(w, x), # first-order part 52 | # second-order part: sum of squared embedding entries (1*1) 53 | tf.ops.MatMul(tf.ops.Reshape(emb_feat, shape=(1, 3 * emb_size)), emb_feat) 54 | ) 55 | 56 | # deep part 57 | hidden_1 = tf.layer.fc(emb_feat, 3*emb_size, 8, "Relu") 58 | hidden_2 = tf.layer.fc(hidden_1, 8, 4, "Relu") 59 | deep = tf.layer.fc(hidden_2, 4, 1, None) 60 | 61 | output = tf.ops.Add(fm, deep, bias) 62 | predict = tf.ops.Logistic(output) 63 | label = tf.core.Variable(dim=(1, 1), init=False, trainable=False) 64 | 65 | loss = tf.ops.loss.LogLoss(tf.ops.Multiply(label, output)) 66 | learning_rate = 0.005 67 | optimizer = tf.optimizer.Adam(tf.default_graph, loss, learning_rate) 68 | 69 | batch_size = 64 70 | for epoch in range(200): 71 | batch_cnt = 0 72 | for i in range(len(features)): 73 | x.set_value(np.mat(features[i]).T) 74 | x_Pclass.set_value(np.mat(features[i, :3]).T) 75 | x_Sex.set_value(np.mat(features[i, 3:5]).T) 76 | x_Embarked.set_value(np.mat(features[i, 9:]).T) 77 | label.set_value(np.mat(labels[i])) 78 | 79 | optimizer.one_step() 80 | batch_cnt += 1 81 | 82 | if batch_cnt > batch_size: 83 | optimizer.update() 84 | batch_cnt = 0 85 | 86 | pred = [] 87 | for i in range(len(features)): 88 | x.set_value(np.mat(features[i]).T) 89 | x_Pclass.set_value(np.mat(features[i, :3]).T) 90 | x_Sex.set_value(np.mat(features[i, 3:5]).T) 91 | x_Embarked.set_value(np.mat(features[i, 9:]).T) 92 | 93 | predict.forward() 94 | pred.append(predict.value[0, 0]) 95 | 96 | pred = (np.array(pred) > 0.5).astype(np.int16)*2-1 97 |
accuracy = (labels == pred).astype(np.int16).sum() / len(features) 98 | 99 | print("epoch:{:d}, acc:{:.3f}".format(epoch+1, accuracy)) 100 | -------------------------------------------------------------------------------- /example/load_nn_iris.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from tinyframework.trainer import saver 4 | import tinyframework as tf 5 | import numpy as np 6 | 7 | saver = saver.Saver('./mode_save') 8 | saver.load(model_file_name='nn_iris.json', weights_file_name='nn_iris.npz') 9 | 10 | x = tf.get_node_from_graph('Variable:0') 11 | pred = tf.get_node_from_graph('SoftMax:16') 12 | 13 | # input features: 7.0, 3.2, 4.7, 1.4 14 | x.set_value(np.mat([7, 3.2, 4.7, 1.4]).T) 15 | pred.forward() 16 | print(pred.value) 17 | print("predict class: {}".format(np.argmax(pred.value)+1)) -------------------------------------------------------------------------------- /example/logistic_regression_multiclass.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import tinyframework as tf 6 | from sklearn.preprocessing import LabelEncoder, OneHotEncoder 7 | 8 | data = pd.read_csv('../data/Iris.csv').drop('Id', axis=1) 9 | # shuffle 10 | data = data.sample(len(data), replace=False) 11 | le = LabelEncoder() 12 | num_label = le.fit_transform(data['Species']) 13 | 14 | oh = OneHotEncoder(sparse=False) 15 | one_hot_label = oh.fit_transform(num_label.reshape(-1, 1)) 16 | 17 | # build features 18 | features = data[['SepalLengthCm', 19 | 'SepalWidthCm', 20 | 'PetalLengthCm', 21 | 'PetalWidthCm']].values 22 | 23 | x = tf.Variable(dim=(4, 1), init=False, trainable=False) 24 | one_hot = tf.Variable(dim=(3, 1), init=False, trainable=False) 25 | w = tf.Variable(dim=(3, 4), init=True, trainable=True) 26 | b = tf.Variable(dim=(3, 1), init=True, trainable=True) 27 | 28 | linear = tf.ops.Add(tf.ops.MatMul(w, x), b) 29 | predict = tf.ops.SoftMax(linear) 30 | 31 | loss = tf.ops.loss.CrossEntropy(linear, one_hot) 32 | learning_rate = 0.02 33 | 34 | optimizer = tf.optimizer.GradientDescent(tf.default_graph, loss, learning_rate) 35 | batch_size = 32 36 | 37 | for epoch in range(200): 38 | batch_cnt = 0 39 | for i in range(len(features)): 40 | feat = np.mat(features[i, :]).T 41 | label = np.mat(one_hot_label[i, :]).T 42 | 43 | x.set_value(feat) 44 | one_hot.set_value(label) 45 | 46 | optimizer.one_step() 47 | batch_cnt += 1 48 | 49 | if batch_cnt >= batch_size: 50 | optimizer.update() 51 | batch_cnt = 0 52 | 53 | pred = [] 54 | for i in range(len(features)): 55 | feat = np.mat(features[i, :]).T 56 | x.set_value(feat) 57 | predict.forward() 58 | pred.append(predict.value.A.ravel()) 59 | 60 | pred = np.array(pred).argmax(axis=1) 61 | 62 | accuracy = (num_label == pred).astype(np.int32).sum() / len(features) 63 | 64 | print("epoch:{:d}, accuracy:{:.3f}".format(epoch + 1, accuracy)) 65 | -------------------------------------------------------------------------------- /example/lr_quadratic.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tinyframework as tf 3 | from sklearn.datasets import make_circles 4 | 5 | # construct concentric-circle data 6 | X, y = make_circles(200, noise=0.1, factor=0.2) 7 | y = y*2-1 # map labels {0, 1} to {-1, +1} 8 | use_quadratic = True 9 | 10 | x1 = tf.Variable(dim=(2, 1), init=False, trainable=False) 11 | label = tf.Variable(dim=(1, 1), init=False, trainable=False) 12 | b
= tf.Variable(dim=(1, 1), init=True, trainable=True) 13 | 14 | if use_quadratic: 15 | # multiply x1 by its own transpose, then reshape the 2x2 outer product to a column 16 | x2 = tf.ops.Reshape(tf.ops.MatMul(x1, tf.ops.Reshape(x1, shape=(1, 2))), shape=(4, 1)) 17 | # concat first-order and second-order features 18 | x = tf.ops.Concat(x1, x2) # this op first flattens both operands row-wise, then concatenates them 19 | w = tf.Variable(dim=(1, 6), init=True, trainable=True) 20 | else: 21 | x = x1 22 | w = tf.Variable(dim=(1, 2), init=True, trainable=True) 23 | 24 | out = tf.ops.Add(tf.ops.MatMul(w, x), b) 25 | predict = tf.ops.Logistic(out) 26 | 27 | loss = tf.ops.loss.LogLoss(tf.ops.MatMul(label, out)) 28 | 29 | learning_rate = 0.01 30 | 31 | optimizer = tf.optimizer.Adam(tf.default_graph, loss, learning_rate) 32 | 33 | batch_size = 50 34 | 35 | for epoch in range(200): 36 | batch_no = 0 37 | for i in range(len(X)): 38 | x1.set_value(np.mat(X[i]).T) 39 | label.set_value(np.mat(y[i])) 40 | 41 | optimizer.one_step() 42 | batch_no += 1 43 | 44 | if batch_no >= batch_size: 45 | optimizer.update() 46 | batch_no = 0 47 | 48 | pred = [] 49 | for i in range(len(X)): 50 | x1.set_value(np.mat(X[i]).T) 51 | label.set_value(np.mat(y[i])) 52 | 53 | predict.forward() 54 | 55 | pred.append(predict.value[0, 0]) 56 | 57 | pred = (np.array(pred) > 0.5).astype(np.int32)*2-1 58 | 59 | accuracy = (y == pred).astype(np.int16).sum() / len(X) 60 | 61 | print("epoch:{:d}, accuracy:{:.3f}".format(epoch, accuracy)) 62 | 63 | 64 | -------------------------------------------------------------------------------- /example/nn_iris.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import tinyframework as tf 6 | from sklearn.preprocessing import LabelEncoder, OneHotEncoder 7 | from tinyframework.utils import draw 8 | from tinyframework.trainer import saver 9 | from tinyframework import get_node_from_graph 10 | 11 | 12 | data = pd.read_csv('../data/Iris.csv').drop('Id', axis=1) 13 | # shuffle 14 | data = data.sample(len(data), replace=False) 15 | 16 | le = LabelEncoder() 17 | num_label = le.fit_transform(data['Species']) 18 | 19 | oh = OneHotEncoder(sparse=False) 20 | one_hot_label = oh.fit_transform(num_label.reshape(-1, 1)) 21 | 22 | # build features 23 | features = data[['SepalLengthCm', 24 | 'SepalWidthCm', 25 | 'PetalLengthCm', 26 | 'PetalWidthCm']].values 27 | 28 | x = tf.Variable(dim=(4, 1), init=False, trainable=False) 29 | one_hot = tf.Variable(dim=(3, 1), init=False, trainable=False) 30 | 31 | hidden1 = tf.layer.fc(x, 4, 10, "ReLU") 32 | hidden2 = tf.layer.fc(hidden1, 10, 10, "ReLU") 33 | out = tf.layer.fc(hidden2, 10, 3, None) 34 | 35 | predict = tf.ops.SoftMax(out) 36 | 37 | loss = tf.ops.loss.CrossEntropy(out, one_hot) 38 | learning_rate = 0.005 39 | 40 | optimizer = tf.optimizer.Adam(tf.default_graph, loss, learning_rate) 41 | batch_size = 32 42 | 43 | for epoch in range(200): 44 | batch_cnt = 0 45 | for i in range(len(features)): 46 | feat = np.mat(features[i, :]).T 47 | label = np.mat(one_hot_label[i, :]).T 48 | 49 | x.set_value(feat) 50 | one_hot.set_value(label) 51 | 52 | optimizer.one_step() 53 | batch_cnt += 1 54 | 55 | if batch_cnt >= batch_size: 56 | optimizer.update() 57 | batch_cnt = 0 58 | 59 | pred = [] 60 | for i in range(len(features)): 61 | feat = np.mat(features[i, :]).T 62 | x.set_value(feat) 63 | predict.forward() 64 | pred.append(predict.value.A.ravel()) 65 | 66 | pred = np.array(pred).argmax(axis=1) 67 | 68 | accuracy = (num_label ==
pred).astype(np.int32).sum() / len(features) 69 | 70 | print("epoch:{:d}, accuracy:{:.3f}".format(epoch + 1, accuracy)) 71 | 72 | 73 | draw.draw_graph(filename='nn_iris') 74 | saver = saver.Saver('./mode_save') 75 | saver.save(model_file_name='nn_iris.json', weights_file_name='nn_iris.npz') 76 | 77 | -------------------------------------------------------------------------------- /example/wide_deep_titanic.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from sklearn.preprocessing import LabelEncoder, OneHotEncoder 4 | import tinyframework as tf 5 | 6 | # read data. remove useless columns 7 | # |PassengerId |Survived | Pclass |Name |Sex |Age |SibSp |Parch |Ticket |Fare |Cabin |Embarked 8 | data = pd.read_csv('../data/titanic.csv').drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1) 9 | 10 | le = LabelEncoder() 11 | ohe = OneHotEncoder(sparse=False) 12 | 13 | Pclass = ohe.fit_transform(le.fit_transform(data["Pclass"].fillna(0)).reshape(-1, 1)) # 3 one-hot columns 14 | Sex = ohe.fit_transform(le.fit_transform(data["Sex"].fillna("")).reshape(-1, 1)) # 2 one-hot columns 15 | Embarked = ohe.fit_transform(le.fit_transform(data["Embarked"].fillna("")).reshape(-1, 1)) # 4 one-hot columns 16 | 17 | # construct features 18 | features = np.concatenate([Pclass, 19 | Sex, 20 | data[["Age"]].fillna(0), 21 | data[["SibSp"]].fillna(0), 22 | data[["Parch"]].fillna(0), 23 | data[["Fare"]].fillna(0), 24 | Embarked], axis=1) 25 | 26 | labels = data["Survived"].values*2-1 27 | 28 | feat_dim = features.shape[1] 29 | emb_size = 2 # embedding size (originally 3) 30 | # first-order features 31 | x = tf.core.Variable(dim=(feat_dim, 1), init=False, trainable=False) 32 | # first-order weights 33 | w = tf.core.Variable(dim=(1, feat_dim), init=True, trainable=True) 34 | 35 | x_Pclass = tf.core.Variable(dim=(Pclass.shape[1], 1), init=False, trainable=False) 36 | x_Sex = tf.core.Variable(dim=(Sex.shape[1], 1), init=False, trainable=False) 37 | x_Embarked = tf.core.Variable(dim=(Embarked.shape[1], 1), init=False, trainable=False) 38 | 39 | emb_weight_Pclass = tf.core.Variable(dim=(emb_size, Pclass.shape[1]), init=True, trainable=True) 40 | emb_weight_Sex = tf.core.Variable(dim=(emb_size, Sex.shape[1]), init=True, trainable=True) 41 | emb_weight_Embarked = tf.core.Variable(dim=(emb_size, Embarked.shape[1]), init=True, trainable=True) 42 | 43 | # wide part 44 | wide = tf.ops.MatMul(w, x) 45 | 46 | # deep part 47 | emb_Pclass = tf.ops.MatMul(emb_weight_Pclass, x_Pclass) 48 | emb_Sex = tf.ops.MatMul(emb_weight_Sex, x_Sex) 49 | emb_Embarked = tf.ops.MatMul(emb_weight_Embarked, x_Embarked) 50 | emb_features = tf.ops.Concat(emb_Pclass, emb_Sex, emb_Embarked) 51 | 52 | hidden_1 = tf.layer.fc(emb_features, 3*emb_size, 8, "Relu") 53 | hidden_2 = tf.layer.fc(hidden_1, 8, 4, "Relu") 54 | deep = tf.layer.fc(hidden_2, 4, 1, None) 55 | 56 | bias = tf.core.Variable(dim=(1, 1), init=True, trainable=True) 57 | output = tf.ops.Add(wide, deep, bias) 58 | predict = tf.ops.Logistic(output) 59 | 60 | label = tf.core.Variable(dim=(1, 1), init=False, trainable=False) 61 | 62 | loss = tf.ops.loss.LogLoss(tf.ops.Multiply(label, output)) 63 | learning_rate = 0.05 64 | optimizer = tf.optimizer.Adam(tf.default_graph, loss, learning_rate) 65 | 66 | batch_size = 64 67 | for epoch in range(200): 68 | batch_cnt = 0 69 | for i in range(len(features)): 70 | x.set_value(np.mat(features[i]).T) 71 | x_Pclass.set_value(np.mat(features[i, :3]).T) 72 | x_Sex.set_value(np.mat(features[i, 3:5]).T) 73 | x_Embarked.set_value(np.mat(features[i, 9:]).T) 74 |
label.set_value(np.mat(labels[i])) 75 | 76 | optimizer.one_step() 77 | batch_cnt += 1 78 | 79 | if batch_cnt > batch_size: 80 | optimizer.update() 81 | batch_cnt = 0 82 | 83 | pred = [] 84 | for i in range(len(features)): 85 | x.set_value(np.mat(features[i]).T) 86 | x_Pclass.set_value(np.mat(features[i, :3]).T) 87 | x_Sex.set_value(np.mat(features[i, 3:5]).T) 88 | x_Embarked.set_value(np.mat(features[i, 9:]).T) 89 | 90 | predict.forward() 91 | pred.append(predict.value[0, 0]) 92 | 93 | pred = (np.array(pred) > 0.5).astype(np.int16)*2-1 94 | accuracy = (labels == pred).astype(np.int16).sum() / len(features) 95 | 96 | print("epoch:{:d}, acc:{:.3f}".format(epoch+1, accuracy)) -------------------------------------------------------------------------------- /tf_serving/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from . import tf_serving 3 | from . import serving 4 | from . import exporter -------------------------------------------------------------------------------- /tf_serving/exporter/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from . import exporter -------------------------------------------------------------------------------- /tf_serving/exporter/exporter.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | import tinyframework as tf 4 | 5 | class Export(object): 6 | """ 7 | model serving exporter 8 | """ 9 | def __init__(self, graph=None): 10 | self.graph = tf.default_graph if graph is None else graph # fall back to the default graph when none is given 11 | 12 | def signature(self, input_name, output_name): 13 | """ 14 | return the model serving interface signature 15 | """ 16 | input_var = tf.get_node_from_graph(input_name, graph=self.graph) 17 | assert input_var is not None 18 | output_var = tf.get_node_from_graph(output_name, graph=self.graph) 19 | assert output_var is not None 20 | 21 | input_signature = dict() 22 | input_signature['name'] = input_var.name 23 | output_signature = dict() 24 | output_signature['name'] = output_var.name 25 | 26 | return { 27 | 'inputs': input_signature, 28 | 'outputs': output_signature 29 | } -------------------------------------------------------------------------------- /tf_serving/serving/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from . import serving 3 | from .proto import * -------------------------------------------------------------------------------- /tf_serving/serving/proto/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from .serving_pb2 import * 3 | from .serving_pb2_grpc import * -------------------------------------------------------------------------------- /tf_serving/serving/proto/serving.proto: -------------------------------------------------------------------------------- 1 | 2 | syntax = "proto3"; 3 | 4 | package tinyframework.serving; 5 | 6 | service TinyFrameworkServing{ 7 | rpc Predict(PredictRequest) returns (PredictResponse) {} 8 | } 9 | 10 | message Matrix{ 11 | repeated float value = 1; 12 | repeated int32 dim = 2; 13 | } 14 | 15 | message PredictRequest{ 16 | repeated Matrix data = 1; 17 | } 18 | 19 | message PredictResponse{ 20 | repeated Matrix data = 1; 21 | } 22 | 23 | // python -m grpc_tools.protoc --python_out=. --grpc_python_out=. -I.
*.proto -------------------------------------------------------------------------------- /tf_serving/serving/proto/serving_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: serving.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf import descriptor as _descriptor 6 | from google.protobuf import message as _message 7 | from google.protobuf import reflection as _reflection 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor.FileDescriptor( 17 | name='serving.proto', 18 | package='tinyframework.serving', 19 | syntax='proto3', 20 | serialized_options=None, 21 | create_key=_descriptor._internal_create_key, 22 | serialized_pb=b'\n\rserving.proto\x12\x15tinyframework.serving\"$\n\x06Matrix\x12\r\n\x05value\x18\x01 \x03(\x02\x12\x0b\n\x03\x64im\x18\x02 \x03(\x05\"=\n\x0ePredictRequest\x12+\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\x1d.tinyframework.serving.Matrix\">\n\x0fPredictResponse\x12+\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\x1d.tinyframework.serving.Matrix2r\n\x14TinyFrameworkServing\x12Z\n\x07Predict\x12%.tinyframework.serving.PredictRequest\x1a&.tinyframework.serving.PredictResponse\"\x00\x62\x06proto3' 23 | ) 24 | 25 | 26 | 27 | 28 | _MATRIX = _descriptor.Descriptor( 29 | name='Matrix', 30 | full_name='tinyframework.serving.Matrix', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | containing_type=None, 34 | create_key=_descriptor._internal_create_key, 35 | fields=[ 36 | _descriptor.FieldDescriptor( 37 | name='value', full_name='tinyframework.serving.Matrix.value', index=0, 38 | number=1, type=2, cpp_type=6, label=3, 39 | has_default_value=False, default_value=[], 40 | message_type=None, enum_type=None, containing_type=None, 41 | is_extension=False, extension_scope=None, 42 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 43 | _descriptor.FieldDescriptor( 44 | name='dim', full_name='tinyframework.serving.Matrix.dim', index=1, 45 | number=2, type=5, cpp_type=1, label=3, 46 | has_default_value=False, default_value=[], 47 | message_type=None, enum_type=None, containing_type=None, 48 | is_extension=False, extension_scope=None, 49 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 50 | ], 51 | extensions=[ 52 | ], 53 | nested_types=[], 54 | enum_types=[ 55 | ], 56 | serialized_options=None, 57 | is_extendable=False, 58 | syntax='proto3', 59 | extension_ranges=[], 60 | oneofs=[ 61 | ], 62 | serialized_start=40, 63 | serialized_end=76, 64 | ) 65 | 66 | 67 | _PREDICTREQUEST = _descriptor.Descriptor( 68 | name='PredictRequest', 69 | full_name='tinyframework.serving.PredictRequest', 70 | filename=None, 71 | file=DESCRIPTOR, 72 | containing_type=None, 73 | create_key=_descriptor._internal_create_key, 74 | fields=[ 75 | _descriptor.FieldDescriptor( 76 | name='data', full_name='tinyframework.serving.PredictRequest.data', index=0, 77 | number=1, type=11, cpp_type=10, label=3, 78 | has_default_value=False, default_value=[], 79 | message_type=None, enum_type=None, containing_type=None, 80 | is_extension=False, extension_scope=None, 81 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 82 | ], 83 | extensions=[ 84 | ], 85 | nested_types=[], 86 | enum_types=[ 87 | ], 88 | 
serialized_options=None, 89 | is_extendable=False, 90 | syntax='proto3', 91 | extension_ranges=[], 92 | oneofs=[ 93 | ], 94 | serialized_start=78, 95 | serialized_end=139, 96 | ) 97 | 98 | 99 | _PREDICTRESPONSE = _descriptor.Descriptor( 100 | name='PredictResponse', 101 | full_name='tinyframework.serving.PredictResponse', 102 | filename=None, 103 | file=DESCRIPTOR, 104 | containing_type=None, 105 | create_key=_descriptor._internal_create_key, 106 | fields=[ 107 | _descriptor.FieldDescriptor( 108 | name='data', full_name='tinyframework.serving.PredictResponse.data', index=0, 109 | number=1, type=11, cpp_type=10, label=3, 110 | has_default_value=False, default_value=[], 111 | message_type=None, enum_type=None, containing_type=None, 112 | is_extension=False, extension_scope=None, 113 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 114 | ], 115 | extensions=[ 116 | ], 117 | nested_types=[], 118 | enum_types=[ 119 | ], 120 | serialized_options=None, 121 | is_extendable=False, 122 | syntax='proto3', 123 | extension_ranges=[], 124 | oneofs=[ 125 | ], 126 | serialized_start=141, 127 | serialized_end=203, 128 | ) 129 | 130 | _PREDICTREQUEST.fields_by_name['data'].message_type = _MATRIX 131 | _PREDICTRESPONSE.fields_by_name['data'].message_type = _MATRIX 132 | DESCRIPTOR.message_types_by_name['Matrix'] = _MATRIX 133 | DESCRIPTOR.message_types_by_name['PredictRequest'] = _PREDICTREQUEST 134 | DESCRIPTOR.message_types_by_name['PredictResponse'] = _PREDICTRESPONSE 135 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 136 | 137 | Matrix = _reflection.GeneratedProtocolMessageType('Matrix', (_message.Message,), { 138 | 'DESCRIPTOR' : _MATRIX, 139 | '__module__' : 'serving_pb2' 140 | # @@protoc_insertion_point(class_scope:tinyframework.serving.Matrix) 141 | }) 142 | _sym_db.RegisterMessage(Matrix) 143 | 144 | PredictRequest = _reflection.GeneratedProtocolMessageType('PredictRequest', (_message.Message,), { 145 | 'DESCRIPTOR' : _PREDICTREQUEST, 146 | '__module__' : 'serving_pb2' 147 | # @@protoc_insertion_point(class_scope:tinyframework.serving.PredictRequest) 148 | }) 149 | _sym_db.RegisterMessage(PredictRequest) 150 | 151 | PredictResponse = _reflection.GeneratedProtocolMessageType('PredictResponse', (_message.Message,), { 152 | 'DESCRIPTOR' : _PREDICTRESPONSE, 153 | '__module__' : 'serving_pb2' 154 | # @@protoc_insertion_point(class_scope:tinyframework.serving.PredictResponse) 155 | }) 156 | _sym_db.RegisterMessage(PredictResponse) 157 | 158 | 159 | 160 | _TINYFRAMEWORKSERVING = _descriptor.ServiceDescriptor( 161 | name='TinyFrameworkServing', 162 | full_name='tinyframework.serving.TinyFrameworkServing', 163 | file=DESCRIPTOR, 164 | index=0, 165 | serialized_options=None, 166 | create_key=_descriptor._internal_create_key, 167 | serialized_start=205, 168 | serialized_end=319, 169 | methods=[ 170 | _descriptor.MethodDescriptor( 171 | name='Predict', 172 | full_name='tinyframework.serving.TinyFrameworkServing.Predict', 173 | index=0, 174 | containing_service=None, 175 | input_type=_PREDICTREQUEST, 176 | output_type=_PREDICTRESPONSE, 177 | serialized_options=None, 178 | create_key=_descriptor._internal_create_key, 179 | ), 180 | ]) 181 | _sym_db.RegisterServiceDescriptor(_TINYFRAMEWORKSERVING) 182 | 183 | DESCRIPTOR.services_by_name['TinyFrameworkServing'] = _TINYFRAMEWORKSERVING 184 | 185 | # @@protoc_insertion_point(module_scope) 186 | -------------------------------------------------------------------------------- /tf_serving/serving/proto/serving_pb2_grpc.py: 
-------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import serving_pb2 as serving__pb2 6 | 7 | 8 | class TinyFrameworkServingStub(object): 9 | """Missing associated documentation comment in .proto file.""" 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 16 | """ 17 | self.Predict = channel.unary_unary( 18 | '/tinyframework.serving.TinyFrameworkServing/Predict', 19 | request_serializer=serving__pb2.PredictRequest.SerializeToString, 20 | response_deserializer=serving__pb2.PredictResponse.FromString, 21 | ) 22 | 23 | 24 | class TinyFrameworkServingServicer(object): 25 | """Missing associated documentation comment in .proto file.""" 26 | 27 | def Predict(self, request, context): 28 | """Missing associated documentation comment in .proto file.""" 29 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 30 | context.set_details('Method not implemented!') 31 | raise NotImplementedError('Method not implemented!') 32 | 33 | 34 | def add_TinyFrameworkServingServicer_to_server(servicer, server): 35 | rpc_method_handlers = { 36 | 'Predict': grpc.unary_unary_rpc_method_handler( 37 | servicer.Predict, 38 | request_deserializer=serving__pb2.PredictRequest.FromString, 39 | response_serializer=serving__pb2.PredictResponse.SerializeToString, 40 | ), 41 | } 42 | generic_handler = grpc.method_handlers_generic_handler( 43 | 'tinyframework.serving.TinyFrameworkServing', rpc_method_handlers) 44 | server.add_generic_rpc_handlers((generic_handler,)) 45 | 46 | 47 | # This class is part of an EXPERIMENTAL API. 48 | class TinyFrameworkServing(object): 49 | """Missing associated documentation comment in .proto file.""" 50 | 51 | @staticmethod 52 | def Predict(request, 53 | target, 54 | options=(), 55 | channel_credentials=None, 56 | call_credentials=None, 57 | insecure=False, 58 | compression=None, 59 | wait_for_ready=None, 60 | timeout=None, 61 | metadata=None): 62 | return grpc.experimental.unary_unary(request, target, '/tinyframework.serving.TinyFrameworkServing/Predict', 63 | serving__pb2.PredictRequest.SerializeToString, 64 | serving__pb2.PredictResponse.FromString, 65 | options, channel_credentials, 66 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 67 | -------------------------------------------------------------------------------- /tf_serving/serving/serving.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | import threading 4 | import time 5 | import numpy as np 6 | from concurrent.futures import ThreadPoolExecutor 7 | import grpc 8 | import tinyframework as tf 9 | from .proto import serving_pb2, serving_pb2_grpc 10 | 11 | 12 | class TinyFrameworkServingService(serving_pb2_grpc.TinyFrameworkServingServicer): 13 | """ 14 | inference service; the steps are: 15 | 1. get the input/output nodes of the graph according to the interface signature defined in the model file 16 | 2. receive a request and deserialize the model input 17 | 3. run the computational graph forward to compute the inference result 18 | 4. get the output node value and return it to the caller 19 | """ 20 | 21 | def __init__(self, root_dir, model_file_name, weights_file_name): 22 | self.root_dir = root_dir 23 | self.model_file_name = model_file_name 24 | self.weights_file_name = weights_file_name 25 | 26 | saver = tf.trainer.Saver(self.root_dir) 27 | 28 | # load from file and deserialize the graph structure and weights, and get the serving interface signature 29 | _, service = saver.load(model_file_name=self.model_file_name, weights_file_name=self.weights_file_name) 30 | assert service is not None 31 | 32 | inputs = service.get('inputs', None) # get the input node name 33 | assert inputs is not None 34 | 35 | outputs = service.get('outputs', None) 36 | assert outputs is not None 37 | 38 | # get the input/output nodes from the graph according to the service signature 39 | self.input_node = tf.get_node_from_graph(inputs['name']) 40 | assert self.input_node is not None 41 | assert isinstance(self.input_node, tf.Variable) 42 | 43 | self.input_dim = self.input_node.dim 44 | 45 | self.output_node = tf.get_node_from_graph(outputs['name']) 46 | assert self.output_node is not None 47 | 48 | @staticmethod 49 | def deserialize(predict_request): 50 | infer_req_mat_list = [] 51 | for proto_mat in predict_request.data: 52 | dim = tuple(proto_mat.dim) 53 | mat = np.mat(proto_mat.value, dtype=np.float32) 54 | mat = np.reshape(mat, dim) 55 | infer_req_mat_list.append(mat) 56 | 57 | return infer_req_mat_list 58 | 59 | def _inference(self, inference_request): 60 | inference_resp_mat_list = [] 61 | for mat in inference_request: 62 | self.input_node.set_value(mat.T) 63 | self.output_node.forward() 64 | inference_resp_mat_list.append(self.output_node.value) 65 | 66 | return inference_resp_mat_list 67 | 68 | @staticmethod 69 | def serialize(inference_response): 70 | response = serving_pb2.PredictResponse() 71 | for mat in inference_response: 72 | proto_mat = response.data.add() 73 | proto_mat.value.extend(np.array(mat).flatten()) 74 | proto_mat.dim.extend(list(mat.shape)) 75 | 76 | return response 77 | 78 | def Predict(self, predict_request, context): 79 | # deserialize protobuf data into np.mat matrices 80 | inference_request = TinyFrameworkServingService.deserialize(predict_request) 81 | # call the graph to execute the output node's forward() 82 | inference_response = self._inference(inference_request) 83 | # serialize the inference result to protobuf format 84 | predict_response = TinyFrameworkServingService.serialize(inference_response) 85 | 86 | return predict_response 87 | 88 | 89 | class TinyFrameworkServer(object): 90 | """ 91 | gRPC server that hosts the TinyFramework serving service 92 | """ 93 | def __init__(self, host, root_dir, model_file_name, weights_file_name, max_workers=10): 94 | self.host = host 95 | self.max_workers = max_workers 96 | self.server = grpc.server(ThreadPoolExecutor(max_workers=self.max_workers)) 97 | 98 | serving_pb2_grpc.add_TinyFrameworkServingServicer_to_server( 99 | TinyFrameworkServingService(root_dir, model_file_name, weights_file_name), self.server) 100 | 101 | self.server.add_insecure_port(self.host) 102 | 103 | def serve(self): 104 | # start rpc serving 105 | self.server.start() 106 | print("TinyFramework server running on {}".format(self.host)) 107 | 108 | try: 109 | while True: 110 | time.sleep(3600 * 24) 111 | except KeyboardInterrupt: 112 | self.server.stop(0) -------------------------------------------------------------------------------- /tf_serving/tf_serving.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*-
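# NOTE: tf_serving.py is an empty stub in the repository. Below is a minimal,
# hypothetical launcher sketch (an addition, not original code) showing how the
# gRPC server from serving/serving.py could be started. The host/port are
# assumptions; the model/weights file names follow example/nn_iris.py.
from tf_serving.serving.serving import TinyFrameworkServer

if __name__ == '__main__':
    server = TinyFrameworkServer(host='127.0.0.1:50051',
                                 root_dir='./mode_save',
                                 model_file_name='nn_iris.json',
                                 weights_file_name='nn_iris.npz')
    server.serve()  # blocks until interrupted; Ctrl+C stops the server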
-------------------------------------------------------------------------------- /tinyframework/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from .tinyframework import * -------------------------------------------------------------------------------- /tinyframework/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/__pycache__/tinyframework.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/__pycache__/tinyframework.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/core/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from .graph import * 3 | from .node import * 4 | from .core import * -------------------------------------------------------------------------------- /tinyframework/core/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/core/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/core/__pycache__/core.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/core/__pycache__/core.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/core/__pycache__/graph.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/core/__pycache__/graph.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/core/__pycache__/node.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/core/__pycache__/node.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/core/core.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .node import Variable 4 | from .graph import default_graph 5 | 6 | 7 | def get_node_from_graph(node_name, name_scope=None, graph=None): 8 | if graph is None: 9 | graph = default_graph 10 | if name_scope: 11 | node_name = name_scope + '/' + node_name 12 | for node in graph.nodes: 13 | if node.name == node_name: 14 | return node 15 | return None 16 | 17 | 18 | def get_trainable_variables_from_graph(node_name=None, name_scope=None, graph=None): 19 | if graph is None: 20 | graph = default_graph 21 | if node_name is None: 22 | return [node for node in 
graph.nodes if isinstance(node, Variable) and node.trainable] 23 | if name_scope: 24 | node_name = name_scope + '/' + node_name 25 | return get_node_from_graph(node_name, graph=graph) 26 | 27 | 28 | def update_node_value_in_graph(node_name, new_value, name_scope=None, graph=None): 29 | node = get_node_from_graph(node_name, name_scope, graph) 30 | assert node is not None 31 | 32 | assert node.value.shape == new_value.shape 33 | node.value = new_value 34 | 35 | 36 | class NameScope(object): 37 | """ 38 | NameScope: context manager that sets the name scope of the default graph 39 | """ 40 | def __init__(self, name_scope): 41 | self.name_scope = name_scope 42 | 43 | def __enter__(self): 44 | default_graph.name_scope = self.name_scope 45 | return self 46 | 47 | def __exit__(self, exc_type, exc_value, exc_tb): 48 | default_graph.name_scope = None 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /tinyframework/core/graph.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | class Graph(object): 4 | """ 5 | TinyFramework computational graph 6 | """ 7 | def __init__(self): 8 | self.nodes = [] 9 | self.name_scope = None 10 | 11 | def add_node(self, node): 12 | self.nodes.append(node) 13 | 14 | def clear_jacobi(self): 15 | """ 16 | clear the Jacobi matrix of every node (needed before each new backward pass) 17 | """ 18 | for node in self.nodes: 19 | node.clear_jacobi() # the Node class must implement clear_jacobi() 20 | 21 | def reset_value(self): 22 | """ 23 | reset the value of every node 24 | """ 25 | for node in self.nodes: 26 | node.reset_value() # as above 27 | 28 | def node_cnt(self): 29 | return len(self.nodes) 30 | 31 | def draw(self): 32 | pass 33 | 34 | 35 | default_graph = Graph() -------------------------------------------------------------------------------- /tinyframework/core/node.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import abc 4 | import numpy as np 5 | from .graph import Graph, default_graph 6 | 7 | 8 | class Node(object): 9 | """ 10 | TinyFramework computational-graph node base class 11 | """ 12 | def __init__(self, *parents, **kargs): 13 | self.kargs = kargs 14 | self.graph = kargs.get('graph', default_graph) 15 | self.need_save = kargs.get('need_save', True) 16 | #self.gen_node_name(**kargs) 17 | self.parents = list(parents) # accepts one or more parent Node objects 18 | self.children = [] 19 | self.value = None 20 | self.jacobi = None # the Jacobi matrix from the loss node to this node 21 | """ 22 | assign the node name: 23 | a default name is used if the user does not assign one, e.g. 'Logistic:2'; 24 | if a name_scope is set, the node name is prefixed, e.g. 'outlayer/Logistic:2' 25 | """ 26 | self.name = kargs.get('name', '{}:{}'.format(self.__class__.__name__, self.graph.node_cnt())) 27 | if self.graph.name_scope: 28 | self.name = kargs.get('name', '{}/{}'.format(self.graph.name_scope, self.name)) 29 | 30 | for parent in self.parents: 31 | parent.children.append(self) # add self to each parent's children list 32 | 33 | # add the new node to the graph 34 | self.graph.add_node(self) 35 | 36 | def dimention(self): 37 | """ 38 | number of elements: rows * columns 39 | """ 40 | return self.value.shape[0] * self.value.shape[1] 41 | 42 | def get_parents(self): 43 | return self.parents 44 | 45 | def get_children(self): 46 | return self.children 47 | 48 | def gen_node_name(self): 49 | return self.name 50 | 51 | def shape(self): 52 | return self.value.shape 53 | 54 | def reset_value(self, recursive=True): 55 | self.value = None 56 | if recursive: 57 | for child in self.children: 58 | child.reset_value() 59 | 60 | def forward(self): 61 |
""" 62 | Recursively calculate the value of the node 63 | """ 64 | for node in self.parents: 65 | if node.value is None: # for complex graph, a node may have many parents 66 | node.forward() 67 | self.calculate() 68 | 69 | @abc.abstractmethod 70 | def calculate(self): 71 | """ 72 | calculate self by parent node 73 | """ 74 | @abc.abstractmethod 75 | def get_jacobi(self, parent): 76 | """ 77 | calculate self to every parent node jacobi 78 | need to implement in subClass 79 | f(w+δ) = f(w) + ▽f * δ , 80 | 若 f()是标量,则▽是梯度, 81 | 若 f()是向量,则以▽为行组成的矩阵为jacobi矩阵 82 | """ 83 | 84 | def backward(self, result): # actually, bp return a jacobi of loss->any node 85 | if self.jacobi is None: # for complex graph,a node may be visited many times 86 | if self is result: 87 | self.jacobi = np.mat(np.eye(self.dimention())) # 节点对自身的Jacobi是单位矩阵 88 | else: 89 | self.jacobi = np.mat(np.zeros((result.dimention(), self.dimention()))) # 构造0矩阵作为累加器 90 | """ 91 | 记某个节点为 f, f的子节点为s(可能为多个),结果节点对f的Jacobi记为Jrf,结果节点对s的Jacobi记为Jrs 92 | 每个子节点s对父节点f的Jacobi记为Jsf,数学上可以证明 Jrf = ∑(Jrs * Jsf) 93 | """ 94 | for child in self.get_children(): 95 | if child.value is not None: 96 | self.jacobi += child.backward(result) * child.get_jacobi(self) 97 | 98 | return self.jacobi 99 | 100 | def clear_jacobi(self): 101 | """ 102 | 一次前向传播后,计算出pred,与label运算后得到loss,求loss对每个可训练节点的jacobi视为bp 103 | bp过程中可以更新节点的value,下一次fp后再bp时,因value得到了更新,所以要清除jacobi 104 | """ 105 | self.jacobi = None 106 | 107 | """ 108 | 将节点抽象为两类,一类是op节点,用于向量/矩阵的加、减、乘、除、reshape ..etc.. 在ops包中继承Node节点实现 109 | 一类是Variable节点,用于weight、input、pred、loss..etc.. 110 | """ 111 | 112 | class Variable(Node): 113 | """ 114 | diffirent to op node, Variable has no parents, therefore, it must be assigned dimention 115 | """ 116 | def __init__(self, dim, init=False, trainable=True, **kargs): 117 | Node.__init__(self, **kargs) 118 | self.dim = dim 119 | if init: 120 | self.value = np.mat(np.random.normal(0, 0.01, self.dim)) 121 | self.trainable = trainable 122 | 123 | def set_value(self, value): 124 | assert isinstance(value, np.matrix) and value.shape == self.dim # 类型和维度判断 125 | self.reset_value() 126 | self.value = value -------------------------------------------------------------------------------- /tinyframework/dist/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding=utf-8 -*- 2 | 3 | from .dist import * 4 | from .proto import * 5 | from .ps import * -------------------------------------------------------------------------------- /tinyframework/dist/dist.py: -------------------------------------------------------------------------------- 1 | # -*- coding=utf-8 -*- 2 | 3 | import numpy as np 4 | from ..core import Node 5 | from .proto import comm_pb2 6 | 7 | 8 | class DistComm(object): 9 | @staticmethod 10 | def _serialize_proto_node_gradients(node_gradients_dict): 11 | """ 12 | serialize node_gradient dict to protobuf object 13 | """ 14 | proto_node_gradients = comm_pb2.NodeGradients() 15 | for name, g in node_gradients_dict.items(): 16 | proto_node = proto_node_gradients.nodes.add() 17 | if isinstance(name, Node): 18 | name = name.name 19 | proto_node.name = name 20 | proto_gradient = proto_node_gradients.gradients.add() 21 | proto_gradient.value.extend(np.array(g).flatten()) 22 | proto_gradient.dim.extend(list(g.shape)) 23 | 24 | return proto_node_gradients 25 | 26 | @staticmethod 27 | def _deserialize_proto_node_gradients(node_gradients): 28 | """ 29 | deserialize proto obj to node_gradient_dict 30 | """ 31 | proto_nodes = 
node_gradients.nodes 32 | proto_gradients = node_gradients.gradients 33 | assert len(proto_nodes) == len(proto_gradients) 34 | node_with_gradients = dict() 35 | 36 | for idx in range(len(proto_nodes)): 37 | node_name = proto_nodes[idx].name 38 | gradients_value = proto_gradients[idx].value 39 | gradients_dim = tuple(int(d) for d in proto_gradients[idx].dim) # dim is a repeated float in comm.proto; cast to int for reshape 40 | gradients_mat = np.mat(gradients_value, dtype=np.float32) 41 | gradients_mat = np.reshape(gradients_mat, gradients_dim) 42 | node_with_gradients[node_name] = gradients_mat 43 | 44 | return node_with_gradients 45 | 46 | @staticmethod 47 | def _serialize_proto_variable_weights(variable_weights_dict): 48 | """ 49 | serialize a variable-weights dict to a protobuf object 50 | """ 51 | var_weights_req_resp = comm_pb2.VariableWeightsReqResp() 52 | for name, mat in variable_weights_dict.items(): 53 | var = var_weights_req_resp.variables.add() 54 | if isinstance(name, Node): 55 | name = name.name 56 | var.name = name 57 | weight = var_weights_req_resp.weights.add() 58 | weight.value.extend(np.array(mat).flatten()) 59 | weight.dim.extend(list(mat.shape)) 60 | 61 | return var_weights_req_resp 62 | 63 | @staticmethod 64 | def _deserialize_proto_variable_weights(variable_weights_req_resp): 65 | """ 66 | deserialize a protobuf object into a variable-weights dict 67 | """ 68 | proto_variables = variable_weights_req_resp.variables 69 | proto_weights = variable_weights_req_resp.weights 70 | assert len(proto_weights) == len(proto_variables) 71 | 72 | var_weights_dict = dict() 73 | for idx in range(len(proto_weights)): 74 | var_name = proto_variables[idx].name 75 | weights_value = proto_weights[idx].value 76 | weights_dim = tuple(int(d) for d in proto_weights[idx].dim) # cast float dims to int, as above 77 | weights_mat = np.mat(weights_value, dtype=np.float32) 78 | weights_mat = np.reshape(weights_mat, weights_dim) 79 | var_weights_dict[var_name] = weights_mat 80 | 81 | return var_weights_dict 82 | 83 | -------------------------------------------------------------------------------- /tinyframework/dist/proto/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding=utf-8 -*- 2 | 3 | from .comm_pb2 import * 4 | from .parameter_server_pb2 import * 5 | from .parameter_server_pb2_grpc import * 6 | 7 | -------------------------------------------------------------------------------- /tinyframework/dist/proto/comm.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message Node{ 4 | string name = 1; 5 | string node_type = 2; 6 | } 7 | 8 | message Matrix{ 9 | repeated float value = 1; 10 | repeated float dim = 2; 11 | } 12 | 13 | message NodeGradients{ 14 | repeated Node nodes = 1; 15 | repeated Matrix gradients = 2; 16 | int32 acc_no = 3; 17 | } 18 | 19 | message VariableWeightsReqResp{ 20 | repeated Node variables = 1; 21 | repeated Matrix weights = 2; 22 | } 23 | 24 | //python -m grpc_tools.protoc --python_out=. --grpc_python_out=. -I. comm.proto -------------------------------------------------------------------------------- /tinyframework/dist/proto/comm_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: comm.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf import descriptor as _descriptor 6 | from google.protobuf import message as _message 7 | from google.protobuf import reflection as _reflection 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor.FileDescriptor( 17 | name='comm.proto', 18 | package='', 19 | syntax='proto3', 20 | serialized_options=None, 21 | create_key=_descriptor._internal_create_key, 22 | serialized_pb=b'\n\ncomm.proto\"\'\n\x04Node\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnode_type\x18\x02 \x01(\t\"$\n\x06Matrix\x12\r\n\x05value\x18\x01 \x03(\x02\x12\x0b\n\x03\x64im\x18\x02 \x03(\x02\"Q\n\rNodeGradients\x12\x14\n\x05nodes\x18\x01 \x03(\x0b\x32\x05.Node\x12\x1a\n\tgradients\x18\x02 \x03(\x0b\x32\x07.Matrix\x12\x0e\n\x06\x61\x63\x63_no\x18\x03 \x01(\x05\"L\n\x16VariableWeightsReqResp\x12\x18\n\tvariables\x18\x01 \x03(\x0b\x32\x05.Node\x12\x18\n\x07weights\x18\x02 \x03(\x0b\x32\x07.Matrixb\x06proto3' 23 | ) 24 | 25 | 26 | 27 | 28 | _NODE = _descriptor.Descriptor( 29 | name='Node', 30 | full_name='Node', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | containing_type=None, 34 | create_key=_descriptor._internal_create_key, 35 | fields=[ 36 | _descriptor.FieldDescriptor( 37 | name='name', full_name='Node.name', index=0, 38 | number=1, type=9, cpp_type=9, label=1, 39 | has_default_value=False, default_value=b"".decode('utf-8'), 40 | message_type=None, enum_type=None, containing_type=None, 41 | is_extension=False, extension_scope=None, 42 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 43 | _descriptor.FieldDescriptor( 44 | name='node_type', full_name='Node.node_type', index=1, 45 | number=2, type=9, cpp_type=9, label=1, 46 | has_default_value=False, default_value=b"".decode('utf-8'), 47 | message_type=None, enum_type=None, containing_type=None, 48 | is_extension=False, extension_scope=None, 49 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 50 | ], 51 | extensions=[ 52 | ], 53 | nested_types=[], 54 | enum_types=[ 55 | ], 56 | serialized_options=None, 57 | is_extendable=False, 58 | syntax='proto3', 59 | extension_ranges=[], 60 | oneofs=[ 61 | ], 62 | serialized_start=14, 63 | serialized_end=53, 64 | ) 65 | 66 | 67 | _MATRIX = _descriptor.Descriptor( 68 | name='Matrix', 69 | full_name='Matrix', 70 | filename=None, 71 | file=DESCRIPTOR, 72 | containing_type=None, 73 | create_key=_descriptor._internal_create_key, 74 | fields=[ 75 | _descriptor.FieldDescriptor( 76 | name='value', full_name='Matrix.value', index=0, 77 | number=1, type=2, cpp_type=6, label=3, 78 | has_default_value=False, default_value=[], 79 | message_type=None, enum_type=None, containing_type=None, 80 | is_extension=False, extension_scope=None, 81 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 82 | _descriptor.FieldDescriptor( 83 | name='dim', full_name='Matrix.dim', index=1, 84 | number=2, type=2, cpp_type=6, label=3, 85 | has_default_value=False, default_value=[], 86 | message_type=None, enum_type=None, containing_type=None, 87 | is_extension=False, extension_scope=None, 88 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 89 | ], 90 | extensions=[ 91 | ], 92 | nested_types=[], 93 | enum_types=[ 94 | ], 95 | serialized_options=None, 96 | 
is_extendable=False, 97 | syntax='proto3', 98 | extension_ranges=[], 99 | oneofs=[ 100 | ], 101 | serialized_start=55, 102 | serialized_end=91, 103 | ) 104 | 105 | 106 | _NODEGRADIENTS = _descriptor.Descriptor( 107 | name='NodeGradients', 108 | full_name='NodeGradients', 109 | filename=None, 110 | file=DESCRIPTOR, 111 | containing_type=None, 112 | create_key=_descriptor._internal_create_key, 113 | fields=[ 114 | _descriptor.FieldDescriptor( 115 | name='nodes', full_name='NodeGradients.nodes', index=0, 116 | number=1, type=11, cpp_type=10, label=3, 117 | has_default_value=False, default_value=[], 118 | message_type=None, enum_type=None, containing_type=None, 119 | is_extension=False, extension_scope=None, 120 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 121 | _descriptor.FieldDescriptor( 122 | name='gradients', full_name='NodeGradients.gradients', index=1, 123 | number=2, type=11, cpp_type=10, label=3, 124 | has_default_value=False, default_value=[], 125 | message_type=None, enum_type=None, containing_type=None, 126 | is_extension=False, extension_scope=None, 127 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 128 | _descriptor.FieldDescriptor( 129 | name='acc_no', full_name='NodeGradients.acc_no', index=2, 130 | number=3, type=5, cpp_type=1, label=1, 131 | has_default_value=False, default_value=0, 132 | message_type=None, enum_type=None, containing_type=None, 133 | is_extension=False, extension_scope=None, 134 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 135 | ], 136 | extensions=[ 137 | ], 138 | nested_types=[], 139 | enum_types=[ 140 | ], 141 | serialized_options=None, 142 | is_extendable=False, 143 | syntax='proto3', 144 | extension_ranges=[], 145 | oneofs=[ 146 | ], 147 | serialized_start=93, 148 | serialized_end=174, 149 | ) 150 | 151 | 152 | _VARIABLEWEIGHTSREQRESP = _descriptor.Descriptor( 153 | name='VariableWeightsReqResp', 154 | full_name='VariableWeightsReqResp', 155 | filename=None, 156 | file=DESCRIPTOR, 157 | containing_type=None, 158 | create_key=_descriptor._internal_create_key, 159 | fields=[ 160 | _descriptor.FieldDescriptor( 161 | name='variables', full_name='VariableWeightsReqResp.variables', index=0, 162 | number=1, type=11, cpp_type=10, label=3, 163 | has_default_value=False, default_value=[], 164 | message_type=None, enum_type=None, containing_type=None, 165 | is_extension=False, extension_scope=None, 166 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 167 | _descriptor.FieldDescriptor( 168 | name='weights', full_name='VariableWeightsReqResp.weights', index=1, 169 | number=2, type=11, cpp_type=10, label=3, 170 | has_default_value=False, default_value=[], 171 | message_type=None, enum_type=None, containing_type=None, 172 | is_extension=False, extension_scope=None, 173 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 174 | ], 175 | extensions=[ 176 | ], 177 | nested_types=[], 178 | enum_types=[ 179 | ], 180 | serialized_options=None, 181 | is_extendable=False, 182 | syntax='proto3', 183 | extension_ranges=[], 184 | oneofs=[ 185 | ], 186 | serialized_start=176, 187 | serialized_end=252, 188 | ) 189 | 190 | _NODEGRADIENTS.fields_by_name['nodes'].message_type = _NODE 191 | _NODEGRADIENTS.fields_by_name['gradients'].message_type = _MATRIX 192 | _VARIABLEWEIGHTSREQRESP.fields_by_name['variables'].message_type = _NODE 193 | 
_VARIABLEWEIGHTSREQRESP.fields_by_name['weights'].message_type = _MATRIX 194 | DESCRIPTOR.message_types_by_name['Node'] = _NODE 195 | DESCRIPTOR.message_types_by_name['Matrix'] = _MATRIX 196 | DESCRIPTOR.message_types_by_name['NodeGradients'] = _NODEGRADIENTS 197 | DESCRIPTOR.message_types_by_name['VariableWeightsReqResp'] = _VARIABLEWEIGHTSREQRESP 198 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 199 | 200 | Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), { 201 | 'DESCRIPTOR' : _NODE, 202 | '__module__' : 'comm_pb2' 203 | # @@protoc_insertion_point(class_scope:Node) 204 | }) 205 | _sym_db.RegisterMessage(Node) 206 | 207 | Matrix = _reflection.GeneratedProtocolMessageType('Matrix', (_message.Message,), { 208 | 'DESCRIPTOR' : _MATRIX, 209 | '__module__' : 'comm_pb2' 210 | # @@protoc_insertion_point(class_scope:Matrix) 211 | }) 212 | _sym_db.RegisterMessage(Matrix) 213 | 214 | NodeGradients = _reflection.GeneratedProtocolMessageType('NodeGradients', (_message.Message,), { 215 | 'DESCRIPTOR' : _NODEGRADIENTS, 216 | '__module__' : 'comm_pb2' 217 | # @@protoc_insertion_point(class_scope:NodeGradients) 218 | }) 219 | _sym_db.RegisterMessage(NodeGradients) 220 | 221 | VariableWeightsReqResp = _reflection.GeneratedProtocolMessageType('VariableWeightsReqResp', (_message.Message,), { 222 | 'DESCRIPTOR' : _VARIABLEWEIGHTSREQRESP, 223 | '__module__' : 'comm_pb2' 224 | # @@protoc_insertion_point(class_scope:VariableWeightsReqResp) 225 | }) 226 | _sym_db.RegisterMessage(VariableWeightsReqResp) 227 | 228 | 229 | # @@protoc_insertion_point(module_scope) 230 | -------------------------------------------------------------------------------- /tinyframework/dist/proto/comm_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | -------------------------------------------------------------------------------- /tinyframework/dist/proto/parameter_server.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "comm.proto"; 4 | 5 | service ParameterService{ 6 | // variable init interface 7 | rpc VariableWeightsInit(VariableWeightsReqResp) returns (VariableWeightsReqResp) {} 8 | // push interface, push each node gradient 9 | rpc Push(ParameterPushReq) returns (ParameterPushResp) {} 10 | // pull interface, pull each node gradient 11 | rpc Pull(ParameterPullReq) returns (ParameterPullResp) {} 12 | } 13 | 14 | 15 | //push request, with node_gradients sets 16 | message ParameterPushReq{ 17 | NodeGradients node_gradients = 1; 18 | } 19 | 20 | // push response, return nothing 21 | message ParameterPushResp{ 22 | 23 | } 24 | 25 | // pull request with node 26 | message ParameterPullReq{ 27 | repeated Node nodes = 1; 28 | } 29 | 30 | // pull response, return node gradients 31 | message ParameterPullResp{ 32 | NodeGradients node_gradients = 1; 33 | } -------------------------------------------------------------------------------- /tinyframework/dist/proto/parameter_server_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
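# A minimal sketch of the RPC flow declared in parameter_server.proto above (a hedged example,
# not part of the generated file; assumes a connected stub and pspb as an alias for
# parameter_server_pb2, as in ps.py below):
#   stub.VariableWeightsInit(init_req)                       # agree on the initial weights
#   stub.Push(pspb.ParameterPushReq(node_gradients=grads))   # push local gradients
#   resp = stub.Pull(pspb.ParameterPullReq())                # pull the averaged gradients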
3 | # source: parameter_server.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf import descriptor as _descriptor 6 | from google.protobuf import message as _message 7 | from google.protobuf import reflection as _reflection 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | import comm_pb2 as comm__pb2 15 | 16 | 17 | DESCRIPTOR = _descriptor.FileDescriptor( 18 | name='parameter_server.proto', 19 | package='', 20 | syntax='proto3', 21 | serialized_options=None, 22 | create_key=_descriptor._internal_create_key, 23 | serialized_pb=b'\n\x16parameter_server.proto\x1a\ncomm.proto\":\n\x10ParameterPushReq\x12&\n\x0enode_gradients\x18\x01 \x01(\x0b\x32\x0e.NodeGradients\"\x13\n\x11ParameterPushResp\"(\n\x10ParameterPullReq\x12\x14\n\x05nodes\x18\x01 \x03(\x0b\x32\x05.Node\";\n\x11ParameterPullResp\x12&\n\x0enode_gradients\x18\x01 \x01(\x0b\x32\x0e.NodeGradients2\xbf\x01\n\x10ParameterService\x12I\n\x13VariableWeightsInit\x12\x17.VariableWeightsReqResp\x1a\x17.VariableWeightsReqResp\"\x00\x12/\n\x04Push\x12\x11.ParameterPushReq\x1a\x12.ParameterPushResp\"\x00\x12/\n\x04Pull\x12\x11.ParameterPullReq\x1a\x12.ParameterPullResp\"\x00\x62\x06proto3' 24 | , 25 | dependencies=[comm__pb2.DESCRIPTOR,]) 26 | 27 | 28 | 29 | 30 | _PARAMETERPUSHREQ = _descriptor.Descriptor( 31 | name='ParameterPushReq', 32 | full_name='ParameterPushReq', 33 | filename=None, 34 | file=DESCRIPTOR, 35 | containing_type=None, 36 | create_key=_descriptor._internal_create_key, 37 | fields=[ 38 | _descriptor.FieldDescriptor( 39 | name='node_gradients', full_name='ParameterPushReq.node_gradients', index=0, 40 | number=1, type=11, cpp_type=10, label=1, 41 | has_default_value=False, default_value=None, 42 | message_type=None, enum_type=None, containing_type=None, 43 | is_extension=False, extension_scope=None, 44 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 45 | ], 46 | extensions=[ 47 | ], 48 | nested_types=[], 49 | enum_types=[ 50 | ], 51 | serialized_options=None, 52 | is_extendable=False, 53 | syntax='proto3', 54 | extension_ranges=[], 55 | oneofs=[ 56 | ], 57 | serialized_start=38, 58 | serialized_end=96, 59 | ) 60 | 61 | 62 | _PARAMETERPUSHRESP = _descriptor.Descriptor( 63 | name='ParameterPushResp', 64 | full_name='ParameterPushResp', 65 | filename=None, 66 | file=DESCRIPTOR, 67 | containing_type=None, 68 | create_key=_descriptor._internal_create_key, 69 | fields=[ 70 | ], 71 | extensions=[ 72 | ], 73 | nested_types=[], 74 | enum_types=[ 75 | ], 76 | serialized_options=None, 77 | is_extendable=False, 78 | syntax='proto3', 79 | extension_ranges=[], 80 | oneofs=[ 81 | ], 82 | serialized_start=98, 83 | serialized_end=117, 84 | ) 85 | 86 | 87 | _PARAMETERPULLREQ = _descriptor.Descriptor( 88 | name='ParameterPullReq', 89 | full_name='ParameterPullReq', 90 | filename=None, 91 | file=DESCRIPTOR, 92 | containing_type=None, 93 | create_key=_descriptor._internal_create_key, 94 | fields=[ 95 | _descriptor.FieldDescriptor( 96 | name='nodes', full_name='ParameterPullReq.nodes', index=0, 97 | number=1, type=11, cpp_type=10, label=3, 98 | has_default_value=False, default_value=[], 99 | message_type=None, enum_type=None, containing_type=None, 100 | is_extension=False, extension_scope=None, 101 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 102 | ], 103 | extensions=[ 104 | ], 105 | nested_types=[], 106 | enum_types=[ 107 
| ], 108 | serialized_options=None, 109 | is_extendable=False, 110 | syntax='proto3', 111 | extension_ranges=[], 112 | oneofs=[ 113 | ], 114 | serialized_start=119, 115 | serialized_end=159, 116 | ) 117 | 118 | 119 | _PARAMETERPULLRESP = _descriptor.Descriptor( 120 | name='ParameterPullResp', 121 | full_name='ParameterPullResp', 122 | filename=None, 123 | file=DESCRIPTOR, 124 | containing_type=None, 125 | create_key=_descriptor._internal_create_key, 126 | fields=[ 127 | _descriptor.FieldDescriptor( 128 | name='node_gradients', full_name='ParameterPullResp.node_gradients', index=0, 129 | number=1, type=11, cpp_type=10, label=1, 130 | has_default_value=False, default_value=None, 131 | message_type=None, enum_type=None, containing_type=None, 132 | is_extension=False, extension_scope=None, 133 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 134 | ], 135 | extensions=[ 136 | ], 137 | nested_types=[], 138 | enum_types=[ 139 | ], 140 | serialized_options=None, 141 | is_extendable=False, 142 | syntax='proto3', 143 | extension_ranges=[], 144 | oneofs=[ 145 | ], 146 | serialized_start=161, 147 | serialized_end=220, 148 | ) 149 | 150 | _PARAMETERPUSHREQ.fields_by_name['node_gradients'].message_type = comm__pb2._NODEGRADIENTS 151 | _PARAMETERPULLREQ.fields_by_name['nodes'].message_type = comm__pb2._NODE 152 | _PARAMETERPULLRESP.fields_by_name['node_gradients'].message_type = comm__pb2._NODEGRADIENTS 153 | DESCRIPTOR.message_types_by_name['ParameterPushReq'] = _PARAMETERPUSHREQ 154 | DESCRIPTOR.message_types_by_name['ParameterPushResp'] = _PARAMETERPUSHRESP 155 | DESCRIPTOR.message_types_by_name['ParameterPullReq'] = _PARAMETERPULLREQ 156 | DESCRIPTOR.message_types_by_name['ParameterPullResp'] = _PARAMETERPULLRESP 157 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 158 | 159 | ParameterPushReq = _reflection.GeneratedProtocolMessageType('ParameterPushReq', (_message.Message,), { 160 | 'DESCRIPTOR' : _PARAMETERPUSHREQ, 161 | '__module__' : 'parameter_server_pb2' 162 | # @@protoc_insertion_point(class_scope:ParameterPushReq) 163 | }) 164 | _sym_db.RegisterMessage(ParameterPushReq) 165 | 166 | ParameterPushResp = _reflection.GeneratedProtocolMessageType('ParameterPushResp', (_message.Message,), { 167 | 'DESCRIPTOR' : _PARAMETERPUSHRESP, 168 | '__module__' : 'parameter_server_pb2' 169 | # @@protoc_insertion_point(class_scope:ParameterPushResp) 170 | }) 171 | _sym_db.RegisterMessage(ParameterPushResp) 172 | 173 | ParameterPullReq = _reflection.GeneratedProtocolMessageType('ParameterPullReq', (_message.Message,), { 174 | 'DESCRIPTOR' : _PARAMETERPULLREQ, 175 | '__module__' : 'parameter_server_pb2' 176 | # @@protoc_insertion_point(class_scope:ParameterPullReq) 177 | }) 178 | _sym_db.RegisterMessage(ParameterPullReq) 179 | 180 | ParameterPullResp = _reflection.GeneratedProtocolMessageType('ParameterPullResp', (_message.Message,), { 181 | 'DESCRIPTOR' : _PARAMETERPULLRESP, 182 | '__module__' : 'parameter_server_pb2' 183 | # @@protoc_insertion_point(class_scope:ParameterPullResp) 184 | }) 185 | _sym_db.RegisterMessage(ParameterPullResp) 186 | 187 | 188 | 189 | _PARAMETERSERVICE = _descriptor.ServiceDescriptor( 190 | name='ParameterService', 191 | full_name='ParameterService', 192 | file=DESCRIPTOR, 193 | index=0, 194 | serialized_options=None, 195 | create_key=_descriptor._internal_create_key, 196 | serialized_start=223, 197 | serialized_end=414, 198 | methods=[ 199 | _descriptor.MethodDescriptor( 200 | name='VariableWeightsInit', 201 | 
full_name='ParameterService.VariableWeightsInit', 202 | index=0, 203 | containing_service=None, 204 | input_type=comm__pb2._VARIABLEWEIGHTSREQRESP, 205 | output_type=comm__pb2._VARIABLEWEIGHTSREQRESP, 206 | serialized_options=None, 207 | create_key=_descriptor._internal_create_key, 208 | ), 209 | _descriptor.MethodDescriptor( 210 | name='Push', 211 | full_name='ParameterService.Push', 212 | index=1, 213 | containing_service=None, 214 | input_type=_PARAMETERPUSHREQ, 215 | output_type=_PARAMETERPUSHRESP, 216 | serialized_options=None, 217 | create_key=_descriptor._internal_create_key, 218 | ), 219 | _descriptor.MethodDescriptor( 220 | name='Pull', 221 | full_name='ParameterService.Pull', 222 | index=2, 223 | containing_service=None, 224 | input_type=_PARAMETERPULLREQ, 225 | output_type=_PARAMETERPULLRESP, 226 | serialized_options=None, 227 | create_key=_descriptor._internal_create_key, 228 | ), 229 | ]) 230 | _sym_db.RegisterServiceDescriptor(_PARAMETERSERVICE) 231 | 232 | DESCRIPTOR.services_by_name['ParameterService'] = _PARAMETERSERVICE 233 | 234 | # @@protoc_insertion_point(module_scope) 235 | -------------------------------------------------------------------------------- /tinyframework/dist/proto/parameter_server_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import comm_pb2 as comm__pb2 6 | import parameter_server_pb2 as parameter__server__pb2 7 | 8 | 9 | class ParameterServiceStub(object): 10 | """Missing associated documentation comment in .proto file.""" 11 | 12 | def __init__(self, channel): 13 | """Constructor. 14 | 15 | Args: 16 | channel: A grpc.Channel. 
17 | """ 18 | self.VariableWeightsInit = channel.unary_unary( 19 | '/ParameterService/VariableWeightsInit', 20 | request_serializer=comm__pb2.VariableWeightsReqResp.SerializeToString, 21 | response_deserializer=comm__pb2.VariableWeightsReqResp.FromString, 22 | ) 23 | self.Push = channel.unary_unary( 24 | '/ParameterService/Push', 25 | request_serializer=parameter__server__pb2.ParameterPushReq.SerializeToString, 26 | response_deserializer=parameter__server__pb2.ParameterPushResp.FromString, 27 | ) 28 | self.Pull = channel.unary_unary( 29 | '/ParameterService/Pull', 30 | request_serializer=parameter__server__pb2.ParameterPullReq.SerializeToString, 31 | response_deserializer=parameter__server__pb2.ParameterPullResp.FromString, 32 | ) 33 | 34 | 35 | class ParameterServiceServicer(object): 36 | """Missing associated documentation comment in .proto file.""" 37 | 38 | def VariableWeightsInit(self, request, context): 39 | """variable init interface 40 | """ 41 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 42 | context.set_details('Method not implemented!') 43 | raise NotImplementedError('Method not implemented!') 44 | 45 | def Push(self, request, context): 46 | """push interface, push each node gradient 47 | """ 48 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 49 | context.set_details('Method not implemented!') 50 | raise NotImplementedError('Method not implemented!') 51 | 52 | def Pull(self, request, context): 53 | """pull interface, pull each node gradient 54 | """ 55 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 56 | context.set_details('Method not implemented!') 57 | raise NotImplementedError('Method not implemented!') 58 | 59 | 60 | def add_ParameterServiceServicer_to_server(servicer, server): 61 | rpc_method_handlers = { 62 | 'VariableWeightsInit': grpc.unary_unary_rpc_method_handler( 63 | servicer.VariableWeightsInit, 64 | request_deserializer=comm__pb2.VariableWeightsReqResp.FromString, 65 | response_serializer=comm__pb2.VariableWeightsReqResp.SerializeToString, 66 | ), 67 | 'Push': grpc.unary_unary_rpc_method_handler( 68 | servicer.Push, 69 | request_deserializer=parameter__server__pb2.ParameterPushReq.FromString, 70 | response_serializer=parameter__server__pb2.ParameterPushResp.SerializeToString, 71 | ), 72 | 'Pull': grpc.unary_unary_rpc_method_handler( 73 | servicer.Pull, 74 | request_deserializer=parameter__server__pb2.ParameterPullReq.FromString, 75 | response_serializer=parameter__server__pb2.ParameterPullResp.SerializeToString, 76 | ), 77 | } 78 | generic_handler = grpc.method_handlers_generic_handler( 79 | 'ParameterService', rpc_method_handlers) 80 | server.add_generic_rpc_handlers((generic_handler,)) 81 | 82 | 83 | # This class is part of an EXPERIMENTAL API. 
84 | class ParameterService(object): 85 | """Missing associated documentation comment in .proto file.""" 86 | 87 | @staticmethod 88 | def VariableWeightsInit(request, 89 | target, 90 | options=(), 91 | channel_credentials=None, 92 | call_credentials=None, 93 | insecure=False, 94 | compression=None, 95 | wait_for_ready=None, 96 | timeout=None, 97 | metadata=None): 98 | return grpc.experimental.unary_unary(request, target, '/ParameterService/VariableWeightsInit', 99 | comm__pb2.VariableWeightsReqResp.SerializeToString, 100 | comm__pb2.VariableWeightsReqResp.FromString, 101 | options, channel_credentials, 102 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 103 | 104 | @staticmethod 105 | def Push(request, 106 | target, 107 | options=(), 108 | channel_credentials=None, 109 | call_credentials=None, 110 | insecure=False, 111 | compression=None, 112 | wait_for_ready=None, 113 | timeout=None, 114 | metadata=None): 115 | return grpc.experimental.unary_unary(request, target, '/ParameterService/Push', 116 | parameter__server__pb2.ParameterPushReq.SerializeToString, 117 | parameter__server__pb2.ParameterPushResp.FromString, 118 | options, channel_credentials, 119 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 120 | 121 | @staticmethod 122 | def Pull(request, 123 | target, 124 | options=(), 125 | channel_credentials=None, 126 | call_credentials=None, 127 | insecure=False, 128 | compression=None, 129 | wait_for_ready=None, 130 | timeout=None, 131 | metadata=None): 132 | return grpc.experimental.unary_unary(request, target, '/ParameterService/Pull', 133 | parameter__server__pb2.ParameterPullReq.SerializeToString, 134 | parameter__server__pb2.ParameterPullResp.FromString, 135 | options, channel_credentials, 136 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 137 | -------------------------------------------------------------------------------- /tinyframework/dist/ps/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding=utf-8 -*- 2 | 3 | from .ps import * -------------------------------------------------------------------------------- /tinyframework/dist/ps/ps.py: -------------------------------------------------------------------------------- 1 | # -*- coding=utf-8 -*- 2 | 3 | import threading 4 | import time 5 | from concurrent.futures import ThreadPoolExecutor 6 | 7 | import numpy as np 8 | import grpc 9 | from ...core import Node 10 | from ..dist import DistComm 11 | from ..proto import parameter_server_pb2 as pspb 12 | from ..proto import parameter_server_pb2_grpc as psrpc 13 | 14 | 15 | class ParameterService(psrpc.ParameterServiceServicer): 16 | """ 17 | Parameter service supports sync mode and async mode 18 | sync_mode: 19 | 1. all workers push gradients to ps 20 | 2.
all workers pull gradients to local 21 | async_mode: 22 | each worker talks to ps independently, pushing its own gradients or pulling the averaged gradients at any time 23 | """ 24 | 25 | def __init__(self, worker_num, sync=True): 26 | self.node_gradients_cache = dict() 27 | self.variable_weights_cache = dict() # for init 28 | 29 | self.worker_num = worker_num 30 | self.sync = sync 31 | self.cur_push_num = 0 32 | self.cur_pull_num = self.worker_num 33 | 34 | self.cond = threading.Condition() 35 | self.push_lock = threading.Lock() 36 | self.init_lock = threading.Lock() 37 | self.is_init = False 38 | self.acc_no = 0 39 | 40 | def Push(self, push_req, context): 41 | """ 42 | push gradients to ps 43 | """ 44 | node_with_gradients, acc_no = self._deserialize_push_req(push_req) 45 | 46 | # store gradients to local cache 47 | if self.sync: 48 | self._push_sync(node_with_gradients, acc_no) 49 | else: 50 | self._push_async(node_with_gradients, acc_no) 51 | return pspb.ParameterPushResp() 52 | 53 | def _push_sync(self, node_with_gradients, acc_no): 54 | """ push sync mode """ 55 | # add lock 56 | if self.cond.acquire(): 57 | # waiting until all workers complete last iteration pull ops 58 | while self.cur_pull_num != self.worker_num: 59 | self.cond.wait() 60 | 61 | # record push_nums 62 | self.cur_push_num += 1 63 | # update gradients to cache 64 | self._update_gradients_cache(node_with_gradients) 65 | # accumulate gradients nums 66 | self.acc_no += acc_no 67 | # if all workers complete gradients push, notify all to pull 68 | if self.cur_push_num >= self.worker_num: 69 | self.cur_pull_num = 0 70 | self.cond.notify_all() 71 | self.cond.release() 72 | else: 73 | self.cond.wait() 74 | 75 | def _push_async(self, node_with_gradients, acc_no): 76 | self.push_lock.acquire() 77 | self._update_gradients_cache(node_with_gradients) 78 | self.acc_no += acc_no 79 | self.push_lock.release() 80 | 81 | def Pull(self, pull_req, context): 82 | """ 83 | pull gradient from ps 84 | """ 85 | if self.sync: 86 | resp = self._pull_sync() 87 | else: 88 | resp = self._pull_async() 89 | return resp 90 | 91 | def _pull_sync(self): 92 | """ 93 | sync mode 94 | """ 95 | # add lock 96 | if self.cond.acquire(): 97 | # waiting until all workers complete last push ops 98 | while self.cur_push_num != self.worker_num: 99 | self.cond.wait() 100 | # record pull counts 101 | self.cur_pull_num += 1 102 | # calculate average gradient 103 | self._gradients_cache_mean() 104 | resp = self._serialize_pull_resp() 105 | 106 | # notify all workers to push again when every worker has pulled 107 | if self.cur_pull_num >= self.worker_num: 108 | self.cur_push_num = 0 # reset the push counter so the next round of pushes can start 109 | self._reset_gradients_cache() 110 | self.cond.notify_all() 111 | self.cond.release() 112 | else: 113 | self.cond.wait() 114 | return resp 115 | 116 | def _pull_async(self): 117 | """ 118 | async mode 119 | """ 120 | self.push_lock.acquire() 121 | self._gradients_cache_mean() 122 | resp = self._serialize_pull_resp() 123 | self._reset_gradients_cache() 124 | self.push_lock.release() 125 | return resp 126 | 127 | def _update_gradients_cache(self, node_with_gradients): 128 | # using node name to update gradients cache 129 | for node_name, gradients in node_with_gradients.items(): 130 | if node_name in self.node_gradients_cache: 131 | exist_gradient = self.node_gradients_cache[node_name] 132 | assert exist_gradient.shape == gradients.shape 133 | self.node_gradients_cache[node_name] = exist_gradient + gradients 134 | else: 135 | self.node_gradients_cache[node_name] = gradients 136 | 137 | def _gradients_cache_mean(self):
""" 139 | get mean gradient in cache 140 | """ 141 | if self.acc_no != 0: 142 | for name, gradient in self.node_gradients_cache.items(): 143 | self.node_gradients_cache[name] = self.node_gradients_cache[name] / self.acc_no 144 | self.acc_no = 0 145 | 146 | def _deserialize_push_req(self, push_req): 147 | """ 148 | deserialize push_req 149 | """ 150 | acc_no = push_req.node_gradients.acc_no 151 | node_with_gradients = DistComm._deserialize_proto_node_gradients(push_req.node_gradients) 152 | return node_with_gradients, acc_no 153 | 154 | def _serialize_pull_resp(self): 155 | """ 156 | serialize pull response 157 | """ 158 | proto_node_gradients = DistComm._serialize_proto_node_gradients(self.node_gradients_cache) 159 | resp = pspb.ParameterPullResp(node_gradients=proto_node_gradients) 160 | return resp 161 | 162 | def _reset_gradients_cache(self): 163 | self.node_gradients_cache.clear() 164 | 165 | def VariableWeightsInit(self, variable_weights_req, context): 166 | """ 167 | weight initialize, all worker push their init_value to ps, ps use the first value received as init_weight 168 | and notify all workers 169 | """ 170 | self.init_lock.acquire() 171 | # if has not been initialized yet, use the first weight ps received 172 | if not self.is_init: 173 | self.variable_weights_cache = DistComm._deserialize_proto_variable_weights(variable_weights_req) 174 | print('[INIT] Parameter service variable weights initialized!') 175 | 176 | # other workers using exist init_weight 177 | resp = DistComm._serialize_proto_variable_weights(self.varibale_weights_cache) 178 | self.is_init = True 179 | self.init_lock.release() 180 | return resp 181 | 182 | 183 | class ParameterServiceClient(object): 184 | """ 185 | 186 | """ 187 | 188 | def __init__(self, ps_host): 189 | # create grpc_stub 190 | self.stub = psrpc.ParameterServiceStub(grpc.insecure_channel(ps_host)) 191 | assert self.stub is not None 192 | print('[GRPC] COnnected to parameter service:{}'.format(ps_host)) 193 | 194 | def variable_weights_init(self, var_weights_dict): 195 | init_req = DistComm._serialize_proto_variable_weights(var_weights_dict) 196 | init_resp = self.stub.VariableWeightsInit(init_req) 197 | duplicate_var_weights_dict = DistComm._deserialize_proto_variable_weights(init_resp) 198 | return duplicate_var_weights_dict 199 | 200 | def push_gradients(self, acc_gradients, acc_no): 201 | # serialize gradients to proto obj 202 | proto_node_gradients = DistComm._serialize_proto_node_gradients(acc_gradients) 203 | proto_node_gradients.acc_no = acc_no 204 | # create req and push 205 | push_req = pspb.ParameterPushReq(node_gradients=proto_node_gradients) 206 | resp = self.stub.push(push_req) 207 | return resp 208 | 209 | def pull_gradients(self, node_name=None): 210 | # create pull req and pull 211 | pull_req = pspb.ParameterPullReq() 212 | pull_resp = self.stub.Pull(pull_req) 213 | # deserialize receive proto obj 214 | node_gradients_dict = DistComm._deserialize_proto_node_gradients(pull_resp.node_gradients) 215 | return node_gradients_dict 216 | 217 | 218 | class ParameterServiceServer(object): 219 | """ 220 | 221 | """ 222 | 223 | def __init__(self, cluster_config, sync=True, max_workers=10): 224 | self.worker_num = len(cluster_config['workers']) 225 | self.host = cluster_config['ps'][0] 226 | self.sync = sync 227 | self.max_workers = max_workers 228 | 229 | self.server = grpc.server(ThreadPoolExecutor(max_workers=self.max_workers)) 230 | psrpc.add_ParameterServiceServicer_to_server(ParameterService(self.worker_num, self.sync), self.server) 
231 | self.server.add_insecure_port(self.host) 232 | 233 | def serve(self): 234 | self.server.start() # start grpc service 235 | print('[PS] Parameter server (mode: {}) running on {}, worker num: {}'.format( 236 | 'Sync' if self.sync else 'Async', self.host, self.worker_num)) 237 | try: 238 | while True: 239 | time.sleep(3600*24) 240 | except KeyboardInterrupt: 241 | self.server.stop(0) 242 | -------------------------------------------------------------------------------- /tinyframework/layer/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from .layer import * -------------------------------------------------------------------------------- /tinyframework/layer/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/layer/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/layer/__pycache__/layer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/layer/__pycache__/layer.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/layer/layer.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from ..core import * 4 | from ..ops import * 5 | 6 | 7 | def fc(input_data, input_shape, output_shape, activation): 8 | """ 9 | fully connected layer 10 | :param input_data: input node, an n*1 column vector 11 | :param input_shape: input dimension n 12 | :param output_shape: output dimension m 13 | :param activation: activation name, "ReLU" / "Logistic", or None for a linear layer 14 | :return: output node of the layer, an m*1 column vector 15 | """ 16 | weights = Variable((output_shape, input_shape), init=True, trainable=True) # m*n 17 | bias = Variable((output_shape, 1), init=True, trainable=True) 18 | affine = Add(MatMul(weights, input_data), bias) # linear transform plus bias, known in functional analysis as an affine transformation 19 | 20 | if activation == "ReLU": 21 | return ReLU(affine) 22 | elif activation == "Logistic": 23 | return Logistic(affine) 24 | else: 25 | return affine 26 | 27 | 28 | def pooling(feature_maps, kernel_shape, stride): 29 | """ 30 | pooling layer 31 | :param feature_maps: list of feature map nodes, all with the same shape 32 | :param kernel_shape: tuple, pooling window size 33 | :param stride: tuple, strides in the H and V directions 34 | :return: list of pooled feature map nodes 35 | """ 36 | outputs = [] 37 | for feat_map in feature_maps: 38 | outputs.append(MaxPooling(feat_map, size=kernel_shape, stride=stride)) 39 | return outputs 40 | 41 | 42 | def conv(feature_maps, input_shape, kernels_n, kernel_shape, activation): 43 | """ 44 | :param feature_maps: list of input feature map nodes, all with the same shape 45 | :param input_shape: tuple, shape of each input feature map 46 | :param kernels_n: number of conv kernels 47 | :param kernel_shape: tuple, shape of each kernel 48 | :param activation: activation name, "ReLU" / "Logistic", or None 49 | :return: list of output feature map nodes, one per kernel 50 | """ 51 | # construct an all-ones matrix with the same shape as the input 52 | ones = Variable(input_shape, init=False, trainable=False) 53 | ones.set_value(np.mat(np.ones(input_shape))) 54 | 55 | outputs = [] 56 | # for every kernel in kernels 57 | for i in range(kernels_n): 58 | channels = [] 59 | for feat_map in feature_maps: 60 | kernel = Variable(kernel_shape, init=True, trainable=True) 61 | convresult = Conv(feat_map, kernel) 62 | channels.append(convresult) 63 | 64 | channels = Add(*channels) 65 | bias =
ScalarMultiply(Variable((1, 1), init=True, trainable=True), ones) 66 | affine = Add(channels, bias) 67 | 68 | if activation == "ReLU": 69 | outputs.append(ReLU(affine)) 70 | elif activation == "Logistic": 71 | outputs.append(Logistic(affine)) 72 | else: 73 | outputs.append(affine) 74 | # the number of output feature maps equals the number of kernels 75 | assert len(outputs) == kernels_n 76 | return outputs -------------------------------------------------------------------------------- /tinyframework/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .ops import * 4 | from .loss import * 5 | from .metrics import * -------------------------------------------------------------------------------- /tinyframework/ops/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/ops/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/ops/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/ops/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/ops/__pycache__/metrics.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/ops/__pycache__/metrics.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/ops/__pycache__/ops.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/ops/__pycache__/ops.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/ops/loss.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from ..core import Node 4 | from ..ops import SoftMax 5 | import numpy as np 6 | 7 | 8 | class LossFunction(Node): 9 | """ 10 | loss function abstract class 11 | """ 12 | pass 13 | 14 | 15 | class LogLoss(LossFunction): 16 | def calculate(self): 17 | assert len(self.parents) == 1 18 | x = self.parents[0].value 19 | self.value = np.log(1 + np.power(np.e, np.where(-x > 1e2, 1e2, -x))) 20 | 21 | def get_jacobi(self, parent): 22 | x = parent.value 23 | diag = -1 / (1 + np.power(np.e, np.where(x > 1e2, 1e2, x))) 24 | return np.diag(diag.ravel()) 25 | 26 | 27 | class CrossEntropy(LossFunction): 28 | """ 29 | cross entropy of softmax(parents[0]) against the one-hot label in parents[1] 30 | """ 31 | def calculate(self): 32 | prob = SoftMax.softmax(self.parents[0].value) 33 | self.value = np.mat( 34 | -np.sum(np.multiply(self.parents[1].value, np.log(prob + 1e-10)))) 35 | 36 | def get_jacobi(self, parent): 37 | prob = SoftMax.softmax(self.parents[0].value) 38 | if parent is self.parents[0]: 39 | return (prob - self.parents[1].value).T 40 | else: 41 | return (-np.log(prob)).T 42 | -------------------------------------------------------------------------------- /tinyframework/ops/metrics.py:
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import numpy as np 4 | import abc 5 | from ..core import Node 6 | 7 | 8 | class Metrics(Node): 9 | """ 10 | Metrics abstract class 11 | """ 12 | def __init__(self, *parents, **kargs): 13 | kargs['need_save'] = kargs.get('need_save', False) 14 | Node.__init__(self, *parents, **kargs) 15 | self.init() 16 | 17 | def reset(self): 18 | self.reset_value() 19 | self.init() 20 | 21 | @abc.abstractmethod 22 | def init(self): 23 | """ 24 | metric-specific initialization, implemented case by case 25 | """ 26 | pass 27 | 28 | @staticmethod 29 | def prob2label(prob, thresholds=0.5): 30 | if prob.shape[0] > 1: 31 | # multi-class classifier 32 | labels = np.zeros((prob.shape[0], 1)) 33 | labels[np.argmax(prob, axis=0)] = 1 34 | else: 35 | # binary classifier 36 | labels = np.where(prob < thresholds, -1, 1) 37 | 38 | return labels 39 | 40 | def get_jacobi(self, parent): 41 | """ 42 | calculating a jacobi is meaningless for metrics 43 | """ 44 | raise NotImplementedError() 45 | 46 | def value2str(self): 47 | return "{}:{:.4f}".format(self.__class__.__name__, self.value) 48 | 49 | 50 | class Accuracy(Metrics): 51 | """ 52 | for accuracy 53 | Acc = (TP+TN)/All 54 | """ 55 | def __init__(self, *parents, **kargs): 56 | Metrics.__init__(self, *parents, **kargs) 57 | 58 | def init(self): 59 | self.correct_num = 0 60 | self.total_num = 0 61 | 62 | def calculate(self): 63 | """ 64 | assume parents[0] as prob, parents[1] as label 65 | """ 66 | pred = Metrics.prob2label(self.parents[0].value) 67 | groundtruth = self.parents[1].value 68 | assert len(pred) == len(groundtruth) 69 | if pred.shape[0] > 1: # multi-class: one-hot labels, count hits via element-wise product 70 | self.correct_num += np.sum(np.multiply(pred, groundtruth)) 71 | self.total_num += pred.shape[1] 72 | else: 73 | self.correct_num += np.sum(pred == groundtruth) 74 | self.total_num += len(pred) 75 | self.value = 0 76 | if self.total_num > 0: 77 | self.value = float(self.correct_num) / self.total_num 78 | 79 | 80 | class Precision(Metrics): 81 | """ 82 | for precision 83 | precision = TP/(TP+FP) 84 | also assume parents[0] as prob, parents[1] as label 85 | """ 86 | def __init__(self, *parents, **kargs): 87 | Metrics.__init__(self, *parents, **kargs) 88 | 89 | def init(self): 90 | self.pred_positive = 0 # predicted as positive 91 | self.true_positive = 0 # predicted positive and actually positive 92 | 93 | def calculate(self): 94 | assert self.parents[0].value.shape[1] == 1 95 | pred_label = Metrics.prob2label(self.parents[0].value) 96 | groundtruth = self.parents[1].value 97 | self.pred_positive += np.sum(pred_label == 1) 98 | self.true_positive += np.sum(np.logical_and(pred_label == groundtruth, pred_label == 1)) # element-wise AND; a bare 'and' is ambiguous for arrays 99 | self.value = 0 100 | if self.pred_positive > 0: 101 | self.value = float(self.true_positive) / self.pred_positive 102 | 103 | 104 | class Recall(Metrics): 105 | """ 106 | for recall 107 | recall = TP/(TP+FN) 108 | also assume parents[0] as prob, parents[1] as label 109 | """ 110 | def __init__(self, *parents, **kargs): 111 | Metrics.__init__(self, *parents, **kargs) 112 | 113 | def init(self): 114 | self.real_positive = 0 # ground-truth positives 115 | self.true_positive = 0 # predicted positive and actually positive 116 | 117 | def calculate(self): 118 | assert self.parents[0].value.shape[0] == 1 119 | pred_label = Metrics.prob2label(self.parents[0].value) 120 | groundtruth = self.parents[1].value 121 | 122 | self.real_positive += np.sum(groundtruth == 1) 123 | self.true_positive += np.sum(np.logical_and(pred_label == groundtruth, pred_label == 1)) 124 | self.value = 0 125 |
if self.true_positive > 0: 126 | self.value = float(self.true_positive) / self.real_positive 127 | 128 | 129 | class ROC(Metrics): 130 | """ 131 | ROC curve 132 | """ 133 | def __init__(self, *parents, **kargs): 134 | Metrics.__init__(self, *parents, **kargs) 135 | 136 | def init(self): 137 | self.count = 100 # set 100 threshold points 138 | self.real_positive = 0 139 | self.real_negative = 0 140 | self.true_positive = np.array([0] * self.count) # pred 1 and real 1 141 | self.false_positive = np.array([0] * self.count) # pred 1 but real 0 142 | self.tpr = np.array([0] * self.count) 143 | self.fpr = np.array([0] * self.count) 144 | 145 | def calculate(self): 146 | prob = self.parents[0].value 147 | groundtruth = self.parents[1].value 148 | self.real_positive += np.sum(groundtruth == 1) 149 | self.real_negative += np.sum(groundtruth == 0) 150 | 151 | thresholds = list(np.arange(0.01, 1.00, 0.01)) # 99 152 | # using thresholds to generate TP and FP 153 | for idx in range(0, len(thresholds)): 154 | pred = Metrics.prob2label(prob, thresholds=thresholds[idx]) 155 | self.true_positive[idx] += np.sum(np.logical_and(pred == groundtruth, pred == 1)) 156 | self.false_positive[idx] += np.sum(np.logical_and(pred != groundtruth, pred == 1)) 157 | 158 | # calculate tpr and fpr (guard the denominators; the counters themselves are arrays) 159 | if self.real_positive > 0 and self.real_negative > 0: 160 | self.tpr = self.true_positive / self.real_positive 161 | self.fpr = self.false_positive / self.real_negative 162 | """ 163 | draw curve 164 | plt.xlim(0,1) 165 | plt.ylim(0,1) 166 | plt.plot(self.fpr, self.tpr) 167 | plt.show() 168 | """ 169 | 170 | 171 | class AUC(Metrics): 172 | """ 173 | calculate the Area Under the ROC Curve (AUC) 174 | """ 175 | def __init__(self, *parents, **kargs): 176 | Metrics.__init__(self, *parents, **kargs) 177 | 178 | def init(self): 179 | self.real_positive = [] 180 | self.real_negative = [] 181 | 182 | def calculate(self): 183 | prob = self.parents[0].value 184 | groundtruth = self.parents[1].value 185 | 186 | if groundtruth[0, 0] == 1: 187 | self.real_positive.append(prob) 188 | else: 189 | self.real_negative.append(prob) 190 | self.total_area = len(self.real_positive) * len(self.real_negative) 191 | 192 | def auc_area(self): 193 | count = 0 194 | # visit all m*n (positive, negative) sample pairs and count the pairs where the positive sample scores higher 195 | for real_p in self.real_positive: 196 | for real_n in self.real_negative: 197 | if real_p > real_n: 198 | count += 1 199 | self.value = float(count) / self.total_area 200 | 201 | 202 | class F1Score(Metrics): 203 | """ 204 | F1score = 2 * (precision * recall) / (precision + recall) 205 | """ 206 | def __init__(self, *parents, **kargs): 207 | Metrics.__init__(self, *parents, **kargs) 208 | self.true_positive = 0 209 | self.pred_positive = 0 210 | self.real_positive = 0 211 | 212 | def calculate(self): 213 | assert self.parents[0].value.shape[1] == 1 214 | pred = Metrics.prob2label(self.parents[0].value) 215 | groundtruth = self.parents[1].value 216 | 217 | self.pred_positive += np.sum(pred) 218 | self.real_positive += np.sum(groundtruth) 219 | self.true_positive += np.multiply(pred, groundtruth).sum() 220 | self.value = 0 221 | precision = 0 222 | recall = 0 223 | 224 | if self.pred_positive > 0: 225 | precision = float(self.true_positive) / self.pred_positive 226 | 227 | if self.real_positive > 0: 228 | recall = float(self.true_positive) / self.real_positive 229 | 230 | if precision + recall > 0: 231 | self.value = 2 * (np.multiply(precision, recall)) / (precision + recall)
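# Minimal usage sketch for the metrics above (a hedged example; assumes prob_node and
# label_node are Variable nodes holding a probability and a label, set up elsewhere
# with tinyframework, and that Node provides forward() as used by the optimizer):
#   acc = Accuracy(prob_node, label_node)
#   acc.forward()           # after each batch; the counters accumulate across calls
#   print(acc.value2str())  # e.g. "Accuracy:0.9123"
#   acc.reset()             # clear the counters before the next epoch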
-------------------------------------------------------------------------------- /tinyframework/ops/ops.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from ..core import Node 4 | import numpy as np 5 | 6 | 7 | class Operator(Node): 8 | """ 9 | Node abstract operator 10 | """ 11 | pass 12 | 13 | 14 | class Add(Operator): 15 | """ 16 | matrix add 17 | """ 18 | def calculate(self): 19 | self.value = np.mat(np.zeros(self.parents[0].shape())) 20 | for parent in self.parents: 21 | self.value += parent.value 22 | 23 | def get_jacobi(self, parent): 24 | # the jacobi of an Add node w.r.t. any parent is the identity matrix 25 | return np.mat(np.eye(self.dimention())) 26 | 27 | 28 | def fill_diagonal(dst, filler): 29 | """ 30 | fill filler into dst's diagonal; filler may be a matrix 31 | e.g. filling a 4*4 zero matrix with [[1,2],[3,4]] gives: 32 | [[1, 2, 0, 0], 33 | [3, 4, 0, 0], 34 | [0, 0, 1, 2], 35 | [0, 0, 3, 4]]) 36 | """ 37 | assert (dst.shape[0] / filler.shape[0]) == (dst.shape[1] / filler.shape[1]) 38 | n = int(dst.shape[0]/filler.shape[0]) 39 | 40 | row, col = filler.shape 41 | for i in range(n): 42 | dst[i*row:(i+1)*row, i*col:(i+1)*col] = filler 43 | 44 | return dst 45 | 46 | 47 | class MatMul(Operator): 48 | """ 49 | matrix multiplication 50 | """ 51 | def calculate(self): 52 | # A:m*k, B:k*n; exactly 2 parents 53 | assert len(self.parents) == 2 and self.parents[0].shape()[1] == self.parents[1].shape()[0] 54 | self.value = self.parents[0].value * self.parents[1].value 55 | 56 | def get_jacobi(self, parent): 57 | """ 58 | partial derivatives of a matrix product 59 | C = A * B, A:m*n, B:n*k, therefore C:m*k 60 | mathematically, the jacobi of C w.r.t. A is an (m*k) x (m*n) generalized diagonal matrix, 61 | and the jacobi of C w.r.t. B is an (m*k) x (n*k) generalized diagonal matrix 62 | the derivation is tedious; the basic idea is to flatten C, A and B row-wise into vectors 63 | """ 64 | zeros = np.mat(np.zeros((self.dimention(), parent.dimention()))) 65 | if parent is self.parents[0]: 66 | return fill_diagonal(zeros, self.parents[1].value.T) 67 | else: 68 | jacobi = fill_diagonal(zeros, self.parents[0].value) 69 | row_sort = np.arange(self.dimention()).reshape(self.shape()[::-1]).T.ravel() # flatten would return a copy; ravel avoids it 70 | col_sort = np.arange(parent.dimention()).reshape(parent.shape()[::-1]).T.ravel() 71 | return jacobi[row_sort, :][:, col_sort] 72 | 73 | 74 | class Logistic(Operator): 75 | """ 76 | 1.0 / (1.0 + exp(-x)) 77 | """ 78 | def calculate(self): 79 | x = self.parents[0].value 80 | self.value = np.mat(1.0 / (1.0 + np.power(np.e, np.where(-x > 1e2, 1e2, -x)))) # avoid overflow 81 | 82 | def get_jacobi(self, parent): 83 | return np.diag(np.mat(np.multiply(self.value, 1-self.value)).A1) 84 | 85 | 86 | class ReLU(Operator): 87 | """ 88 | leaky ReLU: return max(slop*x, x) 89 | """ 90 | slop = 0.2 91 | 92 | def calculate(self): 93 | self.value = np.mat(np.where( 94 | self.parents[0].value > 0.0, 95 | self.parents[0].value, 96 | self.slop * self.parents[0].value) 97 | ) 98 | 99 | def get_jacobi(self, parent): 100 | return np.diag(np.where(self.parents[0].value.A1 > 0.0, 1.0, self.slop)) 101 | 102 | 103 | class SoftMax(Operator): 104 | """ 105 | softmax over the input vector 106 | """ 107 | @staticmethod 108 | def softmax(x): 109 | # avoid overflow 110 | x[x > 1e2] = 1e2 111 | ex = np.power(np.e, x) 112 | return ex / np.sum(ex) 113 | 114 | def calculate(self): 115 | self.value = SoftMax.softmax(self.parents[0].value) 116 | 117 | def get_jacobi(self, parent): 118 | """ 119 | pair SoftMax with CrossEntropy, which carries the jacobi 120 | """ 121 | raise NotImplementedError("do not use SoftMax's own jacobi; use CrossEntropy instead") 122 | 123 | 124 | class Reshape(Operator): 125 | """ 126 | reshape parent 127 | """ 128 | def __init__(self, *parent, **kargs): 129 |
Operator.__init__(self, *parent, **kargs) 130 | self.to_shape = kargs.get('shape') 131 | assert isinstance(self.to_shape, tuple) and len(self.to_shape) == 2 132 | 133 | def calculate(self): 134 | self.value = self.parents[0].value.reshape(self.to_shape) 135 | 136 | def get_jacobi(self, parent): 137 | assert parent is self.parents[0] 138 | # it can be shown that the jacobi of reshape is the identity: reshaping only reorders elements 139 | return np.mat(np.eye(self.dimention())) 140 | 141 | 142 | class Multiply(Operator): 143 | """ 144 | element-wise matrix multiplication 145 | """ 146 | def calculate(self): 147 | self.value = np.multiply(self.parents[0].value, self.parents[1].value) 148 | 149 | def get_jacobi(self, parent): 150 | if parent is self.parents[0]: 151 | return np.diag(self.parents[1].value.A1) 152 | else: 153 | return np.diag(self.parents[0].value.A1) 154 | 155 | 156 | class Conv(Operator): 157 | """ 158 | feature_map: parents[0], kernel (filter): parents[1] 159 | """ 160 | def __init__(self, *parents, **kargs): 161 | assert len(parents) == 2 162 | Operator.__init__(self, *parents, **kargs) 163 | self.padded = None 164 | 165 | def calculate(self): 166 | data = self.parents[0].value # feature 167 | kernel = self.parents[1].value 168 | 169 | w, h = data.shape # feature shape 170 | kw, kh = kernel.shape # kernel shape 171 | _kw, _kh = int(kw/2), int(kh/2) # half of kernel shape 172 | 173 | # padding 174 | pw, ph = tuple(np.add(data.shape, np.multiply((_kw, _kh), 2))) # shape after padding 175 | self.padded = np.mat(np.zeros((pw, ph))) 176 | self.padded[_kw:_kw+w, _kh:_kh+h] = data 177 | self.value = np.mat(np.zeros((w, h))) 178 | 179 | # 2d convolution 180 | for i in np.arange(_kw, _kw+w): 181 | for j in np.arange(_kh, _kh+h): 182 | self.value[i-_kw, j-_kh] = np.sum( 183 | np.multiply(self.padded[i - _kw:i - _kw + kw, j - _kh:j - _kh + kh], kernel)) 184 | 185 | def get_jacobi(self, parent): 186 | data = self.parents[0].value # feature 187 | kernel = self.parents[1].value 188 | 189 | w, h = data.shape 190 | kw, kh = kernel.shape 191 | _kw, _kh = int(kw/2), int(kh/2) 192 | 193 | # padding 194 | pw, ph = tuple(np.add(data.shape, np.multiply((_kw, _kh), 2))) # shape after padding 195 | 196 | jacobi = [] 197 | if parent is self.parents[0]: 198 | for i in np.arange(_kw, _kw+w): 199 | for j in np.arange(_kh, _kh+h): 200 | mask = np.mat(np.zeros((pw, ph))) 201 | mask[i-_kw: i-_kw+kw, j-_kh: j-_kh+kh] = kernel 202 | jacobi.append(mask[_kw:_kw+w, _kh:_kh+h].A1) 203 | elif parent is self.parents[1]: 204 | for i in np.arange(_kw, _kw+w): 205 | for j in np.arange(_kh, _kh+h): 206 | jacobi.append( 207 | self.padded[i-_kw:i-_kw+kw, j-_kh:j-_kh+kh]) 208 | else: 209 | raise Exception("not current node's parent") 210 | 211 | return np.mat(jacobi) 212 | 213 | 214 | class MaxPooling(Operator): 215 | """ 216 | max pooling 217 | """ 218 | def __init__(self, *parent, **kargs): 219 | Operator.__init__(self, *parent, **kargs) 220 | self.stride = kargs.get('stride') 221 | assert self.stride is not None 222 | self.stride = tuple(self.stride) 223 | assert isinstance(self.stride, tuple) and len(self.stride) == 2 224 | 225 | self.size = kargs.get('size') 226 | assert self.size is not None 227 | self.size = tuple(self.size) 228 | assert isinstance(self.size, tuple) and len(self.size) == 2 229 | 230 | self.flag = None 231 | 232 | def calculate(self): 233 | data = self.parents[0].value # feature map 234 | w, h = data.shape 235 | dim = w * h 236 | sw, sh = self.stride 237 | kw, kh = self.size # pooling kernel size 238 | _kw, _kh = int(kw/2), int(kh/2) # 1/2 of kernel size
239 | 240 | result = [] 241 | flag = [] 242 | 243 | for i in np.arange(0, w, sw): 244 | row = [] 245 | for j in np.arange(0, h, sh): 246 | # get max value in pooling window 247 | top, bottom = max(0, i-_kw), min(w, i+_kw+1) 248 | left, right = max(0, j-_kh), min(h, j+_kh+1) 249 | window = data[top:bottom, left:right] 250 | row.append(np.max(window)) 251 | 252 | # record max value position for bp 253 | pos = np.argmax(window) 254 | w_width = right - left 255 | offset_w, offset_h = top+pos//w_width, left+pos % w_width 256 | offset = offset_w * w + offset_h 257 | temp = np.zeros(dim) 258 | temp[offset] = 1 259 | flag.append(temp) 260 | result.append(row) 261 | 262 | self.flag = np.mat(flag) 263 | self.value = np.mat(result) 264 | 265 | def get_jacobi(self, parent): 266 | assert parent is self.parents[0] and self.flag is not None 267 | return self.flag 268 | 269 | 270 | class Concat(Operator): 271 | """ 272 | concatenate parent nodes 273 | """ 274 | def calculate(self): 275 | assert len(self.parents) > 0 276 | self.value = np.concatenate([p.value.flatten() for p in self.parents], axis=1).T 277 | 278 | def get_jacobi(self, parent): 279 | assert parent in self.parents 280 | 281 | dims = [p.dimention() for p in self.parents] 282 | pos = self.parents.index(parent) # index of this parent 283 | dim = parent.dimention() # number of elements in this parent node 284 | 285 | assert dim == dims[pos] 286 | 287 | jacobi = np.mat(np.zeros((self.dimention(), dim))) 288 | start_row = int(np.sum(dims[:pos])) 289 | jacobi[start_row:start_row+dim, 0:dim] = np.eye(dim) 290 | 291 | return jacobi 292 | 293 | 294 | class ScalarMultiply(Operator): 295 | """ 296 | Scalar * matrix 297 | """ 298 | def calculate(self): 299 | assert self.parents[0].shape() == (1, 1) # parent[0] : scalar 300 | self.value = np.multiply(self.parents[0].value, self.parents[1].value) 301 | 302 | def get_jacobi(self, parent): 303 | assert parent in self.parents 304 | if parent is self.parents[0]: 305 | return self.parents[1].value.flatten().T 306 | else: 307 | return np.mat(np.eye(self.parents[1].dimention())) * self.parents[0].value[0, 0] 308 | 309 | 310 | class Step(Operator): 311 | """ 312 | step function 313 | """ 314 | def calculate(self): 315 | self.value = np.mat(np.where(self.parents[0].value >= 0.0, 1.0, 0.0)) 316 | 317 | def get_jacobi(self, parent): 318 | # the step function is constant almost everywhere, so its jacobi is the zero matrix 319 | return np.mat(np.zeros((self.dimention(), parent.dimention()))) -------------------------------------------------------------------------------- /tinyframework/optimizer/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from .optimizer import * -------------------------------------------------------------------------------- /tinyframework/optimizer/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/optimizer/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/optimizer/__pycache__/optimizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/optimizer/__pycache__/optimizer.cpython-37.pyc
-------------------------------------------------------------------------------- /tinyframework/optimizer/optimizer.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import abc 4 | import numpy as np 5 | from ..core import Node, Variable, get_node_from_graph 6 | from ..core.graph import Graph 7 | 8 | 9 | class Optimizer(object): 10 | """ 11 | Optimizer base class 12 | steps: 13 | 1. call forward on the loss node 14 | 2. call backward on every trainable node to get the jacobi of the loss w.r.t. that node 15 | 3. update the values (parameters) of the trainable nodes, clear the accumulators, and go back to step 1 16 | """ 17 | def __init__(self, graph, target, learning_rate=0.005): 18 | """ 19 | optimizer init receives a calculation graph object, target node and learning rate 20 | """ 21 | assert isinstance(target, Node) and isinstance(graph, Graph) 22 | self.graph = graph 23 | self.target = target 24 | self.learning_rate = learning_rate 25 | 26 | self.acc_gradient = dict() 27 | self.acc_no = 0 28 | 29 | def one_step(self): 30 | """ 31 | calculate and accumulate the gradients of the samples in a batch; 32 | one call runs one forward and one backward pass 33 | """ 34 | self.forward_backward() 35 | self.acc_no += 1 36 | 37 | def get_gradient(self, node): 38 | """ 39 | return mean gradient for node 40 | """ 41 | assert node in self.acc_gradient 42 | return self.acc_gradient[node] / self.acc_no 43 | 44 | @abc.abstractmethod 45 | def _update(self): 46 | """ 47 | implemented in subclasses: GD, RMSProp, Momentum, Adam, ... 48 | """ 49 | 50 | def forward_backward(self): 51 | """ 52 | for every sample, a forward pass computes the prediction, then a backward pass computes the jacobi (gradient) 53 | """ 54 | self.graph.clear_jacobi() 55 | # forward pass 56 | self.target.forward() 57 | # backward pass 58 | for node in self.graph.nodes: 59 | if isinstance(node, Variable) and node.trainable: 60 | node.backward(self.target) 61 | # if target is a scalar, its jacobi w.r.t. the node is a row vector; its transpose is the gradient (column) vector 62 | # reshape the gradient vector to the node's shape so the value update is straightforward 63 | gradient = node.jacobi.T.reshape(node.shape()) 64 | if node not in self.acc_gradient: 65 | self.acc_gradient[node] = gradient 66 | else: 67 | self.acc_gradient[node] += gradient 68 | 69 | def apply_gradients(self, node_gradients_dict, summarize=False, acc_no=None): 70 | """ 71 | compatible with gradient collection in distributed training 72 | """ 73 | for node, gradient in node_gradients_dict.items(): 74 | if isinstance(node, Node): 75 | pass 76 | else: 77 | target_node = get_node_from_graph(node) 78 | assert target_node is not None 79 | assert self.acc_gradient[target_node].shape == gradient.shape 80 | if summarize: 81 | self.acc_gradient[target_node] += gradient 82 | else: 83 | self.acc_gradient[target_node] = gradient 84 | 85 | if summarize: 86 | self.acc_no += acc_no # for distributed training 87 | else: 88 | if acc_no is None: 89 | # if the input is already an averaged gradient, set acc_no=1 so the update does not average again 90 | self.acc_no = 1 91 | else: 92 | self.acc_no = acc_no 93 | 94 | def update(self, var_gradients=None): 95 | if var_gradients is not None: 96 | self.apply_gradients(var_gradients) # used for distributed calculation 97 | self._update() 98 | # clear accumulated gradient dict and acc_no 99 | self.acc_gradient.clear() 100 | self.acc_no = 0 101 | 102 | 103 | class GradientDescent(Optimizer): 104 | """ 105 | gradient descent optimizer 106 | """ 107 | def __init__(self, graph, target, learning_rate=0.005): 108 | Optimizer.__init__(self, graph, target) 109 | self.learning_rate = learning_rate 110 | 111 | def _update(self): 112 | for node in self.graph.nodes: 113 | if isinstance(node, Variable) and node.trainable: 114 | # get every node's mean gradient 115 | gradient =
self.get_gradient(node) 116 | node.set_value(node.value - self.learning_rate * gradient) 117 | 118 | 119 | class Momentum(Optimizer): 120 | """ 121 | momentum optimizer 122 | momentum involves two steps: a velocity update and a weight update 123 | gradient = ▽f(w) 124 | V = momentum * V - lr * gradient 125 | w = w + V 126 | """ 127 | def __init__(self, graph, target, learning_rate=0.01, momentum=0.9): 128 | Optimizer.__init__(self, graph, target) 129 | self.learning_rate = learning_rate 130 | self.momentum = momentum # decay coefficient, defaults to 0.9 131 | self.V = dict() # record history velocity 132 | 133 | def _update(self): 134 | for node in self.graph.nodes: 135 | if isinstance(node, Variable) and node.trainable: 136 | # get the node's mean gradient 137 | gradient = self.get_gradient(node) 138 | if node not in self.V: 139 | self.V[node] = -self.learning_rate * gradient # first update: V starts from zero 140 | else: 141 | self.V[node] = self.momentum * self.V[node] - self.learning_rate * gradient 142 | 143 | node.set_value(node.value + self.V[node]) 144 | 145 | 146 | class AdaGrad(Optimizer): 147 | """ 148 | AdaGrad Optimizer 149 | an adaptive learning-rate method: a large gradient history shrinks the learning rate, and vice versa 150 | gradient = ▽f(w) 151 | s = s + gradient * gradient 152 | w = w - learning_rate/sqrt(s+eps) * gradient (the small eps prevents division by zero when gradients start at zero) 153 | """ 154 | def __init__(self, graph, target, learning_rate=0.01): 155 | Optimizer.__init__(self, graph, target) 156 | self.learning_rate = learning_rate 157 | self.s = dict() 158 | 159 | def _update(self): 160 | for node in self.graph.nodes: 161 | if isinstance(node, Variable) and node.trainable: 162 | # get the node's mean gradient 163 | gradient = self.get_gradient(node) 164 | # accumulate the square of gradient 165 | if node not in self.s: 166 | self.s[node] = np.power(gradient, 2) 167 | else: 168 | self.s[node] += np.power(gradient, 2) 169 | 170 | # update weight 171 | node.set_value(node.value - self.learning_rate * gradient / (np.sqrt(self.s[node] + 1e-10))) 172 | 173 | 174 | class RMSProp(Optimizer): 175 | """ 176 | Root Mean Square Propagation 177 | AdaGrad accumulates the squares of all historical gradients, which is problematic; recent gradients should matter more, with older ones decayed 178 | gradient = ▽f(w) 179 | s = β * s + (1 - β) * gradient * gradient 180 | w = w - learning_rate * gradient / sqrt(s+eps) (the small eps prevents division by zero when gradients start at zero) 181 | """ 182 | def __init__(self, graph, target, learning_rate=0.01, beta=0.9): 183 | Optimizer.__init__(self, graph, target) 184 | self.learning_rate = learning_rate 185 | assert 0.0 < beta < 1.0 186 | self.beta = beta 187 | self.s = dict() 188 | 189 | def _update(self): 190 | for node in self.graph.nodes: 191 | if isinstance(node, Variable) and node.trainable: 192 | # get the node's mean gradient 193 | gradient = self.get_gradient(node) 194 | 195 | if node not in self.s: 196 | self.s[node] = np.power(gradient, 2) 197 | else: 198 | self.s[node] = self.beta * self.s[node] + (1 - self.beta) * np.power(gradient, 2) 199 | 200 | node.set_value(node.value - self.learning_rate * gradient / np.sqrt(self.s[node] + 1e-10)) 201 | 202 | 203 | class Adam(Optimizer): 204 | """ 205 | Adaptive Momentum Estimation Optimizer 206 | Momentum accumulates historical gradients; AdaGrad adapts the learning rate; RMSProp adapts it with a decayed accumulation of squared gradients. 207 | Adam combines Momentum and RMSProp, keeping both V and S with two hyperparameters β_v and β_s. 208 | 209 | gradient = ▽f(w) 210 | V = β_v * V + (1 - β_v) * gradient 211 | s = β_s * s + (1 - β_s) * gradient * gradient 212 | w = w - learning_rate * V / sqrt(s+eps) (the small eps prevents division by zero when gradients start at zero) 213 | """ 214 | def __init__(self, graph, target, learning_rate=0.01, beta_v=0.9, beta_s=0.95): 215 | Optimizer.__init__(self, graph, target) 216 | self.learning_rate = learning_rate 217 | assert 0.0 < beta_v < 1.0 218 | self.beta_v = beta_v 219 | assert 0.0 < beta_s < 1.0 220 | self.beta_s = beta_s
221 | self.v = dict() 222 | self.s = dict() 223 | 224 | def _update(self): 225 | for node in self.graph.nodes: 226 | if isinstance(node, Variable) and node.trainable: 227 | # get the node's mean gradient 228 | gradient = self.get_gradient(node) 229 | 230 | if node not in self.v: 231 | self.v[node] = gradient 232 | self.s[node] = np.power(gradient, 2) 233 | else: 234 | self.v[node] = self.beta_v * self.v[node] + (1 - self.beta_v) * gradient 235 | self.s[node] = self.beta_s * self.s[node] + (1 - self.beta_s) * np.power(gradient, 2) 236 | 237 | node.set_value(node.value - self.learning_rate * self.v[node] / np.sqrt(self.s[node] + 1e-10)) -------------------------------------------------------------------------------- /tinyframework/tinyframework.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from . import core 4 | from . import layer 5 | from . import ops 6 | from . import optimizer 7 | from . import utils 8 | from . import trainer 9 | 10 | default_graph = core.default_graph 11 | get_node_from_graph = core.get_node_from_graph 12 | name_scope = core.NameScope 13 | Variable = core.Variable 14 | -------------------------------------------------------------------------------- /tinyframework/trainer/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | 4 | from .trainer import * 5 | from .saver import * 6 | from .local_trainer import * -------------------------------------------------------------------------------- /tinyframework/trainer/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/trainer/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/trainer/__pycache__/local_trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/trainer/__pycache__/local_trainer.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/trainer/__pycache__/saver.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/trainer/__pycache__/saver.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/trainer/__pycache__/trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/trainer/__pycache__/trainer.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/trainer/local_trainer.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from .trainer import Trainer 4 | 5 | 6 | class LocalTrainer(Trainer): 7 | """ 8 | Single-machine trainer: variables are initialized locally and updated by the optimizer directly. 9 | """ 10 | def __init__(self, *args, **kargs): 11 | Trainer.__init__(self, *args, **kargs) 12 | 13 | def _variable_weights_init(self): 14 | pass # nothing to do locally: Variable nodes initialize their own weights 15 | 16 | def _optimizer_update(self): 17 | self.optimizer.update() 18 |
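The update rules documented in optimizer.py above can be exercised in isolation. Below is a minimal standalone sketch of the Adam rule applied to the toy objective f(w) = w**2; the names w, v, s, learning_rate, beta_v and beta_s are local illustrative variables, not framework nodes:

import numpy as np

# minimal sketch of the Adam update from optimizer.py, minimizing f(w) = w**2
w = np.array([5.0])
v = np.zeros_like(w)  # decayed gradient history (V)
s = np.zeros_like(w)  # decayed squared-gradient history (S)
learning_rate, beta_v, beta_s = 0.01, 0.9, 0.95

for step in range(2000):
    gradient = 2 * w  # ▽f(w)
    v = beta_v * v + (1 - beta_v) * gradient
    s = beta_s * s + (1 - beta_s) * np.power(gradient, 2)
    w = w - learning_rate * v / np.sqrt(s + 1e-10)

print(w)  # ends near the minimum at w = 0

Note that the framework's Adam seeds v and s with the first raw gradient and its square rather than with zeros, a simple stand-in for the bias correction used in the original Adam formulation.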
-------------------------------------------------------------------------------- /tinyframework/trainer/saver.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | import datetime 4 | import json 5 | import os 6 | 7 | from ..core.core import get_node_from_graph 8 | from ..core import Variable 9 | from ..core.core import default_graph 10 | from ..ops.metrics import * 11 | from ..utils import ClassMining 12 | 13 | 14 | class Saver(object): 15 | """ 16 | save and load utilities for models 17 | persist both the graph structure and the parameter values 18 | """ 19 | def __init__(self, root_dir=''): 20 | self.root_dir = root_dir or '.' # an empty root_dir falls back to the current directory 21 | if not os.path.exists(self.root_dir): 22 | os.makedirs(self.root_dir) 23 | 24 | def save(self, graph=None, meta=None, service_signature=None, 25 | model_file_name='model.json', weights_file_name='weights.npz'): 26 | if graph is None: 27 | graph = default_graph 28 | 29 | # meta records model save time and node_value filename 30 | meta = {} if meta is None else meta 31 | meta['save_time'] = str(datetime.datetime.now()) 32 | meta['weights_file_name'] = weights_file_name 33 | 34 | # description for interface service 35 | service = {} if service_signature is None else service_signature 36 | 37 | self._save_model_and_weights(graph, meta, service, model_file_name, weights_file_name) 38 | 39 | def _save_model_and_weights(self, graph, meta, service, model_file_name, weights_file_name): 40 | model_json = { 41 | 'meta': meta, 42 | 'service': service 43 | } 44 | graph_json = [] # record node_json 45 | weights_dict = dict() 46 | 47 | # save node meta_info as dict/json 48 | for node in graph.nodes: 49 | if not node.need_save: 50 | continue 51 | node.kargs.pop('name', None) 52 | node_json = { 53 | 'node_type': node.__class__.__name__, 54 | 'name': node.name, 55 | 'parents': [parent.name for parent in node.parents], 56 | 'children': [child.name for child in node.children], 57 | 'kargs': node.kargs 58 | } 59 | 60 | if node.value is not None: 61 | if isinstance(node.value, np.matrix): 62 | node_json['dim'] = node.value.shape 63 | 64 | graph_json.append(node_json) 65 | 66 | if isinstance(node, Variable): 67 | weights_dict[node.name] = node.value 68 | 69 | model_json['graph'] = graph_json 70 | 71 | model_file_path = os.path.join(self.root_dir, model_file_name) 72 | with open(model_file_path, 'w') as model_file: 73 | json.dump(model_json, model_file, indent=4) 74 | print("Save model into file: {}".format(model_file.name)) 75 | 76 | weights_file_path = os.path.join(self.root_dir, weights_file_name) 77 | with open(weights_file_path, 'wb') as weights_file: 78 | np.savez(weights_file, **weights_dict) 79 | print("Save weights to file: {}".format(weights_file.name)) 80 | 81 | @staticmethod 82 | def create_node(graph, from_model_json, node_json): 83 | """ 84 | Recursively create nodes that do not exist yet 85 | """ 86 | node_type = node_json['node_type'] 87 | node_name = node_json['name'] 88 | parents_name = node_json['parents'] 89 | dim = node_json.get('dim', None) 90 | kargs = node_json.get('kargs', None) 91 | 92 | parents = [] 93 | for parent_name in parents_name: 94 | parent_node = get_node_from_graph(parent_name, graph=graph) 95 | if parent_node is None: 96 | parent_node_json = None 97 | for node in from_model_json: 98 | if node['name'] == parent_name: 99 | parent_node_json = node 100 | assert parent_node_json is not None 101 | # if the parent node does not exist yet, create it recursively 102 | parent_node = Saver.create_node( 103 | graph, from_model_json, parent_node_json) 104 | 105 |
parents.append(parent_node) 106 | 107 | # Create node instances by reflection 108 | if node_type == 'Variable': 109 | assert dim is not None 110 | 111 | dim = tuple(dim) 112 | return ClassMining.get_instance_by_subclass_name(Node, node_type)(*parents, dim=dim, name=node_name, **kargs) 113 | else: 114 | return ClassMining.get_instance_by_subclass_name(Node, node_type)(*parents, name=node_name, **kargs) 115 | 116 | def _restore_nodes(self, graph, from_model_json, from_weights_dict): 117 | for i in range(len(from_model_json)): 118 | node_json = from_model_json[i] 119 | node_name = node_json['name'] 120 | 121 | weights = None 122 | if node_name in from_weights_dict: 123 | weights = from_weights_dict[node_name] 124 | 125 | # check whether the node has already been created: if it exists, update its weights, otherwise create it 126 | target_node = get_node_from_graph(node_name, graph=graph) 127 | if target_node is None: 128 | print("Target node {} of type {} does not exist, trying to create the instance".format(node_json['name'], node_json['node_type'])) 129 | target_node = Saver.create_node(graph, from_model_json, node_json) 130 | 131 | target_node.value = weights 132 | 133 | def load(self, to_graph=None, model_file_name='model.json', weights_file_name='weights.npz'): 134 | """ 135 | Read and restore the structure of the computational graph and the corresponding weight values from file 136 | """ 137 | if to_graph is None: 138 | to_graph = default_graph 139 | 140 | model_json = {} 141 | graph_json = [] 142 | weights_dict = dict() 143 | 144 | model_file_path = os.path.join(self.root_dir, model_file_name) 145 | with open(model_file_path, 'r') as model_file: 146 | model_json = json.load(model_file) 147 | 148 | weights_file_path = os.path.join(self.root_dir, weights_file_name) 149 | with open(weights_file_path, 'rb') as weights_file: 150 | weights_npz_files = np.load(weights_file) 151 | for file_name in weights_npz_files.files: 152 | weights_dict[file_name] = weights_npz_files[file_name] 153 | weights_npz_files.close() 154 | 155 | graph_json = model_json['graph'] 156 | self._restore_nodes(to_graph, graph_json, weights_dict) 157 | print('Load and restore model from {} and {}'.format(model_file_path, weights_file_path)) 158 | 159 | self.meta = model_json.get('meta', None) 160 | self.service = model_json.get('service', None) 161 | return self.meta, self.service 162 |
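The Saver above persists a model as two files: model.json (per-node metadata plus the meta and service sections) and weights.npz (one array per Variable node). A hedged round-trip sketch; the checkpoint directory name is illustrative, and the graph is assumed to have already been built on default_graph and trained:

from tinyframework.trainer.saver import Saver

# save the default graph: structure goes to model.json, weights to weights.npz
saver = Saver(root_dir='./checkpoints')
saver.save(model_file_name='model.json', weights_file_name='weights.npz')

# later, e.g. in a fresh process: recreate missing nodes and restore weights
meta, service = Saver(root_dir='./checkpoints').load(
    model_file_name='model.json', weights_file_name='weights.npz')

load() returns the meta and service dicts, which is what tf_serving uses to recover the serving signature alongside the restored graph.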
-------------------------------------------------------------------------------- /tinyframework/trainer/trainer.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | import abc 4 | from ..core import Variable, default_graph 5 | import numpy as np 6 | 7 | 8 | class Trainer(object): 9 | """ 10 | Base trainer: drives the training/evaluation loop; weight initialization and parameter updates are deferred to subclasses. 11 | """ 12 | def __init__(self, input_x, input_y, loss_op, optimizer, epoches, batch_size=16, 13 | is_eval_on_train=False, metrics_ops=None, *args, **kargs): 14 | """ 15 | :param input_x: list of input nodes; the graph may have several inputs, so sample data is 16 | later passed as a dict with key: node_name and value: sample_value to match nodes and samples 17 | :param input_y: the label node 18 | :param loss_op: loss node to minimize 19 | :param optimizer: optimizer instance used for parameter updates 20 | :param epoches: number of training epochs 21 | :param batch_size: number of samples per parameter update 22 | :param is_eval_on_train: whether to evaluate on test data after each epoch 23 | :param metrics_ops: list of metric ops (e.g. accuracy, precision, recall), one or more 24 | :param args: 25 | :param kargs: 26 | """ 27 | self.inputs = input_x 28 | self.input_y = input_y 29 | 30 | self.loss_op = loss_op 31 | self.optimizer = optimizer 32 | 33 | self.epochs = epoches 34 | self.epoch = 0 35 | self.batch_size = batch_size 36 | 37 | self.is_eval = is_eval_on_train 38 | self.metrics_ops = metrics_ops 39 | 40 | self.print_iteration_interval = kargs.get("print_iteration_interval", 100) 41 | 42 | def train_and_eval(self, train_x, train_y, test_x=None, test_y=None): 43 | """ 44 | start training (and evaluation) 45 | """ 46 | assert len(train_x) == len(self.inputs) 47 | 48 | if test_x is not None and test_y is not None: 49 | assert len(test_x) == len(self.inputs) 50 | 51 | # init weights 52 | self._variable_weights_init() 53 | print('INIT: variable weights initialized') 54 | 55 | self.train_loop(train_x, train_y, test_x, test_y) 56 | 57 | def train_loop(self, train_x, train_y, test_x, test_y): 58 | """ 59 | for every epoch, run training (and optional evaluation) 60 | """ 61 | for self.epoch in range(self.epochs): 62 | self.train(train_x, train_y) 63 | 64 | if self.is_eval and test_x is not None and test_y is not None: 65 | self.eval(test_x, test_y) 66 | 67 | def train(self, train_x, train_y): 68 | for i in range(len(list(train_x.values())[0])): 69 | # for every sample, execute fp and bp 70 | self.one_step(self._get_input_values(train_x, i), train_y[i]) 71 | if (i+1) % self.batch_size == 0: # note: a trailing partial batch does not trigger an update 72 | self._optimizer_update() 73 | 74 | def eval(self, test_x, test_y): 75 | """ 76 | :param test_x: dict of test samples keyed by input node name 77 | :param test_y: test labels 78 | :return: None; metric values are printed 79 | """ 80 | # reset every metric op 81 | for metrics_op in self.metrics_ops: 82 | metrics_op.reset_value() 83 | # for every sample 84 | for i in range(len(list(test_x.values())[0])): 85 | self.one_step(self._get_input_values(test_x, i), test_y[i], is_eval=True) 86 | for metrics_op in self.metrics_ops: 87 | metrics_op.forward() 88 | 89 | metrics_str = 'Epoch[{}] evaluation metrics: '.format(self.epoch+1) 90 | for metrics_op in self.metrics_ops: 91 | metrics_str += metrics_op.value2str() 92 | print(metrics_str) 93 | 94 | 95 | 96 | def one_step(self, data_x, data_y, is_eval=False): 97 | """ 98 | execute one step of forward propagation and, during training only, backward propagation 99 | when is_eval=True, parameters will not be updated 100 | """ 101 | # for every input node 102 | for i in range(len(self.inputs)): 103 | # find value from input dict by input node name 104 | input_value = data_x.get(self.inputs[i].name) 105 | self.inputs[i].set_value(np.mat(input_value).T) 106 | 107 | self.input_y.set_value(np.mat(data_y).T) 108 | # the optimizer only runs in the training stage 109 | if not is_eval: 110 | self.optimizer.one_step() 111 | 112 | def _get_input_values(self, x, index): 113 | """ 114 | :param x: dict mapping input node names to sample arrays 115 | :param index: sample index 116 | :return: dict mapping each input node name to the sample at the given index 117 | """ 118 | input_values = dict() 119 | # for every input node 120 | for input_node_name in x.keys(): 121 | input_values[input_node_name] = x[input_node_name][index] 122 | return input_values 123 | 124 | @abc.abstractmethod 125 | def _variable_weights_init(self): 126 | """ 127 | weight initialization; implemented in subclasses 128 | """ 129 | raise NotImplementedError() 130 | 131 | @abc.abstractmethod 132 | def _optimizer_update(self): 133 | """ 134 | how parameters are updated; implemented in subclasses 135 | """ 136 | raise NotImplementedError() 137 | -------------------------------------------------------------------------------- /tinyframework/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from .utils import *
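trainer.py above drives the whole loop: samples are fed one at a time through one_step, and _optimizer_update fires once per batch_size samples. A hedged sketch of calling LocalTrainer; the nodes x, y, loss, optimizer and accuracy, and the data arrays, stand in for a graph built elsewhere (e.g. as in example/nn_iris.py) and are not defined by the snippet above:

from tinyframework.trainer.local_trainer import LocalTrainer

trainer = LocalTrainer(
    input_x=[x],             # list of input Variable nodes
    input_y=y,               # label Variable node
    loss_op=loss,
    optimizer=optimizer,
    epoches=20,              # the parameter is spelled "epoches" in trainer.py
    batch_size=16,
    is_eval_on_train=True,
    metrics_ops=[accuracy])  # evaluated after each epoch when test data is given

# train_x maps each input node's name to its samples; labels are passed directly
trainer.train_and_eval({x.name: train_features}, train_labels,
                       {x.name: test_features}, test_labels)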
-------------------------------------------------------------------------------- /tinyframework/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/utils/__pycache__/draw.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/utils/__pycache__/draw.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/utils/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Harrison-Ho/tinyframework/3a8f79dc6d34510b616bc3e20688e4078e6a5702/tinyframework/utils/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /tinyframework/utils/draw.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | from pyecharts import options as ops 4 | from pyecharts.charts import Graph 5 | 6 | from ..core.graph import default_graph 7 | 8 | 9 | def draw_graph(filename=''): 10 | nodes_for_draw = [] 11 | edges_for_draw = [] 12 | for node in default_graph.nodes: 13 | nodes_for_draw.append({'name': node.name, "symbolSize": 50}) 14 | for node in default_graph.nodes: 15 | for child in node.children: 16 | edges_for_draw.append({'source': node.name, 'target': child.name}) 17 | graph = Graph(init_opts=ops.InitOpts(width='1800px', height='1000px')) 18 | graph.set_global_opts(title_opts=ops.TitleOpts(title="TinyFramework")) 19 | graph.add("", nodes_for_draw, edges_for_draw, layout='force', repulsion=8000, edge_symbol=['circle', 'arrow']) 20 | if filename == '': 21 | file_name = filename # no prefix: render to graph.html 22 | else: 23 | file_name = filename + '_' # prefix: render to <filename>_graph.html 24 | graph.render('./' + file_name + 'graph.html') -------------------------------------------------------------------------------- /tinyframework/utils/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | 3 | import gzip 4 | import os 5 | import numpy as np 6 | from urllib.request import urlretrieve 7 | 8 | 9 | def mnist(path=None): 10 | pass # placeholder: MNIST download/parsing is not implemented yet 11 | 12 | 13 | class ClassMining(object): 14 | """ 15 | Reflection helpers: enumerate a class's subclasses recursively and resolve them by name. 16 | """ 17 | 18 | @classmethod 19 | def get_subclass_list(cls, model): 20 | subclass_list = [] 21 | for subclass in model.__subclasses__(): 22 | subclass_list.append(subclass) 23 | subclass_list.extend(cls.get_subclass_list(subclass)) 24 | return subclass_list 25 | 26 | @classmethod 27 | def get_subclass_dict(cls, model): 28 | subclass_list = cls.get_subclass_list(model=model) 29 | return {k: k.__name__ for k in subclass_list} 30 | 31 | @classmethod 32 | def get_subclass_names(cls, model): 33 | subclass_list = cls.get_subclass_list(model=model) 34 | return [k.__name__ for k in subclass_list] 35 | 36 | @classmethod 37 | def get_instance_by_subclass_name(cls, model, name): 38 | for subclass in model.__subclasses__(): 39 | if subclass.__name__ == name: 40 | return subclass # note: returns the class object itself, not an instance 41 | instance = cls.get_instance_by_subclass_name(subclass, name) 42 | if instance: 43 | return instance
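ClassMining is the reflection helper behind Saver.create_node: it resolves a 'node_type' string back to a class by searching the subclass tree recursively. A small sketch with a hypothetical hierarchy; Base, Middle and Leaf are illustrative classes, not the framework's Node types:

from tinyframework.utils import ClassMining

class Base: pass
class Middle(Base): pass
class Leaf(Middle): pass

# despite its name, get_instance_by_subclass_name returns the class object,
# not an instance; the caller instantiates it, as Saver.create_node does
cls = ClassMining.get_instance_by_subclass_name(Base, 'Leaf')
assert cls is Leaf
leaf = cls()  # instantiation is left to the caller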
--------------------------------------------------------------------------------