├── .gitignore
├── src
│   ├── caffe
│   │   ├── Makefile
│   │   ├── util.h
│   │   ├── net_file.txt
│   │   ├── lenet.prototxt
│   │   ├── syntactic_analysis.y
│   │   ├── lexical_analysis.l
│   │   ├── alexnet.prototxt
│   │   └── util.c
│   ├── tf
│   │   ├── Makefile
│   │   ├── util.h
│   │   ├── try_vis.py
│   │   ├── syntactic_analysis.y.old
│   │   ├── mnist.py
│   │   ├── alexnet.py
│   │   ├── demo.tf
│   │   ├── lexical_analysis.l
│   │   ├── util.c
│   │   └── syntactic_analysis.y
│   └── draw_dot.py
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
*.yy.c
*.out
*.tab.h
*.tab.c

--------------------------------------------------------------------------------
/src/caffe/Makefile:
--------------------------------------------------------------------------------
lexical_analysis:lexical_analysis.l util.h util.c syntactic_analysis.y
	bison -d syntactic_analysis.y
	flex lexical_analysis.l
	gcc syntactic_analysis.tab.c lex.yy.c util.c

--------------------------------------------------------------------------------
/src/tf/Makefile:
--------------------------------------------------------------------------------
lexical_analysis:lexical_analysis.l util.h util.c syntactic_analysis.y
	bison -d syntactic_analysis.y
	flex lexical_analysis.l
	gcc syntactic_analysis.tab.c lex.yy.c util.c
	cat demo.tf | ./a.out

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## What is NNCompiler
NNCompiler is an interpreter that translates neural-network construction code from different platforms (TensorFlow, Caffe, etc.) into a common intermediate representation, without using the platforms' own engines.


## What is special about NNCompiler

* A common intermediate representation across platforms
* No dependency on the platforms' engines
* Works from the source code alone
* Easy to extend to a new platform
* Robust across almost all kinds of platforms
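## Building and running (sketch)
The two Makefiles encode the whole pipeline. For the TensorFlow front end it boils down to the steps below (a sketch, not a tested script: the parser binary is gcc's default `a.out`, and running `draw_dot.py` from `src/tf` is just one way to inspect the result):

```
cd src/tf
bison -d syntactic_analysis.y
flex lexical_analysis.l
gcc syntactic_analysis.tab.c lex.yy.c util.c
cat demo.tf | ./a.out     # parses demo.tf and writes the common IR to net_file.txt
python ../draw_dot.py     # converts net_file.txt into Graphviz's sample.dot
```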
--------------------------------------------------------------------------------
/src/tf/util.h:
--------------------------------------------------------------------------------
/*
 * Author:HanRuobing
 * Created on :2018-2-11
 * Description : Define DAG's node
 */
extern char* yytext;
void yyerror(char* s,...); // variadic error-reporting function
struct node
{
    char* op_name;
    char* node_name;
    int input_cnt;
    struct node** input;
    char* attrs;
    int pid;
};
struct node* new_node(char* node_op_name,int num,...);

//char* get_name(char* raw_name);// to resolve the name-reuse problem

struct node* get_node(char* variable_name);

void add_node(char* node_name,struct node* p);

void add_input(struct node* p,struct node* input_node);

void travel_node(struct node* start);

char* concat_str(int num,...);

--------------------------------------------------------------------------------
/src/caffe/util.h:
--------------------------------------------------------------------------------
/*
 * Author:HanRuobing
 * Created on :2018-2-11
 * Description : Define DAG's node
 */
extern char* yytext;
void yyerror(char* s,...); // variadic error-reporting function
struct node
{
    char* op_name;
    char* node_name;
    int input_cnt;
    struct node** input;
    char* attrs;
    int pid;
};

struct node* new_node(char* node_op_name,int num,...);

//char* get_name(char* raw_name);// to resolve the name-reuse problem

struct node* get_node(char* variable_name);

void add_node(char* node_name,struct node* p);

void add_input(struct node* p,struct node* input_node);

void travel_node(struct node* start);

char* concat_str(int num,...);

--------------------------------------------------------------------------------
/src/tf/try_vis.py:
--------------------------------------------------------------------------------
import networkx as nx
import matplotlib.pyplot as plt
node_name = []
node_attr = []
node_op = []
G = nx.DiGraph()
with open('net_file.txt','r') as f:
    node_num , edge_num = f.readline().split()
    print(node_num,edge_num)
    for idx in range(0,int(node_num)):
        node_name.append(f.readline().strip('\n'))
        node_op.append(f.readline().strip('\n'))
        node_attr.append(f.readline().strip('\n'))
        #print(node_attr[-1])
    for _ in range(0,int(edge_num)):
        #print(f.readline().split())
        l,r = f.readline().strip('\n').split()
        G.add_edge(node_op[int(l)]+":"+l,node_op[int(r)]+":"+r)
        G.add_node(node_op[int(l)]+":"+l,attr = node_attr[int(l)],name = node_name[int(l)])
        G.add_node(node_op[int(r)]+":"+r,attr = node_attr[int(r)],name = node_name[int(r)])
#pos = nx.spectral_layout(G)
nx.draw_spring(G, with_labels=True, node_size=200)
plt.show()

--------------------------------------------------------------------------------
/src/tf/syntactic_analysis.y.old:
--------------------------------------------------------------------------------
/*
 *Author:HanRuobing
 *Created on:2018-02-09
 *Description:Semantic analysis for tensorflow.
 */
%{
#include <stdio.h>
%}
%start Program
%union{
    char* str;
    struct node* p;
}

%token INTEGER FLOAT PLUS MINUS MULTIPLY DIV NOT LP RP LB RB DOT
%token VARIABLE LIST ASSIGNOP MATMUL CONSTANT
%type <str> Program ExtDefList ExtDef ExtDecList Exp
%type <p> FUNC NumericalOP
%type <str> NNFUNC

/*priority*/
%right ASSIGNOP
%left PLUS MINUS
%left MULTIPLY DIV
%right NOT
%left LP RP LB RB DOT
%%
Program:{}|ExtDefList {};
ExtDefList:ExtDef ExtDefList {}| {};
ExtDef:Dec{};
Dec:VARIABLE ASSIGNOP Exp {} | VARIABLE ASSIGNOP Dec {};
NumericalOP:PLUS{}|MINUS{}|MULTIPLY{}|DIV{};
Exp:VARIABLE{}|Exp NumericalOP Exp{}|FUNC{};
NNFUNC:MATMUL{}|CONSTANT{};
FUNC:NNFUNC LP PARAMETERS RP{$$ = &(node($1,0));printf("%s\n",$1);};
NNFUNC: CONSTANT {$$ = "constant";}|MATMUL{$$ = "matmul";};
PARAMETERS: PARAMETER PARAMETERS{} | {};
PARAMETER: VARIABLE{};

--------------------------------------------------------------------------------
/src/draw_dot.py:
--------------------------------------------------------------------------------
def main():
    with open('net_file.txt') as f:
        msg = f.readline().split()
        node_num = msg[0]
        edge_num = msg[1]
        print(node_num,edge_num)
        node_name = []
        node_op = []
        node_attr = []
        edge_s = []
        edge_e = []
        for idx in range(int(node_num)):
            node_name.append(f.readline().strip())
            node_op.append(f.readline().strip())
            node_attr.append(f.readline().strip())

        for idx in range(int(edge_num)):
            s = f.readline().strip().split()
            edge_s.append(s[0])
            edge_e.append(s[1])
            print(s[0],s[1])

    filename = 'sample.dot'
    with open(filename,'w') as f:
        f.write("digraph G{\nsize = \"5, 5\";\n")
        for idx in range(len(node_name)):
            f.write("{}[label=\"{}\",comment=\"{}\"];\n".format(node_name[idx],node_op[idx],node_attr[idx]))
        for idx in range(len(edge_s)):
            f.write("{}->{};\n".format(node_name[int(edge_s[idx])],node_name[int(edge_e[idx])]))
        f.write("}")
if __name__ == '__main__':
    main()

--------------------------------------------------------------------------------
/src/caffe/net_file.txt:
--------------------------------------------------------------------------------
24 23
prob
Softmax
(null)
fc8
InnerProduct
num_output:1000
drop7
Dropout
dropout_ratio:0.5
relu7
ReLU
(null)
fc7
InnerProduct
num_output:4096
drop6
Dropout
dropout_ratio:0.5
relu6
ReLU
(null)
fc6
InnerProduct
num_output:4096
pool5
Pooling
pool:MAX,kernel_size:3,stride:2
relu5
ReLU
(null)
conv5
Convolution
num_output:256,pad:1,kernel_size:3,group:2
relu4
ReLU
(null)
conv4
Convolution
num_output:384,pad:1,kernel_size:3,group:2
relu3
ReLU
(null)
conv3
Convolution
num_output:384,pad:1,kernel_size:3
norm2
LRN
local_size:5,alpha:0.0001,beta:0.75
pool2
Pooling
pool:MAX,kernel_size:3,stride:2
relu2
ReLU
(null)
conv2
Convolution
num_output:256,pad:2,kernel_size:5,group:2
norm1
LRN
local_size:5,alpha:0.0001,beta:0.75
pool1
Pooling
pool:MAX,kernel_size:3,stride:2
relu1
ReLU
(null)
conv1
Convolution
num_output:96,kernel_size:11,stride:4
data
Input
shape:10,3,227,227,
1 0
2 1
3 2
4 3
5 4
6 5
7 6
8 7
9 8
10 9
11 10
12 11
13 12
14 13
15 14
16 15
17 16
18 17
19 18
20 19
21 20
22 21
23 22
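The IR file above follows a fixed layout: one header line with the node and edge counts, then three lines per node (name, op, attribute string, written as `(null)` when a node has none), then one `src dst` index pair per edge. A minimal reader, sketched after `src/draw_dot.py` (the `read_ir` name is invented here for illustration):

```python
def read_ir(path='net_file.txt'):
    """Parse the common IR into ([(name, op, attrs), ...], [(src, dst), ...])."""
    with open(path) as f:
        node_num, edge_num = (int(x) for x in f.readline().split())
        nodes = [(f.readline().strip(),   # name, e.g. "prob"
                  f.readline().strip(),   # op, e.g. "Softmax"
                  f.readline().strip())   # attrs, e.g. "num_output:1000"
                 for _ in range(node_num)]
        edges = [tuple(int(x) for x in f.readline().split())
                 for _ in range(edge_num)]
    return nodes, edges
```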
--------------------------------------------------------------------------------
/src/caffe/lenet.prototxt:
--------------------------------------------------------------------------------
name: "LeNet"
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape: { dim: 64 dim: 1 dim: 28 dim: 28 } }
}
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "data"
  top: "ip1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"
}
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "ip2"
  top: "ip2"
}
layer {
  name: "ip3"
  type: "InnerProduct"
  bottom: "ip2"
  top: "ip3"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "ip3"
  top: "prob"
}

--------------------------------------------------------------------------------
/src/tf/mnist.py:
--------------------------------------------------------------------------------
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10

# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
hidden1_units = 50
hidden2_units = 50
images=tf.placeholder(tf.float32,[None])
labels=tf.placeholder(tf.float32,[None])
learning_rate=tf.placeholder(tf.float32)

with tf.name_scope('hidden1'):
    weights1 = tf.Variable(
        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                            stddev=1.0),
        name='weights')
    biases1 = tf.Variable(tf.zeros([hidden1_units]),
                          name='biases')
    hidden1 = tf.nn.relu(tf.matmul(images, weights1) + biases1)
# Hidden 2
with tf.name_scope('hidden2'):
    weights2 = tf.Variable(
        tf.truncated_normal([hidden1_units, hidden2_units],
                            stddev=1.0),
        name='weights')
    biases2 = tf.Variable(tf.zeros([hidden2_units]),
                          name='biases')
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights2) + biases2)
# Linear
with tf.name_scope('softmax_linear'):
    weights3 = tf.Variable(
        tf.truncated_normal([hidden2_units, NUM_CLASSES],
                            stddev=1.0),
        name='weights')
    biases3 = tf.Variable(tf.zeros([NUM_CLASSES]),
                          name='biases')
    logits = tf.matmul(hidden2, weights3) + biases3

loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
#loss = tf.nn.softmax_cross_entropy_with_logits(labels, logits)

optimizer = tf.train.GradientDescentOptimizer(learning_rate)

train_op = optimizer.minimize(loss)

--------------------------------------------------------------------------------
/src/tf/alexnet.py:
--------------------------------------------------------------------------------
learning_rate=0.001
training_iters=200000
batch_size=64
display_step=20

n_input=784
n_classes=10

x=tf.placeholder(tf.float32,[None,n_input])
y=tf.placeholder(tf.float32,[None,n_classes])
_dropout=tf.placeholder(tf.float32)

_X=tf.reshape(x,shape=[-1,28,28,1])
conv1=tf.nn.relu(tf.nn.conv2d(_X,tf.Variable(tf.random_normal([3,3,1,64])),strides=[1,1,1,1],padding='SAME'),name='conv1')

pool1=tf.nn.max_pool(conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1')
norm1=tf.nn.lrn(pool1,4,bias=1.0,alpha=0.001/9,beta=0.75,name='norm1')
drop1=tf.nn.dropout(norm1,_dropout)

conv2=tf.nn.relu(tf.nn.conv2d(drop1,tf.Variable(tf.random_normal([3,3,64,128])),strides=[1,1,1,1],padding='SAME'),name='conv2')
pool2=tf.nn.max_pool(conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2')
norm2=tf.nn.lrn(pool2,4,bias=1.0,alpha=0.001/9,beta=0.75,name='norm2')
drop2=tf.nn.dropout(norm2,_dropout)

conv3=tf.nn.relu(tf.nn.conv2d(drop2,tf.Variable(tf.random_normal([3,3,128,256])),strides=[1,1,1,1],padding='SAME'),name='conv3')
pool3=tf.nn.max_pool(conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3')
norm3=tf.nn.lrn(pool3,4,bias=1.0,alpha=0.001/9,beta=0.75,name='norm3')
drop3=tf.nn.dropout(norm3,_dropout)

dense0=tf.reshape(drop3,shape=[-1,4*4*256])
dense1=tf.nn.relu(tf.matmul(dense0,tf.Variable(tf.random_normal([4*4*256,1024])),name='fc1')+tf.Variable(tf.random_normal([1024])))
dense2=tf.nn.relu(tf.matmul(dense1,tf.Variable(tf.random_normal([1024,1024])),name='fc2')+tf.Variable(tf.random_normal([1024])))
pred=tf.matmul(dense2,tf.Variable(tf.random_normal([1024,10])))+tf.Variable(tf.random_normal([n_classes]))

cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred,y))

#optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

#correct_pred=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
#accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))

--------------------------------------------------------------------------------
/src/tf/demo.tf:
--------------------------------------------------------------------------------
learning_rate=0.001
training_iters=200000
batch_size=64
display_step=20

n_input=784
n_classes=10

x=tf.placeholder(tf.float32,[None,n_input])
y=tf.placeholder(tf.float32,[None,n_classes])
_dropout=tf.placeholder(tf.float32)

_X=tf.reshape(x,shape=[-1,28,28,1])

conv1=tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(_X,tf.Variable(tf.random_normal([3,3,1,64])),strides=[1,1,1,1],padding='SAME'),tf.Variable(tf.random_normal([64]))),name='conv1')

pool1=tf.nn.max_pool(conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1')
norm1=tf.nn.lrn(pool1,depth_radius = 4,bias=1.0,alpha=0.001,beta=0.75,name='norm1')
drop1=tf.nn.dropout(norm1,_dropout)

conv2=tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(drop1,tf.Variable(tf.random_normal([3,3,64,128])),strides=[1,1,1,1],padding='SAME'),tf.Variable(tf.random_normal([128]))),name='conv2')
pool2=tf.nn.max_pool(conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2')
norm2=tf.nn.lrn(pool2,depth_radius = 4,bias=1.0,alpha=0.001,beta=0.75,name='norm2')
drop2=tf.nn.dropout(norm2,_dropout)

conv3=tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(drop2,tf.Variable(tf.random_normal([3,3,128,256])),strides=[1,1,1,1],padding='SAME'),tf.Variable(tf.random_normal([256]))),name='conv3')
pool3=tf.nn.max_pool(conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3')
norm3=tf.nn.lrn(pool3,depth_radius = 4,bias=1.0,alpha=0.001,beta=0.75,name='norm3')
drop3=tf.nn.dropout(norm3,_dropout)

dense1=tf.reshape(drop3,shape=[-1,4*4*256])
dense1=tf.nn.relu(tf.matmul(dense1,tf.Variable(tf.random_normal([4*4*256,1024]))+tf.Variable(tf.random_normal([1024])),name='fc1'))
dense2=tf.nn.relu(tf.matmul(dense1,tf.Variable(tf.random_normal([1024,1024]))+tf.Variable(tf.random_normal([1024])),name='fc2'))
pred=tf.matmul(dense2,tf.Variable(tf.random_normal([1024,10]))+tf.Variable(tf.random_normal([n_classes])))

cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred,y))

optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
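For orientation: the parser reduces every `tf.*` call in `demo.tf` to one DAG node whose inputs are the nodes built for its tensor arguments, while keyword arguments are flattened into the node's attribute string. A hand-drawn sketch (illustrative placeholders, not program output) of what the `conv1` line above becomes:

```python
# conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(_X, tf.Variable(...), ...), tf.Variable(...)), name='conv1')
# (op, inputs, attrs) triples; nodes never bound to a variable are named "tmp_<n>" on output
conv2d   = ('conv2d',   ['_X', 'tmp_0'],     "strides:[1,1,1,1],padding:'SAME'")
bias_add = ('bias_add', ['conv2d', 'tmp_1'], '')
conv1    = ('relu',     ['bias_add'],        "name:'conv1'")  # registered as "conv1" by the assignment
```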
--------------------------------------------------------------------------------
/src/caffe/syntactic_analysis.y:
--------------------------------------------------------------------------------
/*
 *Author:HanRuobing
 *Created on:2018-02-09
 *Description:Semantic analysis for caffe.
 */
%{
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "util.h"
struct node* tmp_node = NULL;
%}
%start Program
%union{
    char* str;
    struct node *p;
}

%token LP RP LB RB LC RC COMMA FLOAT WELL SPACE EOL BOOL INTEGER NONE STRING PLUS MINUS MUL DIV DOT SEMICOLON LAYER
%token VARIABLE CONSTANT ASSIGNOP DTYPE NAME TOP BOTTOM TYPE PARAM SHAPE ATTR_NAME DIM POOL_ATTR
%type <str> Program ExtDef ExtDefList DIM_LIST
%type <str> Number NormalOP AttrDefs AttrDef LayerDef PARAM_ATTRS PARAM_ATTR PARAM_ATTR_VAL
/*priority*/
%right NOT COMMA
%left LP RP LB RB DOT
%%

Program:ExtDefList{$$ = "program";};
ExtDefList:ExtDef ExtDefList | {$$="ExtDefList";};

NormalOP: PLUS | MINUS | MUL |DIV {$$ = $1;};
Number: MINUS Number {$$=concat_str(2,"-",$2);}|
    FLOAT | INTEGER {$$=$1;} | Number NormalOP Number {$$ = concat_str(3,$1,$2,$3);};

ExtDef:NAME SEMICOLON STRING{$$ = $3;printf("get name define\n");}|
    LayerDef {$$ = $1;printf("tmp_node->node_name:%s\n",tmp_node->node_name);if(!strcmp(tmp_node->node_name,"prob")){printf("travel\n");travel_node(tmp_node);}};
LayerDef:LAYER LC {tmp_node = malloc(sizeof(struct node));tmp_node->pid = -1;tmp_node->input_cnt = 0;}AttrDefs RC{$$ = tmp_node->node_name;};
AttrDefs: |AttrDef AttrDefs {$$="attrs";};
AttrDef:
    NAME SEMICOLON STRING {tmp_node->node_name = $3;}|
    TYPE SEMICOLON STRING {tmp_node->op_name = $3;}|
    TOP SEMICOLON STRING {add_node($3,tmp_node);}|
    BOTTOM SEMICOLON STRING {printf("add input %s\n",$3);add_input(tmp_node,get_node($3));}|
    PARAM LC PARAM_ATTRS RC {if(!tmp_node->attrs)tmp_node->attrs = $3;
                             else tmp_node->attrs = concat_str(3,tmp_node->attrs,",",$3);};
PARAM_ATTRS: PARAM_ATTR {$$=$1;} | PARAM_ATTR PARAM_ATTRS {$$ = concat_str(3,$1,",",$2);};
PARAM_ATTR: ATTR_NAME SEMICOLON LC DIM_LIST RC {$$ = concat_str(3,$1,":",$4);}|
    TYPE SEMICOLON STRING {$$=concat_str(3,$1,":",$3);}|
    ATTR_NAME SEMICOLON PARAM_ATTR_VAL {$$ = concat_str(3,$1,":",$3);}|
    ATTR_NAME LC PARAM_ATTRS RC {$$ = concat_str(3,$1,":",$3);};
PARAM_ATTR_VAL:
    Number {$$=$1;}| POOL_ATTR {$$=$1;}|STRING{$$=$1;};
DIM_LIST: {$$ = "";}| DIM SEMICOLON Number DIM_LIST {$$ = concat_str(3,$3,",",$4);};
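To make the actions above concrete: parsing one `lenet.prototxt` layer fills `tmp_node` field by field, `top` registers the finished node under its blob name, and `bottom` wires inputs by looking a blob name up. A rough trace for one layer (illustrative pseudostate, not a debugger dump):

```python
# layer { name: "relu1"  type: "ReLU"  bottom: "ip1"  top: "ip1" }
tmp_node = {
    'node_name': 'relu1',   # NAME SEMICOLON STRING
    'op_name':   'ReLU',    # TYPE SEMICOLON STRING
    'input':     ['ip1'],   # BOTTOM: add_input(tmp_node, get_node("ip1"))
    'attrs':     None,      # no *param { ... } block here, printed as "(null)" in the IR
}
# TOP: add_node("ip1", tmp_node) rebinds the blob name "ip1" to this ReLU,
# so the next layer's bottom: "ip1" resolves to it rather than to ip1's InnerProduct.
```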
--------------------------------------------------------------------------------
/src/caffe/lexical_analysis.l:
--------------------------------------------------------------------------------
/*
 * Author:HanRuobing
 *Created on:2018-2-9
 *Description:lexical analysis for caffe's network definition (prototxt)
 */
%{
#include "stdio.h"
#include "stdlib.h"
#include "syntactic_analysis.tab.h"
#include "util.h"
%}
type int|float
POOL_ATTR MAX|MIN
NONE None
PLUS \+
MINUS -
MUL \*
DIV \/
INTEGER [1-9]+[0-9]*|0
FLOAT [0-9]+\.[0-9]*
BOOL True|False
SPACE [ \t\r]*
EOL [\n]*
SEMI ;
SEMICOLON :
COMMA ,
WELL #
ASSIGNOP =
RELOP >|<|>=|<=|==|!=
AND &&
OR \|\|
DOT \.
NOT !
LP \(
RP \)
LB \[
RB \]
LC \{
RC \}
PARAM [a-zA-Z_0-9]*param
NAME name
TYPE type
TOP top
BOTTOM bottom
LAYER layer
DIM dim
ATTR_NAME [a-zA-Z_]*
STRING [\'\"][a-zA-Z_0-9]*[\'\"]
MASS [^\{\}]+
%%
{ASSIGNOP} {return ASSIGNOP;}
{EOL} {}
{NAME} {return NAME;}
{TOP} {return TOP;}
{TYPE} {return TYPE;}
{BOTTOM} {return BOTTOM;}
{LAYER} {return LAYER;}
{SEMICOLON} {return SEMICOLON;}
{FLOAT} {printf("get float:%s\n",yytext);
         yylval.str = malloc(strlen(yytext) + 1);
         strcpy(yylval.str,yytext);
         return FLOAT;}
{INTEGER} {printf("get integer:%s\n",yytext);
           yylval.str = malloc(strlen(yytext) + 1);
           strcpy(yylval.str,yytext);
           return INTEGER;}
{POOL_ATTR} {printf("get pool direction:%s\n",yytext);
             yylval.str = malloc(strlen(yytext) + 1);
             strcpy(yylval.str,yytext);
             return POOL_ATTR;}
True|False {printf("get bool:%s\n",yytext);
            yylval.str = malloc(strlen(yytext) + 1);
            strcpy(yylval.str,yytext);
            return BOOL;}
{PLUS} {yylval.str = "+";return PLUS;}
{MINUS} {yylval.str = "-";return MINUS;}
{MUL} {yylval.str = "*";return MUL;}
{DIV} {yylval.str = "/";return DIV;}
{DOT} {return DOT;}
{NONE} {yylval.str = "None";return NONE;}
{LP} {yylval.str = "(";return LP;}
{RP} {yylval.str = ")";return RP;}
{LB} {yylval.str = "[";return LB;}
{RB} {yylval.str = "]";return RB;}
{LC} {printf("get lc\n");return LC;}
{RC} {printf("get rc\n");return RC;}
{DIM} {return DIM;}
{COMMA} {yylval.str = ",";return COMMA;}
{WELL} {yylval.str = "#";return WELL;}
{PARAM} {printf("get param:%s\n",yytext);
         yylval.str = malloc(strlen(yytext) + 1);
         strcpy(yylval.str,yytext);
         return PARAM;}
{STRING} {printf("get string:%s\n",yytext);
          yylval.str = malloc(strlen(yytext) + 1);
          strcpy(yylval.str,yytext);
          yylval.str[strlen(yytext)-1] = '\0';/* drop the closing quote */
          yylval.str++;/* skip the opening quote */
          return STRING;}
{SPACE} {}
{ATTR_NAME} {printf("get attr name:%s\n",yytext);
             yylval.str = malloc(strlen(yytext) + 1);
             strcpy(yylval.str,yytext);
             return ATTR_NAME;}
%%
int yywrap()
{
    return 1;
}
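One lexer detail worth a note: the `STRING` action strips the surrounding quotes in place, overwriting the closing quote with `'\0'` and advancing the returned pointer past the opening one. That is the C counterpart of Python's slice:

```python
def strip_quotes(token):
    return token[1:-1]   # "'ReLU'" -> "ReLU"
```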
"conv2" 58 | type: "Convolution" 59 | bottom: "drop1" 60 | top: "conv2" 61 | convolution_param { 62 | num_output: 256 63 | pad: 2 64 | kernel_size: 5 65 | group: 2 66 | } 67 | } 68 | layer { 69 | name: "relu2" 70 | type: "ReLU" 71 | bottom: "conv2" 72 | top: "conv2" 73 | } 74 | layer { 75 | name: "pool2" 76 | type: "Pooling" 77 | bottom: "conv2" 78 | top: "pool2" 79 | pooling_param { 80 | pool: MAX 81 | kernel_size: 3 82 | stride: 2 83 | } 84 | } 85 | layer { 86 | name: "norm2" 87 | type: "LRN" 88 | bottom: "pool2" 89 | top: "norm2" 90 | lrn_param { 91 | local_size: 5 92 | alpha: 0.0001 93 | beta: 0.75 94 | } 95 | } 96 | layer { 97 | name: "drop2" 98 | type: "Dropout" 99 | bottom: "norm2" 100 | top: "drop2" 101 | dropout_param { 102 | dropout_ratio: 0.5 103 | } 104 | } 105 | layer { 106 | name: "conv3" 107 | type: "Convolution" 108 | bottom: "drop2" 109 | top: "conv3" 110 | convolution_param { 111 | num_output: 384 112 | pad: 1 113 | kernel_size: 3 114 | } 115 | } 116 | layer { 117 | name: "relu3" 118 | type: "ReLU" 119 | bottom: "conv3" 120 | top: "conv3" 121 | } 122 | layer { 123 | name: "pool3" 124 | type: "Pooling" 125 | bottom: "conv3" 126 | top: "pool3" 127 | pooling_param { 128 | pool: MAX 129 | kernel_size: 3 130 | stride: 2 131 | } 132 | } 133 | layer { 134 | name: "norm3" 135 | type: "LRN" 136 | bottom: "pool3" 137 | top: "norm3" 138 | lrn_param { 139 | local_size: 5 140 | alpha: 0.0001 141 | beta: 0.75 142 | } 143 | } 144 | layer { 145 | name: "drop3" 146 | type: "Dropout" 147 | bottom: "norm3" 148 | top: "drop3" 149 | dropout_param { 150 | dropout_ratio: 0.5 151 | } 152 | } 153 | 154 | layer { 155 | name: "fc1" 156 | type: "InnerProduct" 157 | bottom: "drop3" 158 | top: "fc1" 159 | inner_product_param { 160 | num_output: 4096 161 | } 162 | } 163 | layer { 164 | name: "relu4" 165 | type: "ReLU" 166 | bottom: "fc1" 167 | top: "relu4" 168 | } 169 | layer { 170 | name: "fc2" 171 | type: "InnerProduct" 172 | bottom: "relu4" 173 | top: "fc2" 174 | inner_product_param { 175 | num_output: 4096 176 | } 177 | } 178 | layer { 179 | name: "relu5" 180 | type: "ReLU" 181 | bottom: "fc2" 182 | top: "relu5" 183 | } 184 | layer { 185 | name: "fc3" 186 | type: "InnerProduct" 187 | bottom: "relu5" 188 | top: "fc3" 189 | inner_product_param { 190 | num_output: 4096 191 | } 192 | } 193 | layer { 194 | name: "prob" 195 | type: "Softmax" 196 | bottom: "fc3" 197 | top: "prob" 198 | } -------------------------------------------------------------------------------- /src/caffe/util.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Author:HanRuobing 3 | * Created on :2018-2-11 4 | * Description : Refer to util.h 5 | */ 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "util.h" 11 | #define MAXN 100 12 | char* name_list[MAXN]; 13 | int id_list[MAXN]; 14 | struct node* node_list[MAXN]; 15 | int tail = 0; 16 | int edge_num = 0; 17 | int node_num = 0; 18 | int edge_start[MAXN]; 19 | int edge_end[MAXN]; 20 | int visit[MAXN]; 21 | char* node_name[MAXN]; 22 | char* node_attr[MAXN]; 23 | char* node_op[MAXN]; 24 | struct node* new_node(char* node_op_name,int num,...)//construct node of NN DAG 25 | { 26 | struct node* tmp = (struct node*)malloc(sizeof(struct node)); 27 | va_list valist; 28 | va_start(valist,num); 29 | tmp->op_name = node_op_name; 30 | tmp->input_cnt = num; 31 | tmp->input = malloc(num * sizeof(struct node*)); 32 | tmp->pid = -1;//use to dfs 33 | int i; 34 | for(i = 0;iinput[i] = va_arg(valist, struct node*); 36 | va_end(valist); 37 | 
return tmp; 38 | } 39 | void dfs(struct node* root) 40 | { 41 | //printf("dfs %s\n",root->node_name); 42 | if(visit[root->pid])return;//already visited 43 | visit[root->pid] = 1; 44 | node_name[root->pid] = root->node_name; 45 | node_attr[root->pid] = root->attrs; 46 | node_op[root->pid] = root->op_name; 47 | int i = 0; 48 | for(i = 0;iinput_cnt;i++) 49 | { 50 | if(root->input[i]->pid == -1) 51 | root->input[i]->pid = node_num++; 52 | edge_end[edge_num] = root->pid; 53 | edge_start[edge_num] = root->input[i]->pid; 54 | edge_num++; 55 | } 56 | for(i = 0;iinput_cnt;i++) 57 | dfs(root->input[i]); 58 | } 59 | void travel_node(struct node* start) 60 | { 61 | edge_num = node_num = 0; 62 | memset(visit,0,sizeof(visit)); 63 | int i; 64 | /* 65 | printf("name=%s\n",start->node_name); 66 | printf("op name=%s\nattrs=%s\n",start->op_name,start->attrs); 67 | printf("son_num=%d\n",start->input_cnt); 68 | 69 | for(i = 0;iinput_cnt;i++) 70 | travel_node(start->input[i]); 71 | */ 72 | start->pid = node_num++; 73 | dfs(start); 74 | //printf("after dfs\n"); 75 | FILE* fp; 76 | if((fp=fopen("net_file.txt","w"))==NULL) 77 | { 78 | printf("can not open file\n"); 79 | return; 80 | } 81 | fprintf(fp,"%d %d\n",node_num,edge_num); 82 | for(i = 0;inode_name = variable_name; 142 | return res; 143 | } 144 | return node_list[i]; 145 | } 146 | 147 | void add_node(char* node_name,struct node* p) 148 | { 149 | printf("add node %s\n",node_name); 150 | int i; 151 | for(i = 0; iinput_cnt); 163 | struct node** new_input = malloc((p->input_cnt + 1) * sizeof(struct node*)); 164 | memcpy(new_input,p->input,p->input_cnt * sizeof(struct node*)); 165 | new_input[p->input_cnt] = input_node; 166 | ++(p->input_cnt); 167 | p->input = new_input; 168 | printf("new cnt = %d\n",p->input_cnt); 169 | } 170 | int main() 171 | { 172 | return yyparse(); 173 | } 174 | -------------------------------------------------------------------------------- /src/tf/lexical_analysis.l: -------------------------------------------------------------------------------- 1 | /* 2 | * Author:HanRuobing 3 | *Created on:2018-2-9 4 | *Description:lexical analysis for tensorflow's network construction 5 | */ 6 | %{ 7 | #include "stdio.h" 8 | #include "stdlib.h" 9 | #include "syntactic_analysis.tab.h" 10 | #include "util.h" 11 | %} 12 | type int|float 13 | NONE None 14 | STRUCT struct 15 | RETURN return 16 | IF if 17 | ELSE else 18 | WHILE while 19 | PLUS \+ 20 | MINIMIZE minimize 21 | MINUS - 22 | MUL \* 23 | DIV \/ 24 | INTEGER [1-9]+[0-9]*|0 25 | FLOAT [0-9]+\.[0-9]* 26 | BOOL True|False 27 | SPACE [ \t\r]* 28 | EOL \n 29 | COLON : 30 | SEMI ; 31 | COMMA , 32 | WELL # 33 | ASSIGNOP = 34 | RELOP >|<|>=|<=|==|!= 35 | AND && 36 | OR \|\| 37 | DOT \. 38 | NOT ! 
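Note the edge orientation `travel_node` writes: `edge_start` holds the input's id and `edge_end` the consumer's, so every `src dst` pair in `net_file.txt` points along the dataflow. The pair `1 0` in the IR above therefore means node 1 (`fc8`) feeds node 0 (`prob`). With the `read_ir` sketch from earlier this is easy to eyeball (hypothetical usage):

```python
nodes, edges = read_ir('net_file.txt')
for s, d in edges:
    print(nodes[s][0], '->', nodes[d][0])   # fc8 -> prob, drop7 -> fc8, ...
```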
--------------------------------------------------------------------------------
/src/tf/lexical_analysis.l:
--------------------------------------------------------------------------------
/*
 * Author:HanRuobing
 *Created on:2018-2-9
 *Description:lexical analysis for tensorflow's network construction
 */
%{
#include "stdio.h"
#include "stdlib.h"
#include "syntactic_analysis.tab.h"
#include "util.h"
%}
type int|float
NONE None
STRUCT struct
RETURN return
IF if
ELSE else
WHILE while
PLUS \+
MINIMIZE minimize
MINUS -
MUL \*
DIV \/
INTEGER [1-9]+[0-9]*|0
FLOAT [0-9]+\.[0-9]*
BOOL True|False
SPACE [ \t\r]*
EOL \n
COLON :
SEMI ;
COMMA ,
WELL #
ASSIGNOP =
RELOP >|<|>=|<=|==|!=
AND &&
OR \|\|
DOT \.
NOT !
LP \(
RP \)
LB \[
RB \]
LC \{
RC \}
WITH with
NAMESCOPE tf\.name_scope
ZEROS tf\.zeros
UNSTACK tf\.unstack
MATMUL tf\.matmul
CONSTANT tf\.constant
RESHAPE tf\.reshape
MAX_POOL tf\.nn\.max_pool
DTYPE tf\.float32
PLACEHOLDER tf\.placeholder
RELU tf\.nn\.relu
BIAS_ADD tf\.nn\.bias_add
CONV2D tf\.nn\.conv2d
SOFTMAX tf\.nn\.softmax
TFVARIABLE tf\.Variable
LRN tf\.nn\.lrn
DROPOUT tf\.nn\.dropout
RANDOM_NORMAL tf\.random_normal
TRUNCATED_NORMAL tf\.truncated_normal
ADAMOPTIMIZER tf\.train\.AdamOptimizer
GDOPTIMIZER tf\.train\.GradientDescentOptimizer
SOFTMAX_CROSS_ENTROPY_WITH_LOGITS tf\.nn\.softmax_cross_entropy_with_logits
SPARSE_SOFTMAX_CROSS_ENTROPY tf\.losses\.sparse_softmax_cross_entropy
REDUCE_MEAN tf\.reduce_mean
BASICLSTMCELL tf\.contrib\.rnn\.BasicLSTMCell
STATIC_RNN tf\.nn\.static_rnn
VARIABLE [a-z_A-Z][a-zA-Z_0-9]*
STRING [\'\"][a-zA-Z_0-9]*[\'\"]
AERROR .
COMMENT #[^\n]*\n
MASS [.]*
%%
{ASSIGNOP} {return ASSIGNOP;}
{EOL} {}
{RELU} {return RELU;}
{BIAS_ADD} {return BIAS_ADD;}
{CONV2D} {return CONV2D;}
{TFVARIABLE} {return TFVARIABLE;}
{RANDOM_NORMAL} {printf("get random normal");return RANDOM_NORMAL;}
{TRUNCATED_NORMAL} {return TRUNCATED_NORMAL;}
{WITH} {return WITH;}
{NAMESCOPE} {return NAMESCOPE;}
{FLOAT} {printf("get float:%s\n",yytext);
         yylval.str = malloc(strlen(yytext) + 1);
         strcpy(yylval.str,yytext);
         return FLOAT;}
{DTYPE} {printf("get dtype:%s\n",yytext);
         yylval.str = malloc(strlen(yytext) + 1);
         strcpy(yylval.str,yytext);
         return DTYPE;}
{INTEGER} {printf("get integer:%s\n",yytext);
           yylval.str = malloc(strlen(yytext) + 1);
           strcpy(yylval.str,yytext);
           return INTEGER;}
True|False {printf("get bool:%s\n",yytext);
            yylval.str = malloc(strlen(yytext) + 1);
            strcpy(yylval.str,yytext);
            return BOOL;}
{PLUS} {yylval.str = "+";return PLUS;}
{MINUS} {yylval.str = "-";return MINUS;}
{MUL} {yylval.str = "*";return MUL;}
{DIV} {yylval.str = "/";return DIV;}
{DOT} {return DOT;}
{COLON} {return COLON;}
{NONE} {yylval.str = "None";return NONE;}
{PLACEHOLDER} {yylval.str = "placeholder";return PLACEHOLDER;}
{UNSTACK} {yylval.str ="unstack";return UNSTACK;}
{MATMUL} {yylval.str = "matmul";return MATMUL;}
{CONSTANT} {yylval.str = "constant";return CONSTANT;}
{RESHAPE} {yylval.str = "reshape";return RESHAPE;}
{MAX_POOL} {yylval.str = "maxpool";return MAX_POOL;}
{LRN} {yylval.str = "LRN";return LRN;}
{DROPOUT} {yylval.str = "dropout";return DROPOUT;}
{ADAMOPTIMIZER} {yylval.str = "adamoptimizer";return ADAMOPTIMIZER;}
{GDOPTIMIZER} {yylval.str ="gdoptimizer";return GDOPTIMIZER;}
{SOFTMAX_CROSS_ENTROPY_WITH_LOGITS} {yylval.str = "softmax_cross_entropy_with_logits";return SOFTMAX_CROSS_ENTROPY_WITH_LOGITS;}
{REDUCE_MEAN} {yylval.str = "reduce_mean";return REDUCE_MEAN;}
{BASICLSTMCELL} {yylval.str = "basicLSTMcell";return BASICLSTMCELL;}
{STATIC_RNN} {yylval.str = "static_rnn";return STATIC_RNN;}
{SOFTMAX} {yylval.str = "softmax";return SOFTMAX;}
{SPARSE_SOFTMAX_CROSS_ENTROPY} {yylval.str = "sparse_softmax_cross_entropy";return SPARSE_SOFTMAX_CROSS_ENTROPY;}
{ZEROS} {yylval.str = "zeros";return ZEROS;}
{LP} {yylval.str = "(";return LP;}
{RP} {yylval.str = ")";return RP;}
{LB} {yylval.str = "[";return LB;}
{RB} {yylval.str = "]";return RB;}
{MINIMIZE} {return MINIMIZE;}
{COMMA} {yylval.str = ",";return COMMA;}
{WELL} {yylval.str = "#";return WELL;}
{COMMENT} {printf("get comment:%s\n",yytext);
           yylval.str = malloc(strlen(yytext) + 1);
           strcpy(yylval.str,yytext);
           return COMMENT;}
{VARIABLE} {printf("get variable:%s\n",yytext);
            yylval.str = malloc(strlen(yytext) + 1);
            strcpy(yylval.str,yytext);
            return VARIABLE;}
{STRING} {printf("get string:%s\n",yytext);
          yylval.str = malloc(strlen(yytext) + 1);
          strcpy(yylval.str,yytext);
          return STRING;}
{MASS} {printf("mass:%s\n",yytext);
        yylval.str = malloc(strlen(yytext) + 1);
        strcpy(yylval.str,yytext);
        return MASS;}
{SPACE} {}
%%
int yywrap()
{
    return 1;
}
--------------------------------------------------------------------------------
/src/tf/util.c:
--------------------------------------------------------------------------------
/*
 * Author:HanRuobing
 * Created on :2018-2-11
 * Description : Refer to util.h
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include "util.h"
#define MAXN 100
char* name_list[MAXN];
int id_list[MAXN];
struct node* node_list[MAXN];
int tail = 0;
int edge_num = 0;
int node_num = 0;
int tmp_cnt = 0;
int edge_start[MAXN];
int edge_end[MAXN];
int visit[MAXN];
char* node_name[MAXN];
char* node_attr[MAXN];
char* node_op[MAXN];
char* concat_str(int num,...);
char* itoa(int nValue)
{
    char* szBuffer = (char *)malloc(12);//enough for any 32-bit int
    memset(szBuffer, 0, 12);
    sprintf(szBuffer, "%d", nValue);
    return szBuffer;
}
struct node* new_node(char* node_op_name,int num,...)//construct a node of the NN DAG
{
    struct node* tmp = (struct node*)malloc(sizeof(struct node));
    va_list valist;
    va_start(valist,num);
    tmp->op_name = node_op_name;
    tmp->input_cnt = num;
    tmp->input = malloc(num * sizeof(struct node*));
    tmp->pid = -1;//used by dfs
    int i;
    for(i = 0;i<num;i++)
        tmp->input[i] = va_arg(valist, struct node*);
    va_end(valist);
    return tmp;
}
void dfs(struct node* root)
{
    if(visit[root->pid])return;//already visited
    visit[root->pid] = 1;
    if(root->node_name)
        node_name[root->pid] = root->node_name;
    else
        node_name[root->pid] = concat_str(2,"tmp_",itoa(tmp_cnt++));
    node_attr[root->pid] = root->attrs;
    node_op[root->pid] = root->op_name;
    int i = 0;
    for(i = 0;i<root->input_cnt;i++)
    {
        if(root->input[i]->pid == -1)
            root->input[i]->pid = node_num++;
        edge_end[edge_num] = root->pid;
        edge_start[edge_num] = root->input[i]->pid;
        edge_num++;
    }
    for(i = 0;i<root->input_cnt;i++)
        dfs(root->input[i]);
}
void travel_node(struct node* start)
{
    edge_num = node_num = 0;
    tmp_cnt = 0;
    memset(visit,0,sizeof(visit));
    int i;
    /*
    printf("name=%s\n",start->node_name);
    printf("op name=%s\nattrs=%s\n",start->op_name,start->attrs);
    printf("son_num=%d\n",start->input_cnt);

    for(i = 0;i<start->input_cnt;i++)
        travel_node(start->input[i]);
    */
    start->pid = node_num++;
    dfs(start);
    FILE* fp;
    if((fp=fopen("net_file.txt","w"))==NULL)
    {
        printf("can not open file\n");
        return;
    }
    fprintf(fp,"%d %d\n",node_num,edge_num);
    for(i = 0;i<node_num;i++)//three lines per node: name, op, attrs
        fprintf(fp,"%s\n%s\n%s\n",node_name[i],node_op[i],node_attr[i]);
    for(i = 0;i<edge_num;i++)//one "input consumer" pair per edge
        fprintf(fp,"%d %d\n",edge_start[i],edge_end[i]);
    fclose(fp);
}
void yyerror(char* s,...)
{
    printf("error: %s near '%s'\n",s,yytext);
}
char* concat_str(int num,...)
{
    va_list valist;
    int i,len = 1;
    va_start(valist,num);
    for(i = 0;i<num;i++)
        len += strlen(va_arg(valist,char*));
    va_end(valist);
    char* res = malloc(len);
    res[0] = '\0';
    va_start(valist,num);
    for(i = 0;i<num;i++)
        strcat(res,va_arg(valist,char*));
    va_end(valist);
    return res;
}
struct node* get_node(char* variable_name)
{
    int i;
    for(i = 0;i<tail;i++)
        if(!strcmp(name_list[i],variable_name))
            break;
    if(i == tail)//not registered yet: hand back a fresh leaf node
    {
        struct node* res = new_node("variable",0);
        res->node_name = variable_name;
        return res;
    }
    return node_list[i];
}

void add_node(char* node_name,struct node* p)
{
    int i;
    for(i = 0; i<tail;i++)
        if(!strcmp(name_list[i],node_name))
        {
            node_list[i] = p;//name reuse: rebind the name to the newest node
            return;
        }
    name_list[tail] = node_name;
    node_list[tail] = p;
    tail++;
}
void add_input(struct node* p,struct node* input_node)
{
    printf("old cnt = %d\n",p->input_cnt);
    struct node** new_input = malloc((p->input_cnt + 1) * sizeof(struct node*));
    memcpy(new_input,p->input,p->input_cnt * sizeof(struct node*));
    new_input[p->input_cnt] = input_node;
    ++(p->input_cnt);
    p->input = new_input;
    printf("new cnt = %d\n",p->input_cnt);
}
int main()
{
    return yyparse();
}
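Unlike the Caffe front end, the TensorFlow traversal must name nodes that were never bound to a Python variable (e.g. the inner `tf.Variable(tf.random_normal(...))` calls in `demo.tf`): `dfs` falls back to `concat_str(2,"tmp_",itoa(tmp_cnt++))`, yielding `tmp_0`, `tmp_1`, ... in discovery order. The same scheme in Python, as an equivalence sketch:

```python
import itertools
names = (f'tmp_{i}' for i in itertools.count())
next(names)   # 'tmp_0', then 'tmp_1', ...
```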
--------------------------------------------------------------------------------
/src/tf/syntactic_analysis.y:
--------------------------------------------------------------------------------
/*
 *Author:HanRuobing
 *Created on:2018-02-09
 *Description:Semantic analysis for tensorflow.
 */
%{
#include <stdio.h>
#include <string.h>
#include "util.h"
struct node* tmp_node = NULL;
%}
%start Program
%union{
    char* str;
    struct node *p;
}

%token LP RP LB RB COMMA FLOAT WELL SPACE EOL BOOL INTEGER NONE STRING PLUS MINUS MUL DIV DOT WITH NAMESCOPE COLON
%token VARIABLE MATMUL CONSTANT MASS ASSIGNOP PLACEHOLDER DTYPE RESHAPE RELU BIAS_ADD CONV2D TFVARIABLE RANDOM_NORMAL MAX_POOL LRN DROPOUT ADAMOPTIMIZER MINIMIZE SOFTMAX_CROSS_ENTROPY_WITH_LOGITS REDUCE_MEAN COMMENT UNSTACK BASICLSTMCELL STATIC_RNN SOFTMAX GDOPTIMIZER TRUNCATED_NORMAL ZEROS SPARSE_SOFTMAX_CROSS_ENTROPY
%type <str> Program ExtDef ExtDefList List Serial Serial_Element KWARG_LIST Index
%type <str> Number NormalOP Optimizer
%type <p> EXPRESSION

/*priority*/
%right NOT COMMA
%left LP RP LB RB DOT
%%

Program:ExtDefList{$$ = "program";};
ExtDefList:ExtDef ExtDefList | {$$="extdeflist";};
NormalOP: PLUS | MINUS | MUL |DIV {$$ = $1;};
Number: MINUS Number {$$=concat_str(2,"-",$2);}|
    FLOAT | INTEGER {$$=$1;} | Number NormalOP Number {$$ = concat_str(3,$1,$2,$3);};
ExtDef:VARIABLE ASSIGNOP EXPRESSION{$$ = $1;$3->node_name = $1;add_node($1,$3);if(!strcmp("loss",$1)){printf("travel\n");travel_node($3);}}|
    VARIABLE COMMA VARIABLE ASSIGNOP EXPRESSION {$$=$1;$5->node_name = concat_str(3,$1,":",$3);add_node($1,$5);add_node($3,$5);}|
    VARIABLE ASSIGNOP Number {$$ = $1;}|
    COMMENT |
    WITH NAMESCOPE LP STRING RP COLON;
Index: LB Number RB {$$=concat_str(3,$1,$2,$3);}| Index LB Number RB {$$ = concat_str(4,$1,$2,$3,$4);};
EXPRESSION:
    VARIABLE {$$ = get_node($1);}|
    VARIABLE Index {$$ = new_node("indexed",1,get_node($1));$$->attrs = $2;}|
    MATMUL LP EXPRESSION COMMA EXPRESSION {tmp_node = new_node("matmul",2,$3,$5);} KWARG_LIST RP{$$ = tmp_node;$$->attrs = $7;tmp_node=NULL;}|
    CONSTANT LP List RP{$$ = new_node("constant",0);} |
    RESHAPE LP EXPRESSION {tmp_node = new_node("reshape",1,$3);} KWARG_LIST RP {$$ = tmp_node;$$->attrs = $5;tmp_node=NULL;}|
    PLACEHOLDER LP DTYPE {tmp_node = new_node("placeholder",0);}KWARG_LIST RP {$$ = tmp_node;$$->attrs = $5;tmp_node=NULL;}|
    RANDOM_NORMAL LP List {tmp_node = new_node("random_normal",0);}KWARG_LIST RP {$$=tmp_node;$$->attrs = concat_str(2,$3,$5);}|
    TRUNCATED_NORMAL LP List {tmp_node = new_node("truncated_normal",0);} KWARG_LIST RP {$$=tmp_node;$$->attrs = concat_str(2,$3,$5);}|
    ZEROS LP List {tmp_node = new_node("zeros",0);} KWARG_LIST RP {$$=tmp_node;$$->attrs = concat_str(2,$3,$5);}|
    TFVARIABLE LP EXPRESSION {tmp_node = new_node("variable",1,$3);}KWARG_LIST RP {$$=tmp_node;$$->attrs = $5;tmp_node=NULL;}|
    CONV2D LP EXPRESSION COMMA EXPRESSION {tmp_node = new_node("conv2d",2,$3,$5);}KWARG_LIST RP {$$ = tmp_node;$$->attrs = $7;tmp_node=NULL;}|
    BIAS_ADD LP EXPRESSION COMMA EXPRESSION {tmp_node = new_node("bias_add",2,$3,$5);}KWARG_LIST RP {$$ = tmp_node;$$->attrs=$7;tmp_node=NULL;}|
    RELU LP EXPRESSION {tmp_node = new_node("relu",1,$3);}KWARG_LIST RP {$$ = tmp_node;$$->attrs = $5;tmp_node=NULL;}|
    MAX_POOL LP EXPRESSION{tmp_node = new_node("max_pool",1,$3);} KWARG_LIST RP {$$ = tmp_node;$$->attrs = $5;tmp_node=NULL;}|
    LRN LP EXPRESSION {tmp_node = new_node("lrn",1,$3);}KWARG_LIST RP {$$=tmp_node;$$->attrs = $5;tmp_node=NULL;}|
    DROPOUT LP EXPRESSION COMMA EXPRESSION {tmp_node = new_node("dropout",2,$3,$5);}KWARG_LIST RP {$$=tmp_node;$$->attrs = $7;tmp_node=NULL;}|
    EXPRESSION NormalOP EXPRESSION {$$ = new_node($2,2,$1,$3);}|
    Optimizer DOT MINIMIZE LP EXPRESSION RP {$$ = new_node("optimize",1,$5);$$->attrs = $1;}|
    Optimizer {$$=new_node("optimizer",0);$$->attrs = $1;}|
    VARIABLE DOT MINIMIZE LP EXPRESSION RP {$$=new_node("optimize",2,get_node($1),$5);}|
    SOFTMAX_CROSS_ENTROPY_WITH_LOGITS LP EXPRESSION COMMA EXPRESSION {tmp_node = new_node("softmax_cross_entropy_with_logits",2,$3,$5);}KWARG_LIST RP {$$=tmp_node;$$->attrs = $7;tmp_node=NULL;}|
    SPARSE_SOFTMAX_CROSS_ENTROPY LP EXPRESSION COMMA EXPRESSION {tmp_node = new_node("sparse_softmax_cross_entropy",2,$3,$5);} KWARG_LIST RP{$$=tmp_node;$$->attrs=$7;tmp_node=NULL;}|
    REDUCE_MEAN LP EXPRESSION RP {$$=new_node("reduce_mean",1,$3);}|
    UNSTACK LP EXPRESSION {tmp_node=new_node("unstack",1,$3);}KWARG_LIST RP {$$=tmp_node;$$->attrs = $5;tmp_node=NULL;}|
    BASICLSTMCELL LP Serial RP {$$=new_node("basicLSTMcell",0);$$->attrs = $3;}|
    STATIC_RNN LP EXPRESSION COMMA EXPRESSION {tmp_node=new_node("static_rnn",2,$3,$5);}KWARG_LIST RP {$$=tmp_node;$$->attrs=$7;tmp_node=NULL;}|
    SOFTMAX LP EXPRESSION {tmp_node=new_node("softmax",1,$3);}KWARG_LIST RP{$$=tmp_node;$$->attrs=$5;tmp_node=NULL;};
Optimizer:
    GDOPTIMIZER LP Serial RP {$$=concat_str(2,$1,$3);}|
    ADAMOPTIMIZER LP Serial RP {$$ = concat_str(2,$1,$3);};
KWARG_LIST: {$$ = "";}|
    COMMA Serial {$$ = $2;};
Serial :{$$="";}|
    Serial_Element COMMA Serial {printf("serial:%s %s\n",$1,$3);$$ = concat_str(3,$1,",",$3);} |
    Serial_Element {$$ = $1;};
Serial_Element:Number | NONE |List {$$ = $1;}|
    VARIABLE ASSIGNOP VARIABLE {$$ = concat_str(3,$1,":",$3);if(tmp_node) add_input(tmp_node,get_node($3));}|
    VARIABLE ASSIGNOP STRING {$$=concat_str(3,$1,":",$3);}|
    VARIABLE ASSIGNOP Number {$$=concat_str(3,$1,":",$3);}|
    VARIABLE ASSIGNOP DTYPE {$$=concat_str(3,$1,":",$3);}|
    VARIABLE ASSIGNOP List {$$=concat_str(3,$1,":",$3);}|
    EXPRESSION {$$=$1->node_name;};
List : LB Serial RB {$$ = concat_str(3,"[",$2,"]");};
/*
COMMENT: SINGLE_QUOTE SINGLE_QUOTE SINGLE_QUOTE Comment_msg_single SINGLE_QUOTE SINGLE_QUOTE SINGLE_QUOTE |
    DOUBLE_QUOTE DOUBLE_QUOTE DOUBLE_QUOTE Comment_msg_double DOUBLE_QUOTE DOUBLE_QUOTE DOUBLE_QUOTE {$$ = $4;};
Comment_msg_single: PURE_COMMENT_SINGLE | Comment_msg_single SINGLE_QUOTE Comment_msg_single{$$ = concat_str(3,$1,"\'",$3);} | Comment_msg_single SINGLE_QUOTE SINGLE_QUOTE Comment_msg_single{$$ = concat(3,$1,"\'\'",$4);};
Comment_msg_double: PURE_COMMENT_DOUBLE | Comment_msg_double DOUBLE_QUOTE Comment_msg_double{$$ = concat_str(3,$1,"\'",$3);} | Comment_msg_double DOUBLE_QUOTE DOUBLE_QUOTE Comment_msg_double{$$ = concat(3,$1,"\'\'",$4);};
*/
--------------------------------------------------------------------------------
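After a successful run, `net_file.txt` holds the common IR and `src/draw_dot.py` turns it into `sample.dot`; assuming Graphviz is installed, something like `dot -Tpng sample.dot -o sample.png` renders the recovered network graph. `src/tf/try_vis.py` offers a networkx/matplotlib view of the same file.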