├── LICENSE ├── README.md ├── dev ├── ap_lp.py ├── ap_main.py ├── ssc.py ├── sscover_lp.py ├── utils.py └── vgg16_example.py ├── exps ├── data │ └── mnist.pkl.gz ├── exp5-1 │ ├── Result.txt │ └── demo-single-neuron-attack.py ├── exp5-2 │ └── exp-random-testing.py ├── exp5-3 │ ├── DSC │ │ ├── exp-dsc.py │ │ └── results-dsc.txt │ ├── DVC │ │ ├── exp-dvc.py │ │ └── results-dvc.txt │ ├── SSC-top-weights │ │ ├── exp-ssc-k.py │ │ └── results-kappa10.txt │ ├── SSC │ │ ├── exp-ssc.py │ │ └── results.txt │ ├── SVC │ │ ├── exp-svc.py │ │ └── results-svc.txt │ ├── lp-call-runtime │ │ └── exp-get-runtime.py │ └── plots │ │ ├── input-distance-plots │ │ ├── 10-ss-results.txt │ │ ├── 8-ss-results.txt │ │ ├── 9-ss-results.txt │ │ ├── parse-and-plot.py │ │ └── ss-distance-map.pdf │ │ ├── layers │ │ ├── 10-ss-results.txt │ │ ├── 8-ss-results.txt │ │ ├── 9-ss-results.txt │ │ ├── layerwise-ss-bugs.pdf │ │ ├── layerwise-ss-coverage.pdf │ │ ├── plot-bar.py │ │ └── plot-bar2.py │ │ └── ss-top10 │ │ ├── plot-ss-vs-ss-top-10.py │ │ ├── ss-top10.pdf │ │ └── ss-vs-ss-top-10.csv ├── exp5-4 │ ├── cnn-results │ │ ├── cnn1-results.txt │ │ ├── cnn2-results.txt │ │ └── parse-results.py │ ├── cnn1-exp-conv-ss.py │ ├── cnn2-exp-conv-ss.py │ └── cnns │ │ ├── cnn1-biases-conv.npy │ │ ├── cnn1-biases-mnist.npy │ │ ├── cnn1-weights-conv.npy │ │ ├── cnn1-weights-mnist.npy │ │ ├── cnn2-biases-conv.npy │ │ ├── cnn2-biases-mnist.npy │ │ ├── cnn2-weights-conv.npy │ │ └── cnn2-weights-mnist.npy └── random-nn │ ├── README.txt │ ├── b_mnist_nnet_index0-67-22-63.txt │ ├── b_mnist_nnet_index1-59-94-56-45.txt │ ├── b_mnist_nnet_index2-72-61-70-77.txt │ ├── b_mnist_nnet_index3-65-99-87-23-31.txt │ ├── b_mnist_nnet_index4-49-61-90-21-48.txt │ ├── b_mnist_nnet_index5-97-83-32.txt │ ├── b_mnist_nnet_index6-33-95-67-43-76.txt │ ├── b_mnist_nnet_index7-78-62-73-47.txt │ ├── b_mnist_nnet_index8-87-33-62.txt │ ├── b_mnist_nnet_index9-76-55-74-98-75.txt │ ├── nnets.txt │ ├── w_mnist_nnet_index0-67-22-63.txt │ ├── 
w_mnist_nnet_index1-59-94-56-45.txt │ ├── w_mnist_nnet_index2-72-61-70-77.txt │ ├── w_mnist_nnet_index3-65-99-87-23-31.txt │ ├── w_mnist_nnet_index4-49-61-90-21-48.txt │ ├── w_mnist_nnet_index5-97-83-32.txt │ ├── w_mnist_nnet_index6-33-95-67-43-76.txt │ ├── w_mnist_nnet_index7-78-62-73-47.txt │ ├── w_mnist_nnet_index8-87-33-62.txt │ └── w_mnist_nnet_index9-76-55-74-98-75.txt └── src ├── cnnett.py ├── conv_lp.py ├── deepcover_keras.py ├── lp.py ├── nnett.py └── util.py /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2018, Youcheng Sun 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DeepCover 2 | Uncover Bugs in Deep Learning 3 | 4 | DeepCover is a coverage-based bug finder for deep learning applications. 5 | It is able to provide metrics for evaluating the robustness of a Deep Neural Network (DNN). 6 | The basic idea of DeepCover is easy. For any neuron in the DNN, if its state changes, 7 | there must be a cause for it: DeepCover aims to COVER a change and its immediate causes. 8 | More details can be found in the paper [Testing Deep Neural Networks](https://arxiv.org/abs/1803.04792). 
9 | 10 | ## To run the tool: 11 | These routine machine learning Python packages are needed and IBM CPLEX is used as the Linear Programming solver 12 | 13 | -------------------------------------------------------------------------------- /dev/ap_lp.py: -------------------------------------------------------------------------------- 1 | import cplex 2 | import sys 3 | import numpy as np 4 | 5 | from utils import * 6 | 7 | epsilon=1.0/(255) 8 | 9 | ## The AP method encodes a DNN instance into LP constraints 10 | ## im: the reference input 11 | ## target_o: the target output 12 | ## d_vect: the distance in each dimension 13 | ## Max: the optimisation goal (max or min) 14 | 15 | def AP(model, activations, im, target_o, d_vect=None, Max=True): 16 | 17 | target_var='' 18 | var_names_vect=[] 19 | objective=[] 20 | lower_bounds=[] 21 | upper_bounds=[] 22 | var_names=[] 23 | 24 | print 'To encode the DNN instance...' 25 | 26 | for l in range(0, len(model.layers)): 27 | 28 | if l==len(model.layers)-1: continue ## skip the softmax 29 | 30 | layer=model.layers[l] 31 | 32 | if is_conv_layer(layer) or is_maxpooling_layer(layer): 33 | if l==0: 34 | isp=layer.input.shape 35 | var_names.append(np.empty((1, isp[1], isp[2], isp[3]), dtype="S40")) 36 | for I in range(0, 1): 37 | for J in range(0, isp[1]): 38 | for K in range(0, isp[2]): 39 | for L in range(0, isp[3]): 40 | var_name='x_{0}_{1}_{2}_{3}_{4}'.format(l, I, J, K, L) 41 | objective.append(0) 42 | lower_bounds.append(-cplex.infinity) 43 | upper_bounds.append(cplex.infinity) 44 | var_names[l][I][J][K][L]=var_name 45 | var_names_vect.append(var_name) 46 | isp=layer.output.shape 47 | var_names.append(np.empty((1, isp[1], isp[2], isp[3]), dtype="S40")) 48 | for I in range(0, 1): 49 | for J in range(0, isp[1]): 50 | for K in range(0, isp[2]): 51 | for L in range(0, isp[3]): 52 | var_name='x_{0}_{1}_{2}_{3}_{4}'.format(l+1, I, J, K, L) 53 | objective.append(0) 54 | lower_bounds.append(-cplex.infinity) 55 | 
upper_bounds.append(cplex.infinity) 56 | var_names[l+1][I][J][K][L]=var_name 57 | var_names_vect.append(var_name) 58 | elif is_dense_layer(layer): 59 | isp=layer.output.shape 60 | var_names.append(np.empty((1, isp[1]), dtype="S40")) 61 | for I in range(0, 1): 62 | for J in range(0, isp[1]): 63 | var_name='x_{0}_{1}_{2}'.format(l+1, I, J) 64 | if l==len(model.layers)-2 and J==target_o: ## to locate the target output 65 | 66 | target_var=var_name 67 | 68 | if Max: 69 | objective.append(-1) 70 | else: 71 | objective.append(+1) 72 | lower_bounds.append(-100000) 73 | upper_bounds.append(+100000) 74 | 75 | else: 76 | objective.append(0) 77 | lower_bounds.append(-cplex.infinity) 78 | upper_bounds.append(cplex.infinity) 79 | var_names[l+1][I][J]=var_name 80 | var_names_vect.append(var_name) 81 | elif is_activation_layer(layer): 82 | isp=layer.output.shape 83 | if len(isp)>2: ## multiple feature maps 84 | var_names.append(np.empty((1, isp[1], isp[2], isp[3]), dtype="S40")) 85 | for I in range(0, 1): 86 | for J in range(0, isp[1]): 87 | for K in range(0, isp[2]): 88 | for L in range(0, isp[3]): 89 | var_name='x_{0}_{1}_{2}_{3}_{4}'.format(l+1, I, J, K, L) 90 | objective.append(0) 91 | lower_bounds.append(0) 92 | upper_bounds.append(cplex.infinity) 93 | var_names[l+1][I][J][K][L]=var_name 94 | var_names_vect.append(var_name) 95 | else: ## fully connected 96 | var_names.append(np.empty((1, isp[1]), dtype="S40")) 97 | for I in range(0, 1): 98 | for J in range(0, isp[1]): 99 | var_name='x_{0}_{1}_{2}'.format(l+1, I, J) 100 | 101 | #if l==len(model.layers)-2 and J==target_o: ## to locate the target output 102 | 103 | # target_var=var_name 104 | 105 | # if Max: 106 | # objective.append(+1) 107 | # else: 108 | # objective.append(-1) 109 | 110 | #else: 111 | objective.append(0) 112 | 113 | lower_bounds.append(-cplex.infinity) 114 | upper_bounds.append(cplex.infinity) 115 | var_names[l+1][I][J]=var_name 116 | var_names_vect.append(var_name) 117 | elif is_flatten_layer(layer): 118 | 
isp=model.layers[l].input.shape 119 | tot=isp[1]*isp[2]*isp[3] 120 | var_names.append(np.empty((1, tot), dtype="S40")) 121 | for I in range(0, 1): 122 | for J in range(0, tot): 123 | var_name='x_{0}_{1}_{2}'.format(l+1, I, J) 124 | objective.append(0) 125 | lower_bounds.append(0) 126 | upper_bounds.append(cplex.infinity) 127 | var_names[l+1][I][J]=var_name 128 | var_names_vect.append(var_name) 129 | else: 130 | print 'Un-expected layer!!!', layer 131 | sys.exit(0) 132 | 133 | constraints=[] 134 | rhs=[] 135 | constraint_senses=[] 136 | constraint_names=[] 137 | 138 | for I in range(0, var_names[0].shape[0]): 139 | for J in range(0, var_names[0].shape[1]): 140 | for K in range(0, var_names[0].shape[2]): 141 | for L in range(0, var_names[0].shape[3]): 142 | vn=var_names[0][I][J][K][L] 143 | v=im[J][K][L] 144 | # x<=1 145 | constraints.append([[vn], [1]]) 146 | rhs.append(1.0) 147 | constraint_senses.append("L") 148 | constraint_names.append("x<=1") 149 | ## x>=0 150 | constraints.append([[vn], [1]]) 151 | rhs.append(0.0) 152 | constraint_senses.append("G") 153 | constraint_names.append("x_0_{0}_{1}_{2}_{3}>=0".format(I, J, K, L)) 154 | 155 | if not (d_vect is None): 156 | # x <= v+d_vect[J][K][L] 157 | constraints.append([[vn], [1]]) 158 | rhs.append(v+d_vect[J][K][L]) 159 | constraint_senses.append("L") 160 | constraint_names.append("") 161 | # x >= v-d_vect[J][K][L] 162 | constraints.append([[vn], [1]]) 163 | rhs.append(v-d_vect[J][K][L]) 164 | constraint_senses.append("G") 165 | constraint_names.append("") 166 | 167 | iw=0 168 | for l in range(0, len(model.layers)-1): 169 | layer=model.layers[l] 170 | weights=None 171 | biases=None 172 | if is_conv_layer(layer) or is_dense_layer(layer): 173 | weights=model.get_weights()[iw] 174 | biases=model.get_weights()[iw+1] 175 | iw+=2 176 | 177 | isp=var_names[l].shape 178 | osp=var_names[l+1].shape 179 | if is_conv_layer(layer): 180 | 181 | print '## the convolutional layer {0}'.format(l) 182 | 183 | 
kernel_size=layer.kernel_size 184 | for I in range(0, osp[0]): 185 | for J in range(0, osp[1]): 186 | for K in range(0, osp[2]): 187 | for L in range(0, osp[3]): 188 | constraint=[[], []] 189 | constraint[0].append(var_names[l+1][I][J][K][L]) 190 | constraint[1].append(-1) 191 | for II in range(0, kernel_size[0]): 192 | for JJ in range(0, kernel_size[1]): 193 | for KK in range(0, weights.shape[2]): 194 | constraint[0].append(var_names[l][0][J+II][K+JJ][KK]) 195 | constraint[1].append(float(weights[II][JJ][KK][L])) 196 | 197 | constraints.append(constraint) 198 | rhs.append(-float(biases[L])) 199 | constraint_senses.append('E') 200 | constraint_names.append('eq: x_{0}_{1}_{2}_{3}_{4}'.format(l+1, I, J, K, L)) 201 | 202 | elif is_dense_layer(layer): 203 | 204 | print '## the dense layer {0}'.format(l) 205 | 206 | for I in range(0, osp[0]): 207 | for J in range(0, osp[1]): 208 | constraint=[[], []] 209 | constraint[0].append(var_names[l+1][I][J]) 210 | constraint[1].append(-1) 211 | for II in range(0, isp[1]): 212 | constraint[0].append(var_names[l][0][II]) 213 | constraint[1].append(float(weights[II][J])) 214 | 215 | constraints.append(constraint) 216 | rhs.append(-float(biases[J])) 217 | constraint_senses.append('E') 218 | constraint_names.append('eq: x_{0}_{1}_{2}'.format(l+1, I, J)) 219 | 220 | elif is_flatten_layer(layer): 221 | 222 | print '## the flatten layer {0}'.format(l) 223 | 224 | tot=isp[1]*isp[2]*isp[3] 225 | for I in range(0, tot): 226 | d0=I/(isp[2]*isp[3]) 227 | d1=(I%(isp[2]*isp[3]))/isp[3] 228 | d2=I-d0*(isp[2]*isp[3])-d1*isp[3] 229 | constraint=[[], []] 230 | constraint[0].append(var_names[l+1][0][I]) 231 | constraint[1].append(-1) 232 | constraint[0].append(var_names[l][0][d0][d1][d2]) 233 | constraint[1].append(+1) 234 | 235 | constraints.append(constraint) 236 | constraint_senses.append('E') 237 | rhs.append(0) 238 | constraint_names.append('eq: x_{0}_{1}_{2}'.format(l+1, 0, I)) 239 | 240 | elif is_maxpooling_layer(layer): 241 | 242 | print '## 
the maxpooling layer {0}'.format(l) 243 | 244 | pool_size=layer.pool_size 245 | for I in range(0, osp[1]): 246 | for J in range(0, osp[2]): 247 | for K in range(0, osp[3]): 248 | max_found=False 249 | for II in range(I*pool_size[0], (I+1)*pool_size[0]): 250 | for JJ in range(J*pool_size[1], (J+1)*pool_size[1]): 251 | constraint=[[], []] 252 | constraint[0].append(var_names[l+1][0][I][J][K]) 253 | constraint[1].append(1) 254 | constraint[0].append(var_names[l][0][II][JJ][K]) 255 | constraint[1].append(-1) 256 | constraints.append(constraint) 257 | rhs.append(0) 258 | constraint_senses.append('G') 259 | constraint_names.append('maxpooling: x_{0}_{1}_{2}_{3}_{4}'.format(l+1, 0, I, J, K)) 260 | if ((not max_found) and activations[l][0][I][J][K]==activations[l-1][0][II][JJ][K]): 261 | max_found=True 262 | constraint=[[], []] 263 | constraint[0].append(var_names[l+1][0][I][J][K]) 264 | constraint[1].append(1) 265 | constraint[0].append(var_names[l][0][II][JJ][K]) 266 | constraint[1].append(-1) 267 | constraints.append(constraint) 268 | rhs.append(0) 269 | constraint_senses.append('E') 270 | constraint_names.append('maxpooling eq: x_{0}_{1}_{2}_{3}_{4}'.format(l+1, 0, I, J, K)) 271 | #if max_found is False: 272 | # print "maxpooling fails..." 
273 | # sys.exit(0) 274 | elif is_activation_layer(layer): 275 | ## for simplicity, we assume that activations are ReLU 276 | 277 | print '## the ReLU activation layer {0}'.format(l) 278 | 279 | if len(osp)>2: 280 | for I in range(0, osp[1]): 281 | for J in range(0, osp[2]): 282 | for K in range(0, osp[3]): 283 | constraint=[[], []] 284 | constraint[0].append(var_names[l+1][0][I][J][K]) 285 | constraint[1].append(1) 286 | if activations[l][0][I][J][K]==0: 287 | constraints.append(constraint) 288 | rhs.append(0) 289 | constraint_senses.append('E') 290 | constraint_names.append('relu not activated: x_{0}_{1}_{2}_{3}_{4}'.format(l+1, 0, I, J, K)) 291 | else: 292 | constraint[0].append(var_names[l][0][I][J][K]) 293 | constraint[1].append(-1) 294 | constraints.append(constraint) 295 | rhs.append(0) 296 | constraint_senses.append('E') 297 | constraint_names.append('relu activated: x_{0}_{1}_{2}_{3}_{4}'.format(l+1, 0, I, J, K)) 298 | else: 299 | for I in range(0, osp[1]): 300 | constraint=[[], []] 301 | constraint[0].append(var_names[l+1][0][I]) 302 | constraint[1].append(1) 303 | if activations[l][0][I]==0: 304 | constraints.append(constraint) 305 | rhs.append(0) 306 | constraint_senses.append('E') 307 | constraint_names.append('relu not activated: x_{0}_{1}_{2}'.format(l+1, 0, I)) 308 | else: 309 | constraint[0].append(var_names[l][0][I]) 310 | constraint[1].append(-1) 311 | constraints.append(constraint) 312 | rhs.append(0) 313 | constraint_senses.append('E') 314 | constraint_names.append('relu activated: x_{0}_{1}_{2}'.format(l+1, 0, I)) 315 | else: 316 | print 'Unexpected layer', model.layers[l] 317 | sys.exit(0) 318 | 319 | try: 320 | print 'The LP encoding phase is done!' 
321 | 322 | print 'To solve the LP constraints' 323 | 324 | problem=cplex.Cplex() 325 | problem.variables.add(obj = objective, 326 | lb = lower_bounds, 327 | ub = upper_bounds, 328 | names = var_names_vect) 329 | problem.linear_constraints.add(lin_expr=constraints, 330 | senses = constraint_senses, 331 | rhs = rhs, 332 | names = constraint_names) 333 | 334 | ### 5 minutes threshold 335 | timeLimit = 60*5 336 | problem.parameters.timelimit.set(60*5) 337 | problem.solve() 338 | ### 339 | 340 | print 'Solved!!!' 341 | 342 | 343 | print '***the target var is {0}\n'.format(target_var) 344 | res=problem.solution.get_values(target_var) 345 | print '***the target var: ', res 346 | 347 | return res 348 | 349 | except: 350 | print 'There is one Exception' 351 | return None 352 | -------------------------------------------------------------------------------- /dev/ap_main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | from datetime import datetime 4 | 5 | import keras 6 | #from keras.datasets import mnist 7 | from keras.models import * 8 | #from keras.layers import * 9 | #from keras import * 10 | 11 | from ap_lp import * 12 | from utils import * 13 | 14 | class effective_layert: 15 | def __init__(self, layer_index, current_layer, is_conv=False): 16 | self.layer_index=layer_index 17 | self.activations=[] 18 | self.is_conv=is_conv 19 | self.current_layer=current_layer 20 | self.fk=1.0 21 | sp=current_layer.output.shape 22 | if is_conv: 23 | #self.cover_map=np.ones((1, sp[1], sp[2], sp[3])) 24 | self.cover_map=np.zeros((1, sp[1], sp[2], sp[3]), dtype=bool) 25 | else: 26 | #self.cover_map=np.ones((1, sp[1])) 27 | self.cover_map=np.zeros((1, sp[1]), dtype=bool) 28 | print 'Created an effective layer: [is_conv {0}] [cover_map {1}]'.format(is_conv, self.cover_map.shape) 29 | 30 | 31 | def main(): 32 | parser=argparse.ArgumentParser( 33 | description='To encode an DNN given a reference input' ) 34 | 35 | 
parser.add_argument('model', action='store', nargs='+', help='The input neural network model (.h5)') 36 | parser.add_argument("-i", "--inputs", dest="inputs", 37 | help="the reference input seeds directory", metavar="DIR") 38 | #parser.add_argument("-m", "--max", 39 | # action="store_false", dest="Max", default=True, 40 | # help="to max/min ") 41 | 42 | 43 | args=parser.parse_args() 44 | inputs= args.inputs 45 | print inputs, type(inputs) 46 | #Max=args.Max 47 | #print Max, type(Max) 48 | 49 | if inputs is None: 50 | print 'No inputs are given...' 51 | sys.exit(0) 52 | 53 | inputFiles=[] 54 | if inputs[-1]!='/': 55 | inputs+='/' 56 | for f in os.listdir(inputs): 57 | if not os.path.isdir(inputs+f): 58 | inputFiles.append(inputs+f) 59 | print inputFiles 60 | model = load_model(args.model[0]) 61 | 62 | ### input size 63 | inps=model.layers[0].input.shape 64 | row, column, channel=inps[1], inps[2], inps[3] 65 | N=row*column*channel 66 | 67 | ### to get 'x's and 'd's from input files 68 | xs=[] 69 | ds=[] 70 | labels=[] 71 | 72 | for fname in inputFiles: 73 | ## each input file f shall contain 3 lines 74 | #### the first line contains N elements for 'x_0' 75 | #### the second line contains N elements for 'd' 76 | #### the third line is the target output index 77 | x0=np.zeros((1,N)) 78 | d=np.zeros((1,N)) 79 | lines = [line.rstrip('\n') for line in open(fname)] 80 | s=lines[0].split() 81 | for i in range(0, N): 82 | x0[0][i]=float(s[i]) 83 | s=lines[1].split() 84 | for i in range(0, N): 85 | d[0][i]=float(s[i]) 86 | x0=np.reshape(x0, (row, column, channel)) 87 | d=np.reshape(d, (row, column, channel)) 88 | xs.append(x0) 89 | ds.append(d) 90 | label=int(lines[2]) 91 | labels.append(label) 92 | 93 | #### configuration phase 94 | layer_functions=[] 95 | effective_layers=[] 96 | 97 | for l in range(0, len(model.layers)): 98 | layer=model.layers[l] 99 | name=layer.name 100 | 101 | get_current_layer_output = K.function([layer.input], [layer.output]) 102 | 
layer_functions.append(get_current_layer_output) 103 | 104 | if is_conv_layer(layer) or is_dense_layer(layer): 105 | effective_layers.append(effective_layert(layer_index=l, current_layer=layer, is_conv=is_conv_layer(layer))) 106 | 107 | ## a list of (min, max) to be printed into files 108 | lp_results=[] 109 | 110 | for i in range(0, len(xs)): 111 | activations=eval(layer_functions, xs[i]) 112 | v_min=AP(model, activations, xs[i], labels[i], ds[i], False) 113 | v_max=AP(model, activations, xs[i], labels[i], ds[i], True) 114 | if not os.path.isdir(inputs+'results'): 115 | os.system('mkdir -p {0}'.format(inputs+'results')) 116 | with open(inputs+'results/'+inputFiles[i].split('/')[1], 'w') as the_file: 117 | the_file.write('{0}\n'.format(v_min)) 118 | the_file.write('{0}\n'.format(v_max)) 119 | 120 | if __name__=="__main__": 121 | main() 122 | 123 | -------------------------------------------------------------------------------- /dev/ssc.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | from datetime import datetime 4 | 5 | import keras 6 | from keras.models import * 7 | from keras.datasets import cifar10 8 | from keras.applications.vgg16 import VGG16 9 | from keras.layers import * 10 | from keras import * 11 | 12 | 13 | from utils import * 14 | 15 | class raw_datat: 16 | def __init__(self, data, labels): 17 | self.data=data 18 | self.labels=labels 19 | 20 | class test_objectt: 21 | def __init__(self, dnn, raw_data): 22 | self.dnn=dnn 23 | self.raw_data=raw_data 24 | self.channels_last=True 25 | 26 | def sscover(test_object): 27 | print('\n== Start SSCover testing ==\n') 28 | 29 | layer_functions=get_layer_functions(test_object.dnn) 30 | print('\n== Got layer functions: {0} ==\n'.format(len(layer_functions))) 31 | cover_layers=get_cover_layers(test_object.dnn) 32 | print('\n== Got cover layers: {0} ==\n'.format(len(cover_layers))) 33 | 34 | for c_layer in cover_layers: 35 | 
isp=c_layer.layer.input.shape 36 | osp=c_layer.layer.output.shape 37 | if c_layer.is_conv: 38 | ## output 39 | for o_i in range(0, osp[3]): # by default, we assume channel last 40 | for o_j in range(0, osp[1]): 41 | for o_k in range(0, osp[2]): 42 | ## input 43 | for i_i in range(0, isp[3]): # by default, we assume channel last 44 | for i_j in range(0, isp[1]): 45 | for i_k in range(0, isp[2]): 46 | sscover_lp(c_layer.layer_index, [o_j, o_k, o_i], [i_j, i_k, i_i], test_object, layer_functions) 47 | #print("SSCover lp: {0}-{1}-{2}".format(c_layer.layer_index, [o_j, o_k, o_i], [i_j, i_k, i_i])) 48 | 49 | 50 | def cover(test_object, criterion): 51 | ## we start from SSC 52 | if (criterion=='SSC'): 53 | sscover(test_object) 54 | else: 55 | print('More to be added...') 56 | return 57 | 58 | def main(): 59 | ## for testing purpose we fix the aicover configuration 60 | ## 61 | dnn=load_model("models/cifar10_complicated.h5") 62 | #dnn=VGG16() 63 | ## 64 | criterion='SSC' 65 | img_rows, img_cols = 32, 32 66 | (x_train, y_train), (x_test, y_test) = cifar10.load_data() 67 | x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3) 68 | x_test = x_test.astype('float32') 69 | x_test /= 255 70 | raw_data=raw_datat(x_test, y_test) 71 | 72 | cover(test_objectt(dnn, raw_data), criterion) 73 | 74 | if __name__=="__main__": 75 | main() 76 | -------------------------------------------------------------------------------- /dev/sscover_lp.py: -------------------------------------------------------------------------------- 1 | 2 | import cplex 3 | import sys 4 | import numpy as np 5 | 6 | from utils import * 7 | 8 | ## To build the base LP constraints that are shared 9 | ## by different activation patterns 10 | def base_lp(test_object): 11 | var_names_vect=[] 12 | objective=[] 13 | lower_bounds=[] 14 | upper_bounds=[] 15 | var_names=[] 16 | 17 | for l in range(0, len(test_object.dnn.layers)): 18 | layer=test_object.dnn.layers[l] 19 | if is_conv_layer(layer) or 
is_maxpooling_layer(layer): 20 | if l==0: 21 | isp=layer.input.shape 22 | var_names.append(np.empty((1, isp[1], isp[2], isp[3]), dtype="S40")) 23 | 24 | 25 | def sscover_lp(layer_index, o_pos, i_pos, test_object, layer_functions): 26 | 27 | indices=np.random.permutation(len(test_object.raw_data)) 28 | 29 | for index in indices: 30 | x=test_object.raw_data[index] 31 | activations=eval(layer_functions, x) 32 | -------------------------------------------------------------------------------- /dev/utils.py: -------------------------------------------------------------------------------- 1 | 2 | #import matplotlib.pyplot as plt 3 | from keras import * 4 | from keras import backend as K 5 | 6 | ## some DNN model has an explicit input layer 7 | def is_input_layer(layer): 8 | return str(layer).find('InputLayer')>=0 9 | 10 | def is_conv_layer(layer): 11 | return str(layer).find('conv')>=0 or str(layer).find('Conv')>=0 12 | 13 | def is_dense_layer(layer): 14 | return str(layer).find('dense')>=0 or str(layer).find('Dense')>=0 15 | 16 | def is_activation_layer(layer): 17 | return str(layer).find('activation')>=0 or str(layer).find('Activation')>=0 18 | 19 | def get_activation(layer): 20 | if str(layer.activation).find('relu')>=0: return 'relu' 21 | elif str(layer.activation).find('linear')>=0: return 'linear' 22 | elif str(layer.activation).find('softmax')>=0: return 'softmax' 23 | else: return '' 24 | 25 | def is_maxpooling_layer(layer): 26 | return str(layer).find('MaxPooling')>=0 27 | 28 | def is_flatten_layer(layer): 29 | return str(layer).find('flatten')>=0 or str(layer).find('Flatten')>=0 30 | 31 | def is_dropout_layer(layer): 32 | return False ## we do not allow dropout 33 | 34 | class cover_layert: 35 | def __init__(self, layer, layer_index, is_conv): 36 | self.layer=layer 37 | self.layer_index=layer_index 38 | self.is_conv=is_conv 39 | self.activations=[] 40 | 41 | def get_layer_functions(dnn): 42 | layer_functions=[] 43 | for l in range(0, len(dnn.layers)): 44 | 
layer=dnn.layers[l] 45 | current_layer_function=K.function([layer.input], [layer.output]) 46 | layer_functions.append(current_layer_function) 47 | return layer_functions 48 | 49 | def get_cover_layers(dnn): 50 | cover_layers=[] 51 | for l in range(0, len(dnn.layers)): 52 | layer=dnn.layers[l] 53 | if is_conv_layer(layer): 54 | cover_layers.append(cover_layert(layer, l, is_conv=True)) 55 | elif is_dense_layer(layer): 56 | cover_layers.append(cover_layert(layer, l, is_conv=False)) 57 | return cover_layers 58 | 59 | 60 | ### given an input image, to evaluate activations 61 | def eval(layer_functions, im): 62 | activations=[] 63 | for l in range(0, len(layer_functions)): 64 | if l==0: 65 | activations.append(layer_functions[l]([[im]])[0]) 66 | else: 67 | activations.append(layer_functions[l]([activations[l-1]])[0]) 68 | return activations 69 | 70 | def eval_batch(layer_functions, ims): 71 | activations=[] 72 | for l in range(0, len(layer_functions)): 73 | if l==0: 74 | activations.append(layer_functions[l]([ims])[0]) 75 | else: 76 | activations.append(layer_functions[l]([activations[l-1]])[0]) 77 | return activations 78 | 79 | #def show_adversarial_examples(imgs, ys, name): 80 | # for i in range(0, 2): 81 | # plt.subplot(1, 2, 1+i) 82 | # print 'imgs[i].shape is ', imgs[i].shape 83 | # plt.imshow(imgs[i].reshape([28,28]), cmap=plt.get_cmap('gray')) 84 | # plt.title("label: "+str(ys[i])) 85 | # plt.savefig(name, bbox_inches='tight') 86 | -------------------------------------------------------------------------------- /dev/vgg16_example.py: -------------------------------------------------------------------------------- 1 | 2 | import keras 3 | import numpy as np 4 | from keras.applications.vgg16 import VGG16 5 | from keras.preprocessing.image import load_img 6 | # load an image from file 7 | image = load_img('mug.jpg', target_size=(224, 224)) 8 | 9 | #Load the VGG model 10 | model = VGG16() 11 | 12 | vgg_model.summary() 13 | 
-------------------------------------------------------------------------------- /exps/data/mnist.pkl.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/data/mnist.pkl.gz -------------------------------------------------------------------------------- /exps/exp5-1/Result.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index0-67-22-63.txt, neuron coverage by 25 inputs: 0.996828752643 2 | mnist_nnet_index1-59-94-56-45.txt, neuron coverage by 25 inputs: 0.990458015267 3 | mnist_nnet_index2-72-61-70-77.txt, neuron coverage by 25 inputs: 0.990689013035 4 | mnist_nnet_index3-65-99-87-23-31.txt, neuron coverage by 25 inputs: 0.987261146497 5 | mnist_nnet_index4-49-61-90-21-48.txt, neuron coverage by 25 inputs: 0.990592662277 6 | mnist_nnet_index5-97-83-32.txt, neuron coverage by 25 inputs: 0.994035785288 7 | mnist_nnet_index6-33-95-67-43-76.txt, neuron coverage by 25 inputs: 0.996389891697 8 | mnist_nnet_index7-78-62-73-47.txt, neuron coverage by 25 inputs: 0.995256166983 9 | mnist_nnet_index8-87-33-62.txt, neuron coverage by 25 inputs: 0.991803278689 10 | mnist_nnet_index9-76-55-74-98-75.txt, neuron coverage by 25 inputs: 0.982081911263 11 | -------------------------------------------------------------------------------- /exps/exp5-1/demo-single-neuron-attack.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | sys.path.insert(0, '../../src/') 4 | import os 5 | from datetime import datetime 6 | import random 7 | import numpy as np 8 | import json 9 | 10 | from util import * 11 | from nnett import * 12 | 13 | training_data, validation_data, test_data = mnist_load_data_shared() 14 | 15 | ## DNNs 16 | di='../random-nn2/' 17 | 18 | with open(di+'README.txt') as f: 19 | lines = f.readlines() 20 | count=-1 21 | for line in lines: 22 | 23 | 
count+=1 24 | 25 | ## read each DNN 26 | fname=line.split()[0] 27 | print 'Neuron coverage attack: DNN {0} ... '.format(fname), 28 | with open(di+'w_'+fname, "r") as infile: 29 | weights=json.load(infile) 30 | with open(di+'b_'+fname, "r") as infile: 31 | biases=json.load(infile) 32 | 33 | nnet=NNett(weights, biases) 34 | 35 | # randomlize the test data 36 | tot=len(test_data[0].eval()) 37 | ordering=list(range(tot)) 38 | np.random.shuffle(ordering) 39 | 40 | covered=0 41 | tot=0 42 | 43 | X=test_data[0][ordering[0]].eval() 44 | _, act_=nnet.eval(X) 45 | act_b_map=[] 46 | for i in range(0, len(act_)): 47 | act_b_map.append([]) 48 | for j in range(0, len(act_[i])): 49 | act_b_map[i].append(False) 50 | 51 | cex=0 52 | index=-1 53 | ## we select 25 images to attack neuron coverage 54 | while index < 24: 55 | index+=1 56 | X=test_data[0][ordering[index]].eval() 57 | X1=X[:] 58 | 59 | for i in range(0, len(X)): 60 | if X[i]==0: 61 | X[i]+=np.random.uniform(0.0,0.1) 62 | 63 | _, act=nnet.eval(X) 64 | for i in range(0, len(act)): 65 | for j in range(0, len(act[i])): 66 | if act[i][j]>0: act_b_map[i][j]=True 67 | 68 | for i in range(0, len(act)): 69 | for j in range(0, len(act[i])): 70 | tot+=1 71 | if act_b_map[i][j]: covered+=1 72 | 73 | print '{0} covered'.format(covered*1.0/tot) 74 | 75 | with open("./Result.txt", "a") as myfile: 76 | myfile.write('{0}, neuron coverage by 25 inputs: {1} \n'.format(fname, covered*1.0/tot)) 77 | 78 | -------------------------------------------------------------------------------- /exps/exp5-2/exp-random-testing.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from util import * 10 | from nnett import * 11 | 12 | training_data, validation_data, test_data = mnist_load_data_shared() 13 | 14 | def mutate(X, seed): 15 | res=[] 16 | epsilon=0.1 
17 | for i in range(0, len(X)): 18 | seed+=1 19 | np.random.seed(seed+i) 20 | delta=np.random.uniform(-epsilon, epsilon) 21 | x=X[i]+delta 22 | if x>1: x=1 23 | if x<0: x=0 24 | res.append(x) 25 | return res 26 | 27 | def main(): 28 | 29 | di='../random-nn/' 30 | seed=1234 31 | 32 | count=0 33 | with open(di+'README.txt') as f: 34 | count+=1 35 | index=999 ## let's start from the 1000th image in test data 36 | lines = f.readlines() 37 | for line in lines: 38 | 39 | index+=1 40 | 41 | ## to read the DNN 42 | fname=line.split()[0] 43 | with open(di+'w_'+fname, "r") as infile: 44 | weights=json.load(infile) 45 | with open(di+'b_'+fname, "r") as infile: 46 | biases=json.load(infile) 47 | 48 | nnet=NNett(weights, biases) 49 | 50 | #os.system('mkdir -p {0}'.format(outs)) 51 | 52 | X=test_data[0][index].eval() 53 | label=test_data[1][index].eval() 54 | 55 | label_, act=nnet.eval(X) 56 | 57 | f=open('./_DNN{0}-README.txt'.format(count), "w") 58 | s='' 59 | s+='Neural net tested: {0}\n'.format(fname) 60 | s+='Input: MNIST test data index: {0}\n'.format(index) 61 | s+='Input: MNIST test data label: {0}\n'.format(label) 62 | f.write(s) 63 | f.close() 64 | print s, 65 | 66 | 67 | #### run nnet testing 68 | cex=0 69 | tests=0 70 | while tests<10*10000: #for x in f_data: 71 | x=mutate(X, seed) 72 | seed+=28*28 73 | label_, act_=nnet.eval(x) 74 | if label_!=label: ## adversarial found 75 | cex+=1 76 | tests+=1 77 | if tests%100==0: 78 | f=open('./_DNN{0}-README.txt'.format(count), "a") 79 | s='###### counterexamples: {0}, total tests {1}\n'.format(cex, tests) 80 | f.write(s) 81 | f.close() 82 | print s, 83 | if cex>100: break 84 | 85 | 86 | with open("./Result-random.txt", "a") as myfile: 87 | myfile.write('{0}, counterexamples {1} \n'.format(fname, cex)) 88 | 89 | if __name__=="__main__": 90 | main() 91 | -------------------------------------------------------------------------------- /exps/exp5-3/DSC/exp-dsc.py: 
-------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from util import * 10 | from nnett import * 11 | from lp import * 12 | 13 | 14 | def dsc_pair(nnet, I, J, test_data, di): 15 | 16 | index=-1 17 | tot=len(test_data[0].eval()) 18 | 19 | ordering=list(range(tot)) 20 | np.random.shuffle(ordering) 21 | 22 | while index0.3: continue 37 | 38 | ade=False 39 | if feasible: 40 | label__, act=nnet.eval(list(new_x)) ## the next label by DNN 41 | 42 | if label_!=label__: 43 | if label_==label or label_==label: ade=True 44 | return True, ade, index, 0, d, label, label_, label__ 45 | 46 | if index>=40: break ## 47 | 48 | return False, False, index, -1, -1, -1, -1, -1 49 | 50 | def main(): 51 | di='../../random-nn/' 52 | outs="./dsc-pairs"+str(datetime.now()).replace(' ','-')+'/' 53 | os.system('mkdir -p {0}'.format(outs)) 54 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../../data/mnist.pkl.gz") 55 | 56 | nnindex=-1 57 | with open(di+'README.txt') as f: 58 | lines = f.readlines() 59 | for line in lines: 60 | 61 | nnindex+=1 62 | 63 | fname=line.split()[0] 64 | with open(di+'w_'+fname, "r") as infile: 65 | weights=json.load(infile) 66 | with open(di+'b_'+fname, "r") as infile: 67 | biases=json.load(infile) 68 | 69 | nnet=NNett(weights, biases) 70 | N=len(nnet.weights) 71 | 72 | 73 | s='Neural net tested: {0}\n'.format(fname) 74 | fres=fname+'-results.txt' 75 | f=open(outs+fres, "a") 76 | f.write(s) 77 | f.close() 78 | 79 | covered=0 80 | not_covered=0 81 | i_begin=2 82 | j_begin=0 83 | k_begin=0 84 | nade=0 # number of adversarial examples 85 | for I in range(i_begin, N): ## iterate each hidden layer 86 | M=len(nnet.weights[I-1][0]) 87 | for J in range(j_begin, M): 88 | found, is_ade, tested, ncex_, d_, label, label_, label__=dsc_pair(nnet, I, J, 
test_data, outs) 89 | if found: covered+=1 90 | else: not_covered+=1 91 | 92 | if is_ade: nade+=1 93 | 94 | s='I-J: {0}-{1}, '.format(I, J) 95 | s+='{0}, tested images: {1}, nade={2}, d={3}, covered={4}, not_covered={5}, ncex={6}, labels: {7}:{8}-{9}\n'.format(found, tested, nade, d_, covered, not_covered, nade, label, label_, label__) 96 | f=open(outs+fres, "a") 97 | f.write(s) 98 | f.close() 99 | f=open(di+'results-dsc.txt', "a") 100 | s='{0}: dsc-coverage: {1}, nade={2}, covered={3}, not-covered={4}, CEX={5}\n'.format(fname, 1.0*covered/(covered+not_covered), nade, covered, not_covered, nade) 101 | f.write(s) 102 | 103 | 104 | if __name__=="__main__": 105 | main() 106 | -------------------------------------------------------------------------------- /exps/exp5-3/DSC/results-dsc.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index0-67-22-63.txt: dsc-coverage: 1.0, nade=15, covered=95, not-covered=0, CEX=15 2 | mnist_nnet_index1-59-94-56-45.txt: dsc-coverage: 1.0, nade=14, covered=205, not-covered=0, CEX=14 3 | mnist_nnet_index2-72-61-70-77.txt: dsc-coverage: 1.0, nade=11, covered=218, not-covered=0, CEX=11 4 | mnist_nnet_index3-65-99-87-23-31.txt: dsc-coverage: 1.0, nade=18, covered=250, not-covered=0, CEX=18 5 | mnist_nnet_index4-49-61-90-21-48.txt: dsc-coverage: 0.991304347826, nade=22, covered=228, not-covered=2, CEX=22 6 | mnist_nnet_index5-97-83-32.txt: dsc-coverage: 1.0, nade=7, covered=125, not-covered=0, CEX=7 7 | mnist_nnet_index6-33-95-67-43-76.txt: dsc-coverage: 1.0, nade=21, covered=291, not-covered=0, CEX=21 8 | mnist_nnet_index7-78-62-73-47.txt: dsc-coverage: 1.0, nade=18, covered=192, not-covered=0, CEX=18 9 | mnist_nnet_index8-87-33-62.txt: dsc-coverage: 1.0, nade=11, covered=105, not-covered=0, CEX=11 10 | mnist_nnet_index9-76-55-74-98-75.txt: dsc-coverage: 1.0, nade=19, covered=312, not-covered=0, CEX=19 11 | -------------------------------------------------------------------------------- 
/exps/exp5-3/DVC/exp-dvc.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from util import * 10 | from nnett import * 11 | from lp import * 12 | 13 | 14 | def dvc_pair(nnet, I, J, test_data, di): 15 | 16 | index=-1 17 | tot=len(test_data[0].eval()) 18 | 19 | ordering=list(range(tot)) 20 | np.random.shuffle(ordering) 21 | 22 | 23 | while index0.3: continue 38 | 39 | ade=False 40 | if feasible: 41 | label__, act=nnet.eval(list(new_x)) ## the next label by DNN 42 | 43 | if label_!=label__: 44 | if label__==label or label_==label: ade=True 45 | return True, ade, index, 0, d, label, label_, label__ 46 | 47 | if index>=40: break ## 48 | 49 | return False, False, index, -1, -1, -1, -1, -1 50 | 51 | def main(): 52 | di='../../random-nn/' 53 | outs="./dvc-pairs"+str(datetime.now()).replace(' ','-')+'/' 54 | os.system('mkdir -p {0}'.format(outs)) 55 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../../data/mnist.pkl.gz") 56 | 57 | nnindex=-1 58 | with open(di+'README.txt') as f: 59 | lines = f.readlines() 60 | for line in lines: 61 | 62 | nnindex+=1 63 | 64 | fname=line.split()[0] 65 | with open(di+'w_'+fname, "r") as infile: 66 | weights=json.load(infile) 67 | with open(di+'b_'+fname, "r") as infile: 68 | biases=json.load(infile) 69 | 70 | nnet=NNett(weights, biases) 71 | N=len(nnet.weights) 72 | 73 | 74 | s='Neural net tested: {0}\n'.format(fname) 75 | fres=fname+'-results.txt' 76 | f=open(outs+fres, "a") 77 | f.write(s) 78 | f.close() 79 | 80 | covered=0 81 | not_covered=0 82 | i_begin=2 83 | j_begin=0 84 | k_begin=0 85 | nade=0 # number of adversarial examples 86 | for I in range(i_begin, N): ## iterate each hidden layer 87 | M=len(nnet.weights[I-1][0]) 88 | for J in range(j_begin, M): 89 | #for K in range(k_begin, 
len(nnet.weights[I][0])): 90 | found, is_ade, tested, ncex_, d_, label, label_, label__=dvc_pair(nnet, I, J, test_data, outs) 91 | if found: covered+=1 92 | else: not_covered+=1 93 | 94 | if is_ade: nade+=1 95 | 96 | s='I-J: {0}-{1}, '.format(I, J) 97 | s+='{0}, tested images: {1}, nade={2}, d={3}, covered={4}, not_covered={5}, ncex={6}, labels: {7}:{8}-{9}\n'.format(found, tested, nade, d_, covered, not_covered, nade, label, label_, label__) 98 | f=open(outs+fres, "a") 99 | f.write(s) 100 | f.close() 101 | f=open('./results-dvc.txt', "a") 102 | s='{0}: dvc-coverage: {1}, nade={2}, covered={3}, not-covered={4}, CEX={5}\n'.format(fname, 1.0*covered/(covered+not_covered), nade, covered, not_covered, nade) 103 | f.write(s) 104 | 105 | 106 | if __name__=="__main__": 107 | main() 108 | -------------------------------------------------------------------------------- /exps/exp5-3/DVC/results-dvc.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index0-67-22-63.txt: vvc-coverage: 1.0, nade=20, covered=95, not-covered=0, CEX=20 2 | mnist_nnet_index1-59-94-56-45.txt: vvc-coverage: 1.0, nade=23, covered=205, not-covered=0, CEX=23 3 | mnist_nnet_index2-72-61-70-77.txt: vvc-coverage: 0.98623853211, nade=24, covered=215, not-covered=3, CEX=24 4 | mnist_nnet_index3-65-99-87-23-31.txt: vvc-coverage: 0.984, nade=28, covered=246, not-covered=4, CEX=28 5 | mnist_nnet_index4-49-61-90-21-48.txt: vvc-coverage: 0.986956521739, nade=21, covered=227, not-covered=3, CEX=21 6 | mnist_nnet_index5-97-83-32.txt: vvc-coverage: 1.0, nade=10, covered=125, not-covered=0, CEX=10 7 | mnist_nnet_index6-33-95-67-43-76.txt: vvc-coverage: 0.962199312715, nade=35, covered=280, not-covered=11, CEX=35 8 | mnist_nnet_index7-78-62-73-47.txt: vvc-coverage: 1.0, nade=14, covered=192, not-covered=0, CEX=14 9 | mnist_nnet_index8-87-33-62.txt: vvc-coverage: 1.0, nade=7, covered=105, not-covered=0, CEX=7 10 | mnist_nnet_index9-76-55-74-98-75.txt: vvc-coverage: 
0.939102564103, nade=14, covered=293, not-covered=19, CEX=14 11 | -------------------------------------------------------------------------------- /exps/exp5-3/SSC-top-weights/exp-ssc-k.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from util import * 10 | from nnett import * 11 | from lp import * 12 | 13 | def ssc_pair(nnet, I, J, K, test_data, di): 14 | 15 | index=-1 16 | tot=len(test_data[0].eval()) 17 | 18 | ordering=list(range(tot)) 19 | np.random.shuffle(ordering) 20 | 21 | cex=False 22 | 23 | while index=40: break ## 44 | 45 | return False, index, cex, -1, -1, -1, -1 46 | 47 | def main(): 48 | kappa=10 49 | di='../../random-nn/' 50 | outs="./ssc-pairs"+str(datetime.now()).replace(' ','-')+'/' 51 | os.system('mkdir -p {0}'.format(outs)) 52 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../../data/mnist.pkl.gz") 53 | 54 | nnindex=-1 55 | with open(di+'README.txt') as f: 56 | lines = f.readlines() 57 | for line in lines: 58 | 59 | nnindex+=1 60 | 61 | fname=line.split()[0] 62 | with open(di+'w_'+fname, "r") as infile: 63 | weights=json.load(infile) 64 | with open(di+'b_'+fname, "r") as infile: 65 | biases=json.load(infile) 66 | 67 | nnet=NNett(weights, biases) 68 | N=len(nnet.weights) 69 | 70 | 71 | s='Neural net tested: {0}\n'.format(fname) 72 | fres=fname+'-results.txt' 73 | f=open(outs+fres, "a") 74 | f.write(s) 75 | f.close() 76 | 77 | ncex=0 78 | covered=0 79 | not_covered=0 80 | i_begin=2 81 | j_begin=0 82 | k_begin=0 83 | for I in range(i_begin, N): ## iterate each hidden layer 84 | for K in range(k_begin, len(nnet.weights[I-1][0])): 85 | ## to find the top-kappa weights to node K 86 | weights_to_k=[] 87 | for J in range(0, len(nnet.weights[I-1])): 88 | weights_to_k.append(abs(nnet.weights[I-1][J][K])) 89 | 90 | 
top_kappa=[] 91 | for ka in range(0, kappa): 92 | _, J=max( (v, i) for i, v in enumerate(weights_to_k) ) 93 | top_kappa.append(J) 94 | weights_to_k.pop(J) 95 | 96 | for J in top_kappa: #range(j_begin, M): 97 | found, tested, cex, d, label, label_, label__=ssc_pair(nnet, I-1, J, K, test_data, outs) 98 | if found: covered+=1 99 | else: not_covered+=1 100 | if cex: ncex+=1 101 | s='I-J-K: {0}-{1}-{2}, '.format(I-1, J, K) 102 | s+='{0}, tested images: {1}, cex={9}, ncex={2}, covered={3}, not_covered={4}, d={5}, {6}:{7}-{8}\n'.format(found, tested, ncex, covered, not_covered, d, label, label_, label__, cex) 103 | f=open(outs+fres, "a") 104 | f.write(s) 105 | f.close() 106 | k_begin=0 107 | j_begin=0 108 | f=open(di+'results-ssc-kappa{0}.txt'.format(kappa), "a") 109 | tot_pairs=covered+not_covered; 110 | s='{0}: aac-coverage: {1}, CEX\%={2}, #CEX={3}, tot_pairs={4}, covered={5}, not-covered={6}\n'.format(fname, 1.0*covered/tot_pairs, 1.0*ncex/tot_pairs, ncex, tot_pairs, covered, not_covered) 111 | f.write(s) 112 | 113 | 114 | if __name__=="__main__": 115 | main() 116 | -------------------------------------------------------------------------------- /exps/exp5-3/SSC-top-weights/results-kappa10.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index0-67-22-63.txt: ssc-coverage: 0.998947368421, CEX\%=0.176842105263, #CEX=168, tot_pairs=950, covered=949, not-covered=1 2 | mnist_nnet_index1-59-94-56-45.txt: ssc-coverage: 0.993658536585, CEX\%=0.0751219512195, #CEX=154, tot_pairs=2050, covered=2037, not-covered=13 3 | mnist_nnet_index2-72-61-70-77.txt: ssc-coverage: 0.995412844037, CEX\%=0.0802752293578, #CEX=175, tot_pairs=2180, covered=2170, not-covered=10 4 | mnist_nnet_index3-65-99-87-23-31.txt: ssc-coverage: 0.9636, CEX\%=0.0652, #CEX=163, tot_pairs=2500, covered=2409, not-covered=91 5 | mnist_nnet_index4-49-61-90-21-48.txt: ssc-coverage: 0.921739130435, CEX\%=0.0839130434783, #CEX=193, tot_pairs=2300, covered=2120, 
not-covered=180 6 | mnist_nnet_index5-97-83-32.txt: ssc-coverage: 1.0, CEX\%=0.064, #CEX=80, tot_pairs=1250, covered=1250, not-covered=0 7 | mnist_nnet_index6-33-95-67-43-76.txt: ssc-coverage: 0.891065292096, CEX\%=0.0718213058419, #CEX=209, tot_pairs=2910, covered=2593, not-covered=317 8 | mnist_nnet_index7-78-62-73-47.txt: ssc-coverage: 0.9984375, CEX\%=0.0885416666667, #CEX=170, tot_pairs=1920, covered=1917, not-covered=3 9 | mnist_nnet_index8-87-33-62.txt: ssc-coverage: 1.0, CEX\%=0.127619047619, #CEX=134, tot_pairs=1050, covered=1050, not-covered=0 10 | mnist_nnet_index9-76-55-74-98-75.txt: ssc-coverage: 0.899358974359, CEX\%=0.0641025641026, #CEX=200, tot_pairs=3120, covered=2806, not-covered=314 11 | -------------------------------------------------------------------------------- /exps/exp5-3/SSC/exp-ssc.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from util import * 10 | from nnett import * 11 | from lp import * 12 | 13 | 14 | def ssc_pair(nnet, I, J, K, test_data, di): 15 | 16 | index=-1 17 | tot=len(test_data[0].eval()) 18 | 19 | ordering=list(range(tot)) 20 | np.random.shuffle(ordering) 21 | 22 | cex=False 23 | 24 | while index=40: break ## 45 | 46 | return False, index, cex, -1, -1, -1, -1 47 | 48 | def main(): 49 | di='../../random-nn/' 50 | outs="./ssc-pairs"+str(datetime.now()).replace(' ','-')+'/' 51 | os.system('mkdir -p {0}'.format(outs)) 52 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../../data/mnist.pkl.gz") 53 | nnindex=-1 54 | with open(di+'README.txt') as f: 55 | lines = f.readlines() 56 | for line in lines: 57 | 58 | nnindex+=1 59 | 60 | fname=line.split()[0] 61 | with open(di+'w_'+fname, "r") as infile: 62 | weights=json.load(infile) 63 | with open(di+'b_'+fname, "r") as infile: 64 | 
biases=json.load(infile) 65 | 66 | nnet=NNett(weights, biases) 67 | N=len(nnet.weights) 68 | 69 | 70 | s='Neural net tested: {0}\n'.format(fname) 71 | fres=fname+'-results.txt' 72 | f=open(outs+fres, "a") 73 | f.write(s) 74 | f.close() 75 | 76 | ncex=0 77 | covered=0 78 | not_covered=0 79 | i_begin=2 80 | j_begin=0 81 | k_begin=0 82 | for I in range(i_begin, N): ## iterate each hidden layer 83 | for K in range(k_begin, len(nnet.weights[I-1][0])): 84 | for J in range(0, len(nnet.weights[I-1])): 85 | found, tested, cex, d, label, label_, label__=ssc_pair(nnet, I-1, J, K, test_data, outs) 86 | if found: covered+=1 87 | else: not_covered+=1 88 | if cex: ncex+=1 89 | s='I-J-K: {0}-{1}-{2}, '.format(I-1, J, K) 90 | s+='{0}, tested images: {1}, cex={9}, ncex={2}, covered={3}, not_covered={4}, d={5}, {6}:{7}-{8}\n'.format(found, tested, ncex, covered, not_covered, d, label, label_, label__, cex) 91 | f=open(outs+fres, "a") 92 | f.write(s) 93 | f.close() 94 | k_begin=0 95 | j_begin=0 96 | f=open('./results-ssc.txt', "a") 97 | tot_pairs=covered+not_covered; 98 | s='{0}: ss-coverage: {1}, CEX\%={2}, #CEX={3}, tot_pairs={4}, covered={5}, not-covered={6}\n'.format(fname, 1.0*covered/tot_pairs, 1.0*ncex/tot_pairs, ncex, tot_pairs, covered, not_covered) 99 | f.write(s) 100 | 101 | if __name__=="__main__": 102 | main() 103 | -------------------------------------------------------------------------------- /exps/exp5-3/SSC/results.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index0-67-22-63.txt: ss-coverage: 0.996561604585, CEX=659, covered=3478, not_covered=12 2 | mnist_nnet_index1-59-94-56-45.txt: ss-coverage: 0.984687953556, CEX=1316, covered=13569, not_covered=211 3 | mnist_nnet_index2-72-61-70-77.txt: ss-coverage: 0.992461048752, CEX=1191, covered=11848, not_covered=90 4 | mnist_nnet_index3-65-99-87-23-31.txt: ss-coverage: 1.0, CEX=1499, covered=14787, not_covered=0 5 | mnist_nnet_index4-49-61-90-21-48.txt: ss-coverage: 
0.891203508476, CEX=1355, covered=10567, not_covered=1290 6 | mnist_nnet_index5-97-83-32.txt: ss-coverage: 1.0, CEX=1032, covered=11027, not_covered=0 7 | mnist_nnet_index2-72-61-70-77.txt: ss-coverage: 0.994400215895, cex%=0.071110511402, CEX=1054, covered=14739, not-covered=83 8 | mnist_nnet_index3-65-99-87-23-31.txt: ss-coverage: 0.983787073927, cex%=0.0711598052236, CEX=1286, covered=17779, not-covered=293 9 | -------------------------------------------------------------------------------- /exps/exp5-3/SVC/exp-svc.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from util import * 10 | from nnett import * 11 | from lp import * 12 | 13 | def svc_pair(nnet, I, J, K, test_data, di): 14 | 15 | index=-1 16 | tot=len(test_data[0].eval()) 17 | 18 | ordering=list(range(tot)) 19 | np.random.shuffle(ordering) 20 | 21 | ade=False 22 | 23 | while index=40: break ## 46 | 47 | return False, False, index, -1, -1, -1, -1, -1 48 | 49 | def main(): 50 | 51 | di='../../random-nn/' 52 | 53 | outs="./svc-pairs"+str(datetime.now()).replace(' ','-')+'/' 54 | os.system('mkdir -p {0}'.format(outs)) 55 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../../data/mnist.pkl.gz") 56 | nnindex=-1 57 | with open(di+'README.txt') as f: 58 | lines = f.readlines() 59 | for line in lines: 60 | 61 | nnindex+=1 62 | if nnindex<1: continue 63 | 64 | fname=line.split()[0] 65 | with open(di+'w_'+fname, "r") as infile: 66 | weights=json.load(infile) 67 | with open(di+'b_'+fname, "r") as infile: 68 | biases=json.load(infile) 69 | 70 | nnet=NNett(weights, biases) 71 | N=len(nnet.weights) 72 | 73 | s='Neural net tested: {0}\n'.format(fname) 74 | fres=fname+'-results.txt' 75 | f=open(outs+fres, "a") 76 | f.write(s) 77 | f.close() 78 | 79 | covered=0 80 | not_covered=0 81 | 
i_begin=1 82 | j_begin=0 83 | k_begin=0 84 | nade=0 # number of adversarial examples 85 | for I in range(i_begin, N-1): ## iterate each hidden layer 86 | M=len(nnet.weights[I-1][0]) 87 | for J in range(j_begin, M): 88 | for K in range(k_begin, len(nnet.weights[I][0])): 89 | found, is_ade, tested, ncex_, d_, label, label_, label__=svc_pair(nnet, I, J, K, test_data, outs) 90 | if found: covered+=1 91 | else: not_covered+=1 92 | 93 | if is_ade: nade+=1 94 | 95 | s='I-J-K: {0}-{1}-{2}, '.format(I, J, K) 96 | s+='{0}, tested images: {1}, nade={2}, d={3}, covered={4}, not_covered={5}, ncex={6}, labels: {7}:{8}-{9}\n'.format(found, tested, nade, d_, covered, not_covered, nade, label, label_, label__) 97 | f=open(outs+fres, "a") 98 | f.write(s) 99 | f.close() 100 | f=open('./results-svc.txt', "a") 101 | s='{0}: svc-coverage: {1}, nade={2}, covered={3}, not-covered={4}, CEX={5}\n'.format(fname, 1.0*covered/(covered+not_covered), nade, covered, not_covered, nade) 102 | f.write(s) 103 | 104 | 105 | if __name__=="__main__": 106 | main() 107 | -------------------------------------------------------------------------------- /exps/exp5-3/SVC/results-svc.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index1-59-94-56-45.txt: svc-coverage: 0.99992743106, nade=509, covered=13779, not-covered=1, CEX=509 2 | mnist_nnet_index2-72-61-70-77.txt: svc-coverage: 0.999865065443, nade=548, covered=14820, not-covered=2, CEX=548 3 | mnist_nnet_index3-65-99-87-23-31.txt: svc-coverage: 0.997565294378, nade=669, covered=18028, not-covered=44, CEX=669 4 | mnist_nnet_index4-49-61-90-21-48.txt: svc-coverage: 0.993505945855, nade=578, covered=11780, not-covered=77, CEX=578 5 | mnist_nnet_index5-97-83-32.txt: svc-coverage: 1.0, nade=412, covered=11027, not-covered=0, CEX=412 6 | mnist_nnet_index6-33-95-67-43-76.txt: svc-coverage: 0.991833749771, nade=625, covered=16275, not-covered=134, CEX=625 7 | mnist_nnet_index7-78-62-73-47.txt: svc-coverage: 
1.0, nade=530, covered=13263, not-covered=0, CEX=530 8 | mnist_nnet_index8-87-33-62.txt: svc-coverage: 1.0, nade=292, covered=5537, not-covered=0, CEX=292 9 | mnist_nnet_index9-76-55-74-98-75.txt: svc-coverage: 0.983094652996, nade=577, covered=23203, not-covered=399, CEX=577 10 | -------------------------------------------------------------------------------- /exps/exp5-3/lp-call-runtime/exp-get-runtime.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | sys.path.insert(0, '../../../src/') 4 | import random 5 | import numpy as np 6 | import json 7 | import os 8 | import time 9 | from datetime import datetime 10 | 11 | from util import * 12 | from nnett import * 13 | from lp import * 14 | 15 | 16 | def ssc_pair(nnet, I, J, K, test_data, di): 17 | 18 | index=-1 19 | tot=len(test_data[0].eval()) 20 | 21 | ordering=list(range(tot)) 22 | np.random.shuffle(ordering) 23 | 24 | cex=False 25 | 26 | while index=40: break ## 71 | 72 | return False, index, cex, -1, -1, -1, -1 73 | 74 | def main(): 75 | di='../../random-nn/' 76 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../../data/mnist.pkl.gz") 77 | nnindex=-1 78 | with open(di+'README.txt') as f: 79 | lines = f.readlines() 80 | for line in lines: 81 | 82 | nnindex+=1 83 | if nnindex<7: continue 84 | 85 | fname=line.split()[0] 86 | with open(di+'w_'+fname, "r") as infile: 87 | weights=json.load(infile) 88 | with open(di+'b_'+fname, "r") as infile: 89 | biases=json.load(infile) 90 | 91 | nnet=NNett(weights, biases) 92 | N=len(nnet.weights) 93 | 94 | s='Neural net tested: {0}\n'.format(fname) 95 | f=open('./results.txt', "a") 96 | f.write(s) 97 | f.close() 98 | 99 | ncex=0 100 | covered=0 101 | not_covered=0 102 | i_begin=1 103 | j_begin=0 104 | k_begin=0 105 | flag=False 106 | for I in range(i_begin, N-1): ## iterate each hidden layer 107 | M=len(nnet.weights[I-1][0]) 108 | f=open('./results.txt', "a") 109 | s='L{0}-{1}: '.format(I, 
I+1) 110 | f.write(s) 111 | for J in range(j_begin, M): 112 | for K in range(k_begin, len(nnet.weights[I][0])): 113 | flag=True 114 | found, tested, cex, d, label, label_, label__=ssc_pair(nnet, I, J, K, test_data, './') 115 | if found: covered+=1 116 | else: 117 | not_covered+=1 118 | flag=False 119 | if cex: ncex+=1 120 | #s='I-J-K: {0}-{1}-{2}, '.format(I, J, K) 121 | #s+='{0}, tested images: {1}, ncex={2}, covered={3}, not_covered={4}, d={5}, {6}:{7}-{8}\n'.format(found, tested, ncex, covered, not_covered, d, label, label_, label__) 122 | #f=open(outs+'results.txt', "a") 123 | #f.write(s) 124 | #f.close() 125 | if flag: break 126 | k_begin=0 127 | if flag: break 128 | j_begin=0 129 | #f=open(di+'results.txt', "a") 130 | #s='{0}: mcdc-coverage: {1}, CEX={2}, covered={3}, not-covered={4}\n'.format(fname, 1.0*covered/(covered+not_covered), ncex, covered, not_covered) 131 | #f.write(s) 132 | 133 | 134 | if __name__=="__main__": 135 | main() 136 | -------------------------------------------------------------------------------- /exps/exp5-3/plots/input-distance-plots/parse-and-plot.py: -------------------------------------------------------------------------------- 1 | 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import csv 5 | 6 | """ 7 | Simple demo of a scatter plot. 
8 | """ 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | 12 | results=['8-ss-results.txt', '9-ss-results.txt', '10-ss-results.txt'] 13 | 14 | x=np.arange(0.01, 0.31, 0.01) 15 | 16 | 17 | tot8=0 18 | vect8=np.zeros(30) 19 | tot9=0 20 | vect9=np.zeros(30) 21 | tot10=0 22 | vect10=np.zeros(30) 23 | 24 | pre_nex=0 25 | with open('8-ss-results.txt','r') as csvfile: 26 | plots = csv.reader(csvfile, delimiter=',') 27 | for row in plots: 28 | nex=int(row[3].split('=')[1]) 29 | if nex>pre_nex: 30 | pre_nex=nex 31 | tot8+=1 32 | d=float(row[6].split('=')[1]) 33 | for i in range(0, 30): 34 | if d<(i+1)*0.01: vect8[i]+=1 35 | for i in range(0, 30): 36 | vect8[i]=1.0*vect8[i]/tot8 37 | 38 | pre_nex=0 39 | with open('9-ss-results.txt','r') as csvfile: 40 | plots = csv.reader(csvfile, delimiter=',') 41 | for row in plots: 42 | nex=int(row[3].split('=')[1]) 43 | if nex>pre_nex: 44 | pre_nex=nex 45 | tot9+=1 46 | d=float(row[6].split('=')[1]) 47 | for i in range(0, 30): 48 | if d<(i+1)*0.01: vect9[i]+=1 49 | for i in range(0, 30): 50 | vect9[i]=1.0*vect9[i]/tot9 51 | 52 | pre_nex=0 53 | with open('10-ss-results.txt','r') as csvfile: 54 | plots = csv.reader(csvfile, delimiter=',') 55 | for row in plots: 56 | nex=int(row[3].split('=')[1]) 57 | if nex>pre_nex: 58 | pre_nex=nex 59 | tot10+=1 60 | d=float(row[6].split('=')[1]) 61 | for i in range(0, 30): 62 | if d<(i+1)*0.01: vect10[i]+=1 63 | for i in range(0, 30): 64 | vect10[i]=1.0*vect10[i]/tot10 65 | 66 | 67 | #plt.axis([0, 11, -0.5, +0.5]) 68 | plt.plot(x,vect10, '--', label='$\mathcal{N}_{10}$') 69 | plt.plot(x,vect8, '--', label='$\mathcal{N}_8$') 70 | plt.plot(x,vect9, '--', label='$\mathcal{N}_9$') 71 | 72 | plt.xlabel('Input distance') 73 | plt.ylabel('Accumulated adversarial examples') 74 | plt.legend() 75 | plt.legend(loc="lower right") 76 | #plt.show() 77 | plt.savefig("ss-distance-map.pdf", bbox_inches='tight') 78 | 79 | 80 | 81 | 
-------------------------------------------------------------------------------- /exps/exp5-3/plots/input-distance-plots/ss-distance-map.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-3/plots/input-distance-plots/ss-distance-map.pdf -------------------------------------------------------------------------------- /exps/exp5-3/plots/layers/layerwise-ss-bugs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-3/plots/layers/layerwise-ss-bugs.pdf -------------------------------------------------------------------------------- /exps/exp5-3/plots/layers/layerwise-ss-coverage.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-3/plots/layers/layerwise-ss-coverage.pdf -------------------------------------------------------------------------------- /exps/exp5-3/plots/layers/plot-bar.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | # data to plot 6 | n_groups = 5 7 | n8 = (1, 1, 0.99621101, 0.96808511, 0) 8 | n9 = (1, 1, 1, 0, 0) 9 | n10 = (1, 1, 0.9783508, 0.64231293, 0.524) 10 | 11 | # create plot 12 | fig, ax = plt.subplots() 13 | index = np.arange(n_groups) 14 | bar_width = 0.2 15 | opacity = 0.8 16 | 17 | rect_n8 = plt.bar(index+2*bar_width, n8, bar_width, 18 | alpha=opacity, 19 | color='b', 20 | label='$\mathcal{N}_{8}$') 21 | 22 | rect_n9 = plt.bar(index + 0*bar_width, n9, bar_width, 23 | alpha=opacity, 24 | color='g', 25 | label='$\mathcal{N}_9$') 26 | 27 | rect_n10 = plt.bar(index + 1*bar_width, n10, bar_width, 28 | alpha=opacity, 29 | color='red', 30 | 
label='$\mathcal{N}_{10}$') 31 | 32 | 33 | plt.xlabel('Adjacent layers') 34 | plt.ylabel('Layerwise SS coverage') 35 | #plt.title('Scores by person') 36 | plt.xticks(index + bar_width, ('$L2-3$', '$L3-4$', '$L4-5$', '$L5-6$', '$L6-7$')) 37 | plt.legend() 38 | 39 | plt.tight_layout() 40 | #plt.show() 41 | plt.savefig("layerwise-ss-coverage.pdf", bbox_inches='tight') 42 | 43 | -------------------------------------------------------------------------------- /exps/exp5-3/plots/layers/plot-bar2.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | 5 | # data to plot 6 | n_groups = 5 7 | n8 = (0.35220126,0.49865229,0.1410602, 0.00808625, 0) 8 | n9 = (0.37142857,0.59548872,0.03308271, 0, 0) 9 | n10 = (0.27193619,0.38941262,0.24655547,0.08919507, 0.00290065) 10 | 11 | # create plot 12 | fig, ax = plt.subplots() 13 | index = np.arange(n_groups) 14 | bar_width = 0.2 15 | opacity = 0.8 16 | 17 | rect_n8 = plt.bar(index+2*bar_width, n8, bar_width, 18 | alpha=opacity, 19 | color='b', 20 | label='$\mathcal{N}_8$') 21 | 22 | rect_n9 = plt.bar(index + 0*bar_width, n9, bar_width, 23 | alpha=opacity, 24 | color='g', 25 | label='$\mathcal{N}_9$') 26 | 27 | rect_n10 = plt.bar(index + 1*bar_width, n10, bar_width, 28 | alpha=opacity, 29 | color='red', 30 | label='$\mathcal{N}_{10}$') 31 | 32 | 33 | plt.xlabel('Adjacent layers') 34 | plt.ylabel('Adversarial examples') 35 | #plt.title('Scores by person') 36 | plt.xticks(index + bar_width, ('$L2-3$', '$L3-4$', '$L4-5$', '$L5-6$', '$L6-7$')) 37 | plt.legend() 38 | 39 | plt.tight_layout() 40 | #plt.show() 41 | plt.savefig("layerwise-ss-bugs.pdf", bbox_inches='tight') 42 | 43 | -------------------------------------------------------------------------------- /exps/exp5-3/plots/ss-top10/plot-ss-vs-ss-top-10.py: -------------------------------------------------------------------------------- 1 | 2 | import matplotlib.pyplot as plt 3 | import numpy 
as np 4 | import csv 5 | 6 | """ 7 | Simple demo of a scatter plot. 8 | """ 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | 12 | 13 | N = 50 14 | x = np.random.rand(N) 15 | y = np.random.rand(N) 16 | colors1 = np.random.rand(N) 17 | colors2 = np.random.rand(N) 18 | area = np.pi *5**2 #* (15 * np.random.rand(2))**2 # 0 to 15 point radii 19 | 20 | 21 | nns=[] 22 | ss1=[] 23 | ae1=[] 24 | ss2=[] 25 | ae2=[] 26 | 27 | ss1m2=[] 28 | ae1m2=[] 29 | 30 | with open('ss-vs-ss-top-10.csv','r') as csvfile: 31 | plots = csv.reader(csvfile, delimiter=',') 32 | for row in plots: 33 | print row 34 | nns.append(row[0]) 35 | ss1.append(row[1]) 36 | ae1.append(row[2]) 37 | ss2.append(row[3]) 38 | ae2.append(row[4]) 39 | 40 | ss1m2.append(float(row[1])-float(row[3])) 41 | ae1m2.append(float(row[2])-float(row[4])) 42 | 43 | plt.axis([0, 11, -0.1, +0.1]) 44 | 45 | plt.plot([0, 11],[0, 0], '--', alpha=0.5) 46 | plt.scatter(nns, ss1m2, s=area, color='red', alpha=0.25, label='$Mcov_{SS}-Mcov_{SS}^{w10}$') 47 | plt.scatter(nns, ae1m2, s=area, color='green', alpha=0.25, label='$AEcov_{SS}-AE_{SS}^{w10}$') 48 | 49 | # 50 | plt.xlabel('DNN index') 51 | plt.ylabel('Difference in testing results') 52 | plt.legend() 53 | plt.savefig("ss-top10.pdf", bbox_inches='tight') 54 | 55 | -------------------------------------------------------------------------------- /exps/exp5-3/plots/ss-top10/ss-top10.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-3/plots/ss-top10/ss-top10.pdf -------------------------------------------------------------------------------- /exps/exp5-3/plots/ss-top10/ss-vs-ss-top-10.csv: -------------------------------------------------------------------------------- 1 | 1, 0.997, 0.189, 0.999, 0.177 2 | 2, 0.985, 0.095, 0.994, 0.075 3 | 3, 0.994, 0.071, 0.995, 0.080 4 | 4, 0.984, 0.071, 0.964, 0.065 5 | 5, 0.891, 0.114, 0.922, 
0.084 6 | 6, 1.000, 0.094, 1.000, 0.064 7 | 7, 0.869, 0.088, 0.891, 0.072 8 | 8, 0.998, 0.084, 0.998, 0.089 9 | 9, 1.000, 0.120, 1.000, 0.128 10 | 10,0.867, 0.058, 0.899, 0.064 11 | -------------------------------------------------------------------------------- /exps/exp5-4/cnn-results/parse-results.py: -------------------------------------------------------------------------------- 1 | 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import csv 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | x=np.arange(3, 6, 1) 10 | 11 | n20_10=0 12 | ae20_10=0 13 | co20_10=0 14 | # 15 | n20_11=0 16 | ae20_11=0 17 | co20_11=0 18 | # 19 | n21_10=0 20 | ae21_10=0 21 | co21_10=0 22 | # 23 | n20_11=0 24 | ae20_11=0 25 | co20_11=0 26 | # 27 | n21_11=0 28 | ae21_11=0 29 | co21_11=0 30 | 31 | 32 | N20_10=0 33 | AE20_10=0 34 | CO20_10=0 35 | N20_11=0 36 | AE20_11=0 37 | CO20_11=0 38 | N21_10=0 39 | AE21_10=0 40 | CO21_10=0 41 | 42 | with open('cnn2-results.txt','r') as csvfile: 43 | plots = csv.reader(csvfile, delimiter=',') 44 | for row in plots: 45 | locs=row[0].split() 46 | I=int(locs[0].split('-')[1]) 47 | J=int(locs[1].split('-')[1]) 48 | cex=(row[3]==' cex=True') 49 | covered=(row[1]==' True') 50 | if I==0 and J==0: 51 | n20_10+=1 52 | if cex: ae20_10+=1 53 | if covered: co20_10+=1 54 | elif I==0 and J==1: 55 | n21_10+=1 56 | if cex: ae21_10+=1 57 | if covered: co21_10+=1 58 | elif I==1 and J==0: 59 | n20_11+=1 60 | if cex: ae20_11+=1 61 | if covered: co20_11+=1 62 | elif I==1 and J==1: 63 | n21_11+=1 64 | if cex: ae21_11+=1 65 | if covered: co21_11+=1 66 | 67 | print "CNN2: " 68 | print '{0}, {1}, {2}'.format(n20_10, ae20_10, co20_10) 69 | print 'cf2,1: cf1,1==> {0}, {1}'.format(1.0*co20_10/n20_10, 1.0*ae20_10/n20_10) 70 | print '{0}, {1}, {2}'.format(n21_10, ae21_10, co21_10) 71 | print 'cf2,2: cf1,1==> {0}, {1}'.format(1.0*co21_10/n21_10, 1.0*ae21_10/n21_10) 72 | print '{0}, {1}, {2}'.format(n20_11, ae20_11, co20_11) 73 | print 'cf2,1: 
cf1,2==> {0}, {1}'.format(1.0*co20_11/n20_11, 1.0*ae20_11/n20_11) 74 | print '{0}, {1}, {2}'.format(n21_11, ae21_11, co21_11) 75 | print 'cf2,2: cf1,2==> {0}, {1}'.format(1.0*co21_11/n21_11, 1.0*ae21_11/n21_11) 76 | 77 | 78 | with open('cnn1-results.txt','r') as csvfile: 79 | plots = csv.reader(csvfile, delimiter=',') 80 | for row in plots: 81 | locs=row[0].split() 82 | I=int(locs[0].split('-')[1]) 83 | J=int(locs[1].split('-')[1]) 84 | cex=(row[3]==' cex=True') 85 | covered=(row[1]==' True') 86 | if I==0 and J==0: 87 | N20_10+=1 88 | if cex: AE20_10+=1 89 | if covered: CO20_10+=1 90 | elif I==1 and J==0: 91 | N20_11+=1 92 | if cex: AE20_11+=1 93 | if covered: CO20_11+=1 94 | elif I==0 and J==1: 95 | N21_10+=1 96 | if cex: AE21_10+=1 97 | if covered: CO21_10+=1 98 | 99 | print "\nCNN1: " 100 | print '{0}, {1}, {2}'.format(N20_10, AE20_10, CO20_10) 101 | print 'cf2,1: cf1,1==> {0}, {1}'.format(1.0*CO20_10/N20_10, 1.0*AE20_10/N20_10) 102 | print '{0}, {1}, {2}'.format(N21_10, AE21_10,CO21_10) 103 | print 'cf2,2: cf1,1==> {0}, {1}'.format(1.0*CO21_10/N21_10, 1.0*AE21_10/N21_10) 104 | 105 | -------------------------------------------------------------------------------- /exps/exp5-4/cnn1-exp-conv-ss.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from cnnett import Layert 10 | from cnnett import CNNett 11 | from conv_lp import * 12 | from util import * 13 | 14 | def ssc_pair_conv(cnnet, current_layer, current_filter, current_I, current_J, prior_filter, prior_I, prior_J, test_data, di, prior_fs): 15 | 16 | index=-1 17 | tot=len(test_data[0].eval()) 18 | 19 | ordering=list(range(tot)) 20 | np.random.shuffle(ordering) 21 | 22 | cex=False 23 | 24 | while index=40: break ## 45 | 46 | return False, index, cex, -1, -1, -1, -1 47 | 48 | def main(): 49 | 
outs="ss-pairs"+str(datetime.now()).replace(' ','-')+'/' 50 | os.system('mkdir -p {0}'.format(outs)) 51 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../data/mnist.pkl.gz") 52 | 53 | fname='cnn1' 54 | 55 | ws=np.load('cnns/cnn1-weights-mnist.npy') 56 | bs=np.load('cnns/cnn1-biases-mnist.npy') 57 | 58 | layer1=Layert(ws[0], bs[0], True, 2) 59 | layer2=Layert(ws[1], bs[1], True, 2) 60 | layer3=Layert(ws[2], bs[2]) 61 | layer4=Layert(ws[3], bs[3]) 62 | 63 | cnnet=CNNett([layer1, layer2, layer3, layer4]) 64 | 65 | 66 | outs_=outs+fname+"/" 67 | if not os.path.exists(outs_): 68 | os.system('mkdir -p {0}'.format(outs_)) 69 | 70 | s='Neural net tested: {0}\n'.format(fname) 71 | fres=fname+'-results.txt' 72 | f=open(outs_+fres, "a") 73 | f.write(s) 74 | f.close() 75 | 76 | ## to simplify things, let's have compute an act here 77 | X=test_data[0][0].eval() 78 | ## act2 is before max-pooling 79 | _, act, act2=cnnet.eval(list(X)) 80 | 81 | ncex=0 82 | covered=0 83 | not_covered=0 84 | 85 | N=len(act) 86 | for current_layer in range(2,N): 87 | for current_filter in range(0, len(act2[current_layer])): 88 | a=act2[current_layer][current_filter] 89 | for current_I in range(0, len(a)): 90 | for current_J in range(0, len(a[current_I])): 91 | ##### To test (current_layer, current_filter, current_I, current_J) 92 | prior_mps=set() ## these at prior mp layer that affect current_I,current_J 93 | nfr=cnnet.hidden_layers[current_layer-1].w[current_filter][0].shape[0] # number of filter rows 94 | nfc=cnnet.hidden_layers[current_layer-1].w[current_filter][0].shape[1] # number of filter columns 95 | for l in range(0, nfr): 96 | for m in range(0, nfc): 97 | prior_mps.add((current_I+nfr-m-1, current_J+nfc-l-1)) 98 | 99 | prior_fs=set() ### these at prior filters that affect the current_I,current_J 100 | for x in prior_mps: 101 | for ii in range(cnnet.hidden_layers[current_layer-2].mp_size_x*x[0], cnnet.hidden_layers[current_layer-2].mp_size_x*(x[0]+1)): 102 | 
for jj in range(cnnet.hidden_layers[current_layer-2].mp_size_y*x[1], cnnet.hidden_layers[current_layer-2].mp_size_y*(x[1]+1)): 103 | prior_fs.add((ii,jj)) 104 | 105 | for prior_filter in range(0, len(act2[current_layer-1])): 106 | # 107 | b=act2[current_layer-1][prior_filter] 108 | for prior_I in range(0, len(b)): 109 | for prior_J in range(0, len(b[prior_I])): ###(prior_layer, prior_filter) 110 | if not ((prior_I, prior_J) in prior_fs): 111 | continue 112 | found, tested, cex, d, label, label_, label__=ssc_pair_conv(cnnet, current_layer, current_filter, current_I, current_J, prior_filter, prior_I, prior_J, test_data, outs_, prior_fs) 113 | if found: covered+=1 114 | else: not_covered+=1 115 | if cex: ncex+=1 116 | s='{0}-{1}-{2}-{3}: {4}-{5}-{6}-{7}, '.format(current_layer-1, prior_filter, prior_I, prior_J, current_layer, current_filter, current_I, current_J) 117 | s+='{0}, tested images: {1}, cex={9}, ncex={2}, covered={3}, not_covered={4}, d={5}, {6}:{7}-{8}\n'.format(found, tested, ncex, covered, not_covered, d, label, label_, label__, cex) 118 | f=open(outs_+fres, "a") 119 | f.write(s) 120 | f.close() 121 | f=open('./results-ssc.txt', "a") 122 | tot_pairs=covered+not_covered; 123 | s='{0}: ssc-coverage: {1}, CEX\%={2}, #CEX={3}, tot_pairs={4}, covered={5}, not-covered={6}\n'.format(fname, 1.0*covered/tot_pairs, 1.0*ncex/tot_pairs, ncex, tot_pairs, covered, not_covered) 124 | f.write(s) 125 | 126 | 127 | if __name__=="__main__": 128 | main() 129 | -------------------------------------------------------------------------------- /exps/exp5-4/cnn2-exp-conv-ss.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.insert(0, '../../src/') 3 | import random 4 | import numpy as np 5 | import json 6 | import os 7 | from datetime import datetime 8 | 9 | from cnnett import Layert 10 | from cnnett import CNNett 11 | from conv_lp import * 12 | from util import * 13 | 14 | def ssc_pair_conv(cnnet, current_layer, 
current_filter, current_I, current_J, prior_filter, prior_I, prior_J, test_data, di, prior_fs): 15 | 16 | index=-1 17 | tot=len(test_data[0].eval()) 18 | 19 | ordering=list(range(tot)) 20 | np.random.shuffle(ordering) 21 | 22 | cex=False 23 | 24 | while index=40: break ## 45 | 46 | return False, index, cex, -1, -1, -1, -1 47 | 48 | def main(): 49 | outs="ss-pairs"+str(datetime.now()).replace(' ','-')+'/' 50 | os.system('mkdir -p {0}'.format(outs)) 51 | training_data, validation_data, test_data = mnist_load_data_shared(filename="../data/mnist.pkl.gz") 52 | 53 | fname='cnn2' 54 | 55 | ws=np.load('cnns/cnn2-weights-mnist.npy') 56 | bs=np.load('cnns/cnn2-biases-mnist.npy') 57 | 58 | layer1=Layert(ws[0], bs[0], True, 2) 59 | layer2=Layert(ws[1], bs[1], True, 2) 60 | layer3=Layert(ws[2], bs[2]) 61 | layer4=Layert(ws[3], bs[3]) 62 | 63 | cnnet=CNNett([layer1, layer2, layer3, layer4]) 64 | 65 | 66 | outs_=outs+fname+"/" 67 | if not os.path.exists(outs_): 68 | os.system('mkdir -p {0}'.format(outs_)) 69 | 70 | s='Neural net tested: {0}\n'.format(fname) 71 | fres=fname+'-results.txt' 72 | f=open(outs_+fres, "a") 73 | f.write(s) 74 | f.close() 75 | 76 | ## to simplify things, let's have compute an act here 77 | X=test_data[0][0].eval() 78 | ## act2 is before max-pooling 79 | _, act, act2=cnnet.eval(list(X)) 80 | 81 | ncex=0 82 | covered=0 83 | not_covered=0 84 | 85 | N=len(act) 86 | for current_layer in range(2,N): 87 | for current_filter in range(0, len(act2[current_layer])): 88 | a=act2[current_layer][current_filter] 89 | for current_I in range(0, len(a)): 90 | for current_J in range(0, len(a[current_I])): 91 | ##### To test (current_layer, current_filter, current_I, current_J) 92 | prior_mps=set() ## these at prior mp layer that affect current_I,current_J 93 | nfr=cnnet.hidden_layers[current_layer-1].w[current_filter][0].shape[0] # number of filter rows 94 | nfc=cnnet.hidden_layers[current_layer-1].w[current_filter][0].shape[1] # number of filter columns 95 | for l in 
range(0, nfr): 96 | for m in range(0, nfc): 97 | prior_mps.add((current_I+nfr-m-1, current_J+nfc-l-1)) 98 | 99 | prior_fs=set() ### these at prior filters that affect the current_I,current_J 100 | for x in prior_mps: 101 | for ii in range(cnnet.hidden_layers[current_layer-2].mp_size_x*x[0], cnnet.hidden_layers[current_layer-2].mp_size_x*(x[0]+1)): 102 | for jj in range(cnnet.hidden_layers[current_layer-2].mp_size_y*x[1], cnnet.hidden_layers[current_layer-2].mp_size_y*(x[1]+1)): 103 | prior_fs.add((ii,jj)) 104 | 105 | for prior_filter in range(0, len(act2[current_layer-1])): 106 | # 107 | b=act2[current_layer-1][prior_filter] 108 | for prior_I in range(0, len(b)): 109 | for prior_J in range(0, len(b[prior_I])): ###(prior_layer, prior_filter) 110 | if not ((prior_I, prior_J) in prior_fs): 111 | continue 112 | found, tested, cex, d, label, label_, label__=ssc_pair_conv(cnnet, current_layer, current_filter, current_I, current_J, prior_filter, prior_I, prior_J, test_data, outs_, prior_fs) 113 | if found: covered+=1 114 | else: not_covered+=1 115 | if cex: ncex+=1 116 | s='{0}-{1}-{2}-{3}: {4}-{5}-{6}-{7}, '.format(current_layer-1, prior_filter, prior_I, prior_J, current_layer, current_filter, current_I, current_J) 117 | s+='{0}, tested images: {1}, cex={9}, ncex={2}, covered={3}, not_covered={4}, d={5}, {6}:{7}-{8}\n'.format(found, tested, ncex, covered, not_covered, d, label, label_, label__, cex) 118 | f=open(outs_+fres, "a") 119 | f.write(s) 120 | f.close() 121 | f=open('./results-ssc.txt', "a") 122 | tot_pairs=covered+not_covered; 123 | s='{0}: ssc-coverage: {1}, CEX\%={2}, #CEX={3}, tot_pairs={4}, covered={5}, not-covered={6}\n'.format(fname, 1.0*covered/tot_pairs, 1.0*ncex/tot_pairs, ncex, tot_pairs, covered, not_covered) 124 | f.write(s) 125 | 126 | 127 | if __name__=="__main__": 128 | main() 129 | -------------------------------------------------------------------------------- /exps/exp5-4/cnns/cnn1-biases-conv.npy: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn1-biases-conv.npy -------------------------------------------------------------------------------- /exps/exp5-4/cnns/cnn1-biases-mnist.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn1-biases-mnist.npy -------------------------------------------------------------------------------- /exps/exp5-4/cnns/cnn1-weights-conv.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn1-weights-conv.npy -------------------------------------------------------------------------------- /exps/exp5-4/cnns/cnn1-weights-mnist.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn1-weights-mnist.npy -------------------------------------------------------------------------------- /exps/exp5-4/cnns/cnn2-biases-conv.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn2-biases-conv.npy -------------------------------------------------------------------------------- /exps/exp5-4/cnns/cnn2-biases-mnist.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn2-biases-mnist.npy -------------------------------------------------------------------------------- 
/exps/exp5-4/cnns/cnn2-weights-conv.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn2-weights-conv.npy -------------------------------------------------------------------------------- /exps/exp5-4/cnns/cnn2-weights-mnist.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrustAI/DeepCover/c33dcacae961ace4519b4ff12b783564947d8bee/exps/exp5-4/cnns/cnn2-weights-mnist.npy -------------------------------------------------------------------------------- /exps/random-nn/README.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index0-67-22-63.txt 97.03% 2 | mnist_nnet_index1-59-94-56-45.txt 97.65% 3 | mnist_nnet_index2-72-61-70-77.txt 97.50% 4 | mnist_nnet_index3-65-99-87-23-31.txt 97.38% 5 | mnist_nnet_index4-49-61-90-21-48.txt 97.34% 6 | mnist_nnet_index5-97-83-32.txt 97.45% 7 | mnist_nnet_index6-33-95-67-43-76.txt 97.03% 8 | mnist_nnet_index7-78-62-73-47.txt 97.37% 9 | mnist_nnet_index8-87-33-62.txt 97.26% 10 | mnist_nnet_index9-76-55-74-98-75.txt 97.49% 11 | -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index0-67-22-63.txt: -------------------------------------------------------------------------------- 1 | [[0.68517546394966089, -0.2421427589897289, 0.16353186613742871, 0.22169359633841351, -0.69497128981667466, -1.220784356010955, -0.11323689978583328, -1.2731967604794727, -0.64892391225137536, -0.40412575471439066, 0.37445342397196058, -1.5060872797791649, 0.48459124568495887, 0.30810379028869139, 0.80597450523684022, -1.6487742445553224, -0.31523779816001096, 0.38424622509667034, -1.5997236392795073, -0.62262293270253832, 0.38014761434127592, -1.4127806849304256, -0.31598361714111795, 1.055656878345546, 0.010401192039249484, 
1.1004309592512256, 0.15405247586843673, -0.6758735433316575, -0.98918561577549824, -0.58023836217450064, 0.37227743975687155, -0.052014534009449706, -2.0910353894890377, -1.7594407145012874, 0.36572549416693184, 0.27039063796328611, 0.93787734231403808, 0.8395514725917228, 0.056511984760142173, 0.54845223513237618, -0.34362191427795347, -0.47226435179719561, -0.6048896393937031, 0.41715007600202908, 0.13269177988859315, -0.28247386750076559, -0.77554937651988498, -0.030044646028110689, -1.155881415930768, 0.56267127136944117, -1.2802246200803666, -0.79249827062807854, 1.2313580545903797, 0.22951697031635282, -0.46670835859890408, -0.82803043206151905, 0.86615670812169976, 0.17055514216636541, -1.0062266456747595, -0.055732498641491955, -1.6402075991433638, 0.92204575766882102, -0.84429072856715415, 1.1901588299592103, 0.40749580036959981, -1.1436735922856758, 1.2441941597580284], [-1.4471931224224726, 1.3550318431224344, 1.2433213018402396, 0.029504107627325259, -0.25643791996438076, 1.0451865104488081, -1.411771263334213, -1.8331533993726552, -0.06487566269270327, 0.33915674607594926, -2.2000823440627255, -0.98731973199126577, -1.5343442631646045, -0.17175476152770372, 0.31606935689913151, 0.43528462533431123, -1.0985882633093143, -1.373588366825889, -0.070564251851120766, -0.17562877573206967, -0.021090467743571008, 0.52753355667278845], [0.97125682535315949, -1.6181448785907337, -0.46520516162175024, 0.18822053691923471, -0.26886810111241527, -0.88121071540674922, 0.42804866975085942, -0.040962528745954283, 0.51388934643011897, -0.042792511293548913, 1.8701218483391009, -0.60755137292783634, 0.80020451887497657, 3.4161915733809076, -0.028222783588078278, 1.8625708645896359, 0.86088689307987221, -0.73649036311594662, -0.72918355348151564, 0.48812581502927138, 1.3148888012317677, -0.4593389466642594, 1.3750270176015302, 1.6489186370834543, 0.87889085862373895, 0.40583820444129598, -1.4396219297985726, 1.4178520382465076, -1.5241968230818801, 0.60455768930083609, 
-0.38314598088403556, 0.69295737150060777, -0.76007711465836558, 0.88747866162236511, 0.87279382218036272, -1.6474479783572613, 2.106534404673793, -0.4320966198835311, -1.7953553379653291, -0.22629910155494926, 0.39837407133336367, -1.1689031822865157, -0.60055198611737204, 1.8337258944620736, 0.24598152307775897, 0.17408183916783507, -0.11887926151058824, -1.6853605574888293, -0.084000143336093194, -0.4693358125302613, -0.24173212255248142, -1.1951038078396465, -0.20680047308437202, -0.11458516354198127, -0.058160910305487437, 0.61060558375144269, 0.15299573689995111, -1.1477939523698437, -1.6839923359759092, -0.9703911381812409, 0.21144991529461446, 0.0618492802619659, 0.84026320457914172], [-0.045024570293258265, 0.012599281069311234, -0.2568633244604151, 0.11407131282022147, -0.57615335946466872, -0.45411614326259464, -0.25915539121989384, -0.21579287444968073, 1.047150765423392, 0.63328430383752854]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index1-59-94-56-45.txt: -------------------------------------------------------------------------------- 1 | [[1.2385646031556721, 0.97641297180937758, 0.15594768758076893, 0.98038630618854128, -2.6129573740083631, -1.8478762084881712, 0.66483899652109313, 0.89304485569728231, -0.51562403398390255, -0.21109351194409057, 0.88981088085292526, -0.31084908061152283, 0.06833414771860212, -0.22416355708845173, 0.85970252456821339, 0.16528164415609192, -0.29818990103044385, -0.66103022674559464, -0.70355154612387194, 1.7137386512222861, 1.0154951001860173, 0.93670920355627241, 1.1278835850104625, 2.4967273811353783, -0.12590819709783907, 0.73923411298146446, 0.73454468065272371, 1.9385867681449076, -0.053494119175846629, 1.3045382792377951, -0.066612095358186643, -0.93799781997509646, -0.23606324726785469, -1.4292241522908455, 0.86982334140478279, -0.061051239325965727, -0.42456264348720324, 1.0096760018737767, -0.76631359840838975, -0.0066935507031257143, 
0.78820163386711617, 0.78341756779080463, -0.75450904501811311, -0.19090196860334005, 0.78655661351834627, -0.21871408412270832, -1.3826738805487875, -0.20465619407481231, -0.75418175499874474, 0.52721742131571359, 1.4975973224805332, 1.1008878484935118, -0.48831594532304645, -1.0816364510959144, 1.7442710526082423, -0.87539198795868789, -1.2150861165626012, 1.7552191469281491, -0.61271880031531589], [-0.29765794558034725, -0.47093791109883287, -0.80127817152522185, 0.56554736323605959, -0.70753596482220105, 1.3523374174990892, 1.6482881184502711, 0.089480607901866543, 1.4706423312359769, -1.7269420017822517, -0.8801915570417812, -0.76183921290883216, -0.77912190508254742, 1.0901017829138446, -1.6340615475201619, 1.6095407558093324, 1.7807690053555911, 1.1726885057436838, -0.067926473480451596, -1.2290228676377741, -0.81370293221915169, -1.3700822840195619, 0.54537919181850902, 0.18069494594298707, 1.2397672965620372, -1.0628242264608505, 0.47035843246694614, 0.19661313909414685, 1.4368600953449997, 0.048744896707652689, 1.7236309766407218, -0.049957757188881173, -0.95121650357748677, -2.1844005192119424, 2.1535355152054922, -0.97054394933202204, -1.4479143993308412, -0.028917789889004085, 1.0849702307216773, 0.27649737459383122, 0.07831237888655565, 1.7349226830483282, -0.50616338290503038, -0.80254814554331588, -0.97687050980660184, -0.37884481271428355, -0.62499415516551671, -1.0105292271522912, -1.2503264360883251, -2.5869310117708357, 0.77694230171000023, 0.25497245948687108, -0.87592589151914413, -0.68495423642091158, 1.1852305633559457, -0.57051425567125857, 1.7213658464609394, 0.091600167688027825, 0.33875222593009235, -1.0868035315450162, -0.042619298088389615, -0.54924569088790653, -0.83996593503602135, 1.0879183476566974, -0.51113547415082572, -1.4961597775521829, -0.45267905945279907, -0.85424810976793908, 0.55248748036882089, -0.83112919851962841, -0.13408668017411279, 0.075368700338174291, -1.7467538621019587, -0.96205620737842934, 1.9945266701775852, 
1.5262644032184136, -0.039135463750847629, 0.1381189700657374, -0.55205622841597113, 1.8512963125555231, 0.26573667359192904, -1.2440765651438024, 0.38518332690055779, 0.80589179768499752, -0.058165462704624804, -1.7821283770150143, 0.51000074945799989, -0.73944709108787743, -1.5604822012558788, -0.65127717528504436, 0.24642406814123938, -0.025780425251100955, -0.39798890230231815, 0.94905429144459041], [0.085322529094127117, -1.2025986403929594, 1.0479263147318503, 0.34571317982743272, 1.0023818269284084, 2.5847649946577311, -0.58591734911387416, -0.83897409954934599, -1.1592760759685345, -2.0708101260328218, 0.0059868788832519639, 0.49704479953670583, -0.1653035727631143, 2.1518544749130259, -0.93648415101390814, 0.16161996583882507, -0.0040344617700714177, 1.4348205229516997, -0.50777094734889061, -0.022440459818756538, 0.40809140714018965, 0.68001759585059907, -1.8715394164952921, 1.432236599140551, 0.99524040722147034, -2.9614445883899072, 0.64300008568695532, 0.38481285541606042, 1.3826369237709029, -2.4712668592207963, 0.64007608103303038, -1.0491434567057651, 0.74439363116013058, -0.72141109962167216, 1.1514274303497118, -0.46667189500331402, 1.162728283794245, 2.0188821434961621, 0.78422445345901737, -0.63750359091905517, 0.34701579446890457, 1.6452382289583654, 0.41348751425997082, -0.81980721267343515, -0.10734746942794611, -0.46972176719160469, 0.26712466325113815, -0.26008040339049743, 1.2706158511700807, -1.0923691470570061, 0.86789156533553402, -1.5333929233255104, 0.82732686235824249, -0.23762547592143071, -0.20747294492801779, -0.23829183753663549], [-0.25442090757445301, 0.34134264058125879, 0.32687838114794487, 1.3891216183523807, 0.65773979424227669, 0.54321300358981961, -0.94542569017251632, -1.6050919247569237, -0.44524682367214341, -1.7471897303521979, 0.11105942709195729, -0.62414141497607856, -0.11450887381820726, 0.75354669743310343, -1.7471579379574615, 1.9658453907449378, 1.0396175735919788, -1.0974124780308934, -1.2554140908574043, 
1.2553719355159683, -1.0087235639500891, 0.94825834054912717, -1.7927086040181617, 0.92706486639768226, 0.52480826308069506, 0.75381265669779196, -3.2382423473120303, -0.10092992460487479, -2.7395461576617772, -0.5908414938337665, -0.15561489982832147, -0.71295484351905947, -0.050136717530837233, -0.10827765566260696, -0.38639771307640913, 0.59873603916051976, -0.29594091415724438, 1.3063513049364281, 0.76102662515419228, -0.094290004915297668, -1.1385195585789303, 0.82911344270045206, -0.97546993275465277, -0.58041952971468225, -0.8841153950480245], [-0.16246202922070696, -0.17461363086682763, 0.024091459838535889, 0.282158165596328, -0.39715854405170775, -0.031299172972275136, -0.4185104317857487, -0.37210057788994289, 0.76859397388397677, 0.48130078746834043]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index2-72-61-70-77.txt: -------------------------------------------------------------------------------- 1 | [[-0.72049255143711799, -0.11720450846782896, -0.058889057223189566, 0.079326589775192954, -2.7943409767477045, -1.6360048958347722, 1.061584422230466, 0.85925906677208952, 0.48972532653294493, -1.4902423558818663, 1.138583989438166, -0.95454351268508342, 0.097758458135509152, 0.039059388254198184, -0.98352672149138187, -0.069129989923174151, -1.5700458018117378, -0.27578770891709825, -0.32948046101112005, 0.57718215366543901, 0.96991609027980064, -1.1046162909745323, -1.5226125658156107, -0.44873796459097776, 0.16244776617359188, -0.080349031179533695, -2.1217825918421269, -0.58580016373286237, -0.84909125319945455, -0.38766832293145775, -0.84449648083285533, 0.14376511438145839, -0.90683525571810264, 0.60169756102363192, 0.369192025888542, 0.39559343182584844, 1.6679718064505875, -0.18639629282010528, 2.5485402326508826, 0.44075277216905323, 0.069560709220925318, -1.4646765220167421, 1.2290247879296825, 0.30057074022307256, 2.3588142935191665, 0.52535056701610483, -0.58612821612657851, 
-0.6166373321611216, -0.52289495885546533, 0.70772953056797894, -0.31220218316163678, -0.83239332941802036, -0.65018104294051038, 0.56334550532985794, -0.47158696388245958, 1.4481036317321527, -0.5711633944205825, -0.53991298773750607, -0.12860620590534791, 0.38357851401765858, 0.031190939027355383, 0.65655897351791759, -0.16543494006345882, -0.17500436594725718, -0.72658430304845301, -1.1384640861566335, -0.11284339124571299, 0.50261113544906855, 0.45270006646533473, -0.42328733855672701, -1.0912648803852174, 2.0115498950884692], [0.35800111930361656, 1.3974679203544005, -0.53853356601394298, -0.64444834025577891, 0.37019293540791537, -0.59572794274192309, -0.69328594035031221, -0.80667975859556329, -0.65837680592092096, 0.21844113305955201, 0.23119559860357269, -1.2310751492162053, -0.14783251849545193, -0.21710801342441421, 1.6574937450684879, 0.032576882288441383, 0.53295610690037853, -0.16568469164949087, -1.5955846686610227, 1.0267298686710744, -0.28981284571412513, 1.0313077102892529, 1.8811706046421215, 0.12653380646469273, 0.50889991066598461, -1.0895436835855723, -1.792704607228909, 0.21681839296861904, 3.991442230940998, -1.5439843660754382, 0.35990044245442571, -0.21800456712918587, -0.072027724893145767, 0.21819580001075323, -0.40000885759071325, -0.78699292123772691, 0.32396876565578453, 0.9248526279401833, -0.26310726548799646, -1.0929812561526902, 0.87087762891448606, 1.4090986544848636, 1.7538251579903406, 1.127396101929679, 0.33452529379082035, 0.45782838245311597, -0.14874910175626574, -2.3620363681195511, 0.80806748018165719, 0.50083895880733731, -1.1357120933145897, -0.39728947339111198, -0.50839641590208806, -0.40302346926404592, -0.9918634950284938, -0.81646547663977909, -1.1653402616950161, 0.35731353909591262, -0.60687783532250839, 1.6629436475021757, -0.11528192565563429], [-0.65403987599225721, -0.18863873541522994, -1.1238476461580631, 0.2484181579429649, 0.098895164032377941, -1.842677176420866, -0.53271759801051277, 
-0.03561333433844293, -1.265919635533157, 1.0896815985847657, -0.88727108567474366, 1.0421705906236627, -0.32926906662128824, 0.27799616025948487, 0.1954160560546801, -1.0034760417969228, -0.44037497157519101, -0.50669895625190697, -0.21347141451933305, 0.35925391026085757, -0.96528207573330493, -0.053491890954885137, -0.92361944476463953, -2.4775682490517488, 1.2135749680668899, 1.0444597530979216, -0.42057451849377359, -0.51109124439369991, 0.95851850666473193, 2.6020835387860601, 1.3106831738764639, 0.95188880022824096, 1.2319055222567492, -1.2650488031023488, -0.056173673860665234, -2.515749282134327, 1.9875911810273716, 0.097289626989843087, -1.0623998911590391, 0.56885555512465069, 0.30113963556481715, -0.050452431891877841, -1.3464246715064372, 0.67426095825247023, -0.83723777228360741, 0.35762645689776457, -0.45396236073768181, -0.53767477613975823, -1.1061224576853292, 0.76619551056598456, -0.71991194748362197, 0.6571939117048472, 1.090494791752147, -1.9111027281350237, 2.0045471343236465, -1.2145882161231907, 0.69416494971168774, 0.48046046537603798, 2.0438380721048337, -1.0653388383498748, 1.6510819008595063, 0.42122015969838716, 0.12589523661583069, 0.41387195987361747, 0.13419558418477034, -0.83455724687476807, 0.026426142292691458, 0.17674595960726708, -0.61618887881058027, -1.0992635470566654], [-1.1607335781637693, -0.33495936039569568, 0.686212989913071, -1.5539994738393994, 0.909757839251777, -0.73664973215705032, -1.4080761202993763, -1.476872860970514, 1.3501137023661778, 0.63921259456822888, -0.77940962685443815, 1.3368585862729505, 0.14071923963852903, 0.27049256400549943, -0.63616152063534737, 0.68647412326657253, -0.13667015493041801, -0.06140564452063197, 0.08812617058672749, -0.2409361252740421, 0.075588167856907612, -0.58634276508164218, -1.2336028821006197, -0.50734179817972946, 0.80134767003976226, 0.46640780976485824, -0.39518489627333048, 0.36896940249441107, -0.70336365812770474, -0.49614061625644035, -1.2375287879627186, 
0.90586415282992627, 1.0947794634275372, 1.1272982238876701, -0.95971764901000622, 2.222016857067191, 1.126078037553939, -2.1276187399015156, 0.058222352756863889, 0.65400839824886947, -1.0835703782406516, 1.7684332970248084, 0.64883135923754887, 1.8411893179319165, 0.69044267435601203, -0.12298931158484346, 0.89001845117856626, 0.7856875007577474, 1.6028993078706504, -0.60949835963402177, 0.15195897732118882, -0.10174805512055743, -0.16302046845967644, -0.31284516486319069, 0.1427400240682091, 0.01782778608350806, 1.1595399863383511, 0.47920972871203832, -1.6444512532855318, -1.660906227204606, -0.19319276946012021, -0.47149443050473466, -0.59423348755782757, -2.648879174587448, -2.4010522342792577, 0.77040413282367171, -2.1785278098796157, 0.28823252219525891, 0.18085244006500564, -0.82135871310308006, -0.35785506984262921, 0.90524890675982883, -1.6025970413539272, -0.95935945839289727, -1.2284754233639603, 0.11957089058853804, 3.6360350805517605], [0.0025777063732093965, -0.32637075876697347, -0.017161207553122112, 0.31638116686523959, -0.20502260212746765, 0.11183074361752784, -0.10380727237809625, -0.40768647835808941, 0.21351772482270251, 0.41574097750502043]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index3-65-99-87-23-31.txt: -------------------------------------------------------------------------------- 1 | [[0.71235322697618098, -1.4240240026042952, -1.9216045673255504, 1.0511107662452164, 2.77012129835117, 0.86358777019616106, -0.46349646749539486, -0.13430437954346877, 0.16743369392769725, 0.12600247046526394, 1.6417149997027916, 0.4422860100867177, -1.5989624972405108, 0.51045681375331775, 1.232945808528662, 1.0731807739521391, -0.19327915353788733, -2.4243476676854909, 1.7531877717831759, 1.2768103901888603, -1.7286072980495388, -0.29166997481859241, 0.19782823818041098, -0.23998887860747484, -1.5288312442229119, -0.89755672954322541, -1.4290308837104484, -0.33038471588374951, 
-0.46405378162787209, 1.0105103155087829, -0.25212595058557991, -0.86123316723495724, -1.5367665822467667, -2.3391478302670516, 0.3946391391761625, -0.55263091008876553, -0.016594938941373493, -0.1892564453183766, -0.21999474501028488, -0.56871714924337047, 1.2476693792930091, -0.96283022621525205, -0.13394974227275916, 0.64529210102243817, -1.8533507415494321, -0.0022480182394748315, -2.0727262844912873, 0.77011429516149388, 1.6153006367295417, 0.2321841563245837, 0.82021398461762485, 0.44382043828319084, -0.29891761740130968, -0.57750100273567273, -2.1354233576717294, 0.61059414764765318, 0.3770315071492959, 0.61967586635650862, 0.10053836241418942, -1.8645941491820874, -1.082584565187702, -0.32541451771138658, 1.3997319703744437, -0.025425840320559882, -0.82982721509261759], [0.46806969499769396, 0.14704962137188268, -0.19177495281386564, 0.87331827634771397, 1.5134631471064466, -1.9318302491947625, -0.48101593927179664, -0.1870551584197549, -0.282937394928506, -0.23044100352196006, 0.39816844431980991, -2.0346219471426581, -0.37486108142121333, 0.34639695710581225, -0.33704739792194205, -0.59392404097608043, 3.181071330049078, -0.10935322600699321, 0.39931924039332012, 0.26526683277462232, -0.45788748220606701, 0.33442643049224469, -0.39485782121453061, 0.076144436572807908, 0.75239896446524002, -1.0459796941660904, -0.68610578999143135, -1.4028070610618837, 2.3944928689410623, -0.53742892050463797, -1.0565311345398756, 0.16359914956491017, 0.40257231913131958, 0.30907105208735636, 0.63610571210831679, 0.55177180242461543, 1.7238560553890379, 0.64700218418805555, -1.0469727018031436, 1.2424364264338672, -1.0004518236929536, 0.042202315912090491, -0.48436899571926417, -0.44139637222939221, -0.98199619377754455, -0.91548718794660411, 0.098906218685428579, -0.085246706782079462, 0.689990417029134, -0.74453795505966758, -0.37071965403248264, -3.8885535275940155, -0.21168008196739538, 0.82577391782790044, -0.62666927668623262, 0.65648600962839732, 2.358372808487641, 
-1.5065469376533522, 1.0910624662273525, -2.330304358386972, -1.8032409519400117, -0.81535688440394749, 0.35588771650516182, -0.5582580315832103, -0.95470373522755869, -1.5481926362222773, 0.11963181230706121, -1.1426920130327411, 0.093522565258787022, -0.61722130526836039, -0.26427248187331787, -0.27437390661425332, -0.93620793535590607, 1.3136214406305293, 0.38045437347750072, -1.1209172425160299, -0.0020719229842058326, -1.1556454550790658, -1.9961855313827959, -0.040616061675353005, 0.6988574928839365, 0.26547587020575009, -1.0844205363515373, -2.6362657261303788, 0.35786149588212396, -0.81812096838141968, -0.81138694177046067, -1.8715613774650433, -0.62438254634733714, -0.48297993690868601, -0.019918255850667307, 0.09261219675041879, -0.24580104664878413, 1.6932084333684727, 1.1062570292926448, 0.62577348795634502, -0.98473287228925299, 1.2681184607845986, -0.49486650266691068], [0.97121264397417884, 0.23230082086689727, 1.5955277668095531, 1.1047371205065037, -0.30234647102214324, 0.47273192058441882, 1.2094655091358204, -0.82396971162469101, 0.15726100620890626, -1.359540803046497, -0.53319962461077886, -0.79014035257120807, -1.6777956997641472, 0.58133788366292138, 0.12014472747534151, -0.78046938973608437, -1.3000713632187995, -1.3275585332614983, 1.8070206608494688, -0.70997596688737041, -0.06640519639332447, -1.2900841424954805, -1.0584961791383023, 0.018862022246301652, -0.6954756673476592, 0.68391305397897817, -0.81195915796853235, -0.67252951300379238, -0.83029307680587638, -0.76760673588819106, -0.54542478345856737, 1.2072965853306861, 1.1006381874067332, 0.12889950220240423, -0.21253728592076807, 0.65114763748063775, -2.4780238224839528, -0.73810606360181197, 1.5505967230492179, -0.79181689575894498, -0.33108258751356828, -0.64234354398414018, -1.0674919547438424, 2.4290382048009675, -0.82591867388800622, -1.3273591346093658, -2.2310668598356771, -1.1878768130063837, -0.64738322987264985, 0.86054639857991366, -1.9291491980191782, 
0.75765124222824742, 1.3131923885668693, 1.6051726989529964, -0.24981801132909759, 0.18889038298301444, -1.933306333520576, 0.31700415404788018, 0.16132426156848426, 0.24139022729396409, 0.65710712535066484, -0.23870030440726669, 0.85492092897940952, 0.4312393196445119, -0.26754119220575245, 0.48472449466499906, 0.84394891853338316, -0.21469035108612147, 0.54820698483686059, -0.65764659269520132, -0.67081567635950312, -0.65001163814543539, -1.183537434734073, -0.79952998905702022, 2.0042999608956285, 0.47148425982503134, -1.3148730814295906, -2.1798316180688837, 0.187533523747868, -0.61235372652013143, 0.089508421850980541, 0.071032179268216741, 0.29563476479261985, -1.9150829066336008, -1.9228219013078269, 0.51587034522816888, -1.5295354706432347], [0.58009747984685178, -0.89909709690719142, 1.1561861273027019, 0.33832166968570093, -1.3098395531900995, -1.0916663941477405, -0.27162978994108267, -0.66978146206566136, -0.8703890077799864, 2.0659773792192095, -1.4243805524478048, 0.601098838089135, -0.9560248075897777, -1.5385090763497147, 1.6188038500867832, -0.20676476740892336, -0.62626550285429639, 0.64937026916132434, 0.28259982227551783, 0.085984116743851244, 0.71538624597888967, 1.2986865209423886, -0.38186057341089297], [1.1243043913000377, 1.0383964318869627, -0.21891470670419483, 0.70002700049419331, -0.69946206781105602, 0.027076217376855529, -1.2888342536515296, -0.43186848542814432, 0.43794578979396831, -0.00484127826369018, -0.25648728233056245, -1.7832016206488883, -1.2923752534845649, 2.4257718318298154, -0.29987813977039429, 0.72748324479888415, 0.72866687718580558, 0.97739085112265345, -1.0311849932868247, 1.6254486669491066, -0.87956809047239437, -1.2763214214152347, 2.1459568803774416, -0.19407042537319366, 0.69787878949036208, 1.0205873190843902, -1.141607677385827, -0.11902117718901159, 1.3006494239129449, 0.20175597412268709, 0.57072653764468217], [-0.13838253238257067, -0.26974343360992026, 0.11720336404527595, 0.33365953076061466, 
-0.23680261144176998, 0.048417726215595966, -0.44076849615409408, -0.32488984931333709, 0.35002607981986045, 0.56128022206029948]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index4-49-61-90-21-48.txt: -------------------------------------------------------------------------------- 1 | [[-1.1891334994728298, -1.3880855181592204, 0.63819106224627897, 0.1548885699018577, -0.47706143179292526, -0.04606835918378703, -2.2340872326969814, 0.67133413255085095, 0.26629434058826573, -1.2646634349126369, 0.25041553969337227, 0.12025156808359955, -1.6136566272878714, 1.4139029315069973, 0.53785320909205059, -1.9762668570201647, 1.6218995301844872, -2.0894464822744174, -0.5723644692659301, -0.92287282501881018, 0.0960530911031863, 0.97056902998440131, -2.3376424794155253, -0.13723621877187481, -0.87736981134180159, 0.21871195353601136, -1.4994254194650738, -2.5433607168411023, -0.89970158634310138, 0.34994432886179994, 1.0266928904938308, 0.53999708491230169, -1.236489633850036, -0.76241672211115064, -0.48476697700179405, -0.19010382835651174, -0.75118560324492645, -1.0126692687038374, 3.157887370444159, -1.4235448146142629, 0.47353118691364349, 1.18220383093139, 2.4459713255933835, -2.2296987893682014, -0.099309772551601949, 1.3189561915122792, -0.96247641469578293, 1.178179997502788, -1.2160907688813847], [0.143299328359828, 0.70394721776677982, -0.7314015604400822, 0.40649342109041564, -1.1971003756548357, -0.19349441067277792, 0.042017793483971333, -0.96575210666817002, -1.0251309254861907, -1.4379848692552468, 0.4430815930597653, 0.90235975566142534, -0.90503165462615864, -1.2762573095568202, 0.35308438388385432, -0.85486923387504155, -0.69996695668165709, -1.0567827684823543, -0.46250609998292291, 0.87836878823557651, 1.3784399783948942, -0.39013312704168984, -0.33583656552323854, 1.4506005744955803, 0.42932108232247446, -0.43872998403071611, -1.0788108253489439, -2.436757871567421, 1.1363595387416854, 
2.1289652226243869, -0.76646007869347921, -1.1580238032326446, 1.5577272824815371, -1.7657478994706035, -1.2314959969480204, -0.88239387479962117, -0.48576847538630913, -1.4544612987972607, -0.76342911830037585, 1.2123071603258171, -0.18872799307208191, -1.1342618027667848, -0.14532167625718342, 1.7142334566926987, 0.16319735678584044, -0.54915459330467964, 0.19912995976276268, 0.28865408491685507, 0.21284583038078281, -0.0057518477902309493, 0.79141938574470194, -1.0470097635793516, 2.1676321712173001, 0.94107068756495083, 0.66359986595981235, 1.3797980280906794, 1.0074979043034671, -2.1069235442870977, 2.026635375565597, -0.60542946788929297, -0.222166407740713], [0.72490167734990729, -1.2042135159969511, -0.42448885688261839, -0.56086679233329528, 0.69322374487731397, -0.67617001734807536, 1.3690343911908307, -1.3612783892658933, 0.26032559075538209, 0.28677487513206501, -0.6619530077888599, -1.1143599442993171, -0.62787963660717139, -0.053227213650697908, 1.9188637710782384, 0.25355783066151472, -1.1429478765399224, 0.92773070402190738, 0.37365326416649663, -0.30925952514055677, 0.78054573717637321, 0.75561513239454814, 1.1435586356093148, 0.54773317196478366, -1.1732250254544958, 1.3441226736481864, -0.0069125502003489527, -0.082413960997082633, -1.4709035987168688, 2.3257426713594613, -0.18852052489577006, 0.44184802740431417, 0.076527823709267292, 0.73005577508914032, 1.0858667582099797, 0.39729251422264716, 0.98122357022220252, -0.37082079894239761, 0.39321844657751193, -0.4303023311476829, 0.31767247219841815, 1.0046535333581539, -0.091049257930585673, -1.0284446276507653, 0.44135985944009748, -0.18294295585009465, 0.55118788778810024, -0.57663643190932234, 0.014969298070158496, -0.17598703077321173, 1.5275721991511553, 1.9499932660402315, 0.3433945192105291, 1.3958387535096655, 0.34327347585740642, -0.21546071489595572, 0.82849653801598677, -0.17315075173616298, -0.68092724331820531, 0.20745640322551404, -1.0797009436934719, -0.88704411710449749, 
-0.73250619543282469, -0.64587031979636012, 0.10657814067667527, -0.30540643390527211, 1.9969111569112097, -1.0453667043809372, -1.5710054125582733, 0.74148797041579884, -0.15136057022821925, -2.1076273218151913, 0.45382400529463496, -1.1537730748649015, 0.17083240144226683, 0.48698371378030225, 1.0226534525262456, 1.2171040718682105, 0.31705895897225045, -1.4976257566120901, 1.2761693551386639, -1.5803587036173721, -0.67981588860413389, -1.0874547322475336, -0.65754490609962202, 0.44422332495553879, -0.65966870400341782, -1.1535898364312811, -0.42690678068728555, 0.37772922005814058], [0.9093006849284595, 0.41748820122377939, -1.0597624931648455, 0.75817835178960724, 1.0736192995251481, -1.4482196255611501, 2.124382958597228, -0.28283089386456378, 0.093405163006994474, -0.75213126715495426, 1.1483180818589274, 0.9760368200199564, 0.62845635783505405, 0.29961014446552198, 2.4941136631806171, 0.14950831553053112, -0.26578518445039678, -0.47851733304165556, 1.4366667800810813, 0.55933462086707164, -1.9093112121612332], [-0.13545834758821718, 1.4393282408494423, 1.6445496540357929, -0.45989880424983709, 0.79960332241369059, -0.5978114321645478, 0.10616721892637088, 0.42482719868092422, -0.089586313963911604, -0.83195754304058478, -0.65104652866165591, 1.2708163372523247, 0.3599079172805475, 0.31005256577749613, -0.13600810795251159, 0.3346138329278327, 0.9970176304406968, -0.97467913508330861, 1.7595190630545245, 1.4431279629918727, -1.0501400472849833, -1.0687074902075686, -0.65023290710183668, -3.4941972777755557, 1.2174136142865821, -0.96278466003979979, -0.83905781776032129, 0.11061936151341176, -0.27428571803091162, -1.0468097087340227, -1.5504100857631751, 1.3217130052224113, 1.0141455356165945, 2.6154431698224512, 1.4759140936888635, 0.97671875618855464, 0.23039281241789444, -0.021962814488121606, 1.9933094466693739, -0.12221366993654428, 0.60367942867639057, 0.33583548628253374, 0.83128215647391257, 0.11004168633665411, -1.2381908256960759, 1.6593113060416385, 
0.50966363117000135, 0.68431057141462481], [-0.039809363414628582, -0.34650121968836262, 0.063748486579152941, 0.80186090359072182, -0.20371407818352882, 0.047402509250415163, -0.40464189234209436, -0.36559789835850909, 0.39261986335935095, 0.054632689207444053]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index5-97-83-32.txt: -------------------------------------------------------------------------------- 1 | [[0.16250513710591197, -1.2004842155674347, -0.81627446435482487, -0.056047609040663728, 1.0422683775644659, -0.1934087001495518, 0.12518946496049468, -1.6927315426577885, -0.40717411733945041, -0.77829144217733104, -0.99654052291496609, -1.2616608080724994, 0.5178776810573491, 1.1210585730185096, -0.75895353832596157, 0.97434537439181956, -0.41472969533860765, -2.0379413309457548, 0.60906289748089126, -0.38700621793294832, -0.20445544778455071, -0.094140831409217446, -0.25131086844910239, -0.54114747599100199, 1.2679137669110743, -1.1418718675838915, 0.59791417491213139, 0.29917895253066301, 1.1150395640184758, 0.60270343187360353, 0.45001271449448765, 0.082128084518259856, -0.61977203029796613, -0.016074202362958988, -1.4573057476020961, -0.0064485028268086702, 1.0820839262398281, -1.1232964288725766, 1.2989475728366853, -0.75108846620808944, -0.76665246520865071, -0.64171937121797962, 0.27227622516083416, 0.22636125824806477, 0.74503802512188322, 0.5557339950792286, 0.34096816315542278, -0.32942183166465772, -0.14121220787687813, 1.0646549496023019, 0.87294890382122903, -0.77638435514461568, 1.8908059810157345, -0.31404148906276752, -1.2246821986086582, -0.73141393477426531, 0.14606746305595125, -0.64032186724626416, 1.1319192195670871, -1.5900799476543179, 0.25135331477777012, 0.2225979973080911, -0.19658474797706099, -1.3456395872622755, -0.51001941159631303, 2.3128021783679427, -1.4607776105122119, 0.041671607476849067, 2.0336255621249419, -0.80100906686530393, -0.11997212880118266, 
-0.065668835302072182, 0.095451016032353675, -2.251354635259927, -0.47452725397868706, -0.5902768636273853, -1.3195623843775406, -0.54628516313977715, -0.13991168252278646, -1.1433308694939881, -1.2994550046985709, 1.6501154494546189, -1.8491912807382314, 0.82119179884992788, 0.041082189637456655, -0.03225831204033984, -1.564960151169108, 2.2066330082189021, -0.38415910637655681, -0.81996982469118906, -0.68664200136535047, -0.82784771931072554, 1.2856928650202919, -0.67959254055873453, 0.33957547782687814, 1.9224047533114745, -0.43038616330561796], [-1.0039887449484999, 1.2502686618914942, -0.91827662521163966, -0.71102838093766918, -0.47665487160968817, -1.254754880952287, 0.21862265936240438, -1.9739223375861488, -0.032896418028846942, 0.18740648694986226, 2.1025809538041735, 0.113918860608146, 0.15096010373537569, -0.38520058671912166, 0.051821403573332384, -0.96902109162706462, 0.81269802545494396, 2.1745220675109516, 0.75236810621367556, 0.1303250273385605, -1.1495124204667972, -0.26731097368417966, -0.18195138235684721, -0.55432177339206401, -2.1015591755452996, 0.99202761013694873, -0.2883711853142123, 0.52381288730697195, -0.46789560264827412, 1.9945807262077719, 2.3638466882420937, -0.74621083549738298, 0.0054970357984132521, -0.6969934366443451, -1.3084024991916647, 0.4572067891909582, -0.53025017457950385, 0.69037507725942482, -0.29412062370124253, 0.21724056119107091, -2.3333185835254828, 3.0541427183863554, 0.23657721301956494, -0.88628430985391515, 0.15025078465772757, 0.53146052437525726, 0.31877778208027596, -0.056981011098505618, -0.94883369881592416, -0.049862399205358496, -0.70359196104946953, 1.2199747243611785, -0.41427408963127532, -1.2468500531968452, -0.5402986729389575, 3.0061397562988756, 1.2099156228825632, -0.20787796506625428, -0.40166485310230116, -0.84098907933426026, -1.6000222910471986, 0.7046201787026275, -2.0560125840097458, -1.6346696159027259, -0.34091483766268521, -0.56995410701087479, -0.4045170977940924, 0.35416108159112253, 
-0.53369274782040144, -0.81574538017279385, 1.1999916444045191, 0.34961183677530178, -0.77038258297789897, 0.51336497400578829, -0.99891709864315892, -1.3171233281574894, 0.075344220156589065, -1.391521399238181, -0.42356473857794091, 0.43106670448285744, 0.40446464166125334, 0.015584201934513042, -0.034307917996127604], [-0.81284554742110382, 0.22325886431659589, -0.19643271110428515, 1.482001090819506, -0.068282923962035988, -0.21867739368149422, 0.87793190511197283, -0.37907804078876411, -0.32987927500705155, -0.77944042222293641, 1.8059056422534943, 0.31141804807529172, -0.62262755064939912, -0.12272334125323464, 1.7659009592741404, -0.44405025350342009, -0.1348143612761819, 0.84734912226445913, -2.1504276350401876, -0.91925539523336119, 1.0903869607378389, 0.048827173504588345, -1.065473519820332, 0.31273285874286283, -1.0902398608827142, 0.24527105993139131, -1.1494831611150282, -0.048038507743292935, -0.29428885550426098, -0.87727989550312635, 1.1116162134759067, 1.5423134267777192], [-0.041357792546455822, -0.22662507993168565, -0.33974744939382656, 0.44151880381147907, -0.32403344588290295, -0.030958149301210332, -0.12802377938127305, -0.12447790748460492, 0.43696740108954069, 0.33673739902086858]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index6-33-95-67-43-76.txt: -------------------------------------------------------------------------------- 1 | [[-0.79435490303967493, 0.33726574411010157, 0.22221391673698465, -0.63614953990655121, 1.0759330268576246, 0.55074366592939483, 0.13063185883669143, -0.22816344139260941, -2.1396433444670779, 1.1406834323797139, 0.85609196419319911, 1.0167462447913185, 0.51918191588391371, -0.31135504450961121, -1.2806668587227601, 0.57200704676758429, -1.4119194470779388, 0.81811867184222464, 0.67206325364095709, -0.47487269307503677, 0.0072140288970963954, 1.6242396225175937, 1.1573101784854376, -0.30959226438776849, -0.093242946970712309, 
-1.8819131843274424, 0.49265204022244902, 0.15460689316456921, 0.095091859151120811, -0.60691910335338239, 0.4005473394761741, 0.24218434325405302, 0.3793755724225194], [0.59301432336843851, -0.35299824462315482, -0.54145526697811397, 0.047339913833428143, 1.7051058260694727, -0.081876358510316799, -0.062056865897499362, -0.16053169129206035, -1.624472330805413, 1.2979553274765798, 1.2983520267748128, 1.1845740643993863, -0.39730046721554968, -0.79421597316066828, -0.89275802662506665, 0.36504700491891928, -0.73105980041085505, -0.69561534363112754, 1.3665439563974819, -0.7427118902563461, 1.0341796440205224, -0.5105849314571802, 0.47613619096150772, 0.48747400754555426, -0.22946220195295566, -0.27148195883392262, -0.84500630169400914, 0.32919875890923883, -0.099748292413768577, 0.3605143085584292, 0.28473661655317456, -0.2797082934441768, -0.32399825791399972, 0.4170287415523023, 0.27660204089069468, -1.1306707462567234, 1.7385289558865027, -0.12347055137477055, -0.46514834399175514, -0.81759607969899173, -0.068406043970410627, 2.2934695222973334, 1.8486476701966357, -1.1511833490118608, -1.2048644255791248, -0.73235464213248069, 1.2127279301688088, -2.1763995693039782, -1.7645009046345406, -1.6339185055000165, 2.8916650081249862, 0.95611112830269152, -1.8538810021716867, -0.64181300569071487, 1.275302283128499, -2.1828623534171552, 0.02108224939213476, 0.46955192368197074, 1.1335778670154451, -0.8641604942211395, -0.44218899544267298, -0.47191842505870307, -1.3982508446381698, -0.77864359832638441, 1.4114801858283781, 0.40671552359724256, -0.18591569330500829, 1.8011036482864624, -0.7938182299839367, -0.48999969802082693, -0.42078040958112972, 1.9434452285088408, -1.7654529325767137, 0.77957543434461096, 1.9003502196949742, 0.0055748700621291569, 0.94471722314592443, 0.60127171159503379, 1.1949182338389583, 0.085630380970357814, 1.3849633936597052, -0.89204066081328859, 0.87000417065913616, 1.1172590796201116, 1.231747639370731, 0.37371941717633023, 
-0.4927189776478077, 1.5101361074333897, 0.5036883513481788, 0.012269342838334687, 0.44971746852291539, 0.97498723772207097, -0.70752023305710954, 0.079075393485349144, -1.0977579277021539], [1.6849770954280026, -1.3908292903095558, -0.75341510952896762, 1.1411823239130718, 1.9712971613185384, 0.039642903719254156, 0.18496851009135698, 0.29338478013606334, 1.8085153233283584, 0.10510708744960957, -0.29985543017063593, -0.53374745464309792, -0.45985149532202518, 0.81123495383828681, 0.015765055305654425, 0.71333637278999307, 1.8285187139848276, 1.8323442823174174, -0.59212750325560048, -1.8293554939998897, 1.4644429491410353, 0.43342116558923005, -1.952119204599488, 1.5772483988004864, -1.499441412035208, -1.8116184897911409, -0.08646934538629053, 1.288871254042862, -0.51317074760461701, 0.50937129540136594, -0.9995069694221661, 0.045314869330795156, -0.59011858979401677, -1.0430178753812904, -0.08121123816573339, -0.19119216574659165, 1.0524758828755498, -0.55830351209124063, -0.44909996467132618, 0.22381171608381764, -1.1118391156702514, 0.332797711265156, -0.28681267392484666, 0.45129690914945747, -0.62113532881325662, -0.022015564990342743, 0.0027688662671682403, 2.0791787260401509, 0.30945591765547326, 0.55367387123876144, 2.4171842301512529, 0.12355821246397669, -0.94581095061203813, -1.0884147865104183, 2.7235252994696642, -0.43771812524402987, 0.22975684552056605, 1.5474627784195316, -0.32332828169207845, 0.76467614762258229, 0.229792932317129, -1.0440676498450361, 0.31013859894569273, -1.4656100611313023, 0.23088728830698999, 2.3454382965539589, 0.081196935979060594], [-0.41570494901466315, -0.23733780286770492, 0.92992413167348476, 0.14763729329279582, 0.67286795260421883, 1.242843661265586, 0.12746545814159854, -1.1866798859168215, -0.20418842014352409, -1.6677535572084694, -1.7689664291405494, -0.03392997485461198, 0.90194629074169363, 1.2621941892162292, 0.71909058446228868, -2.0528781581888129, -0.62576398326042548, -0.55583052223855911, 
2.4041289901640788, 0.34133814221212555, -0.92451291210009368, 0.71632217858656433, -3.1241049562753997, -0.43985696071542552, 1.1619624319441813, 1.7503727262446891, -0.63591919557159293, -0.48154431411492798, -1.07820062753412, -2.410915938731542, -1.0299577906740254, 0.32691482177200387, -0.57446885972625616, 0.073039062046870551, 0.35484937947912065, -0.064922661366008272, -0.37663708843926752, -1.7957638970522347, 1.0780861538094182, 0.45428880575550012, -0.43486842589635116, -1.0219508712152305, -1.9970257206951534], [-0.96168252597428194, -1.4850109289125188, 1.5124569613063295, -0.086288612316577362, 0.19407944977745525, -0.89915793546449463, 1.6884882042359508, 0.33614056557929745, 0.40724989013200763, -0.889691579276225, 1.4386211158031341, 1.2570973412677033, 0.27024745298823943, -0.90710806003521238, -1.5516654812501538, 1.6575286502680013, 2.0720280046016426, -1.3631762120046542, -0.056692867918599808, -0.21092318545280517, 0.27610104866571472, -0.14222262913056016, -0.74220626718956606, -0.64151534870266513, -1.437498100769387, 0.27914699352154188, 1.2215869575164029, -0.40285074516648262, 2.5661910476270955, -0.25206739608934969, -0.078161041937501008, -0.64259910704118417, -1.9765560589573219, -1.438366942359562, 0.4953980125107228, -0.1829911350256288, 0.13602251223727146, 0.82772341175017194, 1.294030412985032, 0.53336430895320353, -0.4032283336231533, 0.58999063122535211, 0.37205966290432629, 0.18546027589098943, 1.2775251069885445, 0.29760256162304383, 1.3580626465297627, 0.5905166272101765, -0.9130787636183858, -0.36813448610723026, -1.7091530062258102, -0.39411027759479833, 1.3346098092952408, 0.1277749478907626, -1.2902892670376691, -2.0052237075103565, 0.94286930467226004, 1.034538392168624, 0.87888581194038251, 0.95525948472337163, -1.3185071998065969, -2.7676390828892434, 1.2526410395226013, 0.021128812398617042, -2.2822407221268968, 0.43296111337661075, 1.3199018234013968, -0.66765362931836747, 2.1112244626016166, -1.2742100639543139, 
-0.58905669711735731, 0.12617260477940212, -0.33197184757260489, 0.22968989580038238, -0.80125029541718462, -0.0034809269889706767], [-0.11109784672410292, -0.41527524908542007, -0.031324880558614827, 0.22341678893494776, -0.011119238512827593, 0.25870581242877122, -0.25969347556925204, -0.16817535751264792, 0.43480644774718646, 0.079756998851932992]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index7-78-62-73-47.txt: -------------------------------------------------------------------------------- 1 | [[-0.15179574112642999, 0.90728822009785726, 1.0135548851528833, -0.56746634589205547, -0.41826495702042776, -1.0788109044248666, 0.30798758636247481, -1.0383132973981781, -0.25513597536875965, 0.27916403956044827, -0.55131360353951475, -0.43924447480113427, -1.0110392298628987, 1.1192013780905894, -0.53915298032255943, -0.17821921542103369, -0.47475450042666939, 1.2540791226801795, -0.34379271570503267, -0.31192140697842857, -0.44278410689775166, 0.064204788324865092, -0.68128488324751468, 0.16051761085208552, 1.2436978909917329, 2.1610176747001719, -0.5419641421684418, -1.1644039271407738, -0.26315547975258607, 0.028636170503176329, 0.21631229501579796, 2.8769096754474464, -0.2054848361227275, -0.94260849802282076, 0.35434567031759673, 1.9115303935988033, -0.31716912255769042, 1.0023578193193412, -0.77630400807481759, -0.04051629290905389, 0.62921890356479582, 1.2470625642151896, -2.2341935850502872, -0.85835561641640468, 1.0842026974782442, -2.4626489896262957, -1.2379753985223292, -0.69809232735010485, -0.03852187398474434, 1.6459030867860198, 0.7147806885109611, 0.21140307252804139, 1.6357121596123327, -1.0062208253765907, 0.36238540336583397, 0.070328681393709194, 0.93653130912416915, 0.9882655907295339, -0.63094073324687761, -0.85180985561906697, -2.0602353725874449, -0.14406209455612623, 0.17868119922237932, 0.28985328388387516, 0.95398379565046709, 2.6629531579090737, -0.089396352022434608, 
-0.88552619110501951, 0.065139046324083238, -0.66649583756260888, -0.28053229568637211, 0.1407575620793135, -0.53425314059430484, -0.47970922460375659, 0.17542136207503384, 0.36045909304754231, -0.73125648325118831, 0.12435276057589045], [-1.0681731194095427, 1.7017313581451659, 0.41828225064845737, 0.34932389459515462, 1.5902074736764737, 0.73837649527653304, 0.0061229507995520247, -1.0438691212720173, -0.026243546736852637, 0.67332113065636934, 0.14492679550872173, -0.7423945925910771, 0.87217411436910708, 0.80253067519394161, -0.45216508869846905, -1.0039346587205573, 0.8860712079713593, 0.60425707225649583, -1.3281033507271931, 2.0070691264782967, 0.55535588178842032, -0.83239237303904323, -0.8378551866783257, -0.17166459930765629, 0.2912911475734189, -0.0025274882307199736, 1.0056666547658519, 0.7715739317332736, 0.16770557929380808, -0.36927814111641383, 0.31201446577439534, -0.33831440580145122, 0.3224389721865184, 2.5002418624427314, 0.43954772786960195, 1.4312990505651964, -0.23306793723568348, -0.40037267413903788, -1.6787730318114806, 0.98635968675814634, -0.97114555590382934, -2.3310109576171256, 0.065584012092315638, -1.5403928790935286, -0.38764291967962694, 0.95452677738922254, -0.95000138657351119, -1.8673554440121491, 0.40361487308731298, -1.3271439923814241, 0.93292857845066612, -0.19558463281711758, 2.0248752544330499, -0.71465762177294512, -0.83116865677810481, 1.2086463290846827, -0.67314612508417049, 0.7245695669285549, -1.1527011587676979, -0.16167149013232096, -1.5400040382766433, -0.8023407337875732], [1.2172209769446793, -1.5911807975876096, -0.67167902505439081, -1.1370403587632194, 2.3200904214677847, -1.2685255337797929, 0.1452974618055474, 0.31045689595329728, 1.8332366443425125, 0.64174702423516461, 0.16949201648068668, -0.92492156448686813, -2.0485129345033162, -0.96908292599271073, 0.84526846702220804, 0.7100247104028099, 0.34619372851217289, -1.2320213606516923, -1.7801401065846429, 2.1335530076463209, -0.5041183267681516, 
0.62215639709113291, -1.1965492081179627, -0.54793136425062339, -0.095085211951992307, -0.37583699046690366, -0.97053547399419382, 1.5848168632778055, -1.025990251112292, 0.017019794626737365, 0.4931776954140093, 0.49045931185854225, -1.8624121363681654, -1.0492371071009774, -1.0509465222484076, 0.24692841500124016, -0.39417683485147947, -0.17784466149628522, 0.44423830475388282, 0.97020453195806911, -1.5272842096135126, 0.95852682069112127, 2.8354671055996348, 1.6876164202241974, 0.71771187723629004, 0.38833366341711673, 0.82375949184853903, 2.2052864928528386, 0.18904982218689362, 0.35003194989256825, -2.2642943631774406, -0.45095809096323319, 2.2461216856042894, -1.1272026955163919, 0.1573640313273747, -0.46051974692935238, -0.32223421131326518, -0.53512327624919243, -1.0827950318906583, 0.10959130085840008, 1.003109941599319, -2.1908867632830988, 0.48417786392379691, -1.073627330803594, 0.11846022244181073, -1.8994009043322329, -1.0802292537360416, -0.97208713014111758, -0.45048401141873701, 1.8057245202644276, -0.8893058225687841, -1.4380356674138535, 1.1892656703252247], [-0.3728154455570063, -0.088598580439876853, 0.19746729288872406, 0.070558002681601645, 0.14365091608968991, -1.6977639061267222, 0.82924715561129825, 0.56324391677853691, 1.382187256651876, 1.3126198880741935, 1.0435125497213009, 0.30304326444480639, -0.76832413361685858, 0.6865462943860634, -1.5615917056624002, 0.8830609608513047, 0.58163918492658806, -0.79364533028203965, 0.7701999982325396, 0.18158884400289446, -0.34766384200703848, -0.099340119152494355, -1.5993434373630386, 0.29339303360278168, -1.1135990937695028, 0.48410815619665631, -1.531020423853118, 0.55637815392410928, 0.63591844702374067, -1.7129831475355835, 0.90099469156739564, 2.1316125612829691, 0.45916013527974042, -1.2634110056836596, -1.342439962873847, 0.96392012933488069, 0.91927584460263989, 0.92585860920386953, -1.3160704675000383, -0.926229723820858, -1.4131715727950576, 0.82503588043806531, -1.6301164105468324, 
-0.27567689433599263, -1.0367762181021329, -0.52794824439863053, 0.43056991148324203], [-0.16101456959393917, -0.14644496976219268, -0.049215801941696426, 0.11601603275320715, -0.16801949100106542, -0.21526142820377686, -0.25277363410901266, -0.15390231203257715, 0.53633237286212443, 0.49428380102887481]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index8-87-33-62.txt: -------------------------------------------------------------------------------- 1 | [[0.036807734916255178, 0.0085548454423392834, -0.043444808233154622, -0.47212980597156229, -0.96642996128753034, -0.82466059614262299, 0.42976761642448508, 0.071548439659955679, -0.50806343110156615, 0.76834410634718031, 1.4051985450425741, -0.22629977693019648, -2.0065563843911605, -1.3149963468175883, -0.18281522908091052, -0.31970151359735405, 0.30068789446317623, -0.6874445969846763, 0.18696825511529042, -0.38877308633908764, -1.1876821436405336, 0.28492897788183219, 1.5570595241281069, 0.79321671697303842, -0.40127906790211665, 0.85335433334790278, -0.67462056697752915, 0.37664183745284407, 0.57859191641253904, -0.094417251714039613, -1.0414904434812899, -0.076854764408954612, -1.6318865771831457, 1.0971160870727756, 0.66168735266985779, 0.24319226935029095, 0.0010866357650883395, -0.9088304048342265, -3.0679147331674677, 1.7087737532817524, 0.13224718683292835, -0.47901038591173806, -1.6952318512241804, -0.49142659084575402, 0.099614879188102107, -1.0980626112101566, -2.5969127588337528, -0.054387506360124141, 1.3359611963038229, -0.014137057748209361, -0.83501441041535918, -1.8292721760761732, -1.4338870814283398, -1.5268640297420948, -0.82127683745739333, 0.15681869999139891, -0.58090206016234924, -1.9241033087617567, 0.62455217501399529, 0.83156225569986641, -1.8040184491847779, 1.6421428438588093, 0.23177145504554789, -0.85315970842413713, -0.27030830135982753, -0.42249139270911229, -0.31807912886374462, 1.0925930748012509, 
0.4487820806829369, -0.80914973501586684, -0.26535256328231743, 0.12962473873303398, 0.15518881446096749, 0.90828648009901347, 1.815557995179925, -0.91792289767546842, 0.75687778858012766, -1.0513205272234138, 3.3746012220470205, 0.035966513729545256, 0.46347663921568499, 0.4253058517960584, 0.84665888083162666, 1.0454816559859419, 0.56901899731755046, -0.035069441563874343, 0.61309565090685769], [1.0237317498148912, -0.013166346135414142, -0.89926535879611924, -0.84936264335186906, -0.67229694701238285, -1.019661859517748, -0.49198242310989898, 0.75173548151300373, 1.4823983407379799, -0.39516679543084188, -0.0069692348481383976, 1.7854400213104964, -0.23114262414010633, -0.4463032081728312, 1.5584629616512671, -0.041391223683798509, -0.2295991980739204, -0.41175795716587893, 1.0176842659027723, 0.72024138562693141, 0.90020660254174756, 1.572404000967299, -1.3588299132528074, 0.42410822224127626, -1.4479711005591265, 1.2626528462958324, -0.69246979273213971, -0.02698877069859933, -0.37451799941773234, 0.55731850123802995, -1.2419058702442538, 0.94766144699456567, 0.50511304735529416], [-1.5826453275828667, -0.93660725670617129, 1.4062465170408398, 0.42521428151438817, 0.59666046563120467, 1.570980475221178, -0.76004665995070075, 1.3497160715548091, 0.23131437914890668, -0.18754397747035223, -0.082291885958442335, -0.11585416515297334, -0.31855452442831134, 1.4306540082759915, 0.3709464287361004, 0.16245059054828059, 1.1419484173282131, -2.0034386044412402, 0.64449796633153378, 0.63633212062279954, -0.50606029510893535, 0.68372803954292027, -2.4932875985164538, 0.055569539086434971, 0.35439450720815968, 1.5960437336401301, 0.21799789315421064, 0.9291577481703418, 2.8119313289744809, -1.4384240649992943, -0.13653572778137332, -0.13591599720163688, -1.2836038816144306, -0.47803125962592169, -0.4039928928316911, -1.3845803609733753, -0.44710413811244881, -1.2258410503404351, 0.65502281798556594, -0.02575569310108286, 0.27539581598400409, -0.33065612949457185, 
-0.05439689026536576, 0.6565182800531818, -0.17192121349614639, -2.0111751810441434, 0.29008764857879066, 1.5671136748800287, -1.5636213258691969, -2.056819695575884, 0.97660468687976043, 2.1307546857798694, -1.2792049055007002, -1.170212198288372, -1.8708413409505635, 0.32864539636976442, -0.024401037798913044, -0.58534151896531705, 0.033738789746681513, 2.1962541812430478, 0.40735948083141471, 0.45866170670702172], [-0.16118725747197418, -0.33075385193439916, -0.17362924176537259, 0.22718798658395717, -0.27858172374751589, -0.23048226522259957, -0.07024496637624493, -0.37864837229433268, 0.96188048261552617, 0.43445920961287715]] -------------------------------------------------------------------------------- /exps/random-nn/b_mnist_nnet_index9-76-55-74-98-75.txt: -------------------------------------------------------------------------------- 1 | [[-1.5767595812409172, -0.28439625669250468, 1.5463250663126731, 1.5571893037323172, -1.5396125567970982, -0.25902523990603843, -1.3129817644085007, 0.49408277849574217, 1.0899185677555774, -0.60538638853264815, -1.5965686467123303, 0.49486119801216316, -0.24241582030652412, -0.40385993925400315, 0.32935772245762124, 1.2676741946466012, 0.19875610731553345, -0.41907151444903085, 0.20824319723437382, 0.67676802545155212, -0.5984221626375732, -0.83833068883890316, -0.31700883910056765, 1.4431422288584532, -1.0095039656865588, 1.0355602239142945, 0.84088242174055994, -2.0725874440958156, -0.86867723182905854, -0.75248330231967697, -0.27121273558458359, 1.0677827293580993, 0.90005515931169067, -2.2606106001785307, 2.6612215855808117, -0.50323526663201867, -1.1978211391819922, -1.7045340163902023, -1.2007291669406761, 0.98378890763406235, 1.1650110269836438, -0.38581650080640151, -1.4112608332306655, 0.65986183893112293, 0.032855389717067709, 0.62651300116043152, -0.81299911748081644, -0.81385020252153639, -1.7294492997060467, 0.09035271183809207, -1.8600542440568624, -1.9067213432319328, 0.67935603741956307, 
0.11523644077098977, -1.7715923895041785, -0.60471273787793622, -1.7810496233524906, 1.8217645073002304, -2.0365061414107442, -0.74394196436872173, 0.25774562817304231, -0.19798818277089472, 1.2281193675868842, 2.2728179083753384, -0.084500630418442621, -0.28048718120016869, -0.50633378332881485, 0.0084792307389139291, 1.0853903134768981, 1.4338133715082764, 1.1550292730079716, -1.2611342073917886, -0.20254158022848762, 1.4379853800760432, -0.86171160670047475, -1.8448874920957208], [0.4383964984052639, -0.45259231894589724, -1.2977947380283088, -0.47464403311822084, -0.40897422341342693, 0.25327356527513489, 2.5130027448075971, 0.76903670787568468, -0.33178008302761597, -3.3113055845247357, 1.1884096314605646, 1.29570800987942, -1.2458862815264617, 1.7046526550278265, 0.39673228027689322, -0.39223383325043315, 0.66406472469734523, -0.79505375994029226, 0.25674698271146057, -1.438046116245703, -0.88788533331333463, 1.2657557727412938, -0.26206992139310087, -0.72313118364708406, -0.15064806095624669, 0.56027608415522778, -1.3896844958766992, -0.38139397138570674, 1.3340780854057681, -0.80421870229174486, -1.0440995219836959, 1.3936217406884757, 0.20821716249658018, -1.2022662686691388, 0.54873256273796744, 0.0027075344478581227, -1.4592007771873243, 2.7565480120491377, 0.79070678980216191, 0.59088424575988208, -2.4876775957755046, 1.2611562037754593, 1.2183290588011226, -1.0150428536447094, 0.67954755839180792, 0.28194117776009026, -0.84364590234117498, -0.21002266424234586, -0.50316662550174063, -1.7609420942666394, 1.753849841160428, -1.3803186880024674, -0.43938269164837956, -0.5766578620154017, 0.99242181934095663], [-1.3572213527753931, 0.64735511800832124, -0.8388625099506386, 1.2658492385358564, -0.015706069608346616, -0.39379689505581661, -0.11271521334685272, 0.24889006885475348, 0.29785919699109403, 0.075421961052148337, 1.2266353299145478, -0.99293726029542773, 0.26831462173518628, 2.1261784107404411, -0.18618501377342514, -0.99985946558786432, 
-0.17813929443192011, -0.12677382267537116, -1.3956850003795078, -0.26290224110786242, -0.95907603201303548, 1.5632935934854821, -0.33105724446524026, 0.90792978710158923, -0.40881554589416408, 0.80459541369142262, 0.17342853869668928, -0.20806000253675594, 0.12040966670247205, 0.24788475285325096, -0.2647255798461125, -0.45279461378158797, 0.20065117934019283, -0.50682692169003096, -0.4231880727945001, -0.7040402418611621, -1.3375497701773615, -0.63716180531733113, -1.683409476509784, 0.27193274142857032, 1.5252631753723542, 0.6944608699207746, 1.2462740606227694, 1.8790947302044181, -0.019573612976729351, -0.51558227692424752, -0.77393756753130893, 1.0697456106473942, -1.4164841490383355, 0.97623302166893033, 0.68860897625033313, 0.086029864821265109, 0.53310812659757478, 0.80299179911159602, 0.55517339532039367, -1.8681477989372648, -0.084383380968340901, -1.8108410475636756, -0.77309844119803051, 1.1887676387562063, 0.12914120765433104, -1.5804408011588986, 0.16623244025596795, 0.71983989401705661, 1.7040529357717935, -0.26091004587432975, 0.93205426009586656, 2.493504554317524, 0.88114516864867909, -0.73791749423025155, -0.72225041565175707, -0.10307909874045834, -0.99474551302007552, 0.067604429189123391], [-0.23222072593940407, -0.15558520078736296, 0.8375198865123834, 0.34565924354265576, -0.8605465786030505, 2.0852377975942931, 0.52047499201878122, -0.25338501645814049, -0.040084697336813954, -1.2096529579596096, 0.48436515271758618, 0.0032875136532488289, 0.88732844463941962, 2.6123630098998287, 0.57585928474101444, 1.6434616581000208, 1.3837786080411969, -0.41608708054687427, 0.73515659169934233, 1.2553311861505092, -0.54468179514638226, -1.7026099483978274, -0.68503825958763909, -1.848595744449169, -0.78074063477413935, 0.79146230594149747, -0.35245525054281784, 1.3521691876028379, 2.4164317294161544, -1.5057212905100206, -0.77393089614036414, 0.17062857937090772, -0.48644147191445125, 0.22187015718544323, -1.5438349859585663, -2.2073641880742616, 
0.22007642484672901, 0.13418849851376224, 0.28629386809196633, -0.13391826899740866, 1.0561421111132836, -0.09639804390369873, -0.1738548895327636, 0.3598314693685491, -1.3603959269961146, 0.21431993099016228, -0.050068266528418261, 1.7366120786591328, -1.0184341856637231, -0.25223247870942023, -0.57456600362496757, 0.37013863017111992, -0.46805559162563154, 0.10585705583587542, 0.064006188438319678, 3.3662938738920261, -0.060507157474128562, 0.94489541231650309, 0.19428441880653546, -0.089468223338123348, 1.2672085487670319, 0.94215437438203087, -0.76864843396411342, -0.005875107424971081, -1.0309056219846964, -1.9570763450261497, 0.18103941551464647, 0.0024849453459568041, 0.04341995726845601, -1.2832771809973726, 0.64686812080931355, 0.50319691451904991, -1.0838732270425395, 0.32483266164082453, -1.5675812707793486, -0.64437572878583183, -1.0227856103030122, -2.008710821892751, 1.3368111759086083, 0.55318841575083044, 0.83319315148667261, 1.0366853626149419, -0.98415060173258095, -0.54091630008170533, 2.4033012120693065, -0.46485490325787582, -0.54435597381362355, 0.87174200948673397, -0.415752728626476, -0.20740789859944922, -0.46207779302672158, 0.68416598113691929, -2.3921363617847997, -0.57410378904811088, 1.9559513266462198, 0.033708720379507048, -0.054832414132415776, -0.10117976195778022], [-0.93043496506220846, 1.4036003728037802, -1.5451517677156485, -1.3711829744627146, -2.4622515480387217, 1.0408977420537908, 0.85163871711492889, -0.20458935236839396, -0.44464296585501506, 0.44942454269303217, -1.1284093229006604, -0.014110413335777866, 1.4073879807074823, 0.44501262795773289, -1.152425229379056, -0.29729069116125262, -1.4703401353385843, 0.30770221866411962, -1.3313841077319348, 1.7292379920591083, -1.4901807609069329, 1.427846353177461, -0.19490963796353461, -2.120835637236155, -0.19446746076244301, 0.88885942480086999, -0.0075569355996472415, -0.2233246746127879, 0.10209728121729739, -0.21701939701814715, -0.20121658106171667, 
-0.012488184263778413, 1.6854926081019783, 0.47184233218609323, 1.1312281101404611, 1.7884532632531838, 1.2227537602683398, -1.0829776391351176, -0.34156928691667493, -0.79497551968959623, 0.60321972577240934, -0.47713748302218961, 1.1385146894092943, 1.5177040432988822, 0.21502339322905376, 0.57966975559684353, 0.70575669573742761, 0.0040951775925506086, 1.5897759862560839, -0.08457784766274741, -1.595136688736372, 1.1822632800984194, 0.35125903267175057, -0.25979027916497344, 0.43532042345546534, 1.5350234532997999, 2.1376622082963865, -0.02699010247349045, -0.76046257133723738, 2.8399443874402346, -1.0496264131891497, -0.55468564473450555, 0.86218644260762745, -0.55585680691633532, 0.73528806161442462, -0.80827062957388252, 0.54998200192364444, -0.92396789848332217, -0.52895313772823227, -1.2776353174489756, -0.89946807329512823, -2.3251669112011575, 0.72994852927153875, 1.3989631123431805, -0.2099482596385574], [-0.071047193433648514, -0.12837194991126102, -0.020678507133531008, 0.39964746418877689, 0.0487472138109626, -0.20202220241021138, -0.34420169017752605, -0.15478725877573593, 0.17345537123461557, 0.29925875260750467]] -------------------------------------------------------------------------------- /exps/random-nn/nnets.txt: -------------------------------------------------------------------------------- 1 | mnist_nnet_index0-67-22-63.txt 97.03% 2 | mnist_nnet_index1-59-94-56-45.txt 97.65% 3 | mnist_nnet_index2-72-61-70-77.txt 97.50% 4 | mnist_nnet_index3-65-99-87-23-31.txt 97.38% 5 | mnist_nnet_index4-49-61-90-21-48.txt 97.34% 6 | mnist_nnet_index5-97-83-32.txt 97.45% 7 | mnist_nnet_index6-33-95-67-43-76.txt 97.03% 8 | mnist_nnet_index7-78-62-73-47.txt 97.37% 9 | mnist_nnet_index8-87-33-62.txt 97.26% 10 | mnist_nnet_index9-76-55-74-98-75.txt 97.49% 11 | -------------------------------------------------------------------------------- /src/cnnett.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 
class Layert:
    """One layer of the toy CNN.

    Parameters
    ----------
    _w : weights; for a convolutional layer indexed as
         w[filter][in_channel] -> 2-D kernel; for a fully connected layer a
         2-D array of shape (inputs, outputs).
    _b : biases; one per filter (conv) or per output neuron (fully connected).
    _is_conv : True for a convolutional layer (always followed by max-pooling).
    _mp_size : max-pooling window size, used for both the x and y direction.
    """

    def __init__(self, _w, _b, _is_conv=False, _mp_size=0):
        self.w = _w
        self.b = _b
        self.is_conv = _is_conv
        self.mp_size_x = _mp_size
        self.mp_size_y = _mp_size


class CNNett:
    """A small CNN evaluated with plain (naive, loop-based) numpy code.

    The network is a list of Layert objects; the input is always a flat
    784-vector reshaped to 28x28 (MNIST).
    """

    def __init__(self, _hidden_layers):
        self.hidden_layers = _hidden_layers  ## hidden layers

    def eval(self, X):
        """Forward-propagate X through the network.

        Returns (label, act, act2) where
          label : argmax of the final layer's activations,
          act   : per-layer activations (post max-pooling for conv layers),
          act2  : per-layer activations before max-pooling (identical to act
                  for fully connected layers and the input).
        """
        ### X shall be an array ==> 28x28
        X = np.array(X)
        X = X.reshape(28, 28)

        N = len(self.hidden_layers) + 1  # total #layers incl. the input layer

        ## act/act2 are lists of per-layer activation arrays
        act = []
        act2 = []

        act.append(np.array([X]))   ## input layer
        act2.append(np.array([X]))  ## input layer
        index = 0

        ## propagate through each hidden layer
        for layer in self.hidden_layers:
            if layer.is_conv:  ## convolutional layer + ReLU + max-pooling
                nf = len(layer.w)       ## number of filters
                conv_act = []           ## pooled feature maps
                conv_act2 = []          ## un-pooled feature maps
                _nf = len(act[index])   ## number of channels from the preceding layer
                ## apply each filter
                for i in range(0, nf):
                    nr = act[index][0].shape[0]   # number of input rows
                    nc = act[index][0].shape[1]   # number of input columns
                    nfr = layer.w[i][0].shape[0]  # number of filter rows
                    nfc = layer.w[i][0].shape[1]  # number of filter columns
                    # 'valid' convolution output shape
                    f_act = np.zeros((nr - nfr + 1, nc - nfc + 1))
                    for J in range(0, f_act.shape[0]):
                        for K in range(0, f_act.shape[1]):
                            for j in range(0, _nf):
                                a = act[index][j]  # input channel j
                                for l in range(0, nfr):
                                    for m in range(0, nfc):
                                        # kernel applied transposed and
                                        # flipped (true-convolution indexing)
                                        f_act[J][K] += layer.w[i][j][m][l] * a[J + nfr - m - 1][K + nfc - l - 1]
                            f_act[J][K] += layer.b[i]
                            if f_act[J][K] < 0:  # ReLU
                                f_act[J][K] = 0

                    ### max-pool
                    nr = f_act.shape[0]
                    nc = f_act.shape[1]
                    #### shape after max-pooling; use '//' so the shape stays
                    #### integral on both Python 2 and Python 3 (plain '/'
                    #### yields floats on Python 3 and np.zeros rejects them)
                    p_act = np.zeros((nr // layer.mp_size_x, nc // layer.mp_size_y))
                    for I in range(0, p_act.shape[0]):
                        for J in range(0, p_act.shape[1]):
                            # window maximum; 0 is a safe floor because the
                            # ReLU above guarantees f_act >= 0
                            for ii in range(layer.mp_size_x * I, layer.mp_size_x * (I + 1)):
                                for jj in range(layer.mp_size_y * J, layer.mp_size_y * (J + 1)):
                                    if f_act[ii][jj] > p_act[I][J]:
                                        p_act[I][J] = f_act[ii][jj]
                    conv_act.append(np.array(p_act))
                    conv_act2.append(np.array(f_act))
                act.append(np.array(conv_act))
                act2.append(np.array(conv_act2))
            else:  ## fully connected layer
                nr = layer.w.shape[0]
                nc = layer.w.shape[1]
                ### flatten whatever shape the preceding layer produced
                aa = act[index].reshape(1, nr)

                this_act = np.zeros((1, nc))
                for I in range(0, nc):
                    for II in range(0, nr):
                        this_act[0][I] += aa[0][II] * layer.w[II][I]
                    this_act[0][I] += layer.b[I]
                    # ReLU on every fully connected layer except the output
                    if index < N - 2 and this_act[0][I] < 0:
                        this_act[0][I] = 0
                act.append(np.array([this_act]))
                act2.append(np.array([this_act]))

            ### next layer
            index += 1

        label = np.argmax(act[index][0])
        return label, act, act2
# act2==>act
def conv_ss(prior_layer, prior_layer_filter, prior_I, prior_J, current_layer_filter, current_I, current_J, cnnet, X, act, act0, prior_fs):
    """Encode an SS-cover test requirement for a CNN as a CPLEX LP and solve it.

    Searches for a new input within L-inf distance 'd' (the LP objective,
    capped at 0.3) of the original input such that the activation signs of the
    'prior' neuron (prior_layer, prior_layer_filter, prior_I, prior_J) and the
    'current' neuron (prior_layer+1, current_layer_filter, current_I,
    current_J) both change, while all other ReLU phases observed in 'act' are
    kept fixed (so the network stays in the same linear region elsewhere).

    'act' holds per-layer pre-max-pooling activations (see CNNett.eval's
    act2 -- hence the "act2==>act" note above); 'prior_fs' lists the (row,
    col) positions of the prior layer that feed the current neuron.
    'act0' and 'X' are only used for the failure return value.

    Returns (True, new_input, d) on success; (False, X, -1) when the solved
    input leaves [0,1]; (False, [], -1) when the LP is infeasible/fails.
    NOTE(review): this file is Python 2 (print statements, '/' int division).
    """
    var_names0=['d']      # flat list of every LP variable name, 'd' first
    objective=[1]         # minimise d only
    lower_bounds=[0.0]
    upper_bounds=[1.0]
    if True: #prior_layer==0:
        upper_bounds=[0.3]   # cap the allowed input distance

    var_names=[] ## var_names are variables for neurons before max-pooling

    # One LP variable per neuron in every layer, named x_{layer}_{filter}_{row}_{col}.
    N=len(act) # #layers
    for i in range(0, N):
        M=len(act[i]) # #neurons at layer i
        var_names.append(np.empty(act[i].shape, dtype="S40"))
        for j in range(0, M):
            a=act[i][j]
            for k in range(0, len(a)):
                for l in range(0, len(a[k])):
                    var_name='x_{0}_{1}_{2}_{3}'.format(i,j,k,l)
                    objective.append(0)
                    lower_bounds.append(-cplex.infinity)
                    upper_bounds.append(cplex.infinity)
                    var_names[i][j][k][l]=var_name
                    var_names0.append(var_name)

    constraints=[]
    rhs=[]
    constraint_senses=[]
    constraint_names=[]

    # Input-layer constraints: every pixel within [0,1] and within d of the
    # original; if the prior neuron sits in the input layer it is forced to
    # change value here.
    for i in range(0, len(var_names[0])):
        a=var_names[0][i]
        for k in range(0, len(a)):
            for l in range(0, len(a[k])):
                v=a[k][l]
                # x<=x0+d
                constraints.append([['d', v], [-1, 1]])
                rhs.append(act[0][i][k][l])
                constraint_senses.append("L")
                constraint_names.append("x<=x"+str(i)+"+d")
                # x>=x0-d
                constraints.append([['d', v], [1, 1]])
                rhs.append(act[0][i][k][l])
                constraint_senses.append("G")
                constraint_names.append("x>=x"+str(i)+"-d")
                # x<=1
                constraints.append([[v], [1]])
                rhs.append(1.0)
                constraint_senses.append("L")
                constraint_names.append("x<=1")
                # x>=0
                constraints.append([[v], [1]])
                rhs.append(0.0)
                constraint_senses.append("G")
                constraint_names.append("x>=0")
                if (0==prior_layer and i==prior_layer_filter and prior_I==k and prior_J==l):
                    if act[0][i][k][l]==0:
                        # zero pixel: force it strictly positive
                        constraints.append([[v], [1]])
                        rhs.append(0.004)
                        constraint_senses.append("G")
                        constraint_names.append("x>=0")
                    else: # positive pixel: force it down to 0
                        constraints.append([[v], [1]])
                        rhs.append(0.0)
                        constraint_senses.append("L")
                        constraint_names.append("x<=0")

    index=0
    # conv_acts mirrors 'act' with variable names; entry 0 is the input layer
    conv_acts=[np.copy(var_names[0])]
    for layer in cnnet.hidden_layers:
        if layer.is_conv:
            nf=len(layer.w)       # number of filters
            _nf=len(act[index])   # number of channels from the preceding layer
            conv_act=[]
            for i in range(0, nf):
                nfr=layer.w[i][0].shape[0] # number of filter rows
                nfc=layer.w[i][0].shape[1] # number of filter columns
                f_act=act[index+1][i] #np.zeros((nr-nfr+1,nc-nfc+1))

                # in the layer right after the prior one only the current
                # neuron's filter/position needs to be encoded
                if index+1==prior_layer+1 and i!=current_layer_filter: continue

                for J in range(0, f_act.shape[0]):
                    if index+1==prior_layer+1 and i==current_layer_filter and current_I!=J: continue
                    for K in range(0, f_act.shape[1]):
                        if index+1==prior_layer+1 and i==current_layer_filter and current_I==J and current_J!=K: continue

                        # in the prior layer itself only the positions that
                        # feed the current neuron are relevant
                        if index+1==prior_layer and not (J,K) in prior_fs: continue

                        # affine constraint: x_{l+1} = conv(w, x_l) + b,
                        # using the same transposed/flipped kernel indexing
                        # as CNNett.eval
                        constraint=[[],[]]
                        constraint[0].append(var_names[index+1][i][J][K])
                        constraint[1].append(-1)

                        for j in range(0, _nf):
                            a=conv_acts[index][j]

                            for l in range(0, nfr):
                                for m in range(0, nfc):
                                    # we assume the existence of max-pooling...
                                    constraint[0].append(a[J+nfr-m-1][K+nfc-l-1])
                                    constraint[1].append(layer.w[i][j][m][l])
                        constraints.append(constraint)
                        rhs.append(-layer.b[i])
                        constraint_senses.append("E")
                        constraint_names.append("eq:"+"x_"+str(i)+"_"+str(j))
                        ### ReLU phase: the two targeted neurons are negated,
                        ### every other neuron keeps the sign observed in 'act'
                        _constraint=[[],[]]
                        v=var_names[index+1][i][J][K]
                        _constraint[0].append(v)
                        _constraint[1].append(1)
                        constraints.append(_constraint)
                        rhs.append(0)
                        if (index+1==prior_layer and i==prior_layer_filter and prior_I==J and prior_J==K) or (index+1==prior_layer+1 and i==current_layer_filter and current_I==J and current_J==K):
                            if act[index+1][i][J][K]>0:
                                constraint_senses.append("L")
                            else:
                                constraint_senses.append("G")
                        else:
                            if act[index+1][i][J][K]>0:
                                constraint_senses.append("G")
                            else:
                                constraint_senses.append("L")
                        constraint_names.append("relu: "+v)

                ### max-pool
                nr=f_act.shape[0]
                nc=f_act.shape[1]
                #### shape after max-pooling
                # NOTE(review): '/' integer division -- Python 2 only; under
                # Python 3 np.empty would receive floats here.
                p_act=np.empty((nr/layer.mp_size_x, nc/layer.mp_size_y), dtype="S40")
                # fresh variables L{layer}F{filter}-{row}-{col} for the
                # pooled outputs
                for pi in range(0, len(p_act)):
                    for pj in range(0, len(p_act[pi])):
                        v='L{0}F{1}-{2}-{3}'.format(index+1, i, pi, pj)
                        p_act[pi][pj]=v
                        var_names0.append(v)
                        objective.append(0)
                        lower_bounds.append(-cplex.infinity)
                        upper_bounds.append(cplex.infinity)
                for Ii in range(0, p_act.shape[0]):
                    for Ji in range(0, p_act.shape[1]):
                        ##########
                        II=layer.mp_size_x*Ii
                        JJ=layer.mp_size_y*Ji
                        for ii in range(layer.mp_size_x*Ii, layer.mp_size_x*(Ii+1)):
                            for jj in range(layer.mp_size_y*Ji, layer.mp_size_y*(Ji+1)):
                                # pooled output >= every input of its window
                                # (a relaxation: the pooled value is not tied
                                # to equal the window maximum)
                                _constraint=[[],[]]
                                _constraint=[[p_act[Ii][Ji], var_names[index+1][i][ii][jj]],[1,-1]]
                                constraints.append(_constraint)
                                rhs.append(0)
                                constraint_senses.append("G")
                                constraint_names.append("max-pooling")
                                # pooled output >= 0
                                _constraint=[[],[]]
                                _constraint=[[p_act[Ii][Ji]],[1]]
                                constraints.append(_constraint)
                                rhs.append(0)
                                constraint_senses.append("G")
                                constraint_names.append("max-pooling")
                conv_act.append(np.array(p_act))
            conv_acts.append(np.array(conv_act))
        else:
            # fully connected layer
            nr=layer.w.shape[0]
            nc=layer.w.shape[1]
            ### flatten the (pooled) variable names of the preceding layer
            vs=conv_acts[index].reshape(1,nr)
            # NOTE(review): 'conv_act' is the per-filter list left over from
            # the *last conv layer* -- presumably 'conv_acts[index].shape'
            # was intended; as written this only works when the FC layer
            # directly follows a conv layer. Confirm before changing.
            sh=conv_act[index].shape

            this_act=np.empty((1, nc), dtype="S40")
            for ti in range(0, len(this_act)):
                for tj in range(0, len(this_act[ti])):
                    this_act[ti][tj]=var_names[index+1][0][ti][tj]
            ######
            for I in range(0, nc):
                if index+1==prior_layer+1 and not (current_J==I): continue
                constraint=[[],[]]
                constraint[0].append(var_names[index+1][0][0][I])
                constraint[1].append(-1)
                for II in range(0, nr):
                    constraint[0].append(vs[0][II])
                    if cnnet.hidden_layers[index-1].is_conv: ### at least one convolutional layer before the 1st fully connected layer
                        constraint[1].append(layer.w[II][I])
                    elif (index==prior_layer and II==prior_layer_filter*sh[1]*sh[2] + prior_I*sh[2] + prior_J):
                        # the negated prior neuron: weight is active only when
                        # its original activation was non-positive
                        aa=act[index].reshape(1, nr)
                        if aa[0][II]>0:
                            constraint[1].append(0)
                        else:
                            constraint[1].append(layer.w[II][I])
                    else:
                        # ordinary neuron: weight active iff its ReLU fired
                        aa=act[index].reshape(1, nr)
                        if aa[0][II]>0 or index==0:
                            constraint[1].append(layer.w[II][I])
                        else:
                            constraint[1].append(0)
                constraints.append(constraint)
                rhs.append(-layer.b[I])
                constraint_senses.append("E")
                constraint_names.append('')

                if True: #index < N-2:
                    # ReLU phase of the FC neuron
                    _constraint=[[],[]]
                    v=var_names[index+1][0][0][I]
                    _constraint[0].append(v)
                    _constraint[1].append(1)
                    constraints.append(_constraint)
                    rhs.append(0)
                    # NOTE(review): 'i' below is left over from the conv loop
                    if (index+1==prior_layer and i==prior_layer_filter and prior_I==0 and prior_J==I) or (index+1==prior_layer+1 and i==current_layer_filter and current_I==0 and current_J==I):
                        if act[index+1][0][0][I]>0:
                            constraint_senses.append("L")
                        else:
                            constraint_senses.append("G")
                    # NOTE(review): the condition below was garbled in the
                    # extracted source ("elif index0:"); reconstructed as the
                    # keep-the-observed-sign case -- confirm against upstream.
                    elif act[index+1][0][0][I]>0:
                        constraint_senses.append("G")
                    else:
                        constraint_senses.append("L")
                    constraint_names.append("act{0}{1}{2}{3}>0".format(index+1, 0, 0, I))
            conv_acts.append(np.array([this_act]))

        ##########
        # layers above the prior one do not matter for this requirement
        if index==prior_layer: break
        index+=1
    ###### solve
    try:
        problem=cplex.Cplex()
        problem.variables.add(obj = objective,
                              lb = lower_bounds,
                              ub = upper_bounds,
                              names = var_names0)
        problem.linear_constraints.add(lin_expr=constraints,
                                       senses = constraint_senses,
                                       rhs = rhs,
                                       names = constraint_names)
        problem.solve()

        ####
        d=problem.solution.get_values("d")
        new_x=[]
        for i in range(0, len(var_names[0])):
            for var_x in var_names[0][i]:
                for var_xy in var_x:
                    v=problem.solution.get_values(var_xy)
                    if v<0 or v>1:
                        # solved pixel numerically out of range: reject
                        print d
                        print var_xy
                        print v
                        return False, X, -1
                    new_x.append(v)

        return True, new_x, d

    except:
        # infeasible LP or any solver error
        return False,[],-1
def main():
    """Command-line front-end: parse arguments, load the Keras model (.h5)
    and report the selected covering method.

    Exits with status 1 when the covering method is not one of
    ss / sv / ds / dv.
    """
    parser = argparse.ArgumentParser(
        description='DeepCover: Uncover Bugs in Deep Learning')

    parser.add_argument('model', action='store', nargs='+',
                        help='The input neural network model (.h5)')
    parser.add_argument('--cover', metavar='ss', action='store',
                        help='The covering method: ss, sv, ds, dv', default='ss')

    args = parser.parse_args()

    # Load eagerly so a broken model file fails before anything else runs;
    # the model object itself is not used yet in this prototype.
    model = load_model(args.model[0])

    if args.cover not in ('ss', 'sv', 'ds', 'dv'):
        print('Covering method cannot be recognized: ' + args.cover)
        # exit non-zero: an unrecognized method is an error (was exit(0),
        # which reported success to the shell)
        sys.exit(1)

    print('\n== WARNING == \n')
    print(
        'The input model: ' + args.model[0] + '\n' +
        'The covering method: ' + args.cover + '\n'
    )
    # typo fixed: 'deverlopment' -> 'development'
    print('This keras compatible implementation of DeepCover testing is currently under development...\n')
    print('=============\n')


if __name__ == "__main__":
    main()
def rp_ssc(I, J, K, nnet, X, act):
    """Encode the SS (sign-sign) cover requirement for the neuron pair
    (layer I, neuron J) -> (layer I+1, neuron K) as a CPLEX LP and solve it.

    The LP searches for a new input within L-inf distance d (the objective)
    of X such that the activation signs of neurons (I, J) and (I+1, K) both
    flip, while every other ReLU in layers 1..I keeps the sign observed in
    'act' (the network stays in the same linear region elsewhere).

    Returns (True, new_x, d, num_vars, num_constraints) on success and
    (False, [], -1, -1, -1) on any failure.  (The original code returned
    'False, _, _' with an undefined '_' -- a NameError that was silently
    swallowed by a bare 'except:'; failure tuples are now consistent.)
    """
    var_names=['d']
    objective=[1]
    lower_bounds=[0.0]
    upper_bounds=[1.0]

    N=len(act) # number of layers, including input and output
    # One LP variable per neuron up to layer I+1; at layer I+1 only neuron K
    # is ever referenced.
    for i in range(0, N):
        if i>I+1: break
        M=len(act[i]) # #neurons at layer i
        for j in range(0, M):
            if i==I+1 and j!=K: continue
            var_names.append('x_'+str(i)+'_'+str(j))
            objective.append(0)
            lower_bounds.append(-cplex.infinity)
            upper_bounds.append(cplex.infinity)

    constraints=[]
    rhs=[]
    constraint_senses=[]
    constraint_names=[]

    # Input box: every pixel stays within [0,1] and within d of X.
    # (Variable 0 is 'd', variable i+1 is pixel i.)
    for i in range(0, len(X)):
        # x<=x0+d
        constraints.append([[0, i+1], [-1, 1]])
        rhs.append(X[i])
        constraint_senses.append("L")
        constraint_names.append("x<=x"+str(i)+"+d")
        # x>=x0-d
        constraints.append([[0, i+1], [1, 1]])
        rhs.append(X[i])
        constraint_senses.append("G")
        constraint_names.append("x>=x"+str(i)+"-d")
        # x<=1
        constraints.append([[i+1], [1]])
        rhs.append(1.0)
        constraint_senses.append("L")
        constraint_names.append("x<=1")
        # x>=0
        constraints.append([[i+1], [1]])
        rhs.append(0.0)
        constraint_senses.append("G")
        constraint_names.append("x>=0")

    # Nothing to constrain for layer 0; the output layer is handled after
    # this loop.
    for i in range(1, I+2):
        M=len(act[i]) # number of neurons at layer i
        for j in range(0, M):
            #### for layer (I+1) we only need to access one neuron
            if i==I+1 and j!=K: continue
            # Affine propagation x_{i,j} = sum_k w*x_{i-1,k} + b, where each
            # input term is kept or zeroed according to the (fixed) ReLU
            # phase observed in 'act'; the weight through the negated neuron
            # (I, J) is treated with the opposite phase.
            constraint=[[],[]]
            constraint[0].append("x_"+str(i)+"_"+str(j))
            constraint[1].append(-1)
            for k in range(0, len(act[i-1])):
                constraint[0].append("x_"+str(i-1)+"_"+str(k))
                if i==1 or act[i-1][k]>0:
                    if not (i-1==I and k==J):
                        constraint[1].append(nnet.weights[i-1][k][j])
                    else:
                        constraint[1].append(0)
                else:
                    if not (i-1==I and k==J):
                        constraint[1].append(0)
                    else:
                        constraint[1].append(nnet.weights[i-1][k][j])
            constraints.append(constraint)
            rhs.append(-nnet.biases[i][j])
            constraint_senses.append("E")
            constraint_names.append("eq:"+"x_"+str(i)+"_"+str(j))

            ###### ReLU sign constraints.
            # NOTE(review): this region was partially garbled in the
            # extracted source; reconstructed so that untouched neurons keep
            # their observed sign while (I, J) and (I+1, K) are both
            # negated -- confirm against upstream.
            constraint=[[],[]]
            constraint[0].append("x_"+str(i)+"_"+str(j))
            constraint[1].append(1)
            constraints.append(constraint)
            rhs.append(0)
            if i<I+1 and not (i==I and j==J):
                # keep the observed sign
                if act[i][j]>0:
                    constraint_senses.append("G")
                else:
                    constraint_senses.append("L")
                constraint_names.append("relu:"+"x_"+str(i)+"_"+str(j))
            else:
                # the SS pair (I, J) / (I+1, K): negate the sign
                if act[i][j]>0:
                    constraint_senses.append("L")
                else:
                    constraint_senses.append("G")
                constraint_names.append("not relu:"+"x_"+str(i)+"_"+str(j))

    if I==N-2: # I+1==N-1
        #### The pair ends at the output layer: force the sign change of
        #### output neuron K directly.
        for i in range(0, len(act[N-1])):
            if i!=K: continue
            constraint=[[],[]]
            constraint[0].append("x_"+str(N-1)+"_"+str(i))
            constraint[1].append(1)
            constraints.append(constraint)
            rhs.append(0.0)
            if act[N-1][K]>0:
                constraint_senses.append("L")
            else:
                constraint_senses.append("G")
            constraint_names.append("not K")

    ###### solve
    try:
        problem=cplex.Cplex()
        problem.variables.add(obj = objective,
                              lb = lower_bounds,
                              ub = upper_bounds,
                              names = var_names)
        problem.linear_constraints.add(lin_expr=constraints,
                                       senses = constraint_senses,
                                       rhs = rhs,
                                       names = constraint_names)
        problem.solve()

        ####
        d=problem.solution.get_values("d")
        new_x=[]
        for i in range(0, len(X)):
            v=(problem.solution.get_values('x_0_'+str(i)))
            # numerical noise can push a pixel outside [0,1]: reject
            if v<0 or v>1: return False, [], -1, -1, -1
            new_x.append(v)

        # d==0 means no change at all; d==1 is the trivial solution
        if d==0 or d==1:
            return False, [], -1, -1, -1

        return True, new_x, d, problem.variables.get_num(), problem.linear_constraints.get_num()

    except Exception:
        # infeasible LP or solver failure (was a bare 'except:')
        return False, [], -1, -1, -1
def rp_dsc(I, J, nnet, X, act):
    """Encode the DS (distance-based sign change) requirement for neuron
    (layer I, neuron J) as a CPLEX LP: find an input within L-inf distance
    d (the objective) of X that flips the sign of neuron (I, J) while every
    other ReLU in layers 1..I-1 keeps the sign observed in 'act'.

    Returns (True, new_x, d) on success and (False, [], -1) on any failure.
    (The original code returned 'False, _, _' with an undefined '_' -- a
    NameError silently swallowed by a bare 'except:'; failure tuples are
    now consistent.)
    """
    var_names=['d']
    objective=[1]
    lower_bounds=[0.0]
    upper_bounds=[1.0]

    N=len(act) # number of layers, including input and output
    # one LP variable per neuron in every layer
    for i in range(0, N):
        M=len(act[i]) # #neurons at layer i
        for j in range(0, M):
            var_names.append('x_'+str(i)+'_'+str(j))
            objective.append(0)
            lower_bounds.append(-cplex.infinity)
            upper_bounds.append(cplex.infinity)

    constraints=[]
    rhs=[]
    constraint_senses=[]
    constraint_names=[]

    # Input box: every pixel stays within [0,1] and within d of X.
    # (Variable 0 is 'd', variable i+1 is pixel i.)
    for i in range(0, len(X)):
        # x<=x0+d
        constraints.append([[0, i+1], [-1, 1]])
        rhs.append(X[i])
        constraint_senses.append("L")
        constraint_names.append("x<=x"+str(i)+"+d")
        # x>=x0-d
        constraints.append([[0, i+1], [1, 1]])
        rhs.append(X[i])
        constraint_senses.append("G")
        constraint_names.append("x>=x"+str(i)+"-d")
        # x<=1
        constraints.append([[i+1], [1]])
        rhs.append(1.0)
        constraint_senses.append("L")
        constraint_names.append("x<=1")
        # x>=0
        constraints.append([[i+1], [1]])
        rhs.append(0.0)
        constraint_senses.append("G")
        constraint_names.append("x>=0")

    # Nothing to constrain for layer 0; layers 1..I, with only neuron J at
    # layer I. The output layer is handled after this loop.
    for i in range(1, I+1):
        M=len(act[i]) # number of neurons at layer i
        for j in range(0, M):
            #### at layer I we only need the target neuron
            if i==I and j!=J: continue
            # Affine propagation with the ReLU phases from 'act' baked in.
            constraint=[[],[]]
            constraint[0].append("x_"+str(i)+"_"+str(j))
            constraint[1].append(-1)
            for k in range(0, len(act[i-1])):
                constraint[0].append("x_"+str(i-1)+"_"+str(k))
                if i==1 or act[i-1][k]>0:
                    constraint[1].append(nnet.weights[i-1][k][j])
                else:
                    constraint[1].append(0)
            constraints.append(constraint)
            rhs.append(-nnet.biases[i][j])
            constraint_senses.append("E")
            constraint_names.append("eq:"+"x_"+str(i)+"_"+str(j))

            ###### ReLU sign constraints.
            # NOTE(review): this region was partially garbled in the
            # extracted source; reconstructed so that layers below I keep
            # their observed ReLU sign and the target neuron (I, J) is
            # negated -- confirm against upstream.
            constraint=[[],[]]
            constraint[0].append("x_"+str(i)+"_"+str(j))
            constraint[1].append(1)
            constraints.append(constraint)
            if i<I:
                # keep the observed sign
                rhs.append(0)
                if act[i][j]>0:
                    constraint_senses.append("G")
                else:
                    constraint_senses.append("L")
                constraint_names.append("relu:"+"x_"+str(i)+"_"+str(j))
            else:
                ## the target neuron (I, J): negate its sign
                rhs.append(0)
                if act[i][j]>0:
                    constraint_senses.append("L")
                else:
                    constraint_senses.append("G")
                constraint_names.append("relu:"+"x_"+str(i)+"_"+str(j))

    if I==N-1:
        #### Target at the output layer: constrain its sign directly.
        for i in range(0, len(act[N-1])):
            if i!=J: continue
            constraint=[[],[]]
            constraint[0].append("x_"+str(N-1)+"_"+str(i))
            constraint[1].append(1)
            constraints.append(constraint)

            ##1) ReLU sign does not change
            rhs.append(0)
            if act[I][J]>0:
                constraint_senses.append("L")
            else:
                constraint_senses.append("G")
            constraint_names.append("relu sign:"+"x_"+str(I)+"_"+str(J))

    ###### solve
    try:
        problem=cplex.Cplex()
        problem.variables.add(obj = objective,
                              lb = lower_bounds,
                              ub = upper_bounds,
                              names = var_names)
        problem.linear_constraints.add(lin_expr=constraints,
                                       senses = constraint_senses,
                                       rhs = rhs,
                                       names = constraint_names)
        problem.solve()

        ####
        d=problem.solution.get_values("d")
        new_x=[]
        for i in range(0, len(X)):
            v=(problem.solution.get_values('x_0_'+str(i)))
            # numerical noise can push a pixel outside [0,1]: reject
            if v<0 or v>1: return False, [], -1
            new_x.append(v)

        # d==0 means no change at all; d==1 is the trivial solution
        if d==0 or d==1:
            return False, [], -1

        return True, new_x, d

    except Exception:
        # infeasible LP or solver failure (was a bare 'except:')
        return False, [], -1
def rp_svc(I, J, K, nnet, X, act, sfactor):
    """Encode and solve the symbolic value-change (SVC) condition as an LP.

    Searches for an input x' within L-infinity distance d of X (d is the
    minimised objective) such that the activation sign of neuron (I, J)
    flips while neuron (I+1, K) keeps its sign and its value is scaled by
    `sfactor`.  # NOTE(review): semantics inferred from constraint names
    # ("not relu", "relu value change") — confirm against the DeepCover paper.

    Parameters
    ----------
    I, J : layer / neuron index of the condition neuron whose sign changes.
    K    : neuron index at layer I+1 whose sign is preserved and whose
           value must change by factor `sfactor`.
    nnet : network object exposing weights[i][k][j] and biases[i][j].
    X    : original input vector; components assumed in [0, 1].
    act  : act[i][j] is the post-ReLU activation of neuron j at layer i on X.
    sfactor : scaling factor for the value change at (I+1, K).

    Returns
    -------
    (True, new_x, d) when a valid witness input is found,
    (False, [], -1) otherwise.
    """
    # Decision variables: d (the distance, minimised) plus one x_i_j per neuron.
    var_names = ['d']
    objective = [1]
    lower_bounds = [0.0]
    upper_bounds = [1.0]

    N = len(act)  # number of layers
    for i in range(N):
        for j in range(len(act[i])):
            var_names.append('x_' + str(i) + '_' + str(j))
            objective.append(0)
            lower_bounds.append(-cplex.infinity)
            upper_bounds.append(cplex.infinity)

    # Four parallel lists; every constraint appends to all four so their
    # lengths stay in sync (a mismatch makes CPLEX reject the model).
    constraints = []
    rhs = []
    constraint_senses = []
    constraint_names = []

    # Input box: x0 - d <= x <= x0 + d and 0 <= x <= 1 for every input pixel.
    for i in range(len(X)):
        constraints.append([[0, i + 1], [-1, 1]])
        rhs.append(X[i])
        constraint_senses.append("L")
        constraint_names.append("x<=x" + str(i) + "+d")
        constraints.append([[0, i + 1], [1, 1]])
        rhs.append(X[i])
        constraint_senses.append("G")
        constraint_names.append("x>=x" + str(i) + "-d")
        constraints.append([[i + 1], [1]])
        rhs.append(1.0)
        constraint_senses.append("L")
        constraint_names.append("x<=1")
        constraints.append([[i + 1], [1]])
        rhs.append(0.0)
        constraint_senses.append("G")
        constraint_names.append("x>=0")

    # Layer 0 is the input, so propagation starts at layer 1; at layer I+1
    # only neuron K is constrained.
    for i in range(1, I + 2):
        for j in range(len(act[i])):
            if i == I + 1 and j != K:
                continue

            # x_i_j equals the weighted sum of the previous layer's
            # post-ReLU values plus the bias.  The flipped neuron (I, J)
            # contributes according to its NEW (negated) activation sign.
            constraint = [[], []]
            constraint[0].append("x_" + str(i) + "_" + str(j))
            constraint[1].append(-1)
            for k in range(len(act[i - 1])):
                constraint[0].append("x_" + str(i - 1) + "_" + str(k))
                if i == 1 or act[i - 1][k] > 0:
                    if not (i - 1 == I and k == J):
                        constraint[1].append(nnet.weights[i - 1][k][j])
                    else:
                        constraint[1].append(0)
                else:
                    if not (i - 1 == I and k == J):
                        constraint[1].append(0)
                    else:
                        constraint[1].append(nnet.weights[i - 1][k][j])
            constraints.append(constraint)
            rhs.append(-nnet.biases[i][j])
            constraint_senses.append("E")
            constraint_names.append("eq:" + "x_" + str(i) + "_" + str(j))

            # ReLU sign constraint on x_i_j, shared by all three cases below.
            _constraint = [[], []]
            _constraint[0].append("x_" + str(i) + "_" + str(j))
            _constraint[1].append(1)
            constraints.append(_constraint)
            rhs.append(0)

            if i == I and j == J:
                # Condition neuron: its activation sign must CHANGE.
                if act[i][j] > 0:
                    constraint_senses.append("L")
                else:
                    constraint_senses.append("G")
                constraint_names.append("not relu:" + "x_" + str(i) + "_" + str(j))
            elif i < I + 1:
                # Every other neuron up to layer I keeps its sign.
                if act[i][j] > 0:
                    constraint_senses.append("G")
                else:
                    constraint_senses.append("L")
                constraint_names.append("relu:" + "x_" + str(i) + "_" + str(j))
            else:
                # i == I+1 and j == K: sign preserved ...
                if act[i][j] > 0:
                    constraint_senses.append("G")
                else:
                    constraint_senses.append("L")
                constraint_names.append("relu:" + "x_" + str(i) + "_" + str(j))

                # ... and the value must change by the factor sfactor.
                _constraint = [[], []]
                _constraint[0].append("x_" + str(i) + "_" + str(j))
                _constraint[1].append(1)
                constraints.append(_constraint)
                rhs.append(sfactor * act[I + 1][K])
                if act[i][j] > 0:
                    if sfactor > 1.0:
                        constraint_senses.append("G")
                    else:
                        constraint_senses.append("L")
                else:
                    if sfactor > 1.0:
                        constraint_senses.append("L")
                    else:
                        constraint_senses.append("G")
                constraint_names.append("relu value change:" + "x_" + str(i) + "_" + str(j))

    if I == N - 2:
        # Layer I+1 is the output layer; re-state the sign-preservation and
        # value-change conditions directly on x_{N-1, K}.
        constraint = [[], []]
        constraint[0].append("x_" + str(N - 1) + "_" + str(K))
        constraint[1].append(1)
        constraints.append(constraint)
        rhs.append(0)
        if act[I + 1][K] > 0:
            constraint_senses.append("G")
        else:
            constraint_senses.append("L")
        constraint_names.append("relu sign:" + "x_" + str(I + 1) + "_" + str(K))

        _constraint = [[], []]
        _constraint[0].append("x_" + str(I + 1) + "_" + str(K))
        _constraint[1].append(1)
        constraints.append(_constraint)
        rhs.append(sfactor * act[I + 1][K])
        if act[I + 1][K] > 0:
            if sfactor > 1.0:
                constraint_senses.append("G")
            else:
                constraint_senses.append("L")
        else:
            if sfactor > 1.0:
                constraint_senses.append("L")
            else:
                constraint_senses.append("G")
        constraint_names.append("relu value change:" + "x_" + str(I + 1) + "_" + str(K))

    # Solve the LP; CPLEX raises if the model is malformed or no solution
    # exists, which we map onto the failure result.
    try:
        problem = cplex.Cplex()
        problem.variables.add(obj=objective,
                              lb=lower_bounds,
                              ub=upper_bounds,
                              names=var_names)
        problem.linear_constraints.add(lin_expr=constraints,
                                       senses=constraint_senses,
                                       rhs=rhs,
                                       names=constraint_names)
        problem.solve()

        d = problem.solution.get_values("d")
        new_x = []
        for i in range(len(X)):
            v = problem.solution.get_values('x_0_' + str(i))
            if v < 0 or v > 1:
                # was `return False, _, _`: `_` is undefined here (NameError)
                return False, [], -1
            new_x.append(v)

        if d == 0 or d == 1:
            # d == 0 means no actual change; d == 1 admits arbitrary inputs.
            return False, [], -1

        return True, new_x, d
    except Exception:
        # Infeasible model / missing solution: report failure to the caller.
        return False, [], -1
def rp_dvc(I, J, nnet, X, act, sfactor):
    """Encode and solve the direct value-change (DVC) condition as an LP.

    Searches for an input x' within L-infinity distance d of X (d is the
    minimised objective) such that every neuron up to layer I keeps its
    activation sign and the value of neuron (I, J) is scaled by `sfactor`.
    # NOTE(review): semantics inferred from constraint names — confirm
    # against the DeepCover paper.

    Parameters
    ----------
    I, J : layer / neuron index of the neuron whose value must change.
    nnet : network object exposing weights[i][k][j] and biases[i][j].
    X    : original input vector; components assumed in [0, 1].
    act  : act[i][j] is the post-ReLU activation of neuron j at layer i on X.
    sfactor : scaling factor for the value change at (I, J).

    Returns
    -------
    (True, new_x, d) when a valid witness input is found,
    (False, [], -1) otherwise.
    """
    # Decision variables: d (the distance, minimised) plus one x_i_j per neuron.
    var_names = ['d']
    objective = [1]
    lower_bounds = [0.0]
    upper_bounds = [1.0]

    N = len(act)  # number of layers
    for i in range(N):
        for j in range(len(act[i])):
            var_names.append('x_' + str(i) + '_' + str(j))
            objective.append(0)
            lower_bounds.append(-cplex.infinity)
            upper_bounds.append(cplex.infinity)

    # Four parallel lists; every constraint appends to all four so their
    # lengths stay in sync (a mismatch makes CPLEX reject the model).
    constraints = []
    rhs = []
    constraint_senses = []
    constraint_names = []

    # Input box: x0 - d <= x <= x0 + d and 0 <= x <= 1 for every input pixel.
    for i in range(len(X)):
        constraints.append([[0, i + 1], [-1, 1]])
        rhs.append(X[i])
        constraint_senses.append("L")
        constraint_names.append("x<=x" + str(i) + "+d")
        constraints.append([[0, i + 1], [1, 1]])
        rhs.append(X[i])
        constraint_senses.append("G")
        constraint_names.append("x>=x" + str(i) + "-d")
        constraints.append([[i + 1], [1]])
        rhs.append(1.0)
        constraint_senses.append("L")
        constraint_names.append("x<=1")
        constraints.append([[i + 1], [1]])
        rhs.append(0.0)
        constraint_senses.append("G")
        constraint_names.append("x>=0")

    # Layer 0 is the input, so propagation starts at layer 1; at layer I
    # only neuron J is constrained.
    for i in range(1, I + 1):
        for j in range(len(act[i])):
            if i == I and j != J:
                continue

            # x_i_j equals the weighted sum of the previous layer's
            # post-ReLU values plus the bias (inactive inputs contribute 0).
            constraint = [[], []]
            constraint[0].append("x_" + str(i) + "_" + str(j))
            constraint[1].append(-1)
            for k in range(len(act[i - 1])):
                constraint[0].append("x_" + str(i - 1) + "_" + str(k))
                if i == 1 or act[i - 1][k] > 0:
                    constraint[1].append(nnet.weights[i - 1][k][j])
                else:
                    constraint[1].append(0)
            constraints.append(constraint)
            rhs.append(-nnet.biases[i][j])
            constraint_senses.append("E")
            constraint_names.append("eq:" + "x_" + str(i) + "_" + str(j))

            # ReLU sign constraint on x_i_j: every neuron, including the
            # target (I, J), keeps its activation sign.
            _constraint = [[], []]
            _constraint[0].append("x_" + str(i) + "_" + str(j))
            _constraint[1].append(1)
            constraints.append(_constraint)
            rhs.append(0)
            if act[i][j] > 0:
                constraint_senses.append("G")
            else:
                constraint_senses.append("L")
            constraint_names.append("relu:" + "x_" + str(i) + "_" + str(j))

            if i == I and j == J:
                # Target neuron: its value must change by the factor sfactor.
                _constraint = [[], []]
                _constraint[0].append("x_" + str(i) + "_" + str(j))
                _constraint[1].append(1)
                constraints.append(_constraint)
                rhs.append(sfactor * act[I][J])
                if act[i][j] > 0:
                    if sfactor > 1.0:
                        constraint_senses.append("G")
                    else:
                        constraint_senses.append("L")
                else:
                    if sfactor > 1.0:
                        constraint_senses.append("L")
                    else:
                        constraint_senses.append("G")
                constraint_names.append("relu value change:" + "x_" + str(i) + "_" + str(j))

    if I == N - 1:
        # Layer I is the output layer; re-state the sign-preservation and
        # value-change conditions directly on x_{N-1, J}.
        constraint = [[], []]
        constraint[0].append("x_" + str(N - 1) + "_" + str(J))
        constraint[1].append(1)
        constraints.append(constraint)
        rhs.append(0)
        if act[I][J] > 0:
            constraint_senses.append("G")
        else:
            constraint_senses.append("L")
        constraint_names.append("relu sign:" + "x_" + str(I) + "_" + str(J))

        _constraint = [[], []]
        _constraint[0].append("x_" + str(I) + "_" + str(J))
        _constraint[1].append(1)
        constraints.append(_constraint)
        rhs.append(sfactor * act[I][J])
        if act[I][J] > 0:
            if sfactor > 1.0:
                constraint_senses.append("G")
            else:
                constraint_senses.append("L")
        else:
            if sfactor > 1.0:
                constraint_senses.append("L")
            else:
                constraint_senses.append("G")
        constraint_names.append("relu value change:" + "x_" + str(I) + "_" + str(J))

    # Solve the LP; CPLEX raises if the model is malformed or no solution
    # exists, which we map onto the failure result.
    try:
        problem = cplex.Cplex()
        problem.variables.add(obj=objective,
                              lb=lower_bounds,
                              ub=upper_bounds,
                              names=var_names)
        problem.linear_constraints.add(lin_expr=constraints,
                                       senses=constraint_senses,
                                       rhs=rhs,
                                       names=constraint_names)
        problem.solve()

        d = problem.solution.get_values("d")
        new_x = []
        for i in range(len(X)):
            v = problem.solution.get_values('x_0_' + str(i))
            if v < 0 or v > 1:
                # was `return False, _, _`: `_` is undefined here (NameError)
                return False, [], -1
            new_x.append(v)

        if d == 0 or d == 1:
            # d == 0 means no actual change; d == 1 admits arbitrary inputs.
            return False, [], -1

        return True, new_x, d
    except Exception:
        # Infeasible model / missing solution: report failure to the caller.
        return False, [], -1
692 | sys.exit(0) 693 | 694 | -------------------------------------------------------------------------------- /src/nnett.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | #from intervalt import * 4 | import sys 5 | 6 | def __MAX__(x, y): 7 | if x>y: return x 8 | else: return y 9 | 10 | class NNett: 11 | def __init__(self, _weights, _biases): 12 | self.weights=_weights 13 | self.biases=_biases 14 | self.weights.append([]) # output layer has empty weight vector 15 | self.biases=[[]]+self.biases # input layer has empty bias vector 16 | 17 | def eval(self, X): 18 | # act[i][j] is the activation value (after ReLU) of the j-th neuron at the i-th layer 19 | act=[] 20 | act.append(X) # X is the input vector to be evaluated 21 | 22 | N=len(self.weights) # N is the #layers 23 | 24 | for i in range(1, N): 25 | act.append([]) 26 | M=len(self.weights[i-1][0]) # M is the #neurons at layer (i+1) 27 | # to compute the activation value for each neuron at layer i 28 | for j in range(0, M): 29 | val=0 # the activation value is the weighted sum of input from previous layer, plus the bias 30 | for k in range(0, len(self.weights[i-1])): 31 | val+=__MAX__(act[i-1][k],0) * self.weights[i-1][k][j] 32 | val+=self.biases[i][j] 33 | #if i