├── COGP
│   ├── COGP_main.py
│   ├── FEVal_norm_fast.py
│   ├── STGPdataType.py
│   ├── evalGP.py
│   ├── f1_properties.txt
│   ├── f1_test_data.npy
│   ├── f1_test_label.npy
│   ├── f1_train_data.npy
│   ├── f1_train_label.npy
│   ├── functionSet.py
│   ├── gp_restrict.py
│   └── gp_tree.py
├── FELGP
│   ├── FELGP_main.py
│   ├── algo_iegp.py
│   ├── f1_properties.txt
│   ├── f1_test_data.npy
│   ├── f1_test_label.npy
│   ├── f1_train_data.npy
│   ├── f1_train_label.npy
│   ├── felgp_functions.py
│   ├── gp_restrict.py
│   ├── saveFile.py
│   ├── sift_features.py
│   └── strongGPDataType.py
├── FlexGP
│   ├── FGP_main.py
│   ├── evalGP_fgp.py
│   ├── f1_properties.txt
│   ├── f1_test_data.npy
│   ├── f1_test_label.npy
│   ├── f1_train_data.npy
│   ├── f1_train_label.npy
│   ├── fgp_functions.py
│   ├── gp_restrict.py
│   ├── sift_features.py
│   └── strongGPDataType.py
├── IDGP
│   ├── IDGP_main.py
│   ├── evalGP_main.py
│   ├── f1_properties.txt
│   ├── f1_test_data.npy
│   ├── f1_test_label.npy
│   ├── f1_train_data.npy
│   ├── f1_train_label.npy
│   ├── feature_extractors.py
│   ├── feature_function.py
│   ├── gp_restrict.py
│   ├── sift_features.py
│   └── strongGPDataType.py
├── MLGP
│   ├── MLGP_main.py
│   ├── evalGP.py
│   ├── fitnessEvaluation.py
│   ├── functionSet.py
│   ├── gp_alter.py
│   ├── gp_restrict.py
│   ├── uiuc_train_data.npy
│   └── uiuc_train_label.npy
└── README.md

/COGP/COGP_main.py:
--------------------------------------------------------------------------------
1 | #python packages
2 | import random
3 | import time
4 | import operator
5 | #for using multiple CPU cores in fitness evaluation
6 | from scoop import futures
7 | import numpy as np
8 | # deap package
9 | import evalGP
10 | import gp_restrict
11 | import gp_tree
12 | from deap import base, creator, tools, gp
13 | # fitness function and function for testing
14 | from FEVal_norm_fast import evalTest, feature_length
15 | from sklearn.svm import LinearSVC
16 | from sklearn.model_selection import cross_val_score
17 | from sklearn import preprocessing
18 | #COGP data types
19 | from STGPdataType import Img, Img2, Vector, Int, Double, Filter # defined by author
20 | import functionSet as fs
21 | 
22 | randomSeeds = 3
23 | dataSetName = 'f1'
24 | 
25 | x_train = np.load(dataSetName+'_train_data.npy')
26 | y_train = np.load(dataSetName+'_train_label.npy')
27 | x_test = np.load(dataSetName+'_test_data.npy')
28 | y_test = np.load(dataSetName+'_test_label.npy')
29 | print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
30 | 
31 | #COGP parameters:
32 | population = 500
33 | generation = 50
34 | cxProb = 0.5
35 | mutProb = 0.49
36 | elitismProb = 0.01
37 | totalRuns = 1
38 | initialMinDepth = 2
39 | initialMaxDepth = 6
40 | maxDepth = 8
41 | ##COGP tree structure, function set and terminal set
42 | pset = gp_tree.PrimitiveSetTyped('MAIN', [Img], Vector, prefix='Image')
43 | #Functions at Concatenation layer
44 | pset.addPrimitive(fs.root_conVector2, [Vector, Vector], Vector, name='Root1')
45 | pset.addPrimitive(fs.root_conVector2, [Img2, Img2], Vector, name='Root2')
46 | pset.addPrimitive(fs.root_conVector3, [Img2, Img2, Img2], Vector, name='Root3')
47 | pset.addPrimitive(fs.root_conVector4, [Img2, Img2, Img2, Img2], Vector, name='Root4')
48 | #Filtering at a flexible layer. Use *F as the names of the functions to avoid the same names, which is not allowed in DEAP
49 | pset.addPrimitive(fs.ZeromaxP, [Img2, Int, Int], Img2, name='ZMaxPF')
50 | #Filtering at a flexible layer. 
Use *F as the names of the functions to avoid the same names, which is not allowed in DEAP 51 | pset.addPrimitive(fs.mixconadd, [Img2, Double, Img2, Double], Img2, name='AddF') 52 | pset.addPrimitive(fs.mixconsub, [Img2, Double, Img2, Double], Img2, name='SubF') 53 | pset.addPrimitive(np.abs, [Img2], Img2, name='AbsF') 54 | pset.addPrimitive(fs.sqrt, [Img2], Img2, name='SqrtF') 55 | pset.addPrimitive(fs.relu, [Img2], Img2, name='ReluF') 56 | pset.addPrimitive(fs.conv_filters, [Img2, Filter], Img2, name='ConvF') #convolution operator 57 | #Pooling functions at the Pooling layer. 58 | pset.addPrimitive(fs.maxP,[Img2, Int, Int], Img2,name='MaxPF') # max-pooling operator 59 | pset.addPrimitive(fs.maxP,[Img, Int, Int], Img2,name='MaxP') #max-pooling operator 60 | #Filteing functions at the Filtering layer 61 | pset.addPrimitive(fs.mixconadd, [Img, Double, Img, Double], Img, name='Add') 62 | pset.addPrimitive(fs.mixconsub, [Img, Double, Img, Double], Img, name='Sub') 63 | pset.addPrimitive(np.abs, [Img], Img, name='Abs') 64 | pset.addPrimitive(fs.sqrt, [Img], Img, name='Sqrt') 65 | pset.addPrimitive(fs.relu, [Img], Img, name='Relu') 66 | pset.addPrimitive(fs.conv_filters, [Img, Filter], Img, name='Conv') #convolution operator 67 | #Terminals 68 | pset.renameArguments(ARG0='grey') #the input image 69 | pset.addEphemeralConstant('randomD',lambda:round(random.random(),3),Double) # parameters for the Add, Sub, AddF and SubF functions 70 | pset.addEphemeralConstant('filters3',lambda:list(fs.random_filters(3)), Filter) #3 * 3 filters 71 | pset.addEphemeralConstant('filters5',lambda:list(fs.random_filters(5)), Filter) #5 * 5 filters 72 | pset.addEphemeralConstant('filters7',lambda:list(fs.random_filters(7)), Filter) #7 * 7 filters 73 | pset.addEphemeralConstant('kernelSize',lambda:random.randrange(2,5,2), Int) # kernel size for the max-pooling functions 74 | 75 | creator.create("FitnessMax", base.Fitness, weights=(1.0,)) 76 | creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax) 77 | 78 | toolbox = base.Toolbox() 79 | toolbox.register("expr", gp_restrict.genHalfAndHalfMD, pset=pset, min_=initialMinDepth, max_=initialMaxDepth) 80 | toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr) 81 | toolbox.register("population", tools.initRepeat, list, toolbox.individual) 82 | toolbox.register("compile", gp.compile, pset=pset) 83 | toolbox.register("mapp", futures.map) 84 | 85 | #Fitness evaluation 86 | def evalTrain(individual): 87 | try: 88 | func = toolbox.compile(expr=individual) 89 | train_tf = [] 90 | for i in range(0, len(y_train)): 91 | train_tf.append(np.asarray(func(x_train[i, :, :]))) 92 | min_max_scaler = preprocessing.MinMaxScaler() 93 | train_norm = min_max_scaler.fit_transform(np.asarray(train_tf)) 94 | lsvm= LinearSVC() 95 | accuracy = round(100*cross_val_score(lsvm, train_norm, y_train, cv=5).mean(),2) 96 | except: 97 | accuracy=0 98 | return accuracy, 99 | 100 | toolbox.register("evaluate", evalTrain) 101 | toolbox.register("select", tools.selTournament,tournsize=7) 102 | toolbox.register("selectElitism", tools.selBest) 103 | toolbox.register("mate", gp.cxOnePoint) 104 | toolbox.register("expr_mut", gp_restrict.genFull, min_=0, max_=6) 105 | toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset) 106 | toolbox.register("mutate_eph", gp.mutEphemeral, mode='all') 107 | toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 108 | toolbox.decorate("mutate", 
gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 109 | 110 | def GPMain(randomSeeds): 111 | random.seed(randomSeeds) 112 | pop = toolbox.population(population) 113 | hof = tools.HallOfFame(10) 114 | log = tools.Logbook() 115 | stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values) 116 | stats_size_tree = tools.Statistics(key=len) 117 | mstats = tools.MultiStatistics(fitness=stats_fit,size_tree=stats_size_tree) 118 | mstats.register("avg", np.mean) 119 | mstats.register("std", np.std) 120 | mstats.register("min", np.min) 121 | mstats.register("max", np.max) 122 | log.header = ["gen", "evals"] + mstats.fields 123 | 124 | pop, log = evalGP.eaSimple(randomSeeds, pop, toolbox, cxProb, mutProb, elitismProb, generation, 125 | stats=mstats, halloffame=hof, verbose=True) 126 | 127 | return pop,log, hof 128 | 129 | if __name__ == "__main__": 130 | beginTime = time.process_time() 131 | pop, log, hof = GPMain(randomSeeds) 132 | endTime = time.process_time() 133 | trainTime = endTime - beginTime 134 | 135 | train_tf, test_tf, trainLabel, testL, testResults = evalTest(toolbox, hof[0], x_train, y_train, x_test, y_test) 136 | testTime = time.process_time() - endTime 137 | 138 | print('Best individual ', hof[0]) 139 | print('Test results ', testResults) 140 | print('Train time ', trainTime) 141 | print('Test time ', testTime) 142 | print('Train set shape ', train_tf.shape) 143 | print('Test set shape ', test_tf.shape) 144 | print('End') -------------------------------------------------------------------------------- /COGP/FEVal_norm_fast.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.svm import LinearSVC 3 | from sklearn import preprocessing 4 | 5 | def feature_length(ind, instances, toolbox): 6 | func=toolbox.compile(ind) 7 | try: 8 | feature_len = len(func(instances)) 9 | except: feature_len=0 10 | return feature_len, 11 | 12 | 13 | def evalTest(toolbox, individual, trainData, trainLabel, test, testL): 14 | func = toolbox.compile(expr=individual) 15 | train_tf = [] 16 | test_tf = [] 17 | for i in range(0, len(trainLabel)): 18 | train_tf.append(np.asarray(func(trainData[i, :, :]))) 19 | for j in range(0, len(testL)): 20 | test_tf.append(np.asarray(func(test[j, :, :]))) 21 | min_max_scaler = preprocessing.MinMaxScaler() 22 | train_norm = min_max_scaler.fit_transform(np.asarray(train_tf)) 23 | test_norm = min_max_scaler.transform(np.asarray(test_tf)) 24 | lsvm= LinearSVC() 25 | lsvm.fit(train_norm, trainLabel) 26 | accuracy = round(100*lsvm.score(test_norm, testL),2) 27 | return np.asarray(train_tf), np.asarray(test_tf), trainLabel, testL, accuracy 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /COGP/STGPdataType.py: -------------------------------------------------------------------------------- 1 | class Img: 2 | def __init__(ndarray): 3 | pass 4 | 5 | class Img2: 6 | def __init__(ndarray): 7 | pass 8 | 9 | class Double : 10 | def __init__(float): 11 | pass 12 | 13 | class Int: 14 | def __init__(int): 15 | pass 16 | 17 | class Vector: 18 | def __init__(ndarray): 19 | pass 20 | 21 | class Filter: 22 | def __init__(ndarray): 23 | pass -------------------------------------------------------------------------------- /COGP/evalGP.py: -------------------------------------------------------------------------------- 1 | import random 2 | from deap import tools 3 | 4 | def varAnd(population, toolbox, cxpb, mutpb): 5 | offspring = 
[toolbox.clone(ind) for ind in population]
6 |     new_cxpb = cxpb / (cxpb + mutpb)
7 |     new_mutpb = mutpb / (cxpb + mutpb)
8 |     i = 1
9 |     while i < len(offspring):
10 |         if random.random() < new_cxpb:
11 |             offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])
12 |             del offspring[i - 1].fitness.values, offspring[i].fitness.values
13 |             i = i + 2
14 |         else:
15 |             if random.random() < 0.5:
16 |                 offspring[i], = toolbox.mutate(offspring[i])
17 |             else:
18 |                 offspring[i], = toolbox.mutate_eph(offspring[i])
19 |             del offspring[i].fitness.values
20 |             i = i + 1
21 |     return offspring
22 | 
23 | def varAndp(population, toolbox, cxpb, mutpb):
24 |     """Part of an evolutionary algorithm applying only the variation part
25 |     (crossover **and** mutation). The modified individuals have their
26 |     fitness invalidated. The individuals are cloned, so the returned population
27 |     is independent of the input population.
28 | 
29 |     :param population: A list of individuals to vary.
30 |     :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
31 |                     operators.
32 |     :param cxpb: The probability of mating two individuals.
33 |     :param mutpb: The probability of mutating an individual.
34 | 
35 |     :returns: A list of varied individuals that are independent of their
36 |               parents.
37 | 
38 |     The variation goes as follows. First, the parental population
39 |     :math:`P_\mathrm{p}` is duplicated using the :meth:`toolbox.clone` method
40 |     and the result is put into the offspring population :math:`P_\mathrm{o}`. A
41 |     first loop over :math:`P_\mathrm{o}` is executed to mate pairs of
42 |     consecutive individuals. According to the crossover probability *cxpb*, the
43 |     individuals :math:`\mathbf{x}_i` and :math:`\mathbf{x}_{i+1}` are mated
44 |     using the :meth:`toolbox.mate` method. The resulting children
45 |     :math:`\mathbf{y}_i` and :math:`\mathbf{y}_{i+1}` replace their respective
46 |     parents in :math:`P_\mathrm{o}`. A second loop over the resulting
47 |     :math:`P_\mathrm{o}` is executed to mutate every individual with a
48 |     probability *mutpb*. When an individual is mutated, it replaces its
49 |     non-mutated version in :math:`P_\mathrm{o}`. The resulting :math:`P_\mathrm{o}`
50 |     is returned.
51 | 
52 |     This variation is named *And* because of its propensity to apply both
53 |     crossover and mutation on the individuals. Note that both operators are
54 |     not applied systematically; the resulting individuals can be generated from
55 |     crossover only, mutation only, crossover and mutation, or reproduction,
56 |     according to the given probabilities. Both probabilities should be in
57 |     :math:`[0, 1]`. 
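
    Illustrative call (not part of the original file; 0.5 and 0.49 are the
    cxProb and mutProb settings used in COGP_main.py)::

        offspring = varAndp(population, toolbox, 0.5, 0.49)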
58 |     """
59 |     offspring = [toolbox.clone(ind) for ind in population]
60 |     new_cxpb = cxpb / (cxpb + mutpb)
61 | 
62 |     #num_cx=int(new_cxpb*len(offspring))
63 |     #num_mu=len(offspring)-num_cx
64 |     #print(new_cxpb, new_mutpb)
65 |     # Apply crossover and mutation on the offspring
66 |     i = 1
67 |     while i < len(offspring):
68 |         if random.random() < new_cxpb:
69 |             if offspring[i - 1] == offspring[i]:
70 |                 offspring[i - 1], = toolbox.mutate(offspring[i - 1])
71 |                 offspring[i], = toolbox.mutate(offspring[i])
72 |             else:
73 |                 offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])
74 |             del offspring[i - 1].fitness.values, offspring[i].fitness.values
75 |             i = i + 2
76 |         else:
77 |             offspring[i], = toolbox.mutate(offspring[i])
78 |             del offspring[i].fitness.values
79 |             i = i + 1
80 |     return offspring
81 | 
82 | 
83 | def eaSimple(randomseed, population, toolbox, cxpb, mutpb, elitpb, ngen, stats=None,
84 |              halloffame=None, verbose=__debug__):
85 |     """This algorithm reproduces the simplest evolutionary algorithm as
86 |     presented in chapter 7 of [Back2000]_.
87 | 
88 |     :param population: A list of individuals.
89 |     :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
90 |                     operators.
91 |     :param cxpb: The probability of mating two individuals.
92 |     :param mutpb: The probability of mutating an individual.
93 |     :param elitpb: The probability of elitism.
94 |     :param ngen: The number of generations.
95 |     :param stats: A :class:`~deap.tools.Statistics` object that is updated
96 |                   inplace, optional.
97 |     :param halloffame: A :class:`~deap.tools.HallOfFame` object that will
98 |                        contain the best individuals, optional.
99 |     :param verbose: Whether or not to log the statistics.
100 |     :returns: The final population
101 |     :returns: A :class:`~deap.tools.Logbook` with the statistics of the
102 |               evolution
103 | 
104 |     The algorithm takes in a population and evolves it in place using the
105 |     :meth:`varAnd` method. It returns the optimized population and a
106 |     :class:`~deap.tools.Logbook` with the statistics of the evolution. The
107 |     logbook will contain the generation number, the number of evaluations for
108 |     each generation and the statistics if a :class:`~deap.tools.Statistics` is
109 |     given as argument. The *cxpb* and *mutpb* arguments are passed to the
110 |     :func:`varAnd` function. The pseudocode goes as follows ::
111 | 
112 |         evaluate(population)
113 |         for g in range(ngen):
114 |             elitismNum = int(elitpb * len(population))
115 |             offspringE = selectElitism(population, elitismNum)
116 |             population = select(population, len(population) - elitismNum)
117 |             offspring = varAnd(population, toolbox, cxpb, mutpb)
118 |             offspring = offspring + offspringE
119 |             evaluate(offspring)
120 |             population = offspring
121 | 
122 |     This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
123 |     :meth:`toolbox.select`, :meth:`toolbox.evaluate` and :meth:`toolbox.selectElitism`
124 |     aliases to be
125 |     registered in the toolbox.
126 | 
127 |     .. [Back2000] Back, Fogel and Michalewicz, "Evolutionary Computation 1 :
128 |        Basic Algorithms and Operators", 2000. 
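
    Illustrative call (mirrors the invocation in COGP_main.py, where all of
    these names are defined)::

        pop, log = eaSimple(randomSeeds, pop, toolbox, cxProb, mutProb,
                            elitismProb, generation, stats=mstats,
                            halloffame=hof, verbose=True)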
129 | """ 130 | logbook = tools.Logbook() 131 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else []) 132 | # Evaluate the individuals with an invalid fitness 133 | #invalid_ind = [ind for ind in population if not ind.fitness.valid] 134 | #print(len(invalid_ind)) 135 | #print(toolbox.evaluate(population[0])) 136 | #print(population[0]) 137 | fitnesses = toolbox.mapp(toolbox.evaluate, population) 138 | for ind, fit in zip(population, fitnesses): 139 | ind.fitness.values = fit 140 | 141 | if halloffame is not None: 142 | halloffame.update(population) 143 | hof_store = tools.HallOfFame(5 * len(population)) 144 | hof_store.update(population) 145 | record = stats.compile(population) if stats else {} 146 | logbook.record(gen=0, nevals=len(population), **record) 147 | if verbose: 148 | print(logbook.stream) 149 | 150 | for gen in range(1, ngen + 1): 151 | #Select the next generation individuals by elitism 152 | elitismNum=int(elitpb * len(population)) 153 | population_for_eli=[toolbox.clone(ind) for ind in population] 154 | offspringE = toolbox.selectElitism(population_for_eli, k=elitismNum) 155 | offspring = toolbox.select(population, len(population)-elitismNum) 156 | # Vary the pool of individuals 157 | offspring = varAnd(offspring, toolbox, cxpb, mutpb) 158 | # add offspring from elitism into current offspring 159 | #generate the next generation individuals 160 | 161 | # Evaluate the individuals with an invalid fitness 162 | for i in offspring: 163 | ind = 0 164 | while ind= min_ 45 | return generate(pset, min_, max_, condition, type_) 46 | 47 | def genHalfAndHalf(pset, min_, max_, type_=None): 48 | """Generate an expression with a PrimitiveSet *pset*. 49 | Half the time, the expression is generated with :func:`~deap.gp.genGrow`, 50 | the other half, the expression is generated with :func:`~deap.gp.genFull`. 51 | 52 | :param pset: Primitive set from which primitives are selected. 53 | :param min_: Minimum height of the produced trees. 54 | :param max_: Maximum Height of the produced trees. 55 | :param type_: The type that should return the tree when called, when 56 | :obj:`None` (default) no return type is enforced. 57 | :returns: Either, a full or a grown tree. 58 | """ 59 | method = random.choice((genGrow, genFull)) 60 | #print(method) 61 | return method(pset, min_, max_, type_) 62 | 63 | def genRamped(pset, min_, max_, type_=None): 64 | """ 65 | .. deprecated:: 1.0 66 | The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead. 67 | """ 68 | warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.", 69 | FutureWarning) 70 | return genHalfAndHalf(pset, min_, max_, type_) 71 | 72 | def generate(pset, min_, max_, condition, type_=__type__): 73 | """Generate a Tree as a list of list. The tree is build 74 | from the root to the leaves, and it stop growing when the 75 | condition is fulfilled. 76 | :param pset: A primitive set from wich to select primitives of the trees. 77 | :param min_: Minimum height of the produced trees. 78 | :param max_: Maximum Height of the produced trees. 79 | :param condition: The condition is a function that takes two arguments, 80 | the height of the tree to build and the current 81 | depth in the tree. 82 | :param type_: The type that should return the tree when called, when 83 | :obj:`None` (default) no return type is enforced. 84 | :returns: A grown tree with leaves at possibly different depths 85 | dependending on the condition function. 
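
    For example, :func:`genFullMD` below passes a condition of
    ``depth == height``, so every branch grows to exactly the sampled height
    before a terminal is placed.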
86 | 87 | 88 | DUMMY NODE ISSUES 89 | 90 | DEAP will only place terminals if we're at the bottom of a branch. 91 | This creates two issues: 92 | 1. A primitive that takes other primitives as inputs could be placed at the 93 | second to last layer. 94 | SOLUTION: You need to allow the tree to end whenever the height condition is met, 95 | so create "dummy" terminals for every type possible in the tree. 96 | 2. A primitive that takes terminals as inputs could be placed above the second to 97 | last layer. 98 | SOLUTION: You need to allow the tree to continue extending the branch until the 99 | height condition is met, so create "dummy" primitives that just pass 100 | through the terminal types. 101 | 102 | These "dummy" terminals and "dummy" primitives introduce unnecessary and sometimes 103 | nonsensical solutions into populations. These "dummy" nodes can be eliminated 104 | if the height requirement is relaxed. 105 | 106 | 107 | HOW TO PREVENT DUMMY NODE ISSUES 108 | 109 | Relaxing the height requirement: 110 | When at the bottom of the branch, check for terminals first, then primitives. 111 | When checking for primitives, skirt the height requirement by adjusting 112 | the branch depth to be the second to last layer of the tree. 113 | If neither a terminal or primitive fits this node, then throw an error. 114 | When not at the bottom of the branch, check for primitives first, then terminals. 115 | 116 | Issue with relaxing the height requirement: 117 | 1. Endless loops are possible when primitive sets have any type loops. 118 | A primitive with an output of one type may not take an input type of 119 | itself or a parent type. 120 | SOLUTION: A primitive set must be well-designed to prevent those type loops. 121 | 122 | """ 123 | if type_ is None: 124 | type_ = pset.ret 125 | expr = [] 126 | height = random.randint(min_, max_) 127 | stack = [(0, type_)] 128 | #print(len(stack)) 129 | #print(pset.terminals) 130 | #print(pset.primitives) 131 | while len(stack) != 0: 132 | ## print(len(expr)) 133 | if (len(expr)>60): 134 | expr = [] 135 | #if type_ is None: 136 | type_ = pset.ret 137 | stack = [(0, type_)] 138 | ## print(depth, type_) 139 | #depth, type_ = stack.pop() 140 | height = random.randint(min_, max_) 141 | depth, type_ = stack.pop() 142 | if condition(height, depth): 143 | # Try finding a terminal 144 | try: 145 | term = random.choice(pset.terminals[type_]) 146 | #print('term',term) 147 | if isclass(term): 148 | term = term() 149 | expr.append(term) 150 | # No terminal fits 151 | except: 152 | # So pull the depth back one layer, and start looking for primitives 153 | try: 154 | depth -= 1 155 | prim = random.choice(pset.primitives[type_]) 156 | #print('prim',prim) 157 | expr.append(prim) 158 | for arg in reversed(prim.args): 159 | stack.append((depth, arg)) 160 | 161 | # No primitive fits, either - that's an error 162 | except IndexError: 163 | _, _, traceback = sys.exc_info() 164 | raise IndexError("The gp.generate function tried to add " \ 165 | "a primitive of type '%s', but there is " \ 166 | "none available." 
% (type_,), traceback) 167 | # Not at the bottom of the tree 168 | else: 169 | # Check for primitives 170 | try: 171 | prim = random.choice(pset.primitives[type_]) 172 | expr.append(prim) 173 | for arg in reversed(prim.args): 174 | stack.append((depth + 1, arg)) 175 | # No primitive fits 176 | except: 177 | # So check for terminals 178 | try: 179 | term = random.choice(pset.terminals[type_]) 180 | 181 | # No terminal fits, either - that's an error 182 | except IndexError: 183 | _, _, traceback = sys.exc_info() 184 | raise IndexError("The gp.generate function tried to add " \ 185 | "a terminal of type '%s', but there is " \ 186 | "none available." % (type_,), traceback) 187 | if isclass(term): 188 | term = term() 189 | expr.append(term) 190 | #print(len(expr)) 191 | return expr 192 | 193 | 194 | def generateMD(pset, min_, max_, condition, type_=__type__): 195 | if type_ is None: 196 | type_ = pset.ret 197 | expr = [] 198 | height = random.randint(min_, max_) 199 | stack = [(0, type_)] 200 | ## print(len(stack)) 201 | #print(pset.terminals) 202 | #print(pset.primitives) 203 | while len(stack) != 0: 204 | depth, type_ = stack.pop() 205 | if condition(height, depth): 206 | # Try finding a terminal 207 | try: 208 | term = random.choice(pset.terminals[type_]) 209 | #print('term',term) 210 | if isclass(term): 211 | term = term() 212 | expr.append(term) 213 | # No terminal fits 214 | except: 215 | # So pull the depth back one layer, and start looking for primitives 216 | try: 217 | depth -= 1 218 | prim = random.choice(pset.primitives[type_]) 219 | #print('prim',prim) 220 | expr.append(prim) 221 | for arg in reversed(prim.args): 222 | stack.append((depth, arg)) 223 | 224 | # No primitive fits, either - that's an error 225 | except IndexError: 226 | _, _, traceback = sys.exc_info() 227 | raise IndexError("The gp.generate function tried to add " \ 228 | "a primitive of type '%s', but there is " \ 229 | "none available." % (type_,), traceback) 230 | # Not at the bottom of the tree 231 | else: 232 | # Check for primitives 233 | try: 234 | prim = random.choice(pset.primitives[type_]) 235 | expr.append(prim) 236 | for arg in reversed(prim.args): 237 | stack.append((depth + 1, arg)) 238 | # No primitive fits 239 | except: 240 | # So check for terminals 241 | try: 242 | term = random.choice(pset.terminals[type_]) 243 | 244 | # No terminal fits, either - that's an error 245 | except IndexError: 246 | _, _, traceback = sys.exc_info() 247 | raise IndexError("The gp.generate function tried to add " \ 248 | "a terminal of type '%s', but there is " \ 249 | "none available." % (type_,), traceback) 250 | if isclass(term): 251 | term = term() 252 | expr.append(term) 253 | #print(len(expr)) 254 | return expr 255 | 256 | def genHalfAndHalfMD(pset, min_, max_, type_=None): 257 | expr=genHalfAndHalf(pset, min_, max_, type_=None) 258 | #print(expr) 259 | #print('expr before', len(expr)) 260 | while len(expr)>60: 261 | expr=genHalfAndHalf(pset, min_, max_, type_=None) 262 | #print('expr before', len(expr)) 263 | #print('expr after',len(expr)) 264 | return expr 265 | 266 | def genFullMD(pset, min_, max_, type_=None): 267 | """Generate an expression where each leaf has a the same depth 268 | between *min* and *max*. 269 | 270 | :param pset: Primitive set from which primitives are selected. 271 | :param min_: Minimum height of the produced trees. 272 | :param max_: Maximum Height of the produced trees. 
273 | :param type_: The type that should return the tree when called, when 274 | :obj:`None` (default) no return type is enforced. 275 | :returns: A full tree with all leaves at the same depth. 276 | """ 277 | def condition(height, depth): 278 | """Expression generation stops when the depth is equal to height.""" 279 | return depth == height 280 | #print('it works', pset)] 281 | expr=generateMD(pset, min_, max_, condition, type_) 282 | ## print(len(expr)) 283 | ## while len(expr)>20: 284 | ## expr=generateMD(pset, min_, max_, condition, type_) 285 | return expr 286 | -------------------------------------------------------------------------------- /COGP/gp_tree.py: -------------------------------------------------------------------------------- 1 | # This file is part of DEAP. 2 | # 3 | # DEAP is free software: you can redistribute it and/or modify 4 | # it under the terms of the GNU Lesser General Public License as 5 | # published by the Free Software Foundation, either version 3 of 6 | # the License, or (at your option) any later version. 7 | # 8 | # DEAP is distributed in the hope that it will be useful, 9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | # GNU Lesser General Public License for more details. 12 | # 13 | # You should have received a copy of the GNU Lesser General Public 14 | # License along with DEAP. If not, see . 15 | 16 | """The :mod:`gp` module provides the methods and classes to perform 17 | Genetic Programming with DEAP. It essentially contains the classes to 18 | build a Genetic Program Tree, and the functions to evaluate it. 19 | 20 | This module support both strongly and loosely typed GP. 21 | """ 22 | import copy 23 | import re 24 | from collections import defaultdict, deque 25 | 26 | 27 | ###################################### 28 | # GP Data structure # 29 | ###################################### 30 | 31 | # Define the name of type for any types. 32 | __type__ = object 33 | 34 | 35 | class PrimitiveTree(list): 36 | """Tree specifically formatted for optimization of genetic 37 | programming operations. The tree is represented with a 38 | list where the nodes are appended in a depth-first order. 39 | The nodes appended to the tree are required to 40 | have an attribute *arity* which defines the arity of the 41 | primitive. An arity of 0 is expected from terminals nodes. 42 | """ 43 | def __init__(self, content): 44 | list.__init__(self, content) 45 | 46 | def __deepcopy__(self, memo): 47 | new = self.__class__(self) 48 | new.__dict__.update(copy.deepcopy(self.__dict__, memo)) 49 | return new 50 | 51 | def __setitem__(self, key, val): 52 | # Check for most common errors 53 | # Does NOT check for STGP constraints 54 | if isinstance(key, slice): 55 | if key.start >= len(self): 56 | raise IndexError("Invalid slice object (try to assign a %s" 57 | " in a tree of size %d). Even if this is allowed by the" 58 | " list object slice setter, this should not be done in" 59 | " the PrimitiveTree context, as this may lead to an" 60 | " unpredictable behavior for searchSubtree or evaluate." 61 | % (key, len(self))) 62 | total = val[0].arity 63 | for node in val[1:]: 64 | total += node.arity - 1 65 | if total != 0: 66 | raise ValueError("Invalid slice assignation : insertion of" 67 | " an incomplete subtree is not allowed in PrimitiveTree." 68 | " A tree is defined as incomplete when some nodes cannot" 69 | " be mapped to any position in the tree, considering the" 70 | " primitives' arity. 
For instance, the tree [sub, 4, 5," 71 | " 6] is incomplete if the arity of sub is 2, because it" 72 | " would produce an orphan node (the 6).") 73 | elif val.arity != self[key].arity: 74 | raise ValueError("Invalid node replacement with a node of a" 75 | " different arity.") 76 | list.__setitem__(self, key, val) 77 | 78 | def __str__(self): 79 | """Return the expression in a human readable string. 80 | """ 81 | string = "" 82 | stack = [] 83 | for node in self: 84 | stack.append((node, [])) 85 | while len(stack[-1][1]) == stack[-1][0].arity: 86 | prim, args = stack.pop() 87 | string = prim.format(*args) 88 | if len(stack) == 0: 89 | break # If stack is empty, all nodes should have been seen 90 | stack[-1][1].append(string) 91 | 92 | return string 93 | 94 | @classmethod 95 | def from_string(cls, string, pset): 96 | """Try to convert a string expression into a PrimitiveTree given a 97 | PrimitiveSet *pset*. The primitive set needs to contain every primitive 98 | present in the expression. 99 | 100 | :param string: String representation of a Python expression. 101 | :param pset: Primitive set from which primitives are selected. 102 | :returns: PrimitiveTree populated with the deserialized primitives. 103 | """ 104 | tokens = re.split("[ \t\n\r\f\v(),]", string) 105 | expr = [] 106 | ret_types = deque() 107 | for token in tokens: 108 | if token == '': 109 | continue 110 | if len(ret_types) != 0: 111 | type_ = ret_types.popleft() 112 | else: 113 | type_ = None 114 | 115 | if token in pset.mapping: 116 | primitive = pset.mapping[token] 117 | 118 | if type_ is not None and not issubclass(primitive.ret, type_): 119 | raise TypeError("Primitive {} return type {} does not " 120 | "match the expected one: {}." 121 | .format(primitive, primitive.ret, type_)) 122 | 123 | expr.append(primitive) 124 | if isinstance(primitive, Primitive): 125 | ret_types.extendleft(reversed(primitive.args)) 126 | else: 127 | try: 128 | token = eval(token) 129 | except NameError: 130 | raise TypeError("Unable to evaluate terminal: {}.".format(token)) 131 | 132 | if type_ is None: 133 | type_ = type(token) 134 | 135 | if not issubclass(type(token), type_): 136 | raise TypeError("Terminal {} type {} does not " 137 | "match the expected one: {}." 138 | .format(token, type(token), type_)) 139 | 140 | expr.append(Terminal(token, False, type_)) 141 | return cls(expr) 142 | 143 | @property 144 | def height(self): 145 | """Return the height of the tree, or the depth of the 146 | deepest node. 147 | """ 148 | stack = [0] 149 | max_depth = 0 150 | for elem in self: 151 | depth = stack.pop() 152 | max_depth = max(max_depth, depth) 153 | stack.extend([depth + 1] * elem.arity) 154 | return max_depth 155 | 156 | @property 157 | def root(self): 158 | """Root of the tree, the element 0 of the list. 159 | """ 160 | return self[0] 161 | 162 | def searchSubtree(self, begin): 163 | """Return a slice object that corresponds to the 164 | range of values that defines the subtree which has the 165 | element with index *begin* as its root. 166 | """ 167 | end = begin + 1 168 | total = self[begin].arity 169 | while total > 0: 170 | total += self[end].arity - 1 171 | end += 1 172 | return slice(begin, end) 173 | 174 | 175 | class Primitive(object): 176 | """Class that encapsulates a primitive and when called with arguments it 177 | returns the Python code to call the primitive with the arguments. 
178 | 179 | # >>> pr = Primitive("mul", (int, int), int) 180 | # >>> pr.format(1, 2) 181 | 'mul(1, 2)' 182 | """ 183 | __slots__ = ('name', 'arity', 'args', 'ret', 'seq') 184 | 185 | def __init__(self, name, args, ret): 186 | self.name = name 187 | self.arity = len(args) 188 | self.args = args 189 | self.ret = ret 190 | args = ", ".join(map("{{{0}}}".format, list(range(self.arity)))) 191 | self.seq = "{name}({args})".format(name=self.name, args=args) 192 | 193 | def format(self, *args): 194 | return self.seq.format(*args) 195 | 196 | def __eq__(self, other): 197 | if type(self) is type(other): 198 | return all(getattr(self, slot) == getattr(other, slot) 199 | for slot in self.__slots__) 200 | else: 201 | return NotImplemented 202 | 203 | 204 | class Terminal(object): 205 | """Class that encapsulates terminal primitive in expression. Terminals can 206 | be values or 0-arity functions. 207 | """ 208 | __slots__ = ('name', 'value', 'ret', 'conv_fct') 209 | 210 | def __init__(self, terminal, symbolic, ret): 211 | self.ret = ret 212 | self.value = terminal 213 | self.name = str(terminal) 214 | self.conv_fct = str if symbolic else repr 215 | 216 | @property 217 | def arity(self): 218 | return 0 219 | 220 | def format(self): 221 | return self.conv_fct(self.value) 222 | 223 | def __eq__(self, other): 224 | if type(self) is type(other): 225 | return all(getattr(self, slot) == getattr(other, slot) 226 | for slot in self.__slots__) 227 | else: 228 | return NotImplemented 229 | 230 | 231 | class Ephemeral(Terminal): 232 | """Class that encapsulates a terminal which value is set when the 233 | object is created. To mutate the value, a new object has to be 234 | generated. This is an abstract base class. When subclassing, a 235 | staticmethod 'func' must be defined. 236 | """ 237 | def __init__(self): 238 | Terminal.__init__(self, self.func(), symbolic=False, ret=self.ret) 239 | 240 | @staticmethod 241 | def func(): 242 | """Return a random value used to define the ephemeral state. 243 | """ 244 | raise NotImplementedError 245 | 246 | 247 | class PrimitiveSetTyped(object): 248 | """Class that contains the primitives that can be used to solve a 249 | Strongly Typed GP problem. The set also defined the researched 250 | function return type, and input arguments type and number. 251 | """ 252 | def __init__(self, name, in_types, ret_type, prefix="ARG"): 253 | self.terminals = defaultdict(list) 254 | self.primitives = defaultdict(list) 255 | self.arguments = [] 256 | # setting "__builtins__" to None avoid the context 257 | # being polluted by builtins function when evaluating 258 | # GP expression. 259 | self.context = {"__builtins__": None} 260 | self.mapping = dict() 261 | self.terms_count = 0 262 | self.prims_count = 0 263 | 264 | self.name = name 265 | self.ret = ret_type 266 | self.ins = in_types 267 | for i, type_ in enumerate(in_types): 268 | arg_str = "{prefix}{index}".format(prefix=prefix, index=i) 269 | self.arguments.append(arg_str) 270 | term = Terminal(arg_str, True, type_) 271 | self._add(term) 272 | self.terms_count += 1 273 | 274 | def renameArguments(self, **kargs): 275 | """Rename function arguments with new names from *kargs*. 
276 | """ 277 | for i, old_name in enumerate(self.arguments): 278 | if old_name in kargs: 279 | new_name = kargs[old_name] 280 | self.arguments[i] = new_name 281 | self.mapping[new_name] = self.mapping[old_name] 282 | self.mapping[new_name].value = new_name 283 | del self.mapping[old_name] 284 | 285 | def _add(self, prim): 286 | def addType(dict_, ret_type): 287 | if ret_type not in dict_: 288 | new_list = [] 289 | for type_, list_ in list(dict_.items()): 290 | if issubclass(type_, ret_type): 291 | for item in list_: 292 | if item not in new_list: 293 | new_list.append(item) 294 | dict_[ret_type] = new_list 295 | 296 | addType(self.primitives, prim.ret) 297 | addType(self.terminals, prim.ret) 298 | 299 | self.mapping[prim.name] = prim 300 | if isinstance(prim, Primitive): 301 | for type_ in prim.args: 302 | addType(self.primitives, type_) 303 | addType(self.terminals, type_) 304 | dict_ = self.primitives 305 | else: 306 | dict_ = self.terminals 307 | 308 | for type_ in dict_: 309 | if issubclass(prim.ret, type_): 310 | dict_[type_].append(prim) 311 | 312 | def addPrimitive(self, primitive, in_types, ret_type, name=None): 313 | """Add a primitive to the set. 314 | 315 | :param primitive: callable object or a function. 316 | :parma in_types: list of primitives arguments' type 317 | :param ret_type: type returned by the primitive. 318 | :param name: alternative name for the primitive instead 319 | of its __name__ attribute. 320 | """ 321 | if name is None: 322 | name = primitive.__name__ 323 | prim = Primitive(name, in_types, ret_type) 324 | 325 | assert name not in self.context or \ 326 | self.context[name] is primitive, \ 327 | "Primitives are required to have a unique name. " \ 328 | "Consider using the argument 'name' to rename your "\ 329 | "second '%s' primitive." % (name,) 330 | 331 | self._add(prim) 332 | self.context[prim.name] = primitive 333 | self.prims_count += 1 334 | 335 | def addTerminal(self, terminal, ret_type, name=None): 336 | """Add a terminal to the set. Terminals can be named 337 | using the optional *name* argument. This should be 338 | used : to define named constant (i.e.: pi); to speed the 339 | evaluation time when the object is long to build; when 340 | the object does not have a __repr__ functions that returns 341 | the code to build the object; when the object class is 342 | not a Python built-in. 343 | 344 | :param terminal: Object, or a function with no arguments. 345 | :param ret_type: Type of the terminal. 346 | :param name: defines the name of the terminal in the expression. 347 | """ 348 | symbolic = False 349 | if name is None and callable(terminal): 350 | name = terminal.__name__ 351 | 352 | assert name not in self.context, \ 353 | "Terminals are required to have a unique name. " \ 354 | "Consider using the argument 'name' to rename your "\ 355 | "second %s terminal." % (name,) 356 | 357 | if name is not None: 358 | self.context[name] = terminal 359 | terminal = name 360 | symbolic = True 361 | elif terminal in (True, False): 362 | # To support True and False terminals with Python 2. 363 | self.context[str(terminal)] = terminal 364 | 365 | prim = Terminal(terminal, symbolic, ret_type) 366 | self._add(prim) 367 | self.terms_count += 1 368 | 369 | def addEphemeralConstant(self, name, ephemeral, ret_type): 370 | """Add an ephemeral constant to the set. An ephemeral constant 371 | is a no argument function that returns a random value. The value 372 | of the constant is constant for a Tree, but may differ from one 373 | Tree to another. 
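
    Example (as used for the COGP terminal set in COGP_main.py)::

        pset.addEphemeralConstant('randomD', lambda: round(random.random(), 3), Double)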
374 | 375 | :param name: name used to refers to this ephemeral type. 376 | :param ephemeral: function with no arguments returning a random value. 377 | :param ret_type: type of the object returned by *ephemeral*. 378 | """ 379 | module_gp = globals() 380 | if name not in module_gp: 381 | class_ = type(name, (Ephemeral,), {'func': staticmethod(ephemeral), 382 | 'ret': ret_type}) 383 | module_gp[name] = class_ 384 | else: 385 | class_ = module_gp[name] 386 | if issubclass(class_, Ephemeral): 387 | #if class_.func is not ephemeral: 388 | # raise Exception("Ephemerals with different functions should " 389 | # "be named differently, even between psets.") 390 | if class_.ret is not ret_type: 391 | raise Exception("Ephemerals with the same name and function " 392 | "should have the same type, even between psets.") 393 | else: 394 | raise Exception("Ephemerals should be named differently " 395 | "than classes defined in the gp module.") 396 | 397 | self._add(class_) 398 | self.terms_count += 1 399 | 400 | def addADF(self, adfset): 401 | """Add an Automatically Defined Function (ADF) to the set. 402 | 403 | :param adfset: PrimitiveSetTyped containing the primitives with which 404 | the ADF can be built. 405 | """ 406 | prim = Primitive(adfset.name, adfset.ins, adfset.ret) 407 | self._add(prim) 408 | self.prims_count += 1 409 | 410 | @property 411 | def terminalRatio(self): 412 | """Return the ratio of the number of terminals on the number of all 413 | kind of primitives. 414 | """ 415 | return self.terms_count / float(self.terms_count + self.prims_count) 416 | 417 | 418 | class PrimitiveSet(PrimitiveSetTyped): 419 | """Class same as :class:`~deap.gp.PrimitiveSetTyped`, except there is no 420 | definition of type. 421 | """ 422 | def __init__(self, name, arity, prefix="ARG"): 423 | args = [__type__] * arity 424 | PrimitiveSetTyped.__init__(self, name, args, __type__, prefix) 425 | 426 | def addPrimitive(self, primitive, arity, name=None): 427 | """Add primitive *primitive* with arity *arity* to the set. 428 | If a name *name* is provided, it will replace the attribute __name__ 429 | attribute to represent/identify the primitive. 
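
    Example (illustrative only; standard usage for an untyped set)::

        pset = PrimitiveSet("MAIN", 1)
        pset.addPrimitive(operator.add, 2)
        pset.addPrimitive(operator.neg, 1)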
430 | """ 431 | assert arity > 0, "arity should be >= 1" 432 | args = [__type__] * arity 433 | PrimitiveSetTyped.addPrimitive(self, primitive, args, __type__, name) 434 | 435 | def addTerminal(self, terminal, name=None): 436 | """Add a terminal to the set.""" 437 | PrimitiveSetTyped.addTerminal(self, terminal, __type__, name) 438 | 439 | def addEphemeralConstant(self, name, ephemeral): 440 | """Add an ephemeral constant to the set.""" 441 | PrimitiveSetTyped.addEphemeralConstant(self, name, ephemeral, __type__) 442 | 443 | 444 | if __name__ == "__main__": 445 | import doctest 446 | doctest.testmod() -------------------------------------------------------------------------------- /FELGP/FELGP_main.py: -------------------------------------------------------------------------------- 1 | #python packages 2 | import operator 3 | import random 4 | import time 5 | import gp_restrict as gp_restrict 6 | import algo_iegp as evalGP 7 | import numpy as np 8 | from deap import base, creator, tools, gp 9 | import felgp_functions as felgp_fs 10 | from strongGPDataType import Int1, Int2, Int3, Int4, Int5, Int6 11 | from strongGPDataType import Float1, Float2, Float3 12 | from strongGPDataType import Array1, Array2, Array3, Array4, Array5, Array6 13 | # defined by author 14 | import saveFile 15 | import sys 16 | 17 | randomSeeds = 12 18 | dataSetName = 'f1' 19 | 20 | 21 | x_train = np.load('../'+dataSetName+'_train_data.npy')/255.0 22 | y_train = np.load('../'+dataSetName+'_train_label.npy') 23 | x_test = np.load('../'+dataSetName+'_test_data.npy')/255.0 24 | y_test = np.load('../'+dataSetName+'_test_label.npy') 25 | 26 | print(x_train.shape,y_train.shape, x_test.shape,y_test.shape) 27 | print(x_train.max()) 28 | #parameters: 29 | num_train = x_train.shape[0] 30 | pop_size=100 31 | generation=50 32 | cxProb=0.8 33 | mutProb=0.19 34 | elitismProb=0.01 35 | totalRuns = 1 36 | initialMinDepth=2 37 | initialMaxDepth=8 38 | maxDepth=8 39 | 40 | ##GP 41 | pset = gp.PrimitiveSetTyped('MAIN', [Array1, Array2], Array6, prefix = 'Image') 42 | # Combination, use 'Combine' for increasing the depth of the GP tree 43 | pset.addPrimitive(felgp_fs.combine, [Array6, Array6, Array6], Array6, name='Combine') 44 | pset.addPrimitive(felgp_fs.combine, [Array5, Array5, Array5], Array6, name='Combine3') 45 | pset.addPrimitive(felgp_fs.combine, [Array5, Array5, Array5, Array5, Array5], Array6, name='Combine5') 46 | pset.addPrimitive(felgp_fs.combine, [Array5, Array5, Array5, Array5, Array5, Array5, Array5], Array6, name='Combine7') 47 | #Classification 48 | pset.addPrimitive(felgp_fs.linear_svm, [Array4, Array2, Int4], Array5, name='SVM') 49 | pset.addPrimitive(felgp_fs.lr, [Array4, Array2, Int4], Array5, name='LR') 50 | pset.addPrimitive(felgp_fs.randomforest, [Array4, Array2, Int5, Int6], Array5, name='RF') 51 | pset.addPrimitive(felgp_fs.erandomforest, [Array4, Array2, Int5, Int6], Array5, name='ERF') 52 | ###Feature Concatenation 53 | pset.addPrimitive(felgp_fs.FeaCon2, [Array4, Array4], Array4, name ='FeaCon') 54 | pset.addPrimitive(felgp_fs.FeaCon2, [Array3, Array3], Array4, name ='FeaCon2') 55 | pset.addPrimitive(felgp_fs.FeaCon3, [Array3, Array3, Array3], Array4, name ='FeaCon3') 56 | pset.addPrimitive(felgp_fs.FeaCon4, [Array3, Array3, Array3, Array3], Array4, name ='FeaCon4') 57 | #Feature Extraction 58 | pset.addPrimitive(felgp_fs.global_hog_small, [Array1], Array3, name = 'F_HOG') 59 | pset.addPrimitive(felgp_fs.all_lbp, [Array1], Array3, name = 'F_uLBP') 60 | pset.addPrimitive(felgp_fs.all_sift, [Array1], Array3, name 
= 'F_SIFT') 61 | ##Filtering and Pooling 62 | pset.addPrimitive(felgp_fs.maxP, [Array1, Int3, Int3], Array1,name='MaxP') 63 | pset.addPrimitive(felgp_fs.gau, [Array1, Int1], Array1, name='Gau') 64 | pset.addPrimitive(felgp_fs.gauD, [Array1, Int1, Int2, Int2], Array1, name='GauD') 65 | pset.addPrimitive(felgp_fs.gab, [Array1, Float1, Float2], Array1, name='Gabor') 66 | pset.addPrimitive(felgp_fs.laplace, [Array1], Array1, name='Lap') 67 | pset.addPrimitive(felgp_fs.gaussian_Laplace1, [Array1], Array1, name='LoG1') 68 | pset.addPrimitive(felgp_fs.gaussian_Laplace2, [Array1], Array1, name='LoG2') 69 | pset.addPrimitive(felgp_fs.sobelxy, [Array1], Array1, name='Sobel') 70 | pset.addPrimitive(felgp_fs.sobelx, [Array1], Array1, name='SobelX') 71 | pset.addPrimitive(felgp_fs.sobely, [Array1], Array1, name='SobelY') 72 | pset.addPrimitive(felgp_fs.lbp, [Array1], Array1, name='LBP') 73 | pset.addPrimitive(felgp_fs.hog_feature, [Array1], Array1, name='HoG') 74 | pset.addPrimitive(felgp_fs.medianf, [Array1], Array1,name='Med') 75 | pset.addPrimitive(felgp_fs.maxf, [Array1], Array1,name='Max') 76 | pset.addPrimitive(felgp_fs.minf, [Array1], Array1,name='Min') 77 | pset.addPrimitive(felgp_fs.meanf, [Array1], Array1,name='Mean') 78 | pset.addPrimitive(felgp_fs.sqrt, [Array1], Array1, name='Sqrt') 79 | pset.addPrimitive(felgp_fs.mixconadd, [Array1, Float3, Array1, Float3], Array1, name='W_Add') 80 | pset.addPrimitive(felgp_fs.mixconsub, [Array1, Float3, Array1, Float3], Array1, name='W_Sub') 81 | pset.addPrimitive(felgp_fs.relu, [Array1], Array1, name='Relu') 82 | #Terminals 83 | pset.renameArguments(ARG0='grey') 84 | pset.addEphemeralConstant('Singma', lambda: random.randint(1, 4), Int1) 85 | pset.addEphemeralConstant('Order', lambda: random.randint(0, 3), Int2) 86 | pset.addEphemeralConstant('Theta', lambda: random.randint(0, 8), Float1) 87 | pset.addEphemeralConstant('Frequency', lambda: random.randint(0, 5), Float2) 88 | pset.addEphemeralConstant('n', lambda: round(random.random(), 3), Float3) 89 | pset.addEphemeralConstant('KernelSize', lambda: random.randrange(2, 5, 2), Int3) 90 | pset.addEphemeralConstant('C', lambda: random.randint(-2, 5), Int4) 91 | pset.addEphemeralConstant('num_Tree', lambda: random.randrange(50, 501, 10), Int5) 92 | pset.addEphemeralConstant('tree_Depth', lambda: random.randrange(10, 101, 10), Int6) 93 | ## 94 | creator.create("FitnessMax", base.Fitness, weights=(1.0,)) 95 | creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax) 96 | 97 | toolbox = base.Toolbox() 98 | toolbox.register("expr", gp_restrict.genHalfAndHalfMD, pset=pset, min_=initialMinDepth, max_=initialMaxDepth) 99 | toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr) 100 | toolbox.register("population", tools.initRepeat, list, toolbox.individual) 101 | toolbox.register("compile", gp.compile, pset=pset) 102 | toolbox.register("mapp", map) 103 | 104 | 105 | def evalTrain(toolbox, individual, hof, trainData, trainLabel): 106 | if len(hof) != 0 and individual in hof: 107 | ind = 0 108 | while ind < len(hof): 109 | if individual == hof[ind]: 110 | accuracy, = hof[ind].fitness.values 111 | ind = len(hof) 112 | else: ind+=1 113 | else: 114 | try: 115 | func = toolbox.compile(expr=individual) 116 | output = np.asarray(func(trainData, trainLabel)) 117 | y_predict = np.argmax(output, axis=1) 118 | accuracy = 100*np.sum(y_predict == trainLabel) / len(trainLabel) 119 | except: 120 | accuracy=0 121 | return accuracy, 122 | 123 | 124 | toolbox.register("evaluate", 
evalTrain,toolbox, trainData=x_train,trainLabel=y_train) 125 | toolbox.register("select", tools.selTournament,tournsize=7) 126 | toolbox.register("selectElitism", tools.selBest) 127 | toolbox.register("mate", gp.cxOnePoint) 128 | toolbox.register("expr_mut", gp_restrict.genFull, min_=0, max_=2) 129 | toolbox.register("mutate_eph", gp.mutEphemeral, mode='all') 130 | toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset) 131 | toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 132 | toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 133 | 134 | def GPMain(randomSeeds): 135 | 136 | random.seed(randomSeeds) 137 | 138 | pop = toolbox.population(pop_size) 139 | hof = tools.HallOfFame(10) 140 | log = tools.Logbook() 141 | stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values) 142 | stats_size_tree = tools.Statistics(key=len) 143 | mstats = tools.MultiStatistics(fitness=stats_fit, size_tree=stats_size_tree) 144 | mstats.register("avg", np.mean) 145 | mstats.register("std", np.std) 146 | mstats.register("min", np.min) 147 | mstats.register("max", np.max) 148 | log.header = ["gen", "evals"] + mstats.fields 149 | 150 | pop, log = evalGP.eaSimple(pop, toolbox, cxProb, mutProb, elitismProb, generation,randomSeeds, 151 | stats=mstats, halloffame=hof, verbose=True) 152 | 153 | return pop,log, hof 154 | 155 | def evalTest(toolbox, individual, trainData, trainLabel, test, testL): 156 | x_train = np.concatenate((trainData, test), axis=0) 157 | func = toolbox.compile(expr=individual) 158 | output = np.asarray(func(x_train, trainLabel)) 159 | print(output.shape) 160 | y_predict = np.argmax(output, axis=1) 161 | accuracy = 100*np.sum(y_predict==testL)/len(testL) 162 | return accuracy 163 | 164 | if __name__ == "__main__": 165 | beginTime = time.process_time() 166 | pop, log, hof = GPMain(randomSeeds) 167 | endTime = time.process_time() 168 | trainTime = endTime - beginTime 169 | 170 | testResults = evalTest(toolbox, hof[0], x_train, y_train,x_test, y_test) 171 | saveFile.saveAllResults(randomSeeds, dataSetName, hof, trainTime, testResults, log) 172 | 173 | testTime = time.process_time() - endTime 174 | print('testResults ', testResults) 175 | 176 | -------------------------------------------------------------------------------- /FELGP/algo_iegp.py: -------------------------------------------------------------------------------- 1 | import random 2 | from deap import tools 3 | from collections import defaultdict 4 | 5 | 6 | def pop_compare(ind1, ind2): 7 | # List all available primitive types in each individual 8 | types1 = defaultdict(list) 9 | types2 = defaultdict(list) 10 | for idx, node in enumerate(ind1[1:],1): 11 | types1[node.ret].append(idx) 12 | for idx, node in enumerate(ind2[1:],1): 13 | types2[node.ret].append(idx) 14 | return types1==types2 15 | 16 | def varAnd(population, toolbox, cxpb, mutpb): 17 | """Part of an evolutionary algorithm applying only the variation part 18 | (crossover **and** mutation). The modified individuals have their 19 | fitness invalidated. The individuals are cloned so returned population is 20 | independent of the input population. 21 | 22 | :param population: A list of individuals to vary. 23 | :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution 24 | operators. 25 | :param cxpb: The probability of mating two individuals. 26 | :param mutpb: The probability of mutating an individual. 
27 | 
28 |     :returns: A list of varied individuals that are independent of their
29 |               parents.
30 | 
31 |     The variation goes as follows. First, the parental population
32 |     :math:`P_\mathrm{p}` is duplicated using the :meth:`toolbox.clone` method
33 |     and the result is put into the offspring population :math:`P_\mathrm{o}`. A
34 |     first loop over :math:`P_\mathrm{o}` is executed to mate pairs of
35 |     consecutive individuals. According to the crossover probability *cxpb*, the
36 |     individuals :math:`\mathbf{x}_i` and :math:`\mathbf{x}_{i+1}` are mated
37 |     using the :meth:`toolbox.mate` method. The resulting children
38 |     :math:`\mathbf{y}_i` and :math:`\mathbf{y}_{i+1}` replace their respective
39 |     parents in :math:`P_\mathrm{o}`. A second loop over the resulting
40 |     :math:`P_\mathrm{o}` is executed to mutate every individual with a
41 |     probability *mutpb*. When an individual is mutated, it replaces its
42 |     non-mutated version in :math:`P_\mathrm{o}`. The resulting :math:`P_\mathrm{o}`
43 |     is returned.
44 | 
45 |     This variation is named *And* because of its propensity to apply both
46 |     crossover and mutation on the individuals. Note that both operators are
47 |     not applied systematically; the resulting individuals can be generated from
48 |     crossover only, mutation only, crossover and mutation, or reproduction,
49 |     according to the given probabilities. Both probabilities should be in
50 |     :math:`[0, 1]`.
51 |     """
52 |     offspring = [toolbox.clone(ind) for ind in population]
53 |     new_cxpb = cxpb / (cxpb + mutpb)
54 | 
55 |     #num_cx=int(new_cxpb*len(offspring))
56 |     #num_mu=len(offspring)-num_cx
57 |     #print(new_cxpb, new_mutpb)
58 |     # Apply crossover and mutation on the offspring
59 |     i = 1
60 |     while i < len(offspring):
61 |         if random.random() < new_cxpb:
62 |             if (offspring[i - 1] == offspring[i]) or pop_compare(offspring[i - 1], offspring[i]):
63 |                 offspring[i - 1], = toolbox.mutate(offspring[i - 1])
64 |                 offspring[i], = toolbox.mutate(offspring[i])
65 |             else:
66 |                 offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])
67 |             del offspring[i - 1].fitness.values, offspring[i].fitness.values
68 |             i = i + 2
69 |         else:
70 |             offspring[i], = toolbox.mutate(offspring[i])
71 |             del offspring[i].fitness.values
72 |             i = i + 1
73 |     return offspring
74 | 
75 | 
76 | def eaSimple(population, toolbox, cxpb, mutpb, elitpb, ngen, randomseed, stats=None,
77 |              halloffame=None, verbose=__debug__):
78 |     """This algorithm reproduces the simplest evolutionary algorithm as
79 |     presented in chapter 7 of [Back2000]_.
80 | 
81 |     :param population: A list of individuals.
82 |     :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
83 |                     operators.
84 |     :param cxpb: The probability of mating two individuals.
85 |     :param mutpb: The probability of mutating an individual.
86 |     :param elitpb: The probability of elitism.
87 |     :param ngen: The number of generations.
88 |     :param stats: A :class:`~deap.tools.Statistics` object that is updated
89 |                   inplace, optional.
90 |     :param halloffame: A :class:`~deap.tools.HallOfFame` object that will
91 |                        contain the best individuals, optional.
92 |     :param verbose: Whether or not to log the statistics.
93 |     :returns: The final population
94 |     :returns: A :class:`~deap.tools.Logbook` with the statistics of the
95 |               evolution
96 | 
97 |     The algorithm takes in a population and evolves it in place using the
98 |     :meth:`varAnd` method. 
It returns the optimized population and a 99 | :class:`~deap.tools.Logbook` with the statistics of the evolution. The 100 | logbook will contain the generation number, the number of evalutions for 101 | each generation and the statistics if a :class:`~deap.tools.Statistics` is 102 | given as argument. The *cxpb* and *mutpb* arguments are passed to the 103 | :func:`varAnd` function. The pseudocode goes as follow :: 104 | 105 | evaluate(population) 106 | for g in range(ngen): 107 | elitismNum 108 | offspringE=selectElitism(population,elitismNum) 109 | population = select(population, len(population)-elitismNum) 110 | offspring = varAnd(population, toolbox, cxpb, mutpb) 111 | offspring=offspring+offspringE 112 | evaluate(offspring) 113 | population = offspring. 114 | 115 | This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`, 116 | :meth:`toolbox.select` and :meth:`toolbox.evaluate` and :meth::`toolbox.selectElitism`, 117 | aliases to be 118 | registered in the toolbox. 119 | 120 | .. [Back2000] Back, Fogel and Michalewicz, "Evolutionary Computation 1 : 121 | Basic Algorithms and Operators", 2000. 122 | """ 123 | logbook = tools.Logbook() 124 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else []) 125 | # Evaluate the individuals with an invalid fitness 126 | # invalid_ind = [ind for ind in population if not ind.fitness.valid] 127 | # print(len(invalid_ind)) 128 | 129 | for i in population: 130 | i.fitness.values = toolbox.evaluate(individual=i, hof=[]) 131 | 132 | if halloffame is not None: 133 | halloffame.update(population) 134 | hof_store = tools.HallOfFame(5 * len(population)) 135 | hof_store.update(population) 136 | cop_po = population 137 | record = stats.compile(population) if stats else {} 138 | logbook.record(gen=0, nevals=len(population), **record) 139 | if verbose: 140 | print(logbook.stream) 141 | 142 | for gen in range(1, ngen + 1): 143 | 144 | # Select the next generation individuals by elitism 145 | elitismNum = int(elitpb * len(population)) 146 | population_for_eli = [toolbox.clone(ind) for ind in population] 147 | offspringE = toolbox.selectElitism(population_for_eli, k=elitismNum) 148 | 149 | # Select the next generation individuals for crossover and mutation 150 | offspring = toolbox.select(population, len(population) - elitismNum) 151 | # Vary the pool of individuals 152 | offspring = varAnd(offspring, toolbox, cxpb, mutpb) 153 | # add offspring from elitism into current offspring 154 | # generate the next generation individuals 155 | 156 | # Evaluate the individuals with an invalid fitness 157 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid] 158 | # print(len(invalid_ind)) 159 | for i in invalid_ind: 160 | i.fitness.values = toolbox.evaluate(individual=i, hof=cop_po) 161 | 162 | offspring[0:0] = offspringE 163 | 164 | # Update the hall of fame with the generated 165 | if halloffame is not None: 166 | halloffame.update(offspring) 167 | cop_po = offspring.copy() 168 | hof_store.update(offspring) 169 | for i in hof_store: 170 | cop_po.append(i) 171 | population[:] = offspring 172 | # Append the current generation statistics to the logbook 173 | record = stats.compile(population) if stats else {} 174 | # print(record) 175 | logbook.record(gen=gen, nevals=len(offspring), **record) 176 | # print(record) 177 | if verbose: 178 | print(logbook.stream) 179 | return population, logbook 180 | 181 | -------------------------------------------------------------------------------- /FELGP/f1_properties.txt: 
-------------------------------------------------------------------------------- 1 | Number of Classes 2 2 | positive 0 3 | negative 1 4 | Instances for training 150 5 | Instances for testing 50 6 | Image Size 60 40 7 | -------------------------------------------------------------------------------- /FELGP/f1_test_data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YingBi92/BookCode/9a3ee607444f136e7c3a3e466f7bfd3a9b417ea4/FELGP/f1_test_data.npy -------------------------------------------------------------------------------- /FELGP/f1_test_label.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YingBi92/BookCode/9a3ee607444f136e7c3a3e466f7bfd3a9b417ea4/FELGP/f1_test_label.npy -------------------------------------------------------------------------------- /FELGP/f1_train_data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YingBi92/BookCode/9a3ee607444f136e7c3a3e466f7bfd3a9b417ea4/FELGP/f1_train_data.npy -------------------------------------------------------------------------------- /FELGP/f1_train_label.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YingBi92/BookCode/9a3ee607444f136e7c3a3e466f7bfd3a9b417ea4/FELGP/f1_train_label.npy -------------------------------------------------------------------------------- /FELGP/felgp_functions.py: -------------------------------------------------------------------------------- 1 | import sift_features 2 | import numpy 3 | from pylab import * 4 | from scipy import ndimage 5 | from skimage.filters import gabor 6 | import skimage 7 | from skimage.feature import local_binary_pattern 8 | from skimage.feature import hog 9 | import numpy as np 10 | from sklearn.svm import LinearSVC 11 | from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier 12 | from sklearn import preprocessing 13 | from sklearn.model_selection import StratifiedKFold 14 | from sklearn.linear_model import LogisticRegression 15 | 16 | 17 | def combine(*args): 18 | output = args[0] 19 | for i in range(1, len(args)): 20 | output += args[i] 21 | #print(output.shape) 22 | return output 23 | 24 | def linear_svm(x_train, y_train, cm=0): 25 | #parameters c 26 | c = 10**(cm) 27 | #print(x_train.shape, y_train.shape, num_train, x_train[0:num_train,:].shape, x_train[num_train:-1,:].shape) 28 | classifier = LinearSVC(C=c) 29 | num_train = y_train.shape[0] 30 | if num_train == x_train.shape[0]: 31 | y_labels = svm_train_model(classifier, x_train, y_train) 32 | else: 33 | y_labels = test_function_svm(classifier, x_train[0:num_train,:], y_train, x_train[num_train:x_train.shape[0],:]) 34 | return y_labels 35 | 36 | def lr(x_train, y_train, cm=0): 37 | c = 10**(cm) 38 | #print(x_train.shape, y_train.shape, num_train, x_train[0:num_train,:].shape, x_train[num_train:-1,:].shape) 39 | classifier = LogisticRegression(C=c, solver='sag', multi_class= 'auto', max_iter=1000) 40 | num_train = y_train.shape[0] 41 | if num_train==x_train.shape[0]: 42 | y_labels = svm_train_model(classifier, x_train, y_train) 43 | else: 44 | y_labels = test_function_svm(classifier, x_train[0:num_train,:], y_train, x_train[num_train:x_train.shape[0],:]) 45 | return y_labels 46 | 47 | def randomforest(x_train, y_train, n_tree = 500, max_dep = 100): 48 | #print(x_train.shape, y_train.shape, num_train, x_train[0:num_train,:].shape, 
x_train[num_train:-1,:].shape) 49 | classifier = RandomForestClassifier(n_estimators=n_tree, max_depth=max_dep) 50 | num_train = y_train.shape[0] 51 | if num_train == x_train.shape[0]: 52 | y_labels = train_model_prob(classifier, x_train, y_train) 53 | else: 54 | y_labels = test_function_prob(classifier, x_train[0:num_train,:], y_train, x_train[num_train:x_train.shape[0],:]) 55 | return y_labels 56 | 57 | def erandomforest(x_train, y_train, n_tree = 500, max_dep = 100): 58 | #print(x_train.shape, y_train.shape, num_train, x_train[0:num_train,:].shape, x_train[num_train:-1,:].shape) 59 | classifier = ExtraTreesClassifier(n_estimators=n_tree, max_depth=max_dep) 60 | num_train = y_train.shape[0] 61 | if num_train == x_train.shape[0]: 62 | y_labels = train_model_prob(classifier, x_train, y_train) 63 | else: 64 | y_labels = test_function_prob(classifier, x_train[0:num_train,:], y_train, x_train[num_train:x_train.shape[0],:]) 65 | return y_labels 66 | 67 | def svm_train_model(model, x, y, k=3): 68 | min_max_scaler = preprocessing.MinMaxScaler() 69 | x = min_max_scaler.fit_transform(np.asarray(x)) 70 | kf = StratifiedKFold(n_splits=k) 71 | ni = np.unique(y) 72 | num_class = ni.shape[0] 73 | y_predict = np.zeros((len(y), num_class)) 74 | for train_index, test_index in kf.split(x,y): 75 | x_train, x_test = x[train_index], x[test_index] 76 | y_train, y_test = y[train_index], y[test_index] 77 | model.fit(x_train,y_train) 78 | y_pred = model.predict(x_test) 79 | y_label = [] 80 | for i in y_pred: 81 | binary_label = np.zeros((num_class)) 82 | binary_label[int(i)] = 1 83 | y_label.append(binary_label) 84 | y_predict[test_index,:] = np.asarray(y_label) 85 | return y_predict 86 | 87 | def test_function_svm(model, x_train, y_train, x_test): 88 | min_max_scaler = preprocessing.MinMaxScaler() 89 | x_train = min_max_scaler.fit_transform(np.asarray(x_train)) 90 | x_test = min_max_scaler.transform(np.asarray(x_test)) 91 | model.fit(x_train, y_train) 92 | y_pred = model.predict(x_test) 93 | y_label = [] 94 | ni = np.unique(y_train) 95 | num_class = ni.shape[0] 96 | for i in y_pred: 97 | binary_label = np.zeros((num_class)) 98 | binary_label[int(i)] = 1 99 | y_label.append(binary_label) 100 | y_predict = np.asarray(y_label) 101 | return y_predict 102 | 103 | def train_model_prob(model, x, y, k=3): 104 | ## min_max_scaler = preprocessing.MinMaxScaler() 105 | ## x = min_max_scaler.fit_transform(np.asarray(x)) 106 | kf = StratifiedKFold(n_splits=k) 107 | ni = np.unique(y) 108 | num_class = ni.shape[0] 109 | y_predict = np.zeros((len(y), num_class)) 110 | for train_index, test_index in kf.split(x, y): 111 | x_train, x_test = x[train_index], x[test_index] 112 | y_train, y_test = y[train_index], y[test_index] 113 | model.fit(x_train,y_train) 114 | y_predict[test_index,:] = model.predict_proba(x_test) 115 | return y_predict 116 | 117 | def test_function_prob(model, x_train, y_train, x_test): 118 | ##min_max_scaler = preprocessing.MinMaxScaler() 119 | #x_train = min_max_scaler.fit_transform(np.asarray(x_train)) 120 | #x_test = min_max_scaler.transform(np.asarray(x_test)) 121 | model.fit(x_train, y_train) 122 | y_pred = model.predict_proba(x_test) 123 | return y_pred 124 | 125 | def conVector(img): 126 | try: 127 | img_vector=numpy.concatenate((img)) 128 | except: 129 | img_vector=img 130 | return img_vector 131 | 132 | def FeaCon2(img1, img2): 133 | x_features = [] 134 | for i in range(img1.shape[0]): 135 | image1 = conVector(img1[i, :]) 136 | image2 = conVector(img2[i, :]) 137 | feature_vector = 
numpy.concatenate((image1, image2), axis=0) 138 | x_features.append(feature_vector) 139 | return numpy.asarray(x_features) 140 | 141 | def FeaCon3(img1, img2, img3): 142 | x_features = [] 143 | for i in range(img1.shape[0]): 144 | image1 = conVector(img1[i, :]) 145 | image2 = conVector(img2[i, :]) 146 | image3 = conVector(img3[i, :]) 147 | feature_vector = numpy.concatenate((image1, image2, image3), axis=0) 148 | x_features.append(feature_vector) 149 | return numpy.asarray(x_features) 150 | 151 | 152 | def FeaCon4(img1, img2, img3, img4): 153 | x_features = [] 154 | for i in range(img1.shape[0]): 155 | image1 = conVector(img1[i, :]) 156 | image2 = conVector(img2[i, :]) 157 | image3 = conVector(img3[i, :]) 158 | image4 = conVector(img4[i, :]) 159 | feature_vector = numpy.concatenate((image1, image2, image3, image4), axis=0) 160 | x_features.append(feature_vector) 161 | return numpy.asarray(x_features) 162 | 163 | def histLBP(image,radius,n_points): 164 | # 'uniform','default','ror','var' 165 | lbp = local_binary_pattern(image, n_points, radius, method='nri_uniform') 166 | n_bins = 59 167 | hist,ax=numpy.histogram(lbp,n_bins,[0,59]) 168 | return hist 169 | 170 | def all_lbp(image): 171 | # global and local 172 | feature = [] 173 | for i in range(image.shape[0]): 174 | feature_vector = histLBP(image[i,:,:], radius=1.5, n_points=8) 175 | feature.append(feature_vector) 176 | return numpy.asarray(feature) 177 | 178 | # 179 | def HoGFeatures(image): 180 | try: 181 | img,realImage=hog(image,orientations=9, pixels_per_cell=(8, 8), 182 | cells_per_block=(3, 3), block_norm='L2-Hys', visualize=True, 183 | transform_sqrt=False, feature_vector=True) 184 | return realImage 185 | except: 186 | return image 187 | 188 | 189 | def hog_features_patches(image,patch_size,moving_size): 190 | img=numpy.asarray(image) 191 | width, height = img.shape 192 | w = int(width / moving_size) 193 | h = int(height / moving_size) 194 | patch = [] 195 | for i in range(0, w): 196 | for j in range(0, h): 197 | patch.append([moving_size * i, moving_size * j]) 198 | hog_features = numpy.zeros((len(patch))) 199 | realImage=HoGFeatures(img) 200 | for i in range(len(patch)): 201 | hog_features[i] = numpy.mean( 202 | realImage[patch[i][0]:(patch[i][0] + patch_size), patch[i][1]:(patch[i][1] + patch_size)]) 203 | return hog_features 204 | 205 | def global_hog_small(image): 206 | feature = [] 207 | for i in range(image.shape[0]): 208 | feature_vector = hog_features_patches(image[i,:,:], 4, 4) 209 | feature.append(feature_vector) 210 | return numpy.asarray(feature) 211 | 212 | def all_sift(image): 213 | width, height = image[0, :, :].shape 214 | min_length = numpy.min((width, height)) 215 | feature = [] 216 | for i in range(image.shape[0]): 217 | img = numpy.asarray(image[i, 0:width, 0:height]) 218 | extractor = sift_features.SingleSiftExtractor(min_length) 219 | feaArrSingle = extractor.process_image(img[0:min_length, 0:min_length]) 220 | # dimension 128 for all images 221 | w, h = feaArrSingle.shape 222 | feature_vector = numpy.reshape(feaArrSingle, (h,)) 223 | feature.append(feature_vector) 224 | return numpy.asarray(feature) 225 | 226 | 227 | def gau(left, si): 228 | img = [] 229 | for i in range(left.shape[0]): 230 | img.append(ndimage.gaussian_filter(left[i, :, :], sigma=si)) 231 | return np.asarray(img) 232 | 233 | def gauD(left, si, or1, or2): 234 | img = [] 235 | for i in range(left.shape[0]): 236 | img.append(ndimage.gaussian_filter(left[i,:,:],sigma=si, order=[or1,or2])) 237 | return np.asarray(img) 238 | 239 | def 
gab(left,the,fre): 240 | fmax=numpy.pi/2 241 | a=numpy.sqrt(2) 242 | freq=fmax/(a**fre) 243 | thea=numpy.pi*the/8 244 | img = [] 245 | for i in range(left.shape[0]): 246 | filt_real,filt_imag=numpy.asarray(gabor(left[i,:,:],theta=thea,frequency=freq)) 247 | img.append(filt_real) 248 | return numpy.asarray(img) 249 | 250 | def gaussian_Laplace1(left): 251 | return ndimage.gaussian_laplace(left,sigma=1) 252 | 253 | def gaussian_Laplace2(left): 254 | return ndimage.gaussian_laplace(left,sigma=2) 255 | 256 | def laplace(left): 257 | return ndimage.laplace(left) 258 | 259 | def sobelxy(left): 260 | img = [] 261 | for i in range(left.shape[0]): 262 | img.append(ndimage.sobel(left[i, :, :])) 263 | return np.asarray(img) 264 | 265 | def sobelx(left): 266 | img = [] 267 | for i in range(left.shape[0]): 268 | img.append(ndimage.sobel(left[i,:,:], axis=0)) 269 | return np.asarray(img) 270 | 271 | def sobely(left): 272 | img = [] 273 | for i in range(left.shape[0]): 274 | img.append(ndimage.sobel(left[i, :, :], axis=1)) 275 | return np.asarray(img) 276 | 277 | #max filter 278 | def maxf(image): 279 | img = [] 280 | size = 3 281 | for i in range(image.shape[0]): 282 | img.append(ndimage.maximum_filter(image[i,:,:],size)) 283 | return np.asarray(img) 284 | 285 | #median_filter 286 | def medianf(image): 287 | img = [] 288 | size = 3 289 | for i in range(image.shape[0]): 290 | img.append(ndimage.median_filter(image[i,:,:],size)) 291 | return np.asarray(img) 292 | 293 | #mean_filter 294 | def meanf(image): 295 | img = [] 296 | size = 3 297 | for i in range(image.shape[0]): 298 | img.append(ndimage.convolve(image[i,:,:], numpy.full((3, 3), 1 / (size * size)))) 299 | return np.asarray(img) 300 | 301 | #minimum_filter 302 | def minf(image): 303 | img = [] 304 | size = 3 305 | for i in range(image.shape[0]): 306 | img.append(ndimage.minimum_filter(image[i,:,:],size)) 307 | return np.asarray(img) 308 | 309 | def lbp(image): 310 | img = [] 311 | for i in range(image.shape[0]): 312 | # 'uniform','default','ror','var' 313 | lbp = local_binary_pattern(image[i,:,:], 8, 1.5, method='nri_uniform') 314 | img.append(np.divide(lbp,59)) 315 | return np.asarray(img) 316 | 317 | 318 | def hog_feature(image): 319 | try: 320 | img = [] 321 | for i in range(image.shape[0]): 322 | img1, realImage = hog(image[i, :, :], orientations=9, pixels_per_cell=(8, 8), 323 | cells_per_block=(3, 3), block_norm='L2-Hys', visualize=True, 324 | transform_sqrt=False, feature_vector=True) 325 | img.append(realImage) 326 | data = np.asarray(img) 327 | except: data = image 328 | return data 329 | 330 | def mis_match(img1,img2): 331 | n, w1,h1=img1.shape 332 | n, w2,h2=img2.shape 333 | w=min(w1,w2) 334 | h=min(h1,h2) 335 | return img1[:, 0:w,0:h],img2[:, 0:w,0:h] 336 | 337 | def mixconadd(img1, w1, img2, w2): 338 | img11,img22=mis_match(img1,img2) 339 | return numpy.add(img11*w1,img22*w2) 340 | 341 | def mixconsub(img1, w1, img2, w2): 342 | img11,img22=mis_match(img1,img2) 343 | return numpy.subtract(img11*w1,img22*w2) 344 | 345 | def sqrt(left): 346 | with numpy.errstate(divide='ignore',invalid='ignore'): 347 | x = numpy.sqrt(left,) 348 | if isinstance(x, numpy.ndarray): 349 | x[numpy.isinf(x)] = 1 350 | x[numpy.isnan(x)] = 1 351 | elif numpy.isinf(x) or numpy.isnan(x): 352 | x = 1 353 | return x 354 | 355 | def relu(left): 356 | return (abs(left)+left)/2 357 | 358 | def maxP(left, kel1, kel2): 359 | img = [] 360 | for i in range(left.shape[0]): 361 | current = skimage.measure.block_reduce(left[i,:,:], (kel1,kel2),numpy.max) 362 | 
img.append(current)
363 | return np.asarray(img)
364 | 
--------------------------------------------------------------------------------
/FELGP/gp_restrict.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 | import re
4 | import sys
5 | import warnings
6 | 
7 | from collections import defaultdict, deque
8 | from functools import partial, wraps
9 | from inspect import isclass
10 | from operator import eq, lt
11 | from deap import tools
12 | 
13 | # Define the name of type for any types.
14 | __type__ = object
15 | ######################################
16 | # GP Program generation functions #
17 | ######################################
18 | def genFull(pset, min_, max_, type_=None):
19 | """Generate an expression where each leaf has the same depth
20 | between *min* and *max*.
21 | 
22 | :param pset: Primitive set from which primitives are selected.
23 | :param min_: Minimum height of the produced trees.
24 | :param max_: Maximum Height of the produced trees.
25 | :param type_: The type that should return the tree when called, when
26 | :obj:`None` (default) no return type is enforced.
27 | :returns: A full tree with all leaves at the same depth.
28 | """
29 | def condition(height, depth):
30 | """Expression generation stops when the depth is equal to height."""
31 | return depth == height
32 | #print('it works', pset)
33 | return generate(pset, min_, max_, condition, type_)
34 | 
35 | def genGrow(pset, min_, max_, type_=None):
36 | """Generate an expression where each leaf might have a different depth
37 | between *min* and *max*.
38 | 
39 | :param pset: Primitive set from which primitives are selected.
40 | :param min_: Minimum height of the produced trees.
41 | :param max_: Maximum Height of the produced trees.
42 | :param type_: The type that should return the tree when called, when
43 | :obj:`None` (default) no return type is enforced.
44 | :returns: A grown tree with leaves at possibly different depths.
45 | """
46 | def condition(height, depth):
47 | """Expression generation stops when the depth is equal to height
48 | or when it is randomly determined that a node should be a terminal.
49 | """
50 | return depth == height or depth >= min_
51 | return generate(pset, min_, max_, condition, type_)
52 | 
53 | def genHalfAndHalf(pset, min_, max_, type_=None):
54 | """Generate an expression with a PrimitiveSet *pset*.
55 | Half the time, the expression is generated with :func:`~deap.gp.genGrow`,
56 | the other half, the expression is generated with :func:`~deap.gp.genFull`.
57 | 
58 | :param pset: Primitive set from which primitives are selected.
59 | :param min_: Minimum height of the produced trees.
60 | :param max_: Maximum Height of the produced trees.
61 | :param type_: The type that should return the tree when called, when
62 | :obj:`None` (default) no return type is enforced.
63 | :returns: Either a full or a grown tree.
64 | """
65 | method = random.choice((genGrow, genFull))
66 | #print(method)
67 | return method(pset, min_, max_, type_)
68 | 
69 | def genRamped(pset, min_, max_, type_=None):
70 | """
71 | .. deprecated:: 1.0
72 | The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead.
73 | """
74 | warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.",
75 | FutureWarning)
76 | return genHalfAndHalf(pset, min_, max_, type_)
77 | 
78 | def generate(pset, min_, max_, condition, type_=__type__):
79 | """Generate a tree as a list of lists. The tree is built
The tree is build 80 | from the root to the leaves, and it stop growing when the 81 | condition is fulfilled. 82 | :param pset: A primitive set from wich to select primitives of the trees. 83 | :param min_: Minimum height of the produced trees. 84 | :param max_: Maximum Height of the produced trees. 85 | :param condition: The condition is a function that takes two arguments, 86 | the height of the tree to build and the current 87 | depth in the tree. 88 | :param type_: The type that should return the tree when called, when 89 | :obj:`None` (default) no return type is enforced. 90 | :returns: A grown tree with leaves at possibly different depths 91 | dependending on the condition function. 92 | 93 | 94 | DUMMY NODE ISSUES 95 | 96 | DEAP will only place terminals if we're at the bottom of a branch. 97 | This creates two issues: 98 | 1. A primitive that takes other primitives as inputs could be placed at the 99 | second to last layer. 100 | SOLUTION: You need to allow the tree to end whenever the height condition is met, 101 | so create "dummy" terminals for every type possible in the tree. 102 | 2. A primitive that takes terminals as inputs could be placed above the second to 103 | last layer. 104 | SOLUTION: You need to allow the tree to continue extending the branch until the 105 | height condition is met, so create "dummy" primitives that just pass 106 | through the terminal types. 107 | 108 | These "dummy" terminals and "dummy" primitives introduce unnecessary and sometimes 109 | nonsensical solutions into populations. These "dummy" nodes can be eliminated 110 | if the height requirement is relaxed. 111 | 112 | 113 | HOW TO PREVENT DUMMY NODE ISSUES 114 | 115 | Relaxing the height requirement: 116 | When at the bottom of the branch, check for terminals first, then primitives. 117 | When checking for primitives, skirt the height requirement by adjusting 118 | the branch depth to be the second to last layer of the tree. 119 | If neither a terminal or primitive fits this node, then throw an error. 120 | When not at the bottom of the branch, check for primitives first, then terminals. 121 | 122 | Issue with relaxing the height requirement: 123 | 1. Endless loops are possible when primitive sets have any type loops. 124 | A primitive with an output of one type may not take an input type of 125 | itself or a parent type. 126 | SOLUTION: A primitive set must be well-designed to prevent those type loops. 
127 | 128 | """ 129 | if type_ is None: 130 | type_ = pset.ret 131 | expr = [] 132 | height = random.randint(min_, max_) 133 | stack = [(0, type_)] 134 | #print(len(stack)) 135 | #print(pset.terminals) 136 | #print(pset.primitives) 137 | while len(stack) != 0: 138 | ## print(len(expr)) 139 | if (len(expr)>60): 140 | expr = [] 141 | #if type_ is None: 142 | type_ = pset.ret 143 | stack = [(0, type_)] 144 | ## print(depth, type_) 145 | #depth, type_ = stack.pop() 146 | height = random.randint(min_, max_) 147 | depth, type_ = stack.pop() 148 | if condition(height, depth): 149 | # Try finding a terminal 150 | try: 151 | term = random.choice(pset.terminals[type_]) 152 | #print('term',term) 153 | if isclass(term): 154 | term = term() 155 | expr.append(term) 156 | # No terminal fits 157 | except: 158 | # So pull the depth back one layer, and start looking for primitives 159 | try: 160 | depth -= 1 161 | prim = random.choice(pset.primitives[type_]) 162 | #print('prim',prim) 163 | expr.append(prim) 164 | for arg in reversed(prim.args): 165 | stack.append((depth, arg)) 166 | 167 | # No primitive fits, either - that's an error 168 | except IndexError: 169 | _, _, traceback = sys.exc_info() 170 | raise IndexError("The gp.generate function tried to add " \ 171 | "a primitive of type '%s', but there is " \ 172 | "none available." % (type_,), traceback) 173 | # Not at the bottom of the tree 174 | else: 175 | # Check for primitives 176 | try: 177 | prim = random.choice(pset.primitives[type_]) 178 | expr.append(prim) 179 | for arg in reversed(prim.args): 180 | stack.append((depth + 1, arg)) 181 | # No primitive fits 182 | except: 183 | # So check for terminals 184 | try: 185 | term = random.choice(pset.terminals[type_]) 186 | 187 | # No terminal fits, either - that's an error 188 | except IndexError: 189 | _, _, traceback = sys.exc_info() 190 | raise IndexError("The gp.generate function tried to add " \ 191 | "a terminal of type '%s', but there is " \ 192 | "none available." % (type_,), traceback) 193 | if isclass(term): 194 | term = term() 195 | expr.append(term) 196 | #print(len(expr)) 197 | return expr 198 | 199 | 200 | def generateMD(pset, min_, max_, condition, type_=__type__): 201 | if type_ is None: 202 | type_ = pset.ret 203 | expr = [] 204 | height = random.randint(min_, max_) 205 | stack = [(0, type_)] 206 | ## print(len(stack)) 207 | #print(pset.terminals) 208 | #print(pset.primitives) 209 | while len(stack) != 0: 210 | depth, type_ = stack.pop() 211 | if condition(height, depth): 212 | # Try finding a terminal 213 | try: 214 | term = random.choice(pset.terminals[type_]) 215 | #print('term',term) 216 | if isclass(term): 217 | term = term() 218 | expr.append(term) 219 | # No terminal fits 220 | except: 221 | # So pull the depth back one layer, and start looking for primitives 222 | try: 223 | depth -= 1 224 | prim = random.choice(pset.primitives[type_]) 225 | #print('prim',prim) 226 | expr.append(prim) 227 | for arg in reversed(prim.args): 228 | stack.append((depth, arg)) 229 | 230 | # No primitive fits, either - that's an error 231 | except IndexError: 232 | _, _, traceback = sys.exc_info() 233 | raise IndexError("The gp.generate function tried to add " \ 234 | "a primitive of type '%s', but there is " \ 235 | "none available." 
% (type_,), traceback)
236 | # Not at the bottom of the tree
237 | else:
238 | # Check for primitives
239 | try:
240 | prim = random.choice(pset.primitives[type_])
241 | expr.append(prim)
242 | for arg in reversed(prim.args):
243 | stack.append((depth + 1, arg))
244 | # No primitive fits
245 | except:
246 | # So check for terminals
247 | try:
248 | term = random.choice(pset.terminals[type_])
249 | 
250 | # No terminal fits, either - that's an error
251 | except IndexError:
252 | _, _, traceback = sys.exc_info()
253 | raise IndexError("The gp.generate function tried to add " \
254 | "a terminal of type '%s', but there is " \
255 | "none available." % (type_,), traceback)
256 | if isclass(term):
257 | term = term()
258 | expr.append(term)
259 | #print(len(expr))
260 | return expr
261 | 
262 | def genHalfAndHalfMD(pset, min_, max_, type_=None):
263 | expr=genHalfAndHalf(pset, min_, max_, type_=None)
264 | #print(expr)
265 | #print('expr before', len(expr))
266 | while len(expr)>60:
267 | expr=genHalfAndHalf(pset, min_, max_, type_=None)
268 | #print('expr before', len(expr))
269 | #print('expr after',len(expr))
270 | return expr
271 | 
272 | def genFullMD(pset, min_, max_, type_=None):
273 | """Generate an expression where each leaf has the same depth
274 | between *min* and *max*.
275 | 
276 | :param pset: Primitive set from which primitives are selected.
277 | :param min_: Minimum height of the produced trees.
278 | :param max_: Maximum Height of the produced trees.
279 | :param type_: The type that should return the tree when called, when
280 | :obj:`None` (default) no return type is enforced.
281 | :returns: A full tree with all leaves at the same depth.
282 | """
283 | def condition(height, depth):
284 | """Expression generation stops when the depth is equal to height."""
285 | return depth == height
286 | #print('it works', pset)
287 | expr=generateMD(pset, min_, max_, condition, type_)
288 | ## print(len(expr))
289 | ## while len(expr)>20:
290 | ## expr=generateMD(pset, min_, max_, condition, type_)
291 | return expr
292 | 
--------------------------------------------------------------------------------
/FELGP/saveFile.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | 
3 | from deap import gp
4 | 
5 | 
6 | def saveResults(fileName, *args, **kwargs):
7 | f = open(fileName, 'w')
8 | for i in args:
9 | f.writelines(str(i)+'\n')
10 | f.close()
11 | return
12 | 
13 | 
14 | 
15 | def saveLog(fileName, log):
16 | f = open(fileName, 'wb')
17 | pickle.dump(log, f)
18 | f.close()
19 | return
25 | 
26 | 
27 | # def plotTree(pathName,individual):
28 | # nodes, edges, labels = gp.graph(individual)
29 | # g = pgv.AGraph()
30 | # g.add_nodes_from(nodes)
31 | # g.add_edges_from(edges)
32 | # g.layout(prog="dot")
33 | 
34 | # for i in nodes:
35 | # n = g.get_node(i)
36 | # n.attr["label"] = labels[i]
37 | # g.draw(pathName)
38 | # return
39 | 
40 | 
41 | def bestInd(toolbox, population, number):
42 | bestInd = []
43 | best = toolbox.selectElitism(population, k=number)
44 | for i in best:
45 | bestInd.append(i)
46 | return bestInd
47 | 
48 | 
49 | def saveAllResults(randomSeeds, dataSetName, hof, trainTime, testResults, log):
50 | fileName1 = str(randomSeeds) + 'Results_on' + dataSetName + '.txt'
51 | saveLog(fileName1, log)
52 | fileName = str(randomSeeds) + 'Final_Results_on' + dataSetName + '.txt'
53 | saveResults(fileName,
'randomSeed', randomSeeds, 'trainTime', trainTime,
54 | 'trainResults', hof[0].fitness,
55 | 'testResults', testResults, 'bestInd in training',
56 | hof[0])
57 | 
58 | return
59 | 
--------------------------------------------------------------------------------
/FELGP/sift_features.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import signal
3 | # sift features
4 | Nangles = 8
5 | Nbins = 4
6 | Nsamples = Nbins ** 2
7 | alpha = 9.0
8 | angles = np.array(range(Nangles)) * 2.0 * np.pi / Nangles
9 | 
10 | 
11 | def gen_dgauss(sigma):
12 | '''
13 | Generate a derivative-of-Gaussian filter in both the X and Y
14 | directions.
15 | '''
16 | fwid = int(2 * np.ceil(sigma))
17 | G = np.array(range(-fwid, fwid + 1)) ** 2
18 | G = G.reshape((G.size, 1)) + G
19 | G = np.exp(- G / 2.0 / sigma / sigma)
20 | G /= np.sum(G)
21 | GH, GW = np.gradient(G)
22 | GH *= 2.0 / np.sum(np.abs(GH))
23 | GW *= 2.0 / np.sum(np.abs(GW))
24 | #print(GH,GW)
25 | return GH, GW
26 | 
27 | 
28 | class DsiftExtractor:
29 | '''
30 | A class that performs dense SIFT feature extraction.
31 | Sample Usage:
32 | extractor = DsiftExtractor(gridSpacing,patchSize,[optional params])
33 | feaArr,positions = extractor.process_image(Image)
34 | '''
35 | 
36 | def __init__(self, gridSpacing, patchSize,nrml_thres=1.0,
37 | sigma_edge=0.8,
38 | sift_thres=0.2):
39 | '''
40 | gridSpacing: the spacing for sampling dense descriptors
41 | patchSize: the size for each sift patch
42 | nrml_thres: low contrast normalization threshold
43 | sigma_edge: the standard deviation for the gaussian smoothing
44 | before computing the gradient
45 | sift_thres: sift thresholding (0.2 works well based on
46 | Lowe's SIFT paper)
47 | '''
48 | self.gS = gridSpacing
49 | self.pS = patchSize
50 | self.nrml_thres = nrml_thres
51 | self.sigma = sigma_edge
52 | self.sift_thres = sift_thres
53 | # compute the weight contribution map
54 | sample_res = self.pS / np.double(Nbins)
55 | sample_p = np.array(range(self.pS))
56 | sample_ph, sample_pw = np.meshgrid(sample_p, sample_p)
57 | sample_ph.resize(sample_ph.size)
58 | sample_pw.resize(sample_pw.size)
59 | bincenter = np.array(range(1, Nbins * 2, 2)) / 2.0 / Nbins * self.pS - 0.5
60 | bincenter_h, bincenter_w = np.meshgrid(bincenter, bincenter)
61 | bincenter_h.resize((bincenter_h.size, 1))
62 | bincenter_w.resize((bincenter_w.size, 1))
63 | dist_ph = abs(sample_ph - bincenter_h)
64 | dist_pw = abs(sample_pw - bincenter_w)
65 | weights_h = dist_ph / sample_res
66 | weights_w = dist_pw / sample_res
67 | weights_h = (1 - weights_h) * (weights_h <= 1)
68 | weights_w = (1 - weights_w) * (weights_w <= 1)
69 | # weights is the contribution of each pixel to the corresponding bin center
70 | self.weights = weights_h * weights_w
71 | # pyplot.imshow(self.weights)
72 | # pyplot.show()
73 | 
74 | def process_image(self, image, positionNormalize=True,verbose=True):
75 | '''
76 | Process a single image and return the locations
77 | and the values of the detected SIFT features.
78 | image: an M*N image which is a numpy 2D array. If you
79 | pass a color image, it will automatically be converted
80 | to a grayscale image.
81 | positionNormalize: whether to normalize the positions
82 | to [0,1]. If False, the pixel-based positions of the
83 | top-right position of the patches is returned.
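For example: in a 60*90 image, a patch whose grid offset is (30, 45) is reported as (0.5, 0.5) when positionNormalize is True, since normalized positions are computed as gridH/H and gridW/W below (the numbers here are an arbitrary illustration).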
84 | 
85 | Return values:
86 | feaArr: the feature array, each row is a feature
87 | positions: the positions of the features
88 | '''
89 | 
90 | image = image.astype(np.double)
91 | if image.ndim == 3:
92 | # we do not deal with color images.
93 | image = np.mean(image, axis=2)
94 | # compute the grids
95 | H, W = image.shape
96 | gS = self.gS
97 | pS = self.pS
98 | remH = np.mod(H - pS, gS)
99 | remW = np.mod(W - pS, gS)
100 | offsetH = int(remH / 2)
101 | offsetW = int(remW / 2)
102 | gridH, gridW = np.meshgrid(range(offsetH, H - pS + 1, gS), range(offsetW, W - pS + 1, gS))
103 | gridH = gridH.flatten()
104 | gridW = gridW.flatten()
105 | if verbose:
106 | print('Image: w {}, h {}, gs {}, ps {}, nFea {}'.format(
107 | W, H, gS, pS, gridH.size))
108 | 
109 | feaArr = self.calculate_sift_grid(image, gridH, gridW)
110 | feaArr = self.normalize_sift(feaArr)
111 | if positionNormalize:
112 | positions = np.vstack((gridH / np.double(H), gridW / np.double(W)))
113 | else:
114 | positions = np.vstack((gridH, gridW))
115 | return feaArr, positions
116 | 
117 | def calculate_sift_grid(self, image, gridH, gridW):
118 | '''
119 | This function calculates the unnormalized sift features.
120 | It is called by process_image().
121 | '''
122 | H, W = image.shape
123 | Npatches = gridH.size
124 | feaArr = np.zeros((Npatches, Nsamples * Nangles))
125 | 
126 | # calculate gradient
127 | GH, GW = gen_dgauss(self.sigma)
128 | IH = signal.convolve2d(image, GH, mode='same')
129 | IW = signal.convolve2d(image, GW, mode='same')
130 | Imag = np.sqrt(IH ** 2 + IW ** 2)
131 | Itheta = np.arctan2(IH, IW)
132 | Iorient = np.zeros((Nangles, H, W))
133 | for i in range(Nangles):
134 | Iorient[i] = Imag * np.maximum(np.cos(Itheta - angles[i]) ** alpha, 0)
135 | # pyplot.imshow(Iorient[i])
136 | # pyplot.show()
137 | for i in range(Npatches):
138 | currFeature = np.zeros((Nangles, Nsamples))
139 | for j in range(Nangles):
140 | currFeature[j] = np.dot(self.weights,
141 | Iorient[j, gridH[i]:gridH[i] + self.pS, gridW[i]:gridW[i] + self.pS].flatten())
142 | feaArr[i] = currFeature.flatten()
143 | return feaArr
144 | 
145 | def normalize_sift(self, feaArr):
146 | '''
147 | This function does sift feature normalization
148 | following David Lowe's definition (normalize length ->
149 | thresholding at 0.2 -> renormalize length)
150 | '''
151 | siftlen = np.sqrt(np.sum(feaArr ** 2, axis=1))
152 | hcontrast = (siftlen >= self.nrml_thres)
153 | siftlen[siftlen < self.nrml_thres] = self.nrml_thres
154 | # normalize with contrast thresholding
155 | feaArr /= siftlen.reshape((siftlen.size, 1))
156 | # suppress large gradients
157 | feaArr[feaArr > self.sift_thres] = self.sift_thres
158 | # renormalize high-contrast ones
159 | feaArr[hcontrast] /= np.sqrt(np.sum(feaArr[hcontrast] ** 2, axis=1)). \
160 | reshape((feaArr[hcontrast].shape[0], 1))
161 | return feaArr
162 | 
163 | 
164 | class SingleSiftExtractor(DsiftExtractor):
165 | '''
166 | The simple wrapper class that does feature extraction, treating
167 | the whole image as a local image patch.
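A minimal usage sketch (the patch size 40 is an arbitrary example; the descriptor
length follows from Nangles * Nsamples = 8 * 16 = 128):

    extractor = SingleSiftExtractor(40)
    feaArr = extractor.process_image(patch)  # patch: 40*40 numpy array -> feaArr of shape (1, 128)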
168 | '''
169 | 
170 | def __init__(self, patchSize,nrml_thres=1.0, sigma_edge=0.8,sift_thres=0.2):
171 | # simply call the super class __init__ with gridSpacing equal to patchSize
172 | DsiftExtractor.__init__(self, patchSize, patchSize, nrml_thres, sigma_edge, sift_thres)
173 | 
174 | def process_image(self, image):
175 | return DsiftExtractor.process_image(self, image, False, False)[0]
--------------------------------------------------------------------------------
/FELGP/strongGPDataType.py:
--------------------------------------------------------------------------------
1 | class Int1:
2 | def __init__(self):
3 | pass
4 | 
5 | class Int2:
6 | def __init__(self):
7 | pass
8 | 
9 | class Int3:
10 | def __init__(self):
11 | pass
12 | 
13 | class Int4:
14 | def __init__(self):
15 | pass
16 | 
17 | class Int5:
18 | def __init__(self):
19 | pass
20 | 
21 | class Int6:
22 | def __init__(self):
23 | pass
24 | 
25 | class Float1:
26 | def __init__(self):
27 | pass
28 | 
29 | class Float2:
30 | def __init__(self):
31 | pass
32 | 
33 | class Float3:
34 | def __init__(self):
35 | pass
36 | 
37 | 
38 | class Array1:
39 | def __init__(self):
40 | pass
41 | 
42 | class Array2:
43 | def __init__(self):
44 | pass
45 | 
46 | class Array3:
47 | def __init__(self):
48 | pass
49 | 
50 | class Array4:
51 | def __init__(self):
52 | pass
53 | 
54 | class Array5:
55 | def __init__(self):
56 | pass
57 | 
58 | class Array6:
59 | def __init__(self):
60 | pass
61 | 
--------------------------------------------------------------------------------
/FlexGP/FGP_main.py:
--------------------------------------------------------------------------------
1 | # python packages
2 | import random
3 | import time
4 | import evalGP_fgp as evalGP
5 | import gp_restrict as gp_restrict
6 | import numpy
7 | from deap import base, creator, tools, gp
8 | from strongGPDataType import Int1, Int2, Int3, Float1, Float2, Float3, Img, Img1, Vector, Vector1
9 | import fgp_functions as fe_fs
10 | from sklearn.svm import LinearSVC
11 | from sklearn.model_selection import cross_val_score
12 | from sklearn import preprocessing
13 | import saveFile
14 | import sys
15 | 
16 | randomSeeds = 12
17 | dataSetName = 'f1'
18 | 
19 | 
20 | x_train = numpy.load(dataSetName + '_train_data.npy') / 255.0
21 | y_train = numpy.load(dataSetName + '_train_label.npy')
22 | x_test = numpy.load(dataSetName + '_test_data.npy') / 255.0
23 | y_test = numpy.load(dataSetName + '_test_label.npy')
24 | 
25 | print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
26 | # parameters:
27 | population = 500
28 | generation = 50
29 | cxProb = 0.8
30 | mutProb = 0.19
31 | elitismProb = 0.01
32 | totalRuns = 1
33 | initialMinDepth = 2
34 | initialMaxDepth = 6
35 | maxDepth = 8
36 | ##GP
37 | pset = gp.PrimitiveSetTyped('MAIN', [Img], Vector1, prefix='Image')
38 | #feature concatenation
39 | pset.addPrimitive(fe_fs.root_con, [Vector1, Vector1], Vector1, name='Root')
40 | pset.addPrimitive(fe_fs.root_conVector2, [Img1, Img1], Vector1, name='Root2')
41 | pset.addPrimitive(fe_fs.root_conVector3, [Img1, Img1, Img1], Vector1, name='Root3')
42 | pset.addPrimitive(fe_fs.root_con, [Vector, Vector], Vector1, name='Roots2')
43 | pset.addPrimitive(fe_fs.root_con, [Vector, Vector, Vector], Vector1, name='Roots3')
44 | pset.addPrimitive(fe_fs.root_con, [Vector, Vector, Vector, Vector], Vector1, name='Roots4')
45 | ##feature extraction
46 | pset.addPrimitive(fe_fs.global_hog_small, [Img1], Vector, name='Global_HOG')
47 | pset.addPrimitive(fe_fs.all_lbp, [Img1], Vector, name='Global_uLBP')
48 | 
pset.addPrimitive(fe_fs.all_sift, [Img1], Vector, name='Global_SIFT') 49 | pset.addPrimitive(fe_fs.global_hog_small, [Img], Vector, name='FGlobal_HOG') 50 | pset.addPrimitive(fe_fs.all_lbp, [Img], Vector, name='FGlobal_uLBP') 51 | pset.addPrimitive(fe_fs.all_sift, [Img], Vector, name='FGlobal_SIFT') 52 | # pooling 53 | pset.addPrimitive(fe_fs.maxP, [Img1, Int3, Int3], Img1, name='MaxPF') 54 | #filtering 55 | pset.addPrimitive(fe_fs.gau, [Img1, Int1], Img1, name='GauF') 56 | pset.addPrimitive(fe_fs.gauD, [Img1, Int1, Int2, Int2], Img1, name='GauDF') 57 | pset.addPrimitive(fe_fs.gab, [Img1, Float1, Float2], Img1, name='GaborF') 58 | pset.addPrimitive(fe_fs.laplace, [Img1], Img1, name='LapF') 59 | pset.addPrimitive(fe_fs.gaussian_Laplace1, [Img1], Img1, name='LoG1F') 60 | pset.addPrimitive(fe_fs.gaussian_Laplace2, [Img1], Img1, name='LoG2F') 61 | pset.addPrimitive(fe_fs.sobelxy, [Img1], Img1, name='SobelF') 62 | pset.addPrimitive(fe_fs.sobelx, [Img1], Img1, name='SobelXF') 63 | pset.addPrimitive(fe_fs.sobely, [Img1], Img1, name='SobelYF') 64 | pset.addPrimitive(fe_fs.medianf, [Img1], Img1, name='MedF') 65 | pset.addPrimitive(fe_fs.meanf, [Img1], Img1, name='MeanF') 66 | pset.addPrimitive(fe_fs.minf, [Img1], Img1, name='MinF') 67 | pset.addPrimitive(fe_fs.maxf, [Img1], Img1, name='MaxF') 68 | pset.addPrimitive(fe_fs.lbp, [Img1], Img1, name='LBPF') 69 | pset.addPrimitive(fe_fs.hog_feature, [Img1], Img1, name='HoGF') 70 | pset.addPrimitive(fe_fs.mixconadd, [Img1, Float3, Img1, Float3], Img1, name='W_AddF') 71 | pset.addPrimitive(fe_fs.mixconsub, [Img1, Float3, Img1, Float3], Img1, name='W_SubF') 72 | pset.addPrimitive(fe_fs.sqrt, [Img1], Img1, name='SqrtF') 73 | pset.addPrimitive(fe_fs.relu, [Img1], Img1, name='ReLUF') 74 | # pooling 75 | pset.addPrimitive(fe_fs.maxP, [Img, Int3, Int3], Img1, name='MaxP') 76 | # filtering 77 | pset.addPrimitive(fe_fs.gau, [Img, Int1], Img, name='Gau') 78 | pset.addPrimitive(fe_fs.gauD, [Img, Int1, Int2, Int2], Img, name='GauD') 79 | pset.addPrimitive(fe_fs.gab, [Img, Float1, Float2], Img, name='Gabor') 80 | pset.addPrimitive(fe_fs.laplace, [Img], Img, name='Lap') 81 | pset.addPrimitive(fe_fs.gaussian_Laplace1, [Img], Img, name='LoG1') 82 | pset.addPrimitive(fe_fs.gaussian_Laplace2, [Img], Img, name='LoG2') 83 | pset.addPrimitive(fe_fs.sobelxy, [Img], Img, name='Sobel') 84 | pset.addPrimitive(fe_fs.sobelx, [Img], Img, name='SobelX') 85 | pset.addPrimitive(fe_fs.sobely, [Img], Img, name='SobelY') 86 | pset.addPrimitive(fe_fs.medianf, [Img], Img, name='Med') 87 | pset.addPrimitive(fe_fs.meanf, [Img], Img, name='Mean') 88 | pset.addPrimitive(fe_fs.minf, [Img], Img, name='Min') 89 | pset.addPrimitive(fe_fs.maxf, [Img], Img, name='Max') 90 | pset.addPrimitive(fe_fs.lbp, [Img], Img, name='LBP_F') 91 | pset.addPrimitive(fe_fs.hog_feature, [Img], Img, name='HOG_F') 92 | pset.addPrimitive(fe_fs.mixconadd, [Img, Float3, Img, Float3], Img, name='W_Add') 93 | pset.addPrimitive(fe_fs.mixconsub, [Img, Float3, Img, Float3], Img, name='W_Sub') 94 | pset.addPrimitive(fe_fs.sqrt, [Img], Img, name='Sqrt') 95 | pset.addPrimitive(fe_fs.relu, [Img], Img, name='ReLU') 96 | # Terminals 97 | pset.renameArguments(ARG0='Image') 98 | pset.addEphemeralConstant('Singma', lambda: random.randint(1, 4), Int1) 99 | pset.addEphemeralConstant('Order', lambda: random.randint(0, 3), Int2) 100 | pset.addEphemeralConstant('Theta', lambda: random.randint(0, 8), Float1) 101 | pset.addEphemeralConstant('Frequency', lambda: random.randint(0, 5), Float2) 102 | pset.addEphemeralConstant('n', lambda: 
round(random.random(), 3), Float3) 103 | pset.addEphemeralConstant('KernelSize', lambda: random.randrange(2, 5, 2), Int3) 104 | 105 | ## 106 | creator.create("FitnessMax", base.Fitness, weights=(1.0,)) 107 | creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax) 108 | 109 | toolbox = base.Toolbox() 110 | toolbox.register("expr", gp_restrict.genHalfAndHalfMD, pset=pset, min_=initialMinDepth, max_=initialMaxDepth) 111 | toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr) 112 | toolbox.register("population", tools.initRepeat, list, toolbox.individual) 113 | toolbox.register("compile", gp.compile, pset=pset) 114 | toolbox.register("mapp", map) 115 | 116 | def evalTrain(individual): 117 | try: 118 | func = toolbox.compile(expr=individual) 119 | train_tf = [] 120 | for i in range(0, len(y_train)): 121 | train_tf.append(numpy.asarray(func(x_train[i, :, :]))) 122 | train_tf = numpy.asarray(train_tf, dtype=float) 123 | min_max_scaler = preprocessing.MinMaxScaler() 124 | train_norm = min_max_scaler.fit_transform(train_tf) 125 | lsvm = LinearSVC() 126 | accuracy = round(100 * cross_val_score(lsvm, train_norm, y_train, cv=5).mean(), 2) 127 | except: 128 | accuracy = 0 129 | return accuracy, 130 | 131 | # genetic operator 132 | toolbox.register("evaluate", evalTrain) 133 | toolbox.register("select", tools.selTournament, tournsize=7) 134 | toolbox.register("selectElitism", tools.selBest) 135 | toolbox.register("mate", gp.cxOnePoint) 136 | toolbox.register("expr_mut", gp_restrict.genFull, min_=0, max_=2) 137 | toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset) 138 | # toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 139 | # toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 140 | 141 | def GPMain(randomSeeds): 142 | random.seed(randomSeeds) 143 | 144 | pop = toolbox.population(population) 145 | hof = tools.HallOfFame(10) 146 | log = tools.Logbook() 147 | stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values) 148 | stats_size_tree = tools.Statistics(key=len) 149 | mstats = tools.MultiStatistics(fitness=stats_fit, size_tree=stats_size_tree) 150 | mstats.register("avg", numpy.mean) 151 | mstats.register("std", numpy.std) 152 | mstats.register("min", numpy.min) 153 | mstats.register("max", numpy.max) 154 | log.header = ["gen", "evals"] + mstats.fields 155 | 156 | pop, log = evalGP.eaSimple(pop, toolbox, cxProb, mutProb, elitismProb, generation, 157 | stats=mstats, halloffame=hof, verbose=True) 158 | 159 | return pop, log, hof 160 | 161 | 162 | def evalTest(toolbox, individual, trainData, trainLabel, test, testL): 163 | func = toolbox.compile(expr=individual) 164 | train_tf = [] 165 | test_tf = [] 166 | for i in range(0, len(trainLabel)): 167 | train_tf.append(numpy.asarray(func(trainData[i, :, :]))) 168 | for j in range(0, len(testL)): 169 | test_tf.append(numpy.asarray(func(test[j, :, :]))) 170 | train_tf = numpy.asarray(train_tf, dtype=float) 171 | test_tf = numpy.asarray(test_tf, dtype=float) 172 | min_max_scaler = preprocessing.MinMaxScaler() 173 | train_norm = min_max_scaler.fit_transform(train_tf) 174 | test_norm = min_max_scaler.transform(test_tf) 175 | lsvm= LinearSVC() 176 | lsvm.fit(train_norm, trainLabel) 177 | accuracy = round(100*lsvm.score(test_norm, testL),2) 178 | return numpy.asarray(train_tf), numpy.asarray(test_tf), trainLabel, testL, accuracy 179 | 180 | 181 | if __name__ == "__main__": 182 | beginTime = 
time.process_time() 183 | pop, log, hof = GPMain(randomSeeds) 184 | endTime = time.process_time() 185 | trainTime = endTime - beginTime 186 | 187 | train_tf, test_tf, trainLabel, testL, testResults = evalTest(toolbox, hof[0], x_train, y_train, x_test, y_test) 188 | testTime = time.process_time() - endTime 189 | saveFile.saveAllResults(randomSeeds, dataSetName, hof, trainTime, testResults, log) 190 | 191 | print(testResults) 192 | print(train_tf.shape, test_tf.shape) 193 | print(hof[0]) 194 | print('End') 195 | -------------------------------------------------------------------------------- /FlexGP/evalGP_fgp.py: -------------------------------------------------------------------------------- 1 | import random 2 | from deap import tools 3 | from collections import defaultdict 4 | 5 | def pop_compare(ind1, ind2): 6 | # List all available primitive types in each individual 7 | types1 = defaultdict(list) 8 | types2 = defaultdict(list) 9 | for idx, node in enumerate(ind1[1:],1): 10 | types1[node.ret].append(idx) 11 | for idx, node in enumerate(ind2[1:],1): 12 | types2[node.ret].append(idx) 13 | return types1==types2 14 | 15 | def varAnd(population, toolbox, cxpb, mutpb): 16 | offspring = [toolbox.clone(ind) for ind in population] 17 | new_cxpb=cxpb/(cxpb+mutpb) 18 | i = 1 19 | while i < len(offspring): 20 | if random.random() < new_cxpb: 21 | if (offspring[i - 1] == offspring[i]) or pop_compare(offspring[i - 1], offspring[i]): 22 | offspring[i - 1], = toolbox.mutate(offspring[i - 1]) 23 | offspring[i], = toolbox.mutate(offspring[i]) 24 | else: 25 | offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i]) 26 | del offspring[i - 1].fitness.values, offspring[i].fitness.values 27 | i = i + 2 28 | else: 29 | offspring[i], = toolbox.mutate(offspring[i]) 30 | del offspring[i].fitness.values 31 | i = i + 1 32 | return offspring 33 | 34 | 35 | def eaSimple(population, toolbox, cxpb, mutpb, elitpb, ngen , stats=None, 36 | halloffame=None, verbose=__debug__): 37 | logbook = tools.Logbook() 38 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else []) 39 | # Evaluate the individuals with an invalid fitness 40 | invalid_ind = [ind for ind in population if not ind.fitness.valid] 41 | fitnesses = toolbox.mapp(toolbox.evaluate, invalid_ind) 42 | for ind, fit in zip(population, fitnesses): 43 | ind.fitness.values = fit 44 | 45 | halloffame.update(population) 46 | hof_store = tools.HallOfFame(5 * len(population)) 47 | hof_store.update(population) 48 | record = stats.compile(population) if stats else {} 49 | logbook.record(gen=0, nevals=len(population), **record) 50 | print(logbook.stream) 51 | for gen in range(1, ngen + 1): 52 | # Select the next generation individuals by elitism 53 | elitismNum=int(elitpb * len(population)) 54 | population_for_eli=[toolbox.clone(ind) for ind in population] 55 | offspringE = toolbox.selectElitism(population_for_eli, k=elitismNum) 56 | # Select the next generation individuals for crossover and mutation 57 | offspring = toolbox.select(population, len(population)-elitismNum) 58 | # Vary the pool of individuals 59 | offspring = varAnd(offspring, toolbox, cxpb, mutpb) 60 | # add offspring from elitism into current offspring 61 | # generate the next generation individuals 62 | 63 | # Evaluate the individuals with an invalid fitness 64 | for i in offspring: 65 | ind = 0 66 | while ind 1: 125 | size = args[1] 126 | else: 127 | size=3 128 | x = ndimage.maximum_filter(x,size) 129 | return x 130 | 131 | #median_filter 132 | def medianf(*args): 133 | """ 134 
| :type args: arguments and filter size 135 | """ 136 | x = args[0] 137 | if len(args) > 1: 138 | size = args[1] 139 | else: 140 | size=3 141 | x = ndimage.median_filter(x,size) 142 | return x 143 | 144 | #mean_filter 145 | def meanf(*args): 146 | """ 147 | :type args: arguments and filter size 148 | """ 149 | x = args[0] 150 | if len(args) > 1: 151 | size = args[1] 152 | else: 153 | size=3 154 | x = ndimage.convolve(x, numpy.full((3, 3), 1 / (size * size))) 155 | return x 156 | 157 | #minimum_filter 158 | def minf(*args): 159 | """ 160 | :type args: arguments and filter size 161 | """ 162 | x = args[0] 163 | if len(args) > 1: 164 | size = args[1] 165 | else: 166 | size=3 167 | x=ndimage.minimum_filter(x,size) 168 | return x 169 | 170 | def lbp(image): 171 | # 'uniform','default','ror','var' 172 | try: 173 | lbp = local_binary_pattern(image, 8, 1.5, method='nri_uniform') 174 | lbp = np.divide(lbp,59) 175 | except: lbp = image 176 | return lbp 177 | 178 | 179 | def hog_feature(image): 180 | try: 181 | img, realImage = hog(image, orientations=9, pixels_per_cell=(8, 8), 182 | cells_per_block=(3, 3), block_norm='L2-Hys', visualize=True, 183 | transform_sqrt=False, feature_vector=True) 184 | return realImage 185 | except: 186 | return image 187 | 188 | def mis_match(img1,img2): 189 | w1,h1=img1.shape 190 | w2,h2=img2.shape 191 | w=min(w1,w2) 192 | h=min(h1,h2) 193 | return img1[0:w,0:h],img2[0:w,0:h] 194 | 195 | def mixconadd(img1, w1,img2, w2): 196 | img11,img22=mis_match(img1,img2) 197 | return numpy.add(img11*w1,img22*w2) 198 | 199 | def mixconsub(img1, w1,img2, w2): 200 | img11,img22=mis_match(img1,img2) 201 | return numpy.subtract(img11*w1,img22*w2) 202 | 203 | def sqrt(left): 204 | with numpy.errstate(divide='ignore',invalid='ignore'): 205 | x = numpy.sqrt(left,) 206 | if isinstance(x, numpy.ndarray): 207 | x[numpy.isinf(x)] = 1 208 | x[numpy.isnan(x)] = 1 209 | elif numpy.isinf(x) or numpy.isnan(x): 210 | x = 1 211 | return x 212 | 213 | def relu(left): 214 | return (abs(left)+left)/2 215 | 216 | def maxP(left, kel1, kel2): 217 | try: 218 | current = skimage.measure.block_reduce(left,(kel1,kel2),numpy.max) 219 | except ValueError: 220 | current=left 221 | return current 222 | 223 | -------------------------------------------------------------------------------- /FlexGP/gp_restrict.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sys 3 | import warnings 4 | from inspect import isclass 5 | 6 | # Define the name of type for any types. 7 | __type__ = object 8 | ###################################### 9 | # GP Program generation functions # 10 | ###################################### 11 | def genFull(pset, min_, max_, type_=None): 12 | """Generate an expression where each leaf has a the same depth 13 | between *min* and *max*. 14 | 15 | :param pset: Primitive set from which primitives are selected. 16 | :param min_: Minimum height of the produced trees. 17 | :param max_: Maximum Height of the produced trees. 18 | :param type_: The type that should return the tree when called, when 19 | :obj:`None` (default) no return type is enforced. 20 | :returns: A full tree with all leaves at the same depth. 
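Example (illustrative): expr = genFull(pset, 2, 4) returns the nodes of a tree in prefix
order; wrapping it with deap.gp.PrimitiveTree(expr) gives a tree whose leaves all sit at
one randomly chosen depth between 2 and 4.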
21 | """ 22 | def condition(height, depth): 23 | """Expression generation stops when the depth is equal to height.""" 24 | return depth == height 25 | #print('it works', pset) 26 | return generate(pset, min_, max_, condition, type_) 27 | 28 | def genGrow(pset, min_, max_, type_=None): 29 | """Generate an expression where each leaf might have a different depth 30 | between *min* and *max*. 31 | 32 | :param pset: Primitive set from which primitives are selected. 33 | :param min_: Minimum height of the produced trees. 34 | :param max_: Maximum Height of the produced trees. 35 | :param type_: The type that should return the tree when called, when 36 | :obj:`None` (default) no return type is enforced. 37 | :returns: A grown tree with leaves at possibly different depths. 38 | """ 39 | def condition(height, depth): 40 | """Expression generation stops when the depth is equal to height 41 | or when it is randomly determined that a a node should be a terminal. 42 | """ 43 | return depth == height or depth >= min_ 44 | return generate(pset, min_, max_, condition, type_) 45 | 46 | def genHalfAndHalf(pset, min_, max_, type_=None): 47 | """Generate an expression with a PrimitiveSet *pset*. 48 | Half the time, the expression is generated with :func:`~deap.gp.genGrow`, 49 | the other half, the expression is generated with :func:`~deap.gp.genFull`. 50 | 51 | :param pset: Primitive set from which primitives are selected. 52 | :param min_: Minimum height of the produced trees. 53 | :param max_: Maximum Height of the produced trees. 54 | :param type_: The type that should return the tree when called, when 55 | :obj:`None` (default) no return type is enforced. 56 | :returns: Either, a full or a grown tree. 57 | """ 58 | method = random.choice((genGrow, genFull)) 59 | #print(method) 60 | return method(pset, min_, max_, type_) 61 | 62 | def genRamped(pset, min_, max_, type_=None): 63 | """ 64 | .. deprecated:: 1.0 65 | The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead. 66 | """ 67 | warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.", 68 | FutureWarning) 69 | return genHalfAndHalf(pset, min_, max_, type_) 70 | 71 | def generate(pset, min_, max_, condition, type_=__type__): 72 | """Generate a Tree as a list of list. The tree is build 73 | from the root to the leaves, and it stop growing when the 74 | condition is fulfilled. 75 | :param pset: A primitive set from wich to select primitives of the trees. 76 | :param min_: Minimum height of the produced trees. 77 | :param max_: Maximum Height of the produced trees. 78 | :param condition: The condition is a function that takes two arguments, 79 | the height of the tree to build and the current 80 | depth in the tree. 81 | :param type_: The type that should return the tree when called, when 82 | :obj:`None` (default) no return type is enforced. 83 | :returns: A grown tree with leaves at possibly different depths 84 | dependending on the condition function. 85 | 86 | 87 | DUMMY NODE ISSUES 88 | 89 | DEAP will only place terminals if we're at the bottom of a branch. 90 | This creates two issues: 91 | 1. A primitive that takes other primitives as inputs could be placed at the 92 | second to last layer. 93 | SOLUTION: You need to allow the tree to end whenever the height condition is met, 94 | so create "dummy" terminals for every type possible in the tree. 95 | 2. A primitive that takes terminals as inputs could be placed above the second to 96 | last layer. 
97 | SOLUTION: You need to allow the tree to continue extending the branch until the 98 | height condition is met, so create "dummy" primitives that just pass 99 | through the terminal types. 100 | 101 | These "dummy" terminals and "dummy" primitives introduce unnecessary and sometimes 102 | nonsensical solutions into populations. These "dummy" nodes can be eliminated 103 | if the height requirement is relaxed. 104 | 105 | 106 | HOW TO PREVENT DUMMY NODE ISSUES 107 | 108 | Relaxing the height requirement: 109 | When at the bottom of the branch, check for terminals first, then primitives. 110 | When checking for primitives, skirt the height requirement by adjusting 111 | the branch depth to be the second to last layer of the tree. 112 | If neither a terminal or primitive fits this node, then throw an error. 113 | When not at the bottom of the branch, check for primitives first, then terminals. 114 | 115 | Issue with relaxing the height requirement: 116 | 1. Endless loops are possible when primitive sets have any type loops. 117 | A primitive with an output of one type may not take an input type of 118 | itself or a parent type. 119 | SOLUTION: A primitive set must be well-designed to prevent those type loops. 120 | 121 | """ 122 | if type_ is None: 123 | type_ = pset.ret 124 | expr = [] 125 | height = random.randint(min_, max_) 126 | stack = [(0, type_)] 127 | #print(len(stack)) 128 | #print(pset.terminals) 129 | #print(pset.primitives) 130 | while len(stack) != 0: 131 | ## print(len(expr)) 132 | if (len(expr)>60): 133 | expr = [] 134 | #if type_ is None: 135 | type_ = pset.ret 136 | stack = [(0, type_)] 137 | ## print(depth, type_) 138 | #depth, type_ = stack.pop() 139 | height = random.randint(min_, max_) 140 | depth, type_ = stack.pop() 141 | if condition(height, depth): 142 | # Try finding a terminal 143 | try: 144 | term = random.choice(pset.terminals[type_]) 145 | #print('term',term) 146 | if isclass(term): 147 | term = term() 148 | expr.append(term) 149 | # No terminal fits 150 | except: 151 | # So pull the depth back one layer, and start looking for primitives 152 | try: 153 | depth -= 1 154 | prim = random.choice(pset.primitives[type_]) 155 | #print('prim',prim) 156 | expr.append(prim) 157 | for arg in reversed(prim.args): 158 | stack.append((depth, arg)) 159 | 160 | # No primitive fits, either - that's an error 161 | except IndexError: 162 | _, _, traceback = sys.exc_info() 163 | raise IndexError("The gp.generate function tried to add " \ 164 | "a primitive of type '%s', but there is " \ 165 | "none available." % (type_,), traceback) 166 | # Not at the bottom of the tree 167 | else: 168 | # Check for primitives 169 | try: 170 | prim = random.choice(pset.primitives[type_]) 171 | expr.append(prim) 172 | for arg in reversed(prim.args): 173 | stack.append((depth + 1, arg)) 174 | # No primitive fits 175 | except: 176 | # So check for terminals 177 | try: 178 | term = random.choice(pset.terminals[type_]) 179 | 180 | # No terminal fits, either - that's an error 181 | except IndexError: 182 | _, _, traceback = sys.exc_info() 183 | raise IndexError("The gp.generate function tried to add " \ 184 | "a terminal of type '%s', but there is " \ 185 | "none available." 
% (type_,), traceback) 186 | if isclass(term): 187 | term = term() 188 | expr.append(term) 189 | #print(len(expr)) 190 | return expr 191 | 192 | 193 | def generateMD(pset, min_, max_, condition, type_=__type__): # like generate(), but without the in-loop size reset; callers cap the size instead 194 | if type_ is None: 195 | type_ = pset.ret 196 | expr = [] 197 | height = random.randint(min_, max_) 198 | stack = [(0, type_)] 199 | ## print(len(stack)) 200 | #print(pset.terminals) 201 | #print(pset.primitives) 202 | while len(stack) != 0: 203 | depth, type_ = stack.pop() 204 | if condition(height, depth): 205 | # Try finding a terminal 206 | try: 207 | term = random.choice(pset.terminals[type_]) 208 | #print('term',term) 209 | if isclass(term): 210 | term = term() 211 | expr.append(term) 212 | # No terminal fits 213 | except: 214 | # So pull the depth back one layer, and start looking for primitives 215 | try: 216 | depth -= 1 217 | prim = random.choice(pset.primitives[type_]) 218 | #print('prim',prim) 219 | expr.append(prim) 220 | for arg in reversed(prim.args): 221 | stack.append((depth, arg)) 222 | 223 | # No primitive fits, either - that's an error 224 | except IndexError: 225 | _, _, traceback = sys.exc_info() 226 | raise IndexError("The gp.generate function tried to add " \ 227 | "a primitive of type '%s', but there is " \ 228 | "none available." % (type_,), traceback) 229 | # Not at the bottom of the tree 230 | else: 231 | # Check for primitives 232 | try: 233 | prim = random.choice(pset.primitives[type_]) 234 | expr.append(prim) 235 | for arg in reversed(prim.args): 236 | stack.append((depth + 1, arg)) 237 | # No primitive fits 238 | except: 239 | # So check for terminals 240 | try: 241 | term = random.choice(pset.terminals[type_]) 242 | 243 | # No terminal fits, either - that's an error 244 | except IndexError: 245 | _, _, traceback = sys.exc_info() 246 | raise IndexError("The gp.generate function tried to add " \ 247 | "a terminal of type '%s', but there is " \ 248 | "none available." % (type_,), traceback) 249 | if isclass(term): 250 | term = term() 251 | expr.append(term) 252 | #print(len(expr)) 253 | return expr 254 | 255 | def genHalfAndHalfMD(pset, min_, max_, type_=None): 256 | expr = genHalfAndHalf(pset, min_, max_, type_=type_) 257 | # Rejection sampling: keep regenerating until the tree has at most 60 nodes 259 | while len(expr) > 60: 260 | expr = genHalfAndHalf(pset, min_, max_, type_=type_) 263 | return expr 264 | 265 | def genFullMD(pset, min_, max_, type_=None): 266 | """Generate an expression where each leaf has the same depth 267 | between *min* and *max*. 268 | 269 | :param pset: Primitive set from which primitives are selected. 270 | :param min_: Minimum height of the produced trees. 271 | :param max_: Maximum height of the produced trees. 272 | :param type_: The type that the tree should return when called, when 273 | :obj:`None` (default) no return type is enforced. 274 | :returns: A full tree with all leaves at the same depth. 
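    Usage sketch (for illustration only; `pset` stands for any PrimitiveSetTyped,
    mirroring how COGP_main.py wires these generators into DEAP):

        from deap import gp
        expr = genFullMD(pset, 2, 6)     # a flat list of primitive/terminal nodes
        tree = gp.PrimitiveTree(expr)    # wrap the node list as a DEAP tree
        func = gp.compile(tree, pset)    # compile the tree into a callable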
275 | """ 276 | def condition(height, depth): 277 | """Expression generation stops when the depth is equal to height.""" 278 | return depth == height 279 | #print('it works', pset)] 280 | expr=generateMD(pset, min_, max_, condition, type_) 281 | ## print(len(expr)) 282 | ## while len(expr)>20: 283 | ## expr=generateMD(pset, min_, max_, condition, type_) 284 | return expr 285 | -------------------------------------------------------------------------------- /FlexGP/sift_features.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy import signal 3 | # sift features 4 | Nangles = 8 5 | Nbins = 4 6 | Nsamples = Nbins ** 2 7 | alpha = 9.0 8 | angles = np.array(range(Nangles)) * 2.0 * np.pi / Nangles 9 | 10 | 11 | def gen_dgauss(sigma): 12 | ''' 13 | generating a derivative of Gauss filter on both the X and Y 14 | direction. 15 | ''' 16 | fwid = np.int(2 * np.ceil(sigma)) 17 | G = np.array(range(-fwid, fwid + 1)) ** 2 18 | G = G.reshape((G.size, 1)) + G 19 | G = np.exp(- G / 2.0 / sigma / sigma) 20 | G /= np.sum(G) 21 | GH, GW = np.gradient(G) 22 | GH *= 2.0 / np.sum(np.abs(GH)) 23 | GW *= 2.0 / np.sum(np.abs(GW)) 24 | #print(GH,GW) 25 | return GH, GW 26 | 27 | 28 | class DsiftExtractor: 29 | ''' 30 | The class that does dense sift feature extractor. 31 | Sample Usage: 32 | extractor = DsiftExtractor(gridSpacing,patchSize,[optional params]) 33 | feaArr,positions = extractor.process_image(Image) 34 | ''' 35 | 36 | def __init__(self, gridSpacing, patchSize,nrml_thres=1.0, 37 | sigma_edge=0.8, 38 | sift_thres=0.2): 39 | ''' 40 | gridSpacing: the spacing for sampling dense descriptors 41 | patchSize: the size for each sift patch 42 | nrml_thres: low contrast normalization threshold 43 | sigma_edge: the standard deviation for the gaussian smoothing 44 | before computing the gradient 45 | sift_thres: sift thresholding (0.2 works well based on 46 | Lowe's SIFT paper) 47 | ''' 48 | self.gS = gridSpacing 49 | self.pS = patchSize 50 | self.nrml_thres = nrml_thres 51 | self.sigma = sigma_edge 52 | self.sift_thres = sift_thres 53 | # compute the weight contribution map 54 | sample_res = self.pS / np.double(Nbins) 55 | sample_p = np.array(range(self.pS)) 56 | sample_ph, sample_pw = np.meshgrid(sample_p, sample_p) 57 | sample_ph.resize(sample_ph.size) 58 | sample_pw.resize(sample_pw.size) 59 | bincenter = np.array(range(1, Nbins * 2, 2)) / 2.0 / Nbins * self.pS - 0.5 60 | bincenter_h, bincenter_w = np.meshgrid(bincenter, bincenter) 61 | bincenter_h.resize((bincenter_h.size, 1)) 62 | bincenter_w.resize((bincenter_w.size, 1)) 63 | dist_ph = abs(sample_ph - bincenter_h) 64 | dist_pw = abs(sample_pw - bincenter_w) 65 | weights_h = dist_ph / sample_res 66 | weights_w = dist_pw / sample_res 67 | weights_h = (1 - weights_h) * (weights_h <= 1) 68 | weights_w = (1 - weights_w) * (weights_w <= 1) 69 | # weights is the contribution of each pixel to the corresponding bin center 70 | self.weights = weights_h * weights_w 71 | # pyplot.imshow(self.weights) 72 | # pyplot.show() 73 | 74 | def process_image(self, image, positionNormalize=True,verbose=True): 75 | ''' 76 | processes a single image, return the locations 77 | and the values of detected SIFT features. 78 | image: a M*N image which is a numpy 2D array. If you 79 | pass a color image, it will automatically be converted 80 | to a grayscale image. 81 | positionNormalize: whether to normalize the positions 82 | to [0,1]. 
If False, the pixel-based positions of the 83 | top-left corners of the patches are returned. 84 | 85 | Return values: 86 | feaArr: the feature array, each row is a feature 87 | positions: the positions of the features 88 | ''' 89 | 90 | image = image.astype(np.double) 91 | if image.ndim == 3: 92 | # we do not deal with color images 93 | image = np.mean(image, axis=2) 94 | # compute the grids 95 | H, W = image.shape 96 | gS = self.gS 97 | pS = self.pS 98 | remH = np.mod(H - pS, gS) 99 | remW = np.mod(W - pS, gS) 100 | offsetH = int(remH / 2) 101 | offsetW = int(remW / 2) 102 | gridH, gridW = np.meshgrid(range(offsetH, H - pS + 1, gS), range(offsetW, W - pS + 1, gS)) 103 | gridH = gridH.flatten() 104 | gridW = gridW.flatten() 105 | if verbose: 106 | print('Image: w {}, h {}, gs {}, ps {}, nFea {}'.format(W, H, gS, pS, gridH.size))  # was a Python 2 print statement left broken by conversion 109 | feaArr = self.calculate_sift_grid(image, gridH, gridW) 110 | feaArr = self.normalize_sift(feaArr) 111 | if positionNormalize: 112 | positions = np.vstack((gridH / np.double(H), gridW / np.double(W))) 113 | else: 114 | positions = np.vstack((gridH, gridW)) 115 | return feaArr, positions 116 | 117 | def calculate_sift_grid(self, image, gridH, gridW): 118 | ''' 119 | This function calculates the unnormalized SIFT features. 120 | It is called by process_image(). 121 | ''' 122 | H, W = image.shape 123 | Npatches = gridH.size 124 | feaArr = np.zeros((Npatches, Nsamples * Nangles)) 125 | 126 | # calculate gradient 127 | GH, GW = gen_dgauss(self.sigma) 128 | IH = signal.convolve2d(image, GH, mode='same') 129 | IW = signal.convolve2d(image, GW, mode='same') 130 | Imag = np.sqrt(IH ** 2 + IW ** 2) 131 | Itheta = np.arctan2(IH, IW) 132 | Iorient = np.zeros((Nangles, H, W)) 133 | for i in range(Nangles): 134 | Iorient[i] = Imag * np.maximum(np.cos(Itheta - angles[i]) ** alpha, 0) 135 | # pyplot.imshow(Iorient[i]) 136 | # pyplot.show() 137 | for i in range(Npatches): 138 | currFeature = np.zeros((Nangles, Nsamples)) 139 | for j in range(Nangles): 140 | currFeature[j] = np.dot(self.weights, 141 | Iorient[j, gridH[i]:gridH[i] + self.pS, gridW[i]:gridW[i] + self.pS].flatten()) 142 | feaArr[i] = currFeature.flatten() 143 | return feaArr 144 | 145 | def normalize_sift(self, feaArr): 146 | ''' 147 | This function does SIFT feature normalization 148 | following David Lowe's definition (normalize length -> 149 | thresholding at 0.2 -> renormalize length) 150 | ''' 151 | siftlen = np.sqrt(np.sum(feaArr ** 2, axis=1)) 152 | hcontrast = (siftlen >= self.nrml_thres) 153 | siftlen[siftlen < self.nrml_thres] = self.nrml_thres 154 | # normalize with contrast thresholding 155 | feaArr /= siftlen.reshape((siftlen.size, 1)) 156 | # suppress large gradients 157 | feaArr[feaArr > self.sift_thres] = self.sift_thres 158 | # renormalize high-contrast ones 159 | feaArr[hcontrast] /= np.sqrt(np.sum(feaArr[hcontrast] ** 2, axis=1)). \ 160 | reshape((feaArr[hcontrast].shape[0], 1)) 161 | return feaArr 162 | 163 | 164 | class SingleSiftExtractor(DsiftExtractor): 165 | ''' 166 | A simple wrapper class that runs feature extraction treating 167 | the whole image as a single local image patch. 
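    Minimal usage sketch (for illustration; `img` is a hypothetical 2-D numpy
    array whose sides are at least patchSize):

        extractor = SingleSiftExtractor(16)
        fea = extractor.process_image(img)   # feature array, 128 values per patch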
168 | ''' 169 | 170 | def __init__(self, patchSize,nrml_thres=1.0, sigma_edge=0.8,sift_thres=0.2): 171 | # simply call the super class __init__ with a large gridSpace 172 | DsiftExtractor.__init__(self, patchSize, patchSize, nrml_thres, sigma_edge, sift_thres) 173 | 174 | def process_image(self, image): 175 | return DsiftExtractor.process_image(self, image, False, False)[0] -------------------------------------------------------------------------------- /FlexGP/strongGPDataType.py: -------------------------------------------------------------------------------- 1 | class Int1: 2 | def __init__(int): 3 | pass 4 | 5 | class Int2: 6 | def __init__(int): 7 | pass 8 | 9 | class Int3: 10 | def __init__(int): 11 | pass 12 | 13 | class Int4: 14 | def __init__(int): 15 | pass 16 | 17 | class Float1: 18 | def __init__(float): 19 | pass 20 | 21 | class Float2: 22 | def __init__(float): 23 | pass 24 | 25 | class Float3: 26 | def __init__(float): 27 | pass 28 | 29 | 30 | class Img: 31 | def __init__(ndarray): 32 | pass 33 | 34 | class Img1: 35 | def __init__(ndarray): 36 | pass 37 | 38 | class Img2: 39 | def __init__(ndarray): 40 | pass 41 | 42 | class Img3: 43 | def __init__(ndarray): 44 | pass 45 | 46 | class Vector: 47 | def __init__(ndarray): 48 | pass 49 | 50 | class Vector1: 51 | def __init__(ndarray): 52 | pass 53 | -------------------------------------------------------------------------------- /IDGP/IDGP_main.py: -------------------------------------------------------------------------------- 1 | # python packages 2 | import random 3 | import time 4 | import operator 5 | import evalGP_main as evalGP 6 | # only for strongly typed GP 7 | import gp_restrict 8 | import numpy as np 9 | # deap package 10 | from deap import base, creator, tools, gp 11 | from strongGPDataType import Int1, Int2, Int3, Img, Region, Vector, Vector1 12 | import feature_function as fe_fs 13 | from sklearn.svm import LinearSVC 14 | from sklearn.model_selection import cross_val_score 15 | from sklearn import preprocessing 16 | 17 | 'FLGP' 18 | 19 | dataSetName='f1' 20 | randomSeeds=2 21 | 22 | x_train = np.load(dataSetName + '_train_data.npy')/ 255.0 23 | y_train = np.load(dataSetName + '_train_label.npy') 24 | x_test = np.load(dataSetName + '_test_data.npy')/ 255.0 25 | y_test = np.load(dataSetName + '_test_label.npy') 26 | 27 | print(x_train.shape) 28 | 29 | # parameters: 30 | population = 100 31 | generation = 50 32 | cxProb = 0.8 33 | mutProb = 0.19 34 | elitismProb = 0.01 35 | totalRuns = 1 36 | initialMinDepth = 2 37 | initialMaxDepth = 6 38 | maxDepth = 8 39 | 40 | bound1, bound2 = x_train[1, :, :].shape 41 | ##GP 42 | 43 | pset = gp.PrimitiveSetTyped('MAIN', [Img], Vector1, prefix='Image') 44 | #Feature concatenation 45 | pset.addPrimitive(fe_fs.root_con, [Vector1, Vector1], Vector1, name='FeaCon') 46 | pset.addPrimitive(fe_fs.root_con, [Vector, Vector], Vector1, name='FeaCon2') 47 | pset.addPrimitive(fe_fs.root_con, [Vector, Vector, Vector], Vector1, name='FeaCon3') 48 | # Global feature extraction 49 | pset.addPrimitive(fe_fs.all_dif, [Img], Vector, name='Global_DIF') 50 | pset.addPrimitive(fe_fs.all_histogram, [Img], Vector, name='Global_Histogram') 51 | pset.addPrimitive(fe_fs.global_hog, [Img], Vector, name='Global_HOG') 52 | pset.addPrimitive(fe_fs.all_lbp, [Img], Vector, name='Global_uLBP') 53 | pset.addPrimitive(fe_fs.all_sift, [Img], Vector, name='Global_SIFT') 54 | # Local feature extraction 55 | pset.addPrimitive(fe_fs.all_dif, [Region], Vector, name='Local_DIF') 56 | pset.addPrimitive(fe_fs.all_histogram, 
[Region], Vector, name='Local_Histogram') 57 | pset.addPrimitive(fe_fs.local_hog, [Region], Vector, name='Local_HOG') 58 | pset.addPrimitive(fe_fs.all_lbp, [Region], Vector, name='Local_uLBP') 59 | pset.addPrimitive(fe_fs.all_sift, [Region], Vector, name='Local_SIFT') 60 | # Region detection operators 61 | pset.addPrimitive(fe_fs.regionS, [Img, Int1, Int2, Int3], Region, name='Region_S') 62 | pset.addPrimitive(fe_fs.regionR, [Img, Int1, Int2, Int3, Int3], Region, name='Region_R') 63 | # Terminals 64 | pset.renameArguments(ARG0='Grey') 65 | pset.addEphemeralConstant('X', lambda: random.randint(0, bound1 - 20), Int1) 66 | pset.addEphemeralConstant('Y', lambda: random.randint(0, bound2 - 20), Int2) 67 | pset.addEphemeralConstant('Size', lambda: random.randint(20, 51), Int3) 68 | 69 | #fitnesse evaluaiton 70 | creator.create("FitnessMax", base.Fitness, weights=(1.0,)) 71 | creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax) 72 | 73 | toolbox = base.Toolbox() 74 | toolbox.register("expr", gp_restrict.genHalfAndHalfMD, pset=pset, min_=initialMinDepth, max_=initialMaxDepth) 75 | toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr) 76 | toolbox.register("population", tools.initRepeat, list, toolbox.individual) 77 | toolbox.register("compile", gp.compile, pset=pset) 78 | toolbox.register("mapp", map) 79 | 80 | def evalTrain(individual): 81 | # print(individual) 82 | func = toolbox.compile(expr=individual) 83 | train_tf = [] 84 | for i in range(0, len(y_train)): 85 | train_tf.append(np.asarray(func(x_train[i, :, :]))) 86 | min_max_scaler = preprocessing.MinMaxScaler() 87 | train_norm = min_max_scaler.fit_transform(np.asarray(train_tf)) 88 | # print(train_norm.shape) 89 | lsvm = LinearSVC(max_iter=100) 90 | accuracy = round(100 * cross_val_score(lsvm, train_norm, y_train, cv=3).mean(), 2) 91 | return accuracy, 92 | 93 | # genetic operator 94 | toolbox.register("evaluate", evalTrain) 95 | toolbox.register("select", tools.selTournament, tournsize=5) 96 | toolbox.register("selectElitism", tools.selBest) 97 | toolbox.register("mate", gp.cxOnePoint) 98 | toolbox.register("expr_mut", gp_restrict.genFull, min_=0, max_=2) 99 | toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset) 100 | toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 101 | toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 102 | 103 | def GPMain(randomSeeds): 104 | random.seed(randomSeeds) 105 | 106 | pop = toolbox.population(population) 107 | hof = tools.HallOfFame(10) 108 | log = tools.Logbook() 109 | stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values) 110 | stats_size_tree = tools.Statistics(key=len) 111 | mstats = tools.MultiStatistics(fitness=stats_fit, size_tree=stats_size_tree) 112 | mstats.register("avg", np.mean) 113 | mstats.register("std", np.std) 114 | mstats.register("min", np.min) 115 | mstats.register("max", np.max) 116 | log.header = ["gen", "evals"] + mstats.fields 117 | 118 | pop, log = evalGP.eaSimple(pop, toolbox, cxProb, mutProb, elitismProb, generation, 119 | stats=mstats, halloffame=hof, verbose=True) 120 | 121 | return pop, log, hof 122 | 123 | def evalTest(individual): 124 | func = toolbox.compile(expr=individual) 125 | train_tf = [] 126 | test_tf = [] 127 | for i in range(0, len(y_train)): 128 | train_tf.append(np.asarray(func(x_train[i, :, :]))) 129 | for j in range(0, len(y_test)): 130 | test_tf.append(np.asarray(func(x_test[j, :, 
:]))) 131 | train_tf = np.asarray(train_tf) 132 | test_tf = np.asarray(test_tf) 133 | min_max_scaler = preprocessing.MinMaxScaler() 134 | train_norm = min_max_scaler.fit_transform(np.asarray(train_tf)) 135 | test_norm = min_max_scaler.transform(np.asarray(test_tf)) 136 | lsvm= LinearSVC() 137 | lsvm.fit(train_norm, y_train) 138 | accuracy = round(100*lsvm.score(test_norm, y_test),2) 139 | return train_tf.shape[1], accuracy 140 | 141 | if __name__ == "__main__": 142 | beginTime = time.process_time() 143 | pop, log, hof = GPMain(randomSeeds) 144 | endTime = time.process_time() 145 | trainTime = endTime - beginTime 146 | 147 | num_features, testResults = evalTest(hof[0]) 148 | endTime1 = time.process_time() 149 | testTime = endTime1 - endTime 150 | 151 | print('Best individual ', hof[0]) 152 | print('Test results ', testResults) 153 | print('Train time ', trainTime) 154 | print('Test time ', testTime) 155 | print('End') 156 | -------------------------------------------------------------------------------- /IDGP/evalGP_main.py: -------------------------------------------------------------------------------- 1 | import random 2 | from deap import tools 3 | 4 | def varAnd(population, toolbox, cxpb, mutpb): 5 | offspring = [toolbox.clone(ind) for ind in population] 6 | new_cxpb=cxpb/(cxpb+mutpb) 7 | # Apply crossover and mutation on the offspring 8 | i = 1 9 | while i < len(offspring): 10 | if random.random() < new_cxpb: 11 | if (offspring[i - 1] == offspring[i]): 12 | offspring[i - 1], = toolbox.mutate(offspring[i - 1]) 13 | offspring[i], = toolbox.mutate(offspring[i]) 14 | else: 15 | offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i]) 16 | del offspring[i - 1].fitness.values, offspring[i].fitness.values 17 | i = i + 2 18 | else: 19 | offspring[i], = toolbox.mutate(offspring[i]) 20 | del offspring[i].fitness.values 21 | i = i + 1 22 | return offspring 23 | 24 | 25 | def eaSimple(population, toolbox, cxpb, mutpb, elitpb, ngen , stats=None, 26 | halloffame=None, verbose=__debug__): 27 | logbook = tools.Logbook() 28 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else []) 29 | fitnesses = toolbox.mapp(toolbox.evaluate, population) 30 | for ind, fit in zip(population, fitnesses): 31 | ind.fitness.values = fit 32 | 33 | if halloffame is not None: 34 | halloffame.update(population) 35 | hof_store = tools.HallOfFame(5 * len(population)) 36 | hof_store.update(population) 37 | record = stats.compile(population) if stats else {} 38 | logbook.record(gen=0, nevals=len(population), **record) 39 | if verbose: 40 | print(logbook.stream) 41 | for gen in range(1, ngen + 1): 42 | #Select the next generation individuals by elitism 43 | elitismNum=int(elitpb * len(population)) 44 | population_for_eli=[toolbox.clone(ind) for ind in population] 45 | offspringE = toolbox.selectElitism(population_for_eli, k=elitismNum) 46 | offspring = toolbox.select(population, len(population)-elitismNum) 47 | # Vary the pool of individuals 48 | offspring = varAnd(offspring, toolbox, cxpb, mutpb) 49 | for i in offspring: 50 | ind = 0 51 | while ind= min_ 44 | return generate(pset, min_, max_, condition, type_) 45 | 46 | def genHalfAndHalf(pset, min_, max_, type_=None): 47 | """Generate an expression with a PrimitiveSet *pset*. 48 | Half the time, the expression is generated with :func:`~deap.gp.genGrow`, 49 | the other half, the expression is generated with :func:`~deap.gp.genFull`. 50 | 51 | :param pset: Primitive set from which primitives are selected. 
52 | :param min_: Minimum height of the produced trees. 53 | :param max_: Maximum height of the produced trees. 54 | :param type_: The type that the tree should return when called, when 55 | :obj:`None` (default) no return type is enforced. 56 | :returns: Either a full or a grown tree. 57 | """ 58 | method = random.choice((genGrow, genFull)) 59 | #print(method) 60 | return method(pset, min_, max_, type_) 61 | 62 | def genRamped(pset, min_, max_, type_=None): 63 | """ 64 | .. deprecated:: 1.0 65 | The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead. 66 | """ 67 | warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.", 68 | FutureWarning) 69 | return genHalfAndHalf(pset, min_, max_, type_) 70 | 71 | def generate(pset, min_, max_, condition, type_=__type__): 72 | """Generate a tree as a flat list of nodes. The tree is built 73 | from the root to the leaves, and it stops growing when the 74 | condition is fulfilled. 75 | :param pset: A primitive set from which to select primitives of the trees. 76 | :param min_: Minimum height of the produced trees. 77 | :param max_: Maximum height of the produced trees. 78 | :param condition: The condition is a function that takes two arguments, 79 | the height of the tree to build and the current 80 | depth in the tree. 81 | :param type_: The type that the tree should return when called, when 82 | :obj:`None` (default) no return type is enforced. 83 | :returns: A grown tree with leaves at possibly different depths 84 | depending on the condition function. 85 | 86 | 87 | DUMMY NODE ISSUES 88 | 89 | DEAP will only place terminals if we're at the bottom of a branch. 90 | This creates two issues: 91 | 1. A primitive that takes other primitives as inputs could be placed at the 92 | second-to-last layer. 93 | SOLUTION: You need to allow the tree to end whenever the height condition is met, 94 | so create "dummy" terminals for every type possible in the tree. 95 | 2. A primitive that takes terminals as inputs could be placed above the 96 | second-to-last layer. 97 | SOLUTION: You need to allow the tree to continue extending the branch until the 98 | height condition is met, so create "dummy" primitives that just pass 99 | through the terminal types. 100 | 101 | These "dummy" terminals and "dummy" primitives introduce unnecessary and sometimes 102 | nonsensical solutions into populations. These "dummy" nodes can be eliminated 103 | if the height requirement is relaxed. 104 | 105 | 106 | HOW TO PREVENT DUMMY NODE ISSUES 107 | 108 | Relaxing the height requirement: 109 | When at the bottom of the branch, check for terminals first, then primitives. 110 | When checking for primitives, skirt the height requirement by adjusting 111 | the branch depth to be the second-to-last layer of the tree. 112 | If neither a terminal nor a primitive fits this node, throw an error. 113 | When not at the bottom of the branch, check for primitives first, then terminals. 114 | 115 | Issue with relaxing the height requirement: 116 | 1. Endless loops are possible when primitive sets have any type loops. 117 | A primitive with an output of one type may not take an input type of 118 | itself or a parent type. 119 | SOLUTION: A primitive set must be well-designed to prevent those type loops. 
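    Illustrative sketch of a type loop (hypothetical types, not taken from
    this repository): a primitive set containing only

        pset.addPrimitive(f, [TypeA], TypeB)
        pset.addPrimitive(g, [TypeB], TypeA)

    and no terminal of type TypeA or TypeB can never close a branch:
    finishing a TypeA branch opens a TypeB branch and vice versa, so the
    relaxed generator loops endlessly.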
120 | 121 | """ 122 | if type_ is None: 123 | type_ = pset.ret 124 | expr = [] 125 | height = random.randint(min_, max_) 126 | stack = [(0, type_)] 127 | #print(pset.terminals) 128 | #print(pset.primitives) 129 | while len(stack) != 0: 130 | depth, type_ = stack.pop() 131 | # At the bottom of the tree 132 | if condition(height, depth): 133 | # Try finding a terminal 134 | try: 135 | term = random.choice(pset.terminals[type_]) 136 | #print('term',term) 137 | if isclass(term): 138 | term = term() 139 | expr.append(term) 140 | # No terminal fits 141 | except: 142 | # So pull the depth back one layer, and start looking for primitives 143 | try: 144 | depth -= 1 145 | prim = random.choice(pset.primitives[type_]) 146 | #print('prim',prim) 147 | expr.append(prim) 148 | for arg in reversed(prim.args): 149 | stack.append((depth + 1, arg)) 150 | 151 | # No primitive fits, either - that's an error 152 | except IndexError: 153 | _, _, traceback = sys.exc_info() 154 | raise IndexError("The gp.generate function tried to add " \ 155 | "a primitive of type '%s', but there is " \ 156 | "none available." % (type_,), traceback) 157 | 158 | # Not at the bottom of the tree 159 | else: 160 | # Check for primitives 161 | try: 162 | prim = random.choice(pset.primitives[type_]) 163 | expr.append(prim) 164 | for arg in reversed(prim.args): 165 | stack.append((depth + 1, arg)) 166 | # No primitive fits 167 | except: 168 | # So check for terminals 169 | try: 170 | term = random.choice(pset.terminals[type_]) 171 | 172 | # No terminal fits, either - that's an error 173 | except IndexError: 174 | _, _, traceback = sys.exc_info() 175 | raise IndexError("The gp.generate function tried to add " \ 176 | "a terminal of type '%s', but there is " \ 177 | "none available." % (type_,), traceback) 178 | if isclass(term): 179 | term = term() 180 | expr.append(term) 181 | #print(len(expr)) 182 | return expr 183 | 184 | def genHalfAndHalfMD(pset, min_, max_, type_=None): 185 | expr = genHalfAndHalf(pset, min_, max_, type_=type_) 186 | # Rejection sampling: keep regenerating until the tree has at most 80 nodes 187 | while len(expr) > 80: 188 | expr = genHalfAndHalf(pset, min_, max_, type_=type_) 191 | return expr 192 | 193 | def genFullMD(pset, min_, max_, type_=None): 194 | expr = genFull(pset, min_, max_, type_=type_) 195 | # Rejection sampling: keep regenerating until the tree has at most 80 nodes 197 | while len(expr) > 80: 198 | expr = genFull(pset, min_, max_, type_=type_) 200 | return expr 201 | -------------------------------------------------------------------------------- /IDGP/sift_features.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy import signal 3 | # SIFT feature parameters 4 | Nangles = 8 5 | Nbins = 4 6 | Nsamples = Nbins ** 2 7 | alpha = 9.0 8 | angles = np.array(range(Nangles)) * 2.0 * np.pi / Nangles 9 | 10 | 11 | def gen_dgauss(sigma): 12 | ''' 13 | Generate derivative-of-Gaussian filters along both the X and Y 14 | directions. 15 | ''' 16 | fwid = int(2 * np.ceil(sigma))  # np.int was removed in NumPy 1.24; the builtin int is equivalent here 17 | G = np.array(range(-fwid, fwid + 1)) ** 2 18 | G = G.reshape((G.size, 1)) + G 19 | G = np.exp(- G / 2.0 / sigma / sigma) 20 | G /= np.sum(G) 21 | GH, GW = np.gradient(G) 22 | GH *= 2.0 / np.sum(np.abs(GH)) 23 | GW *= 2.0 / np.sum(np.abs(GW)) 24 | #print(GH,GW) 25 | return GH, GW 26 | 27 | 28 | class DsiftExtractor: 29 | ''' 30 | Dense SIFT feature extractor. 
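    Patches are sampled on a regular grid every gridSpacing pixels, and each
    patchSize x patchSize patch is described by a 128-dimensional SIFT vector
    (Nangles = 8 orientations times Nsamples = 16 spatial bins).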
31 | Sample Usage: 32 | extractor = DsiftExtractor(gridSpacing, patchSize, [optional params]) 33 | feaArr, positions = extractor.process_image(Image) 34 | ''' 35 | 36 | def __init__(self, gridSpacing, patchSize, nrml_thres=1.0, 37 | sigma_edge=0.8, 38 | sift_thres=0.2): 39 | ''' 40 | gridSpacing: the spacing for sampling dense descriptors 41 | patchSize: the size of each SIFT patch 42 | nrml_thres: low-contrast normalization threshold 43 | sigma_edge: the standard deviation for the Gaussian smoothing 44 | before computing the gradient 45 | sift_thres: SIFT thresholding (0.2 works well based on 46 | Lowe's SIFT paper) 47 | ''' 48 | self.gS = gridSpacing 49 | self.pS = patchSize 50 | self.nrml_thres = nrml_thres 51 | self.sigma = sigma_edge 52 | self.sift_thres = sift_thres 53 | # compute the weight contribution map 54 | sample_res = self.pS / np.double(Nbins) 55 | sample_p = np.array(range(self.pS)) 56 | sample_ph, sample_pw = np.meshgrid(sample_p, sample_p) 57 | sample_ph.resize(sample_ph.size) 58 | sample_pw.resize(sample_pw.size) 59 | bincenter = np.array(range(1, Nbins * 2, 2)) / 2.0 / Nbins * self.pS - 0.5 60 | bincenter_h, bincenter_w = np.meshgrid(bincenter, bincenter) 61 | bincenter_h.resize((bincenter_h.size, 1)) 62 | bincenter_w.resize((bincenter_w.size, 1)) 63 | dist_ph = abs(sample_ph - bincenter_h) 64 | dist_pw = abs(sample_pw - bincenter_w) 65 | weights_h = dist_ph / sample_res 66 | weights_w = dist_pw / sample_res 67 | weights_h = (1 - weights_h) * (weights_h <= 1) 68 | weights_w = (1 - weights_w) * (weights_w <= 1) 69 | # weights is the contribution of each pixel to the corresponding bin center 70 | self.weights = weights_h * weights_w 71 | # pyplot.imshow(self.weights) 72 | # pyplot.show() 73 | 74 | def process_image(self, image, positionNormalize=True, verbose=True): 75 | ''' 76 | Process a single image and return the locations 77 | and the values of the detected SIFT features. 78 | image: an M*N image as a numpy 2D array. If you 79 | pass a color image, it will automatically be converted 80 | to a grayscale image. 81 | positionNormalize: whether to normalize the positions 82 | to [0,1]. If False, the pixel-based positions of the 83 | top-left corners of the patches are returned. 84 | 85 | Return values: 86 | feaArr: the feature array, each row is a feature 87 | positions: the positions of the features 88 | ''' 89 | 90 | image = image.astype(np.double) 91 | if image.ndim == 3: 92 | # we do not deal with color images 93 | image = np.mean(image, axis=2) 94 | # compute the grids 95 | H, W = image.shape 96 | gS = self.gS 97 | pS = self.pS 98 | remH = np.mod(H - pS, gS) 99 | remW = np.mod(W - pS, gS) 100 | offsetH = int(remH / 2) 101 | offsetW = int(remW / 2) 102 | gridH, gridW = np.meshgrid(range(offsetH, H - pS + 1, gS), range(offsetW, W - pS + 1, gS)) 103 | gridH = gridH.flatten() 104 | gridW = gridW.flatten() 105 | if verbose: 106 | print('Image: w {}, h {}, gs {}, ps {}, nFea {}'.format(W, H, gS, pS, gridH.size))  # fixed: was a broken Python 2 print statement 109 | feaArr = self.calculate_sift_grid(image, gridH, gridW) 110 | feaArr = self.normalize_sift(feaArr) 111 | if positionNormalize: 112 | positions = np.vstack((gridH / np.double(H), gridW / np.double(W))) 113 | else: 114 | positions = np.vstack((gridH, gridW)) 115 | return feaArr, positions 116 | 117 | def calculate_sift_grid(self, image, gridH, gridW): 118 | ''' 119 | This function calculates the unnormalized SIFT features. 120 | It is called by process_image(). 
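        Worked example of the grid computed by process_image() (illustrative
        numbers, not from the repository): for a 100 x 100 image with
        patchSize = 16 and gridSpacing = 8, the offsets are
        int(mod(100 - 16, 8) / 2) = 2, patch corners lie at 2, 10, ..., 82 on
        each axis (11 positions), so 121 patches are extracted and the
        returned feaArr has shape (121, 128).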
121 | ''' 122 | H, W = image.shape 123 | Npatches = gridH.size 124 | feaArr = np.zeros((Npatches, Nsamples * Nangles)) 125 | 126 | # calculate gradient 127 | GH, GW = gen_dgauss(self.sigma) 128 | IH = signal.convolve2d(image, GH, mode='same') 129 | IW = signal.convolve2d(image, GW, mode='same') 130 | Imag = np.sqrt(IH ** 2 + IW ** 2) 131 | Itheta = np.arctan2(IH, IW) 132 | Iorient = np.zeros((Nangles, H, W)) 133 | for i in range(Nangles): 134 | Iorient[i] = Imag * np.maximum(np.cos(Itheta - angles[i]) ** alpha, 0) 135 | # pyplot.imshow(Iorient[i]) 136 | # pyplot.show() 137 | for i in range(Npatches): 138 | currFeature = np.zeros((Nangles, Nsamples)) 139 | for j in range(Nangles): 140 | currFeature[j] = np.dot(self.weights, 141 | Iorient[j, gridH[i]:gridH[i] + self.pS, gridW[i]:gridW[i] + self.pS].flatten()) 142 | feaArr[i] = currFeature.flatten() 143 | return feaArr 144 | 145 | def normalize_sift(self, feaArr): 146 | ''' 147 | This function does sift feature normalization 148 | following David Lowe's definition (normalize length -> 149 | thresholding at 0.2 -> renormalize length) 150 | ''' 151 | siftlen = np.sqrt(np.sum(feaArr ** 2, axis=1)) 152 | hcontrast = (siftlen >= self.nrml_thres) 153 | siftlen[siftlen < self.nrml_thres] = self.nrml_thres 154 | # normalize with contrast thresholding 155 | feaArr /= siftlen.reshape((siftlen.size, 1)) 156 | # suppress large gradients 157 | feaArr[feaArr > self.sift_thres] = self.sift_thres 158 | # renormalize high-contrast ones 159 | feaArr[hcontrast] /= np.sqrt(np.sum(feaArr[hcontrast] ** 2, axis=1)). \ 160 | reshape((feaArr[hcontrast].shape[0], 1)) 161 | return feaArr 162 | 163 | 164 | class SingleSiftExtractor(DsiftExtractor): 165 | ''' 166 | The simple wrapper class that does feature extraction, treating 167 | the whole image as a local image patch. 
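    Because the constructor passes patchSize as the gridSpacing as well, an
    image whose sides equal patchSize yields exactly one 128-dimensional
    descriptor; larger images yield one descriptor per non-overlapping patch.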
168 | ''' 169 | 170 | def __init__(self, patchSize,nrml_thres=1.0, sigma_edge=0.8,sift_thres=0.2): 171 | # simply call the super class __init__ with a large gridSpace 172 | DsiftExtractor.__init__(self, patchSize, patchSize, nrml_thres, sigma_edge, sift_thres) 173 | 174 | def process_image(self, image): 175 | return DsiftExtractor.process_image(self, image, False, False)[0] -------------------------------------------------------------------------------- /IDGP/strongGPDataType.py: -------------------------------------------------------------------------------- 1 | 2 | class Img: 3 | def __init__(array): 4 | pass 5 | 6 | class Region: 7 | def __init__(array): 8 | pass 9 | 10 | class Vector: 11 | def __init__(array): 12 | pass 13 | 14 | class Vector1: 15 | def __init__(array): 16 | pass 17 | 18 | class Int1: 19 | def __init__(int): 20 | pass 21 | 22 | class Int2: 23 | def __init__(int): 24 | pass 25 | 26 | class Int3: 27 | def __init__(int): 28 | pass 29 | -------------------------------------------------------------------------------- /MLGP/MLGP_main.py: -------------------------------------------------------------------------------- 1 | #python packages 2 | import random 3 | import time 4 | import operator 5 | import numpy as np 6 | # deap package 7 | import evalGP 8 | import gp_restrict 9 | from deap import base, creator, tools, gp 10 | # fitness function 11 | from fitnessEvaluation import evalAccuracy 12 | from strongGPDataType import Img, Int1, Int2, Int3, Region, Double # defined by author 13 | import functionSet as fs 14 | 15 | dataSetName='uiuc' 16 | randomSeeds=12 17 | 18 | x_train=np.load(dataSetName+'_train_data.npy') 19 | y_train=np.load(dataSetName+'_train_label.npy') 20 | x_validation=np.load(dataSetName+'_vali_data.npy') 21 | y_validation=np.load(dataSetName+'_vali_label.npy') 22 | x_test=np.load(dataSetName+'_test_data.npy') 23 | y_test=np.load(dataSetName+'_test_label.npy') 24 | #parameters: 25 | population=100 26 | generation=5 27 | cxProb=0.8 28 | mutProb=0.19 29 | elitismProb=0.01 30 | totalRuns = 1 31 | initialMinDepth=2 32 | initialMaxDepth=6 33 | maxDepth=6 34 | image_width, image_height = x_train[0].shape 35 | ##GP 36 | pset = gp.PrimitiveSetTyped('MAIN',[Img], Double, prefix='Raw') 37 | #Functions at the feature constructions tier 38 | pset.addPrimitive(operator.sub, [Double, Double], Double, name='Sub') 39 | # Functions at the feature extraction layer 40 | pset.addPrimitive(np.std, [Region], Double, name='G_Std1') 41 | pset.addPrimitive(np.std, [Region], Double, name='G_Std2') 42 | pset.addPrimitive(np.std, [Region], Double, name='G_Std3') 43 | pset.addPrimitive(fs.hist_equal, [Region], Region, name='Hist_Eq') 44 | pset.addPrimitive(fs.gaussian_1, [Region], Region, name='Gau1') 45 | pset.addPrimitive(fs.gaussian_11, [Region], Region, name='Gau11') 46 | pset.addPrimitive(fs.gauGM, [Region], Region, name='GauXY') 47 | pset.addPrimitive(fs.laplace, [Region], Region, name='Lap') 48 | pset.addPrimitive(fs.sobelx, [Region], Region, name='Sobel_X') 49 | pset.addPrimitive(fs.sobely, [Region], Region, name='Sobel_Y') 50 | pset.addPrimitive(fs.gaussian_Laplace1, [Region], Region, name='LoG1') 51 | pset.addPrimitive(fs.gaussian_Laplace2, [Region], Region, name='LoG2') 52 | pset.addPrimitive(fs.lbp, [Region], Region, name='LBP') 53 | pset.addPrimitive(fs.hog_feature, [Region], Region, name='HOG') 54 | # Functions at the region detection layer 55 | pset.addPrimitive(fs.regionS, [Img, Int1, Int2, Int3], Region, name='Region_S') 56 | pset.addPrimitive(fs.regionS, [Img, Int1, Int2, 
Int3], Region, name='Region_S1') 57 | pset.addPrimitive(fs.regionS, [Img, Int1, Int2, Int3], Region, name='Region_S2') 58 | pset.addPrimitive(fs.regionR, [Img, Int1, Int2, Int3, Int3], Region, name='Region_R') 59 | pset.addPrimitive(fs.regionR, [Img, Int1, Int2, Int3, Int3], Region, name='Region_R1') 60 | pset.addPrimitive(fs.regionR, [Img, Int1, Int2, Int3, Int3], Region, name='Region_R2') 61 | # Terminals 62 | pset.renameArguments(ARG0='grey') 63 | pset.addEphemeralConstant('randomDouble', lambda: round(random.random(), 2), float) 64 | pset.addEphemeralConstant('X', lambda: random.randint(0, image_width-20), Int1) 65 | pset.addEphemeralConstant('Y', lambda: random.randint(0, image_height-20), Int2) 66 | pset.addEphemeralConstant('Size', lambda: random.randint(20, 70), Int3) 67 | ## 68 | creator.create("FitnessMax", base.Fitness, weights=(1.0,)) 69 | creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax) 70 | 71 | toolbox = base.Toolbox() 72 | toolbox.register("expr", gp_restrict.genHalfAndHalfMD, pset=pset, min_=initialMinDepth, max_=initialMaxDepth) 73 | toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr) 74 | toolbox.register("population", tools.initRepeat, list, toolbox.individual) 75 | toolbox.register("compile", gp.compile, pset=pset) 76 | 77 | toolbox.register("evaluate", evalAccuracy,toolbox,x_train=x_train,y_train=y_train) 78 | toolbox.register("validation", evalAccuracy,toolbox,x_train=x_validation,y_train=y_validation) 79 | toolbox.register("select", tools.selTournament,tournsize=7) 80 | toolbox.register("selectElitism", tools.selBest) 81 | toolbox.register("mate", gp.cxOnePoint) 82 | toolbox.register("expr_mut", gp_restrict.genFull, min_=0, max_=2) 83 | toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset) 84 | toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 85 | toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxDepth)) 86 | 87 | def GPMain(randomSeeds): 88 | 89 | random.seed(randomSeeds) 90 | pop = toolbox.population(population) 91 | hof = tools.HallOfFame(1) 92 | log = tools.Logbook() 93 | stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values) 94 | stats_size = tools.Statistics(key=len) 95 | mstats = tools.MultiStatistics(fitness=stats_fit,size=stats_size) 96 | mstats.register("avg", np.mean) 97 | mstats.register("std", np.std) 98 | mstats.register("min", np.min) 99 | mstats.register("max", np.max) 100 | log.header = ["gen", "evals"] + mstats.fields 101 | 102 | pop, log, hof2= evalGP.eaSimple(pop, toolbox, cxProb, mutProb,elitismProb, generation, 103 | stats=mstats, halloffame=hof, verbose=True) 104 | 105 | return pop,log, hof,hof2 106 | 107 | if __name__ == "__main__": 108 | 109 | beginTime = time.process_time() 110 | pop, log, hof, hof2 = GPMain(randomSeeds) 111 | endTime = time.process_time() 112 | trainTime = endTime - beginTime 113 | 114 | testResults = evalAccuracy(toolbox, hof2[0], x_test, y_test) 115 | testTime = time.process_time() - endTime 116 | 117 | print('Best individual ', hof[0]) 118 | print('Test results ', testResults) 119 | print('Train time ', trainTime) 120 | print('Test time ', testTime) 121 | print('End') 122 | -------------------------------------------------------------------------------- /MLGP/evalGP.py: -------------------------------------------------------------------------------- 1 | import random 2 | from deap import tools 3 | 4 | 5 | def varAnd(population, toolbox, cxpb, 
mutpb): 6 | """Part of an evolutionary algorithm applying only the variation part 7 | (crossover **and** mutation). The modified individuals have their 8 | fitness invalidated. The individuals are cloned, so the returned population is 9 | independent of the input population. 10 | 11 | :param population: A list of individuals to vary. 12 | :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution 13 | operators. 14 | :param cxpb: The probability of mating two individuals. 15 | :param mutpb: The probability of mutating an individual. 17 | :returns: A list of varied individuals that are independent of their 18 | parents. 19 | 20 | The variation goes as follows. First, the parental population 21 | :math:`P_\mathrm{p}` is duplicated using the :meth:`toolbox.clone` method 22 | and the result is put into the offspring population :math:`P_\mathrm{o}`. 23 | Unlike the standard DEAP varAnd, crossover and mutation are then made 24 | mutually exclusive: with probability cxpb/(cxpb+mutpb), a pair of 25 | consecutive individuals :math:`\mathbf{x}_i` and :math:`\mathbf{x}_{i+1}` 26 | is mated using the :meth:`toolbox.mate` method (identical pairs are each 27 | mutated instead, to preserve diversity); otherwise a single individual is 28 | mutated using the :meth:`toolbox.mutate` method. The varied individuals 29 | replace their respective parents in :math:`P_\mathrm{o}` and have their 30 | fitness values invalidated. The resulting :math:`P_\mathrm{o}` is 31 | returned. 32 | 33 | 34 | This variation is named *And* because, over the population as a whole, it 35 | applies both crossover and mutation; each individual, however, is varied 36 | by exactly one of the two operators. Elitism is applied separately in 37 | eaSimple before variation. Both probabilities should be in 39 | :math:`[0, 1]`. 
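    Worked example (using the MLGP_main.py settings cxProb = 0.8 and
    mutProb = 0.19): the renormalized crossover probability is
    0.8 / (0.8 + 0.19) ~ 0.808 and the mutation probability is
    0.19 / 0.99 ~ 0.192, so every individual entering varAnd is varied by
    exactly one operator; the remaining 1% of the population is carried
    over unchanged by elitism in eaSimple.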
40 | """ 41 | offspring = [toolbox.clone(ind) for ind in population] 42 | new_cxpb=cxpb/(cxpb+mutpb) 43 | new_mutpb=mutpb/(cxpb+mutpb) 44 | 45 | #num_cx=int(new_cxpb*len(offspring)) 46 | #num_mu=len(offspring)-num_cx 47 | #print(new_cxpb, new_mutpb) 48 | # Apply crossover and mutation on the offspring 49 | i=1 50 | while i 0 and y_train[i] == 1: 13 | correctNum += 1.0 14 | accuracy = round(100*correctNum / len(y_train), 2) 15 | return accuracy, 16 | -------------------------------------------------------------------------------- /MLGP/functionSet.py: -------------------------------------------------------------------------------- 1 | from pylab import * 2 | from scipy import ndimage 3 | from skimage.feature import local_binary_pattern 4 | from skimage.exposure import equalize_hist 5 | from skimage.feature import hog 6 | 7 | def gaussian_1(left): 8 | left=ndimage.gaussian_filter(left,sigma=1) 9 | return left 10 | 11 | #gaussian filter with sigma=1 with the second derivatives 12 | def gaussian_11(left): 13 | left = ndimage.gaussian_filter(left, sigma=1,order=1) 14 | return left 15 | 16 | #gaussian_gradient_magnitude(input, sigma, output=None, mode='reflect', cval=0.0, **kwargs) 17 | def gauGM(left): 18 | left=ndimage.gaussian_gradient_magnitude(left,sigma=1) 19 | return left 20 | 21 | #gaussian_laplace(input, sigma, output=None, mode='reflect', cval=0.0, **kwargs) 22 | def gaussian_Laplace1(left): 23 | left=ndimage.gaussian_laplace(left,sigma=1) 24 | return left 25 | 26 | def gaussian_Laplace2(left): 27 | left=ndimage.gaussian_laplace(left,sigma=2) 28 | return left 29 | 30 | #laplace(input, output=None, mode='reflect', cval=0.0) 31 | def laplace(left): 32 | left=ndimage.laplace(left) 33 | return left 34 | 35 | #sobel(input, axis=-1, output=None, mode='reflect', cval=0.0) 36 | def sobelx(left): 37 | left=ndimage.sobel(left,axis=0) 38 | return left 39 | 40 | def sobely(left): 41 | left=ndimage.sobel(left,axis=1) 42 | return left 43 | 44 | def lbp(image): 45 | # 'uniform','default','ror','var' 46 | lbp = local_binary_pattern(image, 8, 1.5, method='nri_uniform') 47 | lbp=np.divide(lbp,59) 48 | return lbp 49 | 50 | def hist_equal(image): 51 | equal_image = equalize_hist(image, nbins=256, mask=None) 52 | return equal_image 53 | 54 | def hog_feature(image): 55 | img, realImage = hog(image, orientations=9, pixels_per_cell=(8, 8), 56 | cells_per_block=(3, 3), block_norm='L2-Hys', visualise=True, 57 | transform_sqrt=False, feature_vector=True) 58 | return realImage 59 | 60 | def regionS(left,x,y,windowSize): 61 | width,height=left.shape 62 | x_end = min(width, x+windowSize) 63 | y_end = min(height, y+windowSize) 64 | slice = left[x:x_end, y:y_end] 65 | return slice 66 | 67 | def regionR(left, x, y, windowSize1,windowSize2): 68 | width, height = left.shape 69 | x_end = min(width, x + windowSize1) 70 | y_end = min(height, y + windowSize2) 71 | slice = left[x:x_end, y:y_end] 72 | return slice 73 | -------------------------------------------------------------------------------- /MLGP/gp_alter.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sys 3 | import warnings 4 | from inspect import isclass 5 | 6 | 7 | # Define the name of type for any types. 8 | __type__ = object 9 | ###################################### 10 | # GP Program generation functions # 11 | ###################################### 12 | def genFull(pset, min_, max_, type_=None): 13 | """Generate an expression where each leaf has a the same depth 14 | between *min* and *max*. 
15 | 16 | :param pset: Primitive set from which primitives are selected. 17 | :param min_: Minimum height of the produced trees. 18 | :param max_: Maximum height of the produced trees. 19 | :param type_: The type that the tree should return when called, when 20 | :obj:`None` (default) no return type is enforced. 21 | :returns: A full tree with all leaves at the same depth. 22 | """ 23 | def condition(height, depth): 24 | """Expression generation stops when the depth is equal to height.""" 25 | return depth == height 26 | #print('it works', pset) 27 | return generate(pset, min_, max_, condition, type_) 28 | 29 | def genGrow(pset, min_, max_, type_=None): 30 | """Generate an expression where each leaf might have a different depth 31 | between *min* and *max*. 32 | 33 | :param pset: Primitive set from which primitives are selected. 34 | :param min_: Minimum height of the produced trees. 35 | :param max_: Maximum height of the produced trees. 36 | :param type_: The type that the tree should return when called, when 37 | :obj:`None` (default) no return type is enforced. 38 | :returns: A grown tree with leaves at possibly different depths. 39 | """ 40 | def condition(height, depth): 41 | """Expression generation stops when the depth is equal to height 42 | or when it is randomly determined that a node should be a terminal. 43 | """ 44 | return depth == height or \ 45 | (depth >= min_ and random.random() < pset.terminalRatio) 46 | return generate(pset, min_, max_, condition, type_) 47 | 48 | def genHalfAndHalf(pset, min_, max_, type_=None): 49 | """Generate an expression with a PrimitiveSet *pset*. 50 | Half the time, the expression is generated with :func:`~deap.gp.genGrow`, 51 | the other half, the expression is generated with :func:`~deap.gp.genFull`. 52 | 53 | :param pset: Primitive set from which primitives are selected. 54 | :param min_: Minimum height of the produced trees. 55 | :param max_: Maximum height of the produced trees. 56 | :param type_: The type that the tree should return when called, when 57 | :obj:`None` (default) no return type is enforced. 58 | :returns: Either a full or a grown tree. 59 | """ 60 | method = random.choice((genGrow, genFull)) 61 | #print(method) 62 | return method(pset, min_, max_, type_) 63 | 64 | def genRamped(pset, min_, max_, type_=None): 65 | """ 66 | .. deprecated:: 1.0 67 | The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead. 68 | """ 69 | warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.", 70 | FutureWarning) 71 | return genHalfAndHalf(pset, min_, max_, type_) 72 | 73 | def generate(pset, min_, max_, condition, type_=__type__): 74 | """Generate a tree as a flat list of nodes. The tree is built 75 | from the root to the leaves, and it stops growing when the 76 | condition is fulfilled. 77 | :param pset: A primitive set from which to select primitives of the trees. 78 | :param min_: Minimum height of the produced trees. 79 | :param max_: Maximum height of the produced trees. 80 | :param condition: The condition is a function that takes two arguments, 81 | the height of the tree to build and the current 82 | depth in the tree. 83 | :param type_: The type that the tree should return when called, when 84 | :obj:`None` (default) no return type is enforced. 85 | :returns: A grown tree with leaves at possibly different depths 86 | depending on the condition function. 87 | 88 | 89 | DUMMY NODE ISSUES 90 | 91 | DEAP will only place terminals if we're at the bottom of a branch. 92 | This creates two issues: 93 | 1. 
A primitive that takes other primitives as inputs could be placed at the 94 | second-to-last layer. 95 | SOLUTION: You need to allow the tree to end whenever the height condition is met, 96 | so create "dummy" terminals for every type possible in the tree. 97 | 2. A primitive that takes terminals as inputs could be placed above the 98 | second-to-last layer. 99 | SOLUTION: You need to allow the tree to continue extending the branch until the 100 | height condition is met, so create "dummy" primitives that just pass 101 | through the terminal types. 102 | 103 | These "dummy" terminals and "dummy" primitives introduce unnecessary and sometimes 104 | nonsensical solutions into populations. These "dummy" nodes can be eliminated 105 | if the height requirement is relaxed. 106 | 107 | 108 | HOW TO PREVENT DUMMY NODE ISSUES 109 | 110 | Relaxing the height requirement: 111 | When at the bottom of the branch, check for terminals first, then primitives. 112 | When checking for primitives, skirt the height requirement by adjusting 113 | the branch depth to be the second-to-last layer of the tree. 114 | If neither a terminal nor a primitive fits this node, throw an error. 115 | When not at the bottom of the branch, check for primitives first, then terminals. 116 | 117 | Issue with relaxing the height requirement: 118 | 1. Endless loops are possible when primitive sets have any type loops. 119 | A primitive with an output of one type may not take an input type of 120 | itself or a parent type. 121 | SOLUTION: A primitive set must be well-designed to prevent those type loops. 122 | 123 | """ 124 | if type_ is None: 125 | type_ = pset.ret 126 | expr = [] 127 | height = random.randint(min_, max_) 128 | stack = [(0, type_)] 129 | #print(pset.terminals) 130 | #print(pset.primitives) 131 | while len(stack) != 0: 132 | depth, type_ = stack.pop() 133 | # At the bottom of the tree 134 | if condition(height, depth): 135 | # Try finding a terminal 136 | try: 137 | term = random.choice(pset.terminals[type_]) 138 | #print('term',term) 139 | if isclass(term): 140 | term = term() 141 | expr.append(term) 142 | # No terminal fits 143 | except: 144 | # So pull the depth back one layer, and start looking for primitives 145 | try: 146 | depth -= 1 147 | prim = random.choice(pset.primitives[type_]) 148 | #print('prim',prim) 149 | expr.append(prim) 150 | for arg in reversed(prim.args): 151 | stack.append((depth + 1, arg)) 152 | 153 | # No primitive fits, either - that's an error 154 | except IndexError: 155 | _, _, traceback = sys.exc_info() 156 | raise IndexError("The gp.generate function tried to add " \ 157 | "a primitive of type '%s', but there is " \ 158 | "none available." % (type_,), traceback) 159 | 160 | # Not at the bottom of the tree 161 | else: 162 | # Check for primitives 163 | try: 164 | prim = random.choice(pset.primitives[type_]) 165 | expr.append(prim) 166 | for arg in reversed(prim.args): 167 | stack.append((depth + 1, arg)) 168 | # No primitive fits 169 | except: 170 | # So check for terminals 171 | try: 172 | term = random.choice(pset.terminals[type_]) 173 | 174 | # No terminal fits, either - that's an error 175 | except IndexError: 176 | _, _, traceback = sys.exc_info() 177 | raise IndexError("The gp.generate function tried to add " \ 178 | "a terminal of type '%s', but there is " \ 179 | "none available." 
% (type_,), traceback) 180 | if isclass(term): 181 | term = term() 182 | expr.append(term) 183 | return expr 184 | 185 | -------------------------------------------------------------------------------- /MLGP/gp_restrict.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sys 3 | import warnings 4 | from inspect import isclass 5 | 6 | 7 | # Define the name of type for any types. 8 | __type__ = object 9 | ###################################### 10 | # GP Program generation functions # 11 | ###################################### 12 | def genFull(pset, min_, max_, type_=None): 13 | """Generate an expression where each leaf has the same depth 14 | between *min* and *max*. 15 | 16 | :param pset: Primitive set from which primitives are selected. 17 | :param min_: Minimum height of the produced trees. 18 | :param max_: Maximum height of the produced trees. 19 | :param type_: The type that the tree should return when called, when 20 | :obj:`None` (default) no return type is enforced. 21 | :returns: A full tree with all leaves at the same depth. 22 | """ 23 | def condition(height, depth): 24 | """Expression generation stops when the depth is equal to height.""" 25 | return depth == height 26 | #print('it works', pset) 27 | return generate(pset, min_, max_, condition, type_) 28 | 29 | def genGrow(pset, min_, max_, type_=None): 30 | """Generate an expression where each leaf might have a different depth 31 | between *min* and *max*. 32 | 33 | :param pset: Primitive set from which primitives are selected. 34 | :param min_: Minimum height of the produced trees. 35 | :param max_: Maximum height of the produced trees. 36 | :param type_: The type that the tree should return when called, when 37 | :obj:`None` (default) no return type is enforced. 38 | :returns: A grown tree with leaves at possibly different depths. 39 | """ 40 | def condition(height, depth): 41 | """Expression generation stops when the depth is equal to height, 42 | or as soon as the depth reaches *min_* (this restricted variant always allows a terminal from *min_* onwards). 43 | """ 44 | return depth == height or depth >= min_ 45 | return generate(pset, min_, max_, condition, type_) 46 | 47 | def genHalfAndHalf(pset, min_, max_, type_=None): 48 | """Generate an expression with a PrimitiveSet *pset*. 49 | Half the time, the expression is generated with :func:`~deap.gp.genGrow`, 50 | the other half, the expression is generated with :func:`~deap.gp.genFull`. 51 | 52 | :param pset: Primitive set from which primitives are selected. 53 | :param min_: Minimum height of the produced trees. 54 | :param max_: Maximum height of the produced trees. 55 | :param type_: The type that the tree should return when called, when 56 | :obj:`None` (default) no return type is enforced. 57 | :returns: Either a full or a grown tree. 58 | """ 59 | method = random.choice((genGrow, genFull)) 60 | #print(method) 61 | return method(pset, min_, max_, type_) 62 | 63 | def genRamped(pset, min_, max_, type_=None): 64 | """ 65 | .. deprecated:: 1.0 66 | The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead. 67 | """ 68 | warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.", 69 | FutureWarning) 70 | return genHalfAndHalf(pset, min_, max_, type_) 71 | 72 | def generate(pset, min_, max_, condition, type_=__type__): 73 | """Generate a tree as a flat list of nodes. The tree is built 74 | from the root to the leaves, and it stops growing when the 75 | condition is fulfilled. 
76 | :param pset: A primitive set from which to select primitives of the trees. 77 | :param min_: Minimum height of the produced trees. 78 | :param max_: Maximum height of the produced trees. 79 | :param condition: The condition is a function that takes two arguments, 80 | the height of the tree to build and the current 81 | depth in the tree. 82 | :param type_: The type that the tree should return when called, when 83 | :obj:`None` (default) no return type is enforced. 84 | :returns: A grown tree with leaves at possibly different depths 85 | depending on the condition function. 86 | 87 | 88 | DUMMY NODE ISSUES 89 | 90 | DEAP will only place terminals if we're at the bottom of a branch. 91 | This creates two issues: 92 | 1. A primitive that takes other primitives as inputs could be placed at the 93 | second-to-last layer. 94 | SOLUTION: You need to allow the tree to end whenever the height condition is met, 95 | so create "dummy" terminals for every type possible in the tree. 96 | 2. A primitive that takes terminals as inputs could be placed above the 97 | second-to-last layer. 98 | SOLUTION: You need to allow the tree to continue extending the branch until the 99 | height condition is met, so create "dummy" primitives that just pass 100 | through the terminal types. 101 | 102 | These "dummy" terminals and "dummy" primitives introduce unnecessary and sometimes 103 | nonsensical solutions into populations. These "dummy" nodes can be eliminated 104 | if the height requirement is relaxed. 105 | 106 | 107 | HOW TO PREVENT DUMMY NODE ISSUES 108 | 109 | Relaxing the height requirement: 110 | When at the bottom of the branch, check for terminals first, then primitives. 111 | When checking for primitives, skirt the height requirement by adjusting 112 | the branch depth to be the second-to-last layer of the tree. 113 | If neither a terminal nor a primitive fits this node, throw an error. 114 | When not at the bottom of the branch, check for primitives first, then terminals. 115 | 116 | Issue with relaxing the height requirement: 117 | 1. Endless loops are possible when primitive sets have any type loops. 118 | A primitive with an output of one type may not take an input type of 119 | itself or a parent type. 120 | SOLUTION: A primitive set must be well-designed to prevent those type loops. 121 | 122 | """ 123 | if type_ is None: 124 | type_ = pset.ret 125 | expr = [] 126 | height = random.randint(min_, max_) 127 | stack = [(0, type_)] 128 | #print(pset.terminals) 129 | #print(pset.primitives) 130 | while len(stack) != 0: 131 | depth, type_ = stack.pop() 132 | # At the bottom of the tree 133 | if condition(height, depth): 134 | # Try finding a terminal 135 | try: 136 | term = random.choice(pset.terminals[type_]) 137 | #print('term',term) 138 | if isclass(term): 139 | term = term() 140 | expr.append(term) 141 | # No terminal fits 142 | except: 143 | # So pull the depth back one layer, and start looking for primitives 144 | try: 145 | depth -= 1 146 | prim = random.choice(pset.primitives[type_]) 147 | #print('prim',prim) 148 | expr.append(prim) 149 | for arg in reversed(prim.args): 150 | stack.append((depth + 1, arg)) 151 | 152 | # No primitive fits, either - that's an error 153 | except IndexError: 154 | _, _, traceback = sys.exc_info() 155 | raise IndexError("The gp.generate function tried to add " \ 156 | "a primitive of type '%s', but there is " \ 157 | "none available." 
% (type_,), traceback) 158 | 159 | # Not at the bottom of the tree 160 | else: 161 | # Check for primitives 162 | try: 163 | prim = random.choice(pset.primitives[type_]) 164 | expr.append(prim) 165 | for arg in reversed(prim.args): 166 | stack.append((depth + 1, arg)) 167 | # No primitive fits 168 | except: 169 | # So check for terminals 170 | try: 171 | term = random.choice(pset.terminals[type_]) 172 | 173 | # No terminal fits, either - that's an error 174 | except IndexError: 175 | _, _, traceback = sys.exc_info() 176 | raise IndexError("The gp.generate function tried to add " \ 177 | "a terminal of type '%s', but there is " \ 178 | "none available." % (type_,), traceback) 179 | if isclass(term): 180 | term = term() 181 | expr.append(term) 182 | #print(len(expr)) 183 | return expr 184 | 185 | def genHalfAndHalfMD(pset, min_, max_, type_=None): 186 | expr = genHalfAndHalf(pset, min_, max_, type_=type_) 187 | # Rejection sampling: keep regenerating until the tree has at most 80 nodes 188 | while len(expr) > 80: 189 | expr = genHalfAndHalf(pset, min_, max_, type_=type_) 192 | return expr 193 | 194 | def genFullMD(pset, min_, max_, type_=None): 195 | expr = genFull(pset, min_, max_, type_=type_) 196 | # Rejection sampling: keep regenerating until the tree has at most 80 nodes 197 | while len(expr) > 80: 198 | expr = genFull(pset, min_, max_, type_=type_) 201 | return expr 202 | -------------------------------------------------------------------------------- /MLGP/uiuc_train_data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YingBi92/BookCode/9a3ee607444f136e7c3a3e466f7bfd3a9b417ea4/MLGP/uiuc_train_data.npy -------------------------------------------------------------------------------- /MLGP/uiuc_train_label.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YingBi92/BookCode/9a3ee607444f136e7c3a3e466f7bfd3a9b417ea4/MLGP/uiuc_train_label.npy -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BookCode 2 | 3 | No commercial use or redistribution. 4 | 5 | 6 | 7 | This is the code for the book: 8 | ------ 9 | Y. Bi, B. Xue and M. Zhang, "Genetic Programming for Image Classification: An Automated Approach to Feature Learning," 2021, Springer International Publishing 10 | 11 | If you use the code, please cite the book: 
12 | ---- 13 | @book{bi2021gpimage, 14 | title={Genetic Programming for Image Classification: An Automated Approach to Feature Learning}, 15 | author={Bi, Ying and Xue, Bing and Zhang, Mengjie}, 16 | year={2021}, 17 | publisher={Springer International Publishing}} 18 | 19 | The code has been updated since 17 August 2021. Please use these newer versions; otherwise you may get lower accuracies. 20 | ---- 21 | The code has not been carefully re-checked and rerun since I tidied it up. If you find any problems/bugs or cannot reproduce accuracies similar to those reported in the book, feel free to contact me (Ying.Bi@ecs.vuw.ac.nz). 22 | 23 | --------------------------------------------------------------------------------
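How to run (untested sketch):
----
The dependency list below is inferred from the imports in this repository, and the launch commands are the standard ones for these tools rather than instructions from the original author:

    pip install numpy scipy scikit-learn scikit-image matplotlib deap scoop
    # IDGP and MLGP run in a single process
    cd IDGP && python IDGP_main.py
    # COGP evaluates fitness in parallel via SCOOP, so launch it through the scoop module
    cd COGP && python -m scoop COGP_main.py

Each *_main.py expects its .npy data files (e.g. f1_train_data.npy) in the working directory, as laid out in the tree above.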