├── .gitignore ├── requirements.txt ├── requirements-train.txt ├── Utils ├── Face │ ├── recognize.py │ ├── normalize.py │ ├── encoded.py │ └── vam.py ├── Training │ ├── config.py │ └── param_generator.py ├── Vam │ └── window.py └── VamMod │ └── VamMod.cs ├── Tools ├── MergeCsv.py ├── MergeJson.py ├── CreateTrainingImages.py ├── Train.py ├── MakePrediction.py ├── CreateTrainingVariations.py ├── CreateTrainingCsv.py ├── CreateTrainingEncodings.py └── TrainSelf.py ├── foto2vam.py ├── mergeBase.json └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | Sample/Results 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pywin32 2 | pyautogui 3 | cmake 4 | dlib 5 | face_recognition 6 | deap 7 | opencv-python 8 | imutils 9 | tensorflow 10 | keras -------------------------------------------------------------------------------- /requirements-train.txt: -------------------------------------------------------------------------------- 1 | pywin32 2 | pyautogui 3 | cmake 4 | dlib>=19.15.99 5 | git+https://github.com/ChrisTopherTa54321/face_recognition#egg=face_recognition_hst>=1.2.4 6 | deap 7 | opencv-python 8 | imutils 9 | keras 10 | tensorflow-gpu 11 | msgpack 12 | tqdm -------------------------------------------------------------------------------- /Utils/Face/recognize.py: -------------------------------------------------------------------------------- 1 | # Class to manipulate a normalize a face image 2 | import imutils 3 | from imutils.face_utils import FaceAligner 4 | from imutils.face_utils import rect_to_bb 5 | from PIL import Image 6 | import cv2 7 | import dlib 8 | import face_recognition_models 9 | import numpy 10 | 11 | class FaceNormalizer: 12 | 13 | def __init__(self, size=256, align = True, histogram = True): 14 | predictor = dlib.shape_predictor( face_recognition_models.pose_predictor_model_location() ) 15 | self._detector = dlib.get_frontal_face_detector() 16 | self._size = size 17 | self._align = align 18 | self._histogram = histogram 19 | 20 | if self._align: 21 | self._aligner = FaceAligner( predictor=predictor) 22 | else: 23 | self._aligner = None 24 | 25 | 26 | def normalize(self, image): 27 | npImg = numpy.array(image) 28 | # PIL loads RGB, CV2 wants BGR 29 | npImg = cv2.cvtColor(npImg, cv2.COLOR_RGB2BGR) 30 | npImg = imutils.resize(npImg, width=800) 31 | aligned = self._alignNpImg( npImg ) 32 | if aligned is None: 33 | raise Exception("No face found in image!") 34 | npImg = cv2.cvtColor(aligned, cv2.COLOR_BGR2RGB) 35 | return Image.fromarray(npImg) 36 | 37 | 38 | def _alignNpImg(self, npImg): 39 | gray = cv2.cvtColor(npImg, cv2.COLOR_BGR2GRAY) 40 | rects = self._detector(gray, 1) 41 | if len(rects) == 0: 42 | return None 43 | return self._aligner.align(npImg, gray, rects[0]) -------------------------------------------------------------------------------- /Utils/Face/normalize.py: -------------------------------------------------------------------------------- 1 | # Class to manipulate a normalize a face image 2 | import imutils 3 | from imutils.face_utils import FaceAligner 4 | from imutils.face_utils import rect_to_bb 5 | from PIL import Image 6 | import cv2 7 | import dlib 8 | import face_recognition_models 9 | import numpy 10 | 11 | class FaceNormalizer: 12 | 13 | def __init__(self, size=256, align = True, histogram = True): 14 | predictor = dlib.shape_predictor( 
face_recognition_models.pose_predictor_model_location() ) 15 | self._detector = dlib.get_frontal_face_detector() 16 | self._size = size 17 | self._align = align 18 | self._histogram = histogram 19 | 20 | if self._align: 21 | self._aligner = FaceAligner( predictor=predictor, desiredFaceWidth = self._size) 22 | else: 23 | self._aligner = None 24 | 25 | 26 | def normalize(self, image): 27 | npImg = numpy.array(image) 28 | # PIL loads RGB, CV2 wants BGR 29 | npImg = cv2.cvtColor(npImg, cv2.COLOR_RGB2BGR) 30 | npImg = imutils.resize(npImg, width=800) 31 | aligned = self._alignNpImg( npImg ) 32 | if aligned is None: 33 | raise Exception("No face found in image!") 34 | npImg = cv2.cvtColor(aligned, cv2.COLOR_BGR2RGB) 35 | return Image.fromarray(npImg) 36 | 37 | 38 | def _alignNpImg(self, npImg): 39 | gray = cv2.cvtColor(npImg, cv2.COLOR_BGR2GRAY) 40 | rects = self._detector(gray, 1) 41 | if len(rects) == 0: 42 | return None 43 | return self._aligner.align(npImg, gray, rects[0]) -------------------------------------------------------------------------------- /Tools/MergeCsv.py: -------------------------------------------------------------------------------- 1 | # Generate training data from existing faces 2 | import argparse 3 | import glob 4 | import os 5 | import fnmatch 6 | 7 | ############################### 8 | # Run the program 9 | # 10 | def main( args ): 11 | if args.pydev: 12 | print("Enabling debugging with pydev") 13 | import pydevd 14 | pydevd.settrace(suspend=False) 15 | 16 | inputPath = args.inputPath 17 | outputFile = args.outputFile 18 | recursive = args.recursive 19 | fileFilter = args.filter 20 | 21 | print(" Creating output CSV file: {}".format(outputFile)) 22 | # Read in all of the files from inputpath 23 | outFile = open(outputFile, 'w') 24 | #writer = csv.writer( outFile, lineterminator='\n') 25 | commentLine = None 26 | for root, subdirs, files in os.walk(inputPath): 27 | print("Entering directory {}".format(root)) 28 | for file in fnmatch.filter(files, fileFilter): 29 | print("Reading {}".format(file)) 30 | csvInputFile = os.path.join(root, file) 31 | with open( csvInputFile ) as f: 32 | for line in f: 33 | if line.startswith("#"): 34 | if commentLine is None: 35 | commentLine = line 36 | else: 37 | if commentLine != line: 38 | print("Header mismatch in {}! 
Got {}, expected {}".format(csvInputFile, line, commentLine ) ) 39 | break 40 | continue # skip comment lines (except header comment) 41 | outFile.write(line) 42 | 43 | if not recursive: 44 | break 45 | 46 | 47 | ############################### 48 | # parse arguments 49 | # 50 | def parseArgs(): 51 | parser = argparse.ArgumentParser( description="Generate training data" ) 52 | parser.add_argument('--inputPath', help="Root directory containing csv files to merge", required=True) 53 | parser.add_argument('--filter', help="Filter for files to merge", default="*.csv") 54 | parser.add_argument('--outputFile', help="Merged output CSV file", default="output.csv") 55 | parser.add_argument("--recursive", action='store_true', default=False, help="Recursively enter directories") 56 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 57 | 58 | return parser.parse_args() 59 | 60 | 61 | ############################### 62 | # program entry point 63 | # 64 | if __name__ == "__main__": 65 | args = parseArgs() 66 | main( args ) -------------------------------------------------------------------------------- /Tools/MergeJson.py: -------------------------------------------------------------------------------- 1 | # Copy parts of one model to another 2 | import argparse 3 | import os 4 | import fnmatch 5 | from Utils.Face.vam import VamFace 6 | 7 | ############################### 8 | # Run the program 9 | # 10 | def main( args ): 11 | if args.pydev: 12 | print("Enabling debugging with pydev") 13 | import pydevd 14 | pydevd.settrace(suspend=False) 15 | 16 | templateFace = VamFace( args.templateJson ) 17 | invertTemplate = args.invertTemplate 18 | templateFace.trimToAnimatable() 19 | fromFace = VamFace( args.fromJson, discardExtra = False ) 20 | fileFilter = args.filter 21 | inputDir = args.toJsonDir 22 | outputDir = args.outputJsonDir 23 | 24 | for root, subdirs, files in os.walk(inputDir): 25 | print("Entering directory {}".format(root)) 26 | for file in fnmatch.filter(files, fileFilter): 27 | try: 28 | toName = os.path.splitext(file)[0] 29 | outDir = root.lstrip(inputDir) 30 | outDir = outDir.lstrip('/') 31 | outDir = outDir.lstrip('\\') 32 | outDir = os.path.join( outputDir, outDir ) 33 | outName = "{}_mergedWith_{}.json".format( os.path.splitext(file)[0], os.path.splitext(os.path.basename(args.fromJson))[0]) 34 | toFace = VamFace( os.path.join(root, file), discardExtra = False ) 35 | newFace = VamFace.mergeFaces( templateFace=templateFace, toFace=toFace, fromFace=fromFace, invertTemplate = invertTemplate, copyNonMorphs = True) 36 | try: 37 | os.makedirs(outDir) 38 | except: 39 | pass 40 | outputName = os.path.join(outDir, outName ) 41 | newFace.save( outputName ) 42 | print( "Generated {}".format(outputName ) ) 43 | except Exception as e: 44 | print("Error merging {} - {}".format(file, str(e))) 45 | 46 | 47 | 48 | 49 | ############################### 50 | # parse arguments 51 | # 52 | def parseArgs(): 53 | parser = argparse.ArgumentParser( description="Generate training data" ) 54 | parser.add_argument('--templateJson', help="Model specifying morphs to copy set as 'animatable'", required=True) 55 | parser.add_argument("--invertTemplate", action='store_true', default=False, help="TemplateJson actually has morphs *not* to copy") 56 | parser.add_argument('--toJsonDir', help="Path to find models to copy morphs TO", required=True) 57 | parser.add_argument('--filter', help="Filter for To morphs. 
Defaults to *.json", default="*.json") 58 | parser.add_argument("--recursive", action='store_true', default=False, help="Iterate to subdirectories of toJsonDir") 59 | parser.add_argument('--fromJson', help="Model to copy morphs FROM", required=True) 60 | parser.add_argument('--outputJsonDir', help="Destination model path", required=True) 61 | 62 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 63 | 64 | 65 | return parser.parse_args() 66 | 67 | 68 | ############################### 69 | # program entry point 70 | # 71 | if __name__ == "__main__": 72 | args = parseArgs() 73 | main( args ) -------------------------------------------------------------------------------- /foto2vam.py: -------------------------------------------------------------------------------- 1 | # Quickly thrown together root script to run Tools 2 | import argparse 3 | import os 4 | import glob 5 | import Tools.CreateTrainingEncodings as encodings 6 | import Tools.MakePrediction as predictor 7 | import Tools.MergeJson as mergeJson 8 | import json 9 | import multiprocessing 10 | 11 | ############################### 12 | # Run the program 13 | # 14 | def main( args ): 15 | if args.pydev: 16 | print("Enabling debugging with pydev") 17 | import pydevd 18 | pydevd.settrace(suspend=False) 19 | 20 | inputPath = args.inputPath 21 | modelGlob = args.modelPath 22 | outputPath = args.outputPath 23 | defaultJsonPath = args.defaultJson 24 | mergedJsonPath = args.mergedOutputPath 25 | 26 | print( "Processing images from {}".format(inputPath)) 27 | 28 | print( "First running CreateTrainingEncodings tool") 29 | params = argparse.Namespace(inputPath=inputPath, filter="*.png,*.jpg", normalizeSize=150, normalize=True, numJitters=10, numThreads=4, pydev=False, recursive=True, debugPose = False, flipFirst = False) 30 | encodings.main( params ) 31 | 32 | for modelFile in glob.glob( modelGlob ): 33 | jsonPath = os.path.splitext(modelFile)[0] + ".json" 34 | print( "Processing encodings from {} and using model/json {}/{}".format(inputPath, modelFile, jsonPath)) 35 | print( "Running MakePredictions tool") 36 | print( "With model {}".format(modelFile)) 37 | params = argparse.Namespace(modelFile=modelFile, inputDir=inputPath, pydev=False, outputDir=outputPath, multiDir=False, skipChance=0.0, recursive=True ) 38 | predictor.main(params) 39 | 40 | print( "Running MergeJson tool" ) 41 | jsonData = json.loads( open(jsonPath).read() ) 42 | # Run MergeTool using the inverted baseJson (copy all attributes except the ones trained on) 43 | templateJson = os.path.join(os.path.dirname(jsonPath), jsonData["baseJson"]) 44 | 45 | params = None 46 | filter = "*{}".format( os.path.basename( jsonPath ) ) # Don't have two models end with same text or later one will overwrite previous output merge! 
47 | params = argparse.Namespace(templateJson=templateJson, invertTemplate=True, toJsonDir=outputPath, filter=filter, recursive=True, fromJson=defaultJsonPath, outputJsonDir=mergedJsonPath, pydev=False) 48 | mergeJson.main(params) 49 | 50 | 51 | ############################### 52 | # parse arguments 53 | # 54 | def parseArgs(): 55 | parser = argparse.ArgumentParser( description="Generate training data" ) 56 | parser.add_argument('--inputPath', help="Directory containing images", default="Input") 57 | parser.add_argument('--modelPath', help="Path to model, can include wildcard", default=os.path.join("models", "*.model") ) 58 | parser.add_argument('--defaultJson', help="JSON file to copy base look from", default=os.path.join("mergeBase.json") ) 59 | parser.add_argument('--outputPath', help="Directory to store output", default="Output") 60 | parser.add_argument('--mergedOutputPath', help="Path to store output merged with defaultJson", default="Output_Merged") 61 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 62 | 63 | return parser.parse_args() 64 | 65 | 66 | ############################### 67 | # program entry point 68 | # 69 | if __name__ == "__main__": 70 | multiprocessing.freeze_support() 71 | args = parseArgs() 72 | main( args ) -------------------------------------------------------------------------------- /Tools/CreateTrainingImages.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import glob 4 | from Utils.Vam.window import VamWindow 5 | import time 6 | from win32api import GetKeyState 7 | from win32con import VK_CAPITAL, VK_SCROLL 8 | from Utils.Face.vam import VamFace 9 | import multiprocessing 10 | import queue 11 | import fnmatch 12 | from collections import deque 13 | 14 | # Set DPI Awareness (Windows 10 and 8). Makes GetWindowRect return pxiel coordinates 15 | import ctypes 16 | errorCode = ctypes.windll.shcore.SetProcessDpiAwareness(2) 17 | 18 | 19 | ############################### 20 | # Run the program 21 | # 22 | def main( args ): 23 | global testJsonPath 24 | global outputPath 25 | if args.pydev: 26 | print("Enabling debugging with pydev") 27 | import pydevd 28 | pydevd.settrace(suspend=False) 29 | 30 | inputPath = args.inputJsonPath 31 | recursive = args.recursive 32 | fileFilter = args.filter 33 | 34 | print( "Input path: {}\n\n".format( inputPath ) ) 35 | 36 | # Initialize the Vam window 37 | vamWindow = VamWindow( pipe = "foto2vamPipe" ) 38 | 39 | angles = [0, 35] 40 | skipCnt = 0 41 | screenshots = deque(maxlen=2) 42 | for root, subdirs, files in os.walk(inputPath): 43 | print("Entering directory {}".format(root)) 44 | for file in fnmatch.filter(files, fileFilter): 45 | try: 46 | anglesToProcess = [] + angles 47 | for angle in angles: 48 | fileName = "{}_{}.png".format( file, angle) 49 | fileName = os.path.join( root,fileName) 50 | if os.path.exists(fileName) or os.path.exists("{}.failed".format(fileName) ): 51 | anglesToProcess.remove(angle) 52 | 53 | if len(anglesToProcess) == 0: 54 | skipCnt += 1 55 | #print("Nothing to do for {}".format(file)) 56 | continue 57 | print("Processing {} (after skipping {})".format(file, skipCnt)) 58 | skipCnt = 0 59 | 60 | if (GetKeyState(VK_CAPITAL) or GetKeyState(VK_SCROLL)): 61 | print("WARNING: Suspending script due to Caps Lock or Scroll Lock being on. 
Push CTRL+PAUSE/BREAK or mash CTRL+C to exit script.") 62 | while GetKeyState(VK_CAPITAL) or GetKeyState(VK_SCROLL): 63 | time.sleep(1) 64 | # Get screenshots of face and submit them to worker threads 65 | inputFile = os.path.join( os.path.abspath(root), file ) 66 | vamWindow.loadLook(inputFile, anglesToProcess ) 67 | continue 68 | except Exception as e: 69 | print("Failed to process {} - {}".format(file, str(e))) 70 | 71 | if not recursive: 72 | break 73 | 74 | print("Generator done!") 75 | 76 | 77 | ############################### 78 | # parse arguments 79 | # 80 | def parseArgs(): 81 | parser = argparse.ArgumentParser( description="Generate images for json" ) 82 | parser.add_argument('--inputJsonPath', help="Directory containing json files to start with", required=True) 83 | parser.add_argument('--filter', help="File filter to process. Defaults to *.json", default="*.json") 84 | parser.add_argument('--normalizeSize', type=int, help="Size of normalized output. Defaults to 500", default=500) 85 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 86 | parser.add_argument("--recursive", action='store_true', default=False, help="Recursively enter directories") 87 | return parser.parse_args() 88 | 89 | ############################### 90 | # program entry point 91 | # 92 | if __name__ == "__main__": 93 | args = parseArgs() 94 | main( args ) -------------------------------------------------------------------------------- /Utils/Training/config.py: -------------------------------------------------------------------------------- 1 | # Class to handle configurations 2 | 3 | import json 4 | import os 5 | from Utils.Face.vam import VamFace 6 | from Utils.Training.param_generator import ParamGenerator 7 | 8 | class Config: 9 | CONFIG_VERSION = 1 10 | 11 | def __init__(self, configJson, basePath = "" ): 12 | minJson = os.path.join(basePath, configJson["minJson"]) if "minJson" in configJson else None 13 | maxJson = os.path.join(basePath, configJson["maxJson"]) if "maxJson" in configJson else None 14 | self._baseFace = VamFace( os.path.join(basePath, configJson["baseJson"]), minJson, maxJson ) 15 | self._baseFace.trimToAnimatable() 16 | 17 | self._paramShape = None 18 | angles = set() 19 | self._input_params = [] 20 | if "inputs" in configJson: 21 | inputs = configJson["inputs"] 22 | for param in inputs: 23 | try: 24 | paramName = param["name"] 25 | paramList = [] 26 | for paramParam in param["params"]: 27 | paramList.append( { "name": paramParam["name"], "value": paramParam["value"] } ) 28 | if paramParam["name"] == "angle": 29 | angles.add( float(paramParam["value"])) 30 | self._input_params.append( { "name": paramName, "params": paramList } ) 31 | except: 32 | print("Error parsing parameter") 33 | 34 | 35 | self._output_params = [] 36 | if "outputs" in configJson: 37 | outputs = configJson["outputs"] 38 | for param in outputs: 39 | try: 40 | paramName = param["name"] 41 | paramList = [] 42 | for paramParam in param["params"]: 43 | paramList.append( { "name": paramParam["name"], "value": paramParam["value"] } ) 44 | if paramParam["name"] == "angle": 45 | angles.add( float(paramParam["value"])) 46 | self._output_params.append( { "name": paramName, "params": paramList } ) 47 | except: 48 | print("Error parsing parameter") 49 | 50 | 51 | self._angles = list(angles) 52 | self._angles.sort() 53 | 54 | @staticmethod 55 | def createFromFile( fileName ): 56 | data = open(fileName).read() 57 | jsonData = json.loads(data) 58 | 59 | if "config_version" in jsonData 
and jsonData["config_version"] is not Config.CONFIG_VERSION: 60 | raise Exception("Config version mismatch! File was {}, reader was {}".format(jsonData["config_version"], Config.CONFIG_VERSION ) ) 61 | 62 | return Config( jsonData, os.path.dirname(fileName) ) 63 | 64 | def getBaseFace(self): 65 | return self._baseFace 66 | 67 | def getShape(self): 68 | return self._paramShape 69 | 70 | def getAngles(self): 71 | return self._angles 72 | 73 | def generateParams(self, relatedFiles ): 74 | if self._paramShape is None: 75 | paramGen = ParamGenerator( self._input_params, self._angles, relatedFiles, self._baseFace ) 76 | inputParams = paramGen.getParams() 77 | inputLen = len(inputParams) 78 | paramGen = ParamGenerator( self._output_params, self._angles, relatedFiles, self._baseFace ) 79 | outputParams = paramGen.getParams() 80 | outputLen = len(outputParams) 81 | self._paramShape = (inputLen, outputLen) 82 | outParams = inputParams + outputParams 83 | else: 84 | paramGen = ParamGenerator( self._input_params + self._output_params, self._angles, relatedFiles, self._baseFace ) 85 | outParams = paramGen.getParams() 86 | return outParams 87 | -------------------------------------------------------------------------------- /Tools/Train.py: -------------------------------------------------------------------------------- 1 | # Generate training data from existing faces 2 | import argparse 3 | import os 4 | import numpy 5 | import collections 6 | from keras.models import load_model, Model 7 | from keras.initializers import RandomUniform 8 | from keras.optimizers import Adam 9 | 10 | from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU 11 | 12 | # Work around low-memory GPU issue 13 | import tensorflow as tf 14 | config = tf.ConfigProto() 15 | config.gpu_options.allow_growth = True 16 | session = tf.Session(config=config) 17 | 18 | ############################### 19 | # Run the program 20 | # 21 | def main( args ): 22 | if args.pydev: 23 | print("Enabling debugging with pydev") 24 | import pydevd 25 | pydevd.settrace(suspend=False) 26 | 27 | validationCsv = args.validationCsv 28 | trainingCsv = args.trainingCsv 29 | outputModelFile = args.outputFile 30 | 31 | # First read parameters from trainingCsv and validation, ensure they match 32 | trainingParams = open(trainingCsv).readline() 33 | validationParams = open(validationCsv).readline() 34 | 35 | if trainingParams != validationParams: 36 | print("Training CSV mismatches Validation CSV! [{}] vs [{}]".format( trainingParams, validationParams ) ) 37 | 38 | trainingParams = trainingParams.lstrip('#') 39 | trainingParams = trainingParams.split(',') 40 | configFile = trainingParams[0] 41 | inputSize = int(trainingParams[1]) 42 | outputSize = int(trainingParams[2]) 43 | 44 | print( "Using {} with {} inputs and {} outputs".format(configFile, inputSize, outputSize )) 45 | 46 | if os.path.exists(outputModelFile): 47 | print("Loading existing model") 48 | model = load_model(outputModelFile) 49 | else: 50 | model = generateModel( numInputs = inputSize, numOutputs=outputSize ) 51 | 52 | print( "Reading validation set...") 53 | dataSet = numpy.loadtxt(validationCsv, delimiter=',', comments='#') 54 | vX=dataSet[:,0:inputSize] 55 | vY=dataSet[:,inputSize:] 56 | print("Validation Dataset: {}\nX: {}\nY: {}\n".format(dataSet.shape, vX.shape, vY.shape)) 57 | 58 | print( "Reading training set..." 
) 59 | dataSet = numpy.loadtxt(trainingCsv, delimiter=',', comments='#') 60 | X=dataSet[:,0:inputSize] 61 | Y=dataSet[:,inputSize:] 62 | print("Training Dataset: {}\nX: {}\nY: {}\n".format(dataSet.shape, X.shape, Y.shape)) 63 | 64 | print("Training...") 65 | scoreHistory = collections.deque( maxlen=5 ) 66 | while True: 67 | scores= model.evaluate(vX,vY, verbose=0) 68 | scoreHistory.append(float(scores)) 69 | print("Saving progress... {} Last {}: {}".format(scores, len(scoreHistory), sum(scoreHistory)/len(scoreHistory))) 70 | model.save(outputModelFile) 71 | #model.fit(X,Y, epochs=25, batch_size=16384, verbose=0, shuffle=True) 72 | model.fit(X,Y, epochs=25, batch_size=256, verbose=0, shuffle=True) 73 | 74 | def generateModel( numInputs, numOutputs ): 75 | print("Generating a model with {} inputs and {} outputs".format(numInputs, numOutputs)) 76 | layer1 = 2*numInputs 77 | layer2 = 10*numInputs 78 | layer3 = 5*numInputs 79 | print("Layer 1: {}\nLayer 2: {}\nLayer 3: {}".format(layer1, layer2, layer3)) 80 | 81 | input_layer = Input(shape=(numInputs,)) 82 | 83 | x = Dense( layer1, activation='linear' )(input_layer) 84 | x = LeakyReLU()(x) 85 | x = Dropout(.2)(x) 86 | 87 | x = Dense( layer1, activation='linear' )(input_layer) 88 | x = LeakyReLU()(x) 89 | x = Dropout(.2)(x) 90 | 91 | x = Dense( layer1, activation='linear' )(input_layer) 92 | x = LeakyReLU()(x) 93 | x = Dropout(.2)(x) 94 | 95 | output_layer = Dense( numOutputs, activation='linear')(x) 96 | 97 | model = Model(inputs=input_layer, outputs=output_layer) 98 | adam = Adam(lr=0.0001) 99 | model.compile( optimizer=adam, 100 | loss='logcosh' ) 101 | 102 | return model 103 | 104 | ############################### 105 | # parse arguments 106 | # 107 | def parseArgs(): 108 | parser = argparse.ArgumentParser( description="Generate training data" ) 109 | parser.add_argument('--trainingCsv', help="Path to training CSV", required=True) 110 | parser.add_argument('--validationCsv', help="Path to training containing validation JSON and encoding files", required=True) 111 | parser.add_argument('--outputFile', help="File to write output model to") 112 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 113 | 114 | 115 | return parser.parse_args() 116 | 117 | 118 | ############################### 119 | # program entry point 120 | # 121 | if __name__ == "__main__": 122 | args = parseArgs() 123 | main( args ) -------------------------------------------------------------------------------- /Tools/MakePrediction.py: -------------------------------------------------------------------------------- 1 | # Generate training data from existing faces 2 | from Utils.Training.config import Config 3 | import argparse 4 | import os 5 | import numpy 6 | import random 7 | 8 | ############################### 9 | # Run the program 10 | # 11 | def main( args ): 12 | if args.pydev: 13 | print("Enabling debugging with pydev") 14 | import pydevd 15 | pydevd.settrace(suspend=False) 16 | 17 | modelFile = args.modelFile 18 | modelCfg = os.path.splitext(modelFile)[0] + ".json" 19 | config = Config.createFromFile( modelCfg ) 20 | inputDir = args.inputDir 21 | recursive = args.recursive 22 | outputDir = args.outputDir 23 | multiDir = args.multiDir 24 | 25 | # Delay heavy imports 26 | from keras.models import load_model 27 | 28 | # Work around low-memory GPU issue 29 | import tensorflow as tf 30 | tfconfig = tf.ConfigProto() 31 | tfconfig.gpu_options.allow_growth = True 32 | session = tf.Session(config=tfconfig) 33 | 34 | model = 
load_model(modelFile) 35 | modelName = os.path.splitext(os.path.basename(modelFile))[0] 36 | baseName = "" 37 | 38 | face = config.getBaseFace() 39 | # Read in all of the files from inputDir 40 | for root, subdirs, files in os.walk(inputDir): 41 | for file in files: 42 | try: 43 | skipSample = random.random() < args.skipChance 44 | relatedFiles = [] 45 | if multiDir: 46 | if skipSample: 47 | continue 48 | if not file.endswith(".json"): 49 | continue 50 | baseName = os.path.splitext( file )[0] 51 | for rfile in filter( lambda x: x.startswith(baseName), files ): 52 | relatedFiles.append( os.path.join(root,rfile ) ) 53 | else: 54 | if skipSample: 55 | break 56 | for rfile in files: 57 | relatedFiles.append( os.path.join(root, rfile ) ) 58 | 59 | 60 | outRow = config.generateParams( relatedFiles ) 61 | outShape = config.getShape() 62 | dataSet = numpy.array([outRow[:outShape[0]]]) 63 | predictions = model.predict(dataSet) 64 | rounded = [float(round(x,5)) for x in predictions[0]] 65 | face.importFloatList(rounded) 66 | 67 | outName = root.lstrip(inputDir) 68 | outName = outName.lstrip('/') 69 | outName = outName.lstrip('\\') 70 | outputFolder = os.path.join( outputDir, outName ) 71 | try: 72 | os.makedirs(outputFolder) 73 | except: 74 | pass 75 | 76 | # In multiDir, the 'folder' is the baseName of the file 77 | if multiDir: 78 | folderName = baseName 79 | else: 80 | folderName = os.path.split(root)[-1] 81 | 82 | outputFullPath = os.path.join( outputFolder, "{}_{}.json".format(folderName, modelName)) 83 | # discard animatable flags 84 | face.updateJson( discardAnimatable = True ) 85 | face.save( outputFullPath ) 86 | print( "Generated {}".format(outputFullPath) ) 87 | except Exception as e: 88 | print( "ERROR: Failed to generate model from {} - {}".format(root, str(e) ) ) 89 | 90 | # If not multiDir then we've already processed all of the files 91 | if not multiDir: 92 | break 93 | 94 | if not args.recursive: 95 | break 96 | 97 | 98 | 99 | 100 | ############################### 101 | # parse arguments 102 | # 103 | def parseArgs(): 104 | parser = argparse.ArgumentParser( description="Generate a VaM model from a face encoding" ) 105 | parser.add_argument('--modelFile', help="Model to use for predictions", required=True) 106 | parser.add_argument('--inputDir', help="Directory containing input encodings", required=True) 107 | parser.add_argument("--recursive", action='store_true', default=False, help="Iterate to subdirectories of input path") 108 | parser.add_argument('--outputDir', help="Output VaM files directory", required=True) 109 | parser.add_argument("--multiDir", action='store_true', default=False, help="Allow multiple predictions per directory. Assume supporting files start with json files name") 110 | parser.add_argument("--skipChance", type=float, default=0.0, help="Chance to skip generating a model. Used for training set sampling. 
Defaults to 0.0") 111 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 112 | 113 | 114 | return parser.parse_args() 115 | 116 | 117 | ############################### 118 | # program entry point 119 | # 120 | if __name__ == "__main__": 121 | args = parseArgs() 122 | main( args ) -------------------------------------------------------------------------------- /Tools/CreateTrainingVariations.py: -------------------------------------------------------------------------------- 1 | # Generate training data from existing faces 2 | 3 | from Utils.Face.vam import VamFace 4 | import argparse 5 | import os 6 | import glob 7 | import copy 8 | import random 9 | 10 | ############################### 11 | # Run the program 12 | # 13 | def main( args ): 14 | if args.pydev: 15 | print("Enabling debugging with pydev") 16 | import pydevd 17 | pydevd.settrace(suspend=False) 18 | inputPath = args.inputJsonPath 19 | basePath = args.baseJsonPath 20 | outputPath = args.outputPath 21 | dirRotateInterval = args.rotateDirectoryInterval 22 | baseFace = VamFace( basePath ) 23 | baseFace.trimToAnimatable() 24 | 25 | # Read in all of the files from inputpath 26 | inputFaces = [] 27 | print( "Loading input faces from {}".format(inputPath)) 28 | for entry in glob.glob(os.path.join(inputPath, '*.json')): 29 | try: 30 | newFace = VamFace( entry ) 31 | # Only keep the relevant morphs 32 | morphCnt = len(newFace.morphFloats) 33 | newFace.matchMorphs(baseFace) 34 | inputFaces.append(newFace) 35 | except: 36 | print("Error loading {}".format(entry)) 37 | 38 | print( "Loaded {} faces".format(len(inputFaces))) 39 | if len(inputFaces) == 0: 40 | print("No starting point faces were loaded!") 41 | exit(-1) 42 | faceCnt = 0; 43 | print( "Generating variations") 44 | 45 | maxVariantsSize = 10000 46 | mutateChance = .6 47 | mateChance = .7 48 | faceVariants = [] + inputFaces 49 | nextRotation = faceCnt + dirRotateInterval 50 | rotatedOutputPath = getNextDir( outputPath ) 51 | while faceCnt < args.numFaces: 52 | #for face1 in faceVariants: 53 | face1 = faceVariants[random.randint(0,len(faceVariants)-1)] 54 | 55 | # Randomly take parameters from the other face 56 | shouldMate = random.random() < mateChance 57 | shouldMutate = random.random() < mutateChance 58 | 59 | if shouldMate or shouldMutate: 60 | newFace = copy.deepcopy(face1) 61 | 62 | if shouldMate: 63 | mateIdx = random.randint(0, len(faceVariants)-1) 64 | mate(newFace, faceVariants[mateIdx], random.randint(1, len(newFace.morphFloats))) 65 | 66 | # Randomly apply mutations to the current face 67 | if shouldMutate: 68 | mutate(newFace, random.randint(0,random.randint(1,50)) ) 69 | 70 | newFace.save( os.path.join(rotatedOutputPath, "face_variant_{}_{}.json".format(faceCnt, random.randint(0,99999)))) 71 | # If at max size, replace a random element. 
Otherwise append 72 | if len(faceVariants) >= maxVariantsSize: 73 | faceVariants[ random.randint(0, len(faceVariants) - 1) ] = newFace 74 | else: 75 | faceVariants.append(newFace) 76 | faceCnt += 1 77 | if faceCnt % 500 == 0: 78 | print( "{}/{}".format(faceCnt,args.numFaces) ) 79 | if faceCnt >= nextRotation: 80 | nextRotation = faceCnt + dirRotateInterval 81 | rotatedOutputPath = getNextDir( outputPath ) 82 | 83 | 84 | def getNextDir( root ): 85 | for i in range(9999): 86 | nextDir = os.path.join(root, "{}".format(i)) 87 | if not os.path.exists(nextDir): 88 | os.makedirs( nextDir ) 89 | return nextDir 90 | raise Exception("Couldn't find unused directory!") 91 | 92 | 93 | def mutate(face, mutationCount): 94 | for i in range(mutationCount): 95 | face.randomize( random.randint(0, len(face.morphFloats) - 1 ) ) 96 | 97 | 98 | def mate(targetFace, otherFace, mutationCount ): 99 | if len(targetFace.morphFloats) != len(otherFace.morphFloats): 100 | raise Exception("Morph float list didn't match! {} != {}".format(len(targetFace.morphFloats), len(otherFace.morphFloats))) 101 | for i in range(mutationCount): 102 | morphIdx = random.randint(0, len(otherFace.morphFloats) - 1) 103 | targetFace.morphFloats[morphIdx] = otherFace.morphFloats[morphIdx] 104 | 105 | ############################### 106 | # parse arguments 107 | # 108 | def parseArgs(): 109 | parser = argparse.ArgumentParser( description="Generate training data" ) 110 | parser.add_argument('--inputJsonPath', help="Directory containing json files to start with", required=True) 111 | parser.add_argument('--baseJsonPath', help="JSON file with relevant morphs marked as Animatable", required=True) 112 | parser.add_argument('--outputPath', help="Directory to write output data to", default="output") 113 | parser.add_argument('--numFaces', type=int, help="Number of faces to generate before stopping", default=10000 ) 114 | parser.add_argument("--rotateDirectoryInterval", type=int, default=1000, help="How often to rotate directories") 115 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 116 | 117 | 118 | return parser.parse_args() 119 | 120 | 121 | ############################### 122 | # program entry point 123 | # 124 | if __name__ == "__main__": 125 | args = parseArgs() 126 | main( args ) -------------------------------------------------------------------------------- /Tools/CreateTrainingCsv.py: -------------------------------------------------------------------------------- 1 | # Generate training data from existing faces 2 | from Utils.Training.config import Config 3 | import multiprocessing 4 | import queue 5 | import argparse 6 | import glob 7 | import os 8 | import csv 9 | import fnmatch 10 | import time 11 | 12 | ############################### 13 | # Run the program 14 | # 15 | def main( args ): 16 | if args.pydev: 17 | print("Enabling debugging with pydev") 18 | import pydevd 19 | pydevd.settrace(suspend=False) 20 | 21 | inputPath = args.inputPath 22 | outputName = args.outputName 23 | numThreads = args.numThreads 24 | overwrite = args.overwrite; 25 | config = Config.createFromFile( args.configFile ) 26 | 27 | 28 | poolWorkQueue = multiprocessing.Queue(maxsize=2*numThreads) 29 | doneEvent = multiprocessing.Event() 30 | if numThreads > 1: 31 | pool = [] 32 | for idx in range(numThreads): 33 | proc = multiprocessing.Process(target=worker_process_func, args=(idx, poolWorkQueue, doneEvent, config, args) ) 34 | proc.start() 35 | pool.append( proc ) 36 | else: 37 | pool = None 38 | doneEvent.set() 
39 | 40 | 41 | # Read in all of the files from inputpath 42 | for root, subdirs, files in os.walk(inputPath): 43 | print("Generator entering directory {}".format(root)) 44 | outCsvFile = os.path.join(root,outputName) 45 | if overwrite or not os.path.exists( outCsvFile ): 46 | poolWorkQueue.put( (root, outCsvFile) ) 47 | if pool is None: 48 | worker_process_func(0, poolWorkQueue, doneEvent, config, args) 49 | 50 | if not args.recursive: 51 | break 52 | 53 | print("Generator done!") 54 | doneEvent.set() 55 | if pool: 56 | for proc in pool: 57 | proc.join() 58 | 59 | 60 | 61 | ############################### 62 | # Worker function for helper processes 63 | ############################### 64 | def worker_process_func(procId, workQueue, doneEvent, config, args): 65 | print("Worker {} started".format(procId)) 66 | while not ( doneEvent.is_set() and workQueue.empty() ): 67 | try: 68 | work = workQueue.get(block=True, timeout=1) 69 | dirPath = work[0] 70 | outCsvFile = work[1] 71 | outFile = None 72 | numCreated = 0 73 | start = time.time() 74 | try: 75 | globPath = os.path.join( dirPath, "*.json") 76 | for file in glob.glob( globPath ): 77 | try: 78 | basename = os.path.splitext(file)[0] 79 | # Check if we have all support encodings for this json 80 | relatedFilesGlob = "{}*".format(basename) 81 | relatedFiles = [] 82 | for rfile in glob.glob( relatedFilesGlob ): 83 | relatedFiles.append( rfile ) 84 | 85 | # Have all files? Convert them to CSV 86 | outRow = config.generateParams( relatedFiles ) 87 | if outFile is None: 88 | print( "Worker {} creating {}".format(procId, outCsvFile)) 89 | outFile = open( outCsvFile, 'w' ) 90 | writer = csv.writer( outFile, lineterminator='\n') 91 | shape = config.getShape() 92 | print( "#{},{},{}".format( args.configFile, shape[0], shape[1] ), file=outFile ) 93 | writer.writerow( outRow ) 94 | numCreated += 1 95 | 96 | except Exception as e: 97 | pass 98 | #print( "Failed to generate CSV from {} - {}".format( file, str(e))) 99 | print( "Worker {} done with {} ({} entries took {} seconds, at {} entries/second)".format(procId, outCsvFile, numCreated, time.time() - start, numCreated/( time.time() - start ) ) ) 100 | 101 | except Exception as e: 102 | print("Worker {} failed generating {} : {}".format(procId, outCsvFile, str(e))) 103 | 104 | except queue.Empty: 105 | pass 106 | print("Worker {} done!".format(procId)) 107 | 108 | 109 | ############################### 110 | # parse arguments 111 | # 112 | def parseArgs(): 113 | parser = argparse.ArgumentParser( description="Generate training data" ) 114 | parser.add_argument('--inputPath', help="Directory containing JSON and encoding files", required=True) 115 | parser.add_argument('--configFile', help="File with training data generation parameters", required=True) 116 | parser.add_argument("--recursive", action='store_true', default=False, help="Iterate to subdirectories of input path") 117 | parser.add_argument("--outputName", help="Name of CSV file to create in each directory") 118 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 119 | parser.add_argument("--numThreads", type=int, default=1, help="Number of processes to use") 120 | parser.add_argument("--overwrite", action='store_true', default=False, help="Overwrite existing CSV files") 121 | 122 | 123 | return parser.parse_args() 124 | 125 | 126 | ############################### 127 | # program entry point 128 | # 129 | if __name__ == "__main__": 130 | args = parseArgs() 131 | main( args ) 
-------------------------------------------------------------------------------- /Tools/CreateTrainingEncodings.py: -------------------------------------------------------------------------------- 1 | # Generate training data from existing faces 2 | 3 | from Utils.Face.encoded import EncodedFace 4 | from Utils.Face.normalize import FaceNormalizer 5 | from PIL import Image 6 | import multiprocessing 7 | import argparse 8 | import glob 9 | import os 10 | import queue 11 | import fnmatch 12 | 13 | ############################### 14 | # Run the program 15 | # 16 | def main( args ): 17 | if args.pydev: 18 | print("Enabling debugging with pydev") 19 | import pydevd 20 | pydevd.settrace(suspend=False) 21 | inputPath = args.inputPath 22 | #outputPath = args.outputPath 23 | numThreads = args.numThreads 24 | recursive = args.recursive 25 | fileFilter = args.filter.split(',') 26 | debugPose = args.debugPose 27 | 28 | poolWorkQueue = multiprocessing.Queue(maxsize=2*numThreads) 29 | doneEvent = multiprocessing.Event() 30 | if numThreads > 1: 31 | pool = [] 32 | for idx in range(numThreads): 33 | proc = multiprocessing.Process(target=worker_process_func, args=(idx, poolWorkQueue, doneEvent, args) ) 34 | proc.start() 35 | pool.append( proc ) 36 | else: 37 | pool = None 38 | doneEvent.set() 39 | 40 | 41 | # Read in all of the files from inputpath 42 | for root, subdirs, files in os.walk(inputPath): 43 | print("Entering directory {}".format(root)) 44 | for filter in fileFilter: 45 | for file in fnmatch.filter(files, filter): 46 | fileName = "{}.encoding".format( os.path.splitext(file)[0] ) 47 | inputFile = os.path.join(root, file ) 48 | outputFile = os.path.join( root, fileName ) 49 | if os.path.exists( "{}.failed".format(outputFile ) ) or \ 50 | os.path.splitext(inputFile)[0].endswith("normalized"): 51 | continue 52 | try: 53 | # If this doesn't throw an exception, then we've already made this encoding 54 | EncodedFace.createFromFile(outputFile) 55 | except: 56 | poolWorkQueue.put( (inputFile, outputFile )) 57 | if pool is None: 58 | worker_process_func(0, poolWorkQueue, doneEvent, args) 59 | 60 | if not recursive: 61 | break 62 | 63 | print("Generator done!") 64 | doneEvent.set() 65 | if pool: 66 | for proc in pool: 67 | proc.join() 68 | 69 | 70 | 71 | ############################### 72 | # Worker function for helper processes 73 | ############################### 74 | def worker_process_func(procId, workQueue, doneEvent, args): 75 | print("Worker {} started".format(procId)) 76 | if args.normalize: 77 | normalizer = FaceNormalizer(args.normalizeSize) 78 | else: 79 | normalizer = None 80 | 81 | while not ( doneEvent.is_set() and workQueue.empty() ): 82 | try: 83 | work = workQueue.get(block=True, timeout=1) 84 | inputFile = work[0] 85 | outputFile = work[1] 86 | #print("Worker thread {} to generate {}->{}".format(procId, inputFile,outputFile)) 87 | try: 88 | image = Image.open(inputFile) 89 | if args.flipFirst: 90 | image = image.transpose(Image.FLIP_LEFT_RIGHT) 91 | if normalizer: 92 | image = normalizer.normalize(image) 93 | fileName = "{}_normalized.png".format( os.path.splitext(inputFile)[0]) 94 | image.save( fileName) 95 | 96 | encodedFace = EncodedFace(image, debugPose = args.debugPose ) 97 | mirrored = "" 98 | if encodedFace.getAngle() < 0: 99 | #print( "Mirroring image to face left") 100 | old_angle = encodedFace.getAngle() 101 | encodedFace = EncodedFace( image.transpose(Image.FLIP_LEFT_RIGHT), debugPose = args.debugPose ) 102 | new_angle = encodedFace.getAngle() 103 | mirrored = "[mirrored] {} : 
{}".format(old_angle, new_angle) 104 | encodedFace.saveEncodings(outputFile) 105 | print("Worker {} generated {} {}".format(procId, outputFile, mirrored ) ) 106 | except Exception as e: 107 | print("Worker {} failed to generate {} : {}".format(procId, outputFile, str(e))) 108 | open( "{}.failed".format(outputFile), 'w' ) 109 | except queue.Empty: 110 | pass 111 | print("Worker {} done!".format(procId)) 112 | 113 | ############################### 114 | # parse arguments 115 | # 116 | def parseArgs(): 117 | parser = argparse.ArgumentParser( description="Generate training data" ) 118 | parser.add_argument('--inputPath', help="Directory containing images files to encode", required=True) 119 | parser.add_argument('--filter', help="File filter to process. Defaults to \"*.png,*.jpg\"", default="*.png,*.jpg") 120 | parser.add_argument('--normalizeSize', type=int, help="Size of normalized output. Defaults to 150", default=150) 121 | parser.add_argument('--numJitters', type=int, help="Number of times to jitter each image. Defaults to 3, which is 3x slower than 1", default=3) 122 | #parser.add_argument('--outputPath', help="Directory to write output data to", default="output") 123 | parser.add_argument("--debugPose", action='store_true', default=False, help="Display landmarks and pose on each image") 124 | parser.add_argument("--recursive", action='store_true', default=False, help="Recursively enter directories") 125 | parser.add_argument("--normalize", action='store_true', default=True, help="Perform image normalization") 126 | parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging") 127 | parser.add_argument("--numThreads", type=int, default=1, help="Number of processes to use") 128 | parser.add_argument("--flipFirst", action='store_true', default=False, help="Mirror images by default") 129 | 130 | 131 | return parser.parse_args() 132 | 133 | 134 | ############################### 135 | # program entry point 136 | # 137 | if __name__ == "__main__": 138 | args = parseArgs() 139 | main( args ) -------------------------------------------------------------------------------- /Utils/Vam/window.py: -------------------------------------------------------------------------------- 1 | # Class to interact with VAM window 2 | 3 | import win32gui 4 | import win32con 5 | import pyautogui 6 | import win32pipe; 7 | import win32file; 8 | import pywintypes; 9 | import os 10 | import time 11 | import json 12 | import random 13 | 14 | # convert a rect of (left, top, right, bottom) to (x, y, w, h) 15 | def _rect2xywh(rect): 16 | left,top,right,bottom = rect 17 | return (left, top, right-left, bottom-top ) 18 | 19 | 20 | class VamWindow: 21 | _wHndl = 0 22 | _pipe = None 23 | _idx = 0 24 | _eom = "" 25 | _pipeReadBuf = "" 26 | 27 | def __init__(self, pipe = None ): 28 | if pipe != None: 29 | try: 30 | self._pipe = win32file.CreateFile( 31 | r'\\.\\pipe\\' + pipe, 32 | win32file.GENERIC_READ | win32file.GENERIC_WRITE, 33 | 0, 34 | None, 35 | win32file.OPEN_EXISTING, 36 | 0, 37 | None 38 | ) 39 | except pywintypes.error as e: 40 | print( str(e) ); 41 | 42 | self._getVamHndl() 43 | 44 | self._clickLocations = None 45 | if 0 == self._getVamHndl(): 46 | raise Exception("Failed to get handle to VaM window! 
Is VaM running?") 47 | 48 | 49 | def _getVamHndl(self): 50 | if not win32gui.IsWindow(self._wHndl): 51 | def callback(hwnd, hwnds): 52 | if win32gui.GetWindowText(hwnd) == "VaM": 53 | hwnds.append(hwnd) 54 | 55 | hwnds = [] 56 | win32gui.EnumWindows( callback, hwnds ) 57 | self._wHndl = hwnds[self._idx] 58 | return self._wHndl 59 | 60 | 61 | def _getVamRect(self): 62 | return win32gui.GetWindowRect(self._getVamHndl()) 63 | 64 | def setClickLocations(self, clickLocations = [(130,39), (248,178)]): 65 | self._clickLocations = clickLocations 66 | 67 | def focus(self): 68 | delay = None # if the window wasn't already active we want to give it time to focus 69 | hwnd = self._getVamHndl() 70 | 71 | # Is the window minimized? 72 | if win32gui.IsIconic( hwnd ): 73 | win32gui.ShowWindow(hwnd, win32con.SW_RESTORE) 74 | delay = "Window was minimized" 75 | 76 | # Delay if the window wasn't already active 77 | if hwnd != win32gui.GetForegroundWindow(): 78 | delay = "Window was not in focus" 79 | 80 | win32gui.ShowWindow(hwnd, 5) 81 | win32gui.SetForegroundWindow( hwnd ) 82 | 83 | 84 | if delay: 85 | print( "Delaying because: {}".format( delay )) 86 | time.sleep(.2) 87 | 88 | 89 | # Find an image in a region. Originalyl used for loadLook, but is very unreliable 90 | def locateInWindow(self, image, region=None, entireScreen=False): 91 | wX,wY,wW,wH = _rect2xywh(self._getVamRect() ) 92 | 93 | # If a region was supplied then convert the window-relative coordinates to screen coordinates 94 | if not region is None: 95 | print( "Searching in region {}".format(region)) 96 | rX,rY,rW,rH = region 97 | wX += rX 98 | wY += rY 99 | wW = min( rW, wW - rX ) 100 | wH = min( rH, wH - rY ) 101 | 102 | windowRegion = ( wX, wY, wW, wH ) 103 | ret = pyautogui.locate( image, pyautogui.screenshot("haystack.png", region=windowRegion) ) 104 | 105 | # Found a result in specified region. Convert to window coordinates 106 | if ret and region: 107 | retX,retY,retW,retH = ret 108 | ret = ( retX + rX, retY + rY, retW + rW, retH + rH ) 109 | 110 | # If not found in region then search entire screen 111 | if ret is None and entireScreen: 112 | print( "Switching to entire screen") 113 | return self.locateInWindow( image ) 114 | print("Returning: {}".format(ret)) 115 | return ret 116 | 117 | 118 | def clickInWindow(self, x, y = None ): 119 | if y is None and type(x) == tuple: 120 | y = x[1] 121 | x = x[0] 122 | offsetX, offsetY, _, _ = self._getVamRect() 123 | pyautogui.click( x + offsetX, y + offsetY ) 124 | 125 | 126 | # Click the buttons to load the look. 127 | # ClickLocations is an array of (x,y) tuples which will be clicked in order 128 | def clickLoadLook(self): 129 | self.focus() 130 | # If coordinates were passed in, just click the mouse on them one after the other 131 | if self._clickLocations: 132 | for coords in self._clickLocations: 133 | self.clickInWindow( coords ) 134 | return 135 | 136 | 137 | def loadLook(self, jsonPath, angles = [0]): 138 | if self._pipe is not None: 139 | self.loadLookPipe( jsonPath, self._pipe, angles ) 140 | else: 141 | raise Exception("Todo: Reimplement window clicking. 
Specify a pipe for control") 142 | 143 | def loadLookPipe(self, jsonPath, pipe, angles ): 144 | msg = {}; 145 | msg["cmd"] = "screenshot"; 146 | msg["angles"] = angles 147 | msg["json"] = jsonPath 148 | msg["outputPath"] = jsonPath 149 | msg["dimensions"] = [ 256, 256 ] 150 | self._writeToPipe( pipe, json.dumps(msg)) 151 | 152 | def syncPipe(self, pipe ): 153 | time.sleep(.1) 154 | return 155 | print("Sync pipe!") 156 | syncId = random.randint(0,1000) 157 | msg = {}; 158 | msg["cmd"] = "echo"; 159 | msg["id"] = syncId; 160 | self._writeToPipe( pipe, json.dumps(msg)) 161 | data = self._readFromPipe( pipe ) 162 | print("Response: {}".format(data)) 163 | 164 | def _writeToPipe(self, pipe, msg): 165 | win32file.WriteFile( pipe, (msg + self._eom).encode() ) 166 | 167 | def _readFromPipe(self, pipe ): 168 | retBuf = "" 169 | while True: 170 | rc,data = win32file.ReadFile( pipe, 32 ) 171 | self._pipeReadBuf += data.decode() 172 | eomIdx = self._pipeReadBuf.find( self._eom ) 173 | if eomIdx >= 0: 174 | retBuf = self._pipeReadBuf[:eomIdx] 175 | _pipeReadBuf = self._pipeReadBuf[ eomIdx + len(self._eom ):] 176 | break 177 | print( "Read {}".format(data.decode())) 178 | return retBuf 179 | 180 | def getScreenShot(self, region=None): 181 | self.focus() 182 | wX,wY,wW,wH = _rect2xywh(self._getVamRect() ) 183 | 184 | # If a region was supplied then convert the window-relative coordinates to screen coordinates 185 | if not region is None: 186 | rX,rY,rW,rH = region 187 | wX += rX 188 | wY += rY 189 | wW = min( rW, wW - rX ) 190 | wH = min( rH, wH - rY ) 191 | else: 192 | wX,wY,wW,wH = _rect2xywh( self._getVamRect() ) 193 | 194 | windowRegion = ( wX, wY, wW, wH ) 195 | img = pyautogui.screenshot(region=windowRegion) 196 | return img 197 | -------------------------------------------------------------------------------- /Utils/Training/param_generator.py: -------------------------------------------------------------------------------- 1 | # Class to handle formatting data into csv parameters 2 | 3 | import json 4 | import csv 5 | import math 6 | from Utils.Face.encoded import EncodedFace 7 | from Utils.Face.vam import VamFace 8 | 9 | class ParamGenerator: 10 | 11 | def __init__(self, paramConfig, requiredAngles, relatedFiles, baseFace ): 12 | self._config = paramConfig 13 | self._encodings = [] 14 | self._vamFaces = [] 15 | self._angles = requiredAngles 16 | self._angles.sort() 17 | self._facebuckets = {} 18 | self._baseFace = baseFace 19 | 20 | self._generators = { "encoding": self._encodingParams, 21 | "json": self._jsonParams, 22 | "eye_mouth_ratio": self._eye_mouth_ratio_params, 23 | "mouth_chin_ratio": self._mouth_chin_ratio_params, 24 | "eye_height_width_ratio": self._eye_height_width_ratio_params, 25 | "nose_height_width_ratio": self._nose_height_width_ratio_params, 26 | "brow_height_width_ratio": self._brow_height_width_ratio_params, 27 | "brow_chin_ratio": self._brow_chin_ratio_params, 28 | "custom_action": self._custom_action } 29 | 30 | for angle in self._angles: 31 | self._facebuckets[angle] = [] 32 | 33 | # Read all encodings in from the file list 34 | for file in relatedFiles: 35 | try: 36 | if isinstance(file, EncodedFace): 37 | newFace = file 38 | else: 39 | newFace = EncodedFace.createFromFile(file) 40 | self._encodings.append(newFace) 41 | except: 42 | try: 43 | vamFace = VamFace(file) 44 | vamFace.matchMorphs( baseFace ) 45 | self._vamFaces.append( vamFace ) 46 | except: 47 | continue 48 | 49 | # Now put the encodings into the bucket with the closest angle 50 | for encoding in 
self._encodings: 51 | nearestBucket = abs(self._angles[0]) 52 | for angle in self._angles: 53 | if abs( abs( encoding.getAngle() ) - abs( angle ) ) < abs( abs( encoding.getAngle() ) - abs(nearestBucket) ): 54 | nearestBucket = abs(angle) 55 | self._facebuckets[nearestBucket].append(encoding) 56 | 57 | 58 | def getParams(self): 59 | outArray = [] 60 | for param in self._config: 61 | if param["name"] in self._generators: 62 | outArray.extend(self._generators[param["name"]]( param["params"])) 63 | else: 64 | raise Exception( "Generator {} not found!".format(param["name"])) 65 | return outArray 66 | 67 | def _jsonParams(self, params): 68 | averages = [0] * len(self._baseFace.morphFloats) 69 | for face in self._vamFaces: 70 | for idx,val in enumerate(face.morphFloats): 71 | averages[idx] += val/len(self._vamFaces) 72 | return averages 73 | 74 | def _encodingParams(self, params): 75 | angle = None 76 | for param in params: 77 | if "name" in param and param["name"] == "angle": 78 | angle = float(param["value"]) 79 | break 80 | 81 | encodings = [] 82 | for encoding in self._facebuckets[angle]: 83 | encodings.append(encoding.getEncodings()) 84 | 85 | if len(encodings) == 0: 86 | raise Exception( "No encodings found for angle {}".format(angle)) 87 | averages = [0]*len(encodings[0]) 88 | 89 | for encoding in encodings: 90 | for idx,val in enumerate(encoding): 91 | averages[idx] += val 92 | for idx,val in enumerate(averages): 93 | averages[idx] /= len(encodings) 94 | 95 | return averages 96 | 97 | 98 | 99 | def _eye_mouth_ratio_params(self, params): 100 | averages = self._getAverages( params ) 101 | return [( averages["left_eye"][0] + averages["right_eye"][0] ) / (averages["top_lip"][0] + averages["bottom_lip"][0] )] 102 | 103 | def _eye_height_width_ratio_params(self, params): 104 | averages = self._getAverages( params ) 105 | return [( averages["left_eye"][1] + averages["right_eye"][1] ) / ( averages["left_eye"][0] + averages["right_eye"][0] )] 106 | 107 | def _mouth_chin_ratio_params(self, params): 108 | averages = self._getAverages( params ) 109 | return [averages["top_lip"][0] / averages["chin"][0]] 110 | 111 | 112 | def _nose_height_width_ratio_params(self, params): 113 | averages = self._getAverages( params ) 114 | return [averages["nose_bridge"][1] / averages["nose_tip"][0]] 115 | 116 | 117 | def _brow_height_width_ratio_params(self, params): 118 | averages = self._getAverages( params ) 119 | return [( averages["left_eyebrow"][1] + averages["right_eyebrow"][1] ) / ( averages["left_eyebrow"][0] + averages["right_eyebrow"][0] )] 120 | 121 | def _brow_chin_ratio_params(self, params): 122 | averages = self._getAverages( params ) 123 | return [( averages["left_eyebrow"][0] + averages["right_eyebrow"][0] ) / ( averages["chin"][0]) ] 124 | 125 | 126 | @staticmethod 127 | def _vmAdd( workarea, param1, param2 ): 128 | l = ParamGenerator._vmResolveVariable( workarea, param1 ) 129 | r = ParamGenerator._vmResolveVariable( workarea, param2 ) 130 | return l+r; 131 | 132 | @staticmethod 133 | def _vmSub( workarea, param1, param2 ): 134 | l = ParamGenerator._vmResolveVariable( workarea, param1 ) 135 | r = ParamGenerator._vmResolveVariable( workarea, param2 ) 136 | return l-r; 137 | 138 | @staticmethod 139 | def _vmDiv( workarea, param1, param2 ): 140 | l = ParamGenerator._vmResolveVariable( workarea, param1 ) 141 | r = ParamGenerator._vmResolveVariable( workarea, param2 ) 142 | return l/r 143 | 144 | @staticmethod 145 | def _vmMult( workarea, param1, param2 ): 146 | l = ParamGenerator._vmResolveVariable( 
workarea, param1 ) 147 | r = ParamGenerator._vmResolveVariable( workarea, param2 ) 148 | return l*r; 149 | 150 | @staticmethod 151 | def _vmSet( workarea, param1, param2 ): 152 | return 0; 153 | 154 | @staticmethod 155 | def _vmResolveVariable( workarea, varName ): 156 | ret = None 157 | if '.' in varName: 158 | axisMap = { 'w': 0, 'h':1 } 159 | landmark,axis = varName.split('.') 160 | if landmark in workarea["landmarks"]: 161 | ret = workarea["landmarks"][landmark][axisMap[axis]] 162 | else: 163 | if varName in workarea["variables"]: 164 | ret = workarea["variables"][varName] 165 | return ret 166 | 167 | @staticmethod 168 | def _vmSetVariable( workarea, varName, value ): 169 | workarea["variables"][varName] = value 170 | 171 | def _custom_action(self, params): 172 | opcodes = { 173 | "add": ParamGenerator._vmAdd, 174 | "subtract": ParamGenerator._vmSub, 175 | "divide": ParamGenerator._vmDiv, 176 | "multiply": ParamGenerator._vmMult, 177 | "set": ParamGenerator._vmSet 178 | } 179 | 180 | averages = self._getAverages( params ) 181 | actionArray = None 182 | for param in params: 183 | if param["name"] == "actions": 184 | actionArray = param["value"]; 185 | 186 | if actionArray is not None: 187 | workArea = {} 188 | workArea["landmarks"] = averages 189 | workArea["variables"] = {} 190 | vm = {} 191 | for op in actionArray: 192 | opcode = op["op"] 193 | param1 = op["param1"] if "param1" in op else None 194 | param2 = op["param2"] if "param2" in op else None 195 | dest = op["dest"] if "dest" in op else None 196 | 197 | if opcode in opcodes: 198 | opret = opcodes[opcode]( workArea, param1, param2 ) 199 | if dest: 200 | ParamGenerator._vmSetVariable( workArea, dest, opret ) 201 | elif opcode == "return": 202 | return [ParamGenerator._vmResolveVariable(workArea, param1)] 203 | raise Exception( "Ill-formed action: {}".format(params)) 204 | 205 | def _getAverages(self, params): 206 | angle = None 207 | for param in params: 208 | if "name" in param and param["name"] == "angle": 209 | angle = float(param["value"]) 210 | break 211 | 212 | landmarks = [] 213 | for encoding in self._facebuckets[angle]: 214 | landmarks.append(encoding.getLandmarks()) 215 | 216 | averages = ParamGenerator._calcAverageSizes( landmarks ) 217 | return averages 218 | 219 | 220 | @staticmethod 221 | def _calcAverageSizes( landmarks ): 222 | averages = {} 223 | for landmark in landmarks: 224 | sizes = ParamGenerator._calcSizes(landmark) 225 | for key,shape in sizes.items(): 226 | if key not in averages: 227 | averages[key] = [0,0] 228 | for idx,dim in enumerate(shape): 229 | averages[key][idx] += ( dim / len(landmarks) ) 230 | return averages 231 | 232 | @staticmethod 233 | def _calcSizes(landmarks): 234 | # Find the height and widths of all landmarks 235 | sizes = {} 236 | for key,pts in landmarks.items(): 237 | leftmost = min( pts, key = lambda t: t[0] ) 238 | rightmost = max( pts, key = lambda t: t[0] ) 239 | highest = min( pts, key = lambda t: t[1] ) 240 | lowest = max( pts, key = lambda t: t[1] ) 241 | width = math.hypot( rightmost[0] - leftmost[0], rightmost[1] - leftmost[1] ) 242 | height = math.hypot( lowest[0] - highest[0], lowest[1] - highest[1] ) 243 | sizes[key] = [ width, height ] 244 | return sizes -------------------------------------------------------------------------------- /Utils/VamMod/VamMod.cs: -------------------------------------------------------------------------------- 1 | // Foto2Vam Mod for VAM. Use dnSpy to inject this into the executable and trigger creation of Foto2VamServer. 
2 | 3 | using SimpleJSON; 4 | using System; 5 | using System.Collections; 6 | using System.Collections.Generic; 7 | using System.IO; 8 | using System.IO.Pipes; 9 | using System.Text; 10 | using System.Threading; 11 | using UnityEngine; 12 | 13 | 14 | namespace VamMod 15 | { 16 | public class Foto2VamServer 17 | { 18 | ~Foto2VamServer() 19 | { 20 | this._exitThread = true; 21 | this._event.Set(); 22 | UnityEngine.Object.Destroy(this._imageMaker); 23 | this._thread.Join(); 24 | } 25 | 26 | private void HandleTakeScreenshot(JSONNode aJsonNode) 27 | { 28 | string value = aJsonNode["json"].Value; 29 | string value2 = aJsonNode["outputPath"].Value; 30 | int asInt = aJsonNode["dimensions"][0].AsInt; 31 | int asInt2 = aJsonNode["dimensions"][1].AsInt; 32 | List list = new List(); 33 | foreach (object obj in aJsonNode["angles"].AsArray) 34 | { 35 | JSONNode jsonnode = (JSONNode)obj; 36 | list.Add(jsonnode.AsInt); 37 | } 38 | this.Enqueue(delegate 39 | { 40 | this._imageMaker.TakeScreenshot("Person", value, value2, list, asInt, asInt2); 41 | }); 42 | } 43 | 44 | private void WorkerThread() 45 | { 46 | while (!this._exitThread) 47 | { 48 | this._event.WaitOne(); 49 | object @lock = this._lock; 50 | lock (@lock) 51 | { 52 | while (this._queue.Count > 0) 53 | { 54 | this._queue.Dequeue()(); 55 | } 56 | } 57 | } 58 | } 59 | 60 | public Foto2VamServer() 61 | { 62 | this._pipeServer = new PipeServer(); 63 | this._pipeServer.RegisterHandler("screenshot", new Action(this.HandleTakeScreenshot)); 64 | this._imageMaker = new GameObject().AddComponent(); 65 | this._thread = new Thread(new ThreadStart(this.WorkerThread)); 66 | this._queue = new Queue(); 67 | this._event = new AutoResetEvent(false); 68 | this._lock = new object(); 69 | this._exitThread = false; 70 | this._thread.Start(); 71 | } 72 | 73 | private void Enqueue(Action aAction) 74 | { 75 | object @lock = this._lock; 76 | lock (@lock) 77 | { 78 | this._queue.Enqueue(aAction); 79 | } 80 | this._event.Set(); 81 | } 82 | 83 | private ImageMaker _imageMaker; 84 | 85 | private Thread _thread; 86 | 87 | private Queue _queue; 88 | 89 | private AutoResetEvent _event; 90 | 91 | private bool _exitThread; 92 | 93 | private object _lock; 94 | 95 | private PipeServer _pipeServer; 96 | } 97 | } 98 | 99 | 100 | 101 | 102 | namespace VamMod 103 | { 104 | public class ImageMaker : MonoBehaviour 105 | { 106 | public ImageMaker() 107 | { 108 | this._event = new AutoResetEvent(false); 109 | } 110 | 111 | public void Update() 112 | { 113 | if (this.pendingAction != null) 114 | { 115 | try 116 | { 117 | this.pendingAction(); 118 | } 119 | catch (Exception ex) 120 | { 121 | Debug.LogError("Exception: " + ex.ToString()); 122 | } 123 | finally 124 | { 125 | this.pendingAction = null; 126 | } 127 | } 128 | } 129 | 130 | public void TakeScreenshot(string aName, string aJsonPath, string aOutputPath, List aAngles, int aWidth, int aHeight) 131 | { 132 | this.pendingAction = delegate() 133 | { 134 | GameObject gameObject = GameObject.Find(aName); 135 | if (gameObject == null) 136 | { 137 | foreach (GameObject gameObject2 in UnityEngine.Object.FindObjectsOfType()) 138 | { 139 | if (gameObject2.name.StartsWith(aName)) 140 | { 141 | gameObject = gameObject2; 142 | break; 143 | } 144 | } 145 | } 146 | if (gameObject == null) 147 | { 148 | this._event.Set(); 149 | return; 150 | } 151 | Atom component = gameObject.GetComponent(); 152 | component.LoadAppearancePreset(aJsonPath); 153 | this.StartCoroutine(this.TakeScreenshotCo(component, aOutputPath, aAngles, aWidth, aHeight)); 154 | }; 
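// The delegate above only queues the work: ImageMaker.Update() runs it on
// Unity's main thread, and TakeScreenshotCo signals _event once every PNG
// is written (or when no matching atom is found). Blocking here keeps
// screenshot requests serialized for the pipe-server caller.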
155 | this._event.WaitOne(); 156 | } 157 | 158 | private IEnumerator TakeScreenshotCo(Atom atom, string aOutputPath, List aAngles, int aWidth, int aHeight) 159 | { 160 | List cameras = new List(); 161 | Component head = null; 162 | Component component = null; 163 | foreach (Rigidbody rigidbody in atom.rigidbodies) 164 | { 165 | if (rigidbody.name == "head") 166 | { 167 | head = rigidbody; 168 | } 169 | else if (rigidbody.name == "headControl") 170 | { 171 | component = rigidbody; 172 | } 173 | if (null != component && null != head) 174 | { 175 | break; 176 | } 177 | } 178 | foreach (int aAngle in aAngles) 179 | { 180 | cameras.Add(new ScreenshotCamera(aWidth, aHeight, head.transform, aAngle, 1f)); 181 | } 182 | if (component.transform.rotation != Quaternion.identity) 183 | { 184 | component.transform.SetPositionAndRotation(component.transform.position, Quaternion.identity); 185 | } 186 | SuperController.singleton.HideMainHUD(); 187 | do 188 | { 189 | yield return null; 190 | } 191 | while (SuperController.singleton.IsSimulationPaused()); 192 | Vector3 prevPos = head.transform.position; 193 | for (;;) 194 | { 195 | yield return null; 196 | if ((double)(prevPos - head.transform.position).sqrMagnitude <= 0.01) 197 | { 198 | break; 199 | } 200 | prevPos = head.transform.position; 201 | } 202 | foreach (ScreenshotCamera screenshotCamera in cameras) 203 | { 204 | screenshotCamera.SetPosition(); 205 | } 206 | yield return null; 207 | foreach (ScreenshotCamera screenshotCamera2 in cameras) 208 | { 209 | screenshotCamera2.TakeScreenshot(string.Concat(new string[] 210 | { 211 | aOutputPath, 212 | "_", 213 | screenshotCamera2._angle.ToString(), 214 | ".png" 215 | })); 216 | screenshotCamera2.Release(); 217 | } 218 | this._event.Set(); 219 | yield break; 220 | } 221 | 222 | private Action pendingAction; 223 | 224 | private AutoResetEvent _event; 225 | } 226 | } 227 | 228 | 229 | 230 | 231 | namespace VamMod 232 | { 233 | public class PipeServer 234 | { 235 | public PipeServer() 236 | { 237 | this._connectionCallback = new AsyncCallback(this.HandleConnection); 238 | this._readCallback = new AsyncCallback(this.HandleRead); 239 | this._readBuffer = new byte[4096]; 240 | this._handlers = new Dictionary>(); 241 | this.StartServer(); 242 | } 243 | 244 | private void HandleConnection(IAsyncResult ar) 245 | { 246 | this._pipeServer.EndWaitForConnection(ar); 247 | if (this._pipeServer.IsConnected) 248 | { 249 | this._pipeServer.BeginRead(this._readBuffer, 0, this._readBuffer.Length, this._readCallback, null); 250 | } 251 | } 252 | 253 | private void HandleRead(IAsyncResult ar) 254 | { 255 | int num = this._pipeServer.EndRead(ar); 256 | if (num == 0) 257 | { 258 | this.Disconnect(); 259 | return; 260 | } 261 | this._recvdString += Encoding.Default.GetString(this._readBuffer, 0, num); 262 | string text = ""; 263 | int num2; 264 | while ((num2 = this._recvdString.IndexOf(text)) >= 0) 265 | { 266 | string aJSON = this._recvdString.Substring(0, num2); 267 | this._recvdString = this._recvdString.Substring(num2 + text.Length); 268 | JSONNode aMsg = JSON.Parse(aJSON); 269 | this.HandleMessage(aMsg); 270 | } 271 | this._pipeServer.BeginRead(this._readBuffer, 0, this._readBuffer.Length, this._readCallback, null); 272 | } 273 | 274 | private void Disconnect() 275 | { 276 | if (this._pipeServer.IsConnected) 277 | { 278 | this._pipeServer.Disconnect(); 279 | } 280 | this._pipeServer.Dispose(); 281 | this._pipeServer = null; 282 | this.StartServer(); 283 | } 284 | 285 | ~PipeServer() 286 | { 287 | this.Disconnect(); 288 | } 
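// NOTE: HandleRead above scans _recvdString with an empty search string
// ("text" is ""), which cannot frame messages; the original delimiter
// literal appears to have been lost in decompilation, so treat message
// framing as defined by the Python client rather than by this listing.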
289 | 290 | private void StartServer() 291 | { 292 | if (this._pipeServer != null) 293 | { 294 | this.Disconnect(); 295 | } 296 | this._pipeServer = new NamedPipeServerStream("foto2vamPipe", PipeDirection.InOut); 297 | this._pipeServer.BeginWaitForConnection(this._connectionCallback, null); 298 | } 299 | 300 | private void HandleMessage(JSONNode aMsg) 301 | { 302 | string value = aMsg["cmd"].Value; 303 | if (this._handlers.ContainsKey(value)) 304 | { 305 | this._handlers[value](aMsg); 306 | } 307 | } 308 | 309 | public void RegisterHandler(string aCmd, Action aHandler) 310 | { 311 | this._handlers[aCmd] = aHandler; 312 | } 313 | 314 | private AsyncCallback _connectionCallback; 315 | 316 | private AsyncCallback _readCallback; 317 | 318 | private NamedPipeServerStream _pipeServer; 319 | 320 | private byte[] _readBuffer; 321 | 322 | private string _recvdString; 323 | 324 | private Dictionary> _handlers; 325 | } 326 | } 327 | 328 | 329 | 330 | namespace VamMod 331 | { 332 | internal class ScreenshotCamera 333 | { 334 | public ScreenshotCamera(int aWidth, int aHeight, Transform aTarget, int aAngle, float aDistance) 335 | { 336 | this._camera = new GameObject().AddComponent(); 337 | this._camera.name = "ScreenshotCamera"; 338 | this._camera.enabled = true; 339 | this._camera.fieldOfView = 20f; 340 | this._renderTexture = RenderTexture.GetTemporary(aWidth, aHeight, 24); 341 | this._camera.targetTexture = this._renderTexture; 342 | this._texture2d = new Texture2D(this._renderTexture.width, this._renderTexture.height, TextureFormat.RGB24, false); 343 | this._target = aTarget; 344 | this._angle = aAngle; 345 | this._distance = aDistance; 346 | } 347 | 348 | public void SetPosition() 349 | { 350 | this._camera.transform.SetPositionAndRotation(this._target.position + new Vector3(0f, 0f, this._distance), Quaternion.identity); 351 | this._camera.transform.RotateAround(this._target.position, Vector3.up, (float)this._angle); 352 | this._camera.transform.LookAt(this._target.transform); 353 | } 354 | 355 | public void TakeScreenshot(string aFilename) 356 | { 357 | RenderTexture.active = this._renderTexture; 358 | this._texture2d.ReadPixels(new Rect(0f, 0f, (float)this._renderTexture.width, (float)this._renderTexture.height), 0, 0); 359 | this._texture2d.Apply(); 360 | byte[] bytes = this._texture2d.EncodeToPNG(); 361 | File.WriteAllBytes(aFilename, bytes); 362 | RenderTexture.active = null; 363 | } 364 | 365 | public void Release() 366 | { 367 | UnityEngine.Object.Destroy(this._camera); 368 | UnityEngine.Object.Destroy(this._texture2d); 369 | RenderTexture.ReleaseTemporary(this._renderTexture); 370 | } 371 | 372 | private Camera _camera; 373 | 374 | private RenderTexture _renderTexture; 375 | 376 | private Texture2D _texture2d; 377 | 378 | private Transform _target; 379 | 380 | public int _angle; 381 | 382 | private float _distance; 383 | } 384 | } 385 | -------------------------------------------------------------------------------- /Utils/Face/encoded.py: -------------------------------------------------------------------------------- 1 | # Class to handle faces encoded for recognition 2 | 3 | try: 4 | import face_recognition_hst as face_recognition 5 | except: 6 | import face_recognition 7 | import numpy 8 | from PIL import Image, ImageDraw 9 | import cv2 10 | import math 11 | import json 12 | 13 | 14 | class EncodedFace: 15 | ENCODING_TYPE = "dlib.face_recognition" 16 | ENCODING_VERSION = 1 17 | 18 | def __init__(self, image, region=None, keepImg=False, imgPadding=125, num_jitters=2, debugPose = False): 
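        # image: PIL image, or None when rehydrating via msgpack_decode/createFromFile
        # region: optional (top, right, bottom, left) face box; auto-detected if omitted
        # imgPadding: pixels of context preserved around the detected box before cropping
        # num_jitters: extra dlib encoding passes (slower, but a steadier embedding)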
19 |         if image is None:
20 |             return
21 | 
22 |         nImg = numpy.array(image)
23 | 
24 |         if region is None:
25 |             try:
26 |                 self._region = face_recognition.face_locations(nImg)[0]
27 |             except Exception as e:
28 |                 raise Exception("Failed to find a face in the picture")
29 | 
30 |             # print("Face found at {}".format(self._region))
31 |         else:
32 |             self._region = region
33 |         top, right, bottom, left = self._region
34 | 
35 |         # Apply padding to save more of image
36 |         top = max(0, top - imgPadding)
37 |         left = max(0, left - imgPadding)
38 |         bottom = min(nImg.shape[0], bottom + imgPadding)
39 |         right = min(nImg.shape[1], right + imgPadding)
40 | 
41 |         # crop image to just the face
42 |         self._img = nImg[top:bottom, left:right]
43 | 
44 |         # Get encodings for the face in the image
45 |         try:
46 |             self._encodings = face_recognition.face_encodings(self._img, num_jitters=num_jitters)[0]
47 |             self._landmarks = face_recognition.face_landmarks(self._img)[0]
48 |         except:
49 |             raise Exception("Failed to find face in image")
50 |         (_, self._angle, _) = self._estimatePose(debugPose = debugPose)
51 | 
52 |         if not keepImg:
53 |             self._img = None
54 | 
55 |     @staticmethod
56 |     def msgpack_encode(obj):
57 |         if isinstance(obj, EncodedFace):
58 |             return {'__EncodedFace__': True, 'angle': obj._angle, 'encodings': obj._encodings, 'landmarks': obj._landmarks }
59 |         return obj
60 | 
61 |     @staticmethod
62 |     def msgpack_decode(obj):
63 |         if '__EncodedFace__' in obj:
64 |             decodedFace = EncodedFace(None)
65 |             decodedFace._angle = obj['angle']
66 |             decodedFace._encodings = obj['encodings']
67 |             decodedFace._landmarks = obj['landmarks']
68 |             obj = decodedFace
69 |         return obj
70 | 
71 | 
72 | 
73 |     @staticmethod
74 |     def batchEncode( imageList, batch_size = 128, keepImage = False, debugPose = False ):
75 |         encodings, landmarks = face_recognition.batch_face_encodings_and_landmarks( imageList, landmark_model="large", batch_size=batch_size, location_model="hog" )
76 |         encodedList = []
77 |         for data in zip(encodings, landmarks, imageList):
78 |             if len(data[0]) > 0:
79 |                 encodedFace = EncodedFace(None)
80 |                 encodedFace._encodings = list(data[0][0])
81 |                 encodedFace._landmarks = data[1][0]
82 |                 encodedFace._img = data[2]
83 |                 _, encodedFace._angle, _ = encodedFace._estimatePose( debugPose = debugPose )
84 | 
85 |                 if not keepImage:
86 |                     encodedFace._img = None
87 |             else:
88 |                 encodedFace = None
89 |             encodedList.append(encodedFace)
90 |         return encodedList
91 | 
92 |     @staticmethod
93 |     def createFromFile( fileName ):
94 |         data = open(fileName).read()
95 |         jsonData = json.loads(data)
96 | 
97 |         if jsonData["encoding_version"] != EncodedFace.ENCODING_VERSION:
98 |             raise Exception("Encoding version mismatch! File was {}, reader was {}".format(jsonData["encoding_version"], EncodedFace.ENCODING_VERSION ) )
99 | 
100 |         newEncoding = EncodedFace( None )
101 |         newEncoding._encodings = numpy.array(jsonData["encoding"])
102 |         newEncoding._landmarks = jsonData["landmarks"]
103 |         newEncoding._angle = jsonData["angle"]
104 |         return newEncoding
105 | 
106 | 
107 |     # Determine angle the face is facing
108 |     def _estimatePose(self, img_size = None , landmarks = None, debugPose = False):
109 |         if img_size is None:
110 |             img_size = self._img.shape
111 |         if landmarks is None:
112 |             landmarks = self._landmarks
113 |         # 2D image points.
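        # (Six 2D/3D point correspondences are enough for cv2.solvePnP to
        # recover the head's rotation; only the yaw component is consumed
        # downstream, via getAngle(), to bucket faces by viewing angle.)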
Adapted from https://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/ 114 | image_points = numpy.array([ 115 | landmarks['nose_bridge'][3], # Nose tip 116 | landmarks['chin'][8], # Chin 117 | landmarks['left_eye'][0], # Left eye left corner 118 | landmarks['right_eye'][3], # Right eye right corner 119 | landmarks['top_lip'][0], # Left mouth corner 120 | landmarks['bottom_lip'][0], # Right mouth corner 121 | ], dtype="double") 122 | 123 | # 3D model points 124 | model_points = numpy.array([ 125 | (0.0, 0.0, 0.0), # Nose tip 126 | (0.0, -330.0, -65.0), # Chin 127 | (-225.0, 170.0, -135.0), # Left eye left corner 128 | (225.0, 170.0, -135.0), # Right eye right corne 129 | (-150.0, -150.0, -125.0), # Left Mouth corner 130 | (150.0, -150.0, -125.0) # Right mouth corner 131 | 132 | ]) 133 | 134 | # Camera internals 135 | focal_length = img_size[1] 136 | center = (img_size[1] / 2, img_size[0] / 2) 137 | camera_matrix = numpy.array( 138 | [[focal_length, 0, center[0]], 139 | [0, focal_length, center[1]], 140 | [0, 0, 1]], dtype="double" 141 | ) 142 | 143 | dist_coeffs = numpy.zeros((4, 1)) # Assuming no lens distortion 144 | (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE) 145 | 146 | rMat = cv2.Rodrigues(rotation_vector)[0] 147 | attitude = self._rotation_matrix_to_attitude_angles(rMat) 148 | 149 | attitude_list = [] 150 | for val in numpy.nditer(attitude.T): 151 | attitude_list.append( math.degrees(val) ) 152 | 153 | # Display image with markings 154 | if debugPose: 155 | debugImage = self._img.copy() 156 | 157 | # Draw face landmarks 158 | for p in image_points: 159 | cv2.circle(debugImage, (int(p[0]), int(p[1])), 3, (0,0,255), -1) 160 | 161 | origin, _ = cv2.projectPoints(numpy.array([(0.0, 0.0, 0.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) 162 | xAxis, _ = cv2.projectPoints(numpy.array([(100.0, 0.0, 0.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) 163 | yAxis, _ = cv2.projectPoints(numpy.array([(0.0, 100.0, 0.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) 164 | zAxis, _ = cv2.projectPoints(numpy.array([(0.0, 0.0, 100.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) 165 | 166 | origin = tuple(origin.reshape(-1,2)[0]) 167 | xAxis = tuple(xAxis.reshape(-1,2)[0]) 168 | yAxis = tuple(yAxis.reshape(-1,2)[0]) 169 | zAxis = tuple(zAxis.reshape(-1,2)[0]) 170 | 171 | origin = tuple([int(val) for val in origin]) 172 | xAxis = tuple([int(val) for val in xAxis]) 173 | yAxis = tuple([int(val) for val in yAxis]) 174 | zAxis = tuple([int(val) for val in zAxis]) 175 | 176 | cv2.line(debugImage, origin, xAxis, (255,0,0), 2 ) 177 | cv2.line(debugImage, origin, yAxis, (0,255,0), 2 ) 178 | cv2.line(debugImage, origin, zAxis, (0,0,255), 2 ) 179 | 180 | for _,pointList in landmarks.items(): 181 | for p in pointList: 182 | cv2.circle(debugImage, p, 1, (0,255,0), -1) 183 | 184 | cv2.putText( debugImage, "Rot: {}, {}, {}".format(round(attitude_list[0],3), round(attitude_list[1],3), round(attitude_list[2],3)), 185 | (10,30), 186 | cv2.FONT_HERSHEY_SIMPLEX, 187 | .4, 188 | (0,255,0), 189 | 1 190 | ) 191 | 192 | trans_list = [] 193 | for val in numpy.nditer(translation_vector.T): 194 | trans_list.append(float(val)) 195 | 196 | cv2.putText( debugImage, "Trans: {}, {}, {}".format(round(trans_list[0],3), round(trans_list[1],3), round(trans_list[2],3)), 197 | (10,60), 198 | cv2.FONT_HERSHEY_SIMPLEX, 199 | .4, 200 | 
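# cv2.putText args continue: font scale above, then BGR color and thickness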
(0,255,0), 201 | 1 202 | ) 203 | 204 | 205 | cv2.imshow( "Debug Output", debugImage ) 206 | cv2.waitKey(0) 207 | 208 | return attitude_list 209 | 210 | # Taken from https://stackoverflow.com/questions/44726404/camera-pose-from-solvepnp 211 | # Only seems to get 'yaw' correct, but that's what we want 212 | def _rotation_matrix_to_attitude_angles(self, R): 213 | cos_beta = math.sqrt(R[2,1] * R[2,1] + R[2,2] * R[2,2]) 214 | validity = cos_beta < 1e-6 215 | if not validity: 216 | alpha = math.atan2(R[1,0], R[0,0]) # yaw [z] 217 | beta = math.atan2(-R[2,0], cos_beta) # pitch [y] 218 | gamma = math.atan2(R[2,1], R[2,2]) # roll [x] 219 | else: 220 | alpha = math.atan2(R[1,0], R[0,0]) # yaw [z] 221 | beta = math.atan2(-R[2,0], cos_beta) # pitch [y] 222 | gamma = 0 # roll [x] 223 | return numpy.array([alpha, beta, gamma]) 224 | 225 | def getAngle(self): 226 | return self._angle 227 | 228 | def getImage(self): 229 | return self._img 230 | 231 | def getEncodings(self): 232 | return list(self._encodings) 233 | 234 | def getLandmarks(self): 235 | return self._landmarks 236 | 237 | def getRegion(self): 238 | return self._region 239 | 240 | def compare(self, otherFace): 241 | return face_recognition.face_distance([numpy.array(self._encodings)], numpy.array(otherFace._encodings)).mean() 242 | 243 | def getEncodingJson(self): 244 | return { 'angle': self._angle, 'landmarks': self._landmarks, 'encoding': self._encodings.tolist(), 'encoding_format': self.ENCODING_TYPE, 'encoding_version': self.ENCODING_VERSION } 245 | 246 | def saveEncodings(self, filename): 247 | jsonData = self.getEncodingJson() 248 | with open(filename, 'w') as outfile: 249 | json.dump(jsonData, outfile) 250 | 251 | def saveImage(self, filename, landmarks=True): 252 | if self._img is None: 253 | raise Exception("Image was not saved in constructor!") 254 | 255 | if isinstance( self._img, numpy.ndarray ): 256 | img = Image.fromarray( self._img ) 257 | else: 258 | img = self._img 259 | if landmarks: 260 | draw = ImageDraw.Draw(img) 261 | for key, val in self._landmarks.items(): 262 | draw.point(val) 263 | 264 | img.save(filename) 265 | -------------------------------------------------------------------------------- /Utils/Face/vam.py: -------------------------------------------------------------------------------- 1 | # Class to manipulate a VAM face 2 | import json 3 | import random 4 | import copy 5 | 6 | class VamFace: 7 | wHndl = 0 8 | rect = () 9 | 10 | # Initialize a base face from a JSON file 11 | # Get minimum and maximum values for parameters from minFace and maxFace files 12 | def __init__(self, baseFileName, minFileName = None, maxFileName = None, discardExtra = True): 13 | self.jsonData = {} 14 | 15 | # reference to the 'morphs' in the json 16 | self.morphs = None 17 | # reference to head rotation in the json 18 | self.headRotation = None 19 | # reference to storables in the json 20 | self._storables = None 21 | 22 | # morphs as a list of floats 23 | self.morphFloats = [] 24 | # valid ranges for each morph value 25 | self.morphInfo = [] 26 | 27 | # Abort out if not loading a file. Leaves face partially initialized 28 | if baseFileName is None: 29 | return 30 | 31 | self.load( baseFileName, discardExtra = discardExtra ) 32 | 33 | self.minFace = VamFace( minFileName ) if not minFileName is None else None 34 | self.maxFace = VamFace( maxFileName ) if not maxFileName is None else None 35 | 36 | self._createMorphFloats() 37 | 38 | def _createMorphFloats(self): 39 | # Create a list of floats representing each morph. 
Pull minimum and maximum 40 | # values, defaulting to 0-1.0 if a value is not present 41 | self.morphFloats = [] 42 | self.morphInfo = [] 43 | for morph in self.morphs: 44 | minVal = 0 45 | maxVal = 1.0 46 | defaultVal = 0 47 | 48 | val = self.minFace._getMorphValue(morph['name']) if not self.minFace is None else None 49 | minVal = float(val) if not val is None else minVal 50 | 51 | val = self.maxFace._getMorphValue(morph['name']) if not self.maxFace is None else None 52 | maxVal = float(val) if not val is None else maxVal 53 | 54 | if 'value' in morph: 55 | defaultVal = float(morph['value']) 56 | self.morphFloats.append( defaultVal ) 57 | self.morphInfo.append( { 'min': minVal, 'max': maxVal, 'name': morph['name'] } ) 58 | 59 | # Note: msgpack really only is good for verifying a cache uses the same face, not for really saving off faces 60 | @staticmethod 61 | def msgpack_encode(obj): 62 | if isinstance(obj, VamFace): 63 | return {'__VamFace__': True, 'morphs': obj.morphs, 'minFace': obj.minFace, 'maxFace': obj.maxFace } 64 | return obj 65 | 66 | @staticmethod 67 | def msgpack_decode(obj): 68 | if '__VamFace__' in obj: 69 | decodedFace = VamFace(None) 70 | decodedFace.morphs = obj['morphs'] 71 | decodedFace.minFace = obj['minFace'] 72 | decodedFace.maxFace = obj['maxFace'] 73 | decodedFace._createMorphFloats() 74 | obj = decodedFace 75 | return obj 76 | 77 | 78 | 79 | @staticmethod 80 | def mergeFaces( templateFace, fromFace, toFace, invertTemplate = False, copyNonMorphs = False ): 81 | newFace = copy.deepcopy( toFace ) 82 | 83 | # Copy non-morphs, like clothes and skin 84 | if copyNonMorphs: 85 | for storable in fromFace._storables: 86 | id = storable['id'] if 'id' in storable else None 87 | # Storable must have an id, and we aren't currently copying morphs 88 | if id is None: 89 | continue 90 | 91 | # Special case geometry, since we don't want to overwrite morphs 92 | if id == 'geometry': 93 | newStorable = VamFace.getStorable( newFace._storables, id, create = True ) 94 | # Merge fromFace geometry with toFace 95 | newStorable.update( storable ) 96 | # But keep toFace morphs for now 97 | newStorable['morphs'] = VamFace.getStorable( toFace._storables, "geometry", create=True)["morphs"] 98 | else: 99 | # Otherwise copy this morph into newFace 100 | newStorable = VamFace.getStorable( newFace._storables, id, create = True ) 101 | newStorable.clear() 102 | newStorable.update( storable ) 103 | 104 | # Now copy, based on the template, the morphs 105 | newMorphs = [] 106 | for morph in fromFace.morphs: 107 | # First check the template to see if we want to copy the morph 108 | templateMorph = templateFace._getMorph( morph['name'] ) 109 | 110 | copyMorph = False 111 | if templateMorph and "animatable" in templateMorph and templateMorph["animatable"]: 112 | copyMorph = True 113 | 114 | if invertTemplate: 115 | copyMorph = not copyMorph 116 | 117 | toMorph = toFace._getMorph( morph['name'] ) 118 | 119 | # If we want this morph then copy it to newFace 120 | if copyMorph: 121 | morphCopy = morph.copy() 122 | # Maintain original animatable flag or clear it 123 | if toMorph and 'animatable' in toMorph: 124 | morphCopy['animatable'] = toMorph['animatable'] 125 | else: 126 | morphCopy['animatable'] = False 127 | newMorphs.append( morphCopy ) 128 | continue 129 | 130 | # Okay, we didn't want to copy it from fromFace, so keep the old morph (or set to 0) 131 | if not toMorph: 132 | morphCopy = morph.copy() 133 | morphCopy['value'] = 0 134 | morphCopy['animatable'] = False 135 | else: 136 | morphCopy = 
toMorph.copy() 137 | 138 | newMorphs.append( morphCopy ) 139 | # 140 | # Now merge newMorphs in with the current morphs 141 | # Now add in any morphs from toFace that aren't in newMorphs 142 | origStorables = VamFace.getStorable( newFace._storables, "geometry", create=True) 143 | if "morphs" in origStorables: 144 | origMorphs = origStorables["morphs"] 145 | for morph in origMorphs: 146 | morphName = morph["name"] 147 | newMorph = VamFace.getStorable( newMorphs, id=morphName, key="name") 148 | # If a morph was in origMorphs, but not in newMorphs, append it to newMorphs 149 | if newMorph is None: 150 | newMorphs.append(morph) 151 | 152 | VamFace.setStorable( newFace._storables, "geometry", "morphs", newMorphs, create=True) 153 | newFace.morphs = newMorphs 154 | newFace._createMorphFloats() 155 | 156 | return newFace 157 | 158 | # Aligns morphFloats between the two faces, discarding any morphs not in otherFace 159 | def matchMorphs(self, otherFace, copyUnknowns = False, templateFace = None, invertTemplate = False): 160 | newMorphs = [] 161 | self.updateJson() 162 | 163 | # Loop through other morph, copying any required morphs 164 | for otherMorph in otherFace.morphs: 165 | if templateFace: 166 | templateMorph = templateFace._getMorph( otherMorph['name'] ) 167 | 168 | if templateMorph and "animatable" in templateMorph and templateMorph["animatable"]: 169 | copyMorph = True 170 | 171 | if invertTemplate: 172 | copyMorph = not copyMorph 173 | 174 | # If we don't want to copy this morph, just copy the original morph 175 | if not copyMorph: 176 | newMorphs.append( otherMorph ) 177 | continue 178 | 179 | morph = self._getMorph(otherMorph['name']) 180 | 181 | if not morph: 182 | # If we didn't have a value for the morph in the other face, then set to 0 183 | morph = otherMorph.copy() 184 | if not copyUnknowns: 185 | morph['value'] = 0 186 | newMorphs.append(morph) 187 | 188 | if self._storables is not None: 189 | geometry = VamFace.getStorable( self._storables, "geometry", create=True ) 190 | geometry['morphs'] = newMorphs 191 | self.morphs = newMorphs 192 | self._createMorphFloats() 193 | 194 | def trimToAnimatable(self): 195 | newMorphs = [] 196 | print("Starting trim with {} morphs".format(len(self.morphs))) 197 | self.updateJson() 198 | for morph in self.morphs: 199 | if 'animatable' in morph: 200 | newMorphs.append(morph) 201 | geometry = VamFace.getStorable( self._storables, "geometry", create=True ) 202 | geometry['morphs'] = newMorphs 203 | self.morphs = newMorphs 204 | print("Ending trim with {} morphs".format(len(self.morphs))) 205 | self._createMorphFloats() 206 | 207 | # Load a JSON file 208 | def load(self, filename, discardExtra = True ): 209 | data = open(filename).read() 210 | self.jsonData = json.loads(data) 211 | atoms = self.jsonData["atoms"][0] 212 | self._storables = atoms["storables"] 213 | 214 | # Get a reference to the object containing 'morphs' so we can completely replace 'morphs' 215 | geometry = VamFace.getStorable( self._storables, "geometry" ) 216 | self.morphs = geometry['morphs'] 217 | 218 | if discardExtra: 219 | # Check for male 220 | geometry = VamFace.getStorable( self._storables, "geometry") 221 | skin = "Female 1" 222 | if "character" in geometry and "Male" in geometry["character"]: 223 | skin = "Male 1" 224 | 225 | # Throw away everything from storables 226 | self._storables = [] 227 | atoms["storables"] = self._storables 228 | 229 | VamFace.setStorable( self._storables, "geometry", "morphs", self.morphs, create=True) 230 | VamFace.setStorable( self._storables, 
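# With discardExtra the storables list is rebuilt from scratch so renders
# differ only by morphs: bald, unclothed, a fixed skin, blinking and auto
# expressions disabled, and head rotation pinned for repeatable screenshots.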
"geometry", "hair", "No Hair", create=True) 231 | VamFace.setStorable( self._storables, "geometry", "clothing", [], create=True) 232 | VamFace.setStorable( self._storables, "geometry", "character", skin, create=True) 233 | VamFace.setStorable( self._storables, "rescaleObject", "scale", 1.0 ) 234 | VamFace.setStorable( self._storables, "JawControl", "targetRotationX", 0 ) 235 | VamFace.setStorable( self._storables, "EyelidControl", "blinkEnabled", "false", create=True ) 236 | VamFace.setStorable( self._storables, "AutoExpressions", "enabled", "false", create=True ) 237 | 238 | # Find the head rotation value in the json 239 | VamFace.setStorable( self._storables, "headControl", "rotation", { "x": 0, "y": 0, "z": 0}, create=True ) 240 | VamFace.setStorable( self._storables, "headControl", "positionState", "Off" ) 241 | VamFace.setStorable( self._storables, "headControl", "rotationState", "On" ) 242 | self.headRotation = VamFace.getStorable( self._storables, "headControl")['rotation'] 243 | 244 | # Save json file 245 | def save(self, filename): 246 | self.updateJson() 247 | with open(filename, 'w') as outfile: 248 | json.dump(self.jsonData, outfile, indent=3) 249 | 250 | # randomize all face values 251 | def changeMorph(self, morphIdx, delta): 252 | newValue = self.morphFloats[morphIdx] + delta 253 | newValue = max( self.morphInfo[morphIdx]['min'], newValue ) 254 | newValue = min( self.morphInfo[morphIdx]['max'], newValue ) 255 | self.morphFloats[morphIdx] = newValue 256 | 257 | # randomize all face values 258 | def randomize(self, morphIdx = None): 259 | if morphIdx is None: 260 | for idx in range(len(self.morphFloats)): 261 | self.morphFloats[idx] = random.uniform( self.morphInfo[idx]['min'], self.morphInfo[idx]['max'] ) 262 | else: 263 | self.morphFloats[morphIdx] = random.uniform( self.morphInfo[morphIdx]['min'], self.morphInfo[morphIdx]['max'] ) 264 | 265 | def importFloatList(self, floatList): 266 | if len(floatList) == len(self.morphFloats): 267 | for i in range(len(floatList)): 268 | self.morphFloats[i] = float(floatList[i]) 269 | else: 270 | raise Exception("Import list length [{}] is different than face's morph list length [{}]".format(len(floatList), len(self.morphFloats))) 271 | 272 | def setRotation(self, angle): 273 | self.headRotation['y'] = angle 274 | 275 | # Update the json with the values from the float list 276 | def updateJson(self, discardAnimatable = False): 277 | for idx,morph in enumerate(self.morphs): 278 | morph["value"] = self.morphFloats[idx] 279 | if discardAnimatable and 'animatable' in morph: 280 | del morph['animatable'] 281 | 282 | 283 | def _getMorph(self, key): 284 | morph = list( filter( lambda x : x['name'] == key, self.morphs ) ) 285 | if len(morph) > 0: 286 | return morph[0] 287 | return None 288 | 289 | def _getMorphValue(self, key): 290 | morph = self._getMorph(key) 291 | return morph['value'] if morph and 'value' in morph else None 292 | 293 | @staticmethod 294 | def setStorable(storables, id, param, value, create = False ): 295 | storable = VamFace.getStorable(storables, id, create) 296 | if storable: 297 | storable[param] = value 298 | return storable 299 | return None 300 | 301 | 302 | @staticmethod 303 | def getStorable( storables, id, create = False, key="id" ): 304 | storable = list(filter(lambda x : x[key] == id, storables ) ) 305 | if len(storable) > 0: 306 | return storable[0] 307 | elif create: 308 | newNode = { key: id } 309 | storables.append( newNode ) 310 | return newNode 311 | return None 312 | 
-------------------------------------------------------------------------------- /mergeBase.json: -------------------------------------------------------------------------------- 1 | { 2 | "atoms" : [ 3 | { 4 | "id" : "Person", 5 | "type" : "Person", 6 | "storables" : [ 7 | { 8 | "id" : "geometry", 9 | "character" : "Female 3", 10 | "clothing" : [ 11 | { 12 | "name" : "Simple Underwear Shorts", 13 | "enabled" : "true" 14 | }, 15 | { 16 | "name" : "Simple Top", 17 | "enabled" : "true" 18 | } 19 | ], 20 | "hair" : "Ponytail", 21 | "morphs" : [ 22 | { 23 | "name" : "Aiko 6 Body", 24 | "animatable" : "true" 25 | }, 26 | { 27 | "name" : "Androgynous", 28 | "animatable" : "true" 29 | }, 30 | { 31 | "name" : "Body Size", 32 | "animatable" : "true" 33 | }, 34 | { 35 | "name" : "Body Tone", 36 | "value" : "0.4753637" 37 | }, 38 | { 39 | "name" : "Fitness Details", 40 | "value" : "0.9420856" 41 | }, 42 | { 43 | "name" : "Teen Josie Body", 44 | "value" : "0.5531501" 45 | }, 46 | { 47 | "name" : "Eyelids Bottom Define", 48 | "animatable" : "true" 49 | }, 50 | { 51 | "name" : "Eyelids Upper Height", 52 | "animatable" : "true" 53 | }, 54 | { 55 | "name" : "Eyes Almond Inner", 56 | "animatable" : "true" 57 | }, 58 | { 59 | "name" : "Eyes Almond Outer", 60 | "animatable" : "true" 61 | }, 62 | { 63 | "name" : "Eyes Angle", 64 | "animatable" : "true" 65 | }, 66 | { 67 | "name" : "Eyes Depth", 68 | "animatable" : "true" 69 | }, 70 | { 71 | "name" : "Eyes Height", 72 | "animatable" : "true" 73 | }, 74 | { 75 | "name" : "Eyes Height Inner", 76 | "animatable" : "true" 77 | }, 78 | { 79 | "name" : "Eyes Height Outer", 80 | "animatable" : "true" 81 | }, 82 | { 83 | "name" : "Eyes Inner Depth", 84 | "animatable" : "true" 85 | }, 86 | { 87 | "name" : "Eyes Puffy Lower", 88 | "animatable" : "true" 89 | }, 90 | { 91 | "name" : "Eyes Puffy Upper", 92 | "animatable" : "true" 93 | }, 94 | { 95 | "name" : "Eyes Size", 96 | "animatable" : "true" 97 | }, 98 | { 99 | "name" : "Eyes Width", 100 | "animatable" : "true" 101 | }, 102 | { 103 | "name" : "Eyes Wrinkle", 104 | "animatable" : "true" 105 | }, 106 | { 107 | "name" : "Brow Define", 108 | "animatable" : "true" 109 | }, 110 | { 111 | "name" : "Brow Depth", 112 | "animatable" : "true" 113 | }, 114 | { 115 | "name" : "Brow Height", 116 | "animatable" : "true" 117 | }, 118 | { 119 | "name" : "Brow Inner Height", 120 | "animatable" : "true" 121 | }, 122 | { 123 | "name" : "Brow Inner Width", 124 | "animatable" : "true" 125 | }, 126 | { 127 | "name" : "Brow Outer Height", 128 | "animatable" : "true" 129 | }, 130 | { 131 | "name" : "Brow Outer Width", 132 | "animatable" : "true" 133 | }, 134 | { 135 | "name" : "Brow Shape Inner", 136 | "animatable" : "true" 137 | }, 138 | { 139 | "name" : "Brow Shape Middle", 140 | "animatable" : "true" 141 | }, 142 | { 143 | "name" : "Brow Shape Outer", 144 | "animatable" : "true" 145 | }, 146 | { 147 | "name" : "Brow Width", 148 | "animatable" : "true" 149 | }, 150 | { 151 | "name" : "Brows Arch", 152 | "animatable" : "true" 153 | }, 154 | { 155 | "name" : "Brows Size", 156 | "animatable" : "true" 157 | }, 158 | { 159 | "name" : "Cheek Bone Define ", 160 | "animatable" : "true" 161 | }, 162 | { 163 | "name" : "Cheek Bones Arch ", 164 | "animatable" : "true" 165 | }, 166 | { 167 | "name" : "Cheek Bones Height", 168 | "animatable" : "true" 169 | }, 170 | { 171 | "name" : "Cheek Bones Round", 172 | "animatable" : "true" 173 | }, 174 | { 175 | "name" : "Cheek Bones Size", 176 | "animatable" : "true" 177 | }, 178 | { 179 | "name" : "Cheek Bones 
Width", 180 | "animatable" : "true" 181 | }, 182 | { 183 | "name" : "Cheek Jowl", 184 | "animatable" : "true" 185 | }, 186 | { 187 | "name" : "Cheek Lower Depth", 188 | "animatable" : "true" 189 | }, 190 | { 191 | "name" : "Cheek Lower Width", 192 | "animatable" : "true" 193 | }, 194 | { 195 | "name" : "Cheeks Define", 196 | "animatable" : "true" 197 | }, 198 | { 199 | "name" : "Cheeks Depth", 200 | "animatable" : "true" 201 | }, 202 | { 203 | "name" : "Cheeks Dimple Crease", 204 | "animatable" : "true" 205 | }, 206 | { 207 | "name" : "Cheeks Inner Puffy", 208 | "animatable" : "true" 209 | }, 210 | { 211 | "name" : "Cheeks Sink", 212 | "animatable" : "true" 213 | }, 214 | { 215 | "name" : "Cheeks Sink Lower", 216 | "animatable" : "true" 217 | }, 218 | { 219 | "name" : "Chin Cleft", 220 | "animatable" : "true" 221 | }, 222 | { 223 | "name" : "Chin Crease", 224 | "animatable" : "true" 225 | }, 226 | { 227 | "name" : "Chin Crease Smooth", 228 | "animatable" : "true" 229 | }, 230 | { 231 | "name" : "Chin Depth", 232 | "animatable" : "true" 233 | }, 234 | { 235 | "name" : "Chin Height", 236 | "animatable" : "true" 237 | }, 238 | { 239 | "name" : "Chin Round", 240 | "animatable" : "true" 241 | }, 242 | { 243 | "name" : "Chin Width", 244 | "animatable" : "true" 245 | }, 246 | { 247 | "name" : "Chin Width 2", 248 | "animatable" : "true" 249 | }, 250 | { 251 | "name" : "Cranium Slope", 252 | "animatable" : "true" 253 | }, 254 | { 255 | "name" : "Eye Fold", 256 | "animatable" : "true" 257 | }, 258 | { 259 | "name" : "Eyelid Upper Inner Shape", 260 | "animatable" : "true" 261 | }, 262 | { 263 | "name" : "Eyelid Upper Shape Outer", 264 | "animatable" : "true" 265 | }, 266 | { 267 | "name" : "Eyes Bags", 268 | "animatable" : "true" 269 | }, 270 | { 271 | "name" : "Eyes Inner Shape", 272 | "animatable" : "true" 273 | }, 274 | { 275 | "name" : "Eyes Inner Corner Height", 276 | "animatable" : "true" 277 | }, 278 | { 279 | "name" : "Eyes Inner Corner Width", 280 | "animatable" : "true" 281 | }, 282 | { 283 | "name" : "Eyes Lower Shape", 284 | "animatable" : "true" 285 | }, 286 | { 287 | "name" : "Eyes Open", 288 | "animatable" : "true" 289 | }, 290 | { 291 | "name" : "Eyes Outer Shape", 292 | "animatable" : "true" 293 | }, 294 | { 295 | "name" : "Eyes Puffy Outer", 296 | "animatable" : "true" 297 | }, 298 | { 299 | "name" : "Eyes Round", 300 | "animatable" : "true" 301 | }, 302 | { 303 | "name" : "Eyes Round Lower", 304 | "animatable" : "true" 305 | }, 306 | { 307 | "name" : "Eyes Round Upper", 308 | "animatable" : "true" 309 | }, 310 | { 311 | "name" : "Eyes Slant Outer ", 312 | "animatable" : "true" 313 | }, 314 | { 315 | "name" : "Eyes Upper Shape", 316 | "animatable" : "true" 317 | }, 318 | { 319 | "name" : "Face Center Depth", 320 | "animatable" : "true" 321 | }, 322 | { 323 | "name" : "Face Depth Lower ", 324 | "animatable" : "true" 325 | }, 326 | { 327 | "name" : "Face Flat", 328 | "animatable" : "true" 329 | }, 330 | { 331 | "name" : "Face Heart", 332 | "animatable" : "true" 333 | }, 334 | { 335 | "name" : "Face Height", 336 | "animatable" : "true" 337 | }, 338 | { 339 | "name" : "Face Round", 340 | "animatable" : "true" 341 | }, 342 | { 343 | "name" : "Face Square", 344 | "animatable" : "true" 345 | }, 346 | { 347 | "name" : "Face Young", 348 | "animatable" : "true" 349 | }, 350 | { 351 | "name" : "Forehead Define", 352 | "animatable" : "true" 353 | }, 354 | { 355 | "name" : "Forehead Flat", 356 | "animatable" : "true" 357 | }, 358 | { 359 | "name" : "Forehead Round", 360 | "animatable" : "true" 
361 | }, 362 | { 363 | "name" : "Forehead Width", 364 | "animatable" : "true" 365 | }, 366 | { 367 | "name" : "Jaw Angle", 368 | "animatable" : "true" 369 | }, 370 | { 371 | "name" : "Jaw Chin Shape", 372 | "animatable" : "true" 373 | }, 374 | { 375 | "name" : "Jaw Corner Height", 376 | "animatable" : "true" 377 | }, 378 | { 379 | "name" : "Jaw Corner Width", 380 | "animatable" : "true" 381 | }, 382 | { 383 | "name" : "Jaw Curve", 384 | "animatable" : "true" 385 | }, 386 | { 387 | "name" : "Jaw Define", 388 | "animatable" : "true" 389 | }, 390 | { 391 | "name" : "Jaw Height", 392 | "animatable" : "true" 393 | }, 394 | { 395 | "name" : "Jaw Line Depth", 396 | "animatable" : "true" 397 | }, 398 | { 399 | "name" : "Jaw Size", 400 | "animatable" : "true" 401 | }, 402 | { 403 | "name" : "Jaw Square", 404 | "animatable" : "true" 405 | }, 406 | { 407 | "name" : "Laugh Lines", 408 | "animatable" : "true" 409 | }, 410 | { 411 | "name" : "Mouth Corner Height", 412 | "animatable" : "true" 413 | }, 414 | { 415 | "name" : "Mouth Corner Width", 416 | "animatable" : "true" 417 | }, 418 | { 419 | "name" : "Nose Bridge Curve", 420 | "animatable" : "true" 421 | }, 422 | { 423 | "name" : "Nose Bridge Height 2", 424 | "animatable" : "true" 425 | }, 426 | { 427 | "name" : "Nose Bridge Lower Width ", 428 | "animatable" : "true" 429 | }, 430 | { 431 | "name" : "Nose Bridge Middle Width", 432 | "animatable" : "true" 433 | }, 434 | { 435 | "name" : "Nose Bridge Middlle Depth", 436 | "animatable" : "true" 437 | }, 438 | { 439 | "name" : "Nose Bridge Root Width", 440 | "animatable" : "true" 441 | }, 442 | { 443 | "name" : "Nose Septum Shape", 444 | "animatable" : "true" 445 | }, 446 | { 447 | "name" : "Nose Tilt", 448 | "animatable" : "true" 449 | }, 450 | { 451 | "name" : "Nose Tip Bottom Shape", 452 | "animatable" : "true" 453 | }, 454 | { 455 | "name" : "Nose Tip Upper Shape ", 456 | "animatable" : "true" 457 | }, 458 | { 459 | "name" : "Nose Tip Width Lower", 460 | "animatable" : "true" 461 | }, 462 | { 463 | "name" : "Nose Tip Width Upper", 464 | "animatable" : "true" 465 | }, 466 | { 467 | "name" : "Nostrils Flare", 468 | "animatable" : "true" 469 | }, 470 | { 471 | "name" : "Nostrils Inner Height", 472 | "animatable" : "true" 473 | }, 474 | { 475 | "name" : "Nostrils Rotation", 476 | "animatable" : "true" 477 | }, 478 | { 479 | "name" : "Nostrils Shape Bottom", 480 | "animatable" : "true" 481 | }, 482 | { 483 | "name" : "Nostrils Shape Height", 484 | "animatable" : "true" 485 | }, 486 | { 487 | "name" : "Nostrils Shape Top", 488 | "animatable" : "true" 489 | }, 490 | { 491 | "name" : "Temples Define", 492 | "animatable" : "true" 493 | }, 494 | { 495 | "name" : "Adrianna", 496 | "animatable" : "true" 497 | }, 498 | { 499 | "name" : "Aiko 6 Head", 500 | "animatable" : "true" 501 | }, 502 | { 503 | "name" : "Aneta", 504 | "animatable" : "true" 505 | }, 506 | { 507 | "name" : "Carmen Face", 508 | "animatable" : "true" 509 | }, 510 | { 511 | "name" : "Cranium Height", 512 | "animatable" : "true" 513 | }, 514 | { 515 | "name" : "Cranium Width", 516 | "animatable" : "true" 517 | }, 518 | { 519 | "name" : "Danika Head", 520 | "animatable" : "true" 521 | }, 522 | { 523 | "name" : "Kori Head", 524 | "animatable" : "true" 525 | }, 526 | { 527 | "name" : "Mei Lin 6 Head", 528 | "animatable" : "true" 529 | }, 530 | { 531 | "name" : "Monique 6 Head", 532 | "animatable" : "true" 533 | }, 534 | { 535 | "name" : "Stephanie 6 Head", 536 | "animatable" : "true" 537 | }, 538 | { 539 | "name" : "Sumiko Head", 540 | "animatable" : 
"true" 541 | }, 542 | { 543 | "name" : "Teen Josie Head", 544 | "animatable" : "true" 545 | }, 546 | { 547 | "name" : "Victoria 6 Head", 548 | "animatable" : "true" 549 | }, 550 | { 551 | "name" : "Lip Lower Depth", 552 | "animatable" : "true" 553 | }, 554 | { 555 | "name" : "Lip Lower Size", 556 | "animatable" : "true" 557 | }, 558 | { 559 | "name" : "Lip Lower Width", 560 | "animatable" : "true" 561 | }, 562 | { 563 | "name" : "Lip Top Peak", 564 | "animatable" : "true" 565 | }, 566 | { 567 | "name" : "Lip Upper Curves", 568 | "animatable" : "true" 569 | }, 570 | { 571 | "name" : "Lip Upper Depth", 572 | "animatable" : "true" 573 | }, 574 | { 575 | "name" : "Lip Upper Size", 576 | "animatable" : "true" 577 | }, 578 | { 579 | "name" : "Lips Bottom Full", 580 | "animatable" : "true" 581 | }, 582 | { 583 | "name" : "Lips Bottom Shape ", 584 | "animatable" : "true" 585 | }, 586 | { 587 | "name" : "Lips Bottom Small", 588 | "animatable" : "true" 589 | }, 590 | { 591 | "name" : "Lips Bow Height", 592 | "animatable" : "true" 593 | }, 594 | { 595 | "name" : "Lips Bow Shape", 596 | "animatable" : "true" 597 | }, 598 | { 599 | "name" : "Lips Edge Define", 600 | "animatable" : "true" 601 | }, 602 | { 603 | "name" : "Lips Heart", 604 | "animatable" : "true" 605 | }, 606 | { 607 | "name" : "Lips Square", 608 | "animatable" : "true" 609 | }, 610 | { 611 | "name" : "Lips Thin", 612 | "animatable" : "true" 613 | }, 614 | { 615 | "name" : "Lips Top Full", 616 | "animatable" : "true" 617 | }, 618 | { 619 | "name" : "LIps Top Width", 620 | "animatable" : "true" 621 | }, 622 | { 623 | "name" : "Lips Upper Curves Round", 624 | "animatable" : "true" 625 | }, 626 | { 627 | "name" : "Lips Upper Curves Corner", 628 | "animatable" : "true" 629 | }, 630 | { 631 | "name" : "Mouth Corner Depth", 632 | "animatable" : "true" 633 | }, 634 | { 635 | "name" : "Mouth Curves", 636 | "animatable" : "true" 637 | }, 638 | { 639 | "name" : "Mouth Curves Arch", 640 | "animatable" : "true" 641 | }, 642 | { 643 | "name" : "Mouth Curves Corner", 644 | "animatable" : "true" 645 | }, 646 | { 647 | "name" : "Mouth Depth", 648 | "animatable" : "true" 649 | }, 650 | { 651 | "name" : "Mouth Height", 652 | "animatable" : "true" 653 | }, 654 | { 655 | "name" : "Mouth Marionette lines", 656 | "animatable" : "true" 657 | }, 658 | { 659 | "name" : "Mouth Size", 660 | "animatable" : "true" 661 | }, 662 | { 663 | "name" : "Mouth Width", 664 | "animatable" : "true" 665 | }, 666 | { 667 | "name" : "Nose Bridge Depth", 668 | "animatable" : "true" 669 | }, 670 | { 671 | "name" : "Nose Bridge Height", 672 | "animatable" : "true" 673 | }, 674 | { 675 | "name" : "Nose Bridge Skew", 676 | "animatable" : "true" 677 | }, 678 | { 679 | "name" : "Nose Bridge Width", 680 | "animatable" : "true" 681 | }, 682 | { 683 | "name" : "Nose Bump", 684 | "animatable" : "true" 685 | }, 686 | { 687 | "name" : "Nose Depth", 688 | "animatable" : "true" 689 | }, 690 | { 691 | "name" : "Nose Flesh Size", 692 | "animatable" : "true" 693 | }, 694 | { 695 | "name" : "Nose Height", 696 | "animatable" : "true" 697 | }, 698 | { 699 | "name" : "Nose Pinch", 700 | "animatable" : "true" 701 | }, 702 | { 703 | "name" : "Nose Ridge", 704 | "animatable" : "true" 705 | }, 706 | { 707 | "name" : "Nose Ridge Width", 708 | "animatable" : "true" 709 | }, 710 | { 711 | "name" : "Nose Septum Depth", 712 | "animatable" : "true" 713 | }, 714 | { 715 | "name" : "Nose Septum Height", 716 | "animatable" : "true" 717 | }, 718 | { 719 | "name" : "Nose Septum Width", 720 | "animatable" : "true" 721 
| }, 722 | { 723 | "name" : "Nose Size", 724 | "animatable" : "true" 725 | }, 726 | { 727 | "name" : "Nose Tip Depth", 728 | "animatable" : "true" 729 | }, 730 | { 731 | "name" : "Nose Tip Height", 732 | "animatable" : "true" 733 | }, 734 | { 735 | "name" : "Nose Tip Round", 736 | "animatable" : "true" 737 | }, 738 | { 739 | "name" : "Nose Tip Width", 740 | "animatable" : "true" 741 | }, 742 | { 743 | "name" : "Nose Width", 744 | "animatable" : "true" 745 | }, 746 | { 747 | "name" : "Nostrils Depth", 748 | "animatable" : "true" 749 | }, 750 | { 751 | "name" : "Nostrils Flesh Size", 752 | "animatable" : "true" 753 | }, 754 | { 755 | "name" : "Nostrils Height", 756 | "animatable" : "true" 757 | }, 758 | { 759 | "name" : "Nostrils Width", 760 | "animatable" : "true" 761 | }, 762 | { 763 | "name" : "Philtrum Angle", 764 | "animatable" : "true" 765 | }, 766 | { 767 | "name" : "Philtrum Depth", 768 | "animatable" : "true" 769 | }, 770 | { 771 | "name" : "Philtrum Width", 772 | "animatable" : "true" 773 | } 774 | ] 775 | }, 776 | { 777 | "id" : "BreastPhysicsMesh", 778 | "softVerticesColliderRadius" : "0.03476021" 779 | } 780 | ] 781 | } 782 | ] 783 | } -------------------------------------------------------------------------------- /Tools/TrainSelf.py: -------------------------------------------------------------------------------- 1 | # Generate training data from existing faces 2 | import argparse 3 | import os 4 | import glob 5 | import multiprocessing 6 | import queue 7 | import numpy as np 8 | import tempfile 9 | import shutil 10 | import time 11 | import random 12 | import copy 13 | import msgpack 14 | import gc 15 | import tqdm 16 | from win32api import GetKeyState 17 | from win32con import VK_SCROLL, VK_CAPITAL 18 | 19 | NORMALIZE_SIZE=150 20 | 21 | ############################### 22 | # Run the program 23 | # 24 | def main( args ): 25 | from Utils.Training.config import Config 26 | print( "Initializing training...") 27 | while GetKeyState(VK_SCROLL): 28 | print("Please turn off scroll lock") 29 | time.sleep(1) 30 | if args.pydev: 31 | print("Enabling debugging with pydev") 32 | import pydevd 33 | pydevd.settrace(suspend=False) 34 | 35 | modelFile = args.outputFile 36 | trainingCacheFile = args.trainingDataCache 37 | tmpDir = args.tmpDir 38 | onlySeed = args.onlySeedImages 39 | 40 | 41 | # If not none, the neural net process will generate from the training cache 42 | nnTrainingCache = None 43 | if args.useTrainingDataCache: 44 | nnTrainingCache = trainingCacheFile 45 | 46 | print("Creating initial encodings...") 47 | if args.seedImagePath is None: 48 | initialEncodings = [] 49 | else: 50 | initialEncodings = getEncodingsFromPaths( [args.seedImagePath], recursive=True, cache=True) 51 | 52 | config = Config.createFromFile(args.configFile) 53 | # Try encodings until one succeeds 54 | initParams = None 55 | for encoding in initialEncodings: 56 | try: 57 | initParams = config.generateParams( encoding ) 58 | break 59 | except: 60 | continue 61 | 62 | if initParams is None: 63 | raise Exception("Failed to create an initial encoding!") 64 | print("Shape is {}".format(config.getShape())) 65 | 66 | print("Starting child processes...") 67 | encBatchSize = args.encBatchSize 68 | trainBatchSize = 256 69 | morph2imageQueue = multiprocessing.Queue() 70 | image2encodingQueue = multiprocessing.Queue(maxsize=encBatchSize) 71 | encoding2morphQueue = multiprocessing.Queue() 72 | vamFaceQueue = multiprocessing.Queue() 73 | doneEvent = multiprocessing.Event() 74 | encodingDiedEvent = multiprocessing.Event() 
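    # Pipeline topology: neural_net_proc proposes morphs -> morph2imageQueue ->
    # morphs_to_image_proc renders them through VaM -> image2encodingQueue ->
    # image_to_encoding_proc encodes and validates the renders ->
    # encoding2morphQueue feeds results back to the net, while vamFaceQueue
    # tees each accepted (encodings, morphs) pair to save_training_data_proc.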
75 |     safeToExitEvents = []
76 | 
77 |     # Set up worker processes
78 |     procs = []
79 |     safeToExitEvent = multiprocessing.Event()
80 |     morphs2image = multiprocessing.Process(target=morphs_to_image_proc, args=( config, morph2imageQueue, image2encodingQueue, tmpDir, doneEvent, safeToExitEvent, args.pydev ) )
81 |     procs.append(morphs2image)
82 |     safeToExitEvents.append( safeToExitEvent )
83 | 
84 |     safeToExitEvent = image2encodingExitEvent = multiprocessing.Event()
85 |     image2encoding = multiprocessing.Process(target=image_to_encoding_proc, args=( config, encBatchSize, image2encodingQueue, encoding2morphQueue, vamFaceQueue, doneEvent, encodingDiedEvent, safeToExitEvent, args.pydev ) )
86 |     procs.append( image2encoding )
87 |     safeToExitEvents.append( safeToExitEvent )
88 | 
89 |     safeToExitEvent = multiprocessing.Event()
90 |     neuralnet = multiprocessing.Process(target=neural_net_proc, args=( config, modelFile, trainBatchSize, initialEncodings, nnTrainingCache, encoding2morphQueue, morph2imageQueue, doneEvent, safeToExitEvent, onlySeed, args.pydev ) )
91 |     procs.append(neuralnet)
92 |     safeToExitEvents.append( safeToExitEvent )
93 | 
94 |     safeToExitEvent = multiprocessing.Event()
95 |     trainingDataSaver = multiprocessing.Process( target=save_training_data_proc, args=( config, vamFaceQueue, trainingCacheFile, doneEvent, safeToExitEvent, args.pydev ))
96 |     procs.append(trainingDataSaver)
97 |     safeToExitEvents.append( safeToExitEvent )
98 | 
99 |     for proc in procs:
100 |         proc.start()
101 | 
102 |     print("Begin processing!")
103 | 
104 |     # To kick-start the process, feed the neural net the initial params
105 |     for encoding in initialEncodings:
106 |         try:
107 |             params = config.generateParams(encoding)
108 |             encoding2morphQueue.put( ( False, params ) )
109 |         except:
110 |             pass
111 | 
112 |     # Any seed json files?
113 |     if args.seedJsonPath:
114 |         seedLooks = getLooksFromPath( args.seedJsonPath )
115 |         # Now match morphs and submit
116 |         for look in seedLooks:
117 |             look.matchMorphs( config.getBaseFace() )
118 |             if len(look.morphFloats ) == config.getShape()[1]:
119 |                 morph2imageQueue.put( [0]*config.getShape()[0] + look.morphFloats )
120 | 
121 | 
122 |     print("Enable ScrollLock to exit, CapsLock to pause image generation")
123 |     while True:
124 |         if GetKeyState(VK_SCROLL):
125 |             break
126 |         time.sleep(1)
127 | 
128 |         # image2encoding dies from OOM fairly often. Restart it if that happens
129 |         if not image2encoding.is_alive() or encodingDiedEvent.is_set():
130 |             print("Restarting Image2Encoding process!")
131 |             encodingDiedEvent.clear()
132 |             procs.remove( image2encoding )
133 |             if image2encoding.is_alive():
134 |                 print("Terminating stuck process...")
135 |                 image2encoding.join(5)
136 |                 image2encoding.terminate()
137 |             image2encoding = multiprocessing.Process(target=image_to_encoding_proc, args=( config, encBatchSize, image2encodingQueue, encoding2morphQueue, vamFaceQueue, doneEvent, encodingDiedEvent, image2encodingExitEvent, args.pydev ) )
138 |             image2encoding.start()
139 |             procs.append( image2encoding )
140 | 
141 |     print("Waiting for processes to exit...")
142 |     # Wait for children to finish
143 |     doneEvent.set()
144 | 
145 |     # for proc in procs:
146 |     #     proc.join()
147 |     # Join isn't working right
148 |     for exitEvent in safeToExitEvents:
149 |         exitEvent.wait()
150 | 
151 |     print("Exit successful. If you're still stuck here, I don't know why.
Just kill me with CTRL+C or CTRL+BREAK.") 152 | 153 | def getLooksFromPath( seedJsonPath, recursive = True ): 154 | from Utils.Face.vam import VamFace 155 | lookList = [] 156 | for root, subdirs, files in os.walk(seedJsonPath): 157 | for file in files: 158 | if file.endswith( ( '.json' ) ): 159 | try: 160 | newFace = VamFace( os.path.join(root, file ) ) 161 | lookList.append(newFace) 162 | except: 163 | pass 164 | if not recursive: 165 | break 166 | 167 | return lookList 168 | 169 | def getEncodingsFromPaths( imagePaths, recursive = True, cache = False ): 170 | # We'll create a flat fileList, and placeholder arrays for the return encodings 171 | fileList = [] 172 | encodings = [] 173 | for imagePath in imagePaths: 174 | for root, subdirs, files in os.walk(imagePath): 175 | encoding = [] 176 | for file in files: 177 | if file.endswith( ( '.png', '.jpg' ) ): 178 | fileList.append( os.path.join( root, file ) ) 179 | encoding.append(None) 180 | if len(encoding) > 0: 181 | encodings.append(encoding) 182 | if not recursive: 183 | break 184 | 185 | # Now batch create the encodings! 186 | if len(fileList) > 0: 187 | batched_encodings = createEncodings( fileList ) 188 | 189 | # Now unflatten the batched encodings 190 | idx = 0 191 | for encoding in encodings: 192 | for i in range(len(encoding)): 193 | encoding[i] = batched_encodings[idx] 194 | idx += 1 195 | 196 | return encodings 197 | 198 | 199 | def createEncodings( fileList ): 200 | from PIL import Image 201 | from Utils.Face.encoded import EncodedFace 202 | 203 | imageList = [] 204 | for file in fileList: 205 | imageList.append( np.array( Image.open(file) ) ) 206 | encodedFaces = EncodedFace.batchEncode( imageList, batch_size=64, keepImage = True ) 207 | 208 | return encodedFaces 209 | 210 | 211 | # Previously we've just been saving the training number lists, but 212 | # if we want to change a parameter we'd have to regenerate all data. 
This 213 | # process saves the entire data set 214 | def save_training_data_proc( config, inputQueue, trainingCacheFile, doneEvent, exitEvent, pydev ): 215 | if pydev: 216 | import pydevd 217 | pydevd.settrace(suspend=False) 218 | 219 | trainingData = [] 220 | 221 | if os.path.exists( trainingCacheFile ): 222 | trainingData = load_training_cache( config, trainingCacheFile ) 223 | print("Initial training cache entries: {}".format(len(trainingData))) 224 | 225 | saveInterval = 10000 226 | pendingSave = False 227 | 228 | while not doneEvent.is_set(): 229 | try: 230 | ( faces, morphs ) = inputQueue.get(block=True, timeout=1) 231 | for face in faces: 232 | face._img = None # Don't want to save images 233 | 234 | trainingData.append( ( faces, morphs ) ) 235 | if len(trainingData) % saveInterval == 0: 236 | pendingSave = True 237 | except queue.Empty: 238 | if pendingSave: 239 | print("Saving {} entries to training cache...".format(len(trainingData))) 240 | save_training_cache( config, trainingData, trainingCacheFile ) 241 | print("Done saving training cache") 242 | pendingSave = False 243 | except Exception as e: 244 | print("Error caching faces: {}".format(str(e))) 245 | 246 | print("Saving {} entries in training cache before exiting...".format(len(trainingData))) 247 | save_training_cache( config, trainingData, trainingCacheFile ) 248 | print("Done saving training cache") 249 | exitEvent.set() 250 | 251 | 252 | def decode_training_data( obj ): 253 | from Utils.Face.encoded import EncodedFace 254 | from Utils.Face.vam import VamFace 255 | 256 | decoders = [ VamFace.msgpack_decode, EncodedFace.msgpack_decode ] 257 | for decoder in decoders: 258 | newObj = decoder(obj) 259 | if newObj != obj: 260 | obj = newObj 261 | break 262 | return obj 263 | 264 | def encode_training_data( obj ): 265 | from Utils.Face.encoded import EncodedFace 266 | from Utils.Face.vam import VamFace 267 | 268 | encoders = [ VamFace.msgpack_encode, EncodedFace.msgpack_encode ] 269 | for encoder in encoders: 270 | newObj = encoder(obj) 271 | if newObj != obj: 272 | obj = newObj 273 | break 274 | return obj 275 | 276 | def load_training_cache( config, path ): 277 | import gc 278 | 279 | inFile = open( path, "rb") 280 | unpacker = msgpack.Unpacker( inFile, object_hook=decode_training_data, use_list=True, encoding='utf-8' ) 281 | baseFace = unpacker.unpack() 282 | 283 | #Check if the config face matches this cache 284 | origMorphCnt = len(baseFace.morphs) 285 | baseFace.matchMorphs( config.getBaseFace() ) 286 | newMorphCnt = len(baseFace.morphs) 287 | 288 | if newMorphCnt != origMorphCnt: 289 | raise Exception("Configuration morphs don't match cache morphs!") 290 | 291 | trainingData = [] 292 | gc.disable() 293 | for item in unpacker: 294 | trainingData.append(item) 295 | inFile.close() 296 | 297 | gc.enable() 298 | return trainingData 299 | 300 | def save_training_cache( config, cacheData, path ): 301 | if len(cacheData) == 0: 302 | return 303 | 304 | outFile = open( path, 'wb' ) 305 | outFile.write( msgpack.packb( config.getBaseFace(), default=encode_training_data, use_bin_type=True) ) 306 | for item in cacheData: 307 | outFile.write( msgpack.packb( item, default=encode_training_data, use_bin_type=True) ) 308 | outFile.close() 309 | 310 | def morphs_to_image_proc( config, inputQueue, outputQueue, tmpDir, doneEvent, exitEvent, pydev ): 311 | if pydev: 312 | import pydevd 313 | pydevd.settrace(suspend=False) 314 | 315 | from Utils.Vam.window import VamWindow 316 | from Utils.Face.vam import VamFace 317 | # Initialize the Vam 
318 |     vamWindow = VamWindow( pipe = "foto2vamPipe" )
319 |     vamFace = config.getBaseFace()
320 | 
321 |     inputCnt = config.getShape()[0]
322 | 
323 | 
324 |     while not doneEvent.is_set():
325 |         try:
326 |             while GetKeyState(VK_CAPITAL): # Caps Lock pauses image generation
327 |                 time.sleep(1)
328 |             params = inputQueue.get(block=True, timeout=1)
329 |             morphs = params[inputCnt:]
330 |             vamFace.importFloatList(morphs)
331 | 
332 |             tmpdir = tempfile.mkdtemp( dir=tmpDir )
333 |             jsonFile = os.path.join( tmpdir, "face.json" )
334 |             vamFace.save( jsonFile )
335 |             vamWindow.loadLook( jsonFile, config.getAngles() )
336 |             vamWindow.syncPipe( vamWindow._pipe )
337 |             outputQueue.put( tmpdir )
338 | 
339 |         except queue.Empty:
340 |             pass
341 |         except Exception as e:
342 |             print("Error in morphs_to_image_proc: {}".format(str(e)))
343 | 
344 |     exitEvent.set()
345 | 
346 | 
347 | def image_to_encoding_proc( config, batchSize, inputQueue, outputQueue, trainingCacheQueue, doneEvent, encodingDiedEvent, exitEvent, pydev ):
348 |     if pydev:
349 |         import pydevd
350 |         pydevd.settrace(suspend=False)
351 | 
352 |     pathList = []
353 |     inputCnt = config.getShape()[0]
354 |     outputCnt = config.getShape()[1]
355 |     while not doneEvent.is_set():
356 |         submitWork = False
357 |         try:
358 |             work = inputQueue.get(block=True, timeout=1)
359 |             pathList.append( work )
360 |             submitWork = len(pathList) >= batchSize
361 |         except queue.Empty:
362 |             submitWork = len(pathList) > 0
363 | 
364 |         if submitWork:
365 |             try:
366 |                 encodings = getEncodingsFromPaths( pathList, recursive=False, cache = False )
367 |                 for data in zip( pathList, encodings ):
368 |                     try:
369 |                         if not validatePerson( data[1] ):
370 |                             raise Exception("Image failed validation!")
371 |                         params = config.generateParams( data[1] + [os.path.join( data[0], "face.json") ] )
372 |                         params_valid = True
373 |                         # Cache off the face
374 |                         trainingCacheQueue.put( ( data[1], params[inputCnt:] ) )
375 |                         # Send it off to the neural net training
376 |                         outputQueue.put( ( params_valid, params ) )
377 |                     except Exception as e:
378 |                         pass # Drop image sets that fail validation or param generation
379 | 
380 |             except RuntimeError as e:
381 |                 # Probably OOM. Kill the process
382 |                 print("Runtime error! Process is exiting")
Process is exiting") 383 | encodingDiedEvent.set() 384 | raise SystemExit() 385 | except Exception as e: 386 | print( str(e) ) 387 | finally: 388 | for path in pathList: 389 | try: 390 | shutil.rmtree( path, ignore_errors=True) 391 | except: 392 | pass 393 | pathList.clear() 394 | 395 | exitEvent.set() 396 | 397 | def validatePerson( encodingList ): 398 | ok = samePerson( encodingList, tolerance=0.6 ) 399 | for encoding in encodingList: 400 | if not ok: 401 | break 402 | valid = landmarksValid( encoding ) 403 | ok = valid > 0.9 404 | return ok 405 | 406 | 407 | def samePerson( encodingList, tolerance=.6 ): 408 | for idx,encoding in enumerate(encodingList): 409 | for encoding2 in encodingList[idx+1:]: 410 | if encoding.compare(encoding2) > tolerance: 411 | return False 412 | return True 413 | 414 | 415 | def landmarksValid( encoding ): 416 | landmarks = encoding.getLandmarks() 417 | img = encoding.getImage() 418 | bgColor = img[0][0] 419 | 420 | totalPoints = 0 421 | invalidPoints = 0 422 | for feature,points in landmarks.items(): 423 | for point in points: 424 | totalPoints += 1 425 | try: 426 | if (img[point[1]][point[0]] == bgColor).all(): 427 | invalidPoints += 1 428 | except IndexError: 429 | invalidPoints += 1 430 | return (totalPoints-invalidPoints)/totalPoints 431 | 432 | 433 | def saveTrainingData( dataName, trainingInputs, trainingOutputs ): 434 | if len(trainingInputs) != len(trainingOutputs): 435 | raise Exception("Input length mismatch with output length!") 436 | 437 | outFile = open( dataName, 'wb' ) 438 | msgpack.pack( ( trainingInputs, trainingOutputs ), outFile, use_bin_type=True ) 439 | outFile.close() 440 | 441 | def readTrainingData( dataName ): 442 | dataFile = open( dataName, "rb" ) 443 | gc.disable() 444 | inputList,outputList = msgpack.unpack( dataFile ) 445 | gc.enable() 446 | dataFile.close() 447 | return list(inputList), list(outputList) 448 | 449 | 450 | def queueRandomOutputParams( config, trainingMorphsList, queue ): 451 | inputCnt = config.getShape()[0] 452 | outputCnt = config.getShape()[1] 453 | inputParams = [0]*inputCnt 454 | 455 | newFace = copy.deepcopy(config.getBaseFace()) 456 | 457 | # Choose random number to decide what modification we apply 458 | rand = random.random() 459 | # select which morphs to modify 460 | modifyIdxs = random.sample( range(len(newFace.morphFloats)), random.randint(1,25) ) 461 | 462 | if len(trainingMorphsList) > 10: 463 | randomIdxs = random.sample( range(len(trainingMorphsList)), 2 ) 464 | 465 | newFaceMorphs = trainingMorphsList[randomIdxs[0]] 466 | newFace.importFloatList( newFaceMorphs ) 467 | 468 | if rand < .6: 469 | face2Morphs = trainingMorphsList[randomIdxs[1]] 470 | face2 = copy.deepcopy( config.getBaseFace() ) 471 | face2.importFloatList( face2Morphs ) 472 | mate(newFace, face2, modifyIdxs ) 473 | queue.put_nowait( inputParams + newFace.morphFloats ) 474 | elif rand < .9: 475 | for idx in modifyIdxs: 476 | newFace.changeMorph( idx, -1 + 2*random.random() ) 477 | queue.put_nowait( inputParams + newFace.morphFloats ) 478 | elif rand < .95: 479 | numSteps = 5#random.randint(5,15) 480 | for idx in modifyIdxs: 481 | face2 = copy.deepcopy( newFace ) 482 | minVal = face2.morphInfo[idx]['min'] 483 | maxVal = face2.morphInfo[idx]['max'] 484 | stepSize = ( maxVal - minVal) / numSteps 485 | 486 | face2.morphFloats[idx] = minVal 487 | queue.put( inputParams + face2.morphFloats ) 488 | for step in range(numSteps): 489 | face2.changeMorph( idx, stepSize ) 490 | queue.put( inputParams + face2.morphFloats ) 491 | else: 492 | 
492 |             mutate(newFace, modifyIdxs )
493 |             queue.put_nowait( inputParams + newFace.morphFloats )
494 |     else:
495 |         # 90% chance to mutate the base face, otherwise completely random morphs
496 |         if rand < .9:
497 |             mutate(newFace, modifyIdxs )
498 |         else:
499 |             newFace.randomize()
500 |         queue.put_nowait( inputParams + newFace.morphFloats )
501 | 
502 | 
503 | def mutate(face, idxList):
504 |     for idx in idxList:
505 |         face.randomize( idx )
506 | 
507 | 
508 | def mate(targetFace, otherFace, idxList ):
509 |     if len(targetFace.morphFloats) != len(otherFace.morphFloats):
510 |         raise Exception("Morph float list didn't match! {} != {}".format(len(targetFace.morphFloats), len(otherFace.morphFloats)))
511 |     for idx in idxList:
512 |         weightA = random.randint(1,100)
513 |         weightB = 100 - weightA
514 |         matedValue = ( ( weightA * targetFace.morphFloats[idx] ) + ( weightB * otherFace.morphFloats[idx] ) ) / ( weightA + weightB )
515 |         targetFace.morphFloats[idx] = matedValue
516 | 
517 | def load_cache_param_gen_helper( config, item ):
518 |     faces,morphs = item
519 |     inputCnt = config.getShape()[0]
520 |     params = config.generateParams(faces)
521 |     return params[:inputCnt] + list(morphs)
522 | 
523 | def neural_net_proc( config, modelFile, batchSize, initialEncodings, cacheToGenerateFrom, inputQueue, outputQueue, doneEvent, exitEvent, onlySeed, pydev ):
524 |     # Quiet TensorFlow logging, and don't let it grab all GPU memory up front
525 |     os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
526 |     import tensorflow as tf
527 |     tfConfig = tf.ConfigProto()
528 |     tfConfig.gpu_options.allow_growth = True
529 |     session = tf.Session(config=tfConfig)
530 |     from keras import backend as K; K.set_session(session) # Hand Keras the session so allow_growth takes effect
531 |     if pydev:
532 |         import pydevd
533 |         pydevd.settrace(suspend=False)
534 | 
535 |     dataName = modelFile + ".train"
536 |     inputCnt = config.getShape()[0]
537 |     outputCnt = config.getShape()[1]
538 | 
539 |     validationInputs = []
540 |     validationOutputs = []
541 |     validationPercent = .05
542 | 
543 |     trainingInputs = []
544 |     trainingOutputs = []
545 | 
546 |     neuralNet = create_neural_net( inputCnt, outputCnt, modelFile )
547 |     if os.path.exists( dataName ) and not onlySeed:
548 |         trainingInputs,trainingOutputs = readTrainingData( dataName )
549 | 
550 |     lastSaveIdx = len(trainingInputs)
551 |     pendingSave = False
552 | 
553 |     if cacheToGenerateFrom is not None:
554 |         from functools import partial
555 | 
556 |         print("Currently have {} samples, now generating from training cache...".format(len(trainingInputs)))
557 |         cache = load_training_cache( config, cacheToGenerateFrom )
558 |         pendingSave = True
559 | 
560 |         # Load the cache across multiple processes
561 |         poolFunc = partial( load_cache_param_gen_helper, config )
562 |         pool = multiprocessing.Pool(multiprocessing.cpu_count())
563 |         for res in tqdm.tqdm(pool.imap_unordered( poolFunc, cache ), total=len(cache) ):
564 |             trainingInputs.append( res[:inputCnt] )
565 |             trainingOutputs.append( res[inputCnt:] )
566 |         pool.close()
567 | 
568 |     print("Starting with {} training samples".format(len(trainingInputs)))
569 | 
570 |     lastSeedOnlyInputTime = 0
571 |     lastSeedOnlyInputCount = 0
572 |     lastSave = time.time()
573 |     outputQueueSize = 256
574 |     outputQueueSaveSize = 1024
575 |     lastReEnqueueCnt = 0
576 |     while not doneEvent.is_set():
577 |         try:
578 |             morphsValid, params = inputQueue.get(block=False)
579 |             inputs = params[:inputCnt]
580 |             outputs = params[inputCnt:]
581 | 
582 |             # If valid we can train on it
583 |             if morphsValid:
584 |                 if random.random() < .25 and len(validationInputs) < validationPercent*len(trainingInputs):
585 |                     validationInputs.append( inputs )
586 |                     validationOutputs.append( outputs )
587 |                 else:
588 |                     trainingInputs.append( inputs )
589 |                     trainingOutputs.append( outputs )
590 |                     if time.time() > lastSave + 120*60 and len(trainingInputs) % 50 == 0:
591 |                         pendingSave = True
592 |                     #print( "{} valid faces".format( len(trainingInputs) ) )
593 | 
594 |             if len(trainingInputs) != lastReEnqueueCnt and len(trainingInputs) % 100 == 0:
595 |                 lastReEnqueueCnt = len(trainingInputs)
596 |                 # Periodically re-enqueue the initial encodings
597 |                 for encoding in initialEncodings:
598 |                     try:
599 |                         params = config.generateParams(encoding)
600 |                         inputQueue.put( ( False, params ) )
601 |                     except:
602 |                         pass # Skip seed encodings that can't generate params
603 | 
604 |             # Don't use predictions until we have trained a bit
605 |             if ( len(trainingInputs) > 10000 ) or onlySeed:
606 |                 # Now, given the encoding, what morphs would we have predicted?
607 |                 predictedOutputs = create_prediction( neuralNet, np.array([inputs]) )
608 |                 predictedParams = inputs + list(predictedOutputs[0])
609 |                 outputQueue.put( predictedParams )
610 |                 # Queue a random look for every predicted look. Sometimes we get stuck with
611 |                 # only predicted looks filling the queue, and it causes a downward spiral
612 |                 queueRandomOutputParams(config, trainingOutputs, outputQueue)
613 | 
614 |         except queue.Empty as e:
615 |             # We've seen queue.Empty trigger spuriously, so double-check the queue size
616 |             if inputQueue.qsize() > 10:
617 |                 continue
618 |             reqdSize = outputQueueSaveSize if pendingSave else outputQueueSize
619 |             try:
620 |                 if not onlySeed:
621 |                     while outputQueue.qsize() < reqdSize:
622 |                         queueRandomOutputParams(config, trainingOutputs, outputQueue)
623 |                 elif ( len(trainingInputs) > lastSeedOnlyInputCount ) or ( time.time() > lastSeedOnlyInputTime + 10 ):
624 |                     lastSeedOnlyInputCount = len(trainingInputs)
625 |                     lastSeedOnlyInputTime = time.time()
626 |                     for encoding in initialEncodings:
627 |                         try:
628 |                             params = config.generateParams(encoding)
629 |                             inputQueue.put( ( False, params ) )
630 |                         except:
631 |                             pass
632 | 
633 |             finally:
634 |                 while True:
635 |                     if len(trainingInputs) > 5000 or ( onlySeed and len(trainingInputs) > 0 ):
636 |                         neuralNet.fit( x=np.array(trainingInputs), y=np.array(trainingOutputs), batch_size=batchSize, epochs=1, verbose=1)
637 |                         if len(validationInputs) > 0:
638 |                             metrics = neuralNet.evaluate( x=np.array(validationInputs), y=np.array(validationOutputs), batch_size=batchSize, verbose=1)
639 |                             print( "Trained over {} samples, validated over {} samples. ".format(len(trainingInputs), len(validationInputs)), end='\t')
640 |                             for metric,val in zip(neuralNet.metrics_names, metrics):
641 |                                 print("( {} : {} )".format(metric,val), end='\t')
642 |                             print("")
643 |                     if not GetKeyState(VK_CAPITAL):
644 |                         break
645 |                     pendingSave = True
646 |                     print("Caps Lock is enabled. Continually training and not feeding the image generator")
647 | 
648 | 
649 |                 if pendingSave:
650 |                     print("Saving model...")
651 |                     neuralNet.save( modelFile )
652 |                     print("Done saving model, saving training data...")
653 |                     saveTrainingData( dataName, trainingInputs, trainingOutputs)
654 |                     lastSaveIdx = len(trainingInputs)
655 |                     print("Save complete!")
656 |                     lastSave = time.time()
657 | 
658 |                 # Was our queue big enough to keep the generator busy while we trained?
659 |                 if outputQueue.qsize() == 0 and not onlySeed:
660 |                     if pendingSave:
661 |                         outputQueueSize *= 1.5
662 |                         print("Increased outputQueueSize to {}".format(outputQueueSize))
663 |                     else:
664 |                         outputQueueSaveSize *= 1.15
665 |                         print("Increased outputQueueSaveSize to {}".format(outputQueueSaveSize))
666 |                 pendingSave = False
667 | 
668 | 
669 |     print("Saving before exit...")
670 |     neuralNet.save( modelFile )
671 |     print("Model saved. Saving training data")
672 |     saveTrainingData( dataName, trainingInputs, trainingOutputs)
673 |     print("Save complete.")
674 |     exitEvent.set()
675 | 
676 | def create_prediction( nn, inputData ):
677 |     prediction = nn.predict(inputData)
678 | 
679 |     #limit range of output
680 |     #prediction = np.around(np.clip(prediction, -1.5, 1.5 ),3)
681 |     return prediction
682 | 
683 | 
684 | def create_neural_net( inputCnt, outputCnt, modelPath ):
685 |     from keras.models import load_model, Model, Sequential
686 |     from keras.optimizers import Adam
687 |     from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization
688 | 
689 |     if os.path.exists(modelPath):
690 |         print("Loading existing model")
691 |         return load_model(modelPath)
692 | 
693 |     model = Sequential()
694 | 
695 |     model.add(Dense(7*inputCnt, input_shape=(inputCnt,), kernel_initializer='random_uniform'))
696 |     model.add(LeakyReLU())
697 |     model.add(Dropout(.5))
698 | 
699 |     model.add(Dense(7*inputCnt, kernel_initializer='random_uniform'))
700 |     model.add(LeakyReLU())
701 |     model.add(Dropout(.5))
702 | 
703 |     model.add(Dense(7*inputCnt, kernel_initializer='random_uniform'))
704 |     model.add(LeakyReLU())
705 |     model.add(Dropout(.5))
706 | 
707 |     model.add(Dense(7*inputCnt, kernel_initializer='random_uniform'))
708 |     model.add(LeakyReLU())
709 |     model.add(Dropout(.5))
710 | 
711 |     model.add(Dense(outputCnt, activation='linear'))
712 | 
713 |     model.summary()
714 | 
715 |     inputLayer = Input(shape=(inputCnt,))
716 |     predictor = model(inputLayer)
717 |     nn = Model( inputLayer, predictor )
718 | 
719 |     optimizer = Adam( lr=0.0001 )
720 |     nn.compile(loss='logcosh',
721 |                optimizer=optimizer,
722 |                metrics=['accuracy'])
723 | 
724 |     return nn
725 | 
726 | ###############################
727 | # parse arguments
728 | #
729 | def parseArgs():
730 |     parser = argparse.ArgumentParser( description="Train GAN" )
731 |     parser.add_argument('--configFile', help="Model configuration file", required=True)
732 |     parser.add_argument('--seedImagePath', help="Root path for seed images. Must have at least 1 valid seed imageset", required=True)
733 |     parser.add_argument('--onlySeedImages', action='store_true', default=False, help="Train *only* on the seed images")
734 |     parser.add_argument('--seedJsonPath', help="Path to JSON looks to seed training with", default=None)
735 |     parser.add_argument('--tmpDir', help="Directory to store temporary files. Recommend to use a RAM disk.", default='D:/Generated/')
736 |     parser.add_argument('--encBatchSize', type=int, help="Batch size for generating encodings", default=64)
737 |     parser.add_argument('--outputFile', help="File to write output model to", default="output.model")
738 |     parser.add_argument('--trainingDataCache', help="File to cache raw training data", default="training.cache")
739 |     parser.add_argument('--useTrainingDataCache', default=False, action='store_true', help="Generates training data from the cache and adds it to training data. Useful on first run with new config")
740 |     parser.add_argument("--pydev", action='store_true', default=False, help="Enable pydevd debugging")
741 | 
742 | 
743 |     return parser.parse_args()
744 | 
745 | 
746 | ###############################
747 | # program entry point
748 | #
749 | if __name__ == "__main__":
750 |     multiprocessing.freeze_support()
751 |     args = parseArgs()
752 |     main( args )
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 | 
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 | 
8 | Preamble
9 | 
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 | 
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 | 
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 | 
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 | 
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 | 
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 | 
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 | 
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable.
Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. 
A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 | 
589 | 15. Disclaimer of Warranty.
590 | 
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 | 
600 | 16. Limitation of Liability.
601 | 
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 | 
612 | 17. Interpretation of Sections 15 and 16.
613 | 
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 | 
621 | END OF TERMS AND CONDITIONS
622 | 
623 | How to Apply These Terms to Your New Programs
624 | 
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 | 
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 | 
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 | 
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 | 
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 | 
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 | 
650 | Also add information on how to contact you by electronic and paper mail.
651 | 
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 | 
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 | 
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 | 
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 | 
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 | 
--------------------------------------------------------------------------------