├── .gitignore ├── CircuitQuantifier ├── __init__.py ├── circuit_critic.py ├── circuit_validator.py ├── critic_double_well.py ├── critic_example_multi_evaluations.py ├── critic_target_spectrum.py └── critics.py ├── DatabaseHandler ├── __init__.py ├── circuit_handler.py ├── database_handler.py ├── db_werkzeug.py ├── master_handler.py ├── merit_handler.py ├── sqlite_interface.py ├── sqlite_operations.py └── task_handler.py ├── Designers ├── __init__.py ├── abstract_designer.py ├── circuit_designer.py ├── design_utils.py ├── particle_swarm_designer.py ├── random_designer.py └── scipy_minimize_designer.py ├── LICENSE ├── README.md ├── Submitter ├── .nfs00000000080a4c1b00019518 ├── QcircSim_wrapper.py ├── __init__.py ├── abstract_submitter.py ├── circuit_submitter.py ├── execute.py ├── local_submitter.py ├── slurm_submitter.py ├── solver_2node.py └── solver_JJcircuitSimV3.py ├── TaskSets ├── __init__.py ├── calculation_task_set.py ├── filtering_task_set.py └── task_set.py ├── Utilities ├── __init__.py ├── decorators.py ├── defaults.py ├── file_logger.py ├── native_event_handler.py ├── settings.py └── watchdog_event_handler.py ├── circuit_searcher.py ├── environment.yml ├── main_benchmark.py ├── main_fourcoupler.py ├── run_coupler.sh ├── scratch_file └── target_fluxqubit.p /.gitignore: -------------------------------------------------------------------------------- 1 | # Specific to operating system 2 | __pycache__/ 3 | **/__pycache__/ 4 | .DS_Store 5 | **/*.pyc 6 | 7 | # Databases should be kept separate 8 | **/*.db 9 | **/*.db-journal 10 | 11 | # Temporary directories and files 12 | .scratch/ 13 | TIME* 14 | .scratch_dir* 15 | wraps* 16 | log_* 17 | LOG 18 | 19 | # Miscellaneous 20 | JJcircuitSim*/ 21 | Mathematica_script* 22 | -------------------------------------------------------------------------------- /CircuitQuantifier/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from CircuitQuantifier.circuit_critic import CircuitCritic 3 | from CircuitQuantifier.circuit_validator import CircuitValidator 4 | -------------------------------------------------------------------------------- /CircuitQuantifier/circuit_critic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import copy 6 | import uuid 7 | import numpy as np 8 | import threading 9 | 10 | from Utilities.decorators import thread 11 | 12 | #==================================================== 13 | 14 | class CircuitCritic(object): 15 | 16 | def __init__(self, circuit_params): 17 | self.circuit_params = circuit_params 18 | self.CRITICIZED_CIRCUITS = [] 19 | self.EXTRA_TASKS = [] 20 | self.RECEIVED_EXTRA_EVALUATIONS = {} 21 | 22 | import CircuitQuantifier.critics as critics 23 | self.merit_functions = {} 24 | for merit in dir(critics): 25 | if merit.startswith('__'): continue 26 | self.merit_functions[merit.split('_')[-1]] = getattr(critics, merit) 27 | 28 | ############################################################## 29 | 30 | def report_reevaluations(self, circuits): 31 | for circuit in circuits: 32 | self.RECEIVED_EXTRA_EVALUATIONS[circuit['circuit']['circuit_id']] = circuit 33 | 34 | 35 | def run_merit_evaluation(self, merit_func, circuit_dict, merit_options, task): 36 | merit_eval_dict = merit_func(circuit_dict, merit_options, circuit_params = self.circuit_params) 37 | 38 | if len(merit_eval_dict['extra_tasks']) > 0: 39 | 40 | # check if the merit 
evaluation requests new tasks
41 |             remaining_extra_circuit_ids = []
42 |             received_extra_task_evaluations = {}
43 | 
44 |             for extra_task in merit_eval_dict['extra_tasks']:
45 |                 # we need to modify the circuit_id of the proposed circuit parameters
46 |                 new_circuit_id = str(uuid.uuid4())
47 |                 extra_task['circuit']['circuit_id'] = new_circuit_id
48 |                 self.EXTRA_TASKS.append(extra_task)
49 |                 remaining_extra_circuit_ids.append(new_circuit_id)
50 | 
51 |             while len(received_extra_task_evaluations) < len(remaining_extra_circuit_ids):
52 |                 # busy-wait: check if we have received any newly evaluated circuits
53 |                 extra_circuit_ids = list(self.RECEIVED_EXTRA_EVALUATIONS.keys())
54 |                 for extra_circuit_id in extra_circuit_ids:
55 |                     # memorize received evaluations
56 |                     if extra_circuit_id in remaining_extra_circuit_ids:
57 |                         received_extra_task_evaluations[extra_circuit_id] = self.RECEIVED_EXTRA_EVALUATIONS[extra_circuit_id]
58 |                         del self.RECEIVED_EXTRA_EVALUATIONS[extra_circuit_id]
59 | 
60 | 
61 |             # call the evaluator again, now with the evaluated context circuits
62 |             merit_eval_dict = merit_func(circuit_dict, merit_options,
63 |                                          circuit_params = self.circuit_params,
64 |                                          context_circuits = received_extra_task_evaluations.values())
65 | 
66 |             circuit_dict['loss'] = merit_eval_dict['loss']
67 |             circuit_dict['context_circuits'] = list(received_extra_task_evaluations.values())
68 | 
69 |         else:
70 | 
71 |             circuit_dict['loss'] = merit_eval_dict['loss']
72 |             circuit_dict['context_circuits'] = None
73 | 
74 |         self.CRITICIZED_CIRCUITS.append([circuit_dict, task])
75 | 
76 |     ##############################################################
77 | 
78 |     @thread
79 |     def criticize_circuit(self, circuit, task_set, task):
80 |         # circuit: dict | information about circuit
81 | 
82 |         merit = task_set.settings['merit']
83 |         merit_options = task_set.settings['merit_options']
84 | 
85 |         # simulation timed out: report the circuit with a NaN loss so it is not silently dropped
86 |         if 'PLACEHOLDER' in circuit['measurements']:
87 |             circuit['loss'], circuit['context_circuits'] = np.nan, None
88 |             self.CRITICIZED_CIRCUITS.append([circuit, task])
89 |         # otherwise use the specified merit function to calculate the loss
90 |         else:
91 | 
92 |             if merit not in self.merit_functions:
93 |                 print('# ERROR | ... 
could not find merit function: %s' % merit) 94 | return None 95 | 96 | # merit function needs to be put on a separate thread in case it likes to launch new tasks 97 | merit_func = self.merit_functions[merit] 98 | self.run_merit_evaluation(merit_func, circuit, merit_options, task) 99 | 100 | 101 | def get_requested_tasks(self): 102 | new_tasks = copy.deepcopy(self.EXTRA_TASKS) 103 | for new_task in new_tasks: 104 | self.EXTRA_TASKS.pop(0) 105 | return new_tasks 106 | 107 | 108 | def criticize_circuits(self, circuits, task_set, tasks): 109 | for circuit_index, circuit in enumerate(circuits): 110 | self.criticize_circuit(circuit, task_set, tasks[circuit_index]) 111 | 112 | 113 | def get_criticized_circuits(self): 114 | circuits = copy.deepcopy(self.CRITICIZED_CIRCUITS) 115 | for circuit in circuits: 116 | self.CRITICIZED_CIRCUITS.pop(0) 117 | return circuits 118 | 119 | 120 | def get_extra_tasks(self): 121 | circuits = copy.deepcopy(self.EXTRA_TASKS) 122 | for circuit in circuits: 123 | self.EXTRA_TASKS.pop(0) 124 | return circuits 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | -------------------------------------------------------------------------------- /CircuitQuantifier/circuit_validator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import copy 6 | import threading 7 | import numpy as np 8 | import time 9 | 10 | from Utilities.decorators import thread 11 | 12 | #==================================================== 13 | 14 | class CircuitValidator(object): 15 | 16 | VALIDATED_CIRCUITS = [] 17 | 18 | def __init__(self): 19 | pass 20 | 21 | 22 | @thread 23 | def run_validation(self, circuit): 24 | start = time.time() 25 | 26 | capacities = circuit['circuit_values']['capacities'] 27 | 28 | k = len(capacities) 29 | n = int(np.sqrt(2 * k + 0.25) - 0.5) 30 | 31 | c_mat_mf = np.zeros((n, n)) 32 | c_mat_mf[np.triu_indices(n, k = 0)] = capacities 33 | c_mat_mf = np.maximum(c_mat_mf, c_mat_mf.transpose()) 34 | 35 | c_mat_modified = np.diag(np.sum(c_mat_mf, axis = 0)) 36 | c_mat_modified += np.diag(np.diag(c_mat_mf)) - c_mat_mf 37 | 38 | determinant = np.abs(np.linalg.det(c_mat_modified)) 39 | if determinant > 10**-6: 40 | circuit['is_valid'] = True 41 | else: 42 | circuit['is_valid'] = False 43 | 44 | self.VALIDATED_CIRCUITS.append(circuit) 45 | 46 | 47 | def validate_circuits(self, circuits): 48 | start = time.time() 49 | for circuit in circuits: 50 | self.run_validation(circuit) 51 | end = time.time() 52 | content = open('TIME_validations', 'a') 53 | content.write('%.5f\t%d\n' % (end - start, len(circuits))) 54 | content.close() 55 | 56 | 57 | def get_validated_circuits(self): 58 | validated_circuits = copy.deepcopy(self.VALIDATED_CIRCUITS) 59 | for circuit in validated_circuits: 60 | self.VALIDATED_CIRCUITS.pop(0) 61 | return validated_circuits 62 | -------------------------------------------------------------------------------- /CircuitQuantifier/critic_double_well.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import numpy as np 4 | import copy 5 | 6 | def check_circuit_doublewell(circuit): 7 | """ 8 | Check whether the circuit spectrum is None, zero-length, flat, or has no double-well feature. 9 | If one or more of those is the case, the output is False. Otherwise, it is True. 
Spectrum is 10 | in GHz. 11 | 12 | Input 13 | circuit: circuit dict 14 | 15 | Output 16 | check: bool 17 | """ 18 | 19 | # Initialization 20 | check = True 21 | spectrum = np.array(circuit['measurements']['eigen_spectrum']).T 22 | 23 | # Check validity of spectrum 24 | if circuit['measurements']['eigen_spectrum'] is None: 25 | check = False 26 | elif len(spectrum) == 0: 27 | check = False 28 | elif np.max(spectrum[0]) - np.min(spectrum[0]) < 0.001: #almost constant spectrum 29 | check = False 30 | 31 | # Check outermost level population 32 | elif 'max_pop' in circuit['measurements'] and circuit['measurements']['max_pop'] > 0.01: 33 | print('MAX POP TOO HIGH!') 34 | check = False 35 | 36 | # Check for double well 37 | else: 38 | # Extract ground and excited state spectra 39 | spectrumGS = spectrum[0] 40 | spectrumES = spectrum[1] 41 | 42 | ### Check for double-well feature ### 43 | # Edges higher than inner region? 44 | c1 = (spectrumGS[0] > np.min(spectrumGS)) and (spectrumGS[-1] > np.min(spectrumGS)) 45 | # Center higher than minima? 46 | mididx = len(spectrumGS) // 2 47 | c2 = spectrumGS[mididx] > np.min(spectrumGS) 48 | # Peak has significant height 49 | c3 = (spectrumGS[mididx] - np.min(spectrumGS)) > 0.05 50 | # Energy levels do not cross 51 | c4 = np.min(np.array(spectrumES) - np.array(spectrumGS)) > 0.1 52 | check = c1 and c2 and c3 and c4 53 | 54 | return check 55 | 56 | 57 | def merit_DoubleWell(circuit, merit_options, **kwargs): 58 | 59 | print('# Calculating double-well merit ...') 60 | 61 | # Loss function settings 62 | max_peak = merit_options['max_peak'] 63 | max_split = merit_options['max_split'] 64 | norm_p = merit_options['norm_p'] 65 | flux_sens_bool = merit_options['flux_sens'] 66 | max_merit = merit_options['max_merit'] 67 | 68 | # Check if circuit is valid and has double well 69 | if check_circuit_doublewell(circuit): 70 | 71 | # Circuit spectrum 72 | spectrum = np.array(circuit['measurements']['eigen_spectrum']).T 73 | 74 | # Extract ground and excited state spectra 75 | spectrumGS = spectrum[0] 76 | spectrumES = spectrum[1] 77 | 78 | ### Calculate loss ### 79 | 80 | # (1) Check if flux sensitivity should be calculated and submit extra task if necessary 81 | hsens = None 82 | # Context circuits have been calculated 83 | if flux_sens_bool and ('context_circuits' in kwargs): 84 | hsens = 0 85 | context_circuits = kwargs['context_circuits'] 86 | for context_circuit in context_circuits: 87 | # Context circuit valid 88 | if check_circuit_doublewell(context_circuit): 89 | spectrum_context = np.array(context_circuit['measurements']['eigen_spectrum']).T 90 | spectrumGS_context = spectrum_context[0] 91 | mididx = len(spectrumGS_context) // 2 92 | hsens += abs( np.min(spectrumGS_context[:mididx]) - np.min(spectrumGS_context[mididx:]) ) 93 | # Context circuit invalid 94 | else: 95 | loss = max_merit 96 | print('# INVALID CONTEXT CIRCUIT -> LOSS: {} ...'.format(loss)) 97 | merit_dict = {'loss': loss, 'extra_tasks': []} 98 | return merit_dict 99 | 100 | # Context circuits have *not* been calculated 101 | elif flux_sens_bool: 102 | print('# PREPARING CONTEXT CIRCUITS ...') 103 | # Determine number of flux biases 104 | if 'num_biases' in circuit['measurements']: 105 | num_biases = circuit['measurements']['num_biases'] 106 | # Case A: no additional biases beyond the main loop 107 | if num_biases<=1: 108 | print('# No additional biases, moving to merit calculation ...') 109 | pass 110 | # Case B: perturb existing additional biases 111 | else: 112 | print('# Preparing {} perturbed 
circuit(s)'.format(num_biases-1)) 113 | perturbed_circuits = [] 114 | for i in range(1,num_biases): 115 | perturbed_circuit = copy.deepcopy(circuit) 116 | perturbed_circuit['measurements'].clear() 117 | perturbed_circuit['circuit']['circuit_values']['phiOffs'][i] += 0.0001 118 | perturbed_circuits.append(perturbed_circuit) 119 | merit_dict = {'extra_tasks': perturbed_circuits} 120 | return merit_dict 121 | # Number of biases can not be determined (was not saved) 122 | else: 123 | loss = max_merit 124 | print('# NUMBER OF BIASES COULD NOT BE DETERMINED -> LOSS: {} ...'.format(loss)) 125 | merit_dict = {'loss': loss, 'extra_tasks': []} 126 | return merit_dict 127 | 128 | # (2) Center peak height 129 | idx_mid = int(len(spectrumGS)/2) 130 | hpeak = spectrumGS[idx_mid] - np.min(spectrumGS) 131 | hpeak = np.min((hpeak, max_peak)) 132 | 133 | # (3) Level separation 134 | hsplit = np.min(spectrumES-spectrumGS) 135 | hsplit = np.min((hsplit, max_split)) 136 | 137 | # Combined loss 138 | if hsens == None: 139 | loss = max_merit - ( (hpeak/max_peak)**norm_p + (hsplit/max_split)**norm_p )**(1/norm_p) 140 | else: 141 | # print('A:', (hpeak/max_peak)) 142 | # print('B:', (hsplit/max_split)) 143 | # print('C:', noise_factor*(1 - np.min((hsens/hpeak, 1)))) 144 | loss = max_merit - ( abs(hpeak/max_peak)**norm_p + abs(hsplit/max_split)**norm_p \ 145 | + (1 - np.min((hsens/hpeak, 1)))**norm_p )**(1/norm_p) 146 | 147 | else: 148 | loss = max_merit 149 | 150 | print('# LOSS: {} ...'.format(loss)) 151 | 152 | merit_dict = {'loss': loss, 'extra_tasks': []} 153 | 154 | return merit_dict 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | -------------------------------------------------------------------------------- /CircuitQuantifier/critic_example_multi_evaluations.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import copy 4 | import numpy as np 5 | 6 | 7 | def merit_TwoEvalExample(circuit_dict, merit_options, **kwargs): 8 | 9 | merit_dict = {'extra_tasks': []} 10 | 11 | if 'context_circuits' in kwargs: 12 | rmsd = 0. 
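        # (second pass) CircuitCritic.run_merit_evaluation re-invokes this merit
        # function with the evaluated extra tasks passed back in as the
        # 'context_circuits' keyword; the loss reported below is the summed RMSD
        # between each context spectrum and the original circuit's spectrum.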
13 | context_circuits = kwargs['context_circuits'] 14 | for context_circuit in context_circuits: 15 | context_spectrum = context_circuit['measurements']['eigen_spectrum'] 16 | 17 | dev = context_spectrum - circuit_dict['measurements']['eigen_spectrum'] 18 | rmsd += np.sqrt(np.mean(np.square(dev))) 19 | 20 | merit_dict['loss'] = rmsd 21 | 22 | else: 23 | perturbed_circuit = copy.deepcopy(circuit_dict) 24 | perturbed_circuit['general_params']['phiExt'] += 0.1 25 | 26 | merit_dict['extra_tasks'].append(perturbed_circuit) 27 | 28 | return merit_dict -------------------------------------------------------------------------------- /CircuitQuantifier/critic_target_spectrum.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import numpy as np 4 | 5 | 6 | def merit_TargetSpectrum(circuit, merit_options, circuit_params = {}): 7 | 8 | # Calculated and target spectrum 9 | spectrum = np.array(circuit['measurements']['eigen_spectrum']) 10 | targetspec = np.array(merit_options['target_spectrum']) 11 | 12 | # Calculate loss from mean square of spectra difference 13 | loss_flux = np.mean((spectrum[:,1:3]-targetspec[:,1:3])**2) 14 | loss = loss_flux 15 | 16 | # Symmetry enforcement for 2-node circuits without linear inductances 17 | if merit_options['include_symmetry']: 18 | Carr_norm = (circuit['circuit']['circuit_values']['capacities'] - circuit_params['c_specs']['low']) / circuit_params['c_specs']['high'] 19 | Jarr_norm = (circuit['circuit']['circuit_values']['junctions'] - circuit_params['j_specs']['low']) / circuit_params['j_specs']['high'] 20 | Larr = circuit['circuit']['circuit_values']['inductances'] 21 | if len(Carr_norm) == 3 and Larr == None: 22 | loss_symmetry = np.abs(Carr_norm[0] - Carr_norm[2]) + np.abs(Jarr_norm[0] - Jarr_norm[2]) 23 | loss += 100 * loss_symmetry 24 | else: 25 | raise NotImplementedError("Symmetry loss only implemented for 2-node circuits without linear inductances") 26 | 27 | # Apply squashing function 28 | loss = np.log10(loss) 29 | 30 | merit_dict = {'loss': loss, 'extra_tasks': []} 31 | 32 | return merit_dict 33 | -------------------------------------------------------------------------------- /CircuitQuantifier/critics.py: -------------------------------------------------------------------------------- 1 | 2 | from CircuitQuantifier.critic_double_well import merit_DoubleWell 3 | from CircuitQuantifier.critic_example_multi_evaluations import merit_TwoEvalExample 4 | from CircuitQuantifier.critic_target_spectrum import merit_TargetSpectrum 5 | -------------------------------------------------------------------------------- /DatabaseHandler/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from DatabaseHandler.sqlite_operations import AddEntry 3 | from DatabaseHandler.sqlite_operations import FetchEntries 4 | from DatabaseHandler.sqlite_operations import UpdateEntries 5 | 6 | from DatabaseHandler.sqlite_interface import SQLiteDatabase 7 | 8 | from DatabaseHandler.db_werkzeug import DB_Werkzeug 9 | 10 | from DatabaseHandler.circuit_handler import CircuitHandler 11 | from DatabaseHandler.merit_handler import MeritHandler 12 | from DatabaseHandler.master_handler import MasterHandler 13 | from DatabaseHandler.task_handler import TaskHandler 14 | 15 | from DatabaseHandler.database_handler import DatabaseHandler 16 | -------------------------------------------------------------------------------- /DatabaseHandler/circuit_handler.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import uuid 6 | import copy 7 | 8 | from DatabaseHandler import DB_Werkzeug 9 | 10 | #==================================================== 11 | 12 | class CircuitHandler(DB_Werkzeug): 13 | 14 | DB_ATTRIBUTES = {'circuit_id': 'string', 15 | 'circuit_status': 'string', 16 | 'circuit_values': 'pickle', 17 | 'is_valid': 'bool', 18 | 'context_circuits': 'pickle'} 19 | 20 | NEW_CIRCUITS = {} 21 | VALIDATED_CIRCUITS = {} 22 | ALL_CIRCUITS = {} 23 | 24 | def __init__(self, db_settings): 25 | 26 | DB_Werkzeug.__init__(self) 27 | self.create_database(db_settings, self.DB_ATTRIBUTES) 28 | 29 | 30 | def add_new_circuits(self, info_dicts): 31 | for info_dict in info_dicts: 32 | info_dict['circuit_id'] = str(uuid.uuid4()) 33 | info_dict['circuit_status'] = 'new' 34 | info_dict['is_valid'] = False 35 | self.NEW_CIRCUITS[info_dict['circuit_id']] = info_dict 36 | self.ALL_CIRCUITS[info_dict['circuit_id']] = info_dict 37 | self.db_add(info_dicts) 38 | 39 | 40 | def get_new_circuits(self): 41 | return list(self.NEW_CIRCUITS.values()) 42 | 43 | 44 | def get_validated_circuits(self): 45 | return list(self.VALIDATED_CIRCUITS.values()) 46 | 47 | 48 | def select_circuits(self, circuit_ids): 49 | try: 50 | circuit = [self.ALL_CIRCUITS[circuit_id] for circuit_id in circuit_ids] 51 | except KeyError: 52 | return self.db_fetch_all({'circuit_id': circuit_ids}) 53 | return circuit 54 | 55 | 56 | def reserve_circuits(self, info_dicts): 57 | conditions, updates = [], [] 58 | for index, info_dict in enumerate(info_dicts): 59 | condition = {'circuit_id': info_dict['circuit_id']} 60 | update = {'circuit_status': 'processing'} 61 | conditions.append(condition) 62 | updates.append(update) 63 | del self.VALIDATED_CIRCUITS[info_dict['circuit_id']] 64 | self.db_update_all(conditions, updates) 65 | 66 | 67 | def release_circuits(self, info_dicts): 68 | conditions, updates = [], [] 69 | for index, info_dict in enumerate(info_dicts): 70 | condition = {'circuit_id': info_dict['circuit_id']} 71 | update = {'circuit_status': 'validated'} 72 | conditions.append(condition) 73 | updates.append(update) 74 | self.VALIDATED_CIRCUITS[info_dict['circuit_id']] = info_dict 75 | self.db_update_all(conditions, updates) 76 | 77 | 78 | def set_circuits_to_unused(self): 79 | conditions = [{'circuit_status': 'validated'}, {'circuit_status': 'new'}] 80 | updates = [{'circuit_status': 'unused'}, {'circuit_status': 'unused'}] 81 | self.NEW_CIRCUITS = {} 82 | self.VALIDATED_CIRCUITS = {} 83 | self.db_update_all(conditions, updates) 84 | 85 | 86 | def store_validated_circuits(self, circuits): 87 | conditions, updates = [], [] 88 | for circuit in circuits: 89 | condition = {'circuit_id': circuit['circuit_id']} 90 | update = {'is_valid': circuit['is_valid'], 91 | 'circuit_status': 'validated'} 92 | conditions.append(condition) 93 | updates.append(update) 94 | 95 | try: 96 | del self.NEW_CIRCUITS[circuit['circuit_id']] 97 | except KeyError: 98 | pass 99 | 100 | if circuit['is_valid']: 101 | self.VALIDATED_CIRCUITS[circuit['circuit_id']] = circuit 102 | 103 | self.db_update_all(conditions, updates) 104 | 105 | 106 | def update_circuits_with_contexts(self, circuits): 107 | conditions, updates = [], [] 108 | for circuit in circuits: 109 | condition = {'circuit_id': circuit['circuit']['circuit_id']} 110 | update = {'context_circuits': circuit['context_circuits']} 111 | 
conditions.append(condition)
112 |             updates.append(update)
113 |         self.db_update_all(conditions, updates)
114 | 
--------------------------------------------------------------------------------
/DatabaseHandler/database_handler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import time
4 | import numpy as np
5 | 
6 | #========================================================================
7 | 
8 | from DatabaseHandler import CircuitHandler
9 | from DatabaseHandler import MeritHandler
10 | from DatabaseHandler import MasterHandler
11 | from DatabaseHandler import TaskHandler
12 | 
13 | from Utilities.decorators import thread
14 | 
15 | #========================================================================
16 | 
17 | class DatabaseHandler(object):
18 | 
19 |     CIRCUIT_EVALUATIONS = {}
20 |     CIRCUIT_EVALUATIONS_CHANGED = {}
21 | 
22 |     def __init__(self, db_settings, db_path):
23 | 
24 |         self.db_settings = db_settings
25 |         if db_path is not None:
26 |             for db_prop in ['master', 'circuits', 'merits', 'tasks']:
27 |                 db_settings = getattr(self.db_settings, db_prop)
28 |                 db_settings.db_path = '%s/%s.db' % (db_path, db_prop)
29 |                 setattr(self.db_settings, db_prop, db_settings)
30 | 
31 |         self.circuit_handler = CircuitHandler(self.db_settings.circuits)
32 |         self.master_handler = MasterHandler(self.db_settings.master)
33 |         self.merit_handler = MeritHandler(self.db_settings.merits)
34 |         self.task_handler = TaskHandler(self.db_settings.tasks)
35 | 
36 |     #====================================================================
37 | 
38 |     def add_task(self, info_dict):
39 |         self.task_handler.add_tasks([info_dict])  # the handlers only implement the plural add_tasks
40 |         master_entry = {key: info_dict[key] for key in ['task_id', 'task_set_id', 'primer_index', 'execution_index']}
41 |         if 'condition_id' in info_dict:
42 |             master_entry['condition_id'] = info_dict['condition_id']
43 |         self.master_handler.add_tasks([master_entry])
44 | 
45 |     def add_tasks(self, info_dicts):
46 |         master_entries = []
47 |         for info_dict in info_dicts:
48 |             master_entry = {key: info_dict[key] for key in ['task_id', 'task_set_id', 'primer_index', 'execution_index']}
49 |             if 'condition_id' in info_dict:
50 |                 master_entry['condition_id'] = info_dict['condition_id']
51 |             master_entries.append(master_entry)
52 |         self.task_handler.add_tasks(info_dicts)
53 |         self.master_handler.add_tasks(master_entries)  # pass the trimmed master entries, not the full task dicts
54 | 
55 |     def fetch_remaining_tasks(self, task_set_id):
56 |         return self.task_handler.fetch_remaining_tasks(task_set_id)
57 | 
58 |     def task_set_completed(self, task_set_id):
59 |         return self.task_handler.task_set_completed(task_set_id)
60 | 
61 |     def report_circuit_submission(self):
62 |         self.task_handler.report_circuit_submission()
63 | 
64 |     def report_circuit_computation(self):
65 |         self.task_handler.report_circuit_computation()
66 | 
67 |     def set_tasks_to_submitted(self, tasks):
68 |         self.task_handler.set_tasks_to_submitted(tasks)
69 | 
70 |     def set_tasks_to_redundant(self, tasks):
71 |         return self.task_handler.set_tasks_to_redundant(tasks)
72 | 
73 |     def set_tasks_to_computed(self, circuits):
74 |         circuit_ids = [circuit['circuit']['circuit_id'] for circuit in circuits]
75 |         id_dicts = self.master_handler.get({'circuit_id': circuit_ids})
76 | 
77 |         conditions = []
78 |         for id_dict in id_dicts:
79 |             condition = {key: id_dict[key] for key in ['task_set_id', 'task_id', 'primer_index', 'execution_index']}
80 |             conditions.append(condition)
81 |         self.task_handler.set_tasks_to_computed(conditions)
82 |         return id_dicts
83 | 
84 | 
#====================================================================
85 | 
86 |     def check_resource_availability(self, task_dict):
87 |         return self.task_handler.check_resource_availability(task_dict)
88 | 
89 |     def get_num_available_resources(self, task_set):
90 |         return self.task_handler.get_num_available_resources(task_set)
91 | 
92 |     #====================================================================
93 | 
94 |     def add_new_circuit(self, circuit):
95 |         self.circuit_handler.add_new_circuits([circuit])  # CircuitHandler only implements the plural method
96 | 
97 |     def add_new_circuits(self, circuits):
98 |         self.circuit_handler.add_new_circuits(circuits)
99 | 
100 |     def get_new_circuits(self):
101 |         circuits = self.circuit_handler.get_new_circuits()
102 |         return circuits
103 | 
104 |     def get_validated_circuits(self):
105 |         circuits = self.circuit_handler.get_validated_circuits()
106 |         return circuits
107 | 
108 |     def reserve_circuits(self, circuit_dicts):
109 |         self.circuit_handler.reserve_circuits(circuit_dicts)
110 | 
111 |     def release_circuits(self, circuit_dicts):
112 |         self.circuit_handler.release_circuits(circuit_dicts)
113 | 
114 |     def set_circuits_to_unused(self):
115 |         self.circuit_handler.set_circuits_to_unused()
116 | 
117 |     #====================================================================
118 | 
119 |     def store_validated_circuits(self, circuits):
120 |         valid_circuits = []
121 |         for circuit in circuits:
122 |             if circuit['is_valid']:
123 |                 valid_circuits.append(circuit)
124 |         self.circuit_handler.store_validated_circuits(valid_circuits)
125 | 
126 |     def __OLD__store_validated_circuits(self, circuits):
127 |         self.circuit_handler.store_validated_circuits(circuits)
128 |         invalid_circuit_ids = []
129 |         for circuit in circuits:
130 |             if not circuit['is_valid']:
131 |                 invalid_circuit_ids.append(circuit['circuit_id'])
132 |         if len(invalid_circuit_ids) > 0:
133 |             loss_ids = self.merit_handler.add_losses_for_invalid_circuits(len(invalid_circuit_ids))
134 |             self.master_handler.add_invalid_circuits(invalid_circuit_ids, loss_ids)
135 | 
136 |     #====================================================================
137 | 
138 |     def get_circuit_evaluations(self, task_set_id):
139 | 
140 |         # cache evaluations per task set to accelerate repeated queries
141 |         if task_set_id not in self.CIRCUIT_EVALUATIONS_CHANGED or self.CIRCUIT_EVALUATIONS_CHANGED[task_set_id]:
142 |             self.CIRCUIT_EVALUATIONS_CHANGED[task_set_id] = False
143 | 
144 |             identifier_sets = self.master_handler.get_linked_losses_and_circuits(task_set_id)
145 |             observations = []
146 |             circuit_ids = []
147 |             merit_ids = []
148 |             for identifier_set in identifier_sets:
149 |                 circuit_id = identifier_set['circuit_id']
150 |                 merit_id = identifier_set['merit_id']
151 |                 if circuit_id is None or merit_id is None: continue
152 | 
153 |                 circuit_ids.append(circuit_id)
154 |                 merit_ids.append(merit_id)
155 | 
156 |             start = time.time()
157 |             circuits = self.circuit_handler.select_circuits(circuit_ids)
158 |             merits = self.merit_handler.select_merits(merit_ids)
159 | 
160 |             for circuit_index, circuit in enumerate(circuits):
161 |                 merit = merits[circuit_index]
162 | 
163 |                 observ_dict = {}
164 |                 for param_name, param_value in circuit['circuit_values'].items():
165 |                     observ_dict[param_name] = {'samples': param_value}
166 |                 for merit_name, merit_value in merit['merit_value'].items():
167 |                     observ_dict[merit_name] = merit_value
168 |                 observations.append(observ_dict)
169 |             self.CIRCUIT_EVALUATIONS[task_set_id] = observations
170 |             return observations
171 |         else:
172 |             return self.CIRCUIT_EVALUATIONS[task_set_id]
173 | 
174 |     def 
get_prior_circuit_evaluations(self): 175 | condition = {'interest_score': 'relevant'} 176 | identifier_sets = self.master_handler.db_fetch_all(condition) 177 | observations = [] 178 | for identifier_set in identifier_sets: 179 | circuit_id = identifier_set['circuit_id'] 180 | merit_id = identifier_set['merit_id'] 181 | if circuit_id is None or merit_id is None: continue 182 | 183 | circuit = self.circuit_handler.db_fetch_all({'circuit_id': circuit_id})[0] 184 | merit = self.merit_handler.db_fetch_all({'merit_id': merit_id})[0] 185 | 186 | observ_dict = {} 187 | for param_name, param_value in circuit['circuit_values'].items(): 188 | observ_dict[param_name] = {'samples': param_value} 189 | for merit_name, merit_value in merit['merit_value'].items(): 190 | observ_dict[merit_name] = merit_value 191 | observations.append(observ_dict) 192 | return observations 193 | 194 | #==================================================================== 195 | 196 | def link_submissions(self, tasks, circuits): 197 | self.master_handler.link_submissions(tasks, circuits) 198 | 199 | #==================================================================== 200 | 201 | def store_criticized_circuits(self, circuit_dicts, id_dicts = None): 202 | 203 | circuits = [circuit_dict['circuit'] for circuit_dict in circuit_dicts] 204 | circuit_ids = [circuit['circuit_id'] for circuit in circuits] 205 | 206 | merits = self.merit_handler.add_losses_for_circuits(circuit_dicts) 207 | 208 | self.master_handler.link_losses_to_circuits(circuits, merits) 209 | self.circuit_handler.update_circuits_with_contexts(circuit_dicts) 210 | 211 | if id_dicts is None: 212 | id_dicts = self.master_handler._get({'circuit_id': circuit_ids}) 213 | 214 | conditions = [] 215 | for id_dict in id_dicts: 216 | self.CIRCUIT_EVALUATIONS_CHANGED[id_dict['task_set_id']] = True 217 | conditions.append({key: id_dict[key] for key in ['task_set_id', 'task_id', 'primer_index', 'execution_index']}) 218 | self.task_handler.set_tasks_to_completed(conditions) 219 | 220 | #==================================================================== 221 | 222 | def get_task_set_progress_info(self, task_set, run_time): 223 | 224 | counter = self.task_handler.COUNTER 225 | statuses = range(counter['statuses']) 226 | num_new = counter['new'] 227 | num_submitted = counter['submitted'] 228 | num_computed = counter['computed'] 229 | num_completed = counter['completed'] 230 | 231 | werkzeug = self.master_handler.database 232 | print('DB OPERATIONS: updates: %d, writes: %d, reads: %d' % (len(werkzeug.UPDATE_REQUESTS), len(werkzeug.WRITING_REQUESTS), len(werkzeug.READING_REQUESTS))) 233 | 234 | # progress string will consist of NUM_CHAR characters 235 | NUM_CHAR = 75 236 | 237 | progress_string = '' 238 | for index in range( int(NUM_CHAR * num_completed / len(statuses))): 239 | progress_string += '#' 240 | for index in range( int(NUM_CHAR * num_computed / len(statuses))): 241 | progress_string += '|' 242 | for index in range( int(NUM_CHAR * num_submitted / len(statuses))): 243 | progress_string += ':' 244 | for index in range( NUM_CHAR - len(progress_string)): 245 | progress_string += '.' 
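        # progress bar legend: '#' completed, '|' computed, ':' submitted, '.' pending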
246 | 247 | counter = self.task_handler.COUNTER 248 | print(len(statuses), num_new, num_submitted, num_computed, num_completed, \ 249 | '(#statuses, #new, #submitted, #computed, #completed)') 250 | 251 | content = open('log_run_time', 'a') 252 | content.write('%.3f\t%d\n' % (run_time, num_completed)) 253 | content.close() 254 | 255 | return progress_string 256 | 257 | 258 | #==================================================================== 259 | 260 | def filter_for_best_performing(self, options): 261 | 262 | import time 263 | 264 | start = time.time() 265 | all_entries = self.master_handler.get({}) 266 | 267 | print('\n\n') 268 | print('LEN OF ALL ENTRIES', len(all_entries)) 269 | print('... took', time.time() - start) 270 | print('\n\n') 271 | 272 | start = time.time() 273 | all_merits = self.merit_handler._get({}) 274 | print('\n\n') 275 | print('LEN OF ALL MERITS', len(all_merits)) 276 | print('... took', time.time() - start) 277 | print('\n\n') 278 | 279 | # get entries for which merit has been calculated 280 | relevant_entries = [] 281 | for entry in all_entries: 282 | if not entry['interest_score'] == 'invalid': 283 | relevant_entries.append(entry) 284 | 285 | # collect merit_id 286 | merit_ids = [entry['merit_id'] for entry in relevant_entries] 287 | 288 | # get merits for merit_ids 289 | merits = [] 290 | for merit_id in merit_ids: 291 | print('MERIT_ID', merit_id) 292 | start = time.time() 293 | if merit_id is None: continue 294 | try: 295 | merit_dict = self.merit_handler._get({'merit_id': merit_id})[0] 296 | except IndexError: 297 | time.sleep(1) 298 | merit_dict = self.merit_handler._get({'merit_id': merit_id})[0] 299 | merits.append(merit_dict['merit_value']['loss']) 300 | print('...', time.time() - start) 301 | merits = np.array(merits) 302 | 303 | print('\n\n') 304 | 305 | # select relevant merits 306 | sorting_indices = np.argsort(merits) 307 | relevant_indices = sorting_indices[:options['num_circuits']] 308 | irrelevant_indices = sorting_indices[options['num_circuits']:] 309 | 310 | for relevant_index in relevant_indices: 311 | self.master_handler.label_relevant(relevant_entries[relevant_index]) 312 | 313 | for irrelevant_index in irrelevant_indices: 314 | self.master_handler.label_irrelevant(relevant_entries[irrelevant_index]) 315 | 316 | #==================================================================== 317 | 318 | # User queries 319 | 320 | def get_circuits_from_task(self, task): 321 | 322 | try: 323 | task_set_id = task.task_set_id 324 | except AttributeError: 325 | task_set_id = task['task_set_id'] 326 | 327 | id_dicts_all = self.master_handler._get({'task_set_id': task_set_id}) 328 | id_dicts = [] 329 | for id_dict in id_dicts_all: 330 | if not id_dict['circuit_id'] is None and not id_dict['merit_id'] is None: 331 | id_dicts.append(id_dict) 332 | 333 | circuits_ids = [id_dict['circuit_id'] for id_dict in id_dicts] 334 | circuits = self.circuit_handler._get({}) 335 | circuits_raw = {circuit['circuit_id']: circuit for circuit in circuits} 336 | 337 | merit_ids = [id_dict['merit_id'] for id_dict in id_dicts] 338 | merits = self.merit_handler._get({}) 339 | merits_raw = {merit['merit_id']: merit for merit in merits} 340 | 341 | out_dicts = [] 342 | i = -1 343 | j = 0 344 | for id_dict in id_dicts: 345 | i += 1 346 | 347 | try: 348 | circuit = circuits_raw[id_dict['circuit_id']] 349 | merit = merits_raw[id_dict['merit_id']] 350 | except KeyError: 351 | print('Circuit {0} \t| Diff {1} \t| FAILED GETTING CIRCUIT OR MERIT'.format(i, i-j)) 352 | j = i 353 | continue 
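            # (the printed 'Diff' is the number of id_dicts processed since the
            # previous failure: i counts all entries, j marks the last failure)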
354 | 355 | out_dict = {'circuit': circuit, 'merit': merit} 356 | out_dicts.append(out_dict) 357 | return out_dicts 358 | 359 | 360 | def get_trajectories(self, task_set): 361 | try: 362 | task_set_id = task_set.task_set_id 363 | except AttributeError: 364 | task_set_id = task_set['task_set_id'] 365 | 366 | 367 | id_dicts_all = self.master_handler._get({'task_set_id': task_set_id}) 368 | id_dicts = [] 369 | for id_dict in id_dicts_all: 370 | if not id_dict['circuit_id'] is None and not id_dict['merit_id'] is None: 371 | id_dicts.append(id_dict) 372 | 373 | circuit_ids = [id_dict['circuit_id'] for id_dict in id_dicts] 374 | circuits = self.circuit_handler._get({}) 375 | circuits_raw = {circuit['circuit_id']: circuit for circuit in circuits} 376 | 377 | merit_ids = [id_dict['merit_id'] for id_dict in id_dicts] 378 | merits = self.merit_handler._get({}) 379 | merits_raw = {merit['merit_id']: merit for merit in merits} 380 | 381 | # sort id_dicts by execution index 382 | trajs = {} 383 | current_bests = {} 384 | recorded_task_ids = {} 385 | recorded_exec_ids = {} 386 | for id_dict_index, id_dict in enumerate(id_dicts): 387 | 388 | try: 389 | circuit = circuits_raw[id_dict['circuit_id']] 390 | merit = merits_raw[id_dict['merit_id']] 391 | except KeyError: 392 | print('!!', id_dict_index) 393 | continue 394 | 395 | if not 'sim_id' in circuit['circuit_values']: 396 | circuit['circuit_values']['sim_id'] = 0 397 | if not 'task_id_index' in circuit['circuit_values']: 398 | circuit['circuit_values']['task_id_index'] = 0 399 | 400 | sim_id = circuit['circuit_values']['sim_id'] 401 | task_id_index = circuit['circuit_values']['task_id_index'] 402 | if not sim_id in trajs: 403 | trajs[sim_id] = [] 404 | current_bests[sim_id] = np.inf 405 | recorded_task_ids[sim_id] = [] 406 | recorded_exec_ids[sim_id] = [] 407 | 408 | recorded_task_ids[sim_id].append(task_id_index) 409 | recorded_exec_ids[sim_id].append(id_dict['execution_index']) 410 | 411 | loss_value = merit['merit_value']['loss'] 412 | measurements = merit['measurements'] 413 | current_bests[sim_id] = loss_value 414 | 415 | circuit_out_dict = {key: circuit[key] for key in ['circuit_id', 'circuit_values', 'context_circuits']} 416 | merit_out_dict = {'merit_value': current_bests[sim_id], 'measurements': measurements} 417 | out_dict = {'circuit': circuit_out_dict, 'merit': merit_out_dict} 418 | trajs[sim_id].append(out_dict) 419 | 420 | 421 | short_trajs = {key: [] for key in trajs.keys()} 422 | for key, values in trajs.items(): 423 | 424 | max_task_id = -1 425 | collected_values = {} 426 | for index, element in enumerate(recorded_task_ids[key]): 427 | max_task_id = np.maximum(max_task_id, element) 428 | if not element in collected_values: 429 | collected_values[element] = [] 430 | collected_values[element].append(values[index]) 431 | 432 | for task_id in range(max_task_id + 1): 433 | try: 434 | current_values = collected_values[task_id] 435 | except: 436 | continue 437 | best_loss = np.inf 438 | for value in current_values: 439 | if value['merit']['merit_value'] < best_loss: 440 | best_loss = value['merit']['merit_value'] 441 | best_dict = value 442 | short_trajs[key].append(best_dict) 443 | 444 | return short_trajs 445 | 446 | 447 | def get_lbfgs_trajectories(self, task_set): 448 | return self._get_trajectories(task_set) 449 | 450 | 451 | def get_particle_swarms_trajectories(self, task_set): 452 | return self._get_trajectories(task_set) 453 | 454 | 455 | def list_computing_tasks(self): 456 | tasks = self.task_handler._get({}) 457 | task_sets = [] 458 
| task_names, task_ids = [], [] 459 | for task in tasks: 460 | if task['task_set_name'] in task_names or task['task_set_id'] in task_ids: continue 461 | task = {'task_set_name': task['task_set_name'], 'task_set_id': task['task_set_id']} 462 | task_names.append(task['task_set_name']) 463 | task_ids.append(task['task_set_id']) 464 | task_sets.append(task) 465 | return task_sets 466 | 467 | 468 | def refresh(self): 469 | self.task_handler.refresh() 470 | 471 | 472 | #==================================================================== 473 | 474 | def is_updating(self, db = 'master'): 475 | if db == 'master': 476 | werkzeug = self.master_handler.database 477 | # print('FOUND UPDATES', len(werkzeug.UPDATE_REQUESTS)) 478 | # print('FOUND WRITES', len(werkzeug.WRITING_REQUESTS)) 479 | # print('FOUND READS', len(werkzeug.READING_REQUESTS)) 480 | return len(werkzeug.UPDATE_REQUESTS) != 0 481 | 482 | 483 | #==================================================================== 484 | 485 | def synchronize(self, db = 'master'): 486 | if db == 'master': 487 | werkzeug = self.master_handler.database 488 | 489 | import time 490 | 491 | while True: 492 | num_updates = len(werkzeug.UPDATE_REQUESTS) 493 | num_writes = len(werkzeug.WRITING_REQUESTS) 494 | num_reads = len(werkzeug.READING_REQUESTS) 495 | limiting = np.amax([num_updates, num_writes, num_reads]) 496 | if limiting < 20: 497 | break 498 | else: 499 | print('## WAITING ##', num_updates, num_writes, num_reads) 500 | time.sleep(1.0) 501 | 502 | 503 | def print_pending_updates(self, iteration, db = 'master'): 504 | if db == 'master': 505 | werkzeug = self.master_handler.database 506 | # print('\t*******************') 507 | # print('\tPENDING UPDATES ...', len(werkzeug.UPDATE_REQUESTS)) 508 | # print('\tPENDING WRITES ...', len(werkzeug.WRITING_REQUESTS)) 509 | # print('\tPENDING READS ...', len(werkzeug.READING_REQUESTS)) 510 | # print('\t*******************') 511 | 512 | content = open('log_db_activity', 'a') 513 | content.write('%.3f\t%d\t%d\t%d\n' % (iteration, len(werkzeug.UPDATE_REQUESTS), len(werkzeug.WRITING_REQUESTS), len(werkzeug.READING_REQUESTS))) 514 | content.close() 515 | 516 | 517 | 518 | -------------------------------------------------------------------------------- /DatabaseHandler/db_werkzeug.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #======================================================================== 4 | 5 | import copy 6 | 7 | from Utilities.decorators import thread 8 | 9 | #======================================================================== 10 | 11 | class DB_Werkzeug(object): 12 | 13 | def __init__(self): 14 | pass 15 | 16 | 17 | def create_database(self, db_settings, db_attributes): 18 | 19 | self.db_settings = db_settings 20 | self.db_attributes = db_attributes 21 | if db_settings.db_type == 'sqlite': 22 | from DatabaseHandler import SQLiteDatabase 23 | try: 24 | self.database = SQLiteDatabase(db_settings.db_path, db_attributes, db_settings.db_name) 25 | except OSError: 26 | print('path to database %s does not exist:\n\t%s' % (db_settings.db_name, db_settings.db_path)) 27 | else: 28 | print('database type %s for database %s unknown' % (db_settings.db_type, db_settings.db_name)) 29 | 30 | 31 | def _get(self, condition): 32 | entries = self.db_fetch_all(condition) 33 | return entries 34 | 35 | 36 | def db_add(self, info_dict): 37 | try: 38 | self.database.add(info_dict) 39 | except AttributeError: 40 | info_dict_str = '' 41 | for key, item in 
info_dict.items(): 42 | info_dict_str = '%s:\t%s\n' % (str(key), str(item)) 43 | 44 | 45 | def db_fetch_all(self, condition_dict): 46 | try: 47 | return self.database.fetch_all(condition_dict) 48 | except OSError: 49 | condition_dict_str = '' 50 | for key, item in condition_dict.items(): 51 | condition_dict_str = '%s:\t%s\n' % (str(key), str(item)) 52 | 53 | 54 | @thread 55 | def db_fetch_async(self, condition_dict): 56 | self.CACHE = None 57 | self.CACHE = self.database.fetch_all(condition_dict) 58 | 59 | 60 | def collect_from_cache(self): 61 | while self.CACHE is None: 62 | pass 63 | cache = copy.deepcopy(self.CACHE) 64 | self.CACHE = None 65 | return cache 66 | 67 | 68 | def db_update_all(self, condition_dict, update_dict): 69 | try: 70 | self.database.update_all(condition_dict, update_dict) 71 | except OSError: 72 | condition_dict_str = '' 73 | for key, item in condition_dict.items(): 74 | condition_dict_str = '%s:\t%s\n' % (str(key), str(item)) 75 | 76 | -------------------------------------------------------------------------------- /DatabaseHandler/master_handler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import time 6 | 7 | from DatabaseHandler import DB_Werkzeug 8 | 9 | #==================================================== 10 | 11 | class MasterHandler(DB_Werkzeug): 12 | 13 | DB_ATTRIBUTES = {'task_set_id': 'string', 14 | 'task_id': 'string', 15 | 'circuit_id': 'string', 16 | 'merit_id': 'string', 17 | 'primer_index': 'integer', 18 | 'execution_index': 'integer', 19 | 'interest_score': 'string'} 20 | 21 | LINKED_LOSSES_AND_CIRCUITS = {} 22 | 23 | def __init__(self, db_settings): 24 | 25 | DB_Werkzeug.__init__(self) 26 | self.MASTER_DICT = {key: [] for key in self.DB_ATTRIBUTES} 27 | self.num_dict_entries = 0 28 | self.create_database(db_settings, self.DB_ATTRIBUTES) 29 | self.dict_busy = False 30 | 31 | 32 | def dict_add(self, info_dicts): 33 | self.dict_busy = True 34 | if not isinstance(info_dicts, list): 35 | info_dicts = [info_dicts] 36 | for info_dict in info_dicts: 37 | for key in self.DB_ATTRIBUTES: 38 | if key in info_dict: 39 | self.MASTER_DICT[key].append(info_dict[key]) 40 | else: 41 | self.MASTER_DICT[key].append(None) 42 | self.num_dict_entries += 1 43 | self.dict_busy = False 44 | 45 | 46 | def dict_update_all(self, conditions, updates): 47 | self.dict_busy = True 48 | 49 | if not isinstance(conditions, list): 50 | conditions = [conditions] 51 | updates = [updates] 52 | 53 | for index in range(self.num_dict_entries): 54 | for condition_index, condition in enumerate(conditions): 55 | update = updates[condition_index] 56 | for cond_key, cond_value in condition.items(): 57 | if self.MASTER_DICT[cond_key][index] != cond_value: 58 | break 59 | else: 60 | for up_key, up_value in update.items(): 61 | self.MASTER_DICT[up_key][index] = up_value 62 | self.dict_busy = False 63 | 64 | 65 | def get(self, condition): 66 | # attempt a recovery from the dictionary 67 | self.dict_busy = True 68 | return_dicts = [] 69 | for entry_index in range(self.num_dict_entries): 70 | 71 | if len(condition) == 0: 72 | return_dict = {key: self.MASTER_DICT[key][entry_index] for key in self.DB_ATTRIBUTES} 73 | return_dicts.append(return_dict) 74 | continue 75 | 76 | for cond_key, cond_value in condition.items(): 77 | if not self.MASTER_DICT[cond_key][entry_index] in cond_value: 78 | break 79 | else: 80 | return_dict = {key: self.MASTER_DICT[key][entry_index] for 
key in self.DB_ATTRIBUTES}
81 |                 return_dicts.append(return_dict)
82 | 
83 |         self.dict_busy = False
84 |         return return_dicts
85 | 
86 |     #======================================================
87 | 
88 | 
89 |     def add_tasks(self, info_dicts):
90 |         for info_dict in info_dicts:
91 |             info_dict['interest_score'] = 'n/a'
92 |         self.dict_add(info_dicts)
93 |         self.db_add(info_dicts)
94 | 
95 | 
96 |     def add_invalid_circuit(self, circuit_id, loss_id):
97 |         info_dict = {'circuit_id': circuit_id, 'merit_id': loss_id, 'interest_score': 'invalid'}  # losses are stored under 'merit_id', matching DB_ATTRIBUTES
98 |         self.dict_add(info_dict)
99 |         self.db_add(info_dict)
100 |         self.LINKED_LOSSES_AND_CIRCUITS[circuit_id] = [None, loss_id]
101 | 
102 | 
103 |     def add_invalid_circuits(self, circuit_ids, loss_ids):
104 |         info_dicts = []
105 |         for circuit_index, circuit_id in enumerate(circuit_ids):
106 |             try:
107 |                 loss_id = loss_ids[circuit_index]
108 |             except TypeError:
109 |                 continue
110 |             info_dict = {'circuit_id': circuit_id, 'merit_id': loss_id, 'interest_score': 'invalid'}
111 |             info_dicts.append(info_dict)
112 |         self.dict_add(info_dicts)
113 |         self.db_add(info_dicts)
114 | 
115 | 
116 |     def link_submission(self, task, circuit):
117 |         condition = {key: task[key] for key in ['task_id', 'primer_index', 'execution_index']}
118 |         while len(self.db_fetch_all(condition)) == 0:
119 |             time.sleep(0.01)
120 |         _ = self.db_fetch_all(condition)[0]
121 |         update = {'circuit_id': circuit['circuit_id']}
122 |         self.dict_update_all(condition, update)
123 |         self.db_update_all(condition, update)
124 | 
125 | 
126 |     def link_submissions(self, tasks, circuits):
127 | 
128 |         conditions, updates = [], []
129 |         for task_index, task in enumerate(tasks):
130 |             circuit = circuits[task_index]
131 |             condition = {key: task[key] for key in ['task_id', 'primer_index', 'execution_index']}
132 |             update = {'circuit_id': circuit['circuit_id']}
133 |             conditions.append(condition)
134 |             updates.append(update)
135 | 
136 |             self.LINKED_LOSSES_AND_CIRCUITS[circuit['circuit_id']] = [task['task_set_id'], None]
137 | 
138 |         self.dict_update_all(conditions, updates)
139 |         self.db_update_all(conditions, updates)
140 | 
141 | 
142 |     def link_losses_to_circuits(self, circuits, merits):
143 |         conditions, updates = [], []
144 |         for circuit_index, circuit in enumerate(circuits):
145 |             merit = merits[circuit_index]
146 |             condition = {'circuit_id': circuit['circuit_id']}
147 |             update = {'merit_id': merit['merit_id']}
148 |             conditions.append(condition)
149 |             updates.append(update)
150 | 
151 |             self.LINKED_LOSSES_AND_CIRCUITS[circuit['circuit_id']][1] = merit['merit_id']
152 | 
153 |         self.dict_update_all(conditions, updates)
154 |         self.db_update_all(conditions, updates)
155 | 
156 | 
157 |     def get_linked_losses_and_circuits(self, task_set_id):
158 |         identifier_sets = []
159 |         for circuit_id, values in self.LINKED_LOSSES_AND_CIRCUITS.items():
160 |             if values[0] == task_set_id:
161 |                 identifier_sets.append({'circuit_id': circuit_id, 'merit_id': values[1]})
162 |         return identifier_sets
163 | 
164 | 
165 |     def label_relevant(self, info_dict):
166 |         condition = {'merit_id': info_dict['merit_id']}
167 |         update = {'interest_score': 'relevant'}
168 |         self.dict_update_all(condition, update)
169 |         self.db_update_all(condition, update)
170 | 
171 | 
172 |     def label_irrelevant(self, info_dict):
173 |         condition = {'merit_id': info_dict['merit_id']}
174 |         update = {'interest_score': 'irrelevant'}
175 |         self.dict_update_all(condition, update)
176 |         self.db_update_all(condition, update)
177 | 
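    # A minimal sketch of the intended linking flow, for illustration only
    # (the id literals are hypothetical, and db_settings is assumed to be a
    # valid settings object as consumed by DB_Werkzeug.create_database):
    #
    #     handler = MasterHandler(db_settings)
    #     task = {'task_id': 't-0', 'task_set_id': 'ts-0', 'primer_index': 0, 'execution_index': 0}
    #     handler.add_tasks([task])                                   # row created with interest_score 'n/a'
    #     handler.link_submissions([task], [{'circuit_id': 'c-0'}])   # attach circuit_id to the task row
    #     handler.link_losses_to_circuits([{'circuit_id': 'c-0'}], [{'merit_id': 'm-0'}])
    #     handler.get_linked_losses_and_circuits('ts-0')              # -> [{'circuit_id': 'c-0', 'merit_id': 'm-0'}]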
-------------------------------------------------------------------------------- /DatabaseHandler/merit_handler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import uuid 6 | import numpy as np 7 | 8 | from DatabaseHandler import DB_Werkzeug 9 | 10 | #==================================================== 11 | 12 | class MeritHandler(DB_Werkzeug): 13 | 14 | DB_ATTRIBUTES = {'merit_id': 'string', 15 | 'merit_value': 'pickle', 16 | 'measurements': 'pickle',} 17 | 18 | ALL_MERITS = {} 19 | 20 | def __init__(self, db_settings): 21 | DB_Werkzeug.__init__(self) 22 | self.create_database(db_settings, self.DB_ATTRIBUTES) 23 | 24 | 25 | def add_losses_for_invalid_circuits(self, n_iter = 1): 26 | 27 | print('***************************************************') 28 | print('***************************************************') 29 | print('***************************************************') 30 | print('ADDING', n_iter, '\n'*5) 31 | 32 | info_dicts = [] 33 | for x_iter in range(n_iter): 34 | merit_id = str(uuid.uuid4()) 35 | info_dict = {'merit_id': merit_id, 'merit_value': np.nan} 36 | info_dicts.append(info_dict) 37 | self.ALL_MERITS[merit_id] = {'merit_id': merit_id, 'merit_value': np.nan, 'measurements': np.nan} 38 | self.db_add(info_dicts) 39 | 40 | 41 | def add_losses_for_circuits(self, circuits): 42 | 43 | print('#####################################') 44 | print('#####################################') 45 | print('#####################################') 46 | print('ADDING', len(circuits)) 47 | 48 | info_dicts = [] 49 | for circuit in circuits: 50 | merit_id = str(uuid.uuid4()) 51 | info_dict = {'merit_id': merit_id, 'merit_value': {'loss': circuit['loss']}, 'measurements': circuit['measurements']} 52 | info_dicts.append(info_dict) 53 | self.ALL_MERITS[merit_id] = {'merit_id': merit_id, 'merit_value': {'loss': circuit['loss']}, 'measurements': circuit['measurements']} 54 | self.db_add(info_dicts) 55 | return info_dicts 56 | 57 | 58 | def select_merits(self, merit_ids): 59 | try: 60 | merits = [self.ALL_MERITS[merit_id] for merit_id in merit_ids] 61 | except KeyError: 62 | return self.db_fetch_all({'merit_id': merit_ids}) 63 | return merits 64 | -------------------------------------------------------------------------------- /DatabaseHandler/sqlite_interface.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | __author__ = 'Florian Hase' 4 | 5 | #======================================================================== 6 | 7 | import os 8 | import uuid 9 | import time 10 | import copy 11 | import sqlalchemy as sql 12 | 13 | from DatabaseHandler import AddEntry, FetchEntries, UpdateEntries 14 | from Utilities.decorators import thread 15 | 16 | #======================================================================== 17 | 18 | class SQLiteDatabase(object): 19 | 20 | SQLITE_COLUMNS = {'bool': sql.Boolean(), 21 | 'float': sql.Float(), 22 | 'integer': sql.Integer(), 23 | 'pickle': sql.PickleType(), 24 | 'string': sql.String(512),} 25 | 26 | def __init__(self, path, attributes, name = 'table', verbosity = 0): 27 | 28 | self.WRITING_REQUESTS = [] 29 | self.READING_REQUESTS = {} 30 | self.UPDATE_REQUESTS = [] 31 | 32 | self.db_path = 'sqlite:///%s' % path 33 | self.attributes = attributes 34 | self.name = name 35 | 36 | # check if path exists 37 | dir_name = '/'.join(path.split('/')[:-1]) 38 | if not 
os.path.isdir(dir_name): 39 | os.makedirs(dir_name) 40 | 41 | # create database 42 | self.db = sql.create_engine(self.db_path) 43 | self.db.echo = False 44 | self.metadata = sql.MetaData(self.db) 45 | 46 | # create table in database 47 | self.table = sql.Table(self.name, self.metadata) 48 | for name, att_type in self.attributes.items(): 49 | self.table.append_column(sql.Column(name, self.SQLITE_COLUMNS[att_type])) 50 | self.table.create(checkfirst = True) 51 | 52 | # start request processor 53 | self._process_requests() 54 | 55 | #==================================================================== 56 | 57 | def _return_dict(function): 58 | def wrapper(self, *args, **kwargs): 59 | entries = function(self, *args, **kwargs) 60 | info_dicts = [{key: entry[key] for key in self.attributes} for entry in entries] 61 | return info_dicts 62 | return wrapper 63 | 64 | #==================================================================== 65 | 66 | @thread 67 | def _process_requests(self): 68 | self._processing_requests = True 69 | keep_processing = True 70 | iteration_index = 0 71 | while keep_processing: 72 | num_reading_requests = len(self.READING_REQUESTS) 73 | num_writing_requests = len(self.WRITING_REQUESTS) 74 | num_update_requests = len(self.UPDATE_REQUESTS) 75 | 76 | iteration_index += 1 77 | 78 | # run all reading request 79 | request_keys = copy.deepcopy(list(self.READING_REQUESTS.keys())) 80 | for request_key in request_keys: 81 | if not self.READING_REQUESTS[request_key].executed: 82 | self.READING_REQUESTS[request_key].execute() 83 | 84 | # run all update requests 85 | with self.db.connect() as conn: 86 | 87 | # run all update requests 88 | for update_index in range(num_update_requests): 89 | update_request = self.UPDATE_REQUESTS.pop(0) 90 | if isinstance(update_request.updates, list): 91 | for update in update_request.updates: 92 | has_updated = False 93 | while not has_updated: 94 | try: 95 | updated = conn.execute(update) 96 | has_updated = True 97 | except sql.exc.OperationalError: 98 | time.sleep(0.1) 99 | updated = conn.execute(update) 100 | else: 101 | has_updated = False 102 | while not has_updated: 103 | try: 104 | updated = conn.execute(update_request.updates) 105 | has_updated = True 106 | except sql.exc.OperationalError: 107 | time.sleep(0.1) 108 | updated = conn.execute(update_request.updates) 109 | 110 | # run all writing requests 111 | master_entry = [] 112 | for writing_index in range(num_writing_requests): 113 | writing_request = self.WRITING_REQUESTS.pop(0) 114 | if isinstance(writing_request.entry, list): 115 | master_entry.extend(writing_request.entry) 116 | else: 117 | master_entry.append(writing_request.entry) 118 | has_updated = False 119 | while not has_updated: 120 | try: 121 | conn.execute(self.table.insert(), master_entry) 122 | has_updated = True 123 | except sql.exc.OperationalError: 124 | time.sleep(0.1) 125 | 126 | conn.close() 127 | 128 | # clean reading requests 129 | request_keys = copy.deepcopy(list(self.READING_REQUESTS.keys())) 130 | delete_keys = [] 131 | for request_key in request_keys: 132 | if self.READING_REQUESTS[request_key].entries_fetched: 133 | delete_keys.append(request_key) 134 | for request_key in delete_keys: 135 | del self.READING_REQUESTS[request_key] 136 | 137 | keep_processing = len(self.WRITING_REQUESTS) > 0 or len(self.UPDATE_REQUESTS) > 0 or len(self.READING_REQUESTS) > 0 138 | self._processing_requests = False 139 | 140 | #==================================================================== 141 | 142 | 143 | def add(self, 
info_dict): 144 | if len(info_dict) == 0: return None 145 | 146 | add_entry = AddEntry(self.db, self.table, info_dict) 147 | self.WRITING_REQUESTS.append(add_entry) 148 | if not self._processing_requests: 149 | self._process_requests() 150 | 151 | 152 | @_return_dict 153 | def fetch_all(self, condition_dict): 154 | condition_keys = list(condition_dict.keys()) 155 | condition_values = list(condition_dict.values()) 156 | 157 | # define the selection 158 | selection = sql.select([self.table]) 159 | for index, key in enumerate(condition_keys): 160 | if isinstance(condition_values[index], list): 161 | # with a list, we need to combine all possibilities with _or 162 | if len(condition_values[index]) == 0: 163 | return [] 164 | filters = [getattr(self.table.c, key) == value for value in condition_values[index]] 165 | condition = sql.or_(*filters) 166 | else: 167 | condition = getattr(self.table.c, key) == condition_values[index] 168 | selection = selection.where(condition) 169 | 170 | fetch_entries = FetchEntries(self.db, self.table, selection, name = self.name) 171 | fetch_keys = str(uuid.uuid4()) 172 | self.READING_REQUESTS[fetch_keys] = fetch_entries 173 | if not self._processing_requests: 174 | self._process_requests() 175 | 176 | entries = fetch_entries.get_entries() 177 | return entries 178 | 179 | 180 | def update_all(self, condition_dict, update_dict): 181 | 182 | if isinstance(condition_dict, list): 183 | 184 | updates = [] 185 | for cond_dict_index, cond_dict in enumerate(condition_dict): 186 | 187 | up_dict = update_dict[cond_dict_index] 188 | condition_keys = list(cond_dict.keys()) 189 | condition_values = list(cond_dict.values()) 190 | 191 | update = sql.update(self.table).values(up_dict) 192 | for index, key in enumerate(condition_keys): 193 | update = update.where(getattr(self.table.c, key) == condition_values[index]) 194 | updates.append(update) 195 | 196 | else: 197 | 198 | condition_keys = list(condition_dict.keys()) 199 | condition_values = list(condition_dict.values()) 200 | 201 | update = sql.update(self.table).values(update_dict) 202 | for index, key in enumerate(condition_keys): 203 | update = update.where(getattr(self.table.c, key) == condition_values[index]) 204 | updates = update 205 | 206 | # submitting the update 207 | update_entries = UpdateEntries(self.db, self.table, updates) 208 | self.UPDATE_REQUESTS.append(update_entries) 209 | if not self._processing_requests: 210 | self._process_requests() 211 | 212 | 213 | 214 | 215 | 216 | -------------------------------------------------------------------------------- /DatabaseHandler/sqlite_operations.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | __author__ = 'Florian Hase' 4 | 5 | #======================================================================== 6 | 7 | import time 8 | import sqlalchemy as sql 9 | 10 | #======================================================================== 11 | 12 | class AddEntry(object): 13 | 14 | def __init__(self, database, table, entry): 15 | self.db = database 16 | self.table = table 17 | self.entry = entry 18 | 19 | def execute(self): 20 | start = time.time() 21 | with self.db.connect() as conn: 22 | conn.execute(self.table.insert(), self.entry) 23 | conn.close() 24 | end = time.time() 25 | 26 | #======================================================================== 27 | 28 | class FetchEntries(object): 29 | 30 | def __init__(self, database, table, selection, name = 'test'): 31 | self.db = database 32 | self.table = 
table
33 |         self.selection = selection
34 |         self.entries = None
35 |         self.executed = False
36 |         self.entries_fetched = False
37 |         self.name = name
38 | 
39 |     def execute(self):
40 |         start = time.time()
41 |         with self.db.connect() as conn:
42 |             selected = conn.execute(self.selection)
43 |             entries = selected.fetchall()
44 |             conn.close()
45 |         self.entries = entries
46 |         self.executed = True
47 |         end = time.time()
48 | 
49 |     def get_entries(self):
50 |         # block until execute() has populated the entries on the processing thread
51 |         while not self.executed:
52 |             time.sleep(0.001)  # brief sleep instead of a busy-spin that pegs a core
53 |         self.entries_fetched = True
54 |         return self.entries
55 | 
56 | #========================================================================
57 | 
58 | class UpdateEntries(object):
59 | 
60 |     def __init__(self, database, table, updates):
61 |         self.db = database
62 |         self.table = table
63 |         self.updates = updates
64 | 
65 |     def execute(self):
66 |         start = time.time()
67 |         if isinstance(self.updates, list):
68 |             with self.db.connect() as conn:
69 |                 for update in self.updates:
70 |                     updated = conn.execute(update)
71 |                 conn.close()
72 |         else:
73 | 
74 |             with self.db.connect() as conn:
75 |                 updated = conn.execute(self.updates)
76 |                 conn.close()
77 |         end = time.time()
78 | 
--------------------------------------------------------------------------------
/DatabaseHandler/task_handler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | #====================================================
4 | 
5 | from DatabaseHandler import DB_Werkzeug
6 | 
7 | #====================================================
8 | 
9 | class TaskHandler(DB_Werkzeug):
10 | 
11 |     DB_ATTRIBUTES = {'designer': 'string',
12 |                      'designer_options': 'pickle',
13 |                      'from_optimizer': 'bool',
14 |                      'observations': 'pickle',
15 |                      'use_library': 'bool',
16 |                      'computing_resource': 'string',
17 |                      'computing_options': 'pickle',
18 |                      'task_id': 'string',
19 |                      'task_set_id': 'string',
20 |                      'task_set_name': 'string',
21 |                      'task_status': 'string',
22 |                      'task_type': 'string',
23 |                      'primer_index': 'integer',
24 |                      'execution_index': 'integer',}
25 | 
26 |     TASK_SETS_COMPLETED = {}
27 |     TASK_SETS_COMPLETED_CHANGED = {}
28 |     TASK_SETS_REMAINING = {}
29 |     TASK_SETS_REMAINING_CHANGED = {}
30 |     NOT_YET_COMPLETED = {}
31 | 
32 |     def __init__(self, db_settings):
33 | 
34 |         DB_Werkzeug.__init__(self)
35 |         self.create_database(db_settings, self.DB_ATTRIBUTES)
36 |         self.REMAINING_TASKS = {}
37 |         self.COUNTER = {'statuses': 0, 'new': 0, 'submitted': 0, 'computed': 0, 'completed': 0}
38 | 
39 |     #====================================================================
40 | 
41 |     def refresh(self):
42 |         self.COUNTER = {'statuses': 0, 'new': 0, 'submitted': 0, 'computed': 0, 'completed': 0}
43 | 
44 | 
45 |     def add_tasks(self, info_dicts):
46 |         for info_dict in info_dicts:
47 |             info_dict['task_status'] = 'new'
48 |             if not 'from_optimizer' in info_dict:
49 |                 info_dict['from_optimizer'] = False
50 |             try:
51 |                 self.REMAINING_TASKS[info_dict['task_set_id']].append(info_dict)
52 |                 self.NOT_YET_COMPLETED[info_dict['task_set_id']].append(info_dict)
53 |             except KeyError:
54 |                 self.REMAINING_TASKS[info_dict['task_set_id']] = [info_dict]
55 |                 self.NOT_YET_COMPLETED[info_dict['task_set_id']] = [info_dict]
56 |             self.TASK_SETS_REMAINING_CHANGED[info_dict['task_set_id']] = True
57 |             self.COUNTER['statuses'] += 1
58 |             self.COUNTER['new'] += 1
59 |         self.db_add(info_dicts)
60 | 
61 | 
62 |     def fetch_remaining_tasks(self, task_set_id):
63 |         try:
64 |             return self.REMAINING_TASKS[task_set_id]
65 |         except KeyError:
66 |             condition = 
{'task_set_id': task_set_id, 'task_status': 'new'} 67 | entries = self.db_fetch_all(condition) 68 | return entries 69 | 70 | 71 | def task_set_completed(self, task_set_id): 72 | return len(self.NOT_YET_COMPLETED[task_set_id]) == 0 73 | 74 | 75 | #==================================================================== 76 | 77 | def report_circuit_submission(self): 78 | self.COUNTER['submitted'] += 1 79 | 80 | 81 | def report_circuit_computation(self): 82 | self.COUNTER['submitted'] -= 1 83 | 84 | 85 | def set_tasks_to_submitted(self, tasks): 86 | conditions = [] 87 | updates = [] 88 | for task in tasks: 89 | condition = {key: task[key] for key in ['task_id', 'primer_index', 'execution_index']} 90 | update = {'task_status': 'submitted'} 91 | conditions.append(condition) 92 | updates.append(update) 93 | self.TASK_SETS_REMAINING_CHANGED[task['task_set_id']] = True 94 | self.COUNTER['new'] -= 1 95 | self.COUNTER['submitted'] += 1 96 | 97 | remaining_tasks = self.REMAINING_TASKS[task['task_set_id']] 98 | for index, info_dict in enumerate(remaining_tasks): 99 | identical = True 100 | for key in ['task_id', 'primer_index', 'execution_index']: 101 | identical = identical and info_dict[key] == task[key] 102 | if identical: 103 | del self.REMAINING_TASKS[task['task_set_id']][index] 104 | break 105 | 106 | self.db_update_all(conditions, updates) 107 | 108 | 109 | def set_tasks_to_redundant(self, tasks): 110 | conditions = [] 111 | updates = [] 112 | for task in tasks: 113 | condition = {key: task[key] for key in ['task_id', 'primer_index', 'execution_index']} 114 | update = {'task_status': 'redundant'} 115 | conditions.append(condition) 116 | updates.append(update) 117 | self.TASK_SETS_REMAINING_CHANGED[task['task_set_id']] = True 118 | self.TASK_SETS_COMPLETED_CHANGED[task['task_set_id']] = True 119 | 120 | remaining_tasks = self.REMAINING_TASKS[task['task_set_id']] 121 | for index, info_dict in enumerate(remaining_tasks): 122 | identical = True 123 | for key in ['task_id', 'primer_index', 'execution_index']: 124 | identical = identical and info_dict[key] == task[key] 125 | if identical: 126 | del self.REMAINING_TASKS[task['task_set_id']][index] 127 | break 128 | 129 | not_yet_completed_tasks = self.NOT_YET_COMPLETED[task['task_set_id']] 130 | for index, info_dict in enumerate(not_yet_completed_tasks): 131 | identical = True 132 | for key in ['task_id', 'primer_index', 'execution_index']: 133 | identical = identical and info_dict[key] == task[key] 134 | if identical: 135 | del self.NOT_YET_COMPLETED[task['task_set_id']][index] 136 | break 137 | 138 | self.db_update_all(conditions, updates) 139 | 140 | 141 | def set_tasks_to_computed(self, tasks): 142 | conditions = [] 143 | updates = [] 144 | for task in tasks: 145 | condition = {key: task[key] for key in ['task_id', 'primer_index', 'execution_index']} 146 | update = {'task_status': 'computed'} 147 | conditions.append(condition) 148 | updates.append(update) 149 | self.TASK_SETS_REMAINING_CHANGED[task['task_set_id']] = True 150 | self.COUNTER['submitted'] -= 1 151 | self.COUNTER['computed'] += 1 152 | self.db_update_all(conditions, updates) 153 | 154 | 155 | def set_tasks_to_completed(self, tasks): 156 | conditions = [] 157 | updates = [] 158 | for task in tasks: 159 | condition = {key: task[key] for key in ['task_id', 'primer_index', 'execution_index']} 160 | update = {'task_status': 'completed'} 161 | conditions.append(condition) 162 | updates.append(update) 163 | self.TASK_SETS_REMAINING_CHANGED[task['task_set_id']] = True 164 | 
self.TASK_SETS_COMPLETED_CHANGED[task['task_set_id']] = True 165 | self.COUNTER['computed'] -= 1 166 | self.COUNTER['completed'] += 1 167 | 168 | not_yet_completed_tasks = self.NOT_YET_COMPLETED[task['task_set_id']] 169 | for index, info_dict in enumerate(not_yet_completed_tasks): 170 | identical = True 171 | for key in ['task_id', 'primer_index', 'execution_index']: 172 | identical = identical and info_dict[key] == task[key] 173 | if identical: 174 | del self.NOT_YET_COMPLETED[task['task_set_id']][index] 175 | break 176 | 177 | self.db_update_all(conditions, updates) 178 | 179 | #==================================================================== 180 | 181 | def check_resource_availability(self, info_dict): 182 | # get number of running jobs 183 | condition = {'task_set_id': info_dict['task_set_id'], 'task_status': 'submitted'} 184 | entries = self.db_fetch_all(condition) 185 | return len(entries) < info_dict['designer_options']['max_concurrent'] 186 | 187 | def get_num_available_resources(self, task_set): 188 | info_dict = task_set.generated_tasks[0] 189 | return info_dict['designer_options']['max_concurrent'] - self.COUNTER['submitted'] 190 | -------------------------------------------------------------------------------- /Designers/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from Designers.abstract_designer import AbstractDesigner 3 | 4 | try: 5 | from Designers.particle_swarm_designer import ParticleSwarmDesigner 6 | except ModuleNotFoundError: 7 | ParticleSwarmDesigner = None 8 | 9 | from Designers.random_designer import RandomDesigner 10 | from Designers.scipy_minimize_designer import ScipyMinimizeDesigner 11 | 12 | from Designers.circuit_designer import CircuitDesigner 13 | 14 | -------------------------------------------------------------------------------- /Designers/abstract_designer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import uuid 6 | 7 | import numpy as np 8 | 9 | #==================================================== 10 | 11 | # Factor to account for intrinsic junction capacitance 12 | CJFACTOR = 0 #junction capacitance is added automatically in simulation code 13 | 14 | class AbstractDesigner(object): 15 | 16 | def __init__(self, general_settings, param_settings, options): 17 | self.busy = False 18 | self.general = general_settings 19 | self.param_settings = param_settings 20 | self.c_specs = self.param_settings['c_specs'] 21 | self.j_specs = self.param_settings['j_specs'] 22 | self.l_specs = self.param_settings['l_specs'] 23 | self.phiOffs_specs = self.param_settings['phiOffs_specs'] 24 | self.has_inductances = self.l_specs is not None 25 | 26 | # copy method specific options 27 | for key, value in options.items(): 28 | setattr(self, key, value) 29 | self.options = options 30 | 31 | self.NEW_TASKS = [] 32 | self.OPTIMIZERS_FINISHED = False 33 | 34 | self.construct_bounds() 35 | 36 | 37 | def construct_bounds(self): 38 | # construct bounds for combined array (c, j, l) 39 | # CONVENTION: always report numbers in alphabetical order, i.e. 
c --> j --> l 40 | # No bounds added for inductors if 'l_specs' is set to None in param_settings 41 | 42 | self.bounds = [] 43 | for specs in [self.c_specs, self.j_specs, self.l_specs]: 44 | if specs != None: 45 | dim = specs['dimension'] 46 | # Case of 4-node circuit: add one element to account for forbidden connection 47 | if dim==9: 48 | dim += 1 49 | bounds = [(specs['low'], specs['high']) for _ in range(dim)] 50 | self.bounds += bounds 51 | self.bounds = np.array(self.bounds) 52 | 53 | 54 | def _construct_array_from_dict(self, obs_dict): 55 | 56 | # Get parameter arrays for junctions, capacitances, inductances 57 | try: 58 | j_arr = obs_dict['junctions']['samples'] 59 | if self.has_inductances: 60 | l_arr = obs_dict['inductances']['samples'] 61 | c_arr = obs_dict['capacities']['samples'] - j_arr * CJFACTOR 62 | except IndexError: 63 | j_arr = obs_dict['junctions'] 64 | if self.has_inductances: 65 | l_arr = obs_dict['inductances'] 66 | c_arr = obs_dict['capacities'] - j_arr * CJFACTOR 67 | 68 | # Make parameter array 69 | if self.has_inductances: 70 | param = np.concatenate([c_arr, j_arr, l_arr]) 71 | else: 72 | param = np.concatenate([c_arr, j_arr]) 73 | mask = np.ones(len(param)) 74 | mask[np.where(np.abs(param) < 1e-4)[0]] *= 0. 75 | return param, mask 76 | 77 | 78 | def _construct_dict_from_array(self, param, info_dict): 79 | param_vect = np.zeros(len(info_dict['x_init'])) 80 | param_vect[np.where(info_dict['x_mask'] > 0.)[0]] = param 81 | if self.has_inductances: 82 | k = len(param_vect) // 3 83 | c_arr = param_vect[:k] 84 | j_arr = param_vect[k : 2*k] 85 | l_arr = param_vect[2 * k:] 86 | else: 87 | k = len(param_vect) // 2 88 | c_arr = param_vect[:k] 89 | j_arr = param_vect[k:] 90 | l_arr = None 91 | c_arr += j_arr * CJFACTOR 92 | circuit = {'junctions': j_arr, 'capacities': c_arr, 'inductances': l_arr, 93 | 'sim_id': info_dict['sim_id'], 'task_id_index': info_dict['task_id_index'], 'circuit_id': str(uuid.uuid4())} 94 | if 'phiOffs' in info_dict: 95 | circuit['phiOffs'] = info_dict['phiOffs'] 96 | return circuit 97 | 98 | 99 | def _design_random_circuit(self): 100 | 101 | mask = np.ones(self.j_specs['dimension']) 102 | indices = np.arange(len(mask)) 103 | np.random.shuffle(indices) 104 | mask[indices[:self.j_specs['dimension']-self.j_specs['keep_num']]] *= 0. 105 | 106 | # Draw junctions 107 | junctions = np.random.uniform(self.j_specs['low'], self.j_specs['high'], self.j_specs['dimension']) 108 | junctions *= mask 109 | # Case of 4-node circuit: add zero at forbidden connection 2-4 110 | if self.j_specs['dimension']==9: 111 | junctions = np.insert(junctions, 6, 0) 112 | 113 | # Draw capacitances 114 | capacities = np.random.uniform(self.c_specs['low'], self.c_specs['high'], self.c_specs['dimension']) 115 | capacities = capacities * (np.random.uniform(0., 1., self.c_specs['dimension']) < self.c_specs['keep_prob']) 116 | # Case of 4-node circuit: add zero at forbidden connection 2-4 117 | if self.c_specs['dimension']==9: 118 | capacities = np.insert(capacities, 6, 0) 119 | capacities += junctions * CJFACTOR 120 | 121 | # Draw inductances 122 | if self.l_specs != None: 123 | inductances = np.random.uniform(self.l_specs['low'], self.l_specs['high'], self.l_specs['dimension']) 124 | inductances = inductances * (np.random.uniform(0., 1., self.l_specs['dimension']) < self.l_specs['keep_prob']) 125 | # Case of 4-node circuit: add zero at forbidden connection 2-4 126 | if self.l_specs['dimension']==9: 127 | inductances = np.insert(inductances, 6, 0) 128 | inductances[junctions > 0] = 0. 
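            # NOTE: an edge cannot host both a junction and an inductor, so the
            # line above zeroes any inductance drawn on a junction-occupied edge.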
129 | else: 130 | inductances = None 131 | 132 | # draw flux offsets for loops 133 | if self.phiOffs_specs is not None: 134 | phiOffs = np.random.choice(self.phiOffs_specs['values'], self.phiOffs_specs['dimension']) 135 | else: 136 | phiOffs = None 137 | 138 | circuit = {'junctions': junctions, 'capacities': capacities, 'inductances': inductances, 'phiOffs': phiOffs} 139 | return circuit 140 | 141 | 142 | def is_busy(self): 143 | return self.busy 144 | 145 | def set_busy(self): 146 | self.busy = True 147 | 148 | def set_available(self): 149 | self.busy = False 150 | 151 | def _draw_circuit(self): 152 | pass 153 | 154 | 155 | def _submit(self, *args, **kwargs): 156 | job_id = kwargs['job_id'] 157 | condition_file = '%s/conditions_%s.pkl' % (self.general.scratch_dir, job_id) 158 | self._draw_circuit(condition_file) 159 | return condition_file -------------------------------------------------------------------------------- /Designers/circuit_designer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import copy 5 | import time 6 | import uuid 7 | import pickle 8 | 9 | from Utilities import FileLogger 10 | from Utilities.decorators import thread 11 | 12 | #======================================================================== 13 | 14 | class CircuitDesigner(object): 15 | 16 | # declare containers 17 | 18 | ACTIVE_DESIGNERS = {} 19 | FILE_LOGGERS = {} 20 | OBSERVATION_CONTAINER = {} 21 | DESIGNED_CIRCUITS = [] 22 | 23 | def __init__(self, settings_general, settings_params): 24 | 25 | self.designers = {} 26 | self.settings_general = settings_general 27 | self.settings_params = settings_params 28 | 29 | 30 | def add_designer(self, name_id, keyword, options): 31 | if keyword == 'particle_swarms': 32 | from Designers import ParticleSwarmDesigner as SelectedDesigner 33 | elif keyword == 'scipy': 34 | from Designers import ScipyMinimizeDesigner as SelectedDesigner 35 | elif keyword == 'random': 36 | from Designers import RandomDesigner as SelectedDesigner 37 | else: 38 | raise NotImplementedError() 39 | 40 | if SelectedDesigner is None: 41 | print('# FATAL | ... could not import %s designer; please install the required package ...' 
% keyword) 42 | import sys 43 | sys.exit() 44 | 45 | self.designers[name_id] = SelectedDesigner(self.settings_general, self.settings_params, options, kind = keyword) 46 | 47 | 48 | def is_busy(self, task_set_dict): 49 | name_id = task_set_dict.settings['name'] 50 | return self.designers[name_id].is_busy() 51 | 52 | 53 | def get_circuits(self): 54 | circuits = copy.deepcopy(self.DESIGNED_CIRCUITS) 55 | for circuit in circuits: 56 | self.DESIGNED_CIRCUITS.pop(0) 57 | return circuits 58 | 59 | 60 | def _parse_new_circuits(self, conditions_file): 61 | 62 | # for windows machines 63 | conditions_file = conditions_file.replace('\\', '/') 64 | 65 | # parse the job_id and stop file logger 66 | job_id = conditions_file.split('_')[-1].split('.')[0] 67 | self.FILE_LOGGERS[job_id].stop() 68 | 69 | # save conditions 70 | try: 71 | conditions = pickle.load(open(conditions_file, 'rb')) 72 | except EOFError: 73 | time.sleep(1) 74 | conditions = pickle.load(open(conditions_file, 'rb')) 75 | if len(conditions[0]) > 0: 76 | for condition in conditions: 77 | condition_dict = {'circuit_values': condition} 78 | self.DESIGNED_CIRCUITS.append(condition_dict) 79 | 80 | # clean up 81 | os.remove(conditions_file) 82 | self.designers[self.ACTIVE_DESIGNERS[job_id]].set_available() 83 | del self.ACTIVE_DESIGNERS[job_id] 84 | del self.FILE_LOGGERS[job_id] 85 | 86 | 87 | def get_requested_tasks(self, task_set): 88 | name_id = task_set.settings['name'] 89 | designer = self.designers[name_id] 90 | new_tasks = copy.deepcopy(designer.NEW_TASKS) 91 | for new_task in new_tasks: 92 | designer.NEW_TASKS.pop(0) 93 | return new_tasks 94 | 95 | 96 | @thread 97 | def provide_observations(self, task_set, observations): 98 | self.OBSERVATION_CONTAINER[task_set.settings['name']] = observations 99 | designer = self.designers[task_set.settings['name']] 100 | for observation in observations: 101 | if not 'circuit_id' in observation: continue 102 | if isinstance(observation['circuit_id'], dict): 103 | observation['circuit_id'] = observation['circuit_id']['samples'] 104 | designer.RECEIVED_OBSERVATIONS[observation['circuit_id']] = observation 105 | 106 | 107 | def designer_terminated(self, task_set): 108 | if hasattr(self.designers[task_set.settings['name']], 'SCIPY_OPTIMIZERS_FINISHED'): 109 | result = True 110 | for index, element in self.designers[task_set.settings['name']].SCIPY_OPTIMIZERS_FINISHED.items(): 111 | result = result and element 112 | return result 113 | if hasattr(self.designers[task_set.settings['name']], 'PS_OPTIMIZERS_FINISHED'): 114 | result = True 115 | for index, element in self.designers[task_set.settings['name']].PS_OPTIMIZERS_FINISHED.items(): 116 | result = result and element 117 | return result 118 | else: 119 | return self.designers[task_set.settings['name']].OPTIMIZERS_FINISHED 120 | 121 | 122 | @thread 123 | def design_new_circuits(self, task_set, observations = None, tasks = None): 124 | start = time.time() 125 | if observations: self.provide_observations(task_set, observations) 126 | 127 | # reserve designer 128 | name_id = task_set.settings['name'] 129 | designer = self.designers[name_id] 130 | designer.set_busy() 131 | 132 | # create circuit listener 133 | job_id = str(uuid.uuid4()) 134 | self.ACTIVE_DESIGNERS[job_id] = name_id 135 | file_logger = FileLogger(action = self._parse_new_circuits, path = self.settings_general.scratch_dir, pattern = '*conditions*%s*' % job_id) 136 | self.FILE_LOGGERS[job_id] = file_logger 137 | file_logger.start() 138 | 139 | # submit circuit 140 | conditions_file = 
designer.submit(job_id = job_id, task_set = task_set, observations = observations, tasks = tasks) 141 | 142 | end = time.time() 143 | content = open('TIME_design_submission', 'a') 144 | content.write('%.5f\t%d\n' % (end - start, len(observations))) 145 | content.close() 146 | -------------------------------------------------------------------------------- /Designers/design_utils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aspuru-guzik-group/SCILLA/f0291082990e0f99f8f9abf41ec405427cfe3752/Designers/design_utils.py -------------------------------------------------------------------------------- /Designers/particle_swarm_designer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import copy 6 | import time 7 | import uuid 8 | import pickle 9 | import threading 10 | import numpy as np 11 | import pyswarms as ps 12 | 13 | np.set_printoptions(precision = 3) 14 | 15 | from Utilities.decorators import thread, process, delayed 16 | from Designers import AbstractDesigner 17 | 18 | #==================================================== 19 | 20 | class ParticleSwarmDesigner(AbstractDesigner): 21 | 22 | PROPOSED_CIRCUITS = [] 23 | RECEIVED_OBSERVATIONS = {} 24 | TASK_ID_EXECUTION = {} 25 | PS_OPTIMIZERS_FINISHED = {} 26 | 27 | n_particles = 2 28 | social_options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9} 29 | 30 | 31 | def __init__(self, general_settings, param_settings, options, *args, **kwargs): 32 | 33 | AbstractDesigner.__init__(self, general_settings, param_settings, options) 34 | self.running_instance_ids = [] 35 | self.optimizers = {} 36 | self.sim_info_dicts = {} 37 | 38 | 39 | def _create_loss_wrapper(self, sim_id, task_dict): 40 | def _loss_wrapper(x_batch_squeezed): 41 | info_dict = self.sim_info_dicts[sim_id] 42 | current_task_id = info_dict['remaining_tasks'][0] 43 | proposed_circuit_ids = [] 44 | 45 | # look at each point and broaden the parameters 46 | for x_index, x_squeezed in enumerate(x_batch_squeezed): 47 | circuit = self._construct_dict_from_array(x_squeezed, info_dict) 48 | proposed_circuit_ids.append(circuit['circuit_id']) 49 | self.PROPOSED_CIRCUITS.append(circuit) 50 | 51 | # assemble a new task 52 | if info_dict['observation_index'] > 0 or len(info_dict['remaining_tasks']) < 2 * len(task_dict) or True: 53 | new_task = copy.deepcopy(task_dict[current_task_id]) 54 | new_task['execution_index'] = len(x_batch_squeezed) * len(info_dict['remaining_tasks']) + x_index 55 | new_task['primer_index'] = info_dict['observation_index'] 56 | new_task['from_optimizer'] = True 57 | self.NEW_TASKS.append(new_task) 58 | 59 | info_dict['remaining_tasks'].pop(0) 60 | 61 | # catch losses 62 | loss_batch = np.zeros(len(proposed_circuit_ids)) - np.inf 63 | while len(np.where(loss_batch < -10**300)[0]) > 0.: 64 | 65 | for proposed_circuit_index, proposed_circuit_id in enumerate(proposed_circuit_ids): 66 | if proposed_circuit_id in self.RECEIVED_OBSERVATIONS: 67 | loss = self.RECEIVED_OBSERVATIONS[proposed_circuit_id]['loss'] 68 | loss_batch[proposed_circuit_index] = loss 69 | del self.RECEIVED_OBSERVATIONS[proposed_circuit_id] 70 | 71 | loss_batch = np.where(np.isnan(loss_batch), 10**6, loss_batch) 72 | 73 | self.sim_info_dicts[sim_id]['task_id_index'] += 1 74 | return loss_batch 75 | return _loss_wrapper 76 | 77 | 78 | def prepare_optimizer_instance(self, task_ids, observation_index, observation = None): 79 
| 80 | if observation: 81 | x_init, x_mask = self._construct_array_from_dict(observation) 82 | if 'phiOffs' in observation: 83 | phiOffs = observation['phiOffs']['samples'] 84 | else: 85 | phiOffs = None 86 | else: 87 | circuit = self._design_random_circuit() 88 | x_init, x_mask = self._construct_array_from_dict(circuit) 89 | if 'phiOffs' in circuit: 90 | phiOffs = circuit['phiOffs'] 91 | else: 92 | phiOffs = None 93 | x_init_squeezed = x_init[np.where(x_mask > 0.)[0]] 94 | 95 | # create bounds 96 | bounds_squeezed = self.bounds[np.where(x_mask > 0.)[0]] 97 | lower_squeezed = np.array([bounds_squeezed[i][0] for i in range(len(bounds_squeezed))]) 98 | upper_squeezed = np.array([bounds_squeezed[i][1] for i in range(len(bounds_squeezed))]) 99 | bounds_collection = (lower_squeezed, upper_squeezed) 100 | 101 | remaining_tasks = [] 102 | for task_id in task_ids: 103 | remaining_tasks.extend([task_id, task_id]) 104 | 105 | # assemble simulation 106 | sim_id = str(uuid.uuid4()) 107 | sim_info_dict = {'x_init': x_init, 'x_init_squeezed': x_init_squeezed, 'x_mask': x_mask, 108 | 'bounds_squeezed': bounds_squeezed, 'bounds_collection': bounds_collection, 109 | 'lower_squeezed': lower_squeezed, 'upper_squeezed': upper_squeezed, 110 | 'sim_id': sim_id, 'observation_index': observation_index, 'task_id_index': 0, 111 | 'remaining_tasks': copy.deepcopy(remaining_tasks)} 112 | if phiOffs is not None: 113 | sim_info_dict['phiOffs'] = phiOffs 114 | self.sim_info_dicts[sim_id] = sim_info_dict 115 | self.running_instance_ids.append(sim_info_dict) 116 | 117 | 118 | # NOTE: This needs to be a thread, not a process (!) 119 | @thread 120 | def run_optimizer(self, optimizer, loss_wrapper, max_iter, sim_id, **kwargs): 121 | optimizer.optimize(loss_wrapper, iters = max_iter, verbose = 1, print_step = 1) 122 | self.PS_OPTIMIZERS_FINISHED[sim_id] = True 123 | 124 | 125 | def initialize_optimizers(self, task_set, observations): 126 | print('# LOG | ... initializing particle swarms optimizer (%d) ...' % len(observations)) 127 | settings = task_set.settings['designer_options'] 128 | task_dict = {task['task_id']: task for task in task_set.generated_tasks} 129 | task_ids = [task['task_id'] for task in task_set.generated_tasks] 130 | 131 | # generate one optimizer for each observation 132 | if len(observations) == 0: 133 | self.prepare_optimizer_instance(task_ids, 0) 134 | else: 135 | for observation_index, observation in enumerate(observations): 136 | self.prepare_optimizer_instance(task_ids, observation_index, observation = observation) 137 | 138 | for sim_info_dict_index, sim_info_dict in enumerate(self.running_instance_ids): 139 | sim_id = sim_info_dict['sim_id'] 140 | print('# LOG | ... 
starting %d (%d) particle swarms optimizer' % (sim_info_dict_index + 1, len(self.sim_info_dicts))) 141 | 142 | init_pos = np.array([sim_info_dict['x_init_squeezed'] for i in range(self.n_particles)]) 143 | 144 | # Perturb initial positions of particles (except keep first instance at sampled parameters) 145 | init_pos_mod = np.copy(init_pos) 146 | init_pos_mod += np.random.normal(0., 0.1 * (sim_info_dict['upper_squeezed'] - sim_info_dict['lower_squeezed']), size = init_pos.shape) 147 | init_pos_mod[0] = init_pos[0] 148 | init_pos_mod = np.minimum(init_pos_mod, sim_info_dict['upper_squeezed']) 149 | init_pos_mod = np.maximum(init_pos_mod, sim_info_dict['lower_squeezed']) 150 | 151 | optimizer = ps.single.GlobalBestPSO(n_particles = self.n_particles, dimensions = len(sim_info_dict['x_init_squeezed']), 152 | options = self.social_options, bounds = sim_info_dict['bounds_collection'], init_pos = init_pos_mod) 153 | # create loss wrapper for optimizer 154 | loss_wrapper = self._create_loss_wrapper(sim_id, task_dict) 155 | 156 | self.PS_OPTIMIZERS_FINISHED[sim_id] = False 157 | if 'init' in self.PS_OPTIMIZERS_FINISHED: 158 | del self.PS_OPTIMIZERS_FINISHED['init'] 159 | self.run_optimizer(optimizer, loss_wrapper, settings['max_iters'], sim_id) 160 | 161 | content = open('LOG', 'a') 162 | content.write('starting optimizer for %s (%d)\n' % (sim_id, len(self.running_instance_ids))) 163 | content.close() 164 | 165 | 166 | @thread 167 | def _draw_circuit(self, condition_file): 168 | if len(self.PROPOSED_CIRCUITS) > 0: 169 | proposed_circuit = self.PROPOSED_CIRCUITS.pop(0) 170 | else: 171 | proposed_circuit = {} 172 | pickle.dump([proposed_circuit], open(condition_file, 'wb')) 173 | 174 | 175 | @thread 176 | def _control_optimizers(self, task_set, observations): 177 | # check if optimizers are initialized 178 | if len(self.running_instance_ids) == 0: 179 | self.initialize_optimizers(task_set, observations) 180 | else: 181 | for observation in observations: 182 | if not 'circuit_id' in observation: continue 183 | if isinstance(observation['circuit_id'], dict): 184 | observation['circuit_id'] = observation['circuit_id']['samples'] 185 | 186 | self.RECEIVED_OBSERVATIONS[observation['circuit_id']] = observation 187 | 188 | 189 | def submit(self, *args, **kwargs): 190 | task_set = kwargs['task_set'] 191 | observations = kwargs['observations'] 192 | self._control_optimizers(task_set, observations) 193 | return self._submit(*args, **kwargs) 194 | 195 | 196 | #==================================================== 197 | 198 | 199 | 200 | -------------------------------------------------------------------------------- /Designers/random_designer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import uuid 6 | import numpy as np 7 | import pickle 8 | import threading 9 | import time 10 | 11 | from Designers import AbstractDesigner 12 | from Utilities.decorators import thread, delayed 13 | 14 | #==================================================== 15 | 16 | class RandomDesigner(AbstractDesigner): 17 | 18 | batch_size = 1 19 | 20 | def __init__(self, general_settings, param_settings, options, *args, **kwargs): 21 | # likely that we need to pass variable settings to designers 22 | AbstractDesigner.__init__(self, general_settings, param_settings, options) 23 | 24 | # random search never requests new tasks 25 | self.OPTIMIZERS_FINISHED = True 26 | 27 | 28 | @thread 29 | @delayed(0.1) 30 | 
def _draw_circuit(self, condition_file): 31 | 32 | drawn_circuits = [] 33 | for batch_iteration in range(self.batch_size): 34 | 35 | circuit = self._design_random_circuit() 36 | drawn_circuits.append(circuit) 37 | 38 | with open(condition_file, 'wb') as content: 39 | pickle.dump(drawn_circuits, content) 40 | 41 | 42 | def submit(self, *args, **kwargs): 43 | return self._submit(**kwargs) 44 | -------------------------------------------------------------------------------- /Designers/scipy_minimize_designer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #==================================================== 4 | 5 | import sys 6 | import copy 7 | import time 8 | import uuid 9 | import pickle 10 | import threading 11 | import numpy as np 12 | 13 | from scipy.optimize import minimize as sp_minimize 14 | 15 | from Designers import AbstractDesigner 16 | from Utilities.decorators import thread, process, delayed 17 | 18 | np.set_printoptions(precision = 3) 19 | 20 | #==================================================== 21 | 22 | class ScipyMinimizeDesigner(AbstractDesigner): 23 | 24 | PROPOSED_CIRCUITS = [] 25 | RECEIVED_OBSERVATIONS = {} 26 | TASK_ID_EXECUTION = {} 27 | SCIPY_OPTIMIZERS_FINISHED = {} 28 | 29 | def __init__(self, general_settings, param_settings, options, method = 'L-BFGS-B', *args, **kwargs): 30 | 31 | AbstractDesigner.__init__(self, general_settings, param_settings, options) 32 | self.method = method 33 | self.running_instance_ids = [] 34 | self.sim_info_dicts = {} 35 | 36 | 37 | def _create_loss_wrapper(self, sim_id, task_dict): 38 | 39 | def _loss_wrapper(x_squeezed): 40 | 41 | info_dict = self.sim_info_dicts[sim_id] 42 | info_dict['num_executed'] += 1 43 | execution_index = info_dict['num_executed'] 44 | current_task_id = info_dict['task_ids'][info_dict['task_id_index']] 45 | 46 | assert(info_dict['sim_id'] == sim_id) 47 | 48 | circuit = self._construct_dict_from_array(x_squeezed, info_dict) 49 | proposed_circuit_id = circuit['circuit_id'] 50 | self.PROPOSED_CIRCUITS.append(circuit) 51 | 52 | start = time.time() 53 | loss = np.inf 54 | while np.isinf(loss): 55 | # wait for response 56 | for wait_iter in range(10000): # corresponds to a maximum wait time of 100 s 57 | if proposed_circuit_id in self.RECEIVED_OBSERVATIONS: 58 | loss = self.RECEIVED_OBSERVATIONS[proposed_circuit_id]['loss'] 59 | if np.isinf(loss): 60 | loss = 10**6 61 | del self.RECEIVED_OBSERVATIONS[proposed_circuit_id] 62 | break 63 | time.sleep(0.01) 64 | else: 65 | # submit circuit again in case we missed it 66 | self.PROPOSED_CIRCUITS.append(circuit) 67 | loss = 10**6 68 | end = time.time() 69 | 70 | if np.isnan(loss): loss = 10**6 71 | 72 | # assemble new task - needed for any designer that spawns new task 73 | new_task = copy.deepcopy(task_dict[current_task_id]) 74 | new_task['execution_index'] = execution_index 75 | new_task['primer_index'] = info_dict['observation_index'] 76 | new_task['from_optimizer'] = True 77 | self.NEW_TASKS.append(new_task) 78 | 79 | return loss 80 | return _loss_wrapper 81 | 82 | 83 | def prepare_optimizer_instance(self, task_ids, observation_index, observation = None): 84 | 85 | # create initial position 86 | if observation: 87 | x_init, x_mask = self._construct_array_from_dict(observation) 88 | else: 89 | circuit = self._design_random_circuit() 90 | x_init, x_mask = self._construct_array_from_dict(circuit) 91 | x_init_squeezed = x_init[np.where(x_mask > 0.)[0]] 92 | 93 | # create bounds 94 | bounds_squeezed = 
self.bounds[np.where(x_mask > 0.)[0]] 95 | 96 | # assemble simulation 97 | sim_id = str(uuid.uuid4()) 98 | sim_info_dict = {'x_init': x_init, 'x_init_squeezed': x_init_squeezed, 'x_mask': x_mask, 99 | 'bounds_squeezed': bounds_squeezed, 100 | 'sim_id': sim_id, 'task_id_index': 0, 101 | 'task_ids': task_ids, 'observation_index': observation_index, 'num_executed': 0} 102 | 103 | self.sim_info_dicts[sim_id] = copy.deepcopy(sim_info_dict) 104 | self.running_instance_ids.append(sim_info_dict) 105 | 106 | 107 | @thread 108 | def run_optimizer(self, loss_wrapper, sim_id, init_pos, bounds, max_iter, **kwargs): 109 | def local_callback(_): 110 | self.sim_info_dicts[sim_id]['task_id_index'] += 1 111 | res = sp_minimize(loss_wrapper, init_pos, method = self.method, bounds = bounds, options = {'maxiter': max_iter}, callback = local_callback) 112 | content = open('TIME_res_report', 'a') 113 | content.write('completed %s\n' % sim_id) 114 | for prop in dir(res): 115 | try: 116 | content.write('%s\t%s\n' % (prop, str(getattr(res, prop)))) 117 | except: 118 | pass 119 | content.write('===============\n') 120 | content.close() 121 | self.SCIPY_OPTIMIZERS_FINISHED[sim_id] = True 122 | 123 | 124 | def initialize_optimizers(self, task_set, observations): 125 | print('# LOG | ... initializing {0} optimizer ({1}) ...'.format(self.method, len(observations))) 126 | settings = task_set.settings['designer_options'] 127 | 128 | task_dict = {task['task_id']: task for task in task_set.generated_tasks} 129 | task_ids = [task['task_id'] for task in task_set.generated_tasks] 130 | 131 | # generate optimizer for each observation 132 | if len(observations) == 0: 133 | self.prepare_optimizer_instance(task_ids, 0) 134 | else: 135 | for observation_index, observation in enumerate(observations): 136 | self.prepare_optimizer_instance(task_ids, observation_index, observation = observation) 137 | 138 | for sim_info_dict_index, sim_info_dict in enumerate(self.running_instance_ids): 139 | sim_id = sim_info_dict['sim_id'] 140 | print('# LOG | ... 
starting {0} ({1}) {2} optimizer {3}'.format(sim_info_dict_index + 1, len(self.sim_info_dicts), self.method, sim_info_dict['observation_index'])) 141 | 142 | # create loss wrapper and submit optimizer on separate thread 143 | loss_wrapper = self._create_loss_wrapper(sim_id, task_dict) 144 | 145 | self.SCIPY_OPTIMIZERS_FINISHED[sim_id] = False 146 | if 'init' in self.SCIPY_OPTIMIZERS_FINISHED: 147 | del self.SCIPY_OPTIMIZERS_FINISHED['init'] 148 | self.run_optimizer(loss_wrapper, sim_id, sim_info_dict['x_init_squeezed'], sim_info_dict['bounds_squeezed'], settings['max_iters']) 149 | 150 | 151 | @thread 152 | def _draw_circuit(self, condition_file, **kwargs): 153 | proposed_circuits = [] 154 | copied_circuits = copy.deepcopy(self.PROPOSED_CIRCUITS) 155 | for copied_circuit in copied_circuits: 156 | proposed_circuits.append(self.PROPOSED_CIRCUITS.pop(0)) 157 | if len(proposed_circuits) == 0: 158 | proposed_circuits = [{}] 159 | pickle.dump(proposed_circuits, open(condition_file, 'wb')) 160 | 161 | 162 | @thread 163 | def _control_optimizers(self, task_set, observations): 164 | # check if optimizers are initialized 165 | 166 | if len(self.running_instance_ids) == 0: 167 | self.initialize_optimizers(task_set, observations) 168 | 169 | 170 | def submit(self, *args, **kwargs): 171 | task_set = kwargs['task_set'] 172 | observations = copy.deepcopy(kwargs['observations']) 173 | self._control_optimizers(task_set, observations) 174 | 175 | # get number of optimizers which have completed 176 | comp, not_comp = 0, 0 177 | for key, has_finished in self.SCIPY_OPTIMIZERS_FINISHED.items(): 178 | if has_finished: 179 | comp += 1 180 | else: 181 | not_comp += 1 182 | 183 | return self._submit(*args, **kwargs) 184 | 185 | 186 | #==================================================== 187 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SCILLA 2 | 3 | SCILLA is a software for automated discovery of superconducting circuits. 4 | Its goal is to facilitate hardware design for quantum information processing applications. 
5 | Starting from a desired target property for the circuit, it provides a closed-loop implementation of circuit design, property computation, and merit evaluation that searches the design space and identifies promising circuits.
6 | The software and its scientific application are described in Ref. [1].
7 | Implementation details and examples are provided in the supplementary information of the manuscript.
8 | 
9 | The script `main_benchmark.py` is provided as an example and benchmark of SCILLA.
10 | It searches the space of 2-node superconducting circuits for a flux spectrum that matches that of the capacitively shunted flux qubit.
11 | The script is executed with the following command:
12 | ```bash
13 | python main_benchmark.py
14 | ```
15 | 
16 | 
17 | ### Requirements
18 | 
19 | This code has been tested with Python 3.6 on Unix platforms.
20 | The required packages are listed in the `environment.yml` file.
21 | 
22 | 
23 | ### Disclaimer
24 | 
25 | _This repository is under construction._ We hope to add further details on the method, instructions, and more examples in the near future.
26 | 
27 | 
28 | ### Experiencing problems?
29 | 
30 | Please create a [new issue](https://github.com/aspuru-guzik-group/SCILLA/issues/new) and describe your problem in detail so we can fix it.
31 | 
32 | 
33 | ### Authors
34 | 
35 | This software is written by [Tim Menke](https://github.com/Timmenke) and [Florian Häse](https://github.com/FlorianHase).
36 | 
37 | 
38 | ### References
39 | 
40 | [1] Tim Menke, Florian Häse, Simon Gustavsson, Andrew J. Kerman, William D. Oliver, and Alán Aspuru-Guzik, [Automated discovery of superconducting circuits and its application to 4-local coupler design](https://arxiv.org/abs/1912.03322), arXiv preprint arXiv:1912.03322 (2019).
41 | 
--------------------------------------------------------------------------------
/Submitter/.nfs00000000080a4c1b00019518:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aspuru-guzik-group/SCILLA/f0291082990e0f99f8f9abf41ec405427cfe3752/Submitter/.nfs00000000080a4c1b00019518
--------------------------------------------------------------------------------
/Submitter/QcircSim_wrapper.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | """ Submits simulation job to desired solver and returns simulation results """
4 | 
5 | # Import available circuit solvers
6 | from Submitter import solver_JJcircuitSimV3
7 | from Submitter import solver_2node
8 | 
9 | def solve_circuit(solver, Carr, Larr, Jarr, phiExt=0, phiOffs=[0.5,0.5,0.5]):
10 | 
11 |     if solver == 'JJcircuitSimV3':
12 |         results = solver_JJcircuitSimV3(Carr, Larr, Jarr, fluxSweep=True, phiOffs=phiOffs)
13 | 
14 |     elif solver == '2-node':
15 |         # Larr must be None
16 |         eigenspec = solver_2node(Carr, Larr, Jarr, phiExt=phiExt, qExt=[0,0], n=6, normalized=True)
17 |         timeout_bool = False
18 |         results = (eigenspec, timeout_bool)
19 | 
20 |     else:
21 |         raise NotImplementedError("Desired circuit solver '{}' not implemented".format(solver))
22 | 
23 |     return results
24 | 
--------------------------------------------------------------------------------
/Submitter/__init__.py:
--------------------------------------------------------------------------------
1 | from Submitter.solver_JJcircuitSimV3 import solver_JJcircuitSimV3
2 | from Submitter.solver_2node import solver_2node
3 | from Submitter.QcircSim_wrapper import solve_circuit
4 | from Submitter.abstract_submitter import AbstractSubmitter
5 | from Submitter.local_submitter import LocalSubmitter
6 | from Submitter.slurm_submitter import SlurmSubmitter
7 | from Submitter.circuit_submitter import CircuitSubmitter
--------------------------------------------------------------------------------
/Submitter/abstract_submitter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import copy
4 | import pickle
5 | import numpy as np  # needed for the np.nan fallback in submit()
6 | #====================================================
7 | 
8 | class AbstractSubmitter(object):
9 | 
10 |     # submitter only computes the spectra / other properties of interest
11 |     # there'll be another module computing the loss, which is done on the same computing architecture for now
12 | 
13 |     RECEIVED_RESULTS = []
14 | 
15 |     def __init__(self, settings, general_params, evaluation_function):
16 |         self.settings = settings
17 |         self.general_params = general_params
18 |         self.evaluation_function = evaluation_function
19 | 
20 | 
21 |     # implement file logger and pick-ups
22 |     def process_received_results(self, file_name):
23 | 
24 |         results = pickle.load(open(file_name, 'rb'))
25 | 
26 | 
27 |     def _submit(self):
28 |         # this is the actual submission procedure
29 |         pass
30 | 
31 | 
32 |     def submit(self):
33 | 
34 |         # first, we need to validate the circuit, then we submit
35 |         if self.is_valid():
36 |             loss = 0.
37 |         else:
38 |             loss = np.nan
39 | 
40 | 
41 |     def get_results(self):
42 |         results = copy.deepcopy(self.RECEIVED_RESULTS)
43 |         for result in results:
44 |             del self.RECEIVED_RESULTS[0]
45 |         return results
46 | 
47 | 
--------------------------------------------------------------------------------
/Submitter/circuit_submitter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | #====================================================
4 | 
5 | import os
6 | import copy
7 | import uuid
8 | import time
9 | import pickle
10 | import threading
11 | 
12 | from Submitter import LocalSubmitter
13 | from Submitter import solve_circuit
14 | from Utilities import FileLogger
15 | 
16 | from Utilities.decorators import thread
17 | 
18 | #====================================================
19 | 
20 | class CircuitSubmitter(object):
21 |     """
22 |     Purpose:
23 |         connects to hardware-specific circuit submission modules,
24 |         receives circuit parameters from circuit searcher,
25 |         submits these parameters to appropriate computing hardware,
26 |         collects computation results for pick-up by circuit searcher,
27 |     """
28 | 
29 |     # declare containers
30 |     FILE_LOGGERS = {}
31 |     RECEIVED_RESULTS = []
32 | 
33 | 
34 |     def __init__(self, settings, general_params):
35 |         self.settings = settings
36 |         self.general_params = general_params
37 |         self.submitters = {}
38 | 
39 | 
40 |     def add_submitter(self, keyword):
41 |         if not keyword in self.submitters:
42 |             if keyword == 'local':
43 |                 from Submitter import LocalSubmitter as SelectedSubmitter
44 |             else:
45 |                 raise NotImplementedError()
46 |             self.submitters[keyword] = SelectedSubmitter(self.settings, self.general_params, solve_circuit)
47 | 
48 | 
49 |     def parse_calculation_results(self, file_name):
50 |         if not 'proc' in file_name: return None
51 | 
52 |         with open(file_name, 'rb') as content:
53 |             data = pickle.load(content)
54 | 
55 |         self.RECEIVED_RESULTS.append(data)
56 |         os.remove(file_name)
57 |         file_name = file_name.replace('proc_', '')
58 |         os.remove(file_name)
59 | 
60 |         # need to close file logger
61 |         job_id = file_name.split('_')[-1].split('.')[0]
62 |         file_logger = self.FILE_LOGGERS[job_id]
63 | 
file_logger.stop() 64 | del self.FILE_LOGGERS[job_id] 65 | 66 | 67 | def get_new_results(self): 68 | new_results = copy.deepcopy(self.RECEIVED_RESULTS) 69 | for new_result in new_results: 70 | self.RECEIVED_RESULTS.pop(0) 71 | return new_results 72 | 73 | 74 | def check_resource_availability(self, task_set): 75 | computing_resource = task_set.settings['computing_resource'] 76 | submitter = self.submitters[computing_resource] 77 | return submitter.is_available() 78 | 79 | 80 | def _submit(self, submitter, circuit, task_set, job_id): 81 | file_logger = FileLogger(action = self.parse_calculation_results, path = self.settings.scratch_dir, pattern = '*job*%s*' % (job_id)) 82 | self.FILE_LOGGERS[job_id] = file_logger 83 | file_logger.start() 84 | 85 | # submit job 86 | success = submitter.submit(circuit, task_set, job_id) 87 | self.successes[job_id] = success 88 | if not success: 89 | self.FILE_LOGGERS[job_id].stop() 90 | del self.FILE_LOGGERS[job_id] 91 | 92 | 93 | def submit(self, circuits, task_set): 94 | # fetch hardware specific submitter, create for monitoring 95 | 96 | computing_resource = task_set.settings['computing_resource'] 97 | submitter = self.submitters[computing_resource] 98 | 99 | self.successes = {} 100 | job_ids, threads = [], [] 101 | 102 | # Case 1: there is just one circuit to be submitted (avoid enumerating dict entries) 103 | if type(circuits) == dict: 104 | circuit = circuits 105 | job_id = str(uuid.uuid4()) 106 | job_ids.append(job_id) 107 | thread = threading.Thread(target = self._submit, args = (submitter, circuit, task_set, job_id)) 108 | threads.append(thread) 109 | thread.start() 110 | 111 | # Case 2: there are several circuits to be submitted 112 | else: 113 | for circuit_index, circuit in enumerate(circuits): 114 | job_id = str(uuid.uuid4()) 115 | job_ids.append(job_id) 116 | thread = threading.Thread(target = self._submit, args = (submitter, circuit, task_set, job_id)) 117 | threads.append(thread) 118 | thread.start() 119 | 120 | submitted = range(len(job_ids)) 121 | not_submitted = [] 122 | return submitted, not_submitted 123 | 124 | 125 | def get_computed_circuits(self): 126 | # return all results collected from prior submission 127 | computed_circuits = copy.deepcopy(self.RECEIVED_RESULTS) 128 | for computed_circuit in computed_circuits: 129 | self.RECEIVED_RESULTS.pop(0) 130 | return computed_circuits 131 | -------------------------------------------------------------------------------- /Submitter/execute.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ Executes the circuit solver wrapper """ 4 | 5 | import os 6 | import sys 7 | import uuid 8 | import pickle 9 | import shutil 10 | 11 | sys.path.append(os.getcwd()) 12 | 13 | #==================================================== 14 | 15 | process_index = int(sys.argv[2]) 16 | 17 | #==================================================== 18 | 19 | 20 | job_id = sys.argv[1].split('_')[-1].split('.')[0] 21 | 22 | # Load information about circuit, solver, sweep 23 | 24 | with open(sys.argv[1], 'rb') as content: 25 | data = pickle.load(content) 26 | 27 | function = data['evaluation_function'] 28 | general_params = data['general_params'] 29 | solver = general_params['solver'] 30 | phiExt = general_params['phiExt'] 31 | params = data['circuit']['circuit_values'] 32 | carr, jarr, larr = params['capacities'], params['junctions'], params['inductances'] 33 | 34 | # Pass flux offsets in other loops to simulator 35 | phiOffs_bool = False 36 | if 
'phiOffs' in params:
37 |     phiOffs_bool = True
38 |     phiOffs = params['phiOffs']
39 | 
40 | # Navigate into scratch directory before calling function
41 | scratch_dir = '.scratch_dir_%s' % str(uuid.uuid4())
42 | os.mkdir(scratch_dir)
43 | os.chdir(scratch_dir)
44 | 
45 | # Circuit solver wrapper is called here
46 | if phiOffs_bool:
47 |     result_dict = function(solver, carr, larr, jarr, phiExt=phiExt, phiOffs=phiOffs)
48 | else:
49 |     result_dict = function(solver, carr, larr, jarr, phiExt=phiExt)
50 | os.chdir('../')
51 | 
52 | data['results'] = result_dict
53 | data['measurements'] = {'eigen_spectrum': data['results'][0], 'timeout': data['results'][1]}
54 | 
55 | # Save number of flux biases and maximum outer level pop if that information is available
56 | if len(result_dict)==4:
57 |     data['measurements']['num_biases'] = data['results'][2]
58 |     data['measurements']['max_pop'] = data['results'][3]
59 | 
60 | processed_file_name = sys.argv[1].replace(job_id, 'proc_%s' % job_id)
61 | 
62 | with open(processed_file_name, 'wb') as content:
63 |     pickle.dump(data, content)
64 | 
65 | # Clean up (best effort - the scratch directory may already be gone)
66 | try:
67 |     shutil.rmtree(scratch_dir)
68 | except OSError:
69 |     pass
70 | 
71 | 
72 | 
73 | 
74 | 
75 | 
76 | 
77 | 
78 | 
79 | 
80 | 
--------------------------------------------------------------------------------
/Submitter/local_submitter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | """ Creates a subprocess to execute the circuit solver wrapper """
4 | 
5 | ### Toggle line 11, 29, 38 comments to switch between MacOS and Odyssey computing cluster ###
6 | 
7 | #====================================================
8 | 
9 | import os
10 | import pickle
11 | # import multiprocessing #comment for MacOS / uncomment for Odyssey
12 | import subprocess
13 | import uuid
14 | 
15 | import numpy as np
16 | 
17 | from Submitter import AbstractSubmitter
18 | 
19 | #====================================================
20 | 
21 | class LocalSubmitter(AbstractSubmitter):
22 | 
23 |     FILE_LOGGERS = []
24 | 
25 | 
26 |     def __init__(self, settings, general_params, evaluation_function):
27 | 
28 |         AbstractSubmitter.__init__(self, settings, general_params, evaluation_function)
29 |         # self.num_cores = multiprocessing.cpu_count() #comment for MacOS / uncomment for Odyssey
30 |         # print('\n\n')
31 |         # print('NUM_CORES', self.num_cores)
32 |         # print('\n\n')
33 |         self.get_available_cpus()
34 |         self.next_process = {}
35 | 
36 |     def get_available_cpus(self):
37 |         scratch_name = 'scratch_file'
38 |         # subprocess.call('cat /proc/"self"/status | grep Cpus_allowed_list > %s' % scratch_name, shell = True) #comment for MacOS / uncomment for Odyssey
39 |         with open(scratch_name, 'r') as content:
40 |             file_content = content.read()
41 |         cpu_info = file_content.strip().split()[1].split(',')
42 |         cpu_list = []
43 |         for element in cpu_info:
44 |             if '-' in element:
45 |                 cpu_bounds = [int(entry) for entry in element.split('-')]
46 |                 cpu_list.extend(range(cpu_bounds[0], cpu_bounds[1] + 1))
47 |             else:
48 |                 cpu_list.append(int(element))
49 |         self.cpu_list = cpu_list
50 |         print('\n\nCPU_LIST: %s\n\n' % str(self.cpu_list))
51 | 
52 | 
53 |     def get_process_index(self, task_set):
54 |         task_name = task_set.settings['name']
55 |         max_concurrent = task_set.settings['designer_options']['max_concurrent']
56 | 
57 |         if task_name not in self.next_process:
58 |             next_process = self.cpu_list[0]
59 |             self.next_process[task_name] = 1
60 |         else:
61 |             next_process = self.cpu_list[self.next_process[task_name]]
62 |             self.next_process[task_name] = (self.next_process[task_name] + 1) % len(self.cpu_list)
63 |         return next_process
64 | 
65 | 
66 |     def submit(self, circuit, task_set, job_id):
67 | 
68 |         process_index = self.get_process_index(task_set)
69 | 
70 |         job_name = 'job_%s' % job_id
71 |         if 'general_params' in circuit:
72 |             job_dict = {'evaluation_function': circuit['evaluation_function'],
73 |                         'general_params': circuit['general_params'],
74 |                         'circuit': circuit['circuit'],
75 |                         'merit_re-eval': True,
76 |                         'job_name': job_name,}
77 |         else:
78 |             job_dict = {'evaluation_function': self.evaluation_function,
79 |                         'general_params': self.general_params,
80 |                         'circuit': circuit,
81 |                         'job_name': job_name,}
82 | 
83 |         file_name = '%s/%s.pkl' % (self.settings.scratch_dir, job_name)
84 |         with open(file_name, 'wb') as content:
85 |             pickle.dump(job_dict, content)
86 | 
87 |         FNULL = open(os.devnull, 'w')  # pass as stdout to silence the solver subprocess
88 | 
89 |         subprocess.call('python Submitter/execute.py %s %d &' % (file_name, process_index), shell = True) #debug: show subprocess output
90 | 
91 |         return True
92 | 
93 | 
--------------------------------------------------------------------------------
/Submitter/slurm_submitter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | from Submitter import AbstractSubmitter
4 | 
5 | 
6 | #====================================================
7 | 
8 | 
9 | class SlurmSubmitter(AbstractSubmitter):
10 | 
11 |     # stub for SLURM-based submission; mirrors the LocalSubmitter signature
12 |     def __init__(self, settings, general_params, evaluation_function):
13 | 
14 |         AbstractSubmitter.__init__(self, settings, general_params, evaluation_function)
15 | 
--------------------------------------------------------------------------------
/Submitter/solver_2node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | """ Simulate Hamiltonian of 2-node circuits with arbitrary capacitances and junctions """
4 | 
5 | import numpy as np
6 | import numpy.linalg
7 | import scipy as sp
8 | import csv
9 | import os
10 | 
11 | 
12 | def solver_2node(Carr, Larr, Jarr, phiExt=0, qExt=[0,0], n=40, normalized=True):
13 |     """
14 |     Calculates the flux or charge spectrum of a 2-node circuit containing junctions and capacitances. If
15 |     a flux or charge offset is given as a list, a sweep over the list will be performed. However, only
16 |     one-dimensional sweeps are allowed. Returns eigenvalues for fixed parameters if no sweep is specified.
17 | 
18 |     Parameters:
19 |         Carr:   array | flattened capacitance matrix (in fF)
20 |         Larr:   None  | NOT SUPPORTED, SET TO 'None'
21 |         Jarr:   array | flattened junction matrix (in GHz)
22 |         phiExt: float or m-dim list | external flux (in fraction of flux quanta)
23 |         qExt:   2-dim or 2xm-dim list | charge offsets for nodes 1 and 2 (in fraction of Cooper pairs)
24 |         n:      int | sets 2n+1 charge basis states (integer)
25 | 
26 |     Returns:
27 |         spec:   mxn-dim array | Eigenvalues of circuit for each point along sweep (in GHz)
28 | 
29 |     Note: Only one sweep allowed, i.e. sweep either flux or one of the two node charges.
30 |     """
31 | 
32 |     import time
33 |     start = time.time()
34 | 
35 |     # Determine which parameter to sweep
36 |     sweep_phi = (np.shape(phiExt) != ())
37 |     sweep_q1 = (np.shape(qExt[0]) != ())
38 |     sweep_q2 = (np.shape(qExt[1]) != ())
39 | 
40 |     # Check whether more than one sweep is specified
41 |     sweep_list = [sweep_phi, sweep_q1, sweep_q2]
42 |     valid = (sum(sweep_list) <= 1)
43 |     assert valid, "Only one sweep allowed - sweep either flux OR one of the two node charges."
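    # Example (from the __main__ test below): a 2-node flux qubit with junction
    # energies EJa = EJb = 115 GHz, EJc = 50 GHz and a 45 fF shunt capacitor is
    # encoded as flattened upper-triangular arrays [x11, x12, x22]:
    #     Jarr = np.array([115, 50, 115]); Carr = np.array([0, 45, 0])
    #     spec = solver_2node(Carr, None, Jarr, phiExt=np.linspace(0, 1, 25, endpoint=True), n=10)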
44 | 
45 |     # Initialize spectrum
46 |     spec = []
47 | 
48 |     # Calculate spectrum for swept parameter
49 |     if sweep_phi:
50 |         for p in phiExt:
51 |             spec.append( _eigs_2node_singleflux(Carr, Larr, Jarr, phiExt_fix=p, qExt_fix=qExt, n=n) )
52 |     elif sweep_q1 or sweep_q2:
53 |         if sweep_q1:
54 |             qSweep = [[q,qExt[1]] for q in qExt[0]]
55 |         else:
56 |             qSweep = [[qExt[0],q] for q in qExt[1]]
57 |         for qv in qSweep:
58 |             spec.append( _eigs_2node_singleflux(Carr, Larr, Jarr, phiExt_fix=phiExt, qExt_fix=qv, n=n) )
59 |     else:
60 |         spec = _eigs_2node_singleflux(Carr, Larr, Jarr, phiExt_fix=phiExt, qExt_fix=qExt, n=n)
61 | 
62 |     spec = np.array(spec)
63 | 
64 |     # Normalize spectrum by ground state if desired
65 |     if normalized:
66 |         e0 = np.array([spec[i][0] for i in range(len(spec))])
67 |         spec = (spec.T - e0).T
68 | 
69 |     end = time.time()
70 |     # optional timing log: '$$$$$ took: %.4f s $$$$$$$' % (end - start)
71 |     return spec
72 | 
73 | 
74 | def _eigs_2node_singleflux(Carr, Larr, Jarr, phiExt_fix=0, qExt_fix=[0,0], n=6):
75 |     """
76 |     Eigenenergies of a 2-node circuit containing capacitances and junctions for fixed flux and charge
77 |     offset. Note: Adds the junction capacitance.
78 | 
79 |     Parameters:
80 |         Carr:       array | flattened capacitance matrix (in fF)
81 |         Larr:       None  | NOT YET SUPPORTED, SET TO 'None'
82 |         Jarr:       array | flattened junction matrix (in GHz)
83 |         phiExt_fix: float | external flux (in fraction of flux quanta)
84 |         qExt_fix:   2-dim array | charge offset vector for nodes 1 and 2 (in fraction of Cooper pairs)
85 |         n:          int | sets 2n+1 charge basis states (integer)
86 | 
87 |     Returns:
88 |         evals:      array | 2n+1 eigenvalues of circuit (in GHz)
89 |     """
90 | 
91 |     assert Larr is None, "Linear inductors not supported in 2-node solver - set Larr to 'None'"
92 | 
93 |     # Construct component connectivity matrices
94 |     N = int((np.sqrt(1+8*len(Carr))-1)/2) #calculate dimension of matrices from number of upper triangular entries
95 |     Cmat, Jmat = np.zeros((N,N)), np.zeros((N,N))
96 |     Cmat[np.triu_indices(N,k=0)] = Carr
97 |     Cmat = np.maximum(Cmat, Cmat.transpose())
98 |     Jmat[np.triu_indices(N,k=0)] = Jarr
99 |     Jmat = np.maximum(Jmat, Jmat.transpose())
100 |     Cmat += 1/26.6 * Jmat #add junction capacitance
101 | 
102 |     # Capacitance matrix C (not to be confused with capacitance connectivity matrix Cmat)
103 |     C = np.diag(np.sum(Cmat, axis=0)) + np.diag(np.diag(Cmat)) - Cmat
104 |     C = C * 10.**(-15) #convert fF -> F
105 | 
106 |     # Capacitive (kinetic) part of Hamiltonian
107 |     e = 1.60217662 * 10**(-19) #elementary charge
108 |     h = 6.62607004 * 10**(-34) #Planck constant
109 |     T = np.zeros( ((2*n+1)**len(C), (2*n+1)**len(C)) ) #kinetic part of Hamiltonian
110 |     Cinv = np.linalg.inv(C)
111 |     I = np.eye(2*n+1) #identity matrix
112 |     Q = np.diag(np.arange(-n,n+1)) #charge operator
113 |     Q1 = Q + qExt_fix[0]*I
114 |     Q2 = Q + qExt_fix[1]*I
115 |     # Simpler construction specific to the flux qubit
116 |     T += 0.5*Cinv[0,0] * np.kron(Q1.dot(Q1), I)
117 |     T += 0.5*Cinv[1,1] * np.kron(I, Q2.dot(Q2))
118 |     T += Cinv[0,1] * np.kron(Q1, Q2)
119 |     T *= 4*e**2/h
120 | 
121 |     # Josephson potential part (specific to flux qubit)
122 |     Jmat = Jmat * 10.**9 #convert GHz -> Hz
123 |     U = np.zeros(((2*n+1)**len(C),(2*n+1)**len(C))) #potential part of Hamiltonian
124 |     Dp = np.diag(np.ones((2*n+1)-1), k=1)
125 |     Dm = np.diag(np.ones((2*n+1)-1), k=-1)
126 |     # Add displacement operator terms that were obtained from cosines
127 |     U = U - Jmat[0,0]/2 * np.kron((Dp + Dm),I)
128 |     U = U - Jmat[1,1]/2 * np.kron(I, (Dp + Dm))
129 |     U = U - Jmat[0,1]/2 * ( np.exp(-2*np.pi*1j*phiExt_fix) * np.kron(Dp,Dm) + np.exp(2*np.pi*1j*phiExt_fix) * np.kron(Dm,Dp) )
130 | 
131 |     # Assemble Hamiltonian
132 |     H = T + U
133 | 
134 |     evals = np.linalg.eigh(H)[0]
135 |     evals /= 1e9 #convert to GHz
136 | 
137 |     return evals
138 | 
139 | 
140 | ####### Testing #######
141 | if __name__=='__main__':
142 | 
143 |     from matplotlib import pyplot as plt
144 | 
145 |     # Initialization
146 |     EJa = 115
147 |     EJb = 115
148 |     EJc = 50
149 |     Csh = 45
150 |     Jarr = np.array([EJa, EJc, EJb])
151 |     Carr = np.array([0, Csh, 0])
152 |     phiExt = np.linspace(0, 1, 25, endpoint=True)
153 |     qSweep = np.linspace(0, 1, 25, endpoint=True)
154 | 
155 |     # Find eigenvalues
156 |     res = solver_2node(Carr, None, Jarr, phiExt=phiExt, qExt=[0,0], n=10, normalized=True)
157 |     print('Testing solver_2node:', res[:,1])
158 | 
159 |     # Output
160 |     plt.figure()
161 |     plt.plot(res[:,1])
162 |     plt.show()
163 | 
164 | 
165 | 
166 | 
167 | 
168 | 
169 | 
170 | 
171 | 
172 | 
173 | 
174 | 
175 | 
176 | 
177 | 
178 | 
179 | 
180 | 
181 | 
182 | 
183 | 
184 | 
185 | 
186 | 
187 | 
188 | 
189 | 
190 | 
191 | 
192 | 
--------------------------------------------------------------------------------
/Submitter/solver_JJcircuitSimV3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | """ Wrapper to run the JJcircuitSim code in a Mathematica subprocess """
4 | 
5 | from subprocess import run, TimeoutExpired
6 | import os
7 | import time
8 | import csv
9 | import numpy as np
10 | import re
11 | 
12 | 
13 | def solver_JJcircuitSimV3(Carr, Larr, JJarr, nLin=6, nNol=8, nJos=11, nIsl=1, timeout=600, fluxSweep=True, phiOffs=[0.5,0.5,0.5,0.5]):
14 |     """
15 |     Returns:
16 |         eigenspec - 10-dim numpy array of the circuit eigenspectrum at fixed flux. If fluxSweep=True,
17 |         returns a 10x41-dim array with flux sweep of each eigenmode (first flux bias chosen if multiple
18 |         biases available).
19 |         timeout_bool - flag for expired timeout
20 |         Only returned if they can be determined:
21 |         num_biases - number of flux biases available in circuit; max_pop - maximum outermost level population
22 | 
23 |     Parameters:
24 |         Carr - flattened upper triangular of capacitance matrix [fF]
25 |         Larr - flattened upper triangular of inductance matrix [pH]
26 |         JJarr - flattened upper triangular of Josephson junction matrix [GHz]
27 |         nLin, nNol, nJos, nIsl - truncation of linear, non-linear, Josephson and island modes
28 |         timeout - timeout for Mathematica simulation in seconds
29 |         fluxSweep - perform flux sweep if set to True
30 | 
31 |     Note: The component matrices are entered as 1-dim numpy arrays stepping through the
32 |     upper triangular matrix row-wise, e.g. np.array([c11, c12, c13, c22, c23, c33])
33 |     """
34 | 
35 |     print('Running circuit simulation...')
36 | 
37 |     if not os.path.exists('../Mathematica_scriptV2-JJsimV3.wl'):
38 |         raise NotImplementedError('JJcircuitSim module not available')
39 | 
40 |     tA = time.time()
41 | 
42 |     timeout_bool = False
43 | 
44 | 
45 | 
46 |     # Write parameters to text files, for Mathematica to read
47 |     Carr.astype('float32').tofile('Carr.dat')
48 |     Larr.astype('float32').tofile('Larr.dat')
49 |     JJarr.astype('float32').tofile('JJarr.dat')
50 |     phiOffs = np.array(phiOffs)
51 |     phiOffs.astype('float32').tofile('phiOffs.dat') #flux biases for loops
52 | 
53 |     # Subprocess to run Mathematica script
54 |     try:
55 |         run(["wolframscript", "-file", "../Mathematica_scriptV2-JJsimV3.wl",
56 |              str(nLin), str(nNol), str(nJos), str(nIsl), str(int(fluxSweep))],
57 |             timeout=timeout, stdout=open(os.devnull, 'wb'))
58 |     except TimeoutExpired:
59 |         print('ERROR: timeout expired')
60 |         timeout_bool = True
61 |         return None, timeout_bool
62 | 
63 |     # Extract eigenspectrum from file
64 |     eigenspec = []
65 |     if not os.path.isfile('log_eigenspectrum.csv'):
66 |         return None, timeout_bool
67 | 
68 |     try: #try opening and reading spectrum file
69 |         with open('log_eigenspectrum.csv', 'r') as datafile:
70 |             if fluxSweep:
71 |                 reader = csv.reader(datafile, delimiter=',')
72 |                 for row in reader:
73 |                     eigenspec.append([float(e) for e in row])
74 |             else:
75 |                 for n, line in enumerate(datafile):
76 |                     eigenspec.append(float(line.strip()))
77 |     except AttributeError:
78 |         return None, timeout_bool
79 | 
80 |     # Determine number of flux biases and outermost level population
81 |     try:
82 |         ## Number of flux biases ##
83 |         with open('log_biasinfo.txt', 'r') as datafile:
84 |             content = datafile.read()
85 |         start_indices = [m.start() for m in re.finditer('Fb', content)]
86 |         matches = [content[i:i+3] for i in start_indices] #assumes no more than 9 biases
87 |         matches = list(set(matches))
88 |         num_biases = len(matches)
89 |         print('# Found {} flux biases'.format(num_biases))
90 | 
91 |         ## Maximum outermost level population of eigenstates ##
92 |         with open('log_diagonalization.txt', 'r') as datafile:
93 |             content = datafile.read()
94 |         # Find indices of level pop information for each mode
95 |         start_indices = np.array([m.start() for m in re.finditer('Max level probs', content)])
96 |         stop_temp = np.array([m.start() for m in re.finditer('}', content)])
97 |         stop_indices = np.array([np.min([t for t in stop_temp-s if t>0])+s for s in start_indices])
98 |         # Extract level pops and convert to float
99 |         levelpops = []
100 |         for s,t in zip(start_indices,stop_indices):
101 |             l = content[s+19:t]
102 |             pattern = re.compile(r'\*\^')  #Mathematica exponent marker '*^' -> 'e'
103 |             l = pattern.sub('e', l)
104 |             l = l.split(',')
105 |             l = [float(e) for e in l]
106 |             levelpops.append(l)
107 |         # Determine maximum outermost level pop overall
108 |         max_pop = np.max([np.max(l) for l in levelpops])
109 |         print('# Maximum outermost level pop: {}'.format(max_pop))
110 | 
111 |         print('Simulation time: {} s'.format(time.time()-tA))
112 |         return eigenspec, timeout_bool, num_biases, max_pop
113 | 
114 |     except Exception:
115 |         print('ERROR: could not determine number of flux biases or outermost level population')
116 |         print('Simulation time: {} s'.format(time.time()-tA))
117 |         return eigenspec, timeout_bool
118 | 
119 | 
120 | 
121 | 
122 | 
123 | 
124 | 
125 | 
126 | 
127 | 
128 | 
129 | 
130 | 
131 | 
132 | 
133 | 
134 | 
135 | 
--------------------------------------------------------------------------------
/TaskSets/__init__.py:
-------------------------------------------------------------------------------- 1 | 2 | from TaskSets.task_set import TaskSet 3 | from TaskSets.calculation_task_set import CalculationTaskSet 4 | from TaskSets.filtering_task_set import FilteringTaskSet 5 | -------------------------------------------------------------------------------- /TaskSets/calculation_task_set.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from TaskSets import TaskSet 4 | 5 | #==================================================== 6 | 7 | class CalculationTaskSet(TaskSet): 8 | 9 | def __init__(self, settings): 10 | 11 | TaskSet.__init__(self, settings['name']) 12 | self.settings = settings 13 | self.settings['task_type'] = 'calculation' 14 | self.task_type = 'calculation' 15 | self.max_exec = self.settings['designer_options']['max_iters'] -------------------------------------------------------------------------------- /TaskSets/filtering_task_set.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from TaskSets import TaskSet 4 | 5 | #==================================================== 6 | 7 | class FilteringTaskSet(TaskSet): 8 | 9 | def __init__(self, settings): 10 | 11 | TaskSet.__init__(self, settings['name']) 12 | self.settings = settings 13 | self.settings['task_type'] = 'filtering' 14 | self.task_type = 'filtering' 15 | self.max_exec = 1 -------------------------------------------------------------------------------- /TaskSets/task_set.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import copy 4 | import uuid 5 | 6 | #==================================================== 7 | 8 | class TaskSet(object): 9 | 10 | def __init__(self, name): 11 | self.max_exec = 1 12 | self.task_set_name = name 13 | self.task_set_id = str(uuid.uuid4()) 14 | 15 | 16 | def generate_all_tasks(self): 17 | tasks = [] 18 | for single_exec in range(self.max_exec): 19 | info_dict = copy.deepcopy(self.settings) 20 | info_dict['task_set_id'] = self.task_set_id 21 | info_dict['task_set_name'] = self.task_set_name 22 | info_dict['task_id'] = str(uuid.uuid4()) 23 | info_dict['execution_index'] = 0 24 | info_dict['primer_index'] = 0 25 | info_dict['num_exec'] = single_exec 26 | info_dict['from_optimizer'] = False 27 | tasks.append(info_dict) 28 | self.generated_tasks = copy.deepcopy(tasks) 29 | return tasks -------------------------------------------------------------------------------- /Utilities/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from Utilities.file_logger import FileLogger 3 | from Utilities.settings import Settings 4 | -------------------------------------------------------------------------------- /Utilities/decorators.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #======================================================================== 4 | 5 | import time 6 | 7 | from multiprocessing import Process 8 | from threading import Thread 9 | 10 | #======================================================================== 11 | 12 | def delayed(time_delay = 1.0): 13 | def decorator_wrapper(function): 14 | def wrapper(*args, **kwargs): 15 | time.sleep(time_delay) 16 | function(*args, **kwargs) 17 | return wrapper 18 | return decorator_wrapper 19 | 20 | 
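# Usage sketch for the decorators in this module (function names are illustrative):
#
#     @delayed(0.5)          # sleeps for 0.5 s before each call
#     def ping(): print('ping')
#
#     @thread                # runs each call on a background thread
#     def crunch(data): print(len(data))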
#======================================================================== 21 | 22 | def process(function): 23 | def wrapper(*args, **kwargs): 24 | background_process = Process(target = function, args = args, kwargs = kwargs) 25 | background_process.start() 26 | return wrapper 27 | 28 | def thread(function): 29 | def wrapper(*args, **kwargs): 30 | background_thread = Thread(target = function, args = args, kwargs = kwargs) 31 | background_thread.start() 32 | return wrapper 33 | 34 | #======================================================================== 35 | -------------------------------------------------------------------------------- /Utilities/defaults.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | 5 | _HOME = os.getcwd() 6 | 7 | SETTINGS = { 8 | 9 | 'general': {'scratch_dir': '.scratch' 10 | }, 11 | 12 | 'databases': [{'name': 'master', 'db_name': 'master', 'db_type': 'sqlite', 'db_path': '%s/Experiments/master.db' % _HOME}, 13 | {'name': 'circuits', 'db_name': 'circuits', 'db_type': 'sqlite', 'db_path': '%s/Experiments/circuits.db' % _HOME}, 14 | {'name': 'merits', 'db_name': 'merits', 'db_type': 'sqlite', 'db_path': '%s/Experiments/merits.db' % _HOME}, 15 | {'name': 'losses', 'db_name': 'losses', 'db_type': 'sqlite', 'db_path': '%s/Experiments/losses.db' % _HOME}, 16 | {'name': 'tasks', 'db_name': 'tasks', 'db_type': 'sqlite', 'db_path': '%s/Experiments/tasks.db' % _HOME}, 17 | ], 18 | 19 | } 20 | 21 | -------------------------------------------------------------------------------- /Utilities/file_logger.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #============================================================== 4 | 5 | import warnings 6 | 7 | from Utilities.decorators import thread 8 | 9 | #============================================================== 10 | 11 | class FileLogger(object): 12 | 13 | def __init__(self, action, path = './', pattern = '*'): 14 | self.action = action 15 | self.path = path 16 | self.pattern = pattern 17 | 18 | with warnings.catch_warnings(): 19 | warnings.filterwarnings('error') 20 | try: 21 | # from Utilities.watchdog_event_handler import FileEventHandler 22 | from Utilities.native_event_handler import FileEventHandler 23 | except Warning: 24 | print('WARNING: Watchdog module not working. 
Falling back to native event handler.') 25 | from Utilities.native_event_handler import FileEventHandler 26 | 27 | self.event_handler = FileEventHandler(action, self.pattern) 28 | 29 | def start(self): 30 | self.event_handler.stream(self.path) 31 | 32 | def stop(self): 33 | self.event_handler.stop() 34 | 35 | #============================================================== 36 | 37 | -------------------------------------------------------------------------------- /Utilities/native_event_handler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | 4 | #============================================================== 5 | 6 | import os 7 | import time 8 | import uuid 9 | import fnmatch 10 | 11 | from Utilities.decorators import thread, process 12 | 13 | #============================================================== 14 | 15 | class FileEventHandler(object): 16 | 17 | def __init__(self, action, pattern): 18 | self.pattern = pattern 19 | self.action = action 20 | self.stopped = True 21 | self.ident = str(uuid.uuid4())[:8] 22 | 23 | @thread 24 | def execute(self, found_file): 25 | self.action(found_file) 26 | 27 | @thread 28 | def stream(self, path): 29 | executed_matches = [] 30 | self.run = True 31 | self.stopped = False 32 | while True: 33 | matches = [] 34 | for root, dir_name, file_names in os.walk(path): 35 | for file_name in fnmatch.filter(file_names, self.pattern): 36 | matches.append(os.path.join(root, file_name)) 37 | for match in matches: 38 | if match in executed_matches: continue 39 | time.sleep(0.005) 40 | executed_matches.append(match) 41 | self.execute(match) 42 | if not self.run: break 43 | self.stopped = True 44 | 45 | def stop(self): 46 | self.run = False 47 | while not self.stopped: 48 | time.sleep(0.05) 49 | 50 | -------------------------------------------------------------------------------- /Utilities/settings.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #======================================================================== 4 | 5 | class _AbstractSetting: 6 | 7 | def __init__(self, raw = None, general = None): 8 | self._raw = raw 9 | self.general = general 10 | self.list = [] 11 | 12 | def __iter__(self): 13 | for element in self.list: 14 | yield element 15 | 16 | #======================================================================== 17 | 18 | class Settings(object): 19 | 20 | def __init__(self, settings_dict): 21 | 22 | # first, get the general settings 23 | for setting_name, setting_values in settings_dict.items(): 24 | if setting_name == 'general': 25 | general_settings = _AbstractSetting() 26 | for key, value in setting_values.items(): 27 | setattr(general_settings, key, value) 28 | setattr(self, setting_name, general_settings) 29 | break 30 | else: 31 | general_settings = None 32 | 33 | # now, parse all other settings 34 | for setting_name, setting_values in settings_dict.items(): 35 | 36 | if isinstance(setting_values, list): 37 | setting = _AbstractSetting(raw = setting_values, general = general_settings) 38 | for list_entry in setting_values: 39 | if isinstance(list_entry, dict): 40 | entry = _AbstractSetting(raw = list_entry, general = general_settings) 41 | for key, value in list_entry.items(): 42 | setattr(entry, key, value) 43 | setattr(setting, entry.name, entry) 44 | setting.list.append(entry) 45 | else: 46 | setattr(setting, list_entry['name'], list_entry) 47 | setting.list.append(list_entry) 48 | setattr(self, 
setting_name, setting) 49 | 50 | 51 | elif isinstance(setting_values, dict): 52 | setting = _AbstractSetting(raw = setting_values, general = general_settings) 53 | for key, value in setting_values.items(): 54 | setattr(setting, key, value) 55 | setattr(self, setting_name, setting) -------------------------------------------------------------------------------- /Utilities/watchdog_event_handler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #============================================================== 4 | 5 | from watchdog.observers import Observer 6 | from watchdog.events import PatternMatchingEventHandler 7 | 8 | #============================================================== 9 | 10 | class FileHandler(PatternMatchingEventHandler): 11 | 12 | def __init__(self, event, pattern): 13 | PatternMatchingEventHandler.__init__(self, patterns = [pattern]) 14 | self.process_event = event 15 | 16 | def process(self, found_file): 17 | file_name = found_file.src_path 18 | self.process_event(file_name) 19 | 20 | def on_created(self, found_file): 21 | self.process(found_file) 22 | 23 | 24 | class FileEventHandler(object): 25 | 26 | def __init__(self, event, pattern): 27 | self.event = event 28 | self.pattern = pattern 29 | self.event_handler = FileHandler(event, pattern) 30 | 31 | def stream(self, path): 32 | self.observer = Observer() 33 | self.observer.schedule(self.event_handler, path, recursive = True) 34 | self.observer.start() 35 | 36 | def stop(self): 37 | self.observer.stop() 38 | 39 | -------------------------------------------------------------------------------- /circuit_searcher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import time 5 | import numpy as np 6 | import threading 7 | import pickle 8 | 9 | from Submitter import CircuitSubmitter 10 | from CircuitQuantifier import CircuitCritic, CircuitValidator 11 | from DatabaseHandler import DatabaseHandler 12 | from Designers import CircuitDesigner 13 | from TaskSets import CalculationTaskSet, FilteringTaskSet 14 | from Utilities import Settings 15 | 16 | from Utilities import defaults 17 | from Utilities.decorators import thread 18 | 19 | 20 | #==================================================== 21 | 22 | def _report_times(file_name, start, end): 23 | content = open(file_name, 'a') 24 | content.write('%.5f\n' % (end - start)) 25 | content.close() 26 | 27 | #==================================================== 28 | 29 | 30 | class CircuitSearcher(object): 31 | """ 32 | API for SCILLA functionalities. 
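 
    Typical usage (cf. main_benchmark.py; option values are illustrative):
 
        searcher = CircuitSearcher(circuit_params, general_params, database_path = 'Experiments')
        searcher.add_task(name = 'random_search', designer = 'random',
                          designer_options = {'max_iters': 6, 'max_concurrent': 2, 'batch_size': 10},
                          merit = 'TargetSpectrum', merit_options = merit_options)
        searcher.add_task(name = 'filtering', designer = 'filter_db', designer_options = {'num_circuits': 2})
        searcher.execute()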
33 |     """
34 | 
35 |     def __init__(self, circuit_params = None, general_params = None, database_path = None, settings = None):
36 |         # fall back to the default settings if no settings are provided
37 |         self.settings = Settings(defaults.SETTINGS) if settings is None else settings
38 | 
39 |         if not os.path.isdir(self.settings.general.scratch_dir):
40 |             os.mkdir(self.settings.general.scratch_dir)
41 | 
42 |         self.circuit_params = circuit_params
43 |         self.general_params = general_params
44 |         self.task_sets = []
45 | 
46 |         self.db_handler = DatabaseHandler(self.settings.databases, database_path)
47 |         self.circuit_submitter = CircuitSubmitter(self.settings.general, self.general_params)
48 |         self.circuit_validator = CircuitValidator()
49 |         self.circuit_critic = CircuitCritic(self.circuit_params)
50 |         self.circuit_designer = CircuitDesigner(self.settings.general, self.circuit_params)
51 | 
52 | 
53 |     def add_task(self, name = 'task0',
54 |                  designer = 'random_search', designer_options = {'max_iters': 10, 'max_concurrent': np.inf},
55 |                  merit = 'DoubleWell', merit_options = {},
56 |                  observations = [], use_library = False,
57 |                  computing_resource = 'local', computing_options = {}):
58 | 
59 |         # copy settings from kwargs
60 |         settings = {}
61 |         for key, value in locals().items():
62 |             if key in ['self', 'settings']: continue
63 |             settings[key] = value
64 | 
65 |         # add a task set based on the specified designer
66 |         if designer in ['random', 'particle_swarms', 'phoenics', 'grid', 'CMAES', 'LBFGS', 'scipy']:
67 |             task_set = CalculationTaskSet(settings)
68 |             self.circuit_designer.add_designer(name, designer, designer_options)
69 |             self.circuit_submitter.add_submitter(computing_resource)
70 |         elif designer in ['filter_db']:
71 |             task_set = FilteringTaskSet(settings)
72 |         else:
73 |             raise NotImplementedError
74 | 
75 |         self.task_sets.append(task_set)
76 |         return task_set
77 | 
78 | 
79 |     def _run_calculation(self, task_set):
80 |         # fetch task_set_id for easy access
81 |         task_set_id = task_set.task_set_id
82 | 
83 |         # generate all primary tasks (i.e. primers for optimization iterations)
84 |         all_tasks = task_set.generate_all_tasks()
85 |         task_ids = [task['task_id'] for task in task_set.generated_tasks]
86 |         self.db_handler.refresh()
87 |         self.db_handler.add_tasks(all_tasks)
88 | 
89 |         # check if we build on prior results
90 |         if task_set.settings['use_library']:
91 |             prior_observations = self.db_handler.get_prior_circuit_evaluations()
92 | 
93 |         # check termination criteria
94 |         task_set_completed = self.db_handler.task_set_completed(task_set_id)
95 |         designer_terminated = self.circuit_designer.designer_terminated(task_set)
96 | 
97 |         total_start = time.time()
98 | 
99 |         # enter task execution loop
100 | 
101 |         start_time = time.time()
102 | 
103 |         iteration = 0
104 |         while not task_set_completed or not designer_terminated:
105 | 
106 |             tic = time.time()
107 |             reported_times, reported_labels = [], []
108 | 
109 |             # [x] fetch all tasks remaining for this task_set
110 |             start = time.time()
111 |             remaining_tasks = self.db_handler.fetch_remaining_tasks(task_set_id)
112 |             end = time.time()
113 |             reported_times.append(end - start)
114 |             reported_labels.append('fetching remaining tasks')
115 | 
116 |             # [x] force designer to generate new parameters
117 |             start = time.time()
118 |             if not task_set_completed and designer_terminated:
119 |                 num_from_optimizer = 0
120 |                 for remaining_task in remaining_tasks:
121 |                     if remaining_task['from_optimizer']:
122 |                         num_from_optimizer += 1
123 |                 if num_from_optimizer == len(remaining_tasks):
124 |                     self.db_handler.set_tasks_to_redundant(remaining_tasks)
125 | 
126 | 
127 |             # [x] query parameters from designer
128 |             if task_set_completed and not designer_terminated:
129 | 
130 |                 # send new observations to the designer, i.e. give the designer the chance to update
131 |                 observations = self.db_handler.get_circuit_evaluations(task_set_id)
132 |                 if task_set.settings['use_library']:
133 |                     observations.extend(prior_observations)
134 |                 self.circuit_designer.provide_observations(task_set, observations)
135 | 
136 |                 # get new tasks from designer
137 |                 new_tasks = self.circuit_designer.get_requested_tasks(task_set)
138 |                 self.db_handler.add_tasks(new_tasks)
139 |             end = time.time()
140 |             reported_times.append(end - start)
141 |             reported_labels.append('processing special cases')
142 | 
143 | 
144 |             # give priority to tasks associated with currently running optimization iterations (currently a pass-through)
145 |             sorted_tasks = remaining_tasks
146 |             print('# LOG | ... found %d remaining tasks ...' % len(sorted_tasks))
147 | 
148 | 
149 |             # [x] try to submit tasks to computing resources
150 | 
151 |             # try to submit remaining tasks
152 |             start_0 = time.time()
153 |             num_available_resources = self.db_handler.get_num_available_resources(task_set)
154 |             if num_available_resources > 0:
155 |                 submittable_tasks = sorted_tasks[:num_available_resources]
156 |                 available_circuits = self.db_handler.get_validated_circuits()
157 | 
158 |                 if len(available_circuits) == 0:
159 |                     start = time.time()
160 |                     # no valid circuits available --> tell the designer to generate more circuit parameters
161 |                     print('# LOG | ... could not find validated circuits ...')
162 |                     if not self.circuit_designer.is_busy(task_set):
163 |                         # send observations to designer
164 |                         observations = self.db_handler.get_circuit_evaluations(task_set.task_set_id)
165 |                         if task_set.settings['use_library']:
166 |                             observations.extend(prior_observations)
167 |                         # tell designer to make more circuits
168 |                         self.circuit_designer.design_new_circuits(task_set, observations = observations)
169 |                         print('# LOG | ...
called circuit designer ...') 170 | reported_times.append(time.time() - start) 171 | reported_labels.append('\tpinging_designer') 172 | else: 173 | # fetch valid circuits and submit to computing resources 174 | num_submissions = np.minimum(len(submittable_tasks), len(available_circuits)) 175 | tasks = submittable_tasks[:num_submissions] 176 | circuits = available_circuits[:num_submissions] 177 | 178 | # attempt to submit circuits 179 | start = time.time() 180 | self.db_handler.reserve_circuits(circuits) 181 | reported_times.append(time.time() - start) 182 | reported_labels.append('\tsubmitting_task_0') 183 | 184 | start = time.time() 185 | submitted, not_submitted = self.circuit_submitter.submit(circuits, task_set) 186 | reported_times.append(time.time() - start) 187 | reported_labels.append('\tsubmitting_task_1') 188 | 189 | # record successfully submitted circuits 190 | start = time.time() 191 | submitted_tasks = tasks 192 | submitted_circuits = circuits 193 | self.db_handler.set_tasks_to_submitted(submitted_tasks) 194 | self.db_handler.link_submissions(submitted_tasks, submitted_circuits) 195 | reported_times.append(time.time() - start) 196 | reported_labels.append('\tsubmitting_task_3') 197 | 198 | end_0 = time.time() 199 | reported_times.append(end_0 - start_0) 200 | reported_labels.append('processing remaining tasks') 201 | 202 | 203 | # check if new circuits have been designed 204 | start = time.time() 205 | new_circuits = self.circuit_designer.get_circuits() 206 | self.db_handler.add_new_circuits(new_circuits) 207 | end = time.time() 208 | reported_times.append(end - start) 209 | reported_labels.append('getting circuits from designer') 210 | 211 | # query validator for validated circuits 212 | start = time.time() 213 | validated_circuits = self.circuit_validator.get_validated_circuits() 214 | self.db_handler.store_validated_circuits(validated_circuits) 215 | end = time.time() 216 | reported_times.append(end - start) 217 | reported_labels.append('getting circuits from validator') 218 | 219 | # submit new circuits to validator 220 | start_0 = time.time() 221 | new_circuits = self.db_handler.get_new_circuits() 222 | end_0 = time.time() 223 | reported_times.append(end_0 - start_0) 224 | reported_labels.append('\tsubmitting circuits to validator 0') 225 | 226 | start_1 = time.time() 227 | print('# LOG | ... processing %d new circuits ...' % len(new_circuits)) 228 | self.circuit_validator.validate_circuits(new_circuits) 229 | end_1 = time.time() 230 | reported_times.append(end_1 - start_1) 231 | reported_labels.append('submitting circuits to validator 1') 232 | 233 | # collect criticized circuits 234 | start = time.time() 235 | criticized_circuit_results = self.circuit_critic.get_criticized_circuits() 236 | criticized_circuits = [result[0] for result in criticized_circuit_results] 237 | id_dicts = [result[1] for result in criticized_circuit_results] 238 | print('# LOG | ... found %d criticized circuits ...' 
            % len(criticized_circuits))
239 |             self.db_handler.store_criticized_circuits(criticized_circuits, id_dicts)
240 |             end = time.time()
241 |             reported_times.append(end - start)
242 |             reported_labels.append('collecting circuits from critic')
243 | 
244 |             # check if the critic requires new tasks
245 |             start = time.time()
246 |             new_circuits = self.circuit_critic.get_requested_tasks()
247 |             for circuit in new_circuits:
248 |                 self.circuit_submitter.submit(circuit, task_set)
249 |                 self.db_handler.report_circuit_submission()
250 |             end = time.time()
251 |             reported_times.append(end - start)
252 |             reported_labels.append('submitting tasks from critic')
253 | 
254 |             # report progress
255 |             start = time.time()
256 |             progress_info = self.db_handler.get_task_set_progress_info(task_set, time.time() - start_time)
257 |             print('PROGRESS:\n%s (%.3f)' % (progress_info, time.time() - total_start))
258 |             end = time.time()
259 |             reported_times.append(end - start)
260 |             reported_labels.append('getting progress report')
261 | 
262 |             iteration += 1
263 | 
264 |             # update termination criteria
265 |             start = time.time()
266 |             task_set_completed = self.db_handler.task_set_completed(task_set_id)
267 |             designer_terminated = self.circuit_designer.designer_terminated(task_set)
268 |             end = time.time()
269 |             reported_times.append(end - start)
270 |             reported_labels.append('updating termination criteria')
271 | 
272 |             # check if the designer requests any new tasks
273 |             start = time.time()
274 |             new_tasks = self.circuit_designer.get_requested_tasks(task_set)
275 |             self.db_handler.add_tasks(new_tasks)
276 |             end = time.time()
277 |             reported_times.append(end - start)
278 |             reported_labels.append('getting tasks from designer')
279 | 
280 |             # query submitter for received spectra
281 |             start = time.time()
282 |             merit_evaluation_circuits, newly_computed_circuits = [], []
283 |             computed_circuits = self.circuit_submitter.get_computed_circuits()
284 |             for circuit in computed_circuits:
285 |                 if 'merit_re-eval' in circuit:
286 |                     merit_evaluation_circuits.append(circuit)
287 |                     self.db_handler.report_circuit_computation()
288 |                 else:
289 |                     newly_computed_circuits.append(circuit)
290 | 
291 |             reported_times.append(time.time() - start)
292 |             reported_labels.append('\tprocessing merit evaluations 0')
293 | 
294 |             # retry until the database has registered all newly computed tasks
295 |             id_dicts = self.db_handler.set_tasks_to_computed(newly_computed_circuits)
296 |             while len(id_dicts) < len(newly_computed_circuits):
297 |                 id_dicts = self.db_handler.set_tasks_to_computed(newly_computed_circuits)
298 | 
299 |             self.circuit_critic.criticize_circuits(newly_computed_circuits, task_set, id_dicts)
300 |             self.circuit_critic.report_reevaluations(merit_evaluation_circuits)
301 | 
302 |             self.db_handler.print_pending_updates(time.time() - start_time)
303 |             self.db_handler.synchronize()
304 | 
305 |             toc = time.time()
306 |             new_line = '@@@ Timing: %.5f @@@ | NUM_THREADS: %d\n' % ((toc - tic), threading.active_count())
307 |             print(new_line)
308 | 
309 |             content = open('log_threads', 'a')
310 |             content.write('%.3f\t%d\n' % (time.time() - start_time, threading.active_count()))
311 |             content.close()
312 | 
313 |             time.sleep(.2) # <== THIS SLEEP IS ESSENTIAL; DO NOT REMOVE
314 | 
315 |         self.db_handler.set_circuits_to_unused()
316 | 
317 | 
318 |     def _run_filtering(self, task_set):
319 |         while self.db_handler.is_updating():
320 |             time.sleep(0.05)
321 |         self.db_handler.filter_for_best_performing(task_set.settings['designer_options'])
322 |         # wait for completion of update requests
323 |         while self.db_handler.is_updating():
324 |             time.sleep(0.05)
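    # Filtering tasks are configured through add_task with the 'filter_db' designer,
    # e.g. (cf. main_benchmark.py):
    #     circuit_searcher.add_task(name = 'filtering', designer = 'filter_db',
    #                               designer_options = {'num_circuits': 2})
    # which retains only the best-performing circuits for subsequent tasks.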
325 | 326 | 327 | def execute(self): 328 | 329 | for task_set in self.task_sets: 330 | 331 | print('# LOG | ... starting task "%s" ...' % task_set.task_set_name) 332 | 333 | if task_set.task_type == 'calculation': 334 | self._run_calculation(task_set) 335 | 336 | elif task_set.task_type == 'filtering': 337 | self._run_filtering(task_set) 338 | 339 | elif task_set.task_type == 'db_query': 340 | self._run_db_query(task_set) 341 | 342 | 343 | print('# LOG | ... COMPLETED task "%s" ...' % task_set.task_set_name) 344 | time.sleep(1) 345 | 346 | 347 | def query(self, kind = None, **kwargs): 348 | 349 | if kind == 'get_circuits_from_task': 350 | return self.db_handler.get_circuits_from_task(kwargs['task']) 351 | elif kind == 'get_trajectories': 352 | return self.db_handler.get_trajectories(kwargs['task']) 353 | elif kind == 'list_computing_tasks': 354 | task_set_dicts = self.db_handler.list_computing_tasks() 355 | return task_set_dicts 356 | else: 357 | raise NotImplementedError 358 | 359 | 360 | 361 | 362 | 363 | 364 | 365 | 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | 377 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: Qcirc 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - argh=0.26.2=py36_1 7 | - blas=1.0=mkl 8 | - ca-certificates=2019.1.23=0 9 | - certifi=2018.11.29=py36_0 10 | - cycler=0.10.0=py_1 11 | - freetype=2.9.1=hb4e5f40_0 12 | - intel-openmp=2018.0.3=0 13 | - kiwisolver=1.0.1=py36h2d50403_2 14 | - libcxx=4.0.1=h579ed51_0 15 | - libcxxabi=4.0.1=hebd6815_0 16 | - libedit=3.1.20170329=hb402a30_2 17 | - libffi=3.2.1=h475c297_4 18 | - libgfortran=3.0.1=h93005f0_2 19 | - libpng=1.6.34=he12f830_0 20 | - matplotlib=2.2.3=py36h0e0179f_0 21 | - mkl=2018.0.3=1 22 | - mkl_fft=1.0.5=py36_0 23 | - mkl_random=1.0.1=py36_0 24 | - ncurses=6.1=h0a44026_0 25 | - numpy=1.15.0=py36h648b28d_0 26 | - numpy-base=1.15.0=py36h8a80b8c_0 27 | - openssl=1.1.1a=h1de35cc_0 28 | - pandas=0.23.4=py36h6440ff4_0 29 | - pathtools=0.1.2=py_1 30 | - patsy=0.5.0=py36_0 31 | - pip=18.0=py36_1 32 | - psutil=5.5.0=py36h1de35cc_0 33 | - pyparsing=2.2.0=py_1 34 | - python=3.6.8=haf84260_0 35 | - python-dateutil=2.7.3=py_0 36 | - pytz=2018.5=py_0 37 | - readline=7.0=hc1231fa_4 38 | - scipy=1.1.0=py36hf1f7d93_0 39 | - seaborn=0.9.0=py36_0 40 | - setuptools=40.0.0=py36_1 41 | - six=1.11.0=py36_1 42 | - sqlalchemy=1.2.10=py36h1de35cc_0 43 | - sqlite=3.26.0=ha441bb4_0 44 | - statsmodels=0.9.0=py36h1d22016_0 45 | - tk=8.6.8=ha441bb4_0 46 | - tornado=5.1=py36h470a237_1 47 | - wheel=0.31.1=py36_1 48 | - xz=5.2.4=h1de35cc_4 49 | - zlib=1.2.11=hf3cbc9b_2 50 | - pip: 51 | - argparse==1.4.0 52 | - aspy-yaml==1.1.1 53 | - atomicwrites==1.2.1 54 | - attrs==18.1.0 55 | - cached-property==1.4.3 56 | - cfgv==1.1.0 57 | - future==0.16.0 58 | - identify==1.1.4 59 | - macfsevents==0.8.1 60 | - mock==2.0.0 61 | - more-itertools==4.3.0 62 | - nodeenv==1.3.2 63 | - pbr==4.2.0 64 | - pluggy==0.7.1 65 | - pre-commit==1.10.5 66 | - py==1.6.0 67 | - pyswarms==0.3.1 68 | - pytest==3.6.4 69 | - pyyaml==3.13 70 | - toml==0.9.4 71 | - virtualenv==16.0.0 72 | - watchdog==0.9.0 73 | prefix: /anaconda3/envs/Qcirc 74 | 75 | -------------------------------------------------------------------------------- /main_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Multi-step workflow for 2-node flux 
qubit benchmark 4 | 5 | import numpy as np 6 | import time 7 | import pickle 8 | 9 | from circuit_searcher import CircuitSearcher 10 | 11 | 12 | if __name__ == '__main__': 13 | """ 14 | Set parameters and run the inverse design algorithm. 15 | 16 | general_params: 17 | solver: string | specifies circuit solver - 'JJcircuitSim' or '2-node' 18 | phiExt: array | external fluxes for which to solve circuit 19 | target_spectrum: array | target flux spectrum of circuit (used by specific loss functions only) 20 | 21 | Note: Task names are assumed to be unique. 22 | """ 23 | 24 | # Python simulation of 2-node circuits 25 | c_specs = {'dimension': 3, 'low': 0., 'high': 100, 'keep_prob': 1.} 26 | j_specs = {'dimension': 3, 'low': 0., 'high': 200, 'keep_num': 3} 27 | circuit_params = {'c_specs': c_specs, 'j_specs': j_specs, 'l_specs': None, 'phiOffs_specs': None} 28 | phiExt = np.linspace(0,1,41,endpoint=True) 29 | general_params = {'solver': '2-node', 'phiExt': phiExt} 30 | 31 | # Loss function settings 32 | with open('target_fluxqubit.p', 'rb') as content: target_info = pickle.load(content) 33 | ts_options = {'target_spectrum': target_info['spectrum'], 'include_symmetry': True} 34 | 35 | # Initialize circuit searcher 36 | circuit_searcher = CircuitSearcher(circuit_params, general_params, database_path = 'Experiments') 37 | 38 | # Monte Carlo (random) optimization 39 | mc_options = {'max_iters': 6, 'max_concurrent': 2, 'batch_size': 10} 40 | computing_task_0 = circuit_searcher.add_task( 41 | name ='random_search', 42 | designer='random', designer_options=mc_options, 43 | merit='TargetSpectrum', merit_options=ts_options) 44 | 45 | # Filtering for best circuits 46 | filtering_task_0 = circuit_searcher.add_task(name = 'filtering', designer = 'filter_db', designer_options = {'num_circuits': 2}) 47 | 48 | # L-BFGS-B optimization 49 | bfgs_options = {'max_iters': 2, 'max_concurrent': 2} 50 | ts_options = {'target_spectrum': target_info['spectrum'], 'include_symmetry': True} 51 | computing_task_2 = circuit_searcher.add_task( 52 | name='lbfgs', 53 | designer='scipy', designer_options=bfgs_options, 54 | merit='TargetSpectrum', merit_options=ts_options, use_library=True) 55 | 56 | tic_glob = time.time() 57 | circuit_searcher.execute() 58 | print('#### TOTAL TIME: {} s ####'.format(time.time()-tic_glob)) 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /main_fourcoupler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Multi-step workflow for 4-local coupler search 4 | 5 | import numpy as np 6 | import time 7 | import pickle 8 | 9 | from circuit_searcher import CircuitSearcher 10 | 11 | 12 | if __name__ == '__main__': 13 | """ 14 | Set parameters and run the inverse design algorithm. 15 | Note 1: Task names are assumed to be unique. 16 | Note 2: The JJcircuitSim circuit simulation module is *not* included on the SCILLA GitHub repo. 
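    Note 3: circuit_params spans the component search space - capacitances [fF], junctions [GHz],
    inductances [pH] (units as in the solver wrappers); 'low'/'high' bound each value, while
    'keep_prob'/'keep_num' appear to control how many drawn components are kept, and
    phiOffs_specs lists the candidate flux offsets for the circuit loops.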
17 | """ 18 | 19 | # Simulation of 3-node circuits (using JJcircuitSim V3.6e) 20 | c_specs = {'dimension': 6, 'low': 1., 'high': 100., 'keep_prob': 0.5} 21 | j_specs = {'dimension': 6, 'low': 99., 'high': 1982., 'keep_num': 3} 22 | l_specs = {'dimension': 6, 'low': 75., 'high': 300., 'keep_prob': 0.5} 23 | phiOffs_specs = {'dimension': 4, 'values': [0.0, 0.5]} 24 | circuit_params = {'c_specs': c_specs, 'j_specs': j_specs, 'l_specs': l_specs, 'phiOffs_specs': phiOffs_specs} 25 | general_params = {'solver': 'JJcircuitSimV3', 'phiExt': None, 'target_spectrum': None} 26 | 27 | # Loss function settings 28 | dw_options = {'max_peak': 1.5, 'max_split': 10, 'norm_p': 4, 'flux_sens': True, 'max_merit': 100} 29 | 30 | # Initialize circuit searcher 31 | circuit_searcher = CircuitSearcher(circuit_params, general_params, database_path = 'Experiments') 32 | 33 | # Monte Carlo optimization 34 | mc_options = {'max_iters': 3, 'max_concurrent': 2, 'batch_size': 10} 35 | computing_task_0 = circuit_searcher.add_task( 36 | name ='random_search', 37 | designer='random', designer_options=mc_options, 38 | merit='DoubleWell', merit_options=dw_options) 39 | 40 | # Filtering for best circuits 41 | filtering_task_0 = circuit_searcher.add_task(name = 'filtering', designer = 'filter_db', designer_options = {'num_circuits': 2}) 42 | 43 | # Swarm optimization 44 | swarm_options = {'max_iters': 2, 'max_concurrent': 2, 'n_particles': 2} 45 | computing_task_2 = circuit_searcher.add_task( 46 | name='swarm_search', 47 | designer='particle_swarms', designer_options=swarm_options, 48 | merit='DoubleWell', merit_options=dw_options, use_library=True) 49 | 50 | tic_glob = time.time() 51 | circuit_searcher.execute() 52 | print('#### TOTAL TIME: {} s ####'.format(time.time()-tic_glob)) 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /run_coupler.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #SBATCH -t 2-00:00 # time (day-hours:minutes) 3 | #SBATCH -c 64 # number of cores 4 | #SBATCH -N 1 # ensures all cores are on one machine 5 | #SBATCH --mem=256000 # memory pool for all cores (in MB) 6 | #SBATCH -p unrestricted # partition 7 | #SBATCH -o run_coupler.out # out file 8 | #SBATCH -e run_coupler.err # error log file 9 | #SBATCH --job-name=run_coupler # job name 10 | #SBATCH --mail-type=END,FAIL # email if job ends or fails 11 | #SBATCH --mail-user=tim_menke@g.harvard.edu # your email if you want job updates 12 | 13 | module load centos6/0.0.1-fasrc01 14 | module load Anaconda3/5.0.1-fasrc01 15 | module load mathematica/11.1.1-fasrc01 16 | # module load mathematica/11.3.0-fasrc01 17 | 18 | source activate Qcirc 19 | 20 | python circuit_searcher.py 21 | 22 | echo Finished! 23 | -------------------------------------------------------------------------------- /scratch_file: -------------------------------------------------------------------------------- 1 | Cpus_allowed_list: 0-16 2 | -------------------------------------------------------------------------------- /target_fluxqubit.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aspuru-guzik-group/SCILLA/f0291082990e0f99f8f9abf41ec405427cfe3752/target_fluxqubit.p --------------------------------------------------------------------------------