├── README.md
├── bithtm
│   ├── __init__.py
│   ├── regularizations.py
│   ├── utils.py
│   ├── networks.py
│   ├── reference_implementations.py
│   └── projections.py
├── example.py
└── legacy
    └── bithtm.py

/README.md:
--------------------------------------------------------------------------------
# bitHTM
A barely bare-bones NumPy implementation of Hierarchical Temporal Memory.
--------------------------------------------------------------------------------
/bithtm/__init__.py:
--------------------------------------------------------------------------------
from . import networks


SpatialPooler = networks.SpatialPooler
TemporalMemory = networks.TemporalMemory
HierarchicalTemporalMemory = networks.HierarchicalTemporalMemory
--------------------------------------------------------------------------------
/bithtm/regularizations.py:
--------------------------------------------------------------------------------
import numpy as np


class ExponentialBoosting:
    def __init__(
        self, output_dim, active_outputs,
        intensity=0.3, momentum=0.99
    ):
        self.density = active_outputs / output_dim
        self.intensity = intensity
        self.momentum = momentum

        self.duty_cycle = np.zeros(output_dim, dtype=np.float32)

    def process(self, input_activation):
        factor = np.exp(-(self.intensity / self.density) * self.duty_cycle)
        return factor * input_activation

    def update(self, active_input):
        self.duty_cycle *= self.momentum
        self.duty_cycle[active_input] += 1.0 - self.momentum


class GlobalInhibition:
    def __init__(self, active_outputs):
        self.active_outputs = active_outputs

    def process(self, input_activation):
        return np.argpartition(input_activation, -self.active_outputs)[-self.active_outputs:]
--------------------------------------------------------------------------------
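
For orientation, a minimal sketch (not part of the repository) of how these two regularizers compose inside the spatial pooler: overlaps are scaled down for columns with a high duty cycle, then global inhibition keeps the top-k columns:

import numpy as np
from bithtm.regularizations import ExponentialBoosting, GlobalInhibition

column_dim, active_columns = 2048, 40
boosting = ExponentialBoosting(column_dim, active_columns)
inhibition = GlobalInhibition(active_columns)

overlaps = np.random.rand(column_dim).astype(np.float32)
active = inhibition.process(boosting.process(overlaps))  # indices of the top 40 columns
boosting.update(active)  # winners' duty cycles rise, so their boost factor decays
assert len(active) == active_columns
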
/example.py:
--------------------------------------------------------------------------------
from bithtm import HierarchicalTemporalMemory
from bithtm.reference_implementations import TemporalMemory as ReferenceTemporalMemory

import numpy as np


class ReferenceHierarchicalTemporalMemory(HierarchicalTemporalMemory):
    def __init__(self, input_dim, column_dim, cell_dim, active_columns=None):
        super().__init__(
            input_dim, column_dim, cell_dim, active_columns=active_columns,
            temporal_memory=ReferenceTemporalMemory(column_dim, cell_dim)
        )


if __name__ == '__main__':
    import argparse
    import time


    parser = argparse.ArgumentParser()

    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--input_patterns', type=int, default=100)
    parser.add_argument('--input_dim', type=int, default=1000)
    parser.add_argument('--input_density', type=float, default=0.2)
    parser.add_argument('--input_noise_probability', type=float, default=0.05)

    parser.add_argument('--column_dim', type=int, default=2048)
    parser.add_argument('--cell_dim', type=int, default=32)
    parser.add_argument('--use_reference_implementation', action='store_true')

    args = parser.parse_args()

    inputs = np.random.rand(args.input_patterns, args.input_dim) < args.input_density

    if args.use_reference_implementation:
        htm = ReferenceHierarchicalTemporalMemory(args.input_dim, args.column_dim, args.cell_dim)
    else:
        htm = HierarchicalTemporalMemory(args.input_dim, args.column_dim, args.cell_dim)

    epoch_string_length = int(np.ceil(np.log10(args.epochs - 1)))
    pattern_string_length = int(np.ceil(np.log10(args.input_patterns - 1)))
    column_string_length = int(np.ceil(np.log10(args.column_dim - 1)))
    active_column_string_length = int(np.ceil(np.log10(htm.spatial_pooler.active_columns - 1)))

    start_time = time.time()

    for epoch in range(args.epochs):
        for input_index, curr_input in enumerate(inputs):
            prev_column_prediction = htm.temporal_memory.last_state.cell_prediction.max(axis=1)

            noisy_input = curr_input ^ (np.random.rand(args.input_dim) < args.input_noise_probability)
            sp_state, tm_state = htm.process(noisy_input)

            burstings = tm_state.active_column_bursting.sum()
            corrects = prev_column_prediction[sp_state.active_column].sum()
            incorrects = prev_column_prediction.sum() - corrects

            print(
                f'epoch {epoch:{epoch_string_length}d}, '
                f'pattern {input_index:{pattern_string_length}d}: '
                f'bursting columns: {burstings:{active_column_string_length}d}, '
                f'correct columns: {corrects:{active_column_string_length}d}, '
                f'incorrect columns: {incorrects:{column_string_length}d}'
            )

    print(f'{time.time() - start_time} seconds.')
--------------------------------------------------------------------------------
/bithtm/utils.py:
--------------------------------------------------------------------------------
import numpy as np


# NumPy's own bincount spells the keyword `minlength`, so this probe raises
# TypeError and the zero-padding shim below is the one normally used.
try:
    np.bincount([0], minLength=1)
    bincount = np.bincount
except TypeError:
    def bincount(x, weights=None, minLength=0):
        out = np.bincount(x, weights=weights)
        out = np.concatenate([out, np.zeros(max(minLength - len(out), 0), dtype=out.dtype)])
        return out

def arange_concatenated(lengths, border_offsets=None, lengths_cumsum=None):
    if lengths_cumsum is None:
        lengths_cumsum = lengths.cumsum()
    total_length = lengths_cumsum[-1]
    nonempty_row, = np.nonzero(lengths)
    row_borders = lengths_cumsum - lengths
    row_index = np.zeros(total_length, dtype=np.int32)
    row_index[row_borders[nonempty_row]] = np.arange(len(lengths), dtype=np.int32)[nonempty_row]
    row_index = np.maximum.accumulate(row_index)
    if border_offsets is not None:
        row_borders -= border_offsets
    col_index = np.arange(total_length, dtype=np.int32) - row_borders[row_index]
    return row_index, col_index
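
As a quick illustration (not part of the library), arange_concatenated maps a vector of per-row lengths to the (row, col) indices of the concatenated ragged ranges; empty rows are simply skipped:

import numpy as np
from bithtm.utils import arange_concatenated

rows, cols = arange_concatenated(np.array([2, 0, 3]))
print(rows)  # [0 0 2 2 2]
print(cols)  # [0 1 0 1 2]
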
def nonzero_bounded_2d(value, bounds, lengths=None, return_out_of_bounds=False):
    assert len(value.shape) == 2
    if lengths is None:
        lengths = (value != 0).sum(axis=1)
    bounded_lengths = np.minimum(lengths, bounds)
    lengths_cumsum = lengths.cumsum()
    bounded_lengths_cumsum = bounded_lengths.cumsum()
    border_offsets = lengths_cumsum - lengths
    row_index, col_index = arange_concatenated(bounded_lengths, border_offsets=border_offsets, lengths_cumsum=bounded_lengths_cumsum)
    _, col_nonzero = np.nonzero(value)
    nonzero_bounded = (row_index, col_nonzero[col_index])
    if return_out_of_bounds:
        oob_row_index, oob_col_index = arange_concatenated(lengths - bounded_lengths, border_offsets=bounded_lengths + border_offsets, lengths_cumsum=lengths_cumsum - bounded_lengths_cumsum)
        nonzero_oob = (oob_row_index, col_nonzero[oob_col_index])
        return nonzero_bounded, nonzero_oob
    return nonzero_bounded

def replace_free(free, dests, srcs, dest_index=None, free_lengths=None, src_valid=None, src_lengths=None, return_indices=False, return_residue_info=False):
    assert len(dests[0].shape) == len(free.shape) == len(srcs[0].shape) == 2
    if free_lengths is None:
        free_lengths = free.sum(axis=1)
    if src_lengths is None:
        src_lengths = src_valid.sum(axis=1) if src_valid is not None else srcs[0].shape[1]
    mutually_bounded_lengths = np.minimum(free_lengths, src_lengths)
    free_index = nonzero_bounded_2d(free, mutually_bounded_lengths, lengths=free_lengths)
    if return_residue_info:
        residue_lengths = src_lengths - mutually_bounded_lengths
        residue_index = arange_concatenated(residue_lengths)
    if dest_index is not None:
        assert dest_index.shape[0] == free.shape[0]
        free_index = (dest_index[free_index[0]], free_index[1])
    if src_valid is None:
        src_index = arange_concatenated(mutually_bounded_lengths)
        if return_residue_info:
            src_residue_index = (residue_index[0], residue_index[1] + mutually_bounded_lengths[residue_index[0]])
    else:
        src_index = nonzero_bounded_2d(src_valid, mutually_bounded_lengths, lengths=src_lengths, return_out_of_bounds=return_residue_info)
        if return_residue_info:
            src_index, src_residue_index = src_index
    for dest, src in zip(dests, srcs):
        if np.ndim(src) == 0:
            dest[free_index] = src
            continue
        dest[free_index] = src[src_index]
    returned_values = [mutually_bounded_lengths]
    if return_indices:
        returned_values += [free_index, src_index]
    if return_residue_info:
        returned_values += [residue_lengths, residue_index, src_residue_index]
    return tuple(returned_values)
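
Illustrative check (not part of the library): nonzero_bounded_2d returns at most `bounds` nonzero column indices per row, scanning left to right:

import numpy as np
from bithtm.utils import nonzero_bounded_2d

value = np.array([[1, 0, 1, 1],
                  [0, 1, 0, 0]])
rows, cols = nonzero_bounded_2d(value, 2)
print(rows)  # [0 0 1]
print(cols)  # [0 2 1] -- the third nonzero of row 0 is cut off
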
class DynamicArray2D:
    def __init__(self, dtype, size=(0, 0), capacity=None, growth_exponential=(True, True), on_grow=None):
        if capacity is None:
            capacity = (0, 0)
        assert len(size) == len(capacity) == len(growth_exponential) == 2
        capacity = tuple(np.maximum(capacity, size))

        self.dtype = dtype
        self.capacity = tuple(capacity)
        self.growth_exponential = tuple(growth_exponential)
        self.on_grow = on_grow

        self.size = tuple(size)
        self.values = self.initialize_values(capacity=self.capacity)

    def initialize_values(self, capacity=None):
        if capacity is None:
            capacity = self.capacity
        return np.empty(capacity, dtype=self.dtype)

    def evaluate_capacity(self, length, axis):
        if not self.growth_exponential[axis]:
            return length
        return 2 ** int(np.ceil(np.log2(length)))

    def __len__(self):
        return self.size[0]

    def __getitem__(self, index):
        return self.values[:self.size[0], :self.size[1]][index]

    def __setitem__(self, index, new_values):
        self.values[:self.size[0], :self.size[1]][index] = new_values

    def add(self, added_values, axis):
        assert len(added_values.shape) == 2 and added_values.shape[1 - axis] == self.size[1 - axis]
        new_size = list(self.size)
        new_size[axis] += added_values.shape[axis]
        if new_size[axis] > self.capacity[axis]:
            new_capacity = list(self.capacity)
            new_capacity[axis] = self.evaluate_capacity(new_size[axis], axis)
            new_values = self.initialize_values(capacity=new_capacity)
            new_values[:self.capacity[0], :self.capacity[1]] = self.values
            if self.on_grow is not None:
                self.on_grow(new_values, tuple(new_size), tuple(new_capacity), axis)
            self.capacity = tuple(new_capacity)
            self.values = new_values
        index = [slice(None, self.size[1 - axis])]
        index.insert(axis, slice(self.size[axis], new_size[axis]))
        self.values[tuple(index)] = added_values
        self.size = tuple(new_size)

    def add_rows(self, added_values):
        return self.add(added_values, 0)

    def add_cols(self, added_values):
        return self.add(added_values, 1)
--------------------------------------------------------------------------------
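
A small usage sketch of DynamicArray2D, the amortized-growth buffer the projections are built on (illustrative, not from the repository):

import numpy as np
from bithtm.utils import DynamicArray2D

arr = DynamicArray2D(np.int32, size=(0, 3))
arr.add_rows(np.arange(6, dtype=np.int32).reshape(2, 3))
print(len(arr))  # 2 logical rows
print(arr[:])    # [[0 1 2] [3 4 5]]; capacity grows in powers of two per axis
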
/bithtm/networks.py:
--------------------------------------------------------------------------------
from .projections import DenseProjection, PredictiveProjection
from .regularizations import ExponentialBoosting, GlobalInhibition

import numpy as np


class SpatialPooler:
    class State:
        def __init__(self, active_column, overlaps=None, boosted_overlaps=None):
            self.active_column = active_column
            self.overlaps = overlaps
            self.boosted_overlaps = boosted_overlaps

    def __init__(
        self, input_dim, column_dim, active_columns,
        proximal_projection=None, boosting=None, inhibition=None
    ):
        self.input_dim = input_dim
        self.column_dim = column_dim
        self.active_columns = active_columns

        self.proximal_projection = proximal_projection or DenseProjection(input_dim, column_dim)
        self.boosting = boosting or ExponentialBoosting(column_dim, active_columns)
        self.inhibition = inhibition or GlobalInhibition(active_columns)

    def process(self, input, learning=True):
        overlaps = self.proximal_projection.process(input)
        boosted_overlaps = self.boosting.process(overlaps)
        active_column = self.inhibition.process(boosted_overlaps)

        if learning:
            self.proximal_projection.update(input, active_column)
            self.boosting.update(active_column)

        return self.State(active_column, overlaps=overlaps, boosted_overlaps=boosted_overlaps)


class TemporalMemory:
    class State:
        def __init__(self, active_cell, winner_cell=None, cell_activation=None, cell_prediction=None, active_column_bursting=None, distal_state=None):
            self.active_cell = active_cell
            self.winner_cell = winner_cell
            self.cell_activation = cell_activation
            self.cell_prediction = cell_prediction
            self.active_column_bursting = active_column_bursting
            self.distal_state = distal_state

    def __init__(
        self, column_dim, cell_dim,
        distal_projection=None
    ):
        self.column_dim = column_dim
        self.cell_dim = cell_dim

        self.distal_projection = distal_projection or PredictiveProjection(self.column_dim * self.cell_dim)

        self.last_state = self.get_empty_state()

    def get_empty_state(self):
        return self.State(
            (np.empty(0, dtype=np.int32), np.empty(0, dtype=np.int32)),
            cell_activation=np.zeros((self.column_dim, self.cell_dim), dtype=np.bool_),
            cell_prediction=np.zeros((self.column_dim, self.cell_dim), dtype=np.bool_),
            active_column_bursting=np.empty(0, dtype=np.bool_)
        )

    def flatten_cell(self, cell):
        if cell is None:
            return None
        assert len(cell) == 2 and len(cell[0].shape) == 1
        return cell[0] * self.cell_dim + cell[1]

    def evaluate_cell_best_matching(self, predictive_projection, prev_projection_state, relevant_column, epsilon=1e-8):
        if prev_projection_state is None:
            return np.zeros((len(relevant_column), 1), dtype=np.bool_), np.zeros((len(relevant_column), self.cell_dim), dtype=np.bool_)
        max_jittered_potential, _ = predictive_projection.get_jittered_potential_info(prev_projection_state)
        cell_max_jittered_potential = max_jittered_potential.reshape(self.column_dim, self.cell_dim)
        cell_max_jittered_potential = cell_max_jittered_potential[relevant_column]
        column_max_jittered_potential = cell_max_jittered_potential.max(axis=1, keepdims=True)
        column_matching = column_max_jittered_potential >= predictive_projection.segment_matching_threshold
        cell_best_matching = np.abs(cell_max_jittered_potential - column_max_jittered_potential) < epsilon
        return column_matching, cell_best_matching

    def evaluate_cell_least_used(self, predictive_projection, relevant_column, epsilon=1e-8):
        cell_segments = predictive_projection.bundle_segments.reshape(self.column_dim, self.cell_dim)
        cell_segments_jittered = cell_segments[relevant_column].astype(np.float32)
        cell_segments_jittered += np.random.rand(*cell_segments_jittered.shape)
        cell_least_used = np.abs(cell_segments_jittered - cell_segments_jittered.min(axis=1, keepdims=True)) < epsilon
        return cell_least_used

    def process(self, sp_state, prev_state=None, learning=True, return_winner_cell=True, epsilon=1e-8):
        if prev_state is None:
            prev_state = self.last_state

        active_column = sp_state.active_column
        active_column_cell_prediction = prev_state.cell_prediction[active_column]
        active_column_bursting = ~active_column_cell_prediction.max(axis=1, keepdims=True)

        if learning or return_winner_cell:
            column_matching, cell_best_matching = self.evaluate_cell_best_matching(self.distal_projection, prev_state.distal_state, active_column, epsilon=epsilon)
            least_used_cell = self.evaluate_cell_least_used(self.distal_projection, active_column, epsilon=epsilon)
            active_column_cell_winner = active_column_cell_prediction | (active_column_bursting & np.where(column_matching, cell_best_matching, least_used_cell))
            winner_cell = np.where(active_column_cell_winner)
            winner_cell = (active_column[winner_cell[0]], winner_cell[1])

        if learning:
            column_punishment = np.ones(self.column_dim, dtype=np.bool_)
            column_punishment[active_column] = False
            self.distal_projection.update(
                prev_state.distal_state,
                prev_state.cell_activation.flatten(), self.flatten_cell(winner_cell), np.repeat(column_punishment, self.cell_dim),
                winner_input=self.flatten_cell(prev_state.winner_cell), epsilon=epsilon
            )

        active_column_cell_activation = active_column_cell_prediction | active_column_bursting
        active_cell = np.where(active_column_cell_activation)
        active_cell = (active_column[active_cell[0]], active_cell[1])
        cell_activation = np.zeros((self.column_dim, self.cell_dim), dtype=np.bool_)
        cell_activation[active_column] = active_column_cell_activation

        distal_state = self.distal_projection.process(self.flatten_cell(active_cell), return_jittered_potential_info=return_winner_cell)
        cell_prediction = distal_state.prediction.reshape(self.column_dim, self.cell_dim) > epsilon

        curr_state = self.State(active_cell, cell_activation=cell_activation, cell_prediction=cell_prediction, active_column_bursting=active_column_bursting, distal_state=distal_state)
        if learning or return_winner_cell:
            curr_state.winner_cell = winner_cell
        self.last_state = curr_state
        return curr_state


class HierarchicalTemporalMemory:
    def __init__(
        self, input_dim, column_dim, cell_dim, active_columns=None,
        spatial_pooler=None, temporal_memory=None
    ):
        if active_columns is None:
            active_columns = round(column_dim * 0.02)

        self.column_dim = column_dim
        self.cell_dim = cell_dim
        self.active_columns = active_columns

        self.spatial_pooler = spatial_pooler or SpatialPooler(input_dim, column_dim, active_columns)
        self.temporal_memory = temporal_memory or TemporalMemory(column_dim, cell_dim)

    def process(self, input, learning=True):
        sp_state = self.spatial_pooler.process(input, learning=learning)
        tm_state = self.temporal_memory.process(sp_state, learning=learning)
        return sp_state, tm_state
--------------------------------------------------------------------------------
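
A minimal sketch (not part of the repository) of running the spatial pooler on its own; process both encodes the input and, with the default learning=True, adapts permanences and duty cycles:

import numpy as np
from bithtm.networks import SpatialPooler

sp = SpatialPooler(input_dim=1000, column_dim=2048, active_columns=40)
state = sp.process(np.random.rand(1000) < 0.2)
print(state.active_column.shape)  # (40,) winning column indices
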
/bithtm/reference_implementations.py:
--------------------------------------------------------------------------------
import numpy as np


class TemporalMemory:
    class State:
        def __init__(self, column_dim, cell_dim):
            self.active_cells = set()
            self.winner_cells = set()
            self.active_segments = set()
            self.matching_segments = set()
            self.segment_num_active_potential_synapses = {}

            self.active_cell = (np.empty(0, dtype=np.int32), np.empty(0, dtype=np.int32))
            self.winner_cell = (np.empty(0, dtype=np.int32), np.empty(0, dtype=np.int32))
            self.cell_activation = np.zeros((column_dim, cell_dim), dtype=np.bool_)
            self.cell_prediction = np.zeros((column_dim, cell_dim), dtype=np.bool_)
            self.active_column_bursting = np.empty(0, dtype=np.bool_)
            self.distal_state = None

    def __init__(self, column_dim, cell_dim):
        self.column_dim = column_dim
        self.cell_dim = cell_dim

        self.permanence_initial = 0.21
        self.permanence_threshold = 0.5
        self.permanence_increment = 0.1
        self.permanence_decrement = 0.1
        self.permanence_punishment = 0.01

        self.segment_activation_threshold = 15
        self.segment_matching_threshold = 15
        self.segment_sampling_synapses = 32

        self.columns = np.arange(self.column_dim)
        self.column_cells = np.arange(self.column_dim * self.cell_dim).reshape(self.column_dim, self.cell_dim)
        self.cell_segments = [set() for _ in range(self.column_dim * self.cell_dim)]
        self.segment_cell = []
        self.segment_synapses = []
        self.synapse_presynaptic_cell = {}
        self.synapse_permanence = {}
        self.next_synapse = 0

        self.last_state = self.get_empty_state()

    def get_empty_state(self):
        return self.State(self.column_dim, self.cell_dim)

    def copy_custom(self, custom_tm):
        assert self.column_dim == custom_tm.column_dim and self.cell_dim == custom_tm.cell_dim

        self.segment_cell = custom_tm.distal_projection.segment_bundle[:].squeeze(1).tolist()
        self.cell_segments = [set() for _ in range(self.column_dim * self.cell_dim)]
        for segment, cell in enumerate(self.segment_cell):
            self.cell_segments[cell].add(segment)

        custom_segment_projection = custom_tm.distal_projection.segment_projection
        self.segment_synapses = []
        self.synapse_presynaptic_cell = {}
        self.synapse_permanence = {}
        self.next_synapse = 0
        for synapses, permanences in zip(custom_segment_projection.output_edge[:], custom_segment_projection.output_permanence[:]):
            self.segment_synapses.append([])
            for synapse, permanence in zip(synapses, permanences):
                if synapse == custom_segment_projection.invalid_output_edge:
                    continue
                presynaptic_cell = custom_segment_projection.get_output_edge_target(synapse)
                self.segment_synapses[-1].append(self.next_synapse)
                self.synapse_presynaptic_cell[self.next_synapse] = presynaptic_cell
                self.synapse_permanence[self.next_synapse] = permanence
                self.next_synapse += 1

        empty_state = self.get_empty_state()
        self.last_state.winner_cells = empty_state.winner_cells
        self.last_state.active_segments = empty_state.active_segments
        self.last_state.matching_segments = empty_state.matching_segments
        self.last_state.segment_num_active_potential_synapses = empty_state.segment_num_active_potential_synapses

        self.last_state.active_cells = custom_tm.flatten_cell(custom_tm.last_state.active_cell).tolist()
        if custom_tm.last_state.winner_cell is not None:
            self.last_state.winner_cells = custom_tm.flatten_cell(custom_tm.last_state.winner_cell).tolist()

        custom_distal_state = custom_tm.last_state.distal_state
        if custom_distal_state is not None:
            matching_segment = custom_distal_state.matching_segment
            self.last_state.active_segments = set(matching_segment[custom_distal_state.matching_segment_active].tolist())
            self.last_state.matching_segments = set(matching_segment.tolist())
            for segment, num_active_potential_synapses in zip(matching_segment, custom_distal_state.segment_potential[matching_segment]):
                self.last_state.segment_num_active_potential_synapses[segment] = num_active_potential_synapses

    def activate_predicted_column(self, column, prev_state, curr_state, learning):
        for segment in self.segments_for_column(column, prev_state.active_segments):
            curr_state.active_cells.add(self.segment_cell[segment])
            curr_state.winner_cells.add(self.segment_cell[segment])

            if learning:
                for synapse in self.segment_synapses[segment]:
                    if self.synapse_presynaptic_cell[synapse] in prev_state.active_cells:
                        self.synapse_permanence[synapse] += self.permanence_increment
                    else:
                        self.synapse_permanence[synapse] -= self.permanence_decrement

                new_synapse_count = self.segment_sampling_synapses - self.num_active_potential_synapses(prev_state, segment)
                self.grow_synapses(segment, new_synapse_count, prev_state)

    def burst_column(self, column, prev_state, curr_state, learning):
        for cell in self.column_cells[column]:
            curr_state.active_cells.add(cell)

        if len(self.segments_for_column(column, prev_state.matching_segments)) > 0:
            learning_segment = self.best_matching_segment(column, prev_state)
            winner_cell = self.segment_cell[learning_segment]
        else:
            winner_cell = self.least_used_cell(column)
            if learning:
                learning_segment = self.grow_new_segment(winner_cell) if len(prev_state.winner_cells) > 0 else None

        curr_state.winner_cells.add(winner_cell)

        if learning and learning_segment is not None:
            for synapse in self.segment_synapses[learning_segment]:
                if self.synapse_presynaptic_cell[synapse] in prev_state.active_cells:
                    self.synapse_permanence[synapse] += self.permanence_increment
                else:
                    self.synapse_permanence[synapse] -= self.permanence_decrement

            new_synapse_count = self.segment_sampling_synapses - self.num_active_potential_synapses(prev_state, learning_segment)
            self.grow_synapses(learning_segment, new_synapse_count, prev_state)

    def punish_predicted_column(self, column, prev_state, learning):
        if learning:
            for segment in self.segments_for_column(column, prev_state.matching_segments):
                for synapse in self.segment_synapses[segment]:
                    if self.synapse_presynaptic_cell[synapse] in prev_state.active_cells:
                        self.synapse_permanence[synapse] -= self.permanence_punishment

    def grow_new_segment(self, cell):
        new_segment = len(self.segment_cell)
        self.cell_segments[cell].add(new_segment)
        self.segment_cell.append(cell)
        self.segment_synapses.append([])
        return new_segment

    def grow_synapses(self, segment, new_synapse_count, prev_state):
        candidates = list(prev_state.winner_cells)
        while len(candidates) > 0 and new_synapse_count > 0:
            presynaptic_cell = np.random.choice(candidates)
            candidates.remove(presynaptic_cell)

            already_connected = False
            for synapse in self.segment_synapses[segment]:
                if self.synapse_presynaptic_cell[synapse] == presynaptic_cell:
                    already_connected = True
                    break

            if not already_connected:
                self.create_new_synapse(segment, presynaptic_cell, self.permanence_initial)
                new_synapse_count -= 1

    def create_new_synapse(self, segment, presynaptic_cell, permanence):
        new_synapse = self.next_synapse
        self.next_synapse += 1
        self.segment_synapses[segment].append(new_synapse)
        self.synapse_presynaptic_cell[new_synapse] = presynaptic_cell
        self.synapse_permanence[new_synapse] = permanence
        return new_synapse

    def least_used_cell(self, column):
        fewest_segments = np.inf
        for cell in self.column_cells[column]:
            fewest_segments = min(fewest_segments, len(self.cell_segments[cell]))

        least_used_cells = []
        for cell in self.column_cells[column]:
            if len(self.cell_segments[cell]) == fewest_segments:
                least_used_cells.append(cell)

        return np.random.choice(least_used_cells)

    def best_matching_segment(self, column, prev_state):
        best_matching_segment = None
        best_score = -1
        for segment in self.segments_for_column(column, prev_state.matching_segments):
            if self.num_active_potential_synapses(prev_state, segment) > best_score:
                best_matching_segment = segment
                best_score = self.num_active_potential_synapses(prev_state, segment)

        return best_matching_segment

    def segments_for_column(self, column, segments):
        owning_segments = set()
        for cell in self.column_cells[column]:
            owning_segments.update(segments.intersection(self.cell_segments[cell]))
        return owning_segments

    def num_active_potential_synapses(self, state, segment):
        if segment not in state.segment_num_active_potential_synapses:
            return 0
        return state.segment_num_active_potential_synapses[segment]

    def cleanup_synapses(self, segment):
        synapses = self.segment_synapses[segment]
        synapse = 0
        while synapse < len(synapses):
            if self.synapse_permanence[synapses[synapse]] >= 0:
                synapse += 1
                continue
            del self.synapse_presynaptic_cell[synapses[synapse]]
            del self.synapse_permanence[synapses[synapse]]
            del synapses[synapse]

    def process(self, sp_state, prev_state=None, learning=True):
        if prev_state is None:
            prev_state = self.last_state
        curr_state = self.get_empty_state()

        for column in self.columns:
            if column in sp_state.active_column:
                if len(self.segments_for_column(column, prev_state.active_segments)) > 0:
                    self.activate_predicted_column(column, prev_state, curr_state, learning)
                else:
                    self.burst_column(column, prev_state, curr_state, learning)
            else:
                if len(self.segments_for_column(column, prev_state.matching_segments)) > 0:
                    self.punish_predicted_column(column, prev_state, learning)

        for segment in prev_state.matching_segments:
            self.cleanup_synapses(segment)

        for segment, synapses in enumerate(self.segment_synapses):
            num_active_connected = 0
            num_active_potential = 0
            for synapse in synapses:
                if self.synapse_presynaptic_cell[synapse] in curr_state.active_cells:
                    if self.synapse_permanence[synapse] >= self.permanence_threshold:
                        num_active_connected += 1

                    if self.synapse_permanence[synapse] >= 0:
                        num_active_potential += 1

            if num_active_connected >= self.segment_activation_threshold:
                curr_state.active_segments.add(segment)

            if num_active_potential >= self.segment_matching_threshold:
                curr_state.matching_segments.add(segment)

            curr_state.segment_num_active_potential_synapses[segment] = num_active_potential

        curr_state.active_cell = divmod(np.array(list(curr_state.active_cells)), self.cell_dim)
        curr_state.winner_cell = divmod(np.array(list(curr_state.winner_cells)), self.cell_dim)
        curr_state.cell_activation[curr_state.active_cell] = True
        for segment in curr_state.active_segments:
            curr_state.cell_prediction[divmod(self.segment_cell[segment], self.cell_dim)] = True
        curr_state.active_column_bursting = ~prev_state.cell_prediction[sp_state.active_column].max(axis=1, keepdims=True)

        self.last_state = curr_state
        return curr_state


# legacy: it doesn't work anymore.
class RNGSyncedTemporalMemory(TemporalMemory):
    def __init__(self, column_dim, cell_dim):
        super().__init__(column_dim, cell_dim)

        self.cell_segments_jitter = None
        self.matching_segment_potential_jitter = None
        self.synapse_priority_jitter = None
        self.next_synapse_priority_jitter = 0

    def grow_synapses(self, segment, new_synapse_count, prev_state):
        for candidate in np.argsort(self.synapse_priority_jitter[self.next_synapse_priority_jitter]):
            presynaptic_cell = prev_state.winner_cells[candidate]

            already_connected = False
            for synapse in self.segment_synapses[segment]:
                if self.synapse_presynaptic_cell[synapse] == presynaptic_cell:
                    already_connected = True
                    break

            if not already_connected:
                new_synapse = self.create_new_synapse(segment, presynaptic_cell, self.permanence_initial)
                new_synapse_count -= 1
                if new_synapse_count <= 0:
                    break

        self.next_synapse_priority_jitter += 1

    def least_used_cell(self, column):
        cell_segments_jittered = [len(self.cell_segments[cell]) for cell in self.column_cells[column]] + self.cell_segments_jitter[column]
        return column * self.cell_dim + cell_segments_jittered.argmin()

    def best_matching_segment(self, column, prev_state):
        segments = self.segments_for_column(column, prev_state.matching_segments)
        segment_matching_index = [prev_state.matching_segments.index(segment) for segment in segments]
        matching_segment_potential_jittered = [self.num_active_potential_synapses(prev_state, segment) for segment in segments] + self.matching_segment_potential_jitter[segment_matching_index]
        return segments[matching_segment_potential_jittered.argmax()]

    def process(self, sp_state, prev_state=None, learning=True):
        if prev_state is None:
            prev_state = self.last_state

        self.cell_segments_jitter = np.zeros((self.column_dim, self.cell_dim), dtype=np.float32)
        self.cell_segments_jitter[sp_state.active_column] = np.random.rand(len(sp_state.active_column), self.cell_dim).astype(np.float32)

        if len(prev_state.winner_cells) > 0:
            num_winner_segments = 0
            for active_column in sp_state.active_column:
                num_winner_segments += max(len(self.segments_for_column(active_column, prev_state.active_segments)), 1)

            self.synapse_priority_jitter = np.random.rand(num_winner_segments, len(prev_state.winner_cells) + 1).astype(np.float32)
            self.synapse_priority_jitter = self.synapse_priority_jitter[:, :-1]
            self.next_synapse_priority_jitter = 0

        curr_state = super().process(sp_state, prev_state=prev_state, learning=learning)
        self.matching_segment_potential_jitter = np.random.rand(len(curr_state.matching_segments)).astype(np.float32)

        return curr_state
--------------------------------------------------------------------------------
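
Hedged sketch of what copy_custom is for: mirroring the vectorized temporal memory into this reference implementation so the two can be stepped and compared (exact agreement on later steps is not guaranteed, since the two draw their random jitter differently):

import numpy as np
from bithtm import HierarchicalTemporalMemory
from bithtm.reference_implementations import TemporalMemory as ReferenceTemporalMemory

htm = HierarchicalTemporalMemory(1000, 2048, 32)
htm.process(np.random.rand(1000) < 0.2)

ref = ReferenceTemporalMemory(2048, 32)
ref.copy_custom(htm.temporal_memory)  # import segments, synapses, and last_state
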
/bithtm/projections.py:
--------------------------------------------------------------------------------
from .utils import DynamicArray2D, bincount, replace_free

import numpy as np


class DenseProjection:
    def __init__(
        self, input_dim, output_dim,
        permanence_mean=0.0, permanence_std=0.1,
        permanence_threshold=0.0, permanence_increment=0.03, permanence_decrement=0.015
    ):
        self.permanence_threshold = permanence_threshold
        self.permanence_increment = permanence_increment
        self.permanence_decrement = permanence_decrement

        self.permanence = np.random.randn(output_dim, input_dim) * permanence_std + permanence_mean

    def process(self, input_activation):
        weight = self.permanence >= self.permanence_threshold
        overlaps = (weight & input_activation).sum(axis=1)
        return overlaps

    def update(self, input_activation, learning_output):
        self.permanence[learning_output] += input_activation * (self.permanence_increment + self.permanence_decrement) - self.permanence_decrement
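
DenseProjection in isolation (illustrative, not from the repository): overlap is the count of connected synapses (permanence at or above threshold) on active inputs, and update nudges the selected outputs' permanences toward the input pattern:

import numpy as np
from bithtm.projections import DenseProjection

proj = DenseProjection(input_dim=100, output_dim=16)
x = np.random.rand(100) < 0.2
overlaps = proj.process(x)        # shape (16,), per-output overlap count
proj.update(x, np.array([3, 7]))  # reinforce outputs 3 and 7 toward x
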

class SparseProjection:
    def __init__(
        self, input_dim, output_dim=0,
        output_growth_exponential=True, edge_growth_exponential=True
    ):
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.invalid_input_edge = 0
        self.invalid_output_edge = self.input_dim

        # TODO: optimize by separately tracking completely empty space of edges per input/output. -> use for adding edges and projection. (max cut-off)

        self.input_edge = DynamicArray2D(np.int32, size=(self.input_dim + 1, 0), growth_exponential=(False, edge_growth_exponential), on_grow=self.on_input_edge_grow)

        self.output_edges = DynamicArray2D(np.int32, size=(self.output_dim, 1), growth_exponential=(output_growth_exponential, False))
        self.output_edge = DynamicArray2D(np.int32, size=(self.output_dim, 0), growth_exponential=(output_growth_exponential, edge_growth_exponential), on_grow=self.on_output_edge_grow)
        self.output_permanence = DynamicArray2D(np.float32, size=(self.output_dim, 0), growth_exponential=(output_growth_exponential, edge_growth_exponential), on_grow=self.on_output_permanence_grow)

    def on_input_edge_grow(self, new_values, new_size, new_capacity, axis):
        assert axis == 1
        new_values[:, new_size[1]:new_capacity[1]] = self.invalid_input_edge

    def on_output_edge_grow(self, new_values, new_size, new_capacity, axis):
        index = [slice(None)]
        index.insert(axis, slice(new_size[axis], new_capacity[axis]))
        new_values[tuple(index)] = self.invalid_output_edge

    def on_output_permanence_grow(self, new_values, new_size, new_capacity, axis):
        index = [slice(None)]
        index.insert(axis, slice(new_size[axis], new_capacity[axis]))
        new_values[tuple(index)] = -1.0

    def get_output_edge_target(self, output_edge):
        return output_edge % (self.input_dim + 1)

    def pack_output_edge(self, target_input, input_edge):
        return input_edge * (self.input_dim + 1) + target_input

    def unpack_output_edge(self, output_edge):
        input_edge, target_input = np.divmod(output_edge, self.input_dim + 1)
        return target_input, input_edge

    def pad_input_activation(self, active_input=None, input_activation=None):
        assert active_input is not None or input_activation is not None
        padded_input_activation = np.zeros(self.input_dim + 1, dtype=np.bool_)
        if active_input is not None:
            padded_input_activation[:-1][active_input] = True
        else:
            padded_input_activation[:-1] = input_activation
        return padded_input_activation

    def add_output(self, added_outputs, edges_threshold):
        replaced_output, = np.where(self.output_edges[:].squeeze(1) < edges_threshold)
        replaced_output = replaced_output[:added_outputs]
        self.input_edge[self.unpack_output_edge(self.output_edge[replaced_output])] = self.invalid_input_edge
        self.output_edges[replaced_output] = 0
        self.output_edge[replaced_output] = self.invalid_output_edge
        self.output_permanence[replaced_output] = -1.0
        added_outputs -= len(replaced_output)
        if added_outputs == 0:
            return replaced_output, np.empty(0, dtype=np.int32)

        self.output_edges.add_rows(np.zeros((added_outputs, 1), dtype=self.output_edges.dtype))
        self.output_edge.add_rows(np.full((added_outputs, self.output_edge.size[1]), self.invalid_output_edge, dtype=self.output_edge.dtype))
        self.output_permanence.add_rows(np.full((added_outputs, self.output_permanence.size[1]), -1.0, dtype=self.output_permanence.dtype))
        new_output = np.arange(self.output_dim, self.output_dim + added_outputs)
        self.output_dim += added_outputs
        return replaced_output, new_output

    def update_permanence(self, padded_input_activation, learning_output, active_edge_permanence_change, inactive_edge_permanence_change):
        learning_output_edge = self.output_edge[learning_output]
        learning_output_edge_valid = learning_output_edge != self.invalid_output_edge
        learning_target_input, learning_input_edge = self.unpack_output_edge(learning_output_edge)
        edge_activation = padded_input_activation[learning_target_input]
        naive_permanence_change = edge_activation * (active_edge_permanence_change - inactive_edge_permanence_change) + inactive_edge_permanence_change
        updated_permanence = self.output_permanence[learning_output] + learning_output_edge_valid * naive_permanence_change
        self.output_permanence[learning_output] = updated_permanence
        if min(active_edge_permanence_change, inactive_edge_permanence_change) < 0:
            updated_permanence_invalid = updated_permanence < 0.0
            self.output_edges[learning_output] -= (learning_output_edge_valid & updated_permanence_invalid).sum(axis=1, keepdims=True)
            self.output_edge[learning_output] = np.where(updated_permanence_invalid, self.invalid_output_edge, learning_output_edge)
            self.input_edge[learning_target_input, learning_input_edge] = np.where(updated_permanence_invalid, self.invalid_input_edge, np.expand_dims(1 + learning_output, 1))

    def add_edge(self, padded_input_activation, winner_input, learning_output, permanence_initial, min_active_edges):
        learning_edge = self.output_edge[learning_output]
        learning_edge_target = self.get_output_edge_target(learning_edge)
        output_active_edges = padded_input_activation[learning_edge_target].sum(axis=1)
        added_output_edges = np.clip(min_active_edges - output_active_edges, 0, min(min_active_edges, len(winner_input)))

        whole_input_to_winner = np.full(self.input_dim + 1, len(winner_input), dtype=np.int32)
        whole_input_to_winner[winner_input] = np.arange(len(winner_input))

        edge_priority = np.random.rand(len(learning_output), len(winner_input) + 1).astype(np.float32)
        np.put_along_axis(edge_priority, whole_input_to_winner[learning_edge_target], np.inf, axis=1)
        edge_priority = edge_priority[:, :-1]
        edge_absent = edge_priority < 1.0
        edge_prioritized = np.zeros(edge_priority.shape, dtype=np.bool_)
        np.put_along_axis(edge_prioritized, np.argsort(edge_priority, axis=1), np.expand_dims(np.arange(len(winner_input)), 0) < np.expand_dims(added_output_edges, 1), axis=1)
        edge_added = edge_absent & edge_prioritized
        added_output_edges = edge_added.sum(axis=1)

        added_input_edge_target = np.tile(1 + learning_output, (len(winner_input), 1))
        replaced_edges, free_index, src_index, residue_edges, residue_index, src_residue_index = replace_free(
            self.input_edge[winner_input] == self.invalid_input_edge, [self.input_edge[:]], [added_input_edge_target],
            dest_index=winner_input, src_valid=edge_added.T, return_indices=True, return_residue_info=True
        )
        max_new_input_edges = residue_edges.max(initial=0)
        if max_new_input_edges > 0:
            prev_max_input_edges = self.input_edge.size[1]
            new_input_edge = (winner_input[residue_index[0]], residue_index[1])
            new_input_edge_target = np.full((self.input_dim + 1, max_new_input_edges), self.invalid_input_edge, dtype=self.input_edge.dtype)
            new_input_edge_target[new_input_edge] = 1 + learning_output[src_residue_index[1]]
            self.input_edge.add_cols(new_input_edge_target)
            new_input_edge = (new_input_edge[0], prev_max_input_edges + new_input_edge[1])

        added_output_edge_target = np.full((len(learning_output), len(winner_input)), self.invalid_output_edge, dtype=self.output_edge.dtype)
        added_output_edge_target[src_index[::-1]] = self.pack_output_edge(*free_index)
        if max_new_input_edges > 0:
            added_output_edge_target[src_residue_index[::-1]] = self.pack_output_edge(*new_input_edge)

        replaced_edges, residue_edges, residue_index, src_residue_index = replace_free(
            learning_edge == self.invalid_output_edge, [self.output_edge[:], self.output_permanence[:]], [added_output_edge_target, permanence_initial],
            dest_index=learning_output, src_valid=edge_added, src_lengths=added_output_edges, return_residue_info=True
        )
        max_new_output_edges = residue_edges.max(initial=0)
        if max_new_output_edges > 0:
            new_output_edge = (learning_output[residue_index[0]], residue_index[1])
            new_output_edge_target = np.full((self.output_dim, max_new_output_edges), self.invalid_output_edge, dtype=self.output_edge.dtype)
            new_output_edge_permanence = np.full(new_output_edge_target.shape, -1.0, dtype=self.output_permanence.dtype)
            new_output_edge_target[new_output_edge] = added_output_edge_target[src_residue_index]
            new_output_edge_permanence[new_output_edge] = permanence_initial
            self.output_edge.add_cols(new_output_edge_target)
            self.output_permanence.add_cols(new_output_edge_permanence)
        self.output_edges[learning_output] += np.expand_dims(added_output_edges, 1)

    def process(self, active_input=None, padded_input_activation=None, invoked_output=None, permanence_threshold=None):
        if invoked_output is None and (padded_input_activation is not None or permanence_threshold is not None):
            raise NotImplementedError()

        if invoked_output is not None:
            if padded_input_activation is None:
                padded_input_activation = self.pad_input_activation(active_input=active_input)
            edge_target = self.get_output_edge_target(self.output_edge[invoked_output])
            edge_weight = self.output_permanence[invoked_output] >= permanence_threshold if permanence_threshold is not None else 1
            projected = (edge_weight & padded_input_activation[edge_target]).sum(axis=1)
            return projected

        edge_target = self.input_edge[active_input]
        projected = bincount(edge_target.flatten(), minLength=1 + self.output_dim)
        assert len(projected) == 1 + self.output_dim
        return projected[1:]

    def update(
        self, learning_output,
        input_activation=None, padded_input_activation=None,
        winner_input=None, min_active_edges=32,
        permanence_initial=0.21, active_edge_permanence_change=0.1, inactive_edge_permanence_change=0.1
    ):
        assert input_activation is not None or padded_input_activation is not None
        if padded_input_activation is None:
            padded_input_activation = self.pad_input_activation(input_activation=input_activation)

        self.update_permanence(padded_input_activation, learning_output, active_edge_permanence_change, inactive_edge_permanence_change)
        if winner_input is not None:
            self.add_edge(padded_input_activation, winner_input, learning_output, permanence_initial, min_active_edges)
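
The edge bookkeeping above packs each output edge as input_edge * (input_dim + 1) + target_input, with input index input_dim reserved as the invalid/padding slot; a tiny round-trip check (illustrative):

from bithtm.projections import SparseProjection

proj = SparseProjection(input_dim=10)
edge = proj.pack_output_edge(4, 2)       # target_input=4, input_edge=2 -> 26
target, slot = proj.unpack_output_edge(edge)
print(target, slot)                      # 4 2
print(proj.get_output_edge_target(edge)) # 4
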

class PredictiveProjection:
    class State:
        def __init__(self, prediction, segment_potential, matching_segment, matching_segment_activation, matching_segment_active):
            self.prediction = prediction
            self.segment_potential = segment_potential
            self.matching_segment = matching_segment
            self.matching_segment_activation = matching_segment_activation
            self.matching_segment_active = matching_segment_active
            self.max_jittered_potential = None
            self.matching_segment_jittered_potential = None

    def __init__(
        self, output_dim,
        permanence_initial=0.21, permanence_threshold=0.5, permanence_increment=0.1, permanence_decrement=0.1, permanence_punishment=0.01,
        segment_activation_threshold=15, segment_matching_threshold=15, segment_sampling_synapses=32,
        segment_bundle_growth_exponential=True
    ):
        assert segment_activation_threshold >= segment_matching_threshold

        self.output_dim = output_dim

        self.permanence_initial = permanence_initial
        self.permanence_threshold = permanence_threshold
        self.permanence_increment = permanence_increment
        self.permanence_decrement = permanence_decrement
        self.permanence_punishment = permanence_punishment

        self.segment_activation_threshold = segment_activation_threshold
        self.segment_matching_threshold = segment_matching_threshold
        self.segment_sampling_synapses = segment_sampling_synapses

        self.segment_projection = SparseProjection(self.output_dim)
        self.segment_bundle = DynamicArray2D(np.int32, size=(0, 1), growth_exponential=(segment_bundle_growth_exponential, False))
        self.bundle_segments = np.zeros(self.output_dim, dtype=np.int32)

    def fill_jittered_potential_info(self, state, matching_segment_bundle=None):
        if state.max_jittered_potential is not None and state.matching_segment_jittered_potential is not None:
            return
        if matching_segment_bundle is None:
            matching_segment_bundle = self.segment_bundle[state.matching_segment].squeeze(1)
        matching_segment_jittered_potential = state.segment_potential[state.matching_segment].astype(np.float32)
        matching_segment_jittered_potential += np.random.rand(*state.matching_segment.shape)
        max_jittered_potential = np.zeros(self.output_dim, dtype=np.float32)
        np.maximum.at(max_jittered_potential, matching_segment_bundle, matching_segment_jittered_potential)
        state.max_jittered_potential = max_jittered_potential
        state.matching_segment_jittered_potential = matching_segment_jittered_potential

    def get_jittered_potential_info(self, state, matching_segment_bundle=None):
        self.fill_jittered_potential_info(state, matching_segment_bundle=matching_segment_bundle)
        return state.max_jittered_potential, state.matching_segment_jittered_potential

    def process(self, active_input, return_jittered_potential_info=True):
        segment_potential = self.segment_projection.process(active_input=active_input)
        matching_segment, = np.where(segment_potential >= self.segment_matching_threshold)
        matching_segment_bundle = self.segment_bundle[matching_segment].squeeze(1)
        matching_segment_activation = self.segment_projection.process(active_input=active_input, invoked_output=matching_segment, permanence_threshold=self.permanence_threshold)
        matching_segment_active = matching_segment_activation >= self.segment_activation_threshold
        prediction = bincount(matching_segment_bundle, weights=matching_segment_active, minLength=self.output_dim)
        state = self.State(prediction, segment_potential, matching_segment, matching_segment_activation, matching_segment_active)
        if return_jittered_potential_info:
            self.fill_jittered_potential_info(state, matching_segment_bundle=matching_segment_bundle)
        return state

    def update(self, prev_state, input_activation, learning_output, output_punishment, winner_input=None, output_learning=None, epsilon=1e-8):
        if prev_state is None:
            return
        if output_learning is None:
            output_learning = np.zeros(self.output_dim, dtype=np.bool_)
            output_learning[learning_output] = True

        matching_segment_bundle = self.segment_bundle[prev_state.matching_segment].squeeze(1)
        max_jittered_potential, matching_segment_jittered_potential = self.get_jittered_potential_info(prev_state, matching_segment_bundle=matching_segment_bundle)
        matching_segment_bundle_unpredicted = prev_state.prediction[matching_segment_bundle] < epsilon
        matching_segment_best_matching = np.abs(matching_segment_jittered_potential - max_jittered_potential[matching_segment_bundle]) < epsilon
        learning_segment = prev_state.matching_segment[output_learning[matching_segment_bundle] & (prev_state.matching_segment_active | (matching_segment_bundle_unpredicted & matching_segment_best_matching))]
        punished_segment = prev_state.matching_segment[output_punishment[matching_segment_bundle]]

        unaccounted_output, = np.where(prev_state.max_jittered_potential[learning_output] < epsilon)
        if len(unaccounted_output) > 0:
            unaccounted_output = learning_output[unaccounted_output]
            replaced_segment, new_segment = self.segment_projection.add_output(len(unaccounted_output), self.segment_matching_threshold)
            replaced_segment_bundle, bundle_replaced_segments = np.unique(self.segment_bundle[replaced_segment], return_counts=True)
            self.bundle_segments[replaced_segment_bundle] -= bundle_replaced_segments
            self.bundle_segments[unaccounted_output] += 1
            self.segment_bundle[replaced_segment] = np.expand_dims(unaccounted_output[:len(replaced_segment)], 1)
            if len(new_segment) > 0:
                self.segment_bundle.add_rows(np.expand_dims(unaccounted_output[-len(new_segment):], 1))
            learning_segment = np.concatenate([learning_segment, replaced_segment, new_segment])

        padded_input_activation = self.segment_projection.pad_input_activation(input_activation=input_activation)
        self.segment_projection.update(
            learning_segment, padded_input_activation=padded_input_activation, winner_input=winner_input,
            permanence_initial=self.permanence_initial,
            active_edge_permanence_change=self.permanence_increment, inactive_edge_permanence_change=(-self.permanence_decrement),
            min_active_edges=self.segment_sampling_synapses
        )
        self.segment_projection.update(
            punished_segment, padded_input_activation=padded_input_activation,
            active_edge_permanence_change=(-self.permanence_punishment), inactive_edge_permanence_change=0.0
        )
--------------------------------------------------------------------------------
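
A hedged end-to-end sketch of PredictiveProjection (illustrative): with no segments grown yet, process returns an all-zero prediction; segments only appear once update has been called with learning outputs and winner inputs:

import numpy as np
from bithtm.projections import PredictiveProjection

cells = 2048 * 32
proj = PredictiveProjection(output_dim=cells)
active = np.random.choice(cells, size=40 * 32, replace=False)
state = proj.process(active)
print((state.prediction > 0).sum())  # 0 -- nothing has been learned yet
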
/legacy/bithtm.py:
--------------------------------------------------------------------------------
import numpy as np


class SpatialPooler:
    def __init__(self, input_size, columns, active_columns):
        self.input_size = input_size
        self.columns = columns
        self.active_columns = active_columns

        self.sparsity = self.active_columns / self.columns

        self.boosting_intensity = 0.2
        self.duty_cycle_inertia = 0.99

        self.permanence_threshold = 0.0
        self.permanence_increment = 0.1
        self.permanence_decrement = 0.2

        self.activation = np.zeros(self.columns, dtype=np.bool_)
        self.overlaps = np.zeros(self.columns, dtype=np.int32)
        self.duty_cycle = np.zeros(self.columns, dtype=np.float32)

        self.active = np.zeros(self.active_columns, dtype=np.int32)

        self.permanence = np.random.randn(self.columns, self.input_size) * 0.1

    def run(self, input):
        weight = self.permanence > self.permanence_threshold
        self.overlaps = np.sum(input & weight, axis=1)

        boosting = np.exp(self.boosting_intensity * -self.duty_cycle / self.sparsity)
        order = (boosting * self.overlaps).argsort()
        self.active = order[-self.active_columns:]

        self.activation.fill(False)
        self.activation[self.active] = True

        self.duty_cycle *= self.duty_cycle_inertia
        self.duty_cycle[self.active] += 1.0 - self.duty_cycle_inertia

        self.permanence[self.active] += input * (self.permanence_increment + self.permanence_decrement) - self.permanence_decrement

class TemporalMemory:
    def __init__(self, columns, cells):
        self.columns = columns
        self.cells = cells

        self.segment_active_threshold = 10
        self.segment_matching_threshold = 10

        self.synapse_sample_size = 20

        # self.permanence_invalid = -1.0
        # self.permanence_initial = 0.01
        self.permanence_threshold = 0.5
        self.permanence_increment = 0.3
        self.permanence_decrement = 0.05
        self.permanence_punishment = 0.01

        self.cell_active = np.zeros((self.columns, self.cells), dtype=np.bool_)
        self.cell_predictive = np.zeros_like(self.cell_active)
        self.cell_segments = np.zeros((self.columns, self.cells), dtype=np.int32)

        self.segment_capacity = 1
        self.segment_index = np.arange(self.cells * self.segment_capacity, dtype=np.int32).reshape(1, self.cells, self.segment_capacity)
        self.segment_activation = np.zeros((self.columns, self.cells, self.segment_capacity), dtype=np.int32)
        self.segment_potential = np.zeros_like(self.segment_activation)
        self.segment_active = np.zeros((self.columns, self.cells, self.segment_capacity), dtype=np.bool_)
        self.segment_matching = np.zeros_like(self.segment_active)
        self.segment_synapses = np.zeros((self.columns, self.cells, self.segment_capacity), dtype=np.int32)

        self.cell_synapse_capacity = 0
        self.cell_synapse_cell = np.full((self.columns, self.cells, self.cell_synapse_capacity), -1, dtype=np.int32)

        self.segment_synapse_capacity = 1
        self.segment_synapse_cell = np.full((self.columns, self.cells, self.segment_capacity, self.segment_synapse_capacity), -1, dtype=np.int32)
        self.segment_synapse_permanence = np.zeros((self.columns, self.cells, self.segment_capacity, self.segment_synapse_capacity), dtype=np.float32)

        self.prev_winner_cell = np.zeros(0, dtype=np.int32)
        self.prev_target_segment = np.zeros(0, dtype=np.int32)

    def run(self, active_column):
        cell_predictive = self.cell_predictive[active_column]
        column_bursting = ~np.any(cell_predictive, axis=1)

        segment_potential = self.segment_potential[active_column].reshape(len(active_column), -1)
        column_best_matching_segment = np.argmax(segment_potential, axis=1)
        column_least_used_cell = np.argmin(self.cell_segments[active_column], axis=1)
        column_grow_segment = segment_potential[(np.arange(len(active_column), dtype=np.int32), column_best_matching_segment)] == 0
        segment_learning = self.segment_active[active_column] | ((self.segment_index == column_best_matching_segment[:, None, None]) & (column_bursting & ~column_grow_segment)[:, None, None])

        learning_segment = np.nonzero(segment_learning)
        learning_segment = active_column[learning_segment[0]] * (self.cells * self.segment_capacity) + learning_segment[1] * self.segment_capacity + learning_segment[2]
        learning_segment_synapse_cell = self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[learning_segment]
        learning_segment_synapse_cell_valid = np.nonzero(learning_segment_synapse_cell >= 0)
        learning_segment_synapse_cell = learning_segment_synapse_cell[learning_segment_synapse_cell_valid]
        self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[(learning_segment[learning_segment_synapse_cell_valid[0]], learning_segment_synapse_cell_valid[1])] += self.cell_active.reshape(-1)[learning_segment_synapse_cell] * (self.permanence_increment + self.permanence_decrement) - self.permanence_decrement

        punished_segment = np.nonzero(self.segment_active.reshape(-1)[self.prev_target_segment])[0]
        punished_segment_synapse_cell = self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[punished_segment]
        punished_segment_synapse_cell_valid = np.nonzero(punished_segment_synapse_cell >= 0)
        punished_segment_synapse_cell = punished_segment_synapse_cell[punished_segment_synapse_cell_valid]
        self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[(punished_segment[punished_segment_synapse_cell_valid[0]], punished_segment_synapse_cell_valid[1])] -= self.cell_active.reshape(-1)[punished_segment_synapse_cell] * self.permanence_punishment

        growing_segment_column = np.nonzero(column_grow_segment)[0]
        growing_segment_cell = column_least_used_cell[growing_segment_column]
        winner_cell = cell_predictive.copy()
        winner_cell[(growing_segment_column, growing_segment_cell)] = True
        winner_cell = np.nonzero(winner_cell)
        winner_cell = active_column[winner_cell[0]] * self.cells + winner_cell[1]

        if len(self.prev_winner_cell) > 0:
            growing_segment_column = active_column[growing_segment_column]
            growing_segment = self.cell_segments[(growing_segment_column, growing_segment_cell)]

            max_cell_segments = np.max(growing_segment) + 1 if len(growing_segment) > 0 else 0
            if max_cell_segments > self.segment_capacity:
                segment_capacity = max_cell_segments
                self.segment_index = np.arange(self.cells * segment_capacity, dtype=np.int32).reshape(1, self.cells, segment_capacity)
                self.segment_activation = np.zeros((self.columns, self.cells, segment_capacity), dtype=np.int32)
                self.segment_potential = np.zeros_like(self.segment_activation)

                segment_synapses = np.zeros((self.columns, self.cells, segment_capacity), dtype=np.int32)
                segment_synapse_cell = np.full((self.columns, self.cells, segment_capacity, self.segment_synapse_capacity), -1, dtype=np.int32)
                segment_synapse_permanence = np.zeros((self.columns, self.cells, segment_capacity, self.segment_synapse_capacity), dtype=np.float32)
                segment_synapses[:, :, :self.segment_capacity] = self.segment_synapses
                segment_synapse_cell[:, :, :self.segment_capacity, :] = self.segment_synapse_cell
                segment_synapse_permanence[:, :, :self.segment_capacity, :] = self.segment_synapse_permanence

                self.segment_capacity = segment_capacity
                self.segment_synapses = segment_synapses
                self.segment_synapse_cell = segment_synapse_cell
                self.segment_synapse_permanence = segment_synapse_permanence
            learning_segment = np.concatenate([learning_segment, growing_segment_column * (self.cells * self.segment_capacity) + growing_segment_cell * self.segment_capacity + growing_segment])
            # candidates: previous winner cells; cells a segment already synapses
            # onto are tiled twice so the adjacent-duplicate pass removes them
            segment_candidate = np.sort(np.concatenate([np.tile(self.prev_winner_cell, (len(learning_segment), 1)), np.tile(self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[learning_segment], 2)], axis=1), axis=1)
            segment_winner_targeted = segment_candidate[:, :-1] == segment_candidate[:, 1:]
            segment_candidate[:, :-1][segment_winner_targeted] = -1
            segment_candidate[:, 1:][segment_winner_targeted] = -1
            segment_index = np.arange(segment_candidate.shape[0])[:, None]
            candidate_index = np.arange(segment_candidate.shape[1])
            shuffled_candidate_index = np.tile(candidate_index, (segment_candidate.shape[0], 1))
            # row-wise in-place shuffle: np.random.shuffle mutates each row view
            np.apply_along_axis(np.random.shuffle, 1, shuffled_candidate_index)
            segment_candidate[:, candidate_index] = segment_candidate[(segment_index, shuffled_candidate_index)]

            segment_new_synapses = np.maximum(np.minimum(self.synapse_sample_size - self.segment_potential.reshape(-1)[learning_segment], np.sum(segment_candidate >= 0, axis=1)), 0)
            new_synapse_segment = np.nonzero(segment_new_synapses)[0]
            if len(new_synapse_segment) > 0:
                learning_segment = learning_segment[new_synapse_segment]
                segment_candidate = segment_candidate[new_synapse_segment]
                segment_new_synapses = segment_new_synapses[new_synapse_segment]
                shuffled_candidate_index = shuffled_candidate_index[new_synapse_segment]

                segment_synapses = self.segment_synapses.reshape(-1)[learning_segment]
                max_segment_synapses = np.max(segment_synapses + segment_new_synapses) if len(learning_segment) > 0 else 0
                if max_segment_synapses > self.segment_synapse_capacity:
                    # first reclaim room by compacting each segment's valid
                    # synapses (permanence > 0) to the front of its row
                    segment_synapses = np.zeros(len(learning_segment), dtype=np.int32)
                    valid_segment_synapse = np.nonzero(self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[learning_segment] > 0)
                    segment_synapse_offset = np.zeros(len(learning_segment), dtype=np.int32)
                    if len(valid_segment_synapse[0]) > 0:
                        valid_segment_synapse_offset = np.concatenate([np.zeros(1, dtype=np.int32), 1 + np.nonzero(valid_segment_synapse[0][1:] != valid_segment_synapse[0][:-1])[0]])
                        valid_segment = valid_segment_synapse[0][valid_segment_synapse_offset]
                        segment_synapses[valid_segment] = np.concatenate([valid_segment_synapse_offset[1:] - valid_segment_synapse_offset[:-1], len(valid_segment_synapse[0]) - valid_segment_synapse_offset[-1].reshape(1)])
                        segment_synapse_offset[valid_segment] = valid_segment_synapse_offset
                        valid_segment_synapse_target = (learning_segment[valid_segment_synapse[0]], np.arange(len(valid_segment_synapse[0]), dtype=np.int32) - segment_synapse_offset[valid_segment_synapse[0]])
                        valid_segment_synapse = (learning_segment[valid_segment_synapse[0]], valid_segment_synapse[1])
                        self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[valid_segment_synapse_target] = self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[valid_segment_synapse]
                        self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[valid_segment_synapse_target] = self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[valid_segment_synapse]

                    # invalidate the slots behind the compacted synapses so the
                    # leftover copies are not counted twice
                    stale_segment_synapse = np.nonzero(np.arange(self.segment_synapse_capacity)[None, :] >= segment_synapses[:, None])
                    stale_segment_synapse = (learning_segment[stale_segment_synapse[0]], stale_segment_synapse[1])
                    self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[stale_segment_synapse] = -1
                    self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[stale_segment_synapse] = 0.0

                    max_segment_synapses = np.max(segment_synapses + segment_new_synapses) if len(learning_segment) > 0 else 0
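                    # if compaction alone didn't free enough room, reallocate
                    # with a larger per-segment synapse capacity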
                    if max_segment_synapses > self.segment_synapse_capacity:
                        segment_synapse_capacity = max_segment_synapses
                        segment_synapse_cell = np.full((self.columns, self.cells, self.segment_capacity, segment_synapse_capacity), -1, dtype=np.int32)
                        segment_synapse_permanence = np.zeros((self.columns, self.cells, self.segment_capacity, segment_synapse_capacity), dtype=np.float32)
                        segment_synapse_cell[:, :, :, :self.segment_synapse_capacity] = self.segment_synapse_cell
                        segment_synapse_permanence[:, :, :, :self.segment_synapse_capacity] = self.segment_synapse_permanence
                        self.segment_synapse_capacity = segment_synapse_capacity
                        self.segment_synapse_cell = segment_synapse_cell
                        self.segment_synapse_permanence = segment_synapse_permanence

                # scatter the first segment_new_synapses[i] surviving candidates
                # of each segment into its free synapse slots
                segment_target = np.nonzero(segment_candidate >= 0)
                segment_target_offset = np.concatenate([np.zeros(1, dtype=np.int32), 1 + np.nonzero(segment_target[0][1:] != segment_target[0][:-1])[0]])
                segment_target_end = np.where(segment_new_synapses > 0, segment_target[1][segment_target_offset + segment_new_synapses - 1], 0)
                segment_new_synapse = np.arange(len(segment_target[0]), dtype=np.int32) - segment_target_offset[segment_target[0]]
                segment_target_valid = np.nonzero(segment_target[1] <= segment_target_end[segment_target[0]])
                segment_target = (segment_target[0][segment_target_valid], segment_target[1][segment_target_valid])
                segment_new_synapse = segment_synapses[segment_target[0]] + segment_new_synapse[segment_target_valid]

                segment_target_segment = learning_segment[segment_target[0]]
                segment_target_candidate = segment_candidate[segment_target]
                self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[(segment_target_segment, segment_new_synapse)] = segment_target_candidate
                # start fresh synapses at a small permanence so the
                # permanence > 0 validity check keeps them around
                self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[(segment_target_segment, segment_new_synapse)] = self.permanence_initial

                self.cell_segments[(growing_segment_column, growing_segment_cell)] += 1
                self.segment_synapses.reshape(-1)[learning_segment] = segment_synapses + segment_new_synapses

                # TODO: they're not candidates at this point
                candidate_target = (shuffled_candidate_index[segment_target], segment_target[0])
                candidate_synapse_cell = np.full((segment_candidate.shape[1], segment_candidate.shape[0]), -1, dtype=np.int32)
                candidate_synapse_cell[candidate_target] = segment_target_candidate
                candidate_valid = np.nonzero(np.any(candidate_synapse_cell >= 0, axis=1))[0]

                candidate_synapse_cell_candidate = candidate_synapse_cell[candidate_valid]
                candidate_synapse_cell_candidate_valid = np.nonzero(candidate_synapse_cell_candidate >= 0)
                candidate_synapse_cell_candidate[(candidate_synapse_cell_candidate_valid[0], 0)] = candidate_synapse_cell_candidate[candidate_synapse_cell_candidate_valid]
                candidate_synapse_cell_candidate = candidate_synapse_cell_candidate[:, 0]

                # record, per presynaptic cell, the cells that now own a synapse onto it
                candidate_synapse_cell[candidate_target] = segment_target_segment // self.segment_capacity
                candidate_synapse_cell = candidate_synapse_cell[candidate_valid]
                candidate_synapse_cell = np.concatenate([candidate_synapse_cell, self.cell_synapse_cell.reshape(self.columns * self.cells, -1)[candidate_synapse_cell_candidate]], axis=1)
                candidate_synapse_cell = np.sort(candidate_synapse_cell, axis=1)
                candidate_synapse_cell[:, 1:][candidate_synapse_cell[:, 1:] == candidate_synapse_cell[:, :-1]] = -1
                candidate_synapse_cell_valid = candidate_synapse_cell >= 0

                candidate_synapses = np.sum(candidate_synapse_cell_valid, axis=1)
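
                # cell_synapse_cell is the reverse map: for each presynaptic
                # cell, the cells whose segments it feeds; the activation pass
                # below uses it to visit only the targeted segments instead of
                # every segment in the network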
                max_cell_synapses = np.max(candidate_synapses)
                if max_cell_synapses > self.cell_synapse_capacity:
                    cell_synapse_capacity = max_cell_synapses
                    cell_synapse_cell = np.full((self.columns, self.cells, cell_synapse_capacity), -1, dtype=np.int32)
                    cell_synapse_cell[:, :, :self.cell_synapse_capacity] = self.cell_synapse_cell
                    self.cell_synapse_capacity = cell_synapse_capacity
                    self.cell_synapse_cell = cell_synapse_cell

                candidate_synapse_cell_valid = np.nonzero(candidate_synapse_cell_valid)
                candidate_synapse_cell_offset = np.concatenate([np.zeros(1, dtype=np.int32), 1 + np.nonzero(candidate_synapse_cell_valid[0][1:] != candidate_synapse_cell_valid[0][:-1])[0]])
                candidate_synapse_cell_index = np.arange(len(candidate_synapse_cell_valid[0]), dtype=np.int32) - candidate_synapse_cell_offset[candidate_synapse_cell_valid[0]]
                candidate_synapse_cell_candidate = candidate_synapse_cell_candidate[candidate_synapse_cell_valid[0]]
                self.cell_synapse_cell.reshape(-1, self.cell_synapse_capacity)[(candidate_synapse_cell_candidate, candidate_synapse_cell_index)] = candidate_synapse_cell[candidate_synapse_cell_valid]

        # predictive cells of active columns fire; bursting columns fire whole
        cell_active = cell_predictive | column_bursting[:, None]
        self.cell_active[:, :] = False
        self.cell_active[active_column] = cell_active

        active_cell = np.nonzero(cell_active)
        active_cell = (active_column[active_cell[0]], active_cell[1])

        cell_targeted = np.zeros(self.columns * self.cells, dtype=np.bool_)
        active_cell_synapse_cell = self.cell_synapse_cell[active_cell]
        active_cell_synapse_cell = active_cell_synapse_cell[active_cell_synapse_cell >= 0]
        cell_targeted[active_cell_synapse_cell] = True
        target_cell = np.nonzero(cell_targeted)[0]
        target_segment = np.nonzero(np.arange(self.segment_capacity)[None, :] < self.cell_segments.reshape(-1)[target_cell][:, None])
        target_segment = target_cell[target_segment[0]] * self.segment_capacity + target_segment[1]

        # mask unused (-1) synapse slots, which would otherwise alias the last cell
        segment_synapse_cell = self.segment_synapse_cell.reshape(-1, self.segment_synapse_capacity)[target_segment]
        segment_synapse_cell_active = self.cell_active.reshape(-1)[segment_synapse_cell] & (segment_synapse_cell >= 0)
        segment_synapse_permanence = self.segment_synapse_permanence.reshape(-1, self.segment_synapse_capacity)[target_segment]
        segment_synapse_weight = segment_synapse_permanence > self.permanence_threshold

        self.segment_activation[:, :, :] = 0
        self.segment_potential[:, :, :] = 0
        self.segment_activation.reshape(-1)[target_segment] = np.sum(segment_synapse_cell_active & segment_synapse_weight, axis=1)
        self.segment_potential.reshape(-1)[target_segment] = np.sum(segment_synapse_cell_active, axis=1)
        self.segment_active = self.segment_activation >= self.segment_active_threshold
        self.segment_matching = self.segment_potential >= self.segment_matching_threshold
        self.cell_predictive = np.any(self.segment_active, axis=2)

        self.prev_winner_cell = winner_cell
        self.prev_target_segment = target_segment


class HierarchicalTemporalMemory:
    def __init__(self, input_size, columns, cells, active_columns=None):
        if active_columns is None:
            active_columns = int(columns * 0.02)

        self.spatial_pooler = SpatialPooler(input_size, columns, active_columns)
        self.temporal_memory = TemporalMemory(columns, cells)

    def run(self, input):
        self.spatial_pooler.run(input)
        self.temporal_memory.run(self.spatial_pooler.active)


if __name__ == '__main__':
    input = np.random.rand(100, 1000) < 0.2
    htm = HierarchicalTemporalMemory(1000, 2048, 32)

    import time

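    # each pattern is shown with 5% of its bits flipped; a column counts as a
    # correct prediction when it was predictive on the previous step and then
    # becomes active on the current one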
    prev_time = time.time()

    for epoch in range(100):
        for i in range(len(input)):
            prev_column_predictive = htm.temporal_memory.cell_predictive.any(axis=1)

            htm.run(input[i] ^ (np.random.rand(*input[i].shape) < 0.05))

            corrects = prev_column_predictive[htm.spatial_pooler.active].sum()
            incorrects = prev_column_predictive.sum() - corrects
            print('epoch {}, pattern {}: correct columns: {}, incorrect columns: {}'.format(epoch, i, corrects, incorrects))

    print('{}s'.format(time.time() - prev_time))
--------------------------------------------------------------------------------