├── .clang-format ├── .clang-tidy ├── .gitignore ├── .readthedocs.yaml ├── CMakeLists.txt ├── LICENSE ├── MANIFEST.in ├── README.md ├── arch ├── demo_with_dendrites.yaml ├── example.yaml ├── loihi.yaml ├── loihi_with_noise.yaml └── truenorth.yaml ├── backends ├── fugu.py └── lava.py ├── docs ├── _static │ └── make_logo_white.css ├── api.rst ├── conf.py ├── index.rst ├── layers.rst ├── make_references.py ├── requirements.txt └── sana_fe.svg ├── plugins ├── CMakeLists.txt └── hodgkin_huxley.cpp ├── pyproject.toml ├── references.bib ├── sana_fe_logo.svg ├── sanafe ├── README.txt ├── __init__.py ├── layers.py └── tutorial.py ├── scripts ├── booksim.config ├── capstone_gui_runtime.py ├── compare_nemo_perf.py ├── compare_spiketrains.py ├── compress_spiketrain.py ├── create_tutorial.py ├── demo_truenorth.py ├── dendrites.py ├── design_space_exploration.py ├── dvs_gesture.py ├── git_status.sh ├── lasagna.py ├── latin_squares.py ├── load_network.py ├── message_analysis.py ├── misc_dvs_conversion.py ├── net_to_yaml.py ├── old_utils.py ├── plot_characterization.py ├── plot_messages.py ├── plot_raster.py ├── power_benchmark.py ├── queue_transient.py ├── random_network.py ├── test_api.py └── test_pybind.py ├── setup.py ├── snn ├── dendrite.net ├── dendrite.yaml ├── example.net ├── example.yaml ├── hh_example.net ├── input_net.yaml └── nemo │ ├── truenorth_bursting.net │ └── truenorth_phasic.net ├── src ├── arch.cpp ├── arch.hpp ├── attribute.hpp ├── chip.cpp ├── chip.hpp ├── core.cpp ├── core.hpp ├── docstrings.hpp ├── fwd.hpp ├── main.cpp ├── mapped.cpp ├── mapped.hpp ├── message.cpp ├── message.hpp ├── models.cpp ├── models.hpp ├── netlist.cpp ├── netlist.hpp ├── network.cpp ├── network.hpp ├── pipeline.cpp ├── pipeline.hpp ├── plugins.cpp ├── plugins.hpp ├── print.cpp ├── print.hpp ├── pymodule.cpp ├── schedule.cpp ├── schedule.hpp ├── tile.cpp ├── tile.hpp ├── utils.cpp ├── utils.hpp ├── yaml_arch.cpp ├── yaml_arch.hpp ├── yaml_common.cpp ├── yaml_common.hpp ├── yaml_snn.cpp └── yaml_snn.hpp └── tutorial ├── arch.yaml ├── example_arch.svg ├── example_snn.svg ├── snn.yaml ├── tutorial_0_intro.ipynb ├── tutorial_1_architecture.ipynb ├── tutorial_2_snns.ipynb ├── tutorial_3_api.ipynb ├── tutorial_4_traces.ipynb └── tutorial_5_dvs.ipynb /.clang-tidy: -------------------------------------------------------------------------------- 1 | # .clang-tidy configuration file 2 | Checks: > 3 | modernize-*, 4 | readability-*, 5 | cppcoreguidelines-*, 6 | bugprone-*, 7 | performance-*, 8 | clang-analyzer-*, 9 | misc-*, 10 | deadcode-*, 11 | -modernize-use-trailing-return-type, 12 | -readability-magic-numbers, 13 | -cppcoreguidelines-avoid-magic-numbers, 14 | -misc-non-private-member-variables-in-classes, 15 | -misc-no-recursion, 16 | -readability-identifier-length, 17 | -cppcoreguidelines-non-private-member-variables-in-classes, 18 | -performance-unnecessary-value-param, 19 | -clang-analyzer-optin.core.EnumCastOutOfRange, 20 | # Inherit parent configs (useful for large projects) 21 | InheritParentConfig: true 22 | 23 | # Header filter - only check headers in src/, exclude _deps/ and external dependencies 24 | HeaderFilterRegex: '^sana-fe/src/.*\.(h|hpp)$' 25 | 26 | # Configure specific checkers 27 | CheckOptions: 28 | # Readability options 29 | readability-identifier-naming.VariableCase: 'lower_case' 30 | readability-identifier-naming.FunctionCase: 'lower_case' 31 | readability-identifier-naming.ClassCase: 'CamelCase' 32 | readability-identifier-naming.StructCase: 'CamelCase' 33 | 
readability-identifier-naming.NamespaceCase: 'lower_case' 34 | readability-identifier-naming.EnumCase: 'CamelCase' 35 | readability-identifier-naming.ConstantCase: 'lower_case' 36 | readability-identifier-naming.MacroCase: 'UPPER_CASE' 37 | 38 | # Function length limits 39 | readability-function-size.LineThreshold: 100 40 | readability-function-size.StatementThreshold: 50 41 | readability-function-size.BranchThreshold: 20 42 | readability-function-size.ParameterThreshold: 8 43 | 44 | # Performance options 45 | performance-for-range-copy.WarnOnAllAutoCopies: true 46 | performance-inefficient-vector-operation.VectorLikeClasses: 'std::vector,std::deque' 47 | 48 | # Modernize options 49 | modernize-use-auto.MinTypeNameLength: 5 50 | modernize-loop-convert.MinConfidence: reasonable 51 | 52 | # Core Guidelines options 53 | cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor: true 54 | cppcoreguidelines-special-member-functions.AllowMissingMoveFunctions: true 55 | 56 | # Format style for fix suggestions (optional) 57 | FormatStyle: file 58 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | runs/* 2 | debug/ 3 | release/ 4 | etc/ 5 | dist/ 6 | sanafe.egg-info/ 7 | tutorial/dvs_challenge.net 8 | tutorial/loihi.yaml 9 | *.swp 10 | *.yaml 11 | *.png 12 | *.npz 13 | *.parsed 14 | *.o 15 | *.so 16 | *.pdf 17 | *.pyc 18 | perf.csv 19 | messages.csv 20 | messages_single_ts.csv 21 | potentials.csv 22 | spikes.csv 23 | .vscode 24 | .codechecker 25 | gmon.out 26 | sim 27 | __pycache__ 28 | cmake_install.cmake 29 | CMakeCache.txt 30 | simcpp.* 31 | _deps/ 32 | build/ 33 | CMakeFiles/ 34 | Makefile 35 | compile_commands.json 36 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version, and other tools you might need 8 | build: 9 | os: ubuntu-24.04 10 | tools: 11 | python: "3.13" 12 | apt_packages: 13 | - build-essential 14 | - cmake 15 | - flex 16 | - bison 17 | jobs: 18 | post_create_environment: 19 | - pip install . 
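# The install above also builds the C++ kernel (via CMake and pybind11), so Sphinx autodoc can import the sanafe module when generating the API docs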
20 | 21 | # Build documentation in the "docs/" directory with Sphinx 22 | sphinx: 23 | configuration: docs/conf.py 24 | fail_on_warning: false 25 | 26 | python: 27 | install: 28 | - requirements: docs/requirements.txt 29 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include CMakeLists.txt 2 | include src/*.cpp 3 | include src/*.hpp 4 | include plugins/CMakeLists.txt 5 | include plugins/*.cpp 6 | include plugins/*.hpp 7 | include __init__.py -------------------------------------------------------------------------------- /arch/demo_with_dendrites.yaml: -------------------------------------------------------------------------------- 1 | architecture: 2 | name: dendrite 3 | attributes: 4 | link_buffer_size: 4 5 | width: 2 6 | height: 1 7 | tile: 8 | - name: tile 9 | attributes: 10 | energy_north_hop: 2.0e-12 11 | latency_north_hop: 1.4e-9 12 | energy_east_hop: 2.5e-12 13 | latency_east_hop: 1.2e-9 14 | energy_south_hop: 2.0e-12 15 | latency_south_hop: 1.5e-9 16 | energy_west_hop: 1.8e-12 17 | latency_west_hop: 2.0e-9 18 | core: 19 | - name: core 20 | attributes: 21 | buffer_position: soma 22 | max_neurons_supported: 100 23 | axon_in: 24 | - name: axon_in 25 | attributes: 26 | energy_message_in: 0.0 27 | latency_message_in: 0.0 28 | synapse: 29 | - name: synapse 30 | attributes: 31 | model: current_based 32 | energy_process_spike: 20.0e-12 33 | latency_process_spike: 3.0e-9 34 | dendrite: 35 | - name: dendrite[0..99] 36 | attributes: 37 | energy_update: 0.0 38 | latency_update: 0.0 39 | model: taps 40 | soma: 41 | - name: soma 42 | attributes: 43 | model: leaky_integrate_fire 44 | energy_access_neuron: 20.0e-12 45 | latency_access_neuron: 3.0e-9 46 | energy_update_neuron: 10.0e-12 47 | latency_update_neuron: 1.0e-9 48 | energy_spike_out: 60.0e-12 49 | latency_spike_out: 30.0e-9 50 | - name: dummy_input 51 | attributes: 52 | model: input 53 | energy_access_neuron: 0.0 54 | latency_access_neuron: 0.0 55 | energy_update_neuron: 0.0 56 | latency_update_neuron: 0.0 57 | energy_spike_out: 0.0 58 | latency_spike_out: 0.0 59 | axon_out: 60 | - name: axon_out 61 | attributes: 62 | energy_message_out: 100.0e-12 63 | latency_message_out: 5.0e-9 64 | -------------------------------------------------------------------------------- /arch/example.yaml: -------------------------------------------------------------------------------- 1 | architecture: 2 | name: demo 3 | attributes: 4 | link_buffer_size: 4 5 | width: 2 6 | height: 1 7 | tile: 8 | - name: demo_tile[0..1] 9 | attributes: 10 | energy_north_hop: 2.0e-12 11 | latency_north_hop: 1.4e-9 12 | energy_east_hop: 2.5e-12 13 | latency_east_hop: 1.2e-9 14 | energy_south_hop: 2.0e-12 15 | latency_south_hop: 1.5e-9 16 | energy_west_hop: 1.8e-12 17 | latency_west_hop: 2.0e-9 18 | core: 19 | - name: demo_core[0..3] 20 | attributes: 21 | buffer_position: soma 22 | max_neurons_supported: 100 23 | axon_in: 24 | - name: demo_in 25 | attributes: 26 | energy_message_in: 0.0 27 | latency_message_in: 0.0 28 | synapse: 29 | - name: demo_synapse 30 | attributes: 31 | model: current_based 32 | energy_process_spike: 20.0e-12 33 | latency_process_spike: 3.0e-9 34 | dendrite: 35 | - name: demo_dendrite 36 | attributes: 37 | model: accumulator 38 | energy_update: 0.0 39 | latency_update: 0.0 40 | force_update: true 41 | soma: 42 | - name: demo_soma_default 43 | attributes: 44 | model: leaky_integrate_fire 45 | energy_access_neuron: 20.0e-12 46 
| latency_access_neuron: 3.0e-9 47 | energy_update_neuron: 10.0e-12 48 | latency_update_neuron: 1.0e-9 49 | energy_spike_out: 60.0e-12 50 | latency_spike_out: 30.0e-9 51 | - name: demo_soma_alt 52 | attributes: 53 | model: leaky_integrate_fire 54 | energy_access_neuron: 50.0e-12 55 | latency_access_neuron: 5.0e-9 56 | energy_update_neuron: 60.0e-12 57 | latency_update_neuron: 10.0e-9 58 | energy_spike_out: 30.0e-12 59 | latency_spike_out: 3.0e-9 60 | - name: demo_input 61 | attributes: 62 | model: input 63 | energy_access_neuron: 0.0 64 | latency_access_neuron: 0.0 65 | energy_update_neuron: 0.0 66 | latency_update_neuron: 0.0 67 | energy_spike_out: 0.0 68 | latency_spike_out: 0.0 69 | axon_out: 70 | - name: demo_out 71 | attributes: 72 | energy_message_out: 100.0e-12 73 | latency_message_out: 5.0e-9 74 | -------------------------------------------------------------------------------- /arch/loihi.yaml: -------------------------------------------------------------------------------- 1 | # Energy and time estimates of different events, generated from SPICE 2 | # simulations of Loihi. All numbers were taken from: 3 | # "Loihi: A Neuromorphic Manycore Processor with On-Chip Learning" (2018) 4 | # M. Davies et al 5 | architecture: 6 | name: loihi_chip 7 | attributes: 8 | topology: mesh 9 | width: 8 10 | height: 4 11 | link_buffer_size: 16 12 | sync_model: table 13 | latency_sync: { 1: 0.6e-6, 2: 1.0e-6, 4: 1.4e-6, 29: 1.8e-6 } 14 | tile: 15 | - name: loihi_tile[0..31] 16 | attributes: 17 | energy_north_hop: 4.2e-12 18 | latency_north_hop: 6.5e-9 19 | energy_east_hop: 3.0e-12 20 | latency_east_hop: 4.1e-9 21 | energy_south_hop: 4.2e-12 22 | latency_south_hop: 6.5e-9 23 | energy_west_hop: 3.0e-12 24 | latency_west_hop: 4.1e-9 25 | core: 26 | - name: loihi_core[0..3] 27 | attributes: 28 | buffer_position: soma 29 | buffer_inside_unit: false 30 | max_neurons_supported: 1024 31 | axon_in: 32 | - name: loihi_in 33 | attributes: 34 | energy_message_in: 0.0e-12 35 | latency_message_in: 16.0e-9 36 | soma: 37 | - name: loihi_lif 38 | attributes: 39 | model: leaky_integrate_fire 40 | energy_access_neuron: 51.2e-12 41 | latency_access_neuron: 6.0e-9 42 | energy_update_neuron: 21.6e-12 43 | latency_update_neuron: 3.7e-9 44 | energy_spike_out: 69.3e-12 45 | latency_spike_out: 30.0e-9 46 | - name: loihi_inputs 47 | attributes: 48 | model: input 49 | energy_access_neuron: 0.0 50 | latency_access_neuron: 0.0 51 | energy_update_neuron: 0.0 52 | latency_update_neuron: 0.0 53 | energy_spike_out: 0.0 54 | latency_spike_out: 0.0 55 | synapse: 56 | - name: loihi_dense_synapse # Use the name to link 57 | attributes: 58 | model: current_based 59 | energy_process_spike: 35.5e-12 60 | latency_process_spike: 3.8e-9 61 | - name: loihi_sparse_synapse 62 | attributes: 63 | model: current_based 64 | energy_process_spike: 33.6e-12 65 | latency_process_spike: 4.7e-9 66 | # model: loihi 67 | # latency_concurrent_access: 4.7e-9 68 | # 69 | - name: loihi_conv_synapse 70 | attributes: 71 | model: current_based 72 | latency_process_spike: 3.1e-9 73 | energy_process_spike: 24.0e-12 74 | #model: loihi 75 | dendrite: 76 | - name: loihi_dendrites 77 | attributes: 78 | model: accumulator 79 | energy_update: 0.0 80 | latency_update: 0.0 81 | axon_out: 82 | - name: loihi_out 83 | attributes: 84 | energy_message_out: 111.0e-12 85 | latency_message_out: 5.1e-9 86 | -------------------------------------------------------------------------------- /arch/loihi_with_noise.yaml: 
-------------------------------------------------------------------------------- 1 | # Energy and time estimates of different events, generated from SPICE 2 | # simulations of Loihi. All numbers were taken from: 3 | # "Loihi: A Neuromorphic Manycore Processor with On-Chip Learning" (2018) 4 | # M. Davies et al 5 | architecture: 6 | name: loihi_chip 7 | attributes: 8 | topology: mesh 9 | width: 8 10 | height: 4 11 | link_buffer_size: 16 12 | 13 | sync_model: table 14 | latency_sync: { 1: 0.6e-6, 2: 1.0e-6, 4: 1.4e-6, 29: 1.8e-6 } 15 | tile: 16 | - name: loihi_tile[0..31] 17 | attributes: 18 | energy_north_hop: 4.2e-12 19 | latency_north_hop: 6.5e-9 20 | energy_east_hop: 3.0e-12 21 | latency_east_hop: 4.1e-9 22 | energy_south_hop: 4.2e-12 23 | latency_south_hop: 6.5e-9 24 | energy_west_hop: 3.0e-12 25 | latency_west_hop: 4.1e-9 26 | core: 27 | - name: loihi_core[0..3] 28 | attributes: 29 | buffer_position: soma 30 | buffer_inside_unit: false 31 | max_neurons_supported: 1024 32 | axon_in: 33 | - name: loihi_in 34 | attributes: 35 | energy_message_in: 0.0e-12 36 | latency_message_in: 16.0e-9 37 | soma: 38 | - name: loihi_lif 39 | attributes: 40 | model: leaky_integrate_fire 41 | energy_access_neuron: 51.2e-12 42 | latency_access_neuron: 6.0e-9 43 | energy_update_neuron: 21.6e-12 44 | latency_update_neuron: 3.7e-9 45 | energy_spike_out: 69.3e-12 46 | latency_spike_out: 30.0e-9 47 | - name: loihi_stochastic_lif 48 | attributes: 49 | model: leaky_integrate_fire 50 | # Noise stream is based on Intel's proprietary data 51 | # TODO: also support an LFSR-based random number generator that 52 | # can replace this for the public model 53 | noise: /home/usr1/jboyle/neuro/sana-fe/etc/loihi_random_seq.csv 54 | energy_access_neuron: 51.2e-12 55 | latency_access_neuron: 6.0e-9 56 | # Here account for the 6 pJ cost of generating a random number 57 | energy_update_neuron: 27.6e-12 58 | latency_update_neuron: 3.7e-9 59 | energy_spike_out: 69.3e-12 60 | latency_spike_out: 30.0e-9 61 | - name: loihi_inputs 62 | attributes: 63 | model: input 64 | energy_access_neuron: 0.0 65 | latency_access_neuron: 0.0 66 | energy_update_neuron: 0.0 67 | latency_update_neuron: 0.0 68 | energy_spike_out: 0.0 69 | latency_spike_out: 0.0 70 | synapse: 71 | - name: loihi_dense_synapse # Use the name to link 72 | attributes: 73 | model: current_based 74 | energy_process_spike: 35.5e-12 75 | latency_process_spike: 3.8e-9 76 | - name: loihi_sparse_synapse 77 | attributes: 78 | model: current_based 79 | energy_process_spike: 33.6e-12 80 | latency_process_spike: 4.7e-9 81 | #model: loihi 82 | #latency_concurrent_access: 4.7e-9 83 | #latency_concurrent_access: 4.0e-9 84 | - name: loihi_conv_synapse 85 | attributes: 86 | model: loihi 87 | energy_process_spike: 24.0e-12 88 | dendrite: 89 | - name: loihi_dendrites 90 | attributes: 91 | model: accumulator 92 | energy_update: 0.0 93 | latency_update: 0.0 94 | axon_out: 95 | - name: loihi_out 96 | attributes: 97 | energy_message_out: 111.0e-12 98 | latency_message_out: 5.1e-9 99 | -------------------------------------------------------------------------------- /arch/truenorth.yaml: -------------------------------------------------------------------------------- 1 | # TrueNorth architecture 2 | architecture: 3 | name: truenorth_chip 4 | attributes: 5 | topology: mesh 6 | dimensions: 2 7 | width: 64 8 | height: 64 9 | 10 | tile: 11 | - name: truenorth_tile[0..4095] 12 | attributes: 13 | blocking: False 14 | core: 15 | - name: truenorth_core 16 | attributes: 17 | blocking: False 18 | axon_in: 19 |
- name: core_in 20 | # axon_in acts like input ports to the arch or subtree. 21 | # For Loihi we have at most 4096 slots to route packets to 22 | # different cores. 23 | attributes: 24 | fan_in: 256 25 | protocol: dest_axon 26 | soma: 27 | - name: core_soma 28 | attributes: 29 | type: digital 30 | model: truenorth 31 | synapse: 32 | - name: core_synapses 33 | attributes: 34 | model: current_based 35 | weight_bits: 8 36 | dendrite: 37 | - name: core_dendrites 38 | attributes: 39 | 40 | axon_out: 41 | - name: core_out 42 | attributes: 43 | fan_out: 1 44 | protocol: dest_axon 45 | 46 | -------------------------------------------------------------------------------- /backends/lava.py: -------------------------------------------------------------------------------- 1 | # LAVA to SANA-FE utils 2 | 3 | # This module converts LAVA processes or serialization 4 | # objects to the SNN representation runnable on SANA-FE 5 | 6 | # Implemented by Lance Lui as part of the capstone senior design project 7 | import lava.utils.serialization 8 | from lava.magma.core.process.process import AbstractProcess 9 | from lava.magma.compiler.executable import Executable 10 | 11 | from lava.utils.serialization import load 12 | 13 | import os 14 | import sys 15 | import importlib 16 | 17 | """ 18 | keep this file structure- 19 | lava 20 | ├── src 21 | │ ├── lava 22 | │ │ ├── utils 23 | │ │ │ ├── sanafe.py 24 | │ │ │ │... 25 | │ │ │... 26 | │ │... 27 | │... 28 | SANAFE 29 | ├── sim.py 30 | │... 31 | """ 32 | 33 | UTILS_DIR = os.path.dirname(os.path.abspath(__file__)) 34 | LAVA_DIR = os.path.dirname(UTILS_DIR) 35 | SRC_DIR = os.path.dirname(LAVA_DIR) 36 | LAVA_PROJECT_DIR = os.path.dirname(SRC_DIR) 37 | PROJECT_DIR = os.path.dirname(LAVA_PROJECT_DIR) 38 | sys.path.append(PROJECT_DIR) 39 | sim = importlib.import_module('SANA-FE.snn') 40 | 41 | NETWORK_FILENAME = "runs/random/random.net" 42 | ARCH_FILENAME = "arch/loihi.yaml" 43 | LOIHI_NEURONS_PER_CORE = 1024 44 | LOIHI_CORES = 128 45 | LOIHI_CORES_PER_TILE = 4 46 | LOIHI_TILES = int(LOIHI_CORES / LOIHI_CORES_PER_TILE) 47 | 48 | 49 | 50 | 51 | 52 | 53 | def serial_to_sanafe(filename: str, name: str = "converted_abstract_process.net"): 54 | p = load(filename) 55 | 56 | network = sim.Network(save_mappings=True) 57 | neurons_per_core = LOIHI_NEURONS_PER_CORE 58 | compartments = sim.init_compartments(LOIHI_TILES, LOIHI_CORES_PER_TILE, 59 | neurons_per_core) 60 | 61 | num_proc = len(p[0]) 62 | print(num_proc) 63 | 64 | if isinstance(p[0], AbstractProcess): 65 | process_to_sanafe(p[0]) 66 | return 67 | else: 68 | for i in range(num_proc): 69 | params = p[0][i].proc_params._parameters 70 | dim = params["shape"] 71 | dim1 = dim[0] 72 | dim2 = 1 73 | if len(dim) > 1: dim2 = dim[1] 74 | group = sim.create_layer_type(network=network, 75 | layer_neuron_count=dim1*dim2, 76 | compartments=compartments, 77 | neuron_parameters=params) 78 | if group.id > 0: sim.connect_layers(network, group.id-1, group.id) 79 | prev = group.id 80 | 81 | network.save(filename=name) 82 | return 83 | 84 | def process_to_sanafe(process: AbstractProcess, name: str = "converted_abstract_process.net"): 85 | 86 | network = sim.Network(save_mappings=True) 87 | neurons_per_core = LOIHI_NEURONS_PER_CORE 88 | compartments = sim.init_compartments(LOIHI_TILES, LOIHI_CORES_PER_TILE, 89 | neurons_per_core) 90 | 91 | params = process.proc_params._parameters 92 | dim = params['shape'] 93 | dim1 = dim[0] 94 | dim2 = 1 95 | for i in range(dim[1]): 96 | group = sim.create_layer(network=network,
layer_neuron_count=dim[0], compartments=compartments) 97 | if group.id > 0: sim.connect_layers(network, group.id-1, group.id) 98 | prev = group.id 99 | 100 | network.save(filename=name) 101 | return -------------------------------------------------------------------------------- /docs/_static/make_logo_white.css: -------------------------------------------------------------------------------- 1 | /* Make logo white for better visibility on dark backgrounds */ 2 | /* Target the logo in RTD theme sidebar */ 3 | .wy-side-nav-search .wy-dropdown > a img, 4 | .wy-side-nav-search > a img { 5 | filter: brightness(0) invert(1); 6 | } 7 | 8 | /* Also target by source path as backup */ 9 | img[src*="logo.svg"] { 10 | filter: brightness(0) invert(1); 11 | } -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | This page contains the API documentation for SANA-FE. 5 | 6 | .. currentmodule:: sanafe 7 | 8 | Core Classes 9 | ------------ 10 | 11 | SpikingChip 12 | ~~~~~~~~~~~ 13 | 14 | .. autoclass:: SpikingChip 15 | :members: 16 | :undoc-members: 17 | :show-inheritance: 18 | 19 | .. autoclass:: MappedNeuron 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | Architecture 25 | ~~~~~~~~~~~~ 26 | 27 | .. autoclass:: Architecture 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | 32 | .. autoclass:: Tile 33 | :members: 34 | :undoc-members: 35 | :show-inheritance: 36 | 37 | .. autoclass:: Core 38 | :members: 39 | :undoc-members: 40 | :show-inheritance: 41 | 42 | Network 43 | ~~~~~~~ 44 | 45 | .. autoclass:: Network 46 | :members: 47 | :undoc-members: 48 | :show-inheritance: 49 | 50 | .. autoclass:: NeuronGroup 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | .. autoclass:: Neuron 56 | :members: 57 | :undoc-members: 58 | :show-inheritance: 59 | 60 | .. autoclass:: Connection 61 | :members: 62 | :undoc-members: 63 | :show-inheritance: 64 | 65 | 66 | 67 | Utility Functions 68 | ----------------- 69 | 70 | .. autofunction:: load_arch 71 | 72 | .. 
autofunction:: load_net -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | project = 'SANA-FE' 2 | author = 'James Boyle' 3 | release = '2.0.20' 4 | 5 | extensions = [ 6 | 'sphinx.ext.autodoc', 7 | 'sphinx.ext.viewcode', 8 | 'sphinx.ext.autosummary', # For automatic summary tables 9 | 'sphinx.ext.napoleon', # For Google style docstrings 10 | ] 11 | 12 | html_theme = 'sphinx_rtd_theme' 13 | html_logo = '../sana_fe_logo.svg' 14 | 15 | # Autodoc settings 16 | autodoc_default_options = { 17 | 'members': True, 18 | 'member-order': 'bysource', 19 | 'special-members': '__init__', 20 | 'undoc-members': True, 21 | 'exclude-members': '__weakref__' 22 | } 23 | 24 | # Generate autosummary automatically 25 | autosummary_generate = True 26 | 27 | # Add custom CSS 28 | html_static_path = ['_static'] 29 | def setup(app): 30 | app.add_css_file('make_logo_white.css') 31 | 32 | # Napoleon settings 33 | napoleon_google_docstring = True 34 | napoleon_numpy_docstring = True 35 | napoleon_include_init_with_doc = False 36 | napoleon_include_private_with_doc = False 37 | napoleon_include_special_with_doc = True -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to SANA-FE's Documentation 2 | ======================================= 3 | 4 | Architecting new neuromorphic chips involves several design decisions that can 5 | affect overall system power consumption and performance. Architecture models can 6 | be used to estimate the impact of different design decisions and thus inform 7 | decision-making and exploration. SANA-FE (Simulating Advanced Neuromorphic 8 | Architectures for Fast Exploration) is an open-source tool developed in a 9 | collaboration between UT Austin and Sandia National Laboratories (SNL) to 10 | rapidly and accurately model and simulate the energy and performance of 11 | different neuromorphic hardware platforms. 12 | 13 | .. image:: sana_fe.svg 14 | :alt: SANA-FE diagram 15 | :align: center 16 | 17 | SANA-FE requires a description of the architecture you want to simulate and a 18 | specification of the spiking neural network (SNN) application you wish to map 19 | and execute. Optionally, you can also provide shared library hardware plugins 20 | and simulation configuration. SANA-FE will simulate the given architecture and 21 | give performance and energy estimates. Optionally, SANA-FE will generate detailed 22 | hardware traces. 23 | 24 | The SANA-FE kernel is primarily written in C++, but uses PyBind11 to generate 25 | Python interfaces. SANA-FE can alternatively be used as a standalone C++ 26 | simulator. For more information on using SANA-FE without Python, visit the 27 | project repository homepage at: https://github.com/SLAM-Lab/SANA-FE 28 | The SANA-FE Python interface supports creating mapped SNNs, launching 29 | simulations, and getting prediction data out for analysis. 30 | 31 | Quick-start 32 | =========== 33 | 34 | To install SANA-FE from PyPI, run the following 35 | 36 | .. code-block:: bash 37 | 38 | pip install sanafe 39 | 40 | 41 | If you would rather install the latest SANA-FE from source, run 42 | 43 | .. code-block:: bash 44 | 45 | git clone https://github.com/SLAM-Lab/SANA-FE sana-fe 46 | cd sana-fe 47 | pip install .
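
Building SANA-FE from source compiles the C++ simulation kernel, so a working C++ toolchain and CMake are needed alongside pip (see the build requirements in ``pyproject.toml``). As a sketch, on Debian/Ubuntu you can install the same system packages used by the Read the Docs build in ``.readthedocs.yaml``:

.. code-block:: bash

    sudo apt install build-essential cmake flex bison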
48 | 49 | You can test your SANA-FE installation by running the following command 50 | 51 | .. code-block:: bash 52 | 53 | python -c "import sanafe; \ 54 | arch=sanafe.load_arch('arch/example.yaml'); c=sanafe.SpikingChip(arch); \ 55 | c.load(sanafe.load_net('snn/example.yaml',arch)); c.sim(100)" 56 | 57 | If this runs successfully, you should see the following printed on the console 58 | 59 | .. code-block:: bash 60 | 61 | Executed steps: [100/100] 62 | 63 | For more examples of how to use SANA-FE, we have provided a set of Jupyter 64 | notebooks at: 65 | https://github.com/SLAM-Lab/SANA-FE/blob/main/tutorial 66 | 67 | These tutorials can be run locally or using Google Colab. 68 | 69 | .. toctree:: 70 | :maxdepth: 2 71 | :caption: Contents: 72 | 73 | api 74 | layers 75 | 76 | Indices and tables 77 | ================== 78 | 79 | * :ref:`genindex` 80 | * :ref:`modindex` 81 | * :ref:`search` 82 | 83 | Citation 84 | ======== 85 | 86 | If you use SANA-FE in your work, please cite: 87 | 88 | .. code-block:: bibtex 89 | 90 | @article{boyle2025sanafe, 91 | title={SANA-FE: Simulating Advanced Neuromorphic Architectures for Fast Exploration}, 92 | author={James A. Boyle and Mark Plagge and Suma George Cardwell and Frances S. Chance and Andreas Gerstlauer}, 93 | journal={IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, 94 | year={2025}, 95 | doi={10.1109/TCAD.2025.3537971} 96 | } 97 | 98 | 99 | References 100 | ========== 101 | 102 | James A. Boyle, Mark Plagge, Suma George Cardwell, Frances S. Chance, and Andreas Gerstlauer, 103 | "SANA-FE: Simulating Advanced Neuromorphic Architectures for Fast Exploration," 104 | in IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2025, 105 | `doi:10.1109/TCAD.2025.3537971 <https://doi.org/10.1109/TCAD.2025.3537971>`_. 106 | 107 | James A. Boyle, Mark Plagge, Suma George Cardwell, Frances S. Chance, and Andreas Gerstlauer, 108 | "Tutorial: Large-Scale Spiking Neuromorphic Architecture Exploration using SANA-FE," 109 | in 2024 International Conference on Hardware/Software Codesign and System Synthesis (CODES+ISSS), Raleigh, NC, USA, 110 | `doi:10.1109/CODES-ISSS60120.2024.00007 <https://doi.org/10.1109/CODES-ISSS60120.2024.00007>`_. 111 | 112 | James A. Boyle, Mark Plagge, Suma George Cardwell, Frances S. Chance, and Andreas Gerstlauer, 113 | "Performance and Energy Simulation of Spiking Neuromorphic Architectures for Fast Exploration," 114 | in 2023 International Conference on Neuromorphic Systems (ICONS), Santa Fe, NM, USA, 115 | `doi:10.1145/3589737.3605970 <https://doi.org/10.1145/3589737.3605970>`_. 116 | 117 | Acknowledgement 118 | =============== 119 | 120 | Copyright (c) 2025 - The University of Texas at Austin 121 | This work was produced under contract #2317831 to National Technology and 122 | Engineering Solutions of Sandia, LLC which is under contract 123 | No. DE-NA0003525 with the U.S. Department of Energy. -------------------------------------------------------------------------------- /docs/layers.rst: -------------------------------------------------------------------------------- 1 | ===================================== 2 | SANA-FE Layers Module (sanafe.layers) 3 | ===================================== 4 | 5 | The ``sanafe.layers`` module provides higher-level machine learning abstractions 6 | that simplify the construction of deep spiking neural network applications in 7 | SANA-FE. These layers wrap the low-level C++ kernel with PyTorch/Keras-style 8 | interfaces. 9 | 10 | Quick Start 11 | =========== 12 | 13 | ..
code-block:: python 14 | 15 | import sanafe 16 | from sanafe import layers 17 | import numpy as np 18 | 19 | # Create network 20 | net = sanafe.Network() 21 | 22 | # Build a simple CNN 23 | input_layer = layers.Input2D(net, 28, 28, 1) 24 | 25 | # Add convolutional layer 26 | conv_weights = np.random.random((3, 3, 1, 32)) 27 | conv1 = layers.Conv2D(net, input_layer, conv_weights, threshold=1.0) 28 | 29 | # Add dense layer 30 | dense_weights = np.random.random((conv1.width * conv1.height * conv1.channels, 10)) 31 | output = layers.Dense(net, conv1, 10, dense_weights) 32 | 33 | For a more in-depth real-world example, see our tutorial implementing 34 | `DVS gesture categorization <https://github.com/SLAM-Lab/SANA-FE/blob/main/tutorial>`_ 35 | 36 | Layer Base Class 37 | ================ 38 | 39 | .. autoclass:: sanafe.layers.Layer 40 | :members: 41 | :undoc-members: 42 | :show-inheritance: 43 | 44 | Input Layers 45 | ============ 46 | 47 | .. autoclass:: sanafe.layers.Input2D 48 | :members: 49 | :undoc-members: 50 | :show-inheritance: 51 | 52 | Convolutional Layers 53 | ==================== 54 | 55 | .. autoclass:: sanafe.layers.Conv2D 56 | :members: 57 | :undoc-members: 58 | :show-inheritance: 59 | 60 | Dense Layers 61 | ============ 62 | 63 | .. autoclass:: sanafe.layers.Dense 64 | :members: 65 | :undoc-members: 66 | :show-inheritance: 67 | 68 | Application Examples 69 | ===================== 70 | 71 | MNIST CNN 72 | --------- 73 | 74 | .. code-block:: python 75 | 76 | import sanafe 77 | from sanafe import layers 78 | import numpy as np 79 | 80 | def create_mnist_cnn(net): 81 | """Create a simple CNN for MNIST classification.""" 82 | 83 | # Input layer: 28x28 grayscale images 84 | input_layer = layers.Input2D(net, 28, 28, 1, threshold=1.0) 85 | 86 | # First conv layer: 3x3 kernels, 32 filters 87 | conv1_weights = np.random.normal(0, 0.1, (3, 3, 1, 32)) 88 | conv1 = layers.Conv2D(net, input_layer, conv1_weights, 89 | stride_width=1, stride_height=1, 90 | threshold=1.2, leak=0.05) 91 | 92 | # Second conv layer: 3x3 kernels, 64 filters 93 | conv2_weights = np.random.normal(0, 0.1, (3, 3, 32, 64)) 94 | conv2 = layers.Conv2D(net, conv1, conv2_weights, 95 | stride_width=2, stride_height=2, 96 | threshold=1.1, leak=0.05) 97 | 98 | # Flatten and classify 99 | flatten_size = conv2.width * conv2.height * conv2.channels 100 | dense_weights = np.random.normal(0, 0.1, (flatten_size, 10)) 101 | output = layers.Dense(net, conv2, 10, dense_weights, threshold=2.0) 102 | 103 | return input_layer, conv1, conv2, output 104 | 105 | Multi-Layer Perceptron 106 | ---------------------- 107 | 108 | ..
code-block:: python 109 | 110 | def create_mlp(net, input_size, hidden_sizes, output_size): 111 | """Create a multi-layer perceptron.""" 112 | 113 | # Input layer 114 | input_layer = layers.Input2D(net, input_size, 1, 1, threshold=0.5) 115 | 116 | prev_layer = input_layer 117 | layers_list = [input_layer] 118 | 119 | # Hidden layers 120 | for i, hidden_size in enumerate(hidden_sizes): 121 | weights = np.random.normal(0, 0.1, (len(prev_layer), hidden_size)) 122 | hidden = layers.Dense(net, prev_layer, hidden_size, weights, 123 | threshold=1.0, leak=0.1) 124 | layers_list.append(hidden) 125 | prev_layer = hidden 126 | 127 | # Output layer 128 | output_weights = np.random.normal(0, 0.1, (len(prev_layer), output_size)) 129 | output = layers.Dense(net, prev_layer, output_size, output_weights, 130 | threshold=1.5) 131 | layers_list.append(output) 132 | 133 | return layers_list 134 | 135 | Error Handling 136 | ============== 137 | 138 | The layers module includes error checking: 139 | 140 | - **Dimension validation**: Ensures weight matrices match layer sizes 141 | - **Parameter validation**: Checks for positive dimensions and valid strides 142 | - **Channel compatibility**: Verifies input/output channel consistency 143 | - **Output size validation**: Prevents zero or negative output dimensions 144 | 145 | Common error scenarios: 146 | 147 | .. code-block:: python 148 | 149 | # This will raise ValueError: weight matrix size mismatch 150 | try: 151 | wrong_weights = np.random.random((100, 50)) # Wrong input size 152 | layer = layers.Dense(net, prev_layer, 50, wrong_weights) 153 | except ValueError as e: 154 | print(f"Error: {e}") 155 | 156 | # This will raise ValueError: invalid stride 157 | try: 158 | layer = layers.Conv2D(net, input_layer, weights, stride_width=0) 159 | except ValueError as e: 160 | print(f"Error: {e}") 161 | 162 | See Also 163 | ======== 164 | 165 | * :doc:`api` - API reference -------------------------------------------------------------------------------- /docs/make_references.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Citation reference generator for SANA-FE project. 4 | Reads references.bib and updates both README.md and docs/index.rst. 5 | 6 | Usage (from docs/ directory): 7 | python make_references.py 8 | 9 | Requirements: 10 | pip install bibtexparser 11 | """ 12 | 13 | import re 14 | import sys 15 | from pathlib import Path 16 | try: 17 | import bibtexparser 18 | from bibtexparser.bparser import BibTexParser 19 | except ImportError: 20 | print("Error: bibtexparser not found. 
Install with: pip install bibtexparser") 21 | sys.exit(1) 22 | 23 | 24 | def load_bibtex(bib_file: Path) -> dict: 25 | """Load and parse BibTeX file.""" 26 | if not bib_file.exists(): 27 | print(f"Error: {bib_file} not found!") 28 | sys.exit(1) 29 | 30 | with open(bib_file, 'r', encoding='utf-8') as f: 31 | parser = BibTexParser(common_strings=True) 32 | bib_database = bibtexparser.load(f, parser=parser) 33 | 34 | return {entry['ID']: entry for entry in bib_database.entries} 35 | 36 | 37 | def format_authors(authors_str: str) -> str: 38 | """Format authors string, handling 'and' separators.""" 39 | authors = [author.strip() for author in authors_str.split(' and ')] 40 | if len(authors) == 1: 41 | return authors[0] 42 | elif len(authors) == 2: 43 | return f"{authors[0]} and {authors[1]}" 44 | else: 45 | return f"{', '.join(authors[:-1])}, and {authors[-1]}" 46 | 47 | 48 | def generate_markdown_citation(entries: dict) -> str: 49 | """Generate markdown citation section for README.md.""" 50 | # Use the main SANA-FE paper (boyle2025sanafe) 51 | main_entry = entries.get('boyle2025sanafe') 52 | if not main_entry: 53 | print("Warning: Main citation 'boyle2025sanafe' not found in BibTeX") 54 | return "" 55 | 56 | authors = format_authors(main_entry['author']) 57 | title = main_entry['title'] 58 | journal = main_entry['journal'] 59 | year = main_entry['year'] 60 | doi = main_entry['doi'] 61 | 62 | citation_text = f"""## Citation 63 | 64 | If you use SANA-FE in your work, please cite: 65 | 66 | {authors}, "{title}," in {journal}, {year}, [doi:{doi}](https://doi.org/{doi}). 67 | 68 | ```bibtex 69 | @article{{boyle2025sanafe, 70 | title={{{title}}}, 71 | author={{{main_entry['author']}}}, 72 | journal={{{journal}}}, 73 | year={{{year}}}, 74 | doi={{{doi}}} 75 | }} 76 | ```""" 77 | return citation_text 78 | 79 | 80 | def generate_markdown_references(entries: dict) -> str: 81 | """Generate markdown references section for README.md.""" 82 | # Order: main paper, tutorial, icons 83 | ordered_keys = ['boyle2025sanafe', 'boyle2024tutorial', 'boyle2023performance'] 84 | references = [] 85 | 86 | for key in ordered_keys: 87 | if key not in entries: 88 | continue 89 | 90 | entry = entries[key] 91 | authors = format_authors(entry['author']) 92 | title = entry['title'] 93 | year = entry['year'] 94 | doi = entry['doi'] 95 | 96 | if entry['ENTRYTYPE'] == 'article': 97 | journal = entry['journal'] 98 | ref = f'{authors}, "{title}," in {journal}, {year}, [doi:{doi}](https://doi.org/{doi}).' 99 | else: # inproceedings 100 | booktitle = entry['booktitle'] 101 | address = entry.get('address', '') 102 | if address: 103 | ref = f'{authors}, "{title}," in {booktitle}, {address}, [doi:{doi}](https://doi.org/{doi}).' 104 | else: 105 | ref = f'{authors}, "{title}," in {booktitle}, {year}, [doi:{doi}](https://doi.org/{doi}).' 106 | 107 | references.append(ref) 108 | 109 | references_text = "## References\n\n" + "\n\n".join(references) 110 | return references_text 111 | 112 | 113 | def generate_rst_citation(entries: dict) -> str: 114 | """Generate RST citation section for index.rst.""" 115 | main_entry = entries.get('boyle2025sanafe') 116 | if not main_entry: 117 | return "" 118 | 119 | title = main_entry['title'] 120 | author = main_entry['author'] 121 | journal = main_entry['journal'] 122 | year = main_entry['year'] 123 | doi = main_entry['doi'] 124 | 125 | citation_text = f"""Citation 126 | ======== 127 | 128 | If you use SANA-FE in your work, please cite: 129 | 130 | .. 
code-block:: bibtex 131 | 132 | @article{{boyle2025sanafe, 133 | title={{{title}}}, 134 | author={{{author}}}, 135 | journal={{{journal}}}, 136 | year={{{year}}}, 137 | doi={{{doi}}} 138 | }}""" 139 | return citation_text 140 | 141 | 142 | def generate_rst_references(entries: dict) -> str: 143 | """Generate RST references section for index.rst.""" 144 | ordered_keys = ['boyle2025sanafe', 'boyle2024tutorial', 'boyle2023performance'] 145 | references = [] 146 | 147 | for key in ordered_keys: 148 | if key not in entries: 149 | continue 150 | 151 | entry = entries[key] 152 | authors = format_authors(entry['author']) 153 | title = entry['title'] 154 | year = entry['year'] 155 | doi = entry['doi'] 156 | 157 | if entry['ENTRYTYPE'] == 'article': 158 | journal = entry['journal'] 159 | ref = f'{authors},\n"{title},"\nin {journal}, {year},\n`doi:{doi} <https://doi.org/{doi}>`_.' 160 | else: # inproceedings 161 | booktitle = entry['booktitle'] 162 | address = entry.get('address', '') 163 | if address: 164 | ref = f'{authors},\n"{title},"\nin {booktitle}, {address},\n`doi:{doi} <https://doi.org/{doi}>`_.' 165 | else: 166 | ref = f'{authors},\n"{title},"\nin {booktitle}, {year},\n`doi:{doi} <https://doi.org/{doi}>`_.' 167 | 168 | references.append(ref) 169 | 170 | references_text = "References\n==========\n\n" + "\n\n".join(references) 171 | return references_text 172 | 173 | 174 | def update_file_section(file_path: Path, pattern: str, new_content: str, section_name: str): 175 | """Update a specific section in a file.""" 176 | if not file_path.exists(): 177 | print(f"Warning: {file_path} not found, skipping.") 178 | return False 179 | 180 | content = file_path.read_text(encoding='utf-8') 181 | 182 | if not re.search(pattern, content, re.DOTALL): 183 | print(f"Warning: {section_name} section not found in {file_path}") 184 | return False 185 | 186 | updated_content = re.sub(pattern, new_content + '\n\n', content, flags=re.DOTALL) 187 | file_path.write_text(updated_content, encoding='utf-8') 188 | print(f"Updated {section_name} in {file_path}") 189 | return True 190 | 191 | 192 | def main(): 193 | """Main function to update all citation files.""" 194 | # File paths (relative to docs/ directory) 195 | bib_file = Path("../references.bib") 196 | readme_file = Path("../README.md") 197 | rst_file = Path("index.rst") 198 | 199 | print("Loading citations from references.bib...") 200 | entries = load_bibtex(bib_file) 201 | print(f"Found {len(entries)} citations") 202 | 203 | # Update README.md 204 | if readme_file.exists(): 205 | print("\nUpdating README.md...") 206 | 207 | # Update citation section 208 | citation_pattern = r'## Citation.*?(?=## [A-Z]|\Z)' 209 | new_citation = generate_markdown_citation(entries) 210 | update_file_section(readme_file, citation_pattern, new_citation, "Citation") 211 | 212 | # Update references section 213 | references_pattern = r'## References.*?(?=## [A-Z]|\Z)' 214 | new_references = generate_markdown_references(entries) 215 | update_file_section(readme_file, references_pattern, new_references, "References") 216 | 217 | # Update index.rst 218 | if rst_file.exists(): 219 | print("\nUpdating docs/index.rst...") 220 | 221 | # Update citation section 222 | citation_pattern = r'Citation\n========.*?(?=\n[A-Z][a-z]*\n=+|\Z)' 223 | new_citation = generate_rst_citation(entries) 224 | update_file_section(rst_file, citation_pattern, new_citation, "Citation") 225 | 226 | # Update references section 227 | references_pattern = r'References\n==========.*?(?=\n[A-Z][a-z]*\n=+|\Z)' 228 | new_references = generate_rst_references(entries) 229 |
update_file_section(rst_file, references_pattern, new_references, "References") 230 | 231 | print("\nCitation update complete!") 232 | print("\nNext steps:") 233 | print("1. Review the updated files") 234 | print("2. Commit changes to git") 235 | print("3. To add new citations, edit references.bib and rerun this script") 236 | 237 | if __name__ == "__main__": 238 | main() -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx_rtd_theme 2 | pybind11 -------------------------------------------------------------------------------- /plugins/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.16) 2 | project(HodgkinHuxleyPlugin) 3 | 4 | set(CMAKE_CXX_STANDARD 17) 5 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 6 | set(CMAKE_CXX_EXTENSIONS OFF) 7 | set(CMAKE_BUILD_TYPE Release) 8 | 9 | add_compile_options(-Wall -pedantic -Werror -g) 10 | add_library(hodgkin_huxley SHARED "hodgkin_huxley.cpp") 11 | target_include_directories(hodgkin_huxley PRIVATE ../src) 12 | target_link_libraries(hodgkin_huxley PRIVATE ${CMAKE_DL_LIBS}) 13 | -------------------------------------------------------------------------------- /plugins/hodgkin_huxley.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // hodgkin_huxley.cpp 6 | // 7 | // Plugin implementation of the Hodgkin-Huxley neuron model. Implemented by 8 | // Robin Sam.
9 | // 10 | // Model inspired by this paper: https://ieeexplore.ieee.org/document/9235538 11 | // and this textbook: https://mrgreene09.github.io/computational-neuroscience-textbook 12 | #include <cmath> 13 | #include <cstddef> 14 | #include <optional> 15 | #include <string> 16 | 17 | #include "attribute.hpp" 18 | #include "mapped.hpp" 19 | #include "pipeline.hpp" 20 | #include "print.hpp" 21 | 22 | class HodgkinHuxley : public sanafe::SomaUnit 23 | { 24 | // HodgkinHuxley specific 25 | public: 26 | // system variables 27 | double C_m{10.0}; // Effective capacitance per area of membrane; default is 1 28 | double g_Na{1200.0}; // Conductance of sodium 29 | double g_K{360.0}; // Conductance of potassium 30 | double g_L{3.0}; // Conductance of leak channel 31 | double V_Na{50.0}; // Reversal potential of sodium 32 | double V_K{-77.0}; // Reversal potential of potassium 33 | double V_L{-54.387}; // Reversal potential of leak channel 34 | double dt{0.1}; 35 | 36 | // main parameters 37 | double V{0.0}; // Membrane potential 38 | double prev_V{0.0}; // Membrane potential at the previous update 39 | double I{}; // Stimulation current per area 40 | double m{}; // m, n, h are gating coefficients 41 | double n{}; 42 | double h{}; 43 | 44 | // internal results of various differential equations 45 | double alpha_m{}; 46 | double alpha_n{}; 47 | double alpha_h{}; 48 | double beta_m{}; 49 | double beta_n{}; 50 | double beta_h{}; 51 | 52 | double tau_m{0.0}; 53 | double tau_n{0.0}; 54 | double tau_h{0.0}; 55 | double pm{}; 56 | double pn{}; 57 | double ph{}; 58 | double denominator{0.0}; 59 | double tau_V{0.0}; 60 | double Vinf{0.0}; 61 | 62 | HodgkinHuxley() 63 | { 64 | register_attributes({"m", "n", "h", "current"}); 65 | } 66 | 67 | double get_potential(const size_t /*neuron_address*/) override 68 | { 69 | return V; 70 | } 71 | 72 | void reset() override 73 | { 74 | prev_V = 0.0; 75 | V = 0.0; 76 | m = 0.0; 77 | n = 0.0; 78 | h = 0.0; 79 | tau_n = 0.0; 80 | tau_m = 0.0; 81 | tau_h = 0.0; 82 | pm = 0.0; 83 | pn = 0.0; 84 | ph = 0.0; 85 | denominator = 0.0; 86 | tau_V = 0.0; 87 | Vinf = 0.0; 88 | } 89 | 90 | void set_attribute_hw(const std::string & /*attribute_name*/, 91 | const sanafe::ModelAttribute & /*param*/) override {}; 92 | 93 | void set_attribute_neuron(const size_t /*neuron_address*/, 94 | const std::string &attribute_name, 95 | const sanafe::ModelAttribute &param) override 96 | { 97 | if (attribute_name == "m") 98 | { 99 | m = static_cast<double>(param); 100 | } 101 | else if (attribute_name == "n") 102 | { 103 | n = static_cast<double>(param); 104 | } 105 | else if (attribute_name == "h") 106 | { 107 | h = static_cast<double>(param); 108 | } 109 | else if (attribute_name == "current") 110 | { 111 | I = static_cast<double>(param); 112 | } 113 | } 114 | 115 | sanafe::PipelineResult update(const size_t /*neuron_address*/, 116 | const std::optional<double> /*current_in*/) override 117 | { 118 | sanafe::NeuronStatus status = sanafe::idle; 119 | 120 | // Calculate the change in potential since the last update e.g.
121 | // integrate inputs and apply any potential leak 122 | TRACE1(MODELS, "Updating potential, before:%f\n", V); 123 | 124 | alpha_n = (0.01 * (V + 55)) / (1 - exp(-0.1 * (V + 55))); 125 | alpha_m = (0.1 * (V + 40)) / (1 - exp(-0.1 * (V + 40))); 126 | alpha_h = 0.07 * exp(-0.05 * (V + 65)); 127 | 128 | beta_n = 0.125 * exp(-0.01125 * (V + 55)); 129 | beta_m = 4 * exp(-0.05556 * (V + 65)); 130 | beta_h = 1 / (1 + exp(-0.1 * (V + 35))); 131 | 132 | tau_n = 1 / (alpha_n + beta_n); 133 | tau_m = 1 / (alpha_m + beta_m); 134 | tau_h = 1 / (alpha_h + beta_h); 135 | 136 | pm = alpha_m / (alpha_m + beta_m); 137 | pn = alpha_n / (alpha_n + beta_n); 138 | ph = alpha_h / (alpha_h + beta_h); 139 | 140 | denominator = g_L + g_K * (pow(n, 4)) + g_Na * (pow(m, 3) * h); 141 | tau_V = C_m / denominator; 142 | Vinf = (g_L * V_L + g_K * (pow(n, 4)) * V_K + 143 | g_Na * (pow(m, 3)) * h * V_Na + I) / 144 | denominator; 145 | 146 | // update main parameters 147 | prev_V = V; 148 | V = Vinf + (V - Vinf) * exp(-1 * dt / tau_V); 149 | m = pm + (m - pm) * exp(-1 * dt / tau_m); 150 | n = pn + (n - pn) * exp(-1 * dt / tau_n); 151 | h = ph + (h - ph) * exp(-1 * dt / tau_h); 152 | 153 | // Check against threshold potential (for spiking) 154 | if ((prev_V < 25) && (V > 25)) 155 | { 156 | // If voltage just crossed the 25 mV boundary, then 157 | // spike 158 | status = sanafe::fired; 159 | } 160 | else 161 | { 162 | status = sanafe::updated; 163 | } 164 | 165 | TRACE1(MODELS, "Updating potential, after:%f\n", V); 166 | 167 | return {std::nullopt, status, std::nullopt, std::nullopt}; 168 | } 169 | }; 170 | 171 | // The class factory 172 | extern "C" sanafe::PipelineUnit *create_hodgkin_huxley() 173 | { 174 | TRACE1(MODELS, "Creating HH soma instance\n"); 175 | return (sanafe::PipelineUnit *) new HodgkinHuxley(); 176 | } 177 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel", 5 | "cmake>=3.13", 6 | "pybind11>=2.6.0" 7 | ] 8 | build-backend = "setuptools.build_meta" 9 | -------------------------------------------------------------------------------- /references.bib: -------------------------------------------------------------------------------- 1 | @article{boyle2025sanafe, 2 | title={SANA-FE: Simulating Advanced Neuromorphic Architectures for Fast Exploration}, 3 | author={James A. Boyle and Mark Plagge and Suma George Cardwell and Frances S. Chance and Andreas Gerstlauer}, 4 | journal={IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, 5 | year={2025}, 6 | doi={10.1109/TCAD.2025.3537971} 7 | } 8 | 9 | @inproceedings{boyle2024tutorial, 10 | title={Tutorial: Large-Scale Spiking Neuromorphic Architecture Exploration using SANA-FE}, 11 | author={James A. Boyle and Mark Plagge and Suma George Cardwell and Frances S. Chance and Andreas Gerstlauer}, 12 | booktitle={2024 International Conference on Hardware/Software Codesign and System Synthesis (CODES+ISSS)}, 13 | year={2024}, 14 | address={Raleigh, NC, USA}, 15 | doi={10.1109/CODES-ISSS60120.2024.00007} 16 | } 17 | 18 | @inproceedings{boyle2023performance, 19 | title={Performance and Energy Simulation of Spiking Neuromorphic Architectures for Fast Exploration}, 20 | author={James A. Boyle and Mark Plagge and Suma George Cardwell and Frances S.
Chance and Andreas Gerstlauer}, 21 | booktitle={2023 International Conference on Neuromorphic Systems (ICONS)}, 22 | year={2023}, 23 | address={Santa Fe, NM, USA}, 24 | doi={10.1145/3589737.3605970} 25 | } -------------------------------------------------------------------------------- /sanafe/README.txt: -------------------------------------------------------------------------------- 1 | README.txt 2 | ./sanafe contains modules for the sanafe Python package. 3 | 4 | These files are required when either building the package locally or preparing 5 | it for distribution. When building for distribution, setup.py will search in 6 | here. 7 | 8 | __init__.py is required to load the C++ shared library and any Python modules. 9 | 10 | -------------------------------------------------------------------------------- /sanafe/__init__.py: -------------------------------------------------------------------------------- 1 | # Import pybind11 (C++) kernel under top-level 2 | from sanafecpp import * 3 | -------------------------------------------------------------------------------- /sanafe/tutorial.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2025 - The University of Texas at Austin 3 | This work was produced under contract #2317831 to National Technology and 4 | Engineering Solutions of Sandia, LLC which is under contract 5 | No. DE-NA0003525 with the U.S. Department of Energy. 6 | 7 | tutorial.py - Tutorial helper scripts, mostly for checking answers 8 | """ 9 | import yaml 10 | import re 11 | 12 | green_text = "\033[92m" 13 | red_text = "\033[31m" 14 | default_text = "\033[0m" 15 | 16 | 17 | def check_arch(arch_filename): 18 | with open(arch_filename, "r") as arch_file: 19 | arch_details = yaml.safe_load(arch_file) 20 | check_arch_exercise_1(arch_details) 21 | check_arch_exercise_2(arch_details) 22 | check_arch_exercise_3(arch_details) 23 | 24 | 25 | def check_arch_exercise_1(arch_details): 26 | tiles = arch_details["architecture"]["tile"] 27 | cores = tiles[0]["core"] 28 | somas = cores[0]["soma"] 29 | soma_energy = somas[0]["attributes"]["energy_update_neuron"] 30 | soma_latency = somas[0]["attributes"]["latency_update_neuron"] 31 | if soma_energy == 2.0e-12 and soma_latency == 2.0e-9: 32 | print(f"{green_text}Exercise 1: PASS{default_text}") 33 | else: 34 | print(f"{red_text}Exercise 1: FAIL - Soma energy ({soma_energy}J) " 35 | f"and/or latency ({soma_latency}s) not set correctly{default_text}") 36 | 37 | 38 | def parse_name_range(s): 39 | match = re.match(r"(\w+)\[(\d+)(?:\.\.(\d+))?\]", s) 40 | if match is None: 41 | return None, None 42 | else: 43 | return int(match.group(2)), int(match.group(3) or match.group(2)) 44 | 45 | 46 | def check_arch_exercise_2(arch_details): 47 | tiles = arch_details["architecture"]["tile"] 48 | tile_name = tiles[0]["name"] 49 | range_start, range_end = parse_name_range(tile_name) 50 | passed = True 51 | total_tiles = 2 52 | if range_start is None: 53 | print(f"{red_text}Exercise 2: FAIL - Tile not duplicated{default_text}") 54 | return 55 | elif (range_end - range_start) + 1 != total_tiles: 56 | print(f"{red_text}Exercise 2: FAIL - Tile duplicated {1+(range_end-range_start)} " 57 | f"times, should be {total_tiles} times{default_text}") 58 | return 59 | 60 | cores = tiles[0]["core"] 61 | core_name = cores[0]["name"] 62 | range_start, range_end = parse_name_range(core_name) 63 | total_cores = 4 64 | if range_start is None: 65 | print(f"{red_text}Exercise 2: FAIL - Cores not duplicated{default_text}") 66
| passed = False 67 | elif (range_end - range_start) + 1 != total_cores: 68 | print(f"{red_text}Exercise 2: FAIL - Cores duplicated {1+(range_end-range_start)} " 69 | f"times, should be {total_cores} times{default_text}") 70 | passed = False 71 | 72 | if passed: 73 | print(f"{green_text}Exercise 2: PASS{default_text}") 74 | 75 | 76 | def check_arch_exercise_3(arch_details): 77 | tiles = arch_details["architecture"]["tile"] 78 | cores = tiles[0]["core"] 79 | synapses = cores[0]["synapse"] 80 | if len(synapses) != 2: 81 | print(f"{red_text}Exercise 3: FAIL - Expected to see 2 synapse units, " 82 | f"only found {len(synapses)}{default_text}") 83 | else: 84 | # Get the new soma unit 85 | synapse = synapses[0] 86 | if synapse["name"] == "tutorial_synapse_uncompressed": 87 | synapse = synapses[1] 88 | synapse_energy = synapse["attributes"]["energy_process_spike"] 89 | synapse_latency = synapse["attributes"]["latency_process_spike"] 90 | 91 | if synapse_energy == 0.5e-12 and synapse_latency == 2.0e-9: 92 | print(f"{green_text}Exercise 3: PASS{default_text}") 93 | else: 94 | print(f"{red_text}Exercise 3: FAIL - New synapse energy ({synapse_energy}J) " 95 | f"and/or latency ({synapse_latency}s) not set correctly{default_text}") 96 | 97 | 98 | def check_snn(snn_filename): 99 | with open(snn_filename, "r") as snn_file: 100 | snn = yaml.safe_load(snn_file) 101 | check_exercise_snns_1(snn) 102 | check_exercise_snns_2(snn) 103 | check_exercise_snns_3(snn) 104 | check_exercise_snns_4(snn) 105 | 106 | 107 | def check_exercise_snns_1(snn): 108 | net = snn["network"] 109 | group = net["groups"][1] 110 | neurons_found = len(group["neurons"]) 111 | if len(group["neurons"]) != 2: 112 | print(f"{red_text}Exercise 1: FAIL - Should be 2 neurons in group 1, found {neurons_found}{default_text}") 113 | return 114 | 115 | # SANA-FE will check other aspects of the mapping; if it runs, it should be 116 | # fine 117 | print(f"{green_text}Exercise 1: PASS{default_text}") 118 | 119 | 120 | def check_exercise_snns_2(snn): 121 | net = snn["network"] 122 | edges = net["edges"] 123 | if len(edges) < 3: 124 | print(f"{red_text}Exercise 2: FAIL - Expected 3 edges but got {len(edges)}{default_text}") 125 | return 126 | 127 | # TODO: check weights are correct 128 | 129 | print(f"{green_text}Exercise 2: PASS{default_text}") 130 | 131 | 132 | def check_exercise_snns_3(snn): 133 | net = snn["network"] 134 | group = net["groups"][0] 135 | neuron = group["neurons"][1] 136 | attributes = list(neuron.values())[0] 137 | if ("bias" not in attributes or attributes["bias"] != 0.5): 138 | print(f"{red_text}Exercise 3: FAIL - Neuron 0.1 bias not set to " 139 | f"0.5{default_text}") 140 | else: 141 | print(f"{green_text}Exercise 3: PASS{default_text}") 142 | 143 | 144 | def check_exercise_snns_4(snn): 145 | net = snn["network"] 146 | group = net["groups"][1] 147 | from functools import reduce 148 | attributes = reduce(lambda a, b: {**a, **b}, group["attributes"]) 149 | 150 | if attributes["synapse_hw_name"] == "tutorial_synapse_uncompressed": 151 | print(f"{red_text}Exercise 4: FAIL - Set group 1 synapse h/w to your " 152 | f"new synapse H/W unit{default_text}") 153 | else: 154 | print(f"{green_text}Exercise 4: PASS{default_text}") 155 | 156 | 157 | def check_api(snn): 158 | check_exercise_api_1(snn) 159 | check_exercise_api_2(snn) 160 | 161 | 162 | def check_exercise_api_1(snn): 163 | group = snn["out"] 164 | neurons_found = len(group) 165 | if len(group) != 2: 166 | print(f"{red_text}Exercise 1: FAIL - Should be 2 neurons in output " 167 | f"layer, found
{neurons_found}{default_text}") 168 | return 169 | 170 | print(f"{green_text}Exercise 1: PASS{default_text}") 171 | 172 | 173 | def check_exercise_api_2(snn): 174 | in_layer = snn["in"] 175 | neuron = in_layer[0] 176 | connections_out = in_layer[0].edges_out 177 | 178 | if (len(connections_out) != 2 or 179 | connections_out[0].post_neuron.group_name != "out" or 180 | connections_out[1].post_neuron.group_name != "out"): 181 | print(f"{red_text}Exercise 2: FAIL - Should be 2 edges out of in.0, " 182 | f"to the output layer, found {len(connections_out)}{default_text}") 183 | return 184 | 185 | synapse_attributes = connections_out[1].synapse_attributes 186 | if (synapse_attributes.get("w") not in (-2, -2.0) and 187 | synapse_attributes.get("weight") not in (-2, -2.0)): 188 | print(f"{red_text}Exercise 2: FAIL - in.0 weight should be -2{default_text}") 189 | return 190 | 191 | connections_out = in_layer[1].edges_out 192 | if (len(connections_out) != 1 or 193 | connections_out[0].post_neuron.group_name != "out" or 194 | connections_out[0].post_neuron.neuron_offset != 1): 195 | print(f"{red_text}Exercise 2: FAIL - Should be 1 edge out of in.1 to " 196 | f"out.1, found {len(connections_out)}{default_text}") 197 | return 198 | 199 | synapse_attributes = connections_out[0].synapse_attributes 200 | if (synapse_attributes.get("w") not in (3, 3.0) and 201 | synapse_attributes.get("weight") not in (3, 3.0)): 202 | print(f"{red_text}Exercise 2: FAIL - in.1 weight should be 3{default_text}") 203 | return 204 | 205 | print(f"{green_text}Exercise 2: PASS{default_text}") 206 | -------------------------------------------------------------------------------- /scripts/booksim.config: -------------------------------------------------------------------------------- 1 | // $Id$ 2 | 3 | // Copyright (c) 2007-2015, Trustees of The Leland Stanford Junior University 4 | // All rights reserved. 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are met: 8 | // 9 | // Redistributions of source code must retain the above copyright notice, this 10 | // list of conditions and the following disclaimer. 11 | // Redistributions in binary form must reproduce the above copyright notice, 12 | // this list of conditions and the following disclaimer in the documentation 13 | // and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 | // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 | // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 | // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 | // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 | // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 | // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 | // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 | // POSSIBILITY OF SUCH DAMAGE.
26 | 
27 | // 8x4 mesh
28 | 
29 | // Topology
30 | // Based on the NanoMesh NoC architecture [Tse, 2013] used by Loihi
31 | 
32 | topology = cmesh;
33 | subnets = 2;
34 | //subnets = 1;
35 | k = 8; // TODO: overriding this, in future remove this too
36 | n = 2;
37 | x = 8;
38 | y = 4;
39 | c = 4;
40 | // xr and yr don't make much sense as variables: there isn't really a need
41 | // to define these in addition to c
42 | // TODO: remove xr and yr from simulations and calculations
43 | xr = 2;
44 | yr = 2;
45 | 
46 | // Routing
47 | routing_function = dor_no_express;
48 | use_noc_latency = 0;
49 | //routing_function = xy_yx;
50 | 
51 | // Flow control
52 | num_vcs = 1;
53 | vc_buf_size = 8;
54 | wait_for_tail_credit = 1;
55 | 
56 | // Router architecture
57 | vc_allocator = islip;
58 | sw_allocator = islip;
59 | alloc_iters = 1;
60 | 
61 | credit_delay = 0;
62 | routing_delay = 0;
63 | //credit_delay = 1;
64 | //routing_delay = 1;
65 | vc_alloc_delay = 1;
66 | sw_alloc_delay = 1;
67 | 
68 | input_speedup = 1;
69 | output_speedup = 1;
70 | internal_speedup = 1.0;
71 | 
72 | // Traffic
73 | packet_size = 1;
74 | 
75 | // Simulation
76 | // trace_file = /home/usr1/jboyle/neuro/sana-fe/messages_single_ts.csv;
77 | // Do not randomly generate any input packets
78 | injection_rate = 0.0;
79 | clock_period = 1e-9;
--------------------------------------------------------------------------------
/scripts/capstone_gui_runtime.py:
--------------------------------------------------------------------------------
 1 | # Capstone code for live demo. Note this is a fragment: it assumes `sim` (a
 2 | # loaded SANA-FE simulation) and `run_alive` are defined by the GUI runtime
 3 | timesteps = 0
 4 | if run_alive:
 5 |     while True:
 6 |         if timesteps > 0:
 7 |             sim.run(timesteps, heartbeat=100)
 8 |             sim.get_run_summary()
 9 |         print("Enter timesteps to run: ", end="")
10 |         user_in = input()
11 | 
12 |         if user_in == "q" or user_in == "quit":
13 |             break
14 |         if user_in.startswith("u"):
15 |             try:
16 |                 group_id = int(user_in[2])
17 |             except ValueError:
18 |                 print(f"Error: Expected int. Got \"{user_in[2]}\".")
19 |                 exit(1)
20 | 
21 |             try:
22 |                 n_id = int(user_in[4])
23 |             except ValueError:
24 |                 print(f"Error: Expected int. Got \"{user_in[4]}\".")
25 |                 exit(1)
26 | 
27 |             user_in = user_in[6:]
28 |             kwargs = user_in.split(" ")
29 |             # print(group_id, n_id, kwargs, len(kwargs))
30 |             #sim.update_neuron(group_id, n_id, kwargs, len(kwargs))
31 | 
32 |             timesteps = 0
33 |             continue
34 |         if user_in.startswith("s"):
35 |             try:
36 |                 group_id = int(user_in[2])
37 |             except ValueError:
38 |                 print(f"Error: Expected int. Got \"{user_in[2]}\".")
39 |                 exit(1)
40 | 
41 |             #print(sim.get_status(group_id))
42 | 
43 |             timesteps = 0
44 |             continue
45 | 
46 |         try:
47 |             timesteps = int(user_in)
48 |         except ValueError:
49 |             print(f"Error: Expected int. Got {user_in}.")
50 |             exit(1)
51 |         else:
52 |             if timesteps < 1:
53 |                 print(f"Error: Given {timesteps} timesteps, require int >= 1.")
54 |                 exit(1)
55 |         sim.run_timesteps(timesteps)
--------------------------------------------------------------------------------
/scripts/compare_nemo_perf.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Copyright (c) 2024 - The University of Texas at Austin
 3 | This work was produced under contract #2317831 to National Technology and
 4 | Engineering Solutions of Sandia, LLC which is under contract
 5 | No. DE-NA0003525 with the U.S. Department of Energy.
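compare_nemo_perf.py - Generate randomized TrueNorth-style SNNs, run them on
both SANA-FE and NeMo, and compare the measured run-times and throughput.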
6 | """ 7 | # TODO: I seem to be getting an issue where NeMo is freezing after a number of 8 | # timestep / ticks 9 | 10 | # External libraries, plotting 11 | import matplotlib 12 | matplotlib.use('Agg') 13 | from matplotlib import pyplot as plt 14 | 15 | # Other external libraries 16 | import numpy as np 17 | import pandas as pd 18 | 19 | # Python built-in libraries 20 | import subprocess 21 | import random 22 | import time 23 | import csv 24 | import sys 25 | import os 26 | 27 | # SANA-FE libraries 28 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) 29 | PROJECT_DIR = os.path.abspath((os.path.join(SCRIPT_DIR, os.pardir))) 30 | 31 | sys.path.insert(0, PROJECT_DIR) 32 | import utils 33 | 34 | # Use a dumb seed to get consistent results 35 | random.seed(1) 36 | 37 | # Global experiment parameters 38 | TRUENORTH_COMPARTMENTS = 256 39 | TRUENORTH_AXONS = TRUENORTH_COMPARTMENTS 40 | SPIKE_INTRA_CORE_PROB = 0.8 41 | NETWORK_FILENAME = os.path.join(PROJECT_DIR, "runs", "nemo", 42 | "nemo_randomized.net") 43 | ARCH_FILENAME = os.path.join(PROJECT_DIR, "arch", "truenorth.yaml") 44 | TIMESTEPS = 10 # i.e., ticks, where each tick is 1 ms of wall-time on the chip 45 | NEMO_BIN_PATH = "/home/usr1/jboyle/neuro/nemo/NeMo/bin/NeMo" 46 | CSV_RESULTS_FILENAME = os.path.join(PROJECT_DIR, "runs", "nemo", 47 | "compare_sanafe_nemo.csv") 48 | 49 | # Create a random truenorth network, 80% connected to neuron within same 50 | # core, 20% connected to neurons outside 51 | def create_nemo_network(cores): 52 | network = sim.Network(save_mappings=True) 53 | compartments = sim.init_compartments(cores, 1, TRUENORTH_COMPARTMENTS) 54 | print("Creating neuron population") 55 | 56 | mappings = [] 57 | for i in range(0, cores): 58 | m = (i, 0) 59 | mappings.extend((m,) * TRUENORTH_COMPARTMENTS) 60 | # Create neurons to fill every TrueNorth compartment, with a negative 61 | # threshold and forced updates i.e., spikes every timestep 62 | population = sim.create_layer(network, cores*TRUENORTH_COMPARTMENTS, 63 | compartments, 0, 0, 1, 0.0, -1.0, 0.0, 64 | mappings=mappings, connections_out=1, 65 | soma_hw_name="core_soma", 66 | synapse_hw_name="core_synapses") 67 | 68 | print("Generating randomized network connections") 69 | weight = 1 70 | for c in range(0, cores): 71 | if (c % 32) == 0: 72 | print(f"Generating synaptic connections for core {c}") 73 | for n in range(0, TRUENORTH_AXONS): 74 | if random.random() < SPIKE_INTRA_CORE_PROB: 75 | possible_cores = list(range(0, cores)) 76 | del(possible_cores[c]) 77 | dest_core = random.choice(possible_cores) 78 | else: # 20% chance of picking the same core 79 | dest_core = c 80 | dest_axon = random.randrange(0, TRUENORTH_AXONS) 81 | src = population.neurons[(c*TRUENORTH_AXONS) + n] 82 | dest = population.neurons[(dest_core*TRUENORTH_AXONS) + dest_axon] 83 | src.add_connection(dest, weight) 84 | 85 | network.save(NETWORK_FILENAME) 86 | 87 | 88 | # Run the simulation on SANA-FE, generating the network and immediately using it 89 | # Return the total runtime measured by Python, including setup and processing 90 | # time. 91 | def run_sim_sanafe(cores, timesteps): 92 | create_nemo_network(cores) 93 | start = time.time() 94 | sim.run(ARCH_FILENAME, NETWORK_FILENAME, timesteps) 95 | end = time.time() 96 | run_time = end - start 97 | print(f"sanafe runtime for {cores} cores was {run_time} s") 98 | return run_time 99 | 100 | 101 | # Run the same simulation on NeMo, for a given number of cores and timesteps 102 | # Return the runtime measured by Python. 
103 | # TODO: should we add changes to measure the runtime of simulation and ignore 104 | # setup time? Is this even possible 105 | def run_sim_nemo(cores, timesteps, debug=True): 106 | run_command = ["mpirun", "-np", "12", NEMO_BIN_PATH, 107 | f"--cores={cores}", f"--end={timesteps}", "--sync=3", 108 | "--rand"] 109 | if debug: 110 | run_command.append("--svouts") 111 | print("NeMo command: {0}".format(" ".join(run_command))) 112 | start = time.time() 113 | subprocess.call(run_command) 114 | end = time.time() 115 | run_time = end - start 116 | print(f"nemo runtime for {cores} cores was {run_time} s") 117 | 118 | if debug: 119 | # See how many spikes were sent, i.e., how many line entries are in the 120 | # firing logs (1 spike per line). We just want to check we generate 121 | # roughly the same number of spikes as sana-fe 122 | num_spikes = 0 123 | for i in range(0, 4): 124 | num_spikes += sum(1 for line in open(f"fire_record_rank_{i}.csv")) 125 | print(f"NeMo {num_spikes} total spikes") 126 | 127 | return run_time 128 | 129 | 130 | def plot_results(): 131 | df = pd.read_csv(CSV_RESULTS_FILENAME, index_col="cores") 132 | plt.rcParams.update({'font.size': 6, 'lines.markersize': 1}) 133 | times = np.array(df.values) 134 | cores = np.array(df.index) 135 | entries = len(cores) 136 | #df.plot.bar(rot=0, figsize=(3.5, 1.4), color=("#ff7f0e", "#1f77b4")) 137 | 138 | #fig = plt.figure(figsize=(3.5, 1.4)) 139 | fig = plt.figure(figsize=(3.7, 1.4)) 140 | nemo_throughput = TIMESTEPS / times[:, 1] 141 | sanafe_throughput = TIMESTEPS / times[:, 0] 142 | bar1 = plt.bar(np.arange(entries) - 0.15, nemo_throughput, width=0.3) 143 | bar2 = plt.bar(np.arange(entries) + 0.15, sanafe_throughput, width=0.3, 144 | alpha=.99) 145 | plt.legend(("NeMo", "SANA-FE")) 146 | 147 | for i, rect in enumerate(bar1): 148 | height = rect.get_height() 149 | plt.text(rect.get_x() + rect.get_width() / 2.0, height, f"{nemo_throughput[i]:.1f}", ha="center", va="bottom") 150 | 151 | for i, rect in enumerate(bar2): 152 | height = rect.get_height() 153 | plt.text(rect.get_x() + rect.get_width() / 2.0, height, f"{sanafe_throughput[i]:.1f}", ha="center", va="bottom") 154 | 155 | ax = plt.gca() 156 | plt.xlabel("TrueNorth Core Count / Total Neurons") 157 | ax.set_xticks(np.arange(entries)) 158 | neuron_counts = ("8k", "16k", "32k", "64k", "128k", "256k") 159 | core_labels = [] 160 | for core, neuron in zip(cores, neuron_counts): 161 | core_labels.append(f"{core}/{neuron}") 162 | ax.set_xticklabels(core_labels) 163 | #plt.xticks(rotation=30) 164 | #plt.ylabel("Run-time (s)") 165 | plt.ylabel("Throughput (steps per s)") 166 | #plt.yscale("log") 167 | #plt.minorticks_on() 168 | plt.ylim((0, 25)) 169 | ax.tick_params(axis='y', which='minor', labelbottom=False) 170 | plt.tight_layout(pad=0.3) 171 | plt.savefig(os.path.join(PROJECT_DIR, "runs", "nemo", "compare_sanafe_nemo.png")) 172 | plt.savefig(os.path.join(PROJECT_DIR, "runs", "nemo", "compare_sanafe_nemo.pdf")) 173 | return 174 | 175 | 176 | if __name__ == "__main__": 177 | run_experiments = False 178 | plot_experiments = True 179 | if run_experiments: 180 | core_counts = (32, 64, 128, 256, 512, 1024) 181 | print(f"Running experiments with following core counts: {core_counts}") 182 | 183 | experimental_runs = len(core_counts) 184 | sanafe_runtimes = np.zeros(experimental_runs) 185 | nemo_runtimes = np.zeros(experimental_runs) 186 | 187 | for i, cores in enumerate(core_counts): 188 | print(f"Running simulation of {cores} cores") 189 | sanafe_runtimes[i] = run_sim_sanafe(cores, 
TIMESTEPS) 190 | nemo_runtimes[i] = run_sim_nemo(cores, TIMESTEPS, debug=False) 191 | 192 | print(sanafe_runtimes) 193 | with open(CSV_RESULTS_FILENAME, "w") as csv_file: 194 | writer = csv.writer(csv_file) 195 | writer.writerow(("cores", "SANA-FE", "NeMo")) 196 | for i, cores in enumerate(core_counts): 197 | writer.writerow((cores, sanafe_runtimes[i], nemo_runtimes[i])) 198 | 199 | print("Saved results to file") 200 | 201 | if plot_experiments: 202 | plot_results() 203 | -------------------------------------------------------------------------------- /scripts/compare_spiketrains.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2024 - The University of Texas at Austin 3 | This work was produced under contract #2317831 to National Technology and 4 | Engineering Solutions of Sandia, LLC which is under contract 5 | No. DE-NA0003525 with the U.S. Department of Energy. 6 | """ 7 | import csv 8 | import argparse 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument("first") 12 | parser.add_argument("second") 13 | 14 | args = parser.parse_args() 15 | first_spiketrain = args.first 16 | second_spiketrain = args.second 17 | 18 | 19 | with open(first_spiketrain, "r") as first: 20 | data = csv.reader(first) 21 | first_neurons = next(data) 22 | first_timesteps = next(data) 23 | 24 | with open(second_spiketrain, "r") as second: 25 | data = csv.reader(second) 26 | second_neurons = next(data) 27 | second_timesteps = next(data) 28 | 29 | same = True 30 | for (n1, t1, n2, t2) in zip(first_neurons, first_timesteps, second_neurons, second_timesteps): 31 | if n1 != n2: 32 | print("Neuron {0} != {1}".format(n1, n2)) 33 | same = False 34 | #break 35 | if t1 != t2: 36 | print("Timestep {0} != {1} (nid:{2})".format(t1, t2, n2)) 37 | same = False 38 | #break 39 | 40 | if same: 41 | print("Spike trains are the same") 42 | -------------------------------------------------------------------------------- /scripts/compress_spiketrain.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2025 - The University of Texas at Austin 3 | This work was produced under contract #2317831 to National Technology and 4 | Engineering Solutions of Sandia, LLC which is under contract 5 | No. DE-NA0003525 with the U.S. Department of Energy. 
 6 | 
 7 | Compress spike train data from SANA-FE for a chosen layer to snntoolbox's format
 8 | """
 9 | import csv
10 | 
11 | layer = '1'
12 | spikes = []
13 | 
14 | with open("spikes.csv", "r") as csvfile:
15 |     reader = csv.DictReader(csvfile)
16 |     for row in reader:
17 |         l, neuron = row["neuron"].split(".")
18 |         if l == layer:
19 |             spikes.append((neuron, int(row["timestep"])))
20 | 
21 | # Sort the list based on the neuron id rather than timestep
22 | spikes.sort(key=lambda x: int(x[0]))
23 | 
24 | spike_neurons = [spike[0] for spike in spikes]
25 | spike_times = [spike[1] for spike in spikes]
26 | 
27 | # Now print in the new format
28 | with open("spiketrain.csv", "w") as csvfile:
29 |     writer = csv.writer(csvfile)
30 |     writer.writerow(spike_neurons)
31 |     writer.writerow(spike_times)
32 | 
33 | print("Finished converting spike train format.")
--------------------------------------------------------------------------------
/scripts/create_tutorial.py:
--------------------------------------------------------------------------------
 1 | from tensorflow.keras import models
 2 | import numpy as np
 3 | import os
 4 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
 5 | PROJECT_DIR = os.path.abspath((os.path.join(SCRIPT_DIR, os.pardir)))
 6 | 
 7 | # Load the Keras model for the DVS gesture categorization benchmark
 8 | model = models.load_model(os.path.join(PROJECT_DIR, "etc", "dvs_challenge.h5"))
 9 | inputs = np.loadtxt(os.path.join(PROJECT_DIR, "etc", "inputs.csv"))
10 | inputs = inputs[0:1024]
11 | 
12 | dense_layer = np.reshape(model.layers[5].get_weights()[0], (9, 9, 11, 11))
13 | dense_layer = np.swapaxes(dense_layer, 0, 1)
14 | dense_layer = np.swapaxes(dense_layer, 2, 1)
15 | dense_layer = np.reshape(dense_layer, (891, 11), order='C')
16 | print(dense_layer)
17 | np.savez("dvs_challenge.npz",
18 |          conv1=np.rint(model.layers[0].get_weights()[0] * 420.05236577257483),
19 |          conv2=np.rint(model.layers[1].get_weights()[0] * 351.1046444780251),
20 |          conv3=np.rint(model.layers[2].get_weights()[0] * 276.6147837631879),
21 |          conv4=np.rint(model.layers[3].get_weights()[0] * 371.60317670987195),
22 |          dense1=np.rint(dense_layer * 341.41679600239286),
23 |          inputs=np.rint(inputs),
24 |          thresholds=np.array((255, 420, 351, 276, 371, 341)))
25 | print(model.summary())
--------------------------------------------------------------------------------
/scripts/demo_truenorth.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Copyright (c) 2024 - The University of Texas at Austin
 3 | This work was produced under contract #2317831 to National Technology and
 4 | Engineering Solutions of Sandia, LLC which is under contract
 5 | No. DE-NA0003525 with the U.S. Department of Energy.
 6 | 
 7 | Validate and demo TrueNorth neuron models similar to the method from:
 8 | "NeMo: A Massively Parallel Discrete-Event Simulation Model for Neuromorphic
 9 | Architectures", M. Plagge (2016)
10 | 
11 | Two different experiments showing 1) Izhikevich phasic spiking and
12 | 2) Izhikevich tonic bursting. The main purpose of this experiment is to
13 | cross-validate my TrueNorth implementation.
14 | """ 15 | import matplotlib 16 | matplotlib.use('Agg') 17 | 18 | import sys 19 | import pandas as pd 20 | from matplotlib import pyplot as plt 21 | import numpy as np 22 | import os 23 | 24 | import sys 25 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) 26 | PROJECT_DIR = os.path.abspath((os.path.join(SCRIPT_DIR, os.pardir))) 27 | sys.path.insert(0, os.path.join(PROJECT_DIR)) 28 | import utils 29 | 30 | ARCH_PATH = os.path.join(PROJECT_DIR, "arch", "truenorth.yaml") 31 | 32 | 33 | def run_sim(network_path, timesteps, plot_filename): 34 | print(ARCH_PATH) 35 | sim.run(ARCH_PATH, network_path, timesteps, potential_trace=True, 36 | spike_trace=True) 37 | 38 | potential_data = pd.read_csv("potential.csv") 39 | spike_data = pd.read_csv("spikes.csv") 40 | 41 | offset=200 42 | potentials = potential_data.loc[offset:timesteps, "neuron 1.0"] 43 | spikes_in = spike_data.loc[spike_data["neuron"] == 0.0] 44 | spikes_out = spike_data.loc[spike_data["neuron"] == 1.0] 45 | 46 | plt.rcParams.update({'font.size': 6, "lines.markersize": 2}) 47 | plt.figure(figsize=(3.2, 1.5)) 48 | plt.plot(np.arange(0, timesteps-offset), potentials) 49 | linestyle = (0, (1, 2)) 50 | plt.plot(np.arange(0, timesteps-offset), potentials, linestyle=linestyle, color="black") 51 | legend = plt.legend(("NeMo", "SANA-FE"), fontsize=6, handlelength=1) 52 | spike_idx = spikes_out.loc[:, "timestep"] 53 | height = max(potentials) + 2 54 | spike_vals = height * np.ones(spike_idx.shape) 55 | plt.scatter(spike_idx-offset, spike_vals, marker='^', color='red') 56 | spike_idx = spikes_in.loc[:, "timestep"] 57 | spike_vals = (min(potentials) - 1.0) * np.ones(spike_idx.shape) 58 | plt.scatter(spike_idx-offset, spike_vals, marker='^', color='black') 59 | plt.xlabel("Simulation Ticks") 60 | plt.ylabel("Membrane Potential") 61 | #plt.ylim((0, 22)) 62 | 63 | 64 | 65 | plt.tight_layout() 66 | # Need to save 67 | plt.savefig(plot_filename) 68 | 69 | # Probe potential and probe spikes to get the data 70 | return (potential_data, spike_data) 71 | 72 | 73 | def run_experiment(): 74 | run_sim("snn/nemo/truenorth_phasic.net", 1200, "runs/nemo/phasic.pdf") 75 | run_sim("snn/nemo/truenorth_bursting.net", 1200, "runs/nemo/bursting.pdf") 76 | 77 | if __name__ == "__main__": 78 | run_experiment() 79 | -------------------------------------------------------------------------------- /scripts/dendrites.py: -------------------------------------------------------------------------------- 1 | import snntorch as snn 2 | 3 | from snntorch import spikeplot as splt 4 | from snntorch import spikegen 5 | import torch 6 | import torch.nn as nn 7 | 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | 11 | network = snn.torch.load("runs/dendrites/dend.pt") 12 | spike_recording = [] 13 | 14 | num_steps = 100 15 | batch_size = 1 16 | data_in = torch.rand(num_steps, batch_size, 1, 28, 28) 17 | 18 | #for step in range(num_steps): 19 | # spike, state = network(data_in[step]) 20 | # spike_recording.append(spike) 21 | 22 | for param in network.parameters(): 23 | print(param) -------------------------------------------------------------------------------- /scripts/git_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export GIT_VERSION=`git log -1 --pretty=format:"%H"` 3 | if [[ $(git diff --stat) != "" ]]; 4 | then GIT_VERSION="$GIT_VERSION-dirty" 5 | fi 6 | 7 | echo $GIT_VERSION; 8 | -------------------------------------------------------------------------------- /scripts/load_network.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2024 - The University of Texas at Austin 3 | This work was produced under contract #2317831 to National Technology and 4 | Engineering Solutions of Sandia, LLC which is under contract 5 | No. DE-NA0003525 with the U.S. Department of Energy. 6 | 7 | load_network.py - Simulator script and utility functionality 8 | """ 9 | import sys 10 | import os 11 | 12 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) 13 | PROJECT_DIR = os.path.abspath((os.path.join(SCRIPT_DIR, os.pardir))) 14 | sys.path.insert(0, os.path.join(PROJECT_DIR)) 15 | # Set some flags for the dynamic linking library 16 | # Important to do before importing the simcpp .so library! 17 | sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY) 18 | import sanafecpp 19 | 20 | def load_from_net_file(filename, arch, heartbeat=100): 21 | neurons_loaded = 0 22 | edges_loaded = 0 23 | mappings_loaded = 0 24 | 25 | net = sanafecpp.Network() 26 | with open(filename, 'r') as network_file: 27 | for line in network_file: 28 | fields = line.split() 29 | if not fields: 30 | continue 31 | if fields[0] == 'g': 32 | neuron_count = int(fields[1]) 33 | group_attributes = dict([f.split('=') for f in fields[2:]]) 34 | group = net.create_neuron_group(neuron_count, 35 | group_attributes) 36 | print("Loaded group") 37 | elif fields[0] == 'n': 38 | group_id = int(fields[1].split('.')[0]) 39 | neuron_id = int(fields[1].split('.')[1]) 40 | neuron_attributes = dict([f.split('=') for f in fields[2:]]) 41 | group = net.groups[group_id] 42 | group.define_neuron(neuron_id, neuron_attributes) 43 | neurons_loaded += 1 44 | if (neurons_loaded % heartbeat) == 0: 45 | print(f"Loaded {neurons_loaded} neurons") 46 | 47 | elif fields[0] == 'e': 48 | edge_info = fields[1] 49 | src_address = edge_info.split("->")[0] 50 | dest_address = edge_info.split("->")[1] 51 | 52 | src_gid = int(src_address.split(".")[0]) 53 | src_nid = int(src_address.split(".")[1]) 54 | src = net.groups[src_gid].neurons[src_nid] 55 | 56 | dest_gid = int(dest_address.split(".")[0]) 57 | dest_nid = int(dest_address.split(".")[1]) 58 | dest = net.groups[dest_gid].neurons[dest_nid] 59 | 60 | edge_attributes = dict([f.split('=') for f in fields[2:]]) 61 | src.connect_to_neuron(dest, edge_attributes) 62 | edges_loaded += 1 63 | if (edges_loaded % heartbeat) == 0: 64 | print(f"Loaded {edges_loaded} edges") 65 | 66 | elif fields[0] == '&': 67 | mapping_info = fields[1] 68 | neuron_address = mapping_info.split("@")[0] 69 | core_address = mapping_info.split("@")[1] 70 | 71 | group_id = int(neuron_address.split(".")[0]) 72 | neuron_id = int(neuron_address.split(".")[1]) 73 | neuron = net.groups[group_id].neurons[neuron_id] 74 | 75 | tile_id = int(core_address.split(".")[0]) 76 | core_offset = int(core_address.split(".")[1]) 77 | core = arch.tiles[tile_id].cores[core_offset] 78 | 79 | core.map_neuron(neuron) 80 | mappings_loaded += 1 81 | if (mappings_loaded % heartbeat) == 0: 82 | print(f"Loaded {mappings_loaded} mappings") 83 | 84 | return net 85 | -------------------------------------------------------------------------------- /scripts/misc_dvs_conversion.py: -------------------------------------------------------------------------------- 1 | """misc_dvs_conversion.py 2 | 3 | A temporary script to help with moving from the old network format to the new 4 | YAML-based format. 
I've already created a script to convert netlists to YAML
 5 | files, so this script just outputs the convolutional filters in the new,
 6 | more compact format, and can update inputs.
 7 | 
 8 | Going forward, I need to make sure the SNNToolbox outputs networks in the new
 9 | compact format, using conv2d and dense layers, and efficiently outputs the
10 | mappings.
11 | """
12 | import yaml
13 | import numpy as np
14 | import sys
15 | 
16 | # Hacks to get pyyaml to print with a mix of flow and block styles
17 | class block_dict(dict):
18 |     pass
19 | 
20 | def block_dict_rep(dumper, data):
21 |     return dumper.represent_mapping(u"tag:yaml.org,2002:map", data,
22 |                                     flow_style=False)
23 | 
24 | class flow_dict(dict):
25 |     pass
26 | 
27 | def flow_dict_rep(dumper, data):
28 |     return dumper.represent_mapping(u"tag:yaml.org,2002:map", data,
29 |                                     flow_style=True)
30 | 
31 | class block_list(list):
32 |     pass
33 | 
34 | def block_list_rep(dumper, data):
35 |     return dumper.represent_sequence(u"tag:yaml.org,2002:seq", data,
36 |                                      flow_style=False)
37 | 
38 | class flow_list(list):
39 |     pass
40 | 
41 | def flow_list_rep(dumper, data):
42 |     return dumper.represent_sequence(u"tag:yaml.org,2002:seq", data,
43 |                                      flow_style=True)
44 | 
45 | yaml.add_representer(block_dict, block_dict_rep)
46 | yaml.add_representer(flow_dict, flow_dict_rep)
47 | yaml.add_representer(block_list, block_list_rep)
48 | yaml.add_representer(flow_list, flow_list_rep)
49 | # End of hacks
50 | 
51 | if (len(sys.argv) != 4):
52 |     print("Usage: misc_dvs_conversion.py <yaml file> <npz file> <out file>")
53 |     exit(1)
54 | script_name, yaml_filename, np_filename, out_filename = sys.argv
55 | 
56 | #with open(yaml_filename, "r") as yaml_file:
57 | #    snn = yaml.safe_load(yaml_file)
58 | info = np.load(np_filename)
59 | 
60 | edges = [
61 |     {"0 -> 1": {"type": "conv2d", "weight": flow_list(info["conv1"].astype(int).flatten().tolist())}},
62 |     {"1 -> 2": {"type": "conv2d", "weight": flow_list(info["conv2"].astype(int).flatten().tolist())}},
63 |     {"2 -> 3": {"type": "conv2d", "weight": flow_list(info["conv3"].astype(int).flatten().tolist())}},
64 |     {"3 -> 4": {"type": "conv2d", "weight": flow_list(info["conv4"].astype(int).flatten().tolist())}},
65 |     {"4 -> 5": {"type": "conv2d", "weight": flow_list(info["dense1"].astype(int).flatten().tolist())}},
66 | ]
67 | 
68 | input_neurons = []
69 | for id, bias in enumerate(info["inputs"].astype(int).tolist()):
70 |     input_neurons.append(flow_dict({id: {"bias": bias}}))
71 | 
72 | print(yaml.dump(edges))
73 | with open(out_filename, "w") as description_file:
74 |     yaml.dump({"network": {"edges": edges}, "neurons": input_neurons},
75 |               description_file, default_flow_style=False)
--------------------------------------------------------------------------------
/scripts/net_to_yaml.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Copyright (c) 2024 - The University of Texas at Austin
 3 | This work was produced under contract #2317831 to National Technology and
 4 | Engineering Solutions of Sandia, LLC which is under contract
 5 | No. DE-NA0003525 with the U.S. Department of Energy.
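Usage: python net_to_yaml.py <file.net> [<file.yaml>]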
 6 | 
 7 | net_to_yaml.py - Convert from v1 .net file to v2 .yaml description
 8 | """
 9 | import sys
10 | import os
11 | import yaml
12 | 
13 | # Hacks to get pyyaml to print with a mix of flow and block styles
14 | class block_dict(dict):
15 |     pass
16 | 
17 | def block_dict_rep(dumper, data):
18 |     return dumper.represent_mapping(u"tag:yaml.org,2002:map", data,
19 |                                     flow_style=False)
20 | 
21 | class flow_dict(dict):
22 |     pass
23 | 
24 | def flow_dict_rep(dumper, data):
25 |     return dumper.represent_mapping(u"tag:yaml.org,2002:map", data,
26 |                                     flow_style=True)
27 | 
28 | class block_list(list):
29 |     pass
30 | 
31 | def block_list_rep(dumper, data):
32 |     return dumper.represent_sequence(u"tag:yaml.org,2002:seq", data,
33 |                                      flow_style=False)
34 | 
35 | class flow_list(list):
36 |     pass
37 | 
38 | def flow_list_rep(dumper, data):
39 |     return dumper.represent_sequence(u"tag:yaml.org,2002:seq", data,
40 |                                      flow_style=True)
41 | 
42 | yaml.add_representer(block_dict, block_dict_rep)
43 | yaml.add_representer(flow_dict, flow_dict_rep)
44 | yaml.add_representer(block_list, block_list_rep)
45 | yaml.add_representer(flow_list, flow_list_rep)
46 | # End of hacks
47 | 
48 | def parse_attributes(attributes):
49 |     float_attributes = ("w", "weight", "bias", "threshold", "reset", "leak_decay")
50 |     bool_attributes = ("log_spikes", "log_v", "force_update")
51 |     for key, val in attributes.items():
52 |         if key in float_attributes:
53 |             val = float(val)
54 |             if val.is_integer():
55 |                 val = int(val)
56 |             attributes[key] = val
57 |         elif key in bool_attributes:
58 |             if val.lstrip("-").isdigit():
59 |                 val = int(val)
60 |                 if val != 0:
61 |                     val = True
62 |                 else:
63 |                     val = False
64 |             elif val == "true" or val == "True":
65 |                 val = True
66 |             else:
67 |                 val = False
68 |             attributes[key] = val
69 | 
70 |     if "connections_out" in attributes:
71 |         del attributes["connections_out"]
72 | 
73 |     return attributes
74 | 
75 | def load_from_net_file(filename, heartbeat=100000):
76 |     # Use short description formats for neurons and edges
77 |     neurons_loaded = 0
78 |     edges_loaded = 0
79 |     mappings_loaded = 0
80 | 
81 |     net = {"network": {}, "mappings": []}
82 |     neurons_in_group = []
83 |     with open(filename, 'r') as net_file:
84 |         net_name = filename.split('.')[0]
85 |         net["network"] = {"name": net_name, "groups": [], "edges": {}}
86 |         for line in net_file:
87 |             fields = line.split()
88 |             if not fields:
89 |                 continue
90 |             if fields[0] == 'g':
91 |                 group_attributes = dict([f.split('=') for f in fields[2:]])
92 |                 group_attributes = flow_dict(parse_attributes(group_attributes))
93 |                 group_id = len(net["network"]["groups"])
94 |                 net["network"]["groups"].append({"name": group_id,
95 |                                                  "attributes": group_attributes,
96 |                                                  "neurons": {}})
97 |                 neurons_in_group.append([])
98 |                 print(f"Loaded group: {group_id}")
99 |             elif fields[0] == 'n':
100 |                 group_id = int(fields[1].split('.')[0])
101 |                 neuron_id = int(fields[1].split('.')[1])
102 |                 neuron_attributes = dict([f.split('=') for f in fields[2:]])
103 |                 neuron_attributes = flow_dict(
104 |                     parse_attributes(neuron_attributes))
105 | 
106 |                 # Add some temporary logic to compress runs of identical neurons
107 |                 if ((len(neurons_in_group[group_id]) > 0) and
108 |                         (neuron_attributes == neurons_in_group[group_id][-1]["attributes"])):
109 |                     # Neuron is identical, don't redefine
110 |                     neurons_in_group[group_id][-1]["_last"] += 1
111 |                 else:
112 |                     print("Creating new neuron definition")
113 |                     neurons_in_group[group_id].append(
114 |                         {"_first": neuron_id, "_last": neuron_id,
115 |                          "attributes": neuron_attributes})
116 |                 neurons_loaded += 1
117 |                 if 
heartbeat is not None and (neurons_loaded % heartbeat) == 0:
118 |                     print(f"Loaded {neurons_loaded} neurons")
119 | 
120 |             elif fields[0] == 'e':
121 |                 edge_info = fields[1]
122 |                 src_address = edge_info.split("->")[0]
123 |                 dest_address = edge_info.split("->")[1]
124 | 
125 |                 edge_description = f"{src_address} -> {dest_address}"
126 |                 edge_attributes = dict([f.split('=') for f in fields[2:]])
127 |                 edge_attributes = flow_dict(parse_attributes(edge_attributes))
128 |                 net["network"]["edges"][edge_description] = edge_attributes
129 | 
130 |                 edges_loaded += 1
131 |                 if heartbeat is not None and (edges_loaded % heartbeat) == 0:
132 |                     print(f"Loaded {edges_loaded} edges")
133 | 
134 |             # Mappings now go into a separate section of the description
135 |             elif fields[0] == '&':
136 |                 mapping_info = fields[1]
137 |                 neuron_address = mapping_info.split("@")[0]
138 |                 core_address = mapping_info.split("@")[1]
139 | 
140 |                 mapping = flow_dict({neuron_address: {"core": core_address}})
141 |                 net["mappings"].append(mapping)
142 |                 mappings_loaded += 1
143 |                 if heartbeat is not None and (mappings_loaded % heartbeat) == 0:
144 |                     print(f"Loaded {mappings_loaded} mappings")
145 | 
146 |     # Go through structs and convert to YAML dictionary
147 |     for group_id, neurons in enumerate(neurons_in_group):
148 |         for n in neurons:
149 |             first, last = n["_first"], n["_last"]
150 |             name = first
151 |             if last > first:
152 |                 name = f"{first}..{last}"
153 | 
154 |             attributes = n["attributes"]
155 |             net["network"]["groups"][group_id]["neurons"][name] = attributes
156 | 
157 |     return net
158 | 
159 | if len(sys.argv) < 2:
160 |     print("Usage: python net_to_yaml.py <file.net> [<file.yaml>]")
161 |     exit(1)
162 | filename = sys.argv[1]
163 | print(f"Old filename: {filename}")
164 | net = load_from_net_file(filename)
165 | 
166 | if len(sys.argv) > 2:
167 |     new_filename = sys.argv[2]
168 | else:
169 |     new_filename = filename.split('.')[0] + ".yaml"
170 | with open(new_filename, "w") as yaml_file:
171 |     print("Writing to YAML file.")
172 |     yaml.dump(net, yaml_file, sort_keys=False)
173 | print(f"YAML file {new_filename} written.")
--------------------------------------------------------------------------------
/scripts/plot_characterization.py:
--------------------------------------------------------------------------------
 1 | import matplotlib
 2 | matplotlib.use('Agg')
 3 | 
 4 | import csv
 5 | from matplotlib import pyplot as plt
 6 | 
 7 | neurons = []
 8 | energies = []
 9 | with open("../runs/neuron_characterization.csv", "r") as nonspiking_csv:
10 |     reader = csv.DictReader(nonspiking_csv)
11 |     for row in reader:
12 |         neurons.append(float(row["Neurons"]))
13 |         energies.append(float(row["Energy (nJ)"]))
14 | 
15 | plt.rcParams.update({'axes.linewidth': 1.5, "scatter.marker": 'x'})
16 | plt.rcParams.update({'font.size': 18, 'lines.markersize': 6})
17 | 
18 | plt.figure(figsize=(4.5, 4.5))
19 | plt.scatter(neurons, energies, marker='x', s=30, lw=2)
20 | plt.yscale("linear")
21 | plt.xscale("linear")
22 | plt.ylabel("Energy (nJ)")
23 | plt.xlabel("Neuron Updates")
24 | plt.ticklabel_format(style="sci", axis="x", scilimits=(0,0))
25 | plt.ticklabel_format(style="sci", axis="y", scilimits=(0,0))
26 | plt.tight_layout()
27 | plt.savefig("../runs/neuron_characterization.pdf")
28 | plt.savefig("../runs/neuron_characterization.png")
--------------------------------------------------------------------------------
/scripts/plot_messages.py:
--------------------------------------------------------------------------------
 1 | import matplotlib.pyplot as plt
 2 | import csv
 3 | import numpy as np
 4 | import 
pandas as pd
 5 | 
 6 | MESSAGE_TRACE_FILENAME = "dvs_messages.trace"
 7 | 
 8 | def tile_idx(x, y):
 9 |     return (x * 4) + y
10 | 
11 | 
12 | def track_hops(src_x, src_y, dest_x, dest_y):
13 |     # Loihi is 8x4 grid (8 on the x axis)
14 |     spikes_processed_per_router = np.zeros(8*4, dtype=int)
15 | 
16 |     # Add spike to tile that generates the spike
17 |     spikes_processed_per_router[tile_idx(src_x, src_y)] += 1
18 | 
19 |     # Account for all the hops in between the src and dest tile
20 |     while src_x < dest_x:
21 |         src_x += 1
22 |         spikes_processed_per_router[tile_idx(src_x, src_y)] += 1
23 | 
24 |     while src_x > dest_x:
25 |         src_x -= 1
26 |         spikes_processed_per_router[tile_idx(src_x, src_y)] += 1
27 | 
28 |     while src_y < dest_y:
29 |         src_y += 1
30 |         spikes_processed_per_router[tile_idx(src_x, src_y)] += 1
31 | 
32 |     while src_y > dest_y:
33 |         src_y -= 1
34 |         spikes_processed_per_router[tile_idx(src_x, src_y)] += 1
35 | 
36 |     return spikes_processed_per_router
37 | 
38 | 
39 | hops = np.zeros(32, dtype=int)
40 | with open(MESSAGE_TRACE_FILENAME) as trace:
41 |     reader = csv.DictReader(trace)
42 |     for line in reader:
43 |         src_hw = line["src_hw"]
44 |         dest_hw = line["dest_hw"]
45 | 
46 |         src_tile, src_core = src_hw.split('.')
47 |         dest_tile, dest_core = dest_hw.split('.')
48 | 
49 |         src_x = int(src_tile) // 4
50 |         src_y = int(src_tile) % 4
51 | 
52 |         dest_x = int(dest_tile) // 4
53 |         dest_y = int(dest_tile) % 4
54 |         hops += track_hops(src_x, src_y,
55 |                            dest_x, dest_y)
56 | 
57 | plt.figure()
58 | 
59 | # Plot a heat map
60 | x, y = np.meshgrid(np.linspace(0, 7, 8), np.linspace(0, 3, 4))
61 | # Plot the circles
62 | 
63 | plt.scatter(x, y, c=hops, cmap="hot")
64 | plt.colorbar()
65 | 
66 | df = pd.read_csv(MESSAGE_TRACE_FILENAME)
67 | #df = pd.read_csv("latin_messages.trace")
68 | #df = pd.read_csv("messages.trace")
69 | plt.figure()
70 | plt.hist(df["hops"], bins=50)
71 | 
72 | plt.figure()
73 | plt.hist(df["generation_delay"], bins=50)
74 | 
75 | plt.figure()
76 | plt.hist(df["processing_latency"], bins=50)
77 | 
78 | plt.show()
--------------------------------------------------------------------------------
/scripts/plot_raster.py:
--------------------------------------------------------------------------------
 1 | import matplotlib.pyplot as plt
 2 | import numpy as np
 3 | import csv
 4 | 
 5 | plt.figure(figsize=(5.0,5.0))
 6 | with open("probe_spikes.csv") as spike_csv:
 7 |     spike_data = csv.DictReader(spike_csv)
 8 |     timesteps = 0
 9 |     neuron_ids = spike_data.fieldnames
10 |     assert(len(neuron_ids) > 0)
11 |     print("Processing {0} neurons".format(len(neuron_ids)))
12 |     #plt.ylim((0, len(neuron_ids)))
13 |     plt.xlim((0, 128))
14 | 
15 |     for neuron_spikes in spike_data:
16 |         # Spikes for one timestep
17 |         values = list(neuron_spikes.values())
18 |         values = values[0:-1]  # Trim empty field at end of the line
19 |         #s = [int(v) for v in values[-900:]]
20 |         #s = [int(v) for v in values[-11:]]
21 |         s = [int(v) for v in values[0:3600]]
22 | 
23 |         spike_array = np.asarray(s)
24 |         spikes = np.where(spike_array >= 1)[0]
25 | 
26 |         plt.scatter([timesteps] * len(spikes), spikes.tolist(), c='b', s=2,
27 |                     marker='.', linewidths=0.1)
28 |         timesteps += 1
29 | 
30 |     print("timesteps: {0}".format(timesteps))
31 | 
32 | plt.xlabel("Time-step")
33 | plt.ylabel("Neuron")
34 | 
35 | plt.savefig("raster.png")
36 | #plt.show()
--------------------------------------------------------------------------------
/scripts/queue_transient.py:
--------------------------------------------------------------------------------
 1 | import random
 2 | import 
matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | buffer_size = 60 6 | #arrival_rate = 1.0 / 5.1e-9 7 | #server_rate = 1.0 / 69.8e-9 8 | 9 | arrival_rate = 1.0 10 | server_rate = 0.1 11 | 12 | #messages = 2000 13 | messages = 200 14 | 15 | # Model arrivals as Poisson, with exponentially distributed interarrival and 16 | # service times 17 | interarrival_times = [random.expovariate(arrival_rate) for i in range(0, messages)] 18 | service_times = [random.expovariate(server_rate) for i in range(0, messages)] 19 | 20 | #plt.figure() 21 | #plt.hist(interarrival_times, bins=int(np.floor(np.sqrt(messages)))) 22 | 23 | #plt.figure() 24 | #plt.hist(service_times, bins=int(np.floor(np.sqrt(messages)))) 25 | 26 | # Model arrivals as bursty, with quick bursts of messages followed by longer 27 | # gaps 28 | 29 | interarrival_times = [5.1e-9] * 6 #ns 30 | interarrival_times.append(random.uniform(30e-9, 90e-9)) # ns 31 | #interarrival_times.append(300e-9) # ns 32 | interarrival_times = interarrival_times * 100 33 | interarrival_times = interarrival_times[0:messages] 34 | print(interarrival_times) 35 | 36 | # Model service as uniformly distributed 37 | #service_times = [random.uniform(2.31E-8, 1.31E-7) for _ in range(0, messages)] 38 | service_times = [70e-9] * messages 39 | 40 | # ** End of arrival and service models ** 41 | 42 | 43 | times = [0,] 44 | queue_sizes = [0,] 45 | 46 | m = 0 47 | queue_len = 0 48 | 49 | t = 0 50 | updates = [] 51 | for i in interarrival_times: 52 | assert(i > 0) 53 | t += i 54 | updates.append((t, +1),) 55 | updates = sorted(updates, key=lambda u: u[0], reverse=True) 56 | 57 | 58 | event_count = 0 59 | while len(updates) > 0: 60 | event_count += 1 61 | if event_count % 100 == 0: 62 | print(event_count) 63 | u = updates.pop() 64 | t = u[0] 65 | if u[1] == +1: # insert 66 | if queue_len < buffer_size: 67 | queue_len += 1 68 | assert(len(service_times) > 0) 69 | if queue_len == 1: # If this is the head of the queue, schedule service 70 | updates.append((t + service_times.pop(), -1)) 71 | 72 | elif u[1] == -1: # service 73 | queue_len -= 1 74 | if queue_len > 0: 75 | updates.append((t + service_times.pop(), -1)) 76 | 77 | times.append(t) 78 | queue_sizes.append(queue_len) 79 | updates = sorted(updates, key=lambda u: u[0], reverse=True) 80 | 81 | 82 | plt.figure() 83 | plt.plot(times, queue_sizes, '-') 84 | 85 | plt.show() 86 | -------------------------------------------------------------------------------- /scripts/test_api.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) 4 | PROJECT_DIR = os.path.abspath((os.path.join(SCRIPT_DIR, os.pardir))) 5 | sys.path.insert(0, os.path.join(PROJECT_DIR)) 6 | import sanafe 7 | 8 | arch = sanafe.load_arch("arch/example.yaml") 9 | chip = sanafe.SpikingChip(arch) 10 | 11 | net = sanafe.load_net("snn/example.net", arch, use_netlist_format=True) 12 | net.save("out", use_netlist_format=True) 13 | chip.load(net) 14 | 15 | result = chip.sim(10, spike_trace=True) 16 | 17 | import yaml 18 | print(yaml.dump(result)) 19 | print(result["spike_trace"]) -------------------------------------------------------------------------------- /scripts/test_pybind.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) 5 | PROJECT_DIR = os.path.abspath((os.path.join(SCRIPT_DIR, os.pardir))) 6 | 
sys.path.insert(0, os.path.join(PROJECT_DIR)) 7 | import sanafe 8 | 9 | net = sanafe.Network() 10 | weights = np.ones((9, 9)) 11 | layer1 = net.create_neuron_group("in", 2, {}) 12 | layer2 = net.create_neuron_group("out", 2, {}) 13 | 14 | print(layer1) 15 | print(layer2) 16 | 17 | layer1.connect_neurons_sparse(layer2, {}, [(0, 0), (0, 1)]) 18 | 19 | groups = net.groups 20 | names = list(net.groups.keys()) 21 | print(groups) 22 | print(names) 23 | print(groups["in"].neurons) 24 | print(groups["in"].neurons[0]) -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import sys 4 | import sysconfig 5 | import platform 6 | import subprocess 7 | 8 | from setuptools import setup, Extension, find_packages 9 | from setuptools.command.build_ext import build_ext 10 | from distutils.version import LooseVersion 11 | 12 | class CMakeExtension(Extension): 13 | def __init__(self, name, sourcedir=""): 14 | Extension.__init__(self, name, sources=[]) 15 | self.sourcedir = os.path.abspath(sourcedir) 16 | 17 | class CMakeBuild(build_ext): 18 | def run(self): 19 | try: 20 | out = subprocess.check_output(["cmake", "--version"]) 21 | except OSError: 22 | raise RuntimeError("CMake must be installed to build the following extensions: " + 23 | ", ".join(e.name for e in self.extensions)) 24 | 25 | for ext in self.extensions: 26 | self.build_extension(ext) 27 | 28 | def build_extension(self, ext): 29 | extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) 30 | print("Current directory:", os.getcwd()) 31 | print("Source directory:", ext.sourcedir) 32 | print("External directory:", extdir) 33 | 34 | jobs = os.getenv('CMAKE_BUILD_PARALLEL_LEVEL', '1') # Default to single-threaded build 35 | # Check for -j option 36 | if '-j' in sys.argv: 37 | # Find the index of '-j' and get the following number 38 | try: 39 | jobs_index = sys.argv.index('-j') + 1 40 | jobs = int(sys.argv[jobs_index]) 41 | # Remove -j option and the following value from sys.argv 42 | sys.argv.pop(jobs_index) 43 | sys.argv.pop(jobs_index - 1) 44 | except (IndexError, ValueError): 45 | print("Warning: -j option requires a positive integer argument, using default number of threads.") 46 | 47 | cmake_args = ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir, 48 | "-DPYTHON_EXECUTABLE=" + sys.executable, 49 | "-DPYTHON_INCLUDE_DIRS=" + sysconfig.get_path('include'), 50 | "-DSTANDALONE_BUILD_ENABLED=OFF", 51 | "-DPYTHON_FROM_SETUP=ON"] 52 | print(f"CMake Arguments: {cmake_args}") 53 | cfg = "Debug" if self.debug else "Release" 54 | build_args = ["--config", cfg] 55 | 56 | if platform.system() == "Windows": 57 | cmake_args += [f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"] 58 | if sys.maxsize > 2**32: 59 | cmake_args += ["-A", "x64"] 60 | else: 61 | cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg] 62 | 63 | env = os.environ.copy() 64 | env["CXXFLAGS"] = "{} -DVERSION_INFO=\\'{}\\'".format(env.get("CXXFLAGS", ""), 65 | self.distribution.get_version()) 66 | env["CMAKE_BUILD_PARALLEL_LEVEL"] = jobs 67 | if not os.path.exists(self.build_temp): 68 | os.makedirs(self.build_temp) 69 | subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env) 70 | subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=self.build_temp) 71 | 72 | setup( 73 | name="sanafe", 74 | version="2.0.20", 75 | author="James Boyle", 76 | author_email="james.boyle@utexas.edu", 77 | 
description="SANA-FE: Simulating Advanced Neuromorphic Architectures for Fast Exploration", 78 | long_description=open("README.md").read(), 79 | long_description_content_type="text/markdown", 80 | url="https://github.com/SLAM-Lab/SANA-FE", 81 | ext_modules=[CMakeExtension("sanafe")], 82 | cmdclass=dict(build_ext=CMakeBuild), 83 | zip_safe=False, 84 | python_requires=">=3.8", 85 | packages=find_packages() 86 | ) 87 | -------------------------------------------------------------------------------- /snn/dendrite.net: -------------------------------------------------------------------------------- 1 | ### dendrite.net 2 | # neuron groups 3 | g 4 log_v=1 4 | 5 | # neurons 6 | n 0.0 taps=3 threshold=100 time_constant[0]=0.8 time_constant[1]=0.8 time_constant[2]=0.7 space_constant[0]=0.1 space_constant[1]=0.2 7 | n 0.1 bias=0.0 threshold=1.0 connections_out=1 8 | n 0.2 bias=0.0 threshold=1.0 connections_out=1 9 | n 0.3 bias=0.1 threshold=2.0 connections_out=1 10 | 11 | # n(0.3) n(0.2) n(0.1) 12 | # | | | 13 | # \ / \ / \ / 14 | # n(0.0:2) <---> n(0.0:1) <---> n(0.0:0) proximal 15 | # | 16 | # \ / 17 | # n(0.0) 18 | 19 | e 0.1->0.0 weight=10 tap=0 20 | e 0.2->0.0 weight=10 tap=1 21 | e 0.3->0.0 weight=10 tap=2 22 | 23 | ## Map everything to one core 24 | & 0.0@0.0 25 | & 0.1@0.0 26 | & 0.2@0.0 27 | & 0.3@0.0 28 | -------------------------------------------------------------------------------- /snn/dendrite.yaml: -------------------------------------------------------------------------------- 1 | ## dendrite.yaml 2 | # n(0.3) n(0.2) n(0.1) 3 | # | | | 4 | # \ / \ / \ / 5 | # n(0.0:2) <---> n(0.0:1) <---> n(0.0:0) proximal 6 | # | 7 | # \ / 8 | # n(0.0) 9 | network: 10 | name: dendrite_example_network 11 | groups: 12 | - name: inputs 13 | attributes: {soma_hw_name: dummy_input} 14 | neurons: 15 | - 0: {spikes: [0,0]} 16 | - 1: {} 17 | - 2: {spikes: [1,0]} 18 | - name: dendrite 19 | attributes: {log_potential: true, force_dendrite_update: true} 20 | neurons: 21 | - 0: {threshold: 100, taps: 3, time_constants: [0.8, 0.8, 0.7], 22 | space_constants: [0.1, 0.2]} 23 | edges: 24 | - inputs.0 -> dendrite.0: {weight: 10, tap: 0} 25 | - inputs.1 -> dendrite.0: {weight: 10, tap: 1} 26 | - inputs.2 -> dendrite.0: {weight: 10, tap: 2} 27 | mappings: 28 | - dendrite.0: {core: 0.0} 29 | - inputs.0: {core: 0.0} 30 | - inputs.1: {core: 0.0} 31 | - inputs.2: {core: 0.0} 32 | -------------------------------------------------------------------------------- /snn/example.net: -------------------------------------------------------------------------------- 1 | ### example.net 2 | # 3 | g 3 threshold=1.0 log_spikes=1 log_v=1 4 | g 3 threshold=2.0 soma_hw_name=demo_soma_alt log_spikes=1 log_v=1 5 | 6 | ## Neuron groups 7 | # gid.nid 8 | n 0.0 bias=1.0 connections_out=1 9 | n 0.1 bias=0.0 connections_out=1 10 | n 0.2 bias=1.0 connections_out=1 11 | n 1.0 { bias: 0 } 12 | n 1.1 [bias: 1.0] 13 | n 1.2 bias=0.0 14 | 15 | ## Edges 16 | e 0.0->1.0 weight=-1.0 17 | e 0.1->1.2 weight=-2.0 18 | e 0.2->1.2 weight=3.0 19 | 20 | ## Mappings 21 | # group.neuron@tile.core 22 | & 0.0@0.0 23 | & 0.1@0.0 24 | & 0.2@0.1 25 | & 1.0@0.0 26 | & 1.1@0.0 27 | & 1.2@0.1 28 | -------------------------------------------------------------------------------- /snn/example.yaml: -------------------------------------------------------------------------------- 1 | network: 2 | name: example 3 | groups: 4 | - name: in 5 | attributes: [log_spikes: true, threshold: 1.0] 6 | neurons: 7 | - 0: [log_potential: true, bias: 1.0] 8 | - 1..2 9 | - name: out 10 | 
attributes: [log_spikes: true, threshold: 2.0, soma_hw_name: demo_soma_alt] 11 | neurons: 12 | - 0..2: [bias: 1.0] 13 | edges: 14 | - in.0 -> out.0: [weight: -1.0] 15 | - in.1 -> out.2: [weight: -2.0] 16 | - in.2 -> out.2: [weight: 3.0] 17 | mappings: 18 | - in: [core: 0.0] 19 | - out.0..1: [core: 0.0] 20 | - out.2: [core: 0.1] 21 | -------------------------------------------------------------------------------- /snn/hh_example.net: -------------------------------------------------------------------------------- 1 | ### hh_example.net 2 | ### Fully connected hodgkin-huxley network, everything spikes on each timestep ### 3 | # First define the neuron group, specifying the number of neurons and default values: 4 | # 5 | g 3 synapse_hw_name=loihi_sparse_synapse m=0.0529 n=0.3177 h=0.5961 current=0 6 | 7 | ## Then define neurons in the group 8 | # gid nid 9 | n 0.0 connections_out=1 soma_hw_name=hh[0] 10 | n 0.1 connections_out=1 soma_hw_name=hh[1] 11 | n 0.2 connections_out=1 soma_hw_name=hh[2] 12 | 13 | ## Next define the edges between neurons 14 | e 0.0->0.1 weight=1.0 15 | e 0.1->0.2 weight=1.0 16 | e 0.2->0.0 weight=1.0 17 | 18 | ## Finally map neurons (group.neuron) to hardware (tile.core) 19 | # neuron@core 20 | & 0.0@0.0 21 | & 0.1@0.0 22 | & 0.2@0.0 23 | -------------------------------------------------------------------------------- /snn/input_net.yaml: -------------------------------------------------------------------------------- 1 | network: 2 | name: example 3 | groups: 4 | - name: in 5 | attributes: [soma_hw_name: demo_input, log_spikes: true] 6 | neurons: 7 | - 0: [spikes: [1,0,0,0]] 8 | - 1: [spikes: [1,0,0,0]] 9 | - 2: [spikes: [0,0,0,1]] 10 | - name: 1 11 | attributes: [log_spikes: true, threshold: 2.0, soma_hw_name: demo_soma_default] 12 | neurons: 13 | - 0..2: [bias: 1.0] 14 | edges: 15 | - in.0 -> 1.0: [weight: -1.0] 16 | - in.1 -> 1.2: [weight: -2.0] 17 | - in.2 -> 1.2: [weight: 3.0] 18 | mappings: 19 | - in.0: {core: 0.0} 20 | - in.1: {core: 0.0} 21 | - in.2: {core: 0.1} 22 | - 1.0: {core: 0.0} 23 | - 1.1: {core: 0.0} 24 | - 1.2: {core: 0.1} 25 | -------------------------------------------------------------------------------- /snn/nemo/truenorth_bursting.net: -------------------------------------------------------------------------------- 1 | # neuron groups 2 | g 1 threshold=300 soma_hw_name=core_soma synapse_hw_name=core_synapses reset=0 reverse_threshold=0 reverse_reset=0 leak_bias=0 reset_mode=hard reverse_reset_mode=none 3 | g 1 threshold=18 soma_hw_name=core_soma synapse_hw_name=core_synapses reset=1 reverse_threshold=0 reverse_reset=1 leak_bias=-1 reset_mode=hard reverse_reset_mode=saturate 4 | g 1 threshold=6 soma_hw_name=core_soma synapse_hw_name=core_synapses reset=0 reverse_threshold=0 reverse_reset=0 leak_bias=0 reset_mode=hard reverse_reset_mode=saturate 5 | 6 | n 0.0 bias=1.0 log_spikes=1 log_v=1 force_update=1 connections_out=2 7 | e 0.0->1.0 w=1.0 8 | e 0.0->2.0 w=0 9 | 10 | # neuron 0 in paper 11 | # TODO: to make sure we update the neuron every timestep, set a small 12 | # negative bias. This is a bit of a hack. 
I should fix force_update to
13 | # mean what it says
14 | n 1.0 bias=0.0 log_spikes=1 log_v=1 force_update=1 connections_out=1
15 | e 1.0->2.0 w=1.0
16 | 
17 | # neuron 1 in paper
18 | n 2.0 bias=0.0 force_update=1 connections_out=1
19 | e 2.0->1.0 w=-100.0
20 | 
21 | & 0.0@0.0
22 | & 1.0@0.0
23 | & 2.0@0.0
--------------------------------------------------------------------------------
/snn/nemo/truenorth_phasic.net:
--------------------------------------------------------------------------------
 1 | g 1 threshold=200 soma_hw_name=core_soma synapse_hw_name=core_synapses reset=0 reverse_threshold=0 reverse_reset=0 leak_bias=0 reset_mode=hard reverse_reset_mode=none
 2 | g 1 threshold=2 soma_hw_name=core_soma synapse_hw_name=core_synapses reset=-15 reverse_threshold=-10 reverse_reset=-15 leak_bias=-2 reset_mode=hard reverse_reset_mode=saturate
 3 | 
 4 | n 0.0 bias=1.0 log_spikes=1 log_v=1 force_update=1 connections_out=1
 5 | e 0.0->1.0 w=4.0
 6 | 
 7 | n 1.0 bias=0.0 log_spikes=1 log_v=1 force_update=1
 8 | 
 9 | & 0.0@0.0
10 | & 1.0@0.0
--------------------------------------------------------------------------------
/src/arch.cpp:
--------------------------------------------------------------------------------
 1 | // Copyright (c) 2025 - The University of Texas at Austin
 2 | // This work was produced under contract #2317831 to National Technology and
 3 | // Engineering Solutions of Sandia, LLC which is under contract
 4 | // No. DE-NA0003525 with the U.S. Department of Energy.
 5 | // arch.cpp
 6 | #include <algorithm>
 7 | #include <cassert>
 8 | #include <cstddef>
 9 | #include <cstdint>
10 | #include <filesystem> // For std::filesystem::path
11 | #include <fstream>
12 | #include <functional> // For std::reference_wrapper
13 | #include <iterator>
14 | #include <map>
15 | #include <optional>
16 | #include <sstream>
17 | #include <stdexcept>
18 | #include <string>
19 | #include <system_error>
20 | #include <tuple>
21 | #include <utility>
22 | #include <vector>
23 | 
24 | #include "arch.hpp"
25 | #include "print.hpp"
26 | #include "yaml_arch.hpp"
27 | 
28 | sanafe::Architecture::Architecture(
29 |         std::string name, const NetworkOnChipConfiguration &noc)
30 |         : ts_sync_delay_table(noc.ts_sync_delay_table)
31 |         , name(std::move(name))
32 |         , noc_width_in_tiles(noc.width_in_tiles)
33 |         , noc_height_in_tiles(noc.height_in_tiles)
34 |         , noc_buffer_size(noc.link_buffer_size)
35 |         , timestep_delay(noc.timestep_delay)
36 | {
37 | }
38 | 
39 | std::vector<std::reference_wrapper<sanafe::CoreConfiguration>>
40 | sanafe::Architecture::cores()
41 | {
42 |     std::vector<std::reference_wrapper<CoreConfiguration>> all_cores_in_arch;
43 | 
44 |     for (TileConfiguration &tile : tiles)
45 |     {
46 |         std::copy(tile.cores.begin(), tile.cores.end(),
47 |                 std::back_inserter(all_cores_in_arch));
48 |     }
49 | 
50 |     return all_cores_in_arch;
51 | }
52 | 
53 | std::string sanafe::Architecture::info() const noexcept
54 | {
55 |     std::ostringstream ss;
56 |     ss << "sanafe::Architecture(tiles=" << tiles.size();
57 |     ss << ", cores=" << core_count << ")";
58 | 
59 |     return ss.str();
60 | }
61 | 
62 | sanafe::TileConfiguration::TileConfiguration(
63 |         std::string name, const size_t id, const TilePowerMetrics &metrics)
64 |         : power_metrics(metrics)
65 |         , name(std::move(name))
66 |         , id(id)
67 | {
68 | }
69 | 
70 | sanafe::CoreConfiguration::CoreConfiguration(std::string name,
71 |         const CoreAddress &address, const CorePipelineConfiguration &pipeline)
72 |         : pipeline(pipeline)
73 |         , name(std::move(name))
74 |         , address(address)
75 | {
76 | }
77 | 
78 | std::pair<size_t, size_t> sanafe::Architecture::calculate_tile_coordinates(
79 |         const size_t tile_id) const
80 | {
81 |     // Map linear tile IDs to 2D coordinates for physical layout representation.
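    // For example, with noc_height_in_tiles == 4, consecutive IDs fill one
    // column at a time: tile 0 -> (0,0), tile 1 -> (0,1), ..., tile 4 -> (1,0).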
 82 |     // This conversion assumes a column-major NoC grid arrangement, where
 83 |     // consecutive IDs are placed vertically before moving to the next column.
 84 |     const size_t x = tile_id / noc_height_in_tiles; // floor(id/height)
 85 |     const size_t y = tile_id % noc_height_in_tiles;
 86 |     assert(x < noc_width_in_tiles);
 87 |     return std::make_pair(x, y);
 88 | }
 89 | 
 90 | sanafe::TileConfiguration &sanafe::Architecture::create_tile(
 91 |         std::string name, const TilePowerMetrics &power_metrics)
 92 | {
 93 |     // Tiles are assigned sequential IDs based on creation order to ensure
 94 |     // deterministic addressing in the network-on-chip topology
 95 |     const size_t new_tile_id = tiles.size();
 96 | 
 97 |     // Tile IDs serve as both array indices and as network addresses in the NoC
 98 |     // topology, enabling O(1) tile lookups and efficient message routing
 99 |     tiles.emplace_back(std::move(name), new_tile_id, power_metrics);
100 |     TileConfiguration &new_tile = tiles[new_tile_id];
101 |     std::tie(new_tile.x, new_tile.y) = calculate_tile_coordinates(new_tile.id);
102 | 
103 |     return new_tile;
104 | }
105 | 
106 | sanafe::Architecture sanafe::load_arch(const std::filesystem::path &path)
107 | {
108 |     std::ifstream arch_fp_stream(path);
109 | 
110 |     if (arch_fp_stream.fail())
111 |     {
112 |         throw std::system_error(std::make_error_code(std::errc::io_error),
113 |                 "Failed to open architecture file: " + path.string());
114 |     }
115 |     INFO("Loading architecture from file: %s\n", path.c_str());
116 |     return description_parse_arch_file_yaml(arch_fp_stream);
117 | }
118 | 
119 | sanafe::CoreConfiguration &sanafe::Architecture::create_core(std::string name,
120 |         const size_t parent_tile_id,
121 |         const CorePipelineConfiguration &pipeline_config)
122 | {
123 |     if (parent_tile_id >= tiles.size())
124 |     {
125 |         INFO("Error: Tile ID (%zu) out of range (>=%zu)\n", parent_tile_id,
126 |                 tiles.size());
127 |         throw std::invalid_argument("Tile ID out of range");
128 |     }
129 |     // Cores must be attached to existing tiles, representing the on-chip hierarchy
130 |     TileConfiguration &parent_tile = tiles.at(parent_tile_id);
131 |     const size_t offset_within_tile = parent_tile.cores.size();
132 |     const size_t new_core_id = core_count++;
133 |     const CoreAddress new_core_address = {
134 |             parent_tile_id, offset_within_tile, new_core_id};
135 |     // Cores have dual referencing: within their parent tile's local space and
136 |     // globally within the architecture, to support both local and cross-tile
137 |     // routing
138 |     parent_tile.cores.emplace_back(
139 |             std::move(name), new_core_address, pipeline_config);
140 | 
141 |     // The architecture tracks the maximum cores in *any* of its tiles.
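    // (For example, if one tile contains three cores while every other tile
    // contains one, max_cores_per_tile becomes 3.)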
142 |     // This information is needed later by the scheduler, when creating
143 |     // structures to track spike messages and congestion in the NoC
144 |     max_cores_per_tile =
145 |             std::max(max_cores_per_tile, offset_within_tile + 1);
146 |     TRACE1(ARCH, "Core created id:%zu.%zu.\n", parent_tile_id, new_core_id);
147 |     CoreConfiguration &new_core = parent_tile.cores[offset_within_tile];
148 | 
149 |     return new_core;
150 | }
151 | 
152 | sanafe::AxonInConfiguration &sanafe::CoreConfiguration::create_axon_in(
153 |         std::string name, const AxonInPowerMetrics &power_metrics)
154 | {
155 |     axon_in.emplace_back(power_metrics, name);
156 |     AxonInConfiguration &new_axon = axon_in.back();
157 | 
158 |     // Return a reference to the newly created component to allow the caller to
159 |     // perform further configuration using method chaining
160 |     return new_axon;
161 | }
162 | 
163 | sanafe::PipelineUnitConfiguration &
164 | sanafe::CoreConfiguration::create_hardware_unit(
165 |         std::string name, const ModelInfo &model_details)
166 | {
167 |     pipeline_hw.emplace_back(model_details, name);
168 |     PipelineUnitConfiguration &new_hw = pipeline_hw.back();
169 | 
170 |     return new_hw;
171 | }
172 | 
173 | sanafe::AxonOutConfiguration &sanafe::CoreConfiguration::create_axon_out(
174 |         std::string name, const AxonOutPowerMetrics &power_metrics)
175 | {
176 |     axon_out.emplace_back(power_metrics, name);
177 |     AxonOutConfiguration &new_axon_out = axon_out.back();
178 | 
179 |     return new_axon_out;
180 | }
--------------------------------------------------------------------------------
/src/arch.hpp:
--------------------------------------------------------------------------------
 1 | // Copyright (c) 2025 - The University of Texas at Austin
 2 | // This work was produced under contract #2317831 to National Technology and
 3 | // Engineering Solutions of Sandia, LLC which is under contract
 4 | // No. DE-NA0003525 with the U.S. Department of Energy.
 5 | // arch.hpp
 6 | //
 7 | // Classes to specify different neuromorphic (spiking) architectures.
 8 | // In SANA-FE, an architecture is represented as a hierarchy of different
 9 | // hardware tiles, cores, and spike pipeline hardware units. Within a
10 | // neuromorphic chip, cores share some network resources (tiles) and
11 | // communicate over a Network-on-Chip (NoC). An architecture contains one or
12 | // more network tiles, where each tile contains one or more cores. Each core
13 | // has a neuromorphic pipeline composed of a sequence of neural-inspired
14 | // hardware units: implementing synaptic, dendritic and somatic models. SANA-FE
15 | // uses these classes to represent an abstract architecture. The Architecture,
16 | // TileConfiguration, and CoreConfiguration classes are later used by SANA-FE
17 | // to construct a SpikingChip, which is a simulation of the realized hardware.
18 | // One Architecture can later be used to define multiple SpikingChip
19 | // simulations.
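//
// A minimal usage sketch, assuming default-constructed configuration structs:
//
//   sanafe::NetworkOnChipConfiguration noc{};
//   sanafe::Architecture arch("example", noc);
//   sanafe::TileConfiguration &tile = arch.create_tile("tile0", {});
//   sanafe::CoreConfiguration &core = arch.create_core("core0", tile.id, {});
//
// Alternatively, load a complete description from file with
// sanafe::load_arch("arch/example.yaml").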
20 | 21 | #ifndef ARCH_HEADER_INCLUDED_ 22 | #define ARCH_HEADER_INCLUDED_ 23 | 24 | #include 25 | #include 26 | #include // For std::filesystem::path 27 | #include // For std::reference_wrapper 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | 34 | #include "attribute.hpp" 35 | #include "fwd.hpp" 36 | #include "utils.hpp" 37 | 38 | namespace sanafe 39 | { 40 | 41 | enum BufferPosition : uint8_t 42 | { 43 | buffer_before_dendrite_unit = 0U, 44 | buffer_inside_dendrite_unit = 1U, 45 | buffer_before_soma_unit = 2U, 46 | buffer_inside_soma_unit = 3U, 47 | buffer_before_axon_out_unit = 4U, 48 | buffer_positions = 5U, 49 | }; 50 | 51 | struct ModelInfo 52 | { 53 | std::map model_attributes; 54 | std::optional plugin_library_path; 55 | std::string name; 56 | bool log_energy{false}; 57 | bool log_latency{false}; 58 | }; 59 | 60 | enum NeuronResetModes : uint8_t 61 | { 62 | neuron_no_reset = 0U, 63 | neuron_reset_soft = 1U, 64 | neuron_reset_hard = 2U, 65 | neuron_reset_saturate = 3U, 66 | neuron_reset_mode_count = 4U, 67 | }; 68 | 69 | class Architecture 70 | { 71 | public: 72 | std::vector tiles; 73 | LookupTable ts_sync_delay_table{}; 74 | std::string name; 75 | size_t core_count{0UL}; 76 | size_t max_cores_per_tile{0UL}; 77 | size_t noc_width_in_tiles{1UL}; 78 | size_t noc_height_in_tiles{1UL}; 79 | size_t noc_buffer_size{0UL}; 80 | 81 | double timestep_delay{0.0}; 82 | 83 | Architecture(std::string name, const NetworkOnChipConfiguration &noc); 84 | [[nodiscard]] std::vector> cores(); 85 | TileConfiguration &create_tile(std::string name, const TilePowerMetrics &power_metrics); 86 | CoreConfiguration &create_core(std::string name, size_t parent_tile_id, const CorePipelineConfiguration &pipeline_config); 87 | [[nodiscard]] std::string info() const noexcept; 88 | 89 | private: 90 | [[nodiscard]] std::pair calculate_tile_coordinates(size_t tile_id) const; 91 | }; 92 | 93 | Architecture load_arch(const std::filesystem::path &path); 94 | 95 | struct NetworkOnChipConfiguration 96 | { 97 | LookupTable ts_sync_delay_table{}; 98 | size_t width_in_tiles{1UL}; 99 | size_t height_in_tiles{1UL}; 100 | size_t link_buffer_size{0UL}; 101 | 102 | double timestep_delay{0.0}; 103 | }; 104 | 105 | struct TilePowerMetrics 106 | { 107 | double energy_north_hop{0.0}; 108 | double latency_north_hop{0.0}; 109 | double energy_east_hop{0.0}; 110 | double latency_east_hop{0.0}; 111 | double energy_south_hop{0.0}; 112 | double latency_south_hop{0.0}; 113 | double energy_west_hop{0.0}; 114 | double latency_west_hop{0.0}; 115 | bool log_energy{false}; 116 | bool log_latency{false}; 117 | }; 118 | 119 | struct TileConfiguration 120 | { 121 | std::vector cores; 122 | TilePowerMetrics power_metrics{}; 123 | std::string name; 124 | size_t id{}; 125 | size_t x{}; 126 | size_t y{}; 127 | 128 | TileConfiguration(std::string name, size_t id, 129 | const TilePowerMetrics &metrics); 130 | }; 131 | 132 | constexpr size_t default_max_neurons = 1024; // The same as Loihi 1 133 | struct CorePipelineConfiguration 134 | { 135 | BufferPosition buffer_position{buffer_before_soma_unit}; 136 | size_t max_neurons_supported{default_max_neurons}; 137 | bool log_energy{false}; 138 | bool log_latency{false}; 139 | }; 140 | 141 | struct CoreAddress 142 | { 143 | size_t parent_tile_id{}; 144 | size_t offset_within_tile{}; 145 | size_t id{}; 146 | }; 147 | 148 | struct CoreConfiguration 149 | { 150 | CorePipelineConfiguration pipeline{}; 151 | std::string name; 152 | CoreAddress address{}; 153 | 154 | std::vector axon_in; 155 | 
std::vector pipeline_hw; 156 | std::vector axon_out; 157 | 158 | AxonInConfiguration &create_axon_in(std::string name, const AxonInPowerMetrics &power_metrics); 159 | PipelineUnitConfiguration &create_hardware_unit(std::string name, const ModelInfo &model_details); 160 | AxonOutConfiguration &create_axon_out(std::string name, const AxonOutPowerMetrics &power_metrics); 161 | 162 | CoreConfiguration(std::string name, const CoreAddress &address, const CorePipelineConfiguration &pipeline); 163 | }; 164 | 165 | struct AxonInPowerMetrics 166 | { 167 | double energy_message_in{0.0}; 168 | double latency_message_in{0.0}; 169 | }; 170 | 171 | struct AxonInConfiguration 172 | { 173 | AxonInPowerMetrics metrics{}; 174 | std::string name; 175 | AxonInConfiguration(const AxonInPowerMetrics &metrics, std::string name) : metrics(metrics), name(std::move(name)) {} 176 | }; 177 | 178 | struct PipelineUnitConfiguration 179 | { 180 | ModelInfo model_info{}; 181 | std::string name; 182 | size_t tile_id{}; 183 | size_t core_offset{}; 184 | size_t core_id{}; 185 | bool implements_synapse{false}; 186 | bool implements_dendrite{false}; 187 | bool implements_soma{false}; 188 | 189 | PipelineUnitConfiguration(ModelInfo &&model_info, std::string &&name) : model_info(std::move(model_info)), name(std::move(name)) {} 190 | PipelineUnitConfiguration(const ModelInfo &model_info, const std::string &name) : model_info(model_info), name(name) {} 191 | }; 192 | 193 | struct AxonOutPowerMetrics 194 | { 195 | double energy_message_out{0.0}; 196 | double latency_message_out{0.0}; 197 | }; 198 | 199 | struct AxonOutConfiguration 200 | { 201 | AxonOutPowerMetrics metrics{}; 202 | std::string name; 203 | AxonOutConfiguration(AxonOutPowerMetrics metrics, std::string name) : metrics(metrics), name(std::move(name)) {} 204 | }; 205 | } 206 | 207 | #endif 208 | -------------------------------------------------------------------------------- /src/attribute.hpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | #ifndef ATTRIBUTE_HEADER_INCLUDED_ 6 | #define ATTRIBUTE_HEADER_INCLUDED_ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include "print.hpp" 18 | 19 | namespace sanafe 20 | { 21 | // An attribute can contain a scalar value, or either a list or named set of 22 | // attributes i.e., attributes can be recursively defined. However, 23 | // in C++, variants cannot be defined recursively, so create this new class. 
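//
// A minimal sketch of the intended usage (an editor's illustration, not from
// the original source), relying on the recursive value variant and the cast
// operators defined below:
//
//     ModelAttribute leaf;
//     leaf.name = "threshold";
//     leaf.value = 1.0;
//     ModelAttribute nested;
//     nested.value = std::vector<ModelAttribute>{leaf};
//     // "Map" style read-back via the std::map cast operator:
//     std::map<std::string, double> attributes = nested;
//     double threshold = attributes.at("threshold"); // 1.0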
24 | struct ModelAttribute 25 | { 26 | operator bool() const 27 | { 28 | if (std::holds_alternative(value)) 29 | { 30 | return std::get(value); 31 | } 32 | if (std::holds_alternative(value)) 33 | { 34 | TRACE1(ARCH, "Warning: Casting integer value to bool type.\n"); 35 | return (std::get(value) != 0); 36 | } 37 | INFO("index: %zu\n", value.index()); 38 | 39 | std::string error = "Error: Attribute "; 40 | if (name.has_value()) 41 | { 42 | error += name.value(); 43 | error += " "; 44 | } 45 | error += "cannot be cast to a bool ()"; 46 | throw std::runtime_error(error); 47 | } 48 | operator int() const 49 | { 50 | return std::get(value); 51 | } 52 | operator double() const 53 | { 54 | if (std::holds_alternative(value)) 55 | { 56 | return std::get(value); 57 | } 58 | if (std::holds_alternative(value)) 59 | { 60 | // Assume it is safe to convert from any integer to double 61 | TRACE1(ARCH, "Warning: Casting integer value to double type.\n"); 62 | return static_cast(std::get(value)); 63 | } 64 | 65 | std::string error = "Error: Attribute"; 66 | if (name.has_value()) 67 | { 68 | error += " " + name.value(); 69 | } 70 | error += " cannot be cast to a double"; 71 | throw std::runtime_error(error); 72 | } 73 | 74 | operator std::string() const 75 | { 76 | return std::get(value); 77 | } 78 | template operator std::vector() const 79 | { 80 | std::vector cast_vector; 81 | const auto &value_vector = std::get>(value); 82 | cast_vector.reserve(value_vector.size()); 83 | 84 | for (const auto &element : value_vector) 85 | { 86 | cast_vector.push_back(static_cast(element)); 87 | } 88 | return cast_vector; 89 | } 90 | template operator std::map() const 91 | { 92 | std::map cast_map; 93 | const auto &value_vector = std::get>(value); 94 | for (const auto &element : value_vector) 95 | { 96 | cast_map[element.name.value()] = static_cast(element); 97 | } 98 | return cast_map; 99 | } 100 | bool operator==(const ModelAttribute &rhs) const 101 | { 102 | return (value == rhs.value && 103 | (forward_to_synapse == rhs.forward_to_synapse) && 104 | (forward_to_dendrite == rhs.forward_to_dendrite) && 105 | (forward_to_soma == rhs.forward_to_soma)); 106 | } 107 | bool operator!=(const ModelAttribute &rhs) const 108 | { 109 | return (value != rhs.value) || 110 | (forward_to_synapse != rhs.forward_to_synapse) || 111 | (forward_to_dendrite != rhs.forward_to_dendrite) || 112 | (forward_to_soma != rhs.forward_to_soma); 113 | } 114 | [[nodiscard]] std::string print() const 115 | { 116 | if (std::holds_alternative(value)) 117 | { 118 | return std::get(value) ? "true" : "false"; 119 | } 120 | if (std::holds_alternative(value)) 121 | { 122 | return std::to_string(std::get(value)); 123 | } 124 | if (std::holds_alternative(value)) 125 | { 126 | std::ostringstream ss; 127 | ss << std::scientific << std::get(value); 128 | return ss.str(); 129 | } 130 | if (std::holds_alternative(value)) 131 | { 132 | return std::get(value); 133 | } 134 | if (std::holds_alternative>(value)) 135 | { 136 | throw std::runtime_error("Printing vectors not yet supported"); 137 | } 138 | // This should not be reached if all variant types are handled 139 | throw std::runtime_error("Unknown variant type in ModelAttribute"); 140 | } 141 | 142 | // In C++17, we cannot use std::map (which would be the natural choice) with 143 | // incomplete types i.e., cannot use std::map in such a recursive 144 | // structure. 
Considering this, and the fact that performance is not as 145 | // important for this struct, label every attribute with a name and if the 146 | // user wants to use "map" style lookups e.g., foo = attribute["key"] 147 | // then support casting the struct to a std::map. 148 | // There have been other discussions on this topic e.g., for implementing 149 | // JSON and YAML parsers, but they end up either requiring Boost or other 150 | // dependencies, and / or rely on undefined C++ behavior and generally 151 | // require complex solutions. 152 | std::variant> value; 153 | std::optional name; 154 | 155 | // Filters control which hardware units can receive this parameter 156 | bool forward_to_synapse{true}; 157 | bool forward_to_dendrite{true}; 158 | bool forward_to_soma{true}; 159 | }; 160 | 161 | // We define an alias for abbreviating the unwieldy value variant 162 | using AttributeVariant = std::variant>; 164 | } 165 | #endif 166 | -------------------------------------------------------------------------------- /src/core.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "arch.hpp" 11 | #include "core.hpp" 12 | #include "models.hpp" 13 | #include "network.hpp" 14 | #include "pipeline.hpp" 15 | #include "plugins.hpp" 16 | #include "print.hpp" 17 | 18 | sanafe::AxonInUnit::AxonInUnit(const AxonInConfiguration &config) 19 | : name(config.name) 20 | , energy_spike_message(config.metrics.energy_message_in) 21 | , latency_spike_message(config.metrics.latency_message_in) 22 | { 23 | } 24 | 25 | sanafe::AxonOutUnit::AxonOutUnit(const AxonOutConfiguration &config) 26 | : name(config.name) 27 | , energy_access(config.metrics.energy_message_out) 28 | , latency_access(config.metrics.latency_message_out) 29 | { 30 | } 31 | 32 | sanafe::Core::Core(const CoreConfiguration &config) 33 | : pipeline_config(config.pipeline) 34 | , name(config.name) 35 | , id(config.address.id) 36 | , offset(config.address.offset_within_tile) 37 | , parent_tile_id(config.address.parent_tile_id) 38 | , log_energy(config.pipeline.log_energy) 39 | , log_latency(config.pipeline.log_latency) 40 | 41 | { 42 | timestep_buffer.resize(pipeline_config.max_neurons_supported); 43 | } 44 | 45 | sanafe::PipelineUnit *sanafe::Core::map_neuron_to_dendrite( 46 | const Neuron &neuron_to_map) 47 | { 48 | PipelineUnit *mapped_dendrite{nullptr}; 49 | 50 | const bool choose_first_dendrite_by_default = 51 | (neuron_to_map.dendrite_hw_name.empty()); 52 | bool dendrite_found = false; 53 | for (auto &hw : pipeline_hw) 54 | { 55 | if (hw->implements_dendrite && 56 | (choose_first_dendrite_by_default || 57 | neuron_to_map.dendrite_hw_name == hw->name)) 58 | { 59 | mapped_dendrite = hw.get(); 60 | dendrite_found = true; 61 | break; 62 | } 63 | } 64 | if (!dendrite_found) 65 | { 66 | INFO("Error: Could not map neuron nid:%zu (hw:%s) " 67 | "to any dendrite h/w.\n", 68 | neuron_to_map.offset, neuron_to_map.dendrite_hw_name.c_str()); 69 | throw std::runtime_error("Error: Could not map neuron to dendrite h/w"); 70 | } 71 | 72 | return mapped_dendrite; 73 | } 74 | 75 | sanafe::PipelineUnit *sanafe::Core::map_neuron_to_soma( 76 | const Neuron &neuron_to_map) 77 | { 78 | PipelineUnit 
*mapped_soma{nullptr};
79 | const bool choose_first_soma_by_default =
80 | (neuron_to_map.soma_hw_name.empty());
81 | bool soma_found = false;
82 | for (auto &hw : pipeline_hw)
83 | {
84 | if (hw->implements_soma &&
85 | (choose_first_soma_by_default ||
86 | neuron_to_map.soma_hw_name == hw->name))
87 | {
88 | mapped_soma = hw.get();
89 | soma_found = true;
90 | break;
91 | }
92 | }
93 | if (!soma_found)
94 | {
95 | INFO("Error: Could not map neuron nid:%zu (hw:%s) "
96 | "to any soma h/w.\n",
97 | neuron_to_map.offset, neuron_to_map.soma_hw_name.c_str());
98 | throw std::runtime_error("Error: Could not map neuron to soma h/w");
99 | }
100 | mapped_soma->neuron_count++;
101 |
102 | return mapped_soma;
103 | }
104 |
105 | void sanafe::Core::map_neuron(
106 | const Neuron &neuron_to_map, const size_t neuron_id)
107 | {
108 | TRACE1(CHIP, "Mapping nid:%s.%zu to core: %zu\n",
109 | neuron_to_map.parent_group_name.c_str(), neuron_to_map.offset, id);
110 |
111 | if (neurons.size() >= pipeline_config.max_neurons_supported)
112 | {
113 | INFO("Error: Exceeded maximum neurons per core (%zu)\n",
114 | pipeline_config.max_neurons_supported);
115 | throw std::runtime_error("Error: Exceeded maximum neurons per core.");
116 | }
117 |
118 | // Map neuron model to dendrite and soma hardware units in this core.
119 | // Search through all models implemented by this core and return the
120 | // one that matches. If no dendrite / soma hardware is specified,
121 | // default to the first one defined
122 | if (pipeline_hw.empty())
123 | {
124 | INFO("Error: No pipeline units defined for cid:%zu\n", id);
125 | throw std::runtime_error("Error: No units defined");
126 | }
127 | PipelineUnit *mapped_dendrite = map_neuron_to_dendrite(neuron_to_map);
128 | PipelineUnit *mapped_soma = map_neuron_to_soma(neuron_to_map);
129 |
130 | if (axon_out_hw.empty())
131 | {
132 | INFO("Error: No axon out units defined for cid:%zu\n", id);
133 | throw std::runtime_error("Error: No axon out units defined");
134 | }
135 | AxonOutUnit *mapped_axon_out = axon_out_hw.data();
136 |
137 | // Map the neuron to the core and its hardware units
138 | const size_t address = neurons.size();
139 | neurons.emplace_back(neuron_to_map, neuron_id, this,
140 | mapped_soma, address, mapped_axon_out, mapped_dendrite);
141 | }
142 |
143 | sanafe::AxonInUnit &sanafe::Core::create_axon_in(
144 | const AxonInConfiguration &config)
145 | {
146 | axon_in_hw.emplace_back(config);
147 | TRACE1(CHIP, "New axon in h/w unit created (%zu.%zu)\n", parent_tile_id,
148 | id);
149 |
150 | return axon_in_hw.back();
151 | }
152 |
153 | sanafe::PipelineUnit &sanafe::Core::create_pipeline_unit(
154 | const PipelineUnitConfiguration &config)
155 | {
156 | // Create the pipeline unit, either from a plugin or a built-in model
157 | if (config.model_info.plugin_library_path.has_value())
158 | {
159 | const std::filesystem::path plugin_lib_path =
160 | config.model_info.plugin_library_path.value();
161 | TRACE1(CHIP, "Creating unit from plugin: %s.\n",
162 | plugin_lib_path.c_str());
163 | pipeline_hw.emplace_back(
164 | plugin_get_hw(config.model_info.name, plugin_lib_path));
165 | }
166 | else
167 | {
168 | // Use built-in models
169 | TRACE1(CHIP, "Creating built-in model %s.\n",
170 | config.model_info.name.c_str());
171 | pipeline_hw.emplace_back(
172 | model_get_pipeline_unit(config.model_info.name));
173 | }
174 |
175 | auto &new_unit = pipeline_hw.back();
176 | // Forward all attributes onto the new h/w unit
177 | new_unit->set_attributes(config.name, config.model_info);
178 | // Set the input/output
interface of the pipeline unit and in doing so we
179 | // configure which functionality the h/w unit supports
180 | new_unit->check_implemented(config.implements_synapse,
181 | config.implements_dendrite, config.implements_soma);
182 | TRACE1(CHIP, "New h/w unit created (%s) in core:%zu\n", config.name.c_str(),
183 | id);
184 |
185 | return *new_unit;
186 | }
187 |
188 | sanafe::AxonOutUnit &sanafe::Core::create_axon_out(
189 | const AxonOutConfiguration &config)
190 | {
191 | axon_out_hw.emplace_back(config);
192 | TRACE1(CHIP, "New axon out h/w unit created: (%zu.%zu)\n", parent_tile_id,
193 | id);
194 |
195 | return axon_out_hw.back();
196 | }
197 |
198 | std::string sanafe::Core::info() const noexcept
199 | {
200 | std::ostringstream ss;
201 | ss << "sanafe::Core(name=" << name << " tile=" << parent_tile_id << ")";
202 | return ss.str();
203 | }
204 |
-------------------------------------------------------------------------------- /src/core.hpp: --------------------------------------------------------------------------------
1 | // Copyright (c) 2025 - The University of Texas at Austin
2 | // This work was produced under contract #2317831 to National Technology and
3 | // Engineering Solutions of Sandia, LLC which is under contract
4 | // No. DE-NA0003525 with the U.S. Department of Energy.
5 | // core.hpp
6 | #ifndef CORE_HEADER_INCLUDED_
7 | #define CORE_HEADER_INCLUDED_
8 |
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 |
16 | #include "arch.hpp"
17 | #include "fwd.hpp"
18 | #include "mapped.hpp"
19 | #include "message.hpp"
20 | #include "pipeline.hpp"
21 |
22 | namespace sanafe
23 | {
24 | struct AxonInModel
25 | {
26 | // List of all neuron connections to send spikes to
27 | std::vector synapse_addresses;
28 | Message *message{nullptr};
29 | int spikes_received{0};
30 | int active_synapses{0};
31 | };
32 |
33 | struct AxonOutModel
34 | {
35 | size_t dest_axon_id{};
36 | size_t dest_tile_id{};
37 | size_t dest_core_offset{};
38 | size_t src_neuron_offset{};
39 | };
40 |
41 | class AxonInUnit
42 | {
43 | public:
44 | std::string name;
45 | long int spike_messages_in{0L};
46 | double energy{0.0};
47 | double time{0.0};
48 | double energy_spike_message;
49 | double latency_spike_message;
50 |
51 | explicit AxonInUnit(const AxonInConfiguration &config);
52 | };
53 |
54 | class AxonOutUnit
55 | {
56 | public:
57 | // The axon output points to a number of axons, stored at the
58 | // post-synaptic core.
A neuron can point to a number of these 59 | std::string name; 60 | long int packets_out{0L}; 61 | double energy{0.0}; 62 | double time{0.0}; 63 | double energy_access; 64 | double latency_access; 65 | 66 | explicit AxonOutUnit(const AxonOutConfiguration &config); 67 | }; 68 | 69 | class Core 70 | { 71 | public: 72 | std::vector axon_in_hw; 73 | std::vector> pipeline_hw; 74 | std::vector axon_out_hw; 75 | 76 | std::vector> messages_in; 77 | std::vector axons_in; 78 | std::vector neurons; 79 | std::vector connections_in; 80 | std::vector axons_out; 81 | std::vector timestep_buffer; 82 | 83 | std::list neuron_processing_units; 84 | std::list message_processing_units; 85 | CorePipelineConfiguration pipeline_config{}; 86 | std::string name; 87 | double energy{0.0}; 88 | double next_message_generation_delay{0.0}; 89 | size_t id; 90 | size_t offset; 91 | size_t parent_tile_id; 92 | int message_count{0}; 93 | bool log_energy{false}; 94 | bool log_latency{false}; 95 | 96 | explicit Core(const CoreConfiguration &config); 97 | void map_neuron(const Neuron &n, size_t neuron_id); 98 | AxonInUnit &create_axon_in(const AxonInConfiguration &config); 99 | PipelineUnit &create_pipeline_unit(const PipelineUnitConfiguration &config); 100 | AxonOutUnit &create_axon_out(const AxonOutConfiguration &config); 101 | [[nodiscard]] size_t get_id() const { return id; } 102 | [[nodiscard]] size_t get_offset() const { return offset; } 103 | [[nodiscard]] std::string info() const noexcept; 104 | 105 | private: 106 | PipelineUnit *map_neuron_to_dendrite(const Neuron &neuron_to_map); 107 | PipelineUnit *map_neuron_to_soma(const Neuron &neuron_to_map); 108 | }; 109 | 110 | } 111 | 112 | #endif -------------------------------------------------------------------------------- /src/fwd.hpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 
5 | // fwd.hpp: Forward declarations 6 | #ifndef FWD_HEADER_INCLUDED_ 7 | #define FWD_HEADER_INCLUDED_ 8 | 9 | #include 10 | 11 | namespace sanafe 12 | { 13 | class SpikingChip; 14 | struct Timestep; 15 | struct RunData; 16 | class NocInfo; 17 | 18 | struct Scheduler; 19 | 20 | class Architecture; 21 | struct NetworkOnChipConfiguration; 22 | struct TileConfiguration; 23 | struct TilePowerMetrics; 24 | struct CoreConfiguration; 25 | struct CorePipelineConfiguration; 26 | struct AxonInConfiguration; 27 | struct PipelineUnitConfiguration; 28 | struct AxonOutConfiguration; 29 | struct AxonOutPowerMetrics; 30 | struct AxonInPowerMetrics; 31 | 32 | class SpikingNetwork; 33 | class Neuron; 34 | class NeuronGroup; 35 | struct Connection; 36 | class MappedNeuron; 37 | class MappedConnection; 38 | struct NeuronConfiguration; 39 | struct ModelInfo; 40 | struct PipelineResult; 41 | struct NeuronAddress; 42 | 43 | class Tile; 44 | class Core; 45 | class AxonInUnit; 46 | class AxonOutUnit; 47 | class PipelineUnit; 48 | class SynapseUnit; 49 | class DendriteUnit; 50 | class SomaUnit; 51 | 52 | 53 | struct AxonInModel; 54 | struct AxonOutModel; 55 | 56 | struct Message; 57 | 58 | enum BufferPosition : uint8_t; 59 | enum NeuronStatus : uint8_t; 60 | 61 | struct ModelAttribute; 62 | 63 | template 64 | struct LookupTable; 65 | } 66 | 67 | // External library forward declarations i.e., not in SANA-FE's namespace 68 | class BookSimConfig; 69 | 70 | #endif -------------------------------------------------------------------------------- /src/mapped.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "arch.hpp" 8 | #include "attribute.hpp" 9 | #include "core.hpp" 10 | #include "mapped.hpp" 11 | #include "network.hpp" 12 | #include "pipeline.hpp" 13 | #include "print.hpp" 14 | 15 | // NOLINTBEGIN(bugprone-easily-swappable-parameters) 16 | sanafe::MappedConnection::MappedConnection( 17 | std::reference_wrapper pre_neuron, 18 | std::reference_wrapper post_neuron) 19 | : pre_neuron_ref(pre_neuron) 20 | , post_neuron_ref(post_neuron) 21 | // NOLINTEND(bugprone-easily-swappable-parameters) 22 | { 23 | } 24 | 25 | void sanafe::MappedConnection::build_message_processing_pipeline() 26 | { 27 | const MappedNeuron &n = post_neuron_ref; 28 | const Core &mapped_core = *(n.core); 29 | 30 | // We don't support putting the buffer inside or before the synapse unit, so 31 | // unconditionally push the synapse h/w. 
This is because putting the buffer
32 | // here could cause a spike to be sent that shouldn't be.
33 | message_processing_pipeline.push_back(synapse_hw);
34 | if ((mapped_core.pipeline_config.buffer_position >
35 | buffer_before_dendrite_unit) &&
36 | (n.dendrite_hw != synapse_hw))
37 | {
38 | message_processing_pipeline.push_back(n.dendrite_hw);
39 | }
40 | if ((mapped_core.pipeline_config.buffer_position >
41 | buffer_before_soma_unit) &&
42 | (n.soma_hw != n.dendrite_hw))
43 | {
44 | message_processing_pipeline.push_back(n.soma_hw);
45 | }
46 | }
47 |
48 | sanafe::MappedNeuron::MappedNeuron(const Neuron &neuron_to_map,
49 | const size_t nid, Core *mapped_core, PipelineUnit *mapped_soma,
50 | const size_t mapped_address, AxonOutUnit *mapped_axon_out,
51 | PipelineUnit *mapped_dendrite)
52 | : parent_group_name(neuron_to_map.parent_group_name)
53 | , offset(neuron_to_map.offset)
54 | , id(nid)
55 | , core(mapped_core)
56 | , dendrite_hw(mapped_dendrite)
57 | , soma_hw(mapped_soma)
58 | , axon_out_hw(mapped_axon_out)
59 | , mapped_address(mapped_address)
60 | , mapping_order(neuron_to_map.mapping_order)
61 | , force_synapse_update(neuron_to_map.force_synapse_update)
62 | , force_dendrite_update(neuron_to_map.force_dendrite_update)
63 | , force_soma_update(neuron_to_map.force_soma_update)
64 | , log_spikes(neuron_to_map.log_spikes)
65 | , log_potential(neuron_to_map.log_potential)
66 |
67 | {
68 | set_model_attributes(neuron_to_map.model_attributes);
69 | build_neuron_processing_pipeline();
70 | }
71 |
72 | void sanafe::MappedNeuron::set_model_attributes(
73 | const std::map &model_attributes)
74 | const
75 | {
76 | for (const auto &[key, attribute] : model_attributes)
77 | {
78 | TRACE2(CHIP, "Forwarding attribute: %s (dendrite:%d soma:%d)\n",
79 | key.c_str(), attribute.forward_to_dendrite,
80 | attribute.forward_to_soma);
81 | if (attribute.forward_to_dendrite && (dendrite_hw != nullptr))
82 | {
83 | dendrite_hw->check_attribute(key);
84 | dendrite_hw->set_attribute_neuron(mapped_address, key, attribute);
85 | }
86 | if (attribute.forward_to_soma && (soma_hw != nullptr))
87 | {
88 | soma_hw->check_attribute(key);
89 | soma_hw->set_attribute_neuron(mapped_address, key, attribute);
90 | }
91 | }
92 | }
93 |
94 | void sanafe::MappedNeuron::build_neuron_processing_pipeline()
95 | {
96 | if (core->pipeline_config.buffer_position < buffer_before_dendrite_unit)
97 | {
98 | throw std::runtime_error("Error: Buffer must be after synaptic h/w");
99 | }
100 | if (core->pipeline_config.buffer_position == buffer_inside_dendrite_unit)
101 | {
102 | neuron_processing_pipeline.push_back(dendrite_hw);
103 | }
104 | if ((core->pipeline_config.buffer_position <= buffer_inside_soma_unit) &&
105 | (soma_hw != dendrite_hw))
106 | {
107 | neuron_processing_pipeline.push_back(soma_hw);
108 | }
109 | }
110 |
-------------------------------------------------------------------------------- /src/mapped.hpp: --------------------------------------------------------------------------------
1 | // Copyright (c) 2025 - The University of Texas at Austin
2 | // This work was produced under contract #2317831 to National Technology and
3 | // Engineering Solutions of Sandia, LLC which is under contract
4 | // No. DE-NA0003525 with the U.S. Department of Energy.
5 | #ifndef MAPPED_HEADER_INCLUDED_ 6 | #define MAPPED_HEADER_INCLUDED_ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "attribute.hpp" 16 | #include "fwd.hpp" 17 | 18 | namespace sanafe 19 | { 20 | 21 | enum NeuronStatus : uint8_t 22 | { 23 | invalid_neuron_state = 0U, 24 | idle = 1U, 25 | updated = 2U, 26 | fired = 3U 27 | }; 28 | 29 | class MappedConnection 30 | { 31 | public: 32 | std::reference_wrapper pre_neuron_ref; 33 | std::reference_wrapper post_neuron_ref; 34 | PipelineUnit *synapse_hw{nullptr}; 35 | std::vector message_processing_pipeline; 36 | size_t synapse_address{0UL}; 37 | 38 | explicit MappedConnection(std::reference_wrapper pre_neuron, std::reference_wrapper post_neuron); 39 | void build_message_processing_pipeline(); 40 | }; 41 | 42 | class MappedNeuron 43 | { 44 | public: 45 | std::vector connections_out; 46 | std::vector axon_out_addresses; 47 | std::string parent_group_name; 48 | size_t offset; 49 | size_t id; 50 | 51 | // Internal pointers to mapped hardware 52 | Core *core{nullptr}; 53 | Core *post_synaptic_cores{nullptr}; 54 | PipelineUnit *dendrite_hw{nullptr}; 55 | PipelineUnit *soma_hw{nullptr}; 56 | AxonOutUnit *axon_out_hw{nullptr}; 57 | std::vector neuron_processing_pipeline; 58 | 59 | size_t mapped_address{-1ULL}; 60 | size_t mapping_order; 61 | int spike_count{0}; 62 | int maps_in_count{0}; 63 | int maps_out_count{0}; 64 | NeuronStatus status{invalid_neuron_state}; 65 | 66 | // Flags and traces 67 | bool force_synapse_update{false}; 68 | bool force_dendrite_update{false}; 69 | bool force_soma_update{false}; 70 | bool log_spikes{false}; 71 | bool log_potential{false}; 72 | 73 | MappedNeuron(const Neuron &neuron_to_map, size_t nid, Core *mapped_core, PipelineUnit *mapped_soma, size_t mapped_address, AxonOutUnit *mapped_axon_out, PipelineUnit *mapped_dendrite); 74 | MappedNeuron(const MappedNeuron ©) = default; 75 | ~MappedNeuron() = default; 76 | MappedNeuron& operator=(const MappedNeuron& other) = default; 77 | MappedNeuron(MappedNeuron&& other) = default; 78 | MappedNeuron& operator=(MappedNeuron&& other) = default; 79 | void set_model_attributes(const std::map &model_attributes) const; 80 | 81 | private: 82 | void build_neuron_processing_pipeline(); 83 | }; 84 | 85 | } 86 | 87 | #endif -------------------------------------------------------------------------------- /src/message.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "chip.hpp" 4 | #include "core.hpp" 5 | #include "mapped.hpp" 6 | #include "message.hpp" 7 | #include "tile.hpp" 8 | 9 | sanafe::Message::Message() 10 | : timestep(-1L) 11 | , mid(placeholder_mid) 12 | , src_neuron_offset(0UL) 13 | , src_neuron_group_id("invalid") 14 | , src_tile_id(0UL) 15 | , src_core_id(0UL) 16 | , src_core_offset(0UL) 17 | { 18 | } 19 | 20 | sanafe::Message::Message(const long int id, const SpikingChip &hw, 21 | const MappedNeuron &n, const long int timestep) 22 | : timestep(timestep) 23 | , mid(id) 24 | , src_neuron_offset(n.offset) 25 | , src_neuron_group_id(n.parent_group_name) 26 | { 27 | // If no axon was given create a message with no destination. 
By 28 | // default, messages without destinations act as a placeholder for neuron 29 | // processing 30 | const Core &src_core = *(n.core); 31 | const Tile &src_tile = hw.tiles[src_core.parent_tile_id]; 32 | src_x = src_tile.x; 33 | src_y = src_tile.y; 34 | src_tile_id = src_tile.id; 35 | src_core_id = src_core.id; 36 | src_core_offset = src_core.offset; 37 | } 38 | 39 | sanafe::Message::Message(const long int id, const SpikingChip &hw, 40 | const size_t axon_address, const MappedNeuron &n, 41 | const long int timestep) 42 | : Message(id, hw, n, timestep) 43 | { 44 | const Core &src_core = *(n.core); 45 | const AxonOutModel &src_axon = src_core.axons_out[axon_address]; 46 | const Tile &dest_tile = hw.tiles[src_axon.dest_tile_id]; 47 | const Core &dest_core = dest_tile.cores[src_axon.dest_core_offset]; 48 | const AxonInModel &dest_axon = dest_core.axons_in[src_axon.dest_axon_id]; 49 | 50 | placeholder = false; 51 | spikes = dest_axon.synapse_addresses.size(); 52 | dest_x = dest_tile.x; 53 | dest_y = dest_tile.y; 54 | dest_tile_id = dest_tile.id; 55 | dest_core_id = dest_core.id; 56 | dest_core_offset = dest_core.offset; 57 | dest_axon_id = src_axon.dest_axon_id; 58 | dest_axon_hw = 0; 59 | } 60 | -------------------------------------------------------------------------------- /src/message.hpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // message.hpp 6 | #ifndef MESSAGE_HEADER_INCLUDED_ 7 | #define MESSAGE_HEADER_INCLUDED_ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include "fwd.hpp" 14 | 15 | namespace sanafe 16 | { 17 | 18 | constexpr long int placeholder_mid = -1L; // An invalid message id for placeholders 19 | struct Message 20 | { 21 | double generation_delay{0.0}; 22 | double network_delay{0.0}; 23 | double receive_delay{0.0}; 24 | double blocked_delay{0.0}; 25 | double sent_timestamp{-std::numeric_limits::infinity()}; 26 | double received_timestamp{-std::numeric_limits::infinity()}; 27 | double processed_timestamp{-std::numeric_limits::infinity()}; 28 | long int timestep; 29 | long int mid; 30 | size_t spikes{0UL}; 31 | size_t hops{0UL}; 32 | size_t src_neuron_offset; 33 | std::string src_neuron_group_id; 34 | size_t src_x{0UL}; 35 | size_t dest_x{0UL}; 36 | size_t src_y{0UL}; 37 | size_t dest_y{0UL}; 38 | size_t src_tile_id; 39 | size_t src_core_id; 40 | size_t src_core_offset; 41 | size_t dest_tile_id{0UL}; 42 | size_t dest_core_id{0UL}; 43 | size_t dest_core_offset{0UL}; 44 | int dest_axon_hw{0}; 45 | size_t dest_axon_id{0UL}; 46 | bool placeholder{true}; 47 | bool in_noc{false}; 48 | 49 | explicit Message(); 50 | explicit Message(long int id, const SpikingChip &hw, const MappedNeuron &n, 51 | long int timestep); 52 | explicit Message(long int id, const SpikingChip &hw, size_t axon_address, 53 | const MappedNeuron &n, long int timestep); 54 | Message(const Message ©) = default; 55 | Message(Message &&move) = default; 56 | ~Message() = default; 57 | Message& operator=(const Message ©) = default; 58 | Message& operator=(Message &&move) = default; 59 | }; 60 | 61 | } 62 | 63 | #endif -------------------------------------------------------------------------------- /src/netlist.hpp: -------------------------------------------------------------------------------- 1 | 
// Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // netlist.hpp 6 | #ifndef NETLIST_HEADER_INCLUDED_ 7 | #define NETLIST_HEADER_INCLUDED_ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #include "attribute.hpp" 21 | #include "fwd.hpp" 22 | 23 | namespace sanafe 24 | { 25 | SpikingNetwork netlist_parse_file(std::ifstream &fp, Architecture &arch); 26 | void netlist_get_fields(std::vector &fields, const std::string &line); 27 | size_t field_to_int(const std::string_view &field); 28 | std::pair netlist_parse_neuron_field(const std::string_view &neuron_field); 29 | std::pair netlist_parse_core_field(const std::string_view &core_field); 30 | std::tuple netlist_parse_edge_field(const std::string_view &edge_field); 31 | std::tuple netlist_parse_mapping_field(const std::string_view &mapping_field); 32 | void netlist_read_network_entry(const std::vector &fields, Architecture &arch, SpikingNetwork &net, int line_number); 33 | 34 | std::string netlist_group_to_netlist(const NeuronGroup &group); 35 | std::string netlist_neuron_to_netlist(const Neuron &neuron, const SpikingNetwork &net, const std::map &group_name_to_id); 36 | std::string netlist_mapping_to_netlist(const Neuron &neuron, const std::map &group_name_to_id); 37 | std::string netlist_connection_to_netlist(const Connection &con, const std::map &group_name_to_id); 38 | std::string netlist_attributes_to_netlist(const std::map &model_attributes, const std::map &default_attributes); 39 | 40 | void netlist_read_group(const std::vector &fields, SpikingNetwork &net, int line_number); 41 | void netlist_read_neuron(const std::vector &fields, SpikingNetwork &net, int line_number); 42 | void netlist_read_edge(const std::vector &fields, SpikingNetwork &net, int line_number); 43 | void netlist_read_mapping(const std::vector &fields, Architecture &arch, SpikingNetwork &net, int line_number); 44 | 45 | std::variant> netlist_parse_attribute_value(std::string value_str); 46 | void netlist_parse_attribute_field(const std::string_view &field, std::map &attributes, int line_number); 47 | std::map netlist_parse_attributes(const std::vector &attribute_fields, int line_number); 48 | std::map netlist_parse_embedded_json(const std::vector &attribute_fields, int line_number); 49 | char netlist_get_closing_char(char opening_char); 50 | size_t netlist_embedded_json_end_pos(char opening_char, const std::string &all_fields, int line_number); 51 | 52 | void add_string_attribute_if_unique(std::string &entry, const std::string &attr_name, const std::string &neuron_value, const std::optional &group_default); 53 | void add_bool_attribute_if_unique(std::string &entry, const std::string &attr_name, bool neuron_value, const std::optional &group_default); 54 | 55 | template 56 | bool is_unique_attribute(const std::optional &group_default, const T &neuron_value, bool is_empty = false) 57 | { 58 | if (is_empty) 59 | { 60 | return false; 61 | } 62 | return !group_default.has_value() || 63 | (group_default.value() != neuron_value); 64 | } 65 | 66 | } 67 | 68 | #endif -------------------------------------------------------------------------------- /src/network.hpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 
2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // network.hpp - (spiking) neural network functionality. Spiking neural 6 | // networks are represented as groups of neurons. A neuron group might have a 7 | // bunch of neurons all with the same properties (and common hardware). 8 | // Each neuron has its own state and a set of connections to other neurons. 9 | // These structures have links to hardware for performance simulation. 10 | // Here we include different neuron, synapse and dendrite models. 11 | #ifndef NETWORK_HEADER_INCLUDED_ 12 | #define NETWORK_HEADER_INCLUDED_ 13 | 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include "arch.hpp" 25 | #include "fwd.hpp" 26 | 27 | namespace sanafe 28 | { 29 | 30 | struct NeuronConfiguration 31 | { 32 | std::map model_attributes; 33 | std::optional soma_hw_name; 34 | std::optional default_synapse_hw_name; 35 | std::optional dendrite_hw_name; 36 | std::optional log_spikes; 37 | std::optional log_potential; 38 | std::optional force_synapse_update; 39 | std::optional force_dendrite_update; 40 | std::optional force_soma_update; 41 | }; 42 | 43 | struct NeuronAddress 44 | { 45 | std::string group_name; 46 | std::optional neuron_offset{std::nullopt}; 47 | [[nodiscard]] std::string info() const; 48 | }; 49 | 50 | struct Conv2DParameters 51 | { 52 | int input_width{}; 53 | int input_height{}; 54 | int input_channels{}; 55 | int kernel_width{}; 56 | int kernel_height{}; 57 | int kernel_count{1}; 58 | int stride_width{1}; 59 | int stride_height{1}; 60 | }; 61 | 62 | struct Conv2DCoordinate 63 | { 64 | int channel; 65 | int y; 66 | int x; 67 | }; 68 | 69 | struct Conv2DOutputDimensions 70 | { 71 | int output_width; 72 | int output_height; 73 | int output_channels; 74 | size_t expected_input_size; 75 | size_t expected_output_size; 76 | }; 77 | 78 | struct Conv2DPosition 79 | { 80 | Conv2DCoordinate output_coordinate; 81 | int c_in; 82 | int y_filter; 83 | int x_filter; 84 | }; 85 | 86 | struct Conv2DIndices 87 | { 88 | int dest_idx; 89 | int source_idx; 90 | int filter_idx; 91 | }; 92 | 93 | class Neuron 94 | { 95 | public: 96 | std::vector edges_out; 97 | std::map model_attributes; 98 | std::string soma_hw_name; 99 | std::string default_synapse_hw_name; 100 | std::string dendrite_hw_name; 101 | std::string parent_group_name; 102 | std::reference_wrapper parent_net; 103 | size_t offset{}; 104 | std::optional core_address{std::nullopt}; 105 | size_t mapping_order{}; 106 | // Optionally set flags for updating and traces 107 | bool force_synapse_update{false}; 108 | bool force_dendrite_update{false}; 109 | bool force_soma_update{false}; 110 | bool log_spikes{false}; 111 | bool log_potential{false}; 112 | 113 | explicit Neuron(size_t neuron_offset, SpikingNetwork &net, std::string parent_group_name, const NeuronConfiguration &config); 114 | [[nodiscard]] size_t get_id() const { return offset; } 115 | size_t connect_to_neuron(Neuron &dest); 116 | void map_to_core(const CoreConfiguration &core); 117 | void set_attributes(const NeuronConfiguration &config); 118 | [[nodiscard]] std::string info() const; 119 | }; 120 | 121 | class NeuronGroup 122 | { 123 | public: 124 | // A neuron group is a collection of neurons that share common attributes 125 | std::vector neurons; 126 | 
NeuronConfiguration default_neuron_config; 127 | std::string name; 128 | 129 | [[nodiscard]] std::string get_name() const { return name; } 130 | explicit NeuronGroup(std::string group_name, SpikingNetwork &net, size_t neuron_count, const NeuronConfiguration &default_config); 131 | 132 | void connect_neurons_dense(NeuronGroup &dest_group, const std::map> &attribute_lists); 133 | void connect_neurons_sparse(NeuronGroup &dest_group, const std::map> &attribute_lists, const std::vector > &source_dest_id_pairs); 134 | void connect_neurons_conv2d(NeuronGroup &dest_group, const std::map> &attribute_lists, const Conv2DParameters &convolution); 135 | [[nodiscard]] std::string info() const; 136 | 137 | private: 138 | static Conv2DOutputDimensions conv2d_calculate_dimensions(const Conv2DParameters &convolution); 139 | void conv2d_validate_neuron_counts(const NeuronGroup &dest_group, const Conv2DOutputDimensions &dims) const; 140 | static Conv2DIndices conv2d_calculate_indices(const Conv2DParameters &convolution, const Conv2DOutputDimensions &dims, const Conv2DPosition &position); 141 | static bool conv2d_is_position_valid(int position, int max_size) noexcept; 142 | void conv2d_create_output_neuron_connections(NeuronGroup &dest_group, const std::map> &attribute_lists, const Conv2DParameters &convolution, const Conv2DOutputDimensions &dims, const Conv2DCoordinate &out); 143 | void conv2d_create_kernel_connections(Neuron &dest, const std::map> &attribute_lists, const Conv2DParameters &convolution, const Conv2DOutputDimensions &dims, const Conv2DCoordinate &out, int c_in); 144 | static void conv2d_create_and_configure_connection(Neuron &source, Neuron &dest, const std::map> &attribute_lists, int filter_idx); 145 | }; 146 | 147 | class SpikingNetwork 148 | { 149 | public: 150 | std::map groups; 151 | std::string name; 152 | 153 | explicit SpikingNetwork(std::string net_name = "") : name(std::move(net_name)) {}; 154 | ~SpikingNetwork() = default; 155 | SpikingNetwork(SpikingNetwork &&) = default; 156 | SpikingNetwork &operator=(SpikingNetwork &&) = default; 157 | // Do *NOT* allow Network objects to be copied 158 | // This is because Neuron objects link back to their parent Network 159 | // (and need to be aware of the parent NeuronGroup). Linking to parent 160 | // objects allows us to efficiently store Attributes for neurons i.e., 161 | // by avoiding duplication of shared attributes. 162 | // If the Network was moved or copied, all parent links in Neurons 163 | // would be invalid. 
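//
// For example (an illustrative sketch, not from the original header), networks
// are built in place and passed around by reference:
//
//     SpikingNetwork net("example");
//     NeuronGroup &in = net.create_neuron_group("in", 2, NeuronConfiguration{});
//     NeuronGroup &out = net.create_neuron_group("out", 1, NeuronConfiguration{});
//     in.connect_neurons_dense(out, {}); // all-to-all, no extra attributes
//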
164 | SpikingNetwork(const SpikingNetwork &) = delete; 165 | SpikingNetwork &operator=(const SpikingNetwork &) = delete; 166 | 167 | NeuronGroup &create_neuron_group(std::string name, size_t neuron_count, 168 | const NeuronConfiguration &default_config); 169 | [[nodiscard]] std::string info() const; 170 | void save(const std::filesystem::path &path, 171 | bool use_netlist_format = false) const; 172 | 173 | size_t update_mapping_count(); 174 | private: 175 | void save_netlist(const std::filesystem::path &path) const; 176 | void save_groups_to_netlist(std::ofstream &out) const; 177 | void save_neurons_to_netlist(std::ofstream &out, const std::map &group_name_to_id) const; 178 | void save_mappings_to_netlist(std::ofstream &out, const std::map &group_name_to_id) const; 179 | [[nodiscard]] std::map 180 | create_group_name_to_id_mapping() const; 181 | 182 | void save_yaml(const std::filesystem::path &path) const; 183 | 184 | size_t mapping_count{0}; 185 | }; 186 | 187 | struct Connection 188 | { 189 | std::map synapse_attributes; 190 | std::map dendrite_attributes; 191 | std::string synapse_hw_name; 192 | NeuronAddress pre_neuron{}; 193 | NeuronAddress post_neuron{}; 194 | size_t id; 195 | 196 | Connection(size_t id) : id(id) {} 197 | [[nodiscard]] std::string info() const; 198 | }; 199 | 200 | SpikingNetwork load_net(const std::filesystem::path &path, Architecture &arch, bool use_netlist_format = false); 201 | 202 | } // namespace 203 | 204 | #endif 205 | -------------------------------------------------------------------------------- /src/plugins.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // plugins.cpp 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include "pipeline.hpp" 16 | #include "plugins.hpp" 17 | #include "print.hpp" 18 | 19 | using create_hw = sanafe::PipelineUnit *(); 20 | 21 | namespace // anonymous 22 | { 23 | // Manage the different plugins and their corresponding factory routines. For 24 | // now, use a couple of global maps (ignoring any clang lint warnings). 25 | // Probably not the cleanest or most modern, but it works and should be self- 26 | // contained in this file. 
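// As a sketch of what a plugin provides (an editor's illustration; the unit
// class name is hypothetical, but the "create_" + model_name symbol convention
// matches plugin_init_hw() below), a plugin library exports a factory with C
// linkage:
//
//     extern "C" sanafe::PipelineUnit *create_hodgkin_huxley()
//     {
//         return new HodgkinHuxleyUnit(); // derives from sanafe::PipelineUnit
//     }
//
// dlsym() can then resolve "create_hodgkin_huxley" when a model named
// "hodgkin_huxley" is first requested.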
27 | std::map
28 | plugin_create_hw; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
29 |
30 | // Use a unique_ptr with the custom deleter to automatically manage the library
31 | // handle
32 | using DlHandlePtr = std::unique_ptr;
33 | std::unordered_map
34 | plugin_handles; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
35 | }
36 |
37 | void sanafe::DlHandleDeleter::operator()(void *handle) const
38 | {
39 | if (handle != nullptr)
40 | {
41 | dlclose(handle);
42 | }
43 | }
44 |
45 | void sanafe::plugin_init_hw(
46 | const std::string &model_name, const std::filesystem::path &plugin_path)
47 | {
48 | const std::string create = "create_" + model_name;
49 |
50 | // Load the plugin library
51 | INFO("Loading plugin:%s\n", plugin_path.c_str());
52 | void *hw = dlopen(plugin_path.c_str(), RTLD_LAZY | RTLD_GLOBAL);
53 | plugin_handles[model_name] = DlHandlePtr(hw);
54 | if (hw == nullptr)
55 | {
56 | INFO("Error: Couldn't load library %s\n", plugin_path.c_str());
57 | throw std::runtime_error("Error: Could not load library.\n");
58 | }
59 |
60 | // Clear any previous dlerror() state
61 | dlerror();
62 |
63 | // Look up the factory function that creates an instance of the h/w unit
64 | INFO("Loading function: %s\n", create.c_str());
65 | // NOLINTBEGIN(cppcoreguidelines-pro-type-reinterpret-cast)
66 | auto *create_func =
67 | reinterpret_cast(dlsym(hw, create.c_str()));
68 | plugin_create_hw[model_name] = create_func;
69 | // NOLINTEND(cppcoreguidelines-pro-type-reinterpret-cast)
70 |
71 | const char *dlsym_error = dlerror();
72 | if (dlsym_error != nullptr)
73 | {
74 | INFO("Error: Couldn't load symbol %s: %s\n", create.c_str(),
75 | dlsym_error);
76 | // This will also automatically close the library through its unique_ptr
77 | plugin_handles.erase(model_name);
78 | throw std::runtime_error("Error: Could not load symbol.\n");
79 | }
80 | INFO("Loaded plugin symbols for %s.\n", model_name.c_str());
81 | }
82 |
83 | std::shared_ptr sanafe::plugin_get_hw(
84 | const std::string &model_name, const std::filesystem::path &plugin_path)
85 | {
86 | if (plugin_path.empty())
87 | {
88 | throw std::runtime_error("No plugin path given.");
89 | }
90 |
91 | TRACE1(PLUGINS, "Getting model:%s\n", model_name.c_str());
92 | if (plugin_create_hw.count(model_name) == 0)
93 | {
94 | plugin_init_hw(model_name, plugin_path);
95 | }
96 |
97 | return std::shared_ptr(plugin_create_hw[model_name]());
98 | }
99 |
-------------------------------------------------------------------------------- /src/plugins.hpp: --------------------------------------------------------------------------------
1 | // Copyright (c) 2025 - The University of Texas at Austin
2 | // This work was produced under contract #2317831 to National Technology and
3 | // Engineering Solutions of Sandia, LLC which is under contract
4 | // No. DE-NA0003525 with the U.S. Department of Energy.
5 | // plugins.hpp 6 | #ifndef PLUGINS_HEADER_INCLUDED_ 7 | #define PLUGINS_HEADER_INCLUDED_ 8 | #include 9 | #include 10 | #include 11 | 12 | #include "fwd.hpp" 13 | 14 | namespace sanafe 15 | { 16 | struct DlHandleDeleter 17 | { 18 | void operator()(void *handle) const; 19 | }; 20 | void plugin_init_hw(const std::string &model_name, const std::filesystem::path &plugin_path); 21 | std::shared_ptr plugin_get_hw(const std::string &model_name, const std::filesystem::path &plugin_path); 22 | } 23 | 24 | #endif 25 | -------------------------------------------------------------------------------- /src/print.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // print.cpp 6 | #include 7 | #include 8 | #include 9 | 10 | #include "print.hpp" 11 | 12 | std::string sanafe::print_format_attributes( 13 | const std::map &attr) 14 | { 15 | std::string attr_str; 16 | 17 | for (const auto &a : attr) 18 | { 19 | const std::string &key = a.first; 20 | const auto value_str = std::any_cast(a.second); 21 | 22 | attr_str += " "; 23 | attr_str += key; 24 | attr_str += "="; 25 | attr_str += value_str; 26 | } 27 | return attr_str; 28 | } 29 | -------------------------------------------------------------------------------- /src/print.hpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // print.hpp 6 | #ifndef PRINT_HEADER_INCLUDED_ 7 | #define PRINT_HEADER_INCLUDED_ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | namespace sanafe 15 | { 16 | std::string print_format_attributes(const std::map &attr); 17 | } 18 | 19 | // DEBUG and TRACE print with source annotations, TRACE levels offer more 20 | // detailed and verbose output for debugging. 
If you want to add a new category 21 | // for tracing you must follow the following steps: 22 | // 1) Below, add your new category macro e.g., #define DEBUG_CATEGORY_FOO 23 | // 2) In CMakeLists.txt, set the default debug level for the new category 24 | // 3) In CMakeLists.txt, add the new category to the validation loop, which 25 | // adds a compiler flag for each category i.e., add your category inside 26 | // foreach(category ) 27 | 28 | // Define categories 29 | // NOLINTBEGIN 30 | #define DEBUG_CATEGORY_CHIP 0 31 | #define DEBUG_CATEGORY_ARCH 1 32 | #define DEBUG_CATEGORY_NET 2 33 | #define DEBUG_CATEGORY_PYMODULE 3 34 | #define DEBUG_CATEGORY_DESCRIPTION 4 35 | #define DEBUG_CATEGORY_MODELS 5 36 | #define DEBUG_CATEGORY_SCHEDULER 6 37 | #define DEBUG_CATEGORY_PLUGINS 7 38 | 39 | // Default debug levels; CMake can override these defaults later 40 | #ifndef DEBUG_LEVEL_ARCH 41 | #define DEBUG_LEVEL_ARCH 0 42 | #endif 43 | #ifndef DEBUG_LEVEL_NET 44 | #define DEBUG_LEVEL_NET 0 45 | #endif 46 | #ifndef DEBUG_LEVEL_PYMODULE 47 | #define DEBUG_LEVEL_PYMODULE 0 48 | #endif 49 | #ifndef DEBUG_LEVEL_DESCRIPTION 50 | #define DEBUG_LEVEL_DESCRIPTION 0 51 | #endif 52 | #ifndef DEBUG_LEVEL_MODELS 53 | #define DEBUG_LEVEL_MODELS 0 54 | #endif 55 | #ifndef DEBUG_LEVEL_PLUGINS 56 | #define DEBUG_LEVEL_PLUGINS 0 57 | #endif 58 | #ifndef DEBUG_LEVEL_SCHEDULER 59 | #define DEBUG_LEVEL_SCHEDULER 0 60 | #endif 61 | #ifndef DEBUG_LEVEL_CHIP 62 | #define DEBUG_LEVEL_CHIP 0 63 | #endif 64 | // NOLINTEND 65 | 66 | // Conditional source info based on build configuration 67 | //NOLINTBEGIN 68 | #ifdef ENABLE_SOURCE_INFO 69 | #define SOURCE_INFO() fprintf(stdout, "[%s:%d:%s()] ", __FILE__, __LINE__, __func__) 70 | #else 71 | #define SOURCE_INFO() do {} while (0) 72 | #endif 73 | // NOLINTEND 74 | 75 | //NOLINTBEGIN 76 | #ifdef ENABLE_DEBUG_PRINTS 77 | #define INFO(...) do { \ 78 | SOURCE_INFO(); \ 79 | fprintf(stdout, __VA_ARGS__); \ 80 | } while (0) 81 | 82 | #define TRACE1(category, ...) do { \ 83 | if (DEBUG_LEVEL_##category >= 1) { \ 84 | SOURCE_INFO(); \ 85 | fprintf(stdout, __VA_ARGS__); \ 86 | } \ 87 | } while (0) 88 | 89 | #define TRACE2(category, ...) do { \ 90 | if (DEBUG_LEVEL_##category >= 2) { \ 91 | SOURCE_INFO(); \ 92 | fprintf(stdout, __VA_ARGS__); \ 93 | } \ 94 | } while (0) 95 | 96 | #define TRACE3(category, ...) do { \ 97 | if (DEBUG_LEVEL_##category >= 3) { \ 98 | SOURCE_INFO(); \ 99 | fprintf(stdout, __VA_ARGS__); \ 100 | } \ 101 | } while (0) 102 | #else 103 | // No-op versions for stripped builds 104 | #define INFO(...) do {} while (0) 105 | #define TRACE1(category, ...) do {} while (0) 106 | #define TRACE2(category, ...) do {} while (0) 107 | #define TRACE3(category, ...) do {} while (0) 108 | #endif 109 | // NOLINTEND 110 | 111 | #endif 112 | -------------------------------------------------------------------------------- /src/tile.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 
5 | #include 6 | 7 | #include "arch.hpp" 8 | #include "tile.hpp" 9 | 10 | sanafe::Tile::Tile(const TileConfiguration &config) 11 | : name(config.name) 12 | , energy_north_hop(config.power_metrics.energy_north_hop) 13 | , latency_north_hop(config.power_metrics.latency_north_hop) 14 | , energy_east_hop(config.power_metrics.energy_east_hop) 15 | , latency_east_hop(config.power_metrics.latency_east_hop) 16 | , energy_south_hop(config.power_metrics.energy_south_hop) 17 | , latency_south_hop(config.power_metrics.latency_south_hop) 18 | , energy_west_hop(config.power_metrics.energy_west_hop) 19 | , latency_west_hop(config.power_metrics.latency_west_hop) 20 | , id(config.id) 21 | , x(config.x) 22 | , y(config.y) 23 | , log_energy(config.power_metrics.log_energy) 24 | , log_latency(config.power_metrics.log_latency) 25 | { 26 | } 27 | 28 | std::string sanafe::Tile::info() const 29 | { 30 | std::ostringstream ss; 31 | ss << "sanafe::Tile(tile=" << id << " cores="; 32 | ss << cores.size() << ")"; 33 | 34 | return ss.str(); 35 | } 36 | -------------------------------------------------------------------------------- /src/tile.hpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 - The University of Texas at Austin 2 | // This work was produced under contract #2317831 to National Technology and 3 | // Engineering Solutions of Sandia, LLC which is under contract 4 | // No. DE-NA0003525 with the U.S. Department of Energy. 5 | // tile.hpp 6 | #ifndef TILE_HEADER_INCLUDED_ 7 | #define TILE_HEADER_INCLUDED_ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include "core.hpp" 14 | #include "fwd.hpp" 15 | 16 | namespace sanafe 17 | { 18 | class Tile 19 | { 20 | public: 21 | std::vector cores; 22 | std::string name; 23 | double energy{0.0}; 24 | double energy_north_hop; 25 | double latency_north_hop; 26 | double energy_east_hop; 27 | double latency_east_hop; 28 | double energy_south_hop; 29 | double latency_south_hop; 30 | double energy_west_hop; 31 | double latency_west_hop; 32 | size_t hops{0UL}; 33 | long int messages_received{0L}; 34 | long int total_neurons_fired{0L}; 35 | size_t north_hops{0UL}; 36 | size_t east_hops{0UL}; 37 | size_t south_hops{0UL}; 38 | size_t west_hops{0UL}; 39 | size_t id; 40 | size_t x{0}; 41 | size_t y{0}; 42 | bool log_energy{false}; 43 | bool log_latency{false}; 44 | 45 | explicit Tile(const TileConfiguration &config); 46 | [[nodiscard]] size_t get_id() const { return id; } 47 | [[nodiscard]] std::string info() const; 48 | }; 49 | } 50 | 51 | #endif 52 | -------------------------------------------------------------------------------- /src/utils.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "utils.hpp" 5 | 6 | double sanafe::calculate_elapsed_time( 7 | const std::chrono::time_point 8 | &ts_start, 9 | const std::chrono::time_point 10 | &ts_end) 11 | { 12 | // Calculate elapsed wall-clock time between ts_start and ts_end 13 | const auto chrono_elapsed = ts_end - ts_start; 14 | const long int cycles_elapsed = 15 | std::chrono::duration_cast(chrono_elapsed) 16 | .count(); 17 | constexpr double seconds_in_nanoseconds = 1.0e-9; 18 | const double time_elapsed = 19 | static_cast(cycles_elapsed) * seconds_in_nanoseconds; 20 | 21 | return time_elapsed; 22 | } 23 | 24 | size_t sanafe::abs_diff(const size_t a, const size_t b) 25 | { 26 | // Returns the absolute difference between two unsigned (size_t) values 27 | return (a > b) ? 
--------------------------------------------------------------------------------
/src/utils.hpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2025 - The University of Texas at Austin
2 | // This work was produced under contract #2317831 to National Technology and
3 | // Engineering Solutions of Sandia, LLC which is under contract
4 | // No. DE-NA0003525 with the U.S. Department of Energy.
5 | // utils.hpp
6 | #ifndef UTILS_HEADER_INCLUDED_
7 | #define UTILS_HEADER_INCLUDED_
8 | 
9 | #include <chrono>
10 | #include <cstddef>
11 | #include <map>
12 | #include <stdexcept>
13 | 
14 | namespace sanafe
15 | {
16 | double calculate_elapsed_time(const std::chrono::time_point<std::chrono::high_resolution_clock> &ts_start, const std::chrono::time_point<std::chrono::high_resolution_clock> &ts_end);
17 | size_t abs_diff(size_t a, size_t b);
18 | 
19 | template <typename T> struct LookupTable
20 | {
21 |     std::map<size_t, T> values;
22 | 
23 |     T get(const int x) const
24 |     {
25 |         return get(static_cast<size_t>(x));
26 |     }
27 | 
28 |     T get(const size_t x) const
29 |     {
30 |         if (values.empty())
31 |         {
32 |             throw std::runtime_error("Table is empty");
33 |         }
34 | 
35 |         // Find the largest key that is <= x
36 |         auto it = values.upper_bound(x);
37 |         if (it == values.begin())
38 |         {
39 |             // x is smaller than all keys, return first value
40 |             return values.begin()->second;
41 |         }
42 |         --it; // Move to the largest key <= x
43 |         return it->second;
44 |     }
45 | };
46 | }
47 | 
48 | #endif
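// Example usage (a sketch added for this write-up; the keys and delay values
// are illustrative). get() returns the value stored at the largest key <= x:
//
//     sanafe::LookupTable<double> delay_table;
//     delay_table.values = {{0UL, 1.0e-9}, {16UL, 2.0e-9}, {64UL, 5.0e-9}};
//     delay_table.get(20); // returns 2.0e-9; the largest key <= 20 is 16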
5 | #ifndef YAML_ARCH_HEADER_INCLUDED_
6 | #define YAML_ARCH_HEADER_INCLUDED_
7 | 
8 | 
9 | #include <cstddef>
10 | #include <fstream>
11 | #include <functional>
12 | #include <string>
13 | #include <string_view>
14 | 
15 | #include <ryml.hpp> // NOLINT(misc-include-cleaner)
16 | #include <ryml_std.hpp> // NOLINT(misc-include-cleaner)
17 | #include <c4/yml/fwd.hpp>
18 | #include <c4/yml/node.hpp>
19 | 
20 | #include "arch.hpp"
21 | #include "fwd.hpp"
22 | 
23 | namespace sanafe
24 | {
25 | 
26 | struct PipelineUnitSectionInfo
27 | {
28 |     std::string_view name;
29 |     std::function<void(const ryml::Parser &, ryml::ConstNodeRef, CoreConfiguration &, const std::string_view &, const std::string &)> parsing_function;
30 | };
31 | 
32 | // Architecture description
33 | Architecture description_parse_arch_file_yaml(std::ifstream &fp);
34 | Architecture description_parse_arch_section_yaml(const ryml::Parser &parser, ryml::ConstNodeRef arch_node);
35 | 
36 | void description_parse_tile_section_yaml(const ryml::Parser &parser, ryml::ConstNodeRef tile_node, Architecture &arch);
37 | void description_parse_core_section_yaml(const ryml::Parser &parser, ryml::ConstNodeRef core_node, size_t parent_tile_id, Architecture &arch);
38 | void description_parse_core_yaml(const ryml::Parser &parser, ryml::ConstNodeRef core_node, size_t parent_tile_id, Architecture &arch, const std::string_view &name);
39 | 
40 | void yaml_parse_axon_in(const ryml::Parser &parser, ryml::ConstNodeRef axon_in_node, CoreConfiguration &parent_core, const std::string_view & /*type*/, const std::string &name);
41 | void yaml_parse_axon_out(const ryml::Parser &parser, ryml::ConstNodeRef axon_out_node, CoreConfiguration &parent_core, const std::string_view & /*type*/, const std::string &name);
42 | void yaml_parse_processing_unit(const ryml::Parser &parser, ryml::ConstNodeRef synapse_node, CoreConfiguration &parent_core, const std::string_view &type, const std::string &name);
43 | 
44 | AxonInPowerMetrics yaml_parse_axon_in_attributes(const ryml::Parser &parser, ryml::ConstNodeRef attributes);
45 | AxonOutPowerMetrics yaml_parse_axon_out_attributes(const ryml::Parser &parser, ryml::ConstNodeRef attributes);
46 | ModelInfo yaml_parse_processing_unit_attributes(const ryml::Parser &parser, const ryml::ConstNodeRef &attributes);
47 | 
48 | void yaml_merge_or_create_hardware_unit(CoreConfiguration &parent_core, const std::string &name, ModelInfo &model_details, const std::string_view &section);
49 | void yaml_set_implements_flag(PipelineUnitConfiguration &hw, const std::string_view &section);
50 | 
51 | template <typename ParseFunc>
52 | void yaml_parse_pipeline_entry(const ryml::Parser &parser, const ryml::ConstNodeRef &unit_node, CoreConfiguration &parent_core, const std::string_view &type, ParseFunc parsing_function);
53 | 
54 | //void description_parse_synapse_section_yaml(const ryml::Parser &parser, ryml::ConstNodeRef synapse_node, CoreConfiguration &parent_core);
55 | //ModelInfo description_parse_synapse_attributes_yaml(const ryml::Parser &parser, const ryml::ConstNodeRef attributes);
56 | //void description_parse_dendrite_section_yaml(const ryml::Parser &parser, ryml::ConstNodeRef dendrite_node, CoreConfiguration &parent_core);
57 | //void description_parse_dendrite_yaml(const ryml::Parser &parser, ryml::ConstNodeRef dendrite_node, CoreConfiguration &parent_core, std::string name);
58 | //sanafe::ModelInfo description_parse_dendrite_attributes_yaml(const ryml::Parser &parser, const ryml::ConstNodeRef attributes);
59 | //void description_parse_soma_section_yaml(const ryml::Parser &parser, ryml::ConstNodeRef soma_node, CoreConfiguration &parent_core);
60 | 
61 | CorePipelineConfiguration description_parse_core_pipeline_yaml(const ryml::Parser &parser, ryml::ConstNodeRef attributes);
62 | TilePowerMetrics description_parse_tile_metrics_yaml(const ryml::Parser &parser, ryml::ConstNodeRef attributes);
63 | NetworkOnChipConfiguration description_parse_noc_configuration_yaml(const ryml::Parser &parser, ryml::ConstNodeRef noc_attributes);
64 | LookupTable<double> yaml_parse_sync_delay_table(const ryml::Parser &parser, const ryml::ConstNodeRef &noc_attributes, const std::string &model_type);
65 | }
66 | 
67 | #endif
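// Example (a sketch added for this write-up, not from the original source;
// requires <array>). PipelineUnitSectionInfo pairs a YAML section name with a
// parsing function, so the per-section parsers declared above can be driven
// from a table. The section names match keys used in architecture YAML files
// such as tutorial/arch.yaml:
//
//     const std::array<sanafe::PipelineUnitSectionInfo, 3> sections{{
//             {"axon_in", sanafe::yaml_parse_axon_in},
//             {"synapse", sanafe::yaml_parse_processing_unit},
//             {"axon_out", sanafe::yaml_parse_axon_out}}};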
--------------------------------------------------------------------------------
/src/yaml_common.hpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2025 - The University of Texas at Austin
2 | // This work was produced under contract #2317831 to National Technology and
3 | // Engineering Solutions of Sandia, LLC which is under contract
4 | // No. DE-NA0003525 with the U.S. Department of Energy.
5 | #ifndef YAML_COMMON_HEADER_INCLUDED_
6 | #define YAML_COMMON_HEADER_INCLUDED_
7 | 
8 | #include <cstddef>
9 | #include <map>
10 | #include <optional>
11 | #include <stdexcept>
12 | #include <string>
13 | #include <typeinfo>
14 | #include <utility>
15 | #include <variant>
16 | #include <vector>
17 | 
18 | #include <ryml.hpp> // NOLINT(misc-include-cleaner)
19 | #include <ryml_std.hpp>
20 | #include <c4/yml/node.hpp>
21 | 
22 | #include "fwd.hpp"
23 | 
24 | namespace sanafe
25 | {
26 | class YamlDescriptionParsingError : public std::invalid_argument
27 | {
28 |     // Custom exception class for architecture and network description parsing
29 | public:
30 |     YamlDescriptionParsingError(const std::string &error, const ryml::Parser &parser, const ryml::ConstNodeRef &node);
31 |     [[nodiscard]] const char *what() const noexcept override;
32 | private:
33 |     std::string message;
34 | };
35 | 
36 | std::string yaml_get_type_string(const std::type_info &type);
37 | 
38 | template <typename T>
39 | T yaml_required_field(const ryml::Parser &parser,
40 |         const ryml::ConstNodeRef node, const std::string &key)
41 | {
42 |     // Wrapper around YAML library for field=map[key], adding more error prints
43 |     if (node.invalid())
44 |     {
45 |         const std::string message = "Invalid node when looking up key: " + key;
46 |         throw std::runtime_error(message);
47 |     }
48 |     const ryml::ConstNodeRef field_node = node.find_child(key.c_str());
49 |     if (field_node.invalid())
50 |     {
51 |         const std::string message = "Key '" + key + "' does not exist";
52 |         throw YamlDescriptionParsingError(message, parser, node);
53 |     }
54 |     if (!field_node.has_val())
55 |     {
56 |         const std::string message = "'" + key + "' value should be a scalar";
57 |         throw YamlDescriptionParsingError(message, parser, field_node);
58 |     }
59 | 
60 |     T field{};
61 |     // Efficiently convert to type T by trying the RapidYAML reader.
62 |     // If read() fails, it returns false and execution falls through
63 |     if (c4::yml::read(field_node, &field))
64 |     {
65 |         return field; // type T
66 |     }
67 | 
68 |     const std::string message = "Could not cast field '" + key +
69 |             "' to type: " + yaml_get_type_string(typeid(field));
70 |     throw YamlDescriptionParsingError(message, parser, field_node);
71 | }
72 | 
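// Example usage (a sketch added for this write-up; "name" and "width" are
// arbitrary keys chosen for illustration). A missing key or a failed
// conversion throws YamlDescriptionParsingError with parser context:
//
//     const auto name = yaml_required_field<std::string>(parser, node, "name");
//     const auto width = yaml_required_field<int>(parser, node, "width");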
73 | template <typename T>
74 | std::optional<T> yaml_optional_field(
75 |         ryml::ConstNodeRef node, const std::string &key)
76 | {
77 |     // A terser way of getting optionally set values from YAML
78 |     std::optional<T> optional_value;
79 |     if (!(node.find_child(key.c_str()).invalid()))
80 |     {
81 |         // A few redundant steps needed so that RapidYAML doesn't complain
82 |         T value;
83 |         node[key.c_str()] >> value;
84 |         optional_value = value;
85 |     }
86 | 
87 |     return optional_value;
88 | }
89 | 
90 | // Helper functions
91 | std::map<std::string, ModelAttribute> description_parse_model_attributes_yaml(const ryml::Parser &parser, ryml::ConstNodeRef attributes_node);
92 | ModelAttribute yaml_parse_attribute(const ryml::Parser &parser, ryml::ConstNodeRef attribute_node);
93 | std::vector<ModelAttribute> yaml_parse_attribute_list(const ryml::Parser &parser, ryml::ConstNodeRef attribute_node);
94 | std::vector<ModelAttribute> yaml_parse_attribute_map(const ryml::Parser &parser, ryml::ConstNodeRef attribute_node);
95 | std::variant<bool, int, double, std::string, std::vector<ModelAttribute>>
96 | yaml_parse_attribute_scalar(ryml::ConstNodeRef attribute_node);
97 | void check_key(const ryml::Parser &parser, ryml::ConstNodeRef node, const std::string &key);
98 | ryml::NodeRef description_serialize_variant_value_to_yaml(ryml::NodeRef node, const std::variant<bool, int, double, std::string, std::vector<ModelAttribute>> &value);
99 | std::pair<size_t, size_t> yaml_parse_range(const std::string &range_str);
100 | }
101 | 
102 | #endif
103 | 
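// Example usage (a sketch added for this write-up; "bias" mirrors the optional
// neuron attribute used in the tutorial SNN description). Unlike the required
// variant, optional keys yield std::nullopt when absent instead of throwing:
//
//     const std::optional<double> bias = yaml_optional_field<double>(node, "bias");
//     if (bias.has_value())
//     {
//         // ... apply *bias
//     }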
--------------------------------------------------------------------------------
/src/yaml_snn.hpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2025 - The University of Texas at Austin
2 | // This work was produced under contract #2317831 to National Technology and
3 | // Engineering Solutions of Sandia, LLC which is under contract
4 | // No. DE-NA0003525 with the U.S. Department of Energy.
5 | #ifndef YAML_SNN_HEADER_INCLUDED_
6 | #define YAML_SNN_HEADER_INCLUDED_
7 | 
8 | #include "c4/yml/fwd.hpp"
9 | #include "c4/yml/node.hpp"
10 | #include <cstddef>
11 | #include <filesystem>
12 | #include <fstream>
13 | #include <functional>
14 | #include <list>
15 | #include <map>
16 | #include <string>
17 | #include <string_view>
18 | #include <tuple>
19 | #include <vector>
20 | 
21 | #include <ryml.hpp> // NOLINT(misc-include-cleaner)
22 | #include <ryml_std.hpp> // NOLINT(misc-include-cleaner)
23 | 
24 | #include "fwd.hpp"
25 | #include "network.hpp"
26 | 
27 | namespace sanafe
28 | {
29 | // YAML network description
30 | SpikingNetwork yaml_parse_network_file(std::ifstream &fp, Architecture &arch);
31 | SpikingNetwork yaml_parse_network_section(const ryml::Parser &parser, ryml::ConstNodeRef net_node);
32 | void yaml_parse_neuron_group_section(const ryml::Parser &parser, ryml::ConstNodeRef groups_node, SpikingNetwork &net);
33 | size_t description_count_neurons(const ryml::Parser &parser, ryml::ConstNodeRef neuron_node);
34 | void yaml_parse_edges_section_yaml(const ryml::Parser &parser, ryml::ConstNodeRef edges_node, SpikingNetwork &net);
35 | void yaml_parse_group(const ryml::Parser &parser, ryml::ConstNodeRef neuron_group_node, SpikingNetwork &net);
36 | void yaml_parse_neuron_section(const ryml::Parser &parser, ryml::ConstNodeRef neuron_node, NeuronGroup &neuron_group);
37 | // Neuron to Hardware Mapping
38 | void description_parse_mapping_section_yaml(const ryml::Parser &parser,
39 |         ryml::ConstNodeRef mappings_node, Architecture &arch,
40 |         SpikingNetwork &net);
41 | void description_parse_mapping(const ryml::Parser &parser,
42 |         ryml::ConstNodeRef mapping_info, Architecture &arch,
43 |         SpikingNetwork &net);
44 | void description_parse_mapping_info(const ryml::Parser &parser,
45 |         ryml::ConstNodeRef info, Neuron &n, std::string &core_name);
46 | void description_parse_neuron(const std::string &id, const ryml::Parser &parser,
47 |         ryml::ConstNodeRef attributes, NeuronGroup &neuron_group);
48 | NeuronConfiguration yaml_parse_neuron_attributes(const ryml::Parser &parser,
49 |         ryml::ConstNodeRef attributes,
50 |         const NeuronConfiguration &default_template = NeuronConfiguration());
51 | void description_parse_edge(const std::string &description,
52 |         const ryml::Parser &parser, ryml::ConstNodeRef attributes_node,
53 |         SpikingNetwork &network);
54 | void description_parse_neuron_connection(const NeuronAddress &source_address,
55 |         const NeuronAddress &target_address, const ryml::Parser &parser,
56 |         ryml::ConstNodeRef attributes_node, SpikingNetwork &net);
57 | 
58 | void description_parse_hyperedge(const NeuronAddress &source_address,
59 |         const NeuronAddress &target_address, const ryml::Parser &parser,
60 |         ryml::ConstNodeRef hyperedge_node, SpikingNetwork &net);
61 | void yaml_parse_conv2d(NeuronGroup &source_group, const ryml::Parser &parser,
62 |         ryml::ConstNodeRef hyperedge_node, NeuronGroup &target_group);
63 | void yaml_parse_sparse(NeuronGroup &source_group, const ryml::Parser &parser,
64 |         ryml::ConstNodeRef hyperedge_node, NeuronGroup &target_group);
65 | void yaml_parse_dense(NeuronGroup &source_group, const ryml::Parser &parser,
66 |         ryml::ConstNodeRef hyperedge_node, NeuronGroup &target_group);
67 | bool yaml_parse_conv2d_attributes(ryml::ConstNodeRef attribute, Conv2DParameters &convolution);
68 | void yaml_parse_unit_specific_attributes(const ryml::Parser &parser, ryml::ConstNodeRef parent_node, std::map<std::string, std::vector<ModelAttribute>> &attribute_lists);
69 | 
70 | void description_parse_edge_attributes(Connection &edge,
71 |         const ryml::Parser &parser, ryml::ConstNodeRef attributes_node);
72 | std::tuple<NeuronAddress, NeuronAddress> description_parse_edge_description(const std::string_view &description);
73 | 
74 | // Functions for writing YAML
75 | void yaml_write_network(
76 |         std::filesystem::path path, const sanafe::SpikingNetwork &network);
77 | void yaml_write_mappings_file(
78 |         std::filesystem::path path, const SpikingNetwork &network);
79 | void yaml_create_mappings(ryml::NodeRef &node, std::vector<std::reference_wrapper<const Neuron>> &all_neurons, std::list<std::string> &strings);
80 | c4::yml::NodeRef yaml_serialize_network(c4::yml::NodeRef root, const sanafe::SpikingNetwork &network, std::list<std::string> &strings);
81 | c4::yml::NodeRef yaml_serialize_neuron_group(c4::yml::NodeRef parent, const NeuronGroup &group, std::list<std::string> &strings);
82 | ryml::NodeRef yaml_serialize_neuron_run(ryml::NodeRef neurons_node, const std::tuple<size_t, size_t> &neuron_run, const NeuronGroup &group, std::list<std::string> &strings);
83 | c4::yml::NodeRef yaml_serialize_model_attributes(const std::map<std::string, ModelAttribute> &default_values, c4::yml::NodeRef parent, const std::map<std::string, ModelAttribute> &attributes);
84 | 
85 | std::string write_edge_format(const Connection &connection);
86 | 
87 | }
88 | 
89 | #endif
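// Example (a sketch added for this write-up; it assumes the returned tuple
// carries the source and target addresses, as reconstructed above). Edge
// descriptions follow the "<group>.<neuron> -> <group>.<neuron>" convention
// from the SNN YAML format, e.g. the tutorial edge "0.0 -> 1.0":
//
//     const auto [source, target] =
//             sanafe::description_parse_edge_description("0.0 -> 1.0");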
--------------------------------------------------------------------------------
/tutorial/arch.yaml:
--------------------------------------------------------------------------------
1 | ## arch.yaml
2 | # Exercise 1) Set the costs of a neuron update in the soma from 0.0 ns to 2.0 ns and from 0.0 pJ to 2.0 pJ. Note that ns=e-9 and pJ=e-12.
3 | # Exercise 2) Duplicate tiles 2 times and each core 4 times within every tile (8 cores total). (For a hint, look at files/loihi.yaml)
4 | # Exercise 3) Define an additional synapse unit for compressed synapses. The energy and latency costs for reading a compressed synapse are 0.5 pJ and 2 ns.
5 | #             Again, for a hint on how to define multiple hardware units, look at tutorial/loihi.yaml. This has three different synapse units defined.
6 | architecture:
7 |   name: tutorial
8 |   attributes:
9 |     link_buffer_size: 1
10 |     width: 2
11 |     height: 1
12 |   tile:
13 |   - name: tutorial_tile
14 |     attributes:
15 |       energy_north_hop: 1.0e-12
16 |       latency_north_hop: 1.0e-9
17 |       energy_east_hop: 1.0e-12
18 |       latency_east_hop: 1.0e-9
19 |       energy_south_hop: 1.0e-12
20 |       latency_south_hop: 1.0e-9
21 |       energy_west_hop: 1.0e-12
22 |       latency_west_hop: 1.0e-9
23 |     core:
24 |     - name: tutorial_core
25 |       attributes:
26 |         buffer_position: soma
27 |         max_neurons_supported: 64
28 |       axon_in:
29 |       - name: tutorial_axon_in
30 |         attributes:
31 |           energy_message_in: 0.0
32 |           latency_message_in: 0.0
33 |       synapse:
34 |       - name: tutorial_synapse_uncompressed
35 |         attributes:
36 |           model: current_based
37 |           energy_process_spike: 1.0e-12
38 |           latency_process_spike: 1.0e-9
39 |       dendrite:
40 |       - name: demo_dendrite
41 |         attributes:
42 |           model: accumulator
43 |           energy_update: 0.0
44 |           latency_update: 0.0
45 |       soma:
46 |       - name: tutorial_soma
47 |         attributes:
48 |           model: leaky_integrate_fire
49 |           energy_access_neuron: 1.0e-12
50 |           latency_access_neuron: 1.0e-9
51 |           energy_update_neuron: 0.0e-12
52 |           latency_update_neuron: 0.0e-9
53 |           energy_spike_out: 3.0e-12
54 |           latency_spike_out: 3.0e-9
55 |       axon_out:
56 |       - name: tutorial_axon_out
57 |         attributes:
58 |           energy_message_out: 4.0e-12
59 |           latency_message_out: 4.0e-9
60 | 
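# One possible sketch for Exercise 3, added for this write-up (commented out
# so the file above is left unchanged; the unit name is an arbitrary choice).
# It mirrors the uncompressed synapse entry and uses the costs quoted above:
#
#       synapse:
#       - name: tutorial_synapse_compressed
#         attributes:
#           model: current_based
#           energy_process_spike: 0.5e-12
#           latency_process_spike: 2.0e-9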
--------------------------------------------------------------------------------
/tutorial/snn.yaml:
--------------------------------------------------------------------------------
1 | ## snn.yaml
2 | # Exercise 1. Define a new mapped neuron: 1.1. To do this, add another neuron to group 1 with no attributes (leave the attribute list empty), and map neuron 1.1 to core 0.1
3 | # Exercise 2. Add edges from neurons 0.0 and 0.1, both to neuron 1.1, with weights -2 & 3 respectively
4 | # Exercise 3. Set the bias of neuron 0.1 to 0.5
5 | # Exercise 4. Configure Group 1 to use the new compressed synapses that you defined hardware for in the architecture description, instead of the uncompressed synapses used currently
6 | network:
7 |   name: snn
8 |   groups:
9 |   - name: 0
10 |     attributes: [threshold: 1.0, reset: 0.0, log_spikes: True, log_potential: True]
11 |     neurons:
12 |     - 0: [bias: 0.2]
13 |     - 1: []
14 |   - name: 1
15 |     attributes: [threshold: 2.0, reset: 0.0, synapse_hw_name: tutorial_synapse_uncompressed]
16 |     neurons:
17 |     - 0: []
18 |   edges:
19 |   - 0.0 -> 1.0: [weight: -1.0]
20 | 
21 | mappings:
22 | - 0.0: [core: 0.0]
23 | - 0.1: [core: 0.0]
24 | - 1.0: [core: 0.0]
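# One possible sketch for Exercises 1-3, added for this write-up (commented
# out so the file above is left unchanged). Group 1 gains a second neuron with
# an empty attribute list, two new edges carry the -2 and 3 weights into
# neuron 1.1, neuron 0.1 gets its bias, and the new neuron maps to core 0.1:
#
#     neurons:               # in group 1
#     - 0: []
#     - 1: []
#     edges:
#     - 0.0 -> 1.0: [weight: -1.0]
#     - 0.0 -> 1.1: [weight: -2.0]
#     - 0.1 -> 1.1: [weight: 3.0]
#     mappings:
#     - 1.1: [core: 0.1]
#
# ...with neuron 0.1 in group 0 becoming: - 1: [bias: 0.5]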
--------------------------------------------------------------------------------
/tutorial/tutorial_0_intro.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "metadata": {},
6 |    "source": [
7 |     "# SANA-FE Tutorial #\n",
8 |     "\n",
9 |     " \"Open"
10 |    ]
11 |   },
12 |   {
13 |    "cell_type": "code",
14 |    "execution_count": null,
15 |    "metadata": {},
16 |    "outputs": [],
17 |    "source": [
18 |     "%pip install sanafe==2.0.20\n",
19 |     "%pip install pyyaml\n",
20 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/cpp/tutorial/arch.yaml\n",
21 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/cpp/tutorial/snn.yaml\n",
22 |     "import sanafe"
23 |    ]
24 |   },
25 |   {
26 |    "cell_type": "markdown",
27 |    "metadata": {},
28 |    "source": [
29 |     "### Running SANA-FE for the first time ###\n",
30 |     "Run SANA-FE for the first time using a minimal network and architecture we have provided. This will load the architecture and SNN, and launch a short simulation. After the simulation has finished, a Dict summary of the simulated results is returned and printed. As part of this tutorial, we will extend the SNN and architecture and will look at the hardware insights you can get from SANA-FE."
31 |    ]
32 |   },
33 |   {
34 |    "cell_type": "code",
35 |    "execution_count": null,
36 |    "metadata": {},
37 |    "outputs": [],
38 |    "source": [
39 |     "# First, load an architecture description and build the chip (for simulation)\n",
40 |     "first_arch = sanafe.load_arch(\"arch.yaml\")\n",
41 |     "\n",
42 |     "# Second, load an SNN from file. We pass the\n",
43 |     "# architecture as an extra argument so that SANA-FE can check the mappings are\n",
44 |     "# to valid cores\n",
45 |     "hello_snn = sanafe.load_net(\"snn.yaml\", first_arch)\n",
46 |     "\n",
47 |     "# Third, create the spiking chip and upload the SNN to it\n",
48 |     "first_chip = sanafe.SpikingChip(first_arch)\n",
49 |     "first_chip.load(hello_snn)\n",
50 |     "\n",
51 |     "# Fourth and finally, simulate the programmed chip for 1000 simulated timesteps\n",
52 |     "results = first_chip.sim(1000)"
53 |    ]
54 |   },
55 |   {
56 |    "cell_type": "code",
57 |    "execution_count": null,
58 |    "metadata": {},
59 |    "outputs": [],
60 |    "source": [
61 |     "# Print a summary of the simulation\n",
62 |     "import yaml\n",
63 |     "print(f\"Run results:\\n{yaml.dump(results)}\")"
64 |    ]
65 |   }
66 |  ],
67 |  "metadata": {
68 |   "kernelspec": {
69 |    "display_name": "neuro",
70 |    "language": "python",
71 |    "name": "python3"
72 |   },
73 |   "language_info": {
74 |    "codemirror_mode": {
75 |     "name": "ipython",
76 |     "version": 3
77 |    },
78 |    "file_extension": ".py",
79 |    "mimetype": "text/x-python",
80 |    "name": "python",
81 |    "nbconvert_exporter": "python",
82 |    "pygments_lexer": "ipython3",
83 |    "version": "3.8.20"
84 |   }
85 |  },
86 |  "nbformat": 4,
87 |  "nbformat_minor": 2
88 | }
89 | 
--------------------------------------------------------------------------------
/tutorial/tutorial_1_architecture.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "id": "cbecc269",
6 |    "metadata": {},
7 |    "source": [
8 |     "# Architecture Description File #\n",
9 |     "\n",
10 |     "\n",
11 |     " \"Open"
12 |    ]
13 |   },
14 |   {
15 |    "cell_type": "markdown",
16 |    "id": "2af44d97",
17 |    "metadata": {},
18 |    "source": [
19 |     "\n",
20 |     "In SANA-FE, you can describe different spiking architectures using a custom YAML-based architecture description format. The file `arch.yaml` contains a minimal example architecture based on the diagram below. Follow the three exercises at the top of the `arch.yaml` file to extend the architecture. These new elements are shown in dashed orange below.\n",
21 |     "\n",
22 |     "![Example architecture](example_arch.svg)\n",
23 |     "\n",
24 |     "#### Exercises ####\n",
25 |     "1. Set the costs of a neuron update in the soma from 0.0 ns to 2.0 ns and from 0.0 pJ to 2.0 pJ. Note that ns=e-9 and pJ=e-12.\n",
26 |     "2. Duplicate tiles 2 times and each core 4 times within every tile (8 cores total). (For a hint, look at files/loihi.yaml)\n",
27 |     "3. Define an additional synapse unit for compressed synapses. The energy and latency costs for reading a compressed synapse are 0.5 pJ and 2 ns. For a hint on how to define multiple hardware units, look at tutorial/loihi.yaml. This has three different synapse units defined.\n",
28 |     "\n",
29 |     "After completing the exercises, run the next cell to see if all the checks pass!"
30 |    ]
31 |   },
32 |   {
33 |    "cell_type": "code",
34 |    "execution_count": null,
35 |    "id": "d5e8aedb",
36 |    "metadata": {},
37 |    "outputs": [],
38 |    "source": [
39 |     "%pip install sanafe==2.0.20\n",
40 |     "%pip install pyyaml\n",
41 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/main/tutorial/arch.yaml\n",
42 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/main/tutorial/snn.yaml\n",
43 |     "import sanafe"
44 |    ]
45 |   },
46 |   {
47 |    "cell_type": "code",
48 |    "execution_count": null,
49 |    "id": "164d045d",
50 |    "metadata": {},
51 |    "outputs": [],
52 |    "source": [
53 |     "import sanafe.tutorial\n",
54 |     "sanafe.tutorial.check_arch(\"arch.yaml\")"
55 |    ]
56 |   }
57 |  ],
58 |  "metadata": {
59 |   "kernelspec": {
60 |    "display_name": "neuro",
61 |    "language": "python",
62 |    "name": "python3"
63 |   },
64 |   "language_info": {
65 |    "codemirror_mode": {
66 |     "name": "ipython",
67 |     "version": 3
68 |    },
69 |    "file_extension": ".py",
70 |    "mimetype": "text/x-python",
71 |    "name": "python",
72 |    "nbconvert_exporter": "python",
73 |    "pygments_lexer": "ipython3",
74 |    "version": "3.8.20"
75 |   }
76 |  },
77 |  "nbformat": 4,
78 |  "nbformat_minor": 5
79 | }
80 | 
--------------------------------------------------------------------------------
/tutorial/tutorial_2_snns.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "id": "571553bb",
6 |    "metadata": {},
7 |    "source": [
8 |     "# SNN Description File #\n",
9 |     "\n",
10 |     " \"Open"
11 |    ]
12 |   },
13 |   {
14 |    "cell_type": "markdown",
15 |    "id": "7bcd6c63",
16 |    "metadata": {
17 |     "vscode": {
18 |      "languageId": "plaintext"
19 |     }
20 |    },
21 |    "source": [
22 |     "Next, we will finish the SNN defined in `snn.yaml` and map it to our updated architecture. The SNN is shown in the diagram below, with pre-defined elements drawn in black. Complete the four exercises listed in the file to add the orange elements shown and complete this small SNN.\n",
23 |     "\n",
24 |     "![Example SNN](example_snn.svg)"
25 |    ]
26 |   },
27 |   {
28 |    "cell_type": "markdown",
29 |    "id": "c3c81565",
30 |    "metadata": {},
31 |    "source": [
32 |     "## Exercises ##\n",
33 |     "1. Define a new mapped neuron: 1.1. To do this, add another neuron to group 1 with no attributes (leave the attribute list empty), and map neuron 1.1 to core 0.1\n",
34 |     "2. Add edges from neurons 0.0 and 0.1, both to neuron 1.1, with weights -2 & 3 respectively\n",
35 |     "3. Set the bias of neuron 0.1 to 0.5\n",
36 |     "4. Configure Group 1 to use the new compressed synapses that you defined hardware for in the architecture description, instead of the uncompressed synapses used currently\n",
37 |     "\n"
38 |    ]
39 |   },
40 |   {
41 |    "cell_type": "code",
42 |    "execution_count": null,
43 |    "id": "d68a15a2",
44 |    "metadata": {},
45 |    "outputs": [],
46 |    "source": [
47 |     "%pip install sanafe==2.0.20\n",
48 |     "%pip install pyyaml\n",
49 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/main/tutorial/arch.yaml\n",
50 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/main/tutorial/snn.yaml\n",
51 |     "import sanafe"
52 |    ]
53 |   },
54 |   {
55 |    "cell_type": "code",
56 |    "execution_count": null,
57 |    "id": "2b09ae5a",
58 |    "metadata": {},
59 |    "outputs": [],
60 |    "source": [
61 |     "import sanafe.tutorial\n",
62 |     "sanafe.tutorial.check_snn(\"snn.yaml\")"
63 |    ]
64 |   }
65 |  ],
66 |  "metadata": {
67 |   "kernelspec": {
68 |    "display_name": "neuro",
69 |    "language": "python",
70 |    "name": "python3"
71 |   },
72 |   "language_info": {
73 |    "codemirror_mode": {
74 |     "name": "ipython",
75 |     "version": 3
76 |    },
77 |    "file_extension": ".py",
78 |    "mimetype": "text/x-python",
79 |    "name": "python",
80 |    "nbconvert_exporter": "python",
81 |    "pygments_lexer": "ipython3",
82 |    "version": "3.8.20"
83 |   }
84 |  },
85 |  "nbformat": 4,
86 |  "nbformat_minor": 5
87 | }
88 | 
--------------------------------------------------------------------------------
/tutorial/tutorial_3_api.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "id": "06a41dfc",
6 |    "metadata": {},
7 |    "source": [
8 |     "# Using SANA-FE's Python API #\n",
9 |     "\n",
10 |     " \"Open"
11 |    ]
12 |   },
13 |   {
14 |    "cell_type": "markdown",
15 |    "id": "b7f7f507",
16 |    "metadata": {},
17 |    "source": [
18 |     "\n",
19 |     "SANA-FE v2 supports building SNNs in Python, in addition to file-based inputs. Using the Python API, try extending the example SNN from Tutorial 2 by completing the following exercises.\n",
20 |     "\n",
21 |     "![Example SNN](example_snn.svg)\n",
22 |     "\n",
23 |     "As a reminder, these were:\n",
24 |     "#### Exercises ####\n",
25 |     "1. Define a new mapped neuron: 1.1. To do this, add another neuron to group 1 (i.e. increment the number of neurons in the group) and map neuron 1.1 to core 0.1\n",
26 |     "2. Add edges from neurons 0.0 and 0.1, both to neuron 1.1, with weights -2 & 3 respectively"
27 |    ]
28 |   },
29 |   {
30 |    "cell_type": "code",
31 |    "execution_count": null,
32 |    "id": "f251b568",
33 |    "metadata": {},
34 |    "outputs": [],
35 |    "source": [
36 |     "%pip install sanafe==2.0.20\n",
37 |     "%pip install pyyaml\n",
38 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/main/tutorial/arch.yaml\n",
39 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/main/tutorial/snn.yaml\n",
40 |     "import sanafe, sanafe.tutorial\n",
41 |     "import yaml"
42 |    ]
43 |   },
44 |   {
45 |    "cell_type": "code",
46 |    "execution_count": null,
47 |    "id": "f66fd9eb",
48 |    "metadata": {},
49 |    "outputs": [],
50 |    "source": [
51 |     "# Create an example SNN\n",
52 |     "in_attributes = {\"threshold\": 1.0, \"reset\": 0.0, \"log_spikes\": True, \"log_potential\": True}\n",
53 |     "out_attributes = {\"threshold\": 2.0, \"reset\": 0.0, \"synapse_hw_name\": \"tutorial_synapse_uncompressed\"}\n",
54 |     "\n",
55 |     "# Create neuron groups\n",
56 |     "snn = sanafe.Network()\n",
57 |     "in_group = snn.create_neuron_group(\"in\", 2, in_attributes)\n",
58 |     "out_group = snn.create_neuron_group(\"out\", 1, out_attributes)\n",
59 |     "\n",
60 |     "# Set neuron attributes\n",
61 |     "in_group[0].set_attributes(model_attributes={\"bias\": 0.2})\n",
62 |     "\n",
63 |     "# Create connections\n",
64 |     "in_group[0].connect_to_neuron(out_group[0], {\"weight\": -1.0})\n",
65 |     "\n",
66 |     "# Create mappings\n",
67 |     "arch = sanafe.load_arch(\"arch.yaml\")\n",
68 |     "core = arch.tiles[0].cores[0]\n",
69 |     "in_group[0].map_to_core(core)\n",
70 |     "in_group[1].map_to_core(core)\n",
71 |     "out_group[0].map_to_core(core)"
72 |    ]
73 |   },
74 |   {
75 |    "cell_type": "code",
76 |    "execution_count": null,
77 |    "id": "63e49969",
78 |    "metadata": {},
79 |    "outputs": [],
80 |    "source": [
81 |     "tutorial_chip = sanafe.SpikingChip(arch)\n",
82 |     "tutorial_chip.load(snn)\n",
83 |     "results = tutorial_chip.sim(200)\n",
84 |     "print(yaml.dump(results))"
85 |    ]
86 |   },
87 |   {
88 |    "cell_type": "code",
89 |    "execution_count": null,
90 |    "id": "43db3dcc",
91 |    "metadata": {},
92 |    "outputs": [],
93 |    "source": [
94 |     "sanafe.tutorial.check_api(snn)"
95 |    ]
96 |   }
97 |  ],
98 |  "metadata": {
99 |   "kernelspec": {
100 |    "display_name": "neuro",
101 |    "language": "python",
102 |    "name": "python3"
103 |   },
104 |   "language_info": {
105 |    "codemirror_mode": {
106 |     "name": "ipython",
107 |     "version": 3
108 |    },
109 |    "file_extension": ".py",
110 |    "mimetype": "text/x-python",
111 |    "name": "python",
112 |    "nbconvert_exporter": "python",
113 |    "pygments_lexer": "ipython3",
114 |    "version": "3.8.20"
115 |   }
116 |  },
117 |  "nbformat": 4,
118 |  "nbformat_minor": 5
119 | }
120 | 
--------------------------------------------------------------------------------
/tutorial/tutorial_5_dvs.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "id": "cdd95780",
6 |    "metadata": {},
7 |    "source": [
8 |     "\n",
9 |     " \"Open"
10 |    ]
11 |   },
12 |   {
13 |    "cell_type": "code",
14 |    "execution_count": null,
15 |    "id": "e0330cab",
16 |    "metadata": {},
17 |    "outputs": [],
18 |    "source": [
19 |     "%pip install sanafe==2.0.20\n",
20 |     "%pip install pyyaml\n",
21 |     "!wget -nc https://raw.githubusercontent.com/SLAM-Lab/SANA-FE/main/arch/loihi.yaml\n",
22 |     "!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1WkbJZFasTe-v8vTYXrUaz_1e-p_xHMEj' -O dvs_challenge.npz\n",
23 |     "import sanafe\n",
24 |     "import sanafe.layers"
25 |    ]
26 |   },
27 |   {
28 |    "cell_type": "code",
29 |    "execution_count": null,
30 |    "id": "67df667e",
31 |    "metadata": {},
32 |    "outputs": [],
33 |    "source": [
34 |     "arch = sanafe.load_arch(\"loihi.yaml\")\n",
35 |     "snn = sanafe.Network()\n",
36 |     "\n",
37 |     "# Load the convolutional kernel weights, thresholds and input biases from file.\n",
38 |     "# If using the Docker container, this file is included in the image.\n",
39 |     "# Otherwise, this file is also hosted on Google Drive and can be downloaded\n",
40 |     "# prior to running this script\n",
41 |     "import numpy as np\n",
42 |     "try:\n",
43 |     "    snn_attributes = np.load(\"dvs_challenge.npz\")\n",
44 |     "except FileNotFoundError as exc:\n",
45 |     "    print(exc)\n",
46 |     "    print(\"\"\"\n",
47 |     "To run this challenge, you need to download the network kernel weights: dvs_challenge.npz, to the tutorial directory.\n",
48 |     "These weights are hosted online on a shared Google Drive. To download the file on Linux, run the command:\n",
49 |     "\n",
50 |     "wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1WkbJZFasTe-v8vTYXrUaz_1e-p_xHMEj' -O tutorial/dvs_challenge.npz\n",
51 |     "\n",
52 |     "Or go directly to the drive at: https://drive.google.com/drive/folders/1GzjXAFouakm3b6GcFIHsw67H8t6l3BtY?usp=drive_link\n",
53 |     "    \"\"\")\n",
54 |     "    exit()\n",
55 |     "\n",
56 |     "# Convert the DVS gesture categorization model to SANA-FE's SNN format\n",
57 |     "thresholds = snn_attributes[\"thresholds\"]\n",
58 |     "biases = snn_attributes[\"inputs\"]\n",
59 |     "\n",
60 |     "layer0 = sanafe.layers.Input2D(snn, 32, 32, threshold=thresholds[0])\n",
61 |     "layer1 = sanafe.layers.Conv2D(snn, layer0, snn_attributes[\"conv1\"],\n",
62 |     "                              stride_width=2, stride_height=2, threshold=thresholds[1])\n",
63 |     "layer2 = sanafe.layers.Conv2D(snn, layer1, snn_attributes[\"conv2\"], threshold=thresholds[2])\n",
64 |     "layer3 = sanafe.layers.Conv2D(snn, layer2, snn_attributes[\"conv3\"], threshold=thresholds[3])\n",
65 |     "layer4 = sanafe.layers.Conv2D(snn, layer3, snn_attributes[\"conv4\"], threshold=thresholds[4])\n",
66 |     "layer5 = sanafe.layers.Dense(snn, layer4, 11, snn_attributes[\"dense1\"], threshold=thresholds[5])\n",
67 |     "\n",
68 |     "# Finally set up the inputs\n",
69 |     "for n, b in zip(layer0, biases):\n",
70 |     "    n.set_attributes(model_attributes={\"bias\": b})"
71 |    ]
72 |   },
73 |   {
74 |    "cell_type": "code",
75 |    "execution_count": null,
76 |    "id": "f133bb3e",
77 |    "metadata": {},
78 |    "outputs": [],
79 |    "source": [
80 |     "# Map the SNN to Loihi cores. Specify the number of cores each layer is evenly\n",
81 |     "# mapped across. Feel free to experiment with changing the line below\n",
82 |     "layer_mapped_core_counts = [1, 4, 16, 16, 4, 1]"
83 |    ]
84 |   },
85 |   {
86 |    "cell_type": "code",
87 |    "execution_count": null,
88 |    "id": "70ad7113",
89 |    "metadata": {},
90 |    "outputs": [],
91 |    "source": [
92 |     "# Map neurons, taking into account the number of cores we want to map across\n",
93 |     "# each layer\n",
94 |     "total_cores_mapped = 0\n",
95 |     "\n",
96 |     "def map_layer_to_cores(layer, cores, core_count):\n",
97 |     "    global total_cores_mapped\n",
98 |     "    total_neurons = len(layer)\n",
99 |     "    neurons_per_core = total_neurons // core_count\n",
100 |     "    for idx in range(core_count):\n",
101 |     "        first_nid = idx * neurons_per_core\n",
102 |     "        is_last = (idx == (core_count-1))\n",
103 |     "        if is_last:\n",
104 |     "            neurons_to_map_to_core = layer[first_nid:]\n",
105 |     "        else:\n",
106 |     "            last_nid = (idx+1) * neurons_per_core\n",
107 |     "            neurons_to_map_to_core = layer[first_nid:last_nid]\n",
108 |     "\n",
109 |     "        for neuron in neurons_to_map_to_core:\n",
110 |     "            neuron.map_to_core(cores[total_cores_mapped])\n",
111 |     "        total_cores_mapped += 1\n",
112 |     "    return\n",
113 |     "\n",
114 |     "for n in layer0:\n",
115 |     "    n.map_to_core(arch.tiles[0].cores[0])\n",
116 |     "\n",
117 |     "cores = arch.cores()\n",
118 |     "map_layer_to_cores(layer0, cores, layer_mapped_core_counts[0])\n",
119 |     "map_layer_to_cores(layer1, cores, layer_mapped_core_counts[1])\n",
120 |     "map_layer_to_cores(layer2, cores, layer_mapped_core_counts[2])\n",
121 |     "map_layer_to_cores(layer3, cores, layer_mapped_core_counts[3])\n",
122 |     "map_layer_to_cores(layer4, cores, layer_mapped_core_counts[4])\n",
123 |     "map_layer_to_cores(layer5, cores, layer_mapped_core_counts[5])"
124 |    ]
125 |   },
126 |   {
127 |    "cell_type": "code",
128 |    "execution_count": null,
129 |    "id": "a1b90dfe",
130 |    "metadata": {},
131 |    "outputs": [],
132 |    "source": [
133 |     "# Run the network you just generated on Loihi\n",
134 |     "# Comment out this line if you want to stop the simulations running\n",
135 |     "chip = sanafe.SpikingChip(arch)\n",
136 |     "chip.load(snn)\n",
137 |     "results = chip.sim(1000)"
138 |    ]
139 |   },
140 |   {
141 |    "cell_type": "code",
142 |    "execution_count": null,
143 |    "id": "c83c9355",
144 |    "metadata": {},
145 |    "outputs": [],
146 |    "source": [
147 |     "# Check the runtime results against expected values to make sure nothing got\n",
148 |     "# messed up earlier\n",
149 |     "expected_firing_neurons = 365277\n",
150 |     "if results[\"neurons_fired\"] != expected_firing_neurons:\n",
151 |     "    print(f\"Error: The total number of neurons spiking was \"\n",
152 |     "          f\"{results['neurons_fired']}, \"\n",
153 |     "          f\"should be {expected_firing_neurons}\")\n",
154 |     "    print(\"Somehow you may have changed the functional behavior of the SNN\")\n",
155 |     "    raise RuntimeError\n",
156 |     "\n",
157 |     "# The energy-delay product is our final performance metric. See how low you can\n",
158 |     "# get this number!\n",
159 |     "energy_delay_product = results[\"energy\"][\"total\"] * results[\"sim_time\"]\n",
160 |     "print(f\"Energy-Delay product: {energy_delay_product}\")"
161 |    ]
162 |   }
163 |  ],
164 |  "metadata": {
165 |   "kernelspec": {
166 |    "display_name": "py11debug",
167 |    "language": "python",
168 |    "name": "python3"
169 |   },
170 |   "language_info": {
171 |    "name": "python",
172 |    "version": "3.11.13"
173 |   }
174 |  },
175 |  "nbformat": 4,
176 |  "nbformat_minor": 5
177 | }
178 | 
--------------------------------------------------------------------------------