├── .coveragerc ├── .github └── workflows │ ├── synth.yml │ └── test_publish.yml ├── .gitignore ├── LICENSE ├── README.rst ├── applets ├── _builds_test.py ├── axi_writer_demo.py ├── camera.py ├── camera_ethernet.py ├── camera_usb3.py ├── cmv12k │ ├── pattern_dram_test.py │ ├── pattern_test.py │ ├── spi_test.py │ └── train_test.py ├── connector_test.py ├── csr_demo.py ├── hdmi_demo.py ├── hdmi_framebuffer.py ├── mipi_data_collect.py ├── rfw_pass.py ├── usb3_perf_debug.py ├── usb3_plugin.py ├── usb3_plugin_host.py └── usb3_plugin_jtag_debug.py ├── doc ├── NapsPosterFPGAIgnite2023.pdf ├── conf.py ├── getting_started.rst ├── index.rst ├── intro.rst ├── naps.platform.rst ├── naps.soc.rst └── setup.rst ├── naps ├── __init__.py ├── cores │ ├── __init__.py │ ├── axi │ │ ├── __init__.py │ │ ├── axi_endpoint.py │ │ ├── full_to_lite.py │ │ ├── interconnect.py │ │ ├── peripheral_connector.py │ │ ├── sim_util.py │ │ ├── specification.pdf │ │ ├── specification_notes.org │ │ ├── stream_reader.py │ │ ├── stream_reader_test.py │ │ ├── stream_writer.py │ │ ├── stream_writer_test.py │ │ └── zynq_util.py │ ├── cmv12k │ │ ├── __init__.py │ │ ├── cmv12k_rx.py │ │ ├── cmv12k_spi.py │ │ ├── pixel_remapper.py │ │ └── s7_phy.py │ ├── compression │ │ ├── __init__.py │ │ ├── bit_stuffing.py │ │ ├── bit_stuffing_test.py │ │ ├── encoding_space.py │ │ ├── huffman_encoder.py │ │ ├── huffman_encoder_test.py │ │ ├── rle.py │ │ └── rle_test.py │ ├── debug │ │ ├── __init__.py │ │ ├── blink_debug.py │ │ ├── clocking_debug.py │ │ ├── fsm_status_reg.py │ │ ├── ila.py │ │ ├── ila_test.py │ │ ├── packet_console.py │ │ ├── packet_console_test.py │ │ └── tracer.py │ ├── dram_packet_ringbuffer │ │ ├── __init__.py │ │ ├── cpu_if.py │ │ ├── stream_if.py │ │ └── stream_if_test.py │ ├── ft601 │ │ ├── __init__.py │ │ ├── ft601_perf_debug.py │ │ ├── ft601_stream_sink.py │ │ ├── ft601_stream_sink_test.py │ │ └── ft60x_legalizer.py │ ├── hdmi │ │ ├── InfoFrame.py │ │ ├── __init__.py │ │ ├── cvt_python.py │ │ ├── 
cvt_subprocess.py │ │ ├── hdmispecification13a.pdf │ │ ├── parse_modeline.py │ │ ├── rx │ │ │ ├── __init__.py │ │ │ ├── hdmi_rx.py │ │ │ └── tmds_decoder.py │ │ ├── tmds.py │ │ ├── tmds_test.py │ │ └── tx │ │ │ ├── __init__.py │ │ │ ├── hdmi_stream_sink.py │ │ │ ├── hdmi_test.py │ │ │ ├── hdmi_tx.py │ │ │ ├── pattern_generator.py │ │ │ └── tmds_encoder.py │ ├── hispi │ │ ├── __init__.py │ │ ├── hispi_rx.py │ │ ├── hispi_rx_test.py │ │ ├── s7_phy.py │ │ ├── test_data_new.txt.lzma │ │ └── test_data_old.txt.lzma │ ├── jtag │ │ ├── __init__.py │ │ ├── jtag_peripheral_connector.py │ │ └── jtag_peripheral_connector_test.py │ ├── mipi │ │ ├── __init__.py │ │ ├── common.py │ │ ├── csi_rx │ │ │ ├── __init__.py │ │ │ ├── aligner.py │ │ │ ├── combiner.py │ │ │ ├── packet.py │ │ │ ├── s7_rx_phy.py │ │ │ └── types.py │ │ └── dsi_tx │ │ │ ├── __init__.py │ │ │ ├── d_phy_lane.py │ │ │ ├── d_phy_lane_test.py │ │ │ ├── dsi_phy.py │ │ │ ├── py_dsi_generator.py │ │ │ ├── types.py │ │ │ ├── video2dsi.py │ │ │ └── video2dsi_test.py │ ├── peripherals │ │ ├── __init__.py │ │ ├── bitbang_i2c.py │ │ ├── bitbang_spi.py │ │ ├── csr_bank.py │ │ ├── csr_bank_zynq_test.py │ │ ├── drp_bridge.py │ │ ├── mmio_gpio.py │ │ ├── soc_memory.py │ │ └── soc_memory_test.py │ ├── plugin_module_streamer │ │ ├── PROTOCOL.md │ │ ├── __init__.py │ │ ├── rx.py │ │ └── tx.py │ ├── serdes │ │ ├── __init__.py │ │ ├── inputgearbox.py │ │ └── serializer.py │ ├── stream │ │ ├── __init__.py │ │ ├── buffer.py │ │ ├── buffer_test.py │ │ ├── counter_source.py │ │ ├── counter_source_test.py │ │ ├── debug.py │ │ ├── fifo.py │ │ ├── fifo_test.py │ │ ├── gearbox.py │ │ ├── gearbox_test.py │ │ ├── metadata_wrapper.py │ │ ├── metadata_wrapper_test.py │ │ ├── repacking.py │ │ ├── stream_memory.py │ │ ├── stream_memory_test.py │ │ ├── tee.py │ │ └── tee_test.py │ └── video │ │ ├── __init__.py │ │ ├── adapters.py │ │ ├── adapters_test.py │ │ ├── debayer.py │ │ ├── debayer_test.py │ │ ├── demo_source.py │ │ ├── focus_peeking.py │ 
│ ├── focus_peeking_cxxrtl_test │ │ ├── .gitignore │ │ ├── cat512.jpg │ │ ├── focus_peak_test.png │ │ ├── focus_peeking_test.cpp │ │ ├── main.cpp │ │ ├── stb_image.h │ │ └── stb_image_write.h │ │ ├── gamma_corrector.py │ │ ├── gamma_corrector_test.py │ │ ├── image_convoluter.py │ │ ├── image_convoluter_test.py │ │ ├── image_stream.py │ │ ├── rearrange.py │ │ ├── rearrange_test.py │ │ ├── resizer.py │ │ ├── rgb.py │ │ ├── test_bayer.png │ │ ├── test_util.py │ │ └── wavelet │ │ ├── __init__.py │ │ ├── che_128.png │ │ ├── che_16.png │ │ ├── che_32.png │ │ ├── che_64.png │ │ ├── che_full.png │ │ ├── dng.py │ │ ├── py_compressor.py │ │ ├── py_wavelet.py │ │ ├── py_wavelet_benchmark.py │ │ ├── py_wavelet_repack.py │ │ ├── vifp.py │ │ ├── wavelet.py │ │ ├── wavelet_compressor.py │ │ └── wavelet_test.py ├── data_structure │ ├── __init__.py │ └── bundle.py ├── platform │ ├── __init__.py │ ├── beta_platform.py │ ├── colorlight_5a_75b_7_0.py │ ├── hdmi_digitizer_platform.py │ ├── micro_r2_platform.py │ ├── plugins │ │ ├── __init__.py │ │ ├── hdmi_plugin_resource.py │ │ ├── plugin_connector.py │ │ └── usb3_plugin_resource.py │ ├── usb3_plugin_platform.py │ └── zybo_platform.py ├── soc │ ├── __init__.py │ ├── cli.py │ ├── csr_types.py │ ├── devicetree_overlay.py │ ├── fatbitstream.py │ ├── hooks.py │ ├── memorymap.py │ ├── peripheral.py │ ├── peripherals_aggregator.py │ ├── platform │ │ ├── __init__.py │ │ ├── jtag │ │ │ ├── __init__.py │ │ │ ├── jtag_soc_platform.py │ │ │ └── memory_accessor_openocd.py │ │ ├── sim │ │ │ ├── __init__.py │ │ │ └── sim_soc_platform.py │ │ └── zynq │ │ │ ├── __init__.py │ │ │ ├── memory_accessor_devmem.py │ │ │ ├── ps7_memorymap.org │ │ │ ├── to_raw_bitstream.py │ │ │ └── zynq_soc_platform.py │ ├── program_fatbitstream_local.py │ ├── program_fatbitstream_ssh.py │ ├── pydriver │ │ ├── __init__.py │ │ ├── driver_items.py │ │ ├── generate.py │ │ ├── hardware_proxy.py │ │ ├── hardware_proxy_test.py │ │ └── interactive.py │ ├── smoke_test.py │ ├── 
soc_platform.py │ ├── soc_platform_test.py │ └── tracing_elaborate.py ├── stream │ ├── __init__.py │ ├── first_stream.py │ ├── formal_util.py │ ├── formal_util_test.py │ ├── pipeline.py │ ├── sim_util.py │ ├── stream.py │ ├── stream_notes.md │ └── stream_transformer.py ├── util │ ├── __init__.py │ ├── amaranth_misc.py │ ├── amaranth_misc_test.py │ ├── amaranth_private.py │ ├── draw_hierarchy.py │ ├── env.py │ ├── formal.py │ ├── past.py │ ├── plot_util.py │ ├── process.py │ ├── process_test.py │ ├── py_serialize.py │ ├── python_misc.py │ ├── sim.py │ ├── size_estimation.py │ ├── timer.py │ └── yosys.py └── vendor │ ├── __init__.py │ ├── generic │ ├── __init__.py │ └── jtag.py │ ├── instance_helper.py │ ├── lattice_ecp5 │ ├── __init__.py │ ├── clocking.py │ ├── io.py │ └── jtag.py │ ├── lattice_machxo2 │ ├── __init__.py │ ├── clocking.py │ ├── io.py │ └── jtag.py │ ├── platform_agnostic_elaboratable.py │ └── xilinx_s7 │ ├── __init__.py │ ├── clocking.py │ ├── io.py │ ├── jtag.py │ ├── mmcm_drp_memorymap.yml │ ├── ps7.py │ └── ps7_fclk_frequencies.txt ├── pdm.lock ├── pdm_build.py └── pyproject.toml /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | exclude_lines = 3 | # Have to re-enable the standard pragma 4 | pragma: no cover 5 | 6 | # Don't complain if tests don't hit defensive assertion code: 7 | raise NotImplementedError -------------------------------------------------------------------------------- /.github/workflows/synth.yml: -------------------------------------------------------------------------------- 1 | name: Vivado Builds 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '0 0 * * 0' 7 | 8 | jobs: 9 | build_applet: 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | applet: 14 | - applets/camera.py -b -d MicroR2 -s Zynq 15 | - applets/hdmi_framebuffer.py -b -d MicroR2 -s Zynq 16 | - applets/hdmi_framebuffer.py -b -d Beta -s Zynq 17 | - applets/hdmi_framebuffer.py -b -d Zybo -s 
Zynq 18 | runs-on: vivado 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v3 22 | - name: Set up Python 23 | uses: actions/setup-python@v4 24 | with: 25 | python-version: '3.11' 26 | - name: Setup PDM 27 | uses: pdm-project/setup-pdm@v3 28 | with: 29 | python-version: '3.11' 30 | - uses: YosysHQ/setup-oss-cad-suite@v3 31 | - name: Install dependencies 32 | run: pdm install -G test 33 | - name: Run test suite 34 | run: 35 | pdm run ${{ matrix.applet }} 36 | - name: upload 37 | uses: actions/upload-artifact@v2 38 | with: 39 | name: build_hdmi_test_${{ matrix.device }}_${{ matrix.soc_platform }} 40 | path: build/*/* 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Xil 2 | .vscode 3 | .idea 4 | __pycache__/ 5 | *.pyc 6 | build/ 7 | usage_statistics_webtalk.html 8 | usage_statistics_webtalk.xml 9 | *.gtkw 10 | *.vcd 11 | spec_*/ 12 | *.pyi 13 | *.sim_results/ 14 | analyze/ 15 | **/analyze/ 16 | *.dng 17 | doc/_build 18 | naps.egg-info/ 19 | .pdm-python 20 | dist/ 21 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | naps - The Relaxed Amaranth Packages Collection 2 | ======================================================= 3 | 4 | Building Blocks & Tools for FPGA Design with Python & `Amaranth HDL `__. 5 | Mostly a place to experiment and figure out how to build things. 6 | Also the incubator for the future AXIOM Beta camera gateware & the home of the current AXIOM micro gateware. 
7 | 8 | This repo contains: 9 | 10 | - Prototypes of the upcoming Amaranth data types ``PackedStruct`` and ``Interface`` (here called ``Bundle``) (in ``src/lib/data_structure/``) 11 | - A stream Abstraction with various building Blocks: (in ``src/lib/stream/``) 12 | 13 | - FIFOs 14 | - A gearbox for changing the width 15 | - Helpers for building other Stream cores 16 | - Miscellaneous Debug and Inspection tools 17 | 18 | - various Amaranth cores (in ``src/lib/``) for: 19 | 20 | - AXI and AXI Lite including a Buffer reader and Writer 21 | - A CSR bank that can be wired to an AXI bus 22 | - HDMI (currently the DVI subset; derived from Litevideo) 23 | - A HISPI reciever (for the use with aptina / onsemi image sensors) 24 | - A core for streaming data over USB3 using the ft601 25 | - Some utility video processing (like debayering) 26 | - … 27 | 28 | - tools for gluing together SOCs (currently supports the Xilinx Zynq and JTAG based plattoforms) in ``src/soc/`` 29 | 30 | - Making heavy use of Amaranth HDL Platform abstractions (wrapping existing plattforms) 31 | - Provides a bus agnostic way to describe (low speed) peripherals 32 | - Emits Python code that can be used to access the designs CSRs (“pydriver”) 33 | - Generate devicetree overlays for loading linux device drivers 34 | - pack “fatbitstreams” that bundle setup logic, drivers and the bitstream 35 | - there is a `Poster about the naps soc infrastructure `__. 
36 | 37 | - platform definitions for both the AXIOM Beta and the AXIOM Micro in ``src/devices/`` 38 | - a variety of other smaller half-working experiments in ``src/experiments/`` 39 | 40 | - linux framebuffer HDMI output 41 | - USB3 Plugin module gateware (wip) 42 | - AXIOM micro camera gateware (wip) 43 | - test gateware to test connectors for their ability to transmit high speed data (via a loopback test) 44 | - test gateware for the axi writer & reader 45 | 46 | Installation 47 | ------------ 48 | 49 | Installing -------------------------------------------------------------------------------- /applets/_builds_test.py: -------------------------------------------------------------------------------- 1 | # this is not actually an applet but a helper to run all the applets with the test runner. 2 | # you can probably ignore this file, but it is very handy to catch major breakages. 3 | 4 | import subprocess 5 | from importlib import import_module 6 | from pathlib import Path 7 | import unittest 8 | import amaranth 9 | 10 | from naps.soc.platform import JTAGSocPlatform, ZynqSocPlatform 11 | 12 | 13 | for path in Path(__file__).parent.glob("**/*.py"): 14 | if path.stem.startswith("_"): # exclude ourselves 15 | continue 16 | name = str(path.relative_to(Path(__file__).parent)).removesuffix(".py").replace("/", ".") 17 | 18 | vars()[name] = type(name, (unittest.TestCase,), {}) 19 | module = import_module(name) 20 | for target in module.Top.runs_on: 21 | device = target.__name__.replace("Platform", "") 22 | 23 | for soc in [None, JTAGSocPlatform, ZynqSocPlatform]: 24 | hardware_platform = target() 25 | 26 | if hasattr(module.Top, "soc_platform") and module.Top.soc_platform != soc: 27 | continue 28 | 29 | if soc is None: 30 | soc_name = "Plain" 31 | soc_platform = hardware_platform 32 | else: 33 | soc_name = soc.__name__.replace("SocPlatform", "") 34 | if not soc.can_wrap(hardware_platform): 35 | continue 36 | soc_platform = soc(hardware_platform) 37 | 38 | build = 
hardware_platform.toolchain == "Trellis" 39 | if amaranth.__version__ == "0.5.4": 40 | build = False # TODO: remove, once https://github.com/amaranth-lang/amaranth/commit/7664a00f4d3033e353b2f3a00802abb7403c0b68 is released 41 | def make_run(path, device, soc_name, build): 42 | def run(self): 43 | command = ['python', str(path), '-e', '--no_cache', '-d', device, '-s', soc_name] 44 | if build: 45 | command.append("-b") 46 | print("running '{}'".format(' '.join(command))) 47 | process = subprocess.Popen(command, stdout=subprocess.PIPE, 48 | stderr=subprocess.STDOUT, stdin=subprocess.PIPE) 49 | stdout, stderr = process.communicate() 50 | if process.returncode != 0: 51 | self.fail("\n" + stdout.decode()) 52 | return run 53 | setattr(vars()[name], f"test_{'build' if build else 'elaborate'}_for_{device}_{soc_name}", make_run(path, device, soc_name, build)) 54 | 55 | -------------------------------------------------------------------------------- /applets/axi_writer_demo.py: -------------------------------------------------------------------------------- 1 | # An experiment to that checks the functionality of the axi writer 2 | from amaranth import * 3 | from naps import * 4 | 5 | 6 | class Top(Elaboratable): 7 | runs_on = [MicroR2Platform, BetaPlatform, ZyboPlatform] 8 | soc_platform = ZynqSocPlatform 9 | 10 | def __init__(self): 11 | self.reset = ControlSignal() 12 | self.to_write = ControlSignal(init=32 * 1024 * 1024) 13 | self.data_counter = StatusSignal(32) 14 | self.data_valid = ControlSignal() 15 | self.data_ready = StatusSignal() 16 | 17 | def elaborate(self, platform: ZynqSocPlatform): 18 | m = Module() 19 | 20 | platform.ps7.fck_domain(requested_frequency=200e6) 21 | m.d.comb += ResetSignal().eq(self.reset) 22 | 23 | stream = PacketizedStream(64) 24 | m.d.comb += self.data_ready.eq(stream.ready) 25 | m.d.comb += stream.valid.eq(self.data_valid) 26 | 27 | axi_writer = m.submodules.axi_writer = DramPacketRingbufferStreamWriter(stream, max_packet_size=0x1200000, 
n_buffers=4) 28 | 29 | with m.If(axi_writer.input.ready & axi_writer.input.valid): 30 | m.d.sync += self.data_counter.eq(self.data_counter + 1) 31 | m.d.comb += stream.payload.eq(Cat(self.data_counter, self.data_counter + 1000)) 32 | 33 | return m 34 | 35 | 36 | if __name__ == "__main__": 37 | cli(Top) 38 | -------------------------------------------------------------------------------- /applets/camera_ethernet.py: -------------------------------------------------------------------------------- 1 | # An experiment that glues everything together and tries to get a full sensor -> ethernet flow working on the micro 2 | import os 3 | from amaranth import * 4 | from naps import * 5 | 6 | 7 | class Top(Elaboratable): 8 | runs_on = [MicroR2Platform] 9 | soc_platform = ZynqSocPlatform 10 | 11 | def __init__(self): 12 | self.sensor_reset_n = ControlSignal(name='sensor_reset', init=1) 13 | 14 | def elaborate(self, platform): 15 | m = Module() 16 | 17 | platform.ps7.fck_domain(100e6, "axi_hp") 18 | 19 | # Control Pane 20 | i2c_pads = platform.request("i2c") 21 | m.submodules.i2c = BitbangI2c(i2c_pads) 22 | 23 | # Input Pipeline 24 | sensor = platform.request("sensor") 25 | platform.ps7.fck_domain(24e6, "sensor_clk") 26 | m.d.comb += sensor.clk.o.eq(ClockSignal("sensor_clk")) 27 | m.d.comb += sensor.reset.o.eq(~self.sensor_reset_n) 28 | # TODO: find more idiomatic way to do this 29 | os.environ["AMARANTH_add_constraints"] = \ 30 | "set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets pin_sensor_0__lvds_clk/input_hispi_rx_sensor_0__lvds_clk__i]" 31 | 32 | p = Pipeline(m) 33 | p += HispiRx(sensor, hispi_domain="hispi") 34 | p += Repack12BitStream(p.output) 35 | 36 | p += BufferedAsyncStreamFIFO(p.output, 2048, o_domain="axi_hp") 37 | p += StreamGearbox(p.output, 64) 38 | p += ImageStream2PacketizedStream(p.output) 39 | p += DramPacketRingbufferStreamWriter(p.output, max_packet_size=0x800000, n_buffers=4) 40 | p += DramPacketRingbufferCpuReader(p.last) 41 | 42 | return m 43 | 44 | 
@driver_method 45 | def kick_sensor(self): 46 | from os import system 47 | system("cat /axiom-api/scripts/kick/value") 48 | 49 | 50 | if __name__ == "__main__": 51 | cli(Top) 52 | -------------------------------------------------------------------------------- /applets/cmv12k/spi_test.py: -------------------------------------------------------------------------------- 1 | # set up and demonstrate SPI connection to CMV12k control pins 2 | 3 | from amaranth import * 4 | from naps import * 5 | 6 | class Top(Elaboratable): 7 | runs_on = [BetaPlatform] 8 | soc_platform = ZynqSocPlatform 9 | 10 | def __init__(self): 11 | self.sensor_reset = ControlSignal() 12 | 13 | def elaborate(self, platform: BetaPlatform): 14 | m = Module() 15 | 16 | platform.ps7.fck_domain(requested_frequency=100e6) 17 | 18 | sensor = platform.request("sensor") 19 | platform.ps7.fck_domain(250e6, "sensor_clk") 20 | m.d.comb += sensor.lvds_clk.o.eq(ClockSignal("sensor_clk")) 21 | m.d.comb += sensor.reset.o.eq(self.sensor_reset) 22 | 23 | m.submodules.sensor_spi = Cmv12kSpi(platform.request("sensor_spi")) 24 | 25 | return m 26 | 27 | if __name__ == "__main__": 28 | cli(Top) 29 | -------------------------------------------------------------------------------- /applets/cmv12k/train_test.py: -------------------------------------------------------------------------------- 1 | # set up and demonstrate training of CMV12k 2 | 3 | # DEMO PROCEDURE: 4 | # 1. build the fatbitstream with `python3 applets/cmv12k/train_test.py -b` 5 | # 2. copy the resulting build/train_test_*/train_test.fatbitstream.sh file to the Beta 6 | # 3. log into the Beta and get root access with e.g. `sudo su` 7 | # 4. power up the sensor with `axiom_power_init.sh && axiom_power_on.sh` 8 | # 5. load the fatbitstream with `./train_test.fatbitstream.sh --run` 9 | # 6. run the `design.train()` function at the prompt 10 | # 7. 
if everything worked, you will see "working channel mask: 0xFFFFFFFF" 11 | 12 | from amaranth import * 13 | from naps import * 14 | 15 | class Top(Elaboratable): 16 | runs_on = [BetaPlatform] 17 | soc_platform = ZynqSocPlatform 18 | 19 | def __init__(self): 20 | self.sensor_reset = ControlSignal() 21 | 22 | def elaborate(self, platform: BetaPlatform): 23 | m = Module() 24 | 25 | platform.ps7.fck_domain(requested_frequency=100e6) 26 | 27 | sensor = platform.request("sensor") 28 | platform.ps7.fck_domain(250e6, "sensor_clk") 29 | m.d.comb += sensor.lvds_clk.o.eq(ClockSignal("sensor_clk")) 30 | m.d.comb += sensor.reset.o.eq(self.sensor_reset) 31 | 32 | m.d.comb += [ 33 | sensor.frame_req.o.eq(0), 34 | sensor.t_exp1.o.eq(0), 35 | sensor.t_exp2.o.eq(0), 36 | ] 37 | 38 | m.submodules.sensor_spi = Cmv12kSpi(platform.request("sensor_spi")) 39 | sensor_rx = m.submodules.sensor_rx = Cmv12kRx(sensor) 40 | 41 | return m 42 | 43 | @driver_method 44 | def train(self): 45 | self.sensor_rx.configure_sensor_defaults(self.sensor_spi) 46 | self.sensor_rx.trainer.train(self.sensor_spi) 47 | 48 | if __name__ == "__main__": 49 | cli(Top) 50 | -------------------------------------------------------------------------------- /applets/csr_demo.py: -------------------------------------------------------------------------------- 1 | # A simple experiment that demonstrates basic CSR / SOC functionality 2 | from amaranth import * 3 | from amaranth.vendor import LatticePlatform 4 | from naps import * 5 | from naps.vendor.lattice_machxo2 import Osc 6 | 7 | 8 | class Top(Elaboratable): 9 | runs_on = [Usb3PluginPlatform, MicroR2Platform, ZyboPlatform, BetaPlatform, HdmiDigitizerPlatform, BetaRFWPlatform, Colorlight5a75b70Platform] 10 | 11 | def __init__(self): 12 | self.counter = StatusSignal(32) 13 | self.test_reg32 = ControlSignal(32) 14 | 15 | def elaborate(self, platform): 16 | m = Module() 17 | 18 | has_clk = False 19 | if isinstance(platform, ZynqSocPlatform): 20 | 
platform.ps7.fck_domain(requested_frequency=100e6) 21 | has_clk = True 22 | elif isinstance(platform, LatticePlatform) and platform.family == "machxo2": 23 | m.submodules.osc = Osc() 24 | has_clk = True 25 | elif isinstance(platform, Colorlight5a75b70Platform): 26 | has_clk = True 27 | m.d.comb += platform.request("led", 0).o.eq(self.counter[22]) 28 | 29 | if has_clk: 30 | m.d.sync += self.counter.eq(self.counter + 1) 31 | else: 32 | m.d.comb += self.counter.eq(42) # we dont have a clock source so we cant count 33 | 34 | return m 35 | 36 | 37 | if __name__ == "__main__": 38 | cli(Top) 39 | -------------------------------------------------------------------------------- /applets/hdmi_demo.py: -------------------------------------------------------------------------------- 1 | # Test HDMI output using a given modeline by displaying a solid (adjustable) color 2 | 3 | from amaranth import * 4 | from naps import * 5 | 6 | 7 | class Top(Elaboratable): 8 | runs_on = [MicroR2Platform, BetaPlatform, ZyboPlatform] 9 | soc_platform = ZynqSocPlatform 10 | 11 | def __init__(self): 12 | self.r = ControlSignal(8, init=0xFA) 13 | self.g = ControlSignal(8, init=0x87) 14 | self.b = ControlSignal(8, init=0x56) 15 | 16 | def elaborate(self, platform: ZynqSocPlatform): 17 | if not isinstance(platform, ZyboPlatform): 18 | hdmi_plugin_connect(platform, "north") 19 | 20 | m = Module() 21 | 22 | hdmi_resource = platform.request("hdmi") 23 | hdmi = m.submodules.hdmi = HdmiTx(hdmi_resource, generate_modeline(1920, 1080, 30)) 24 | 25 | clocking_debug = m.submodules.clocking_debug = ClockingDebug("pix", "pix_5x") 26 | 27 | m.d.comb += hdmi.rgb.r.eq(self.r) 28 | m.d.comb += hdmi.rgb.g.eq(self.g) 29 | m.d.comb += hdmi.rgb.b.eq(self.b) 30 | 31 | return m 32 | 33 | 34 | if __name__ == "__main__": 35 | cli(Top) 36 | -------------------------------------------------------------------------------- /applets/hdmi_framebuffer.py: 
-------------------------------------------------------------------------------- 1 | # Provides a linux framebuffer via HDMI 2 | 3 | from amaranth import * 4 | from naps import * 5 | 6 | 7 | class Top(Elaboratable): 8 | runs_on = [MicroR2Platform, BetaPlatform, ZyboPlatform] 9 | soc_platform = ZynqSocPlatform 10 | 11 | def __init__(self): 12 | self.width = 1280 13 | self.height = 720 14 | 15 | def elaborate(self, platform: ZynqSocPlatform): 16 | if not isinstance(platform, ZyboPlatform): 17 | hdmi_plugin_connect(platform, "north") 18 | 19 | m = Module() 20 | 21 | cpu_writer = m.submodules.cpu_writer = DramPacketRingbufferCpuWriter( 22 | max_packet_size=0x1000000, n_buffers=2, 23 | default_packet_size=self.width * self.height * 4 24 | ) 25 | 26 | platform.ps7.fck_domain(100e6, "axi_hp") 27 | 28 | p = Pipeline(m, start_domain="axi_hp") 29 | p += DramPacketRingbufferStreamReader(cpu_writer) 30 | p += SimpleStreamGearbox(p.output, target_width=32) 31 | p += StreamResizer(p.output, target_width=24) 32 | p += PacketizedStream2ImageStream(p.output, width=self.width) 33 | p += BufferedAsyncStreamFIFO(p.output, depth=16 * 1024, o_domain="pix") 34 | 35 | hdmi = platform.request("hdmi") 36 | p += HdmiStreamSink(p.output, hdmi, generate_modeline(self.width, self.height, 30), pix_domain="pix") 37 | 38 | m.submodules.clocking_debug = ClockingDebug("pix", "pix_5x", "axi_hp") 39 | 40 | overlay_content = """ 41 | %overlay_name%: framebuffer@%address% { 42 | compatible = "simple-framebuffer"; 43 | reg = <0x%address% (%width% * %height% * 4)>; 44 | width = <%width%>; 45 | height = <%height%>; 46 | stride = <(%width% * 4)>; 47 | format = "a8b8g8r8"; 48 | }; 49 | """ 50 | devicetree_overlay(platform, "framebuffer", overlay_content, { 51 | "width": str(self.width), 52 | "height": str(self.height), 53 | "address": "{:x}".format(cpu_writer.buffer_base_list[0]), 54 | }) 55 | 56 | return m 57 | 58 | 59 | if __name__ == "__main__": 60 | cli(Top) 61 | 
-------------------------------------------------------------------------------- /applets/rfw_pass.py: -------------------------------------------------------------------------------- 1 | # Pass through gateware to program the usb3 plugin module on the Beta 2 | # basically a nMigen adaption of http://vserver.13thfloor.at/Stuff/AXIOM/BETA/pass_jtag/ 3 | from amaranth import * 4 | from naps import * 5 | 6 | 7 | class Top(Elaboratable): 8 | runs_on = [BetaRFWPlatform] 9 | 10 | def elaborate(self, platform): 11 | m = Module() 12 | 13 | usb3_plugin_connect(platform, "south", gpio=True, lvds=False, gpio_attrs=dict(IO_TYPE="LVCMOS33", PULLMODE="UP", DRIVE="4")) 14 | 15 | usb3 = platform.request("usb3_plugin") 16 | pic_io = platform.request("pic_io") 17 | 18 | def connect(output, input, invert=False): 19 | m.d.comb += output.o.eq(~input.i if invert else input.i) 20 | m.d.comb += output.oe.eq(1) 21 | m.d.comb += input.oe.eq(0) 22 | 23 | connect(usb3.jtag.tdi, pic_io.sdo) 24 | connect(usb3.jtag.tck, pic_io.sck) 25 | connect(usb3.jtag_enb, pic_io.ss) 26 | connect(usb3.init, pic_io.initn, invert=True) 27 | connect(pic_io.done, usb3.done, invert=True) 28 | connect(pic_io.sdi, usb3.jtag.tdo) 29 | connect(usb3.jtag.tms, pic_io.sn) 30 | connect(usb3.program, pic_io.pb22b, invert=True) 31 | 32 | return m 33 | 34 | 35 | if __name__ == "__main__": 36 | cli(Top) 37 | -------------------------------------------------------------------------------- /applets/usb3_perf_debug.py: -------------------------------------------------------------------------------- 1 | # An experiment that allows debugging / diagnosing performance of the FT601 USB3 FIFO ic 2 | from amaranth import * 3 | from naps import * 4 | 5 | 6 | class Top(Elaboratable): 7 | runs_on = [Usb3PluginPlatform, HdmiDigitizerPlatform] 8 | 9 | def __init__(self): 10 | pass 11 | 12 | def elaborate(self, platform): 13 | m = Module() 14 | 15 | ft601 = platform.request("ft601") 16 | ft601_perf_debug = m.submodules.ft601_perf_debug = 
FT601PerfDebug(ft601) 17 | m.d.comb += platform.request("led", 0).o.eq(1) 18 | 19 | return m 20 | 21 | 22 | if __name__ == "__main__": 23 | cli(Top) 24 | -------------------------------------------------------------------------------- /applets/usb3_plugin.py: -------------------------------------------------------------------------------- 1 | # Experimental gateware for the usb3 plugin module 2 | from amaranth import * 3 | from naps import * 4 | 5 | 6 | class Top(Elaboratable): 7 | runs_on = [Usb3PluginPlatform] 8 | 9 | def elaborate(self, platform): 10 | m = Module() 11 | 12 | clocking = m.submodules.clocking = ClockingDebug("sync", "sync_in", "ft601") 13 | 14 | plugin = platform.request("plugin_stream_input") 15 | rx = m.submodules.rx = PluginModuleStreamerRx(plugin, domain_name="sync") 16 | 17 | ft601 = platform.request("ft601") 18 | m.submodules.ft601 = FT601StreamSink(ft601, rx.output, domain_name="ft601") 19 | 20 | return m 21 | 22 | 23 | if __name__ == "__main__": 24 | cli(Top) 25 | -------------------------------------------------------------------------------- /applets/usb3_plugin_host.py: -------------------------------------------------------------------------------- 1 | # An experiment that allows to flash the USB3 plugin module via JTAG on the micro via bitbanging and 2 | # MMIO GPIO. 
3 | from amaranth import * 4 | from naps import * 5 | from naps.vendor.xilinx_s7 import Pll 6 | 7 | 8 | class Top(Elaboratable): 9 | runs_on = [MicroR2Platform] 10 | soc_platform = ZynqSocPlatform 11 | 12 | def elaborate(self, platform: ZynqSocPlatform): 13 | usb3_plugin_connect(platform, "south") 14 | 15 | m = Module() 16 | 17 | platform.ps7.fck_domain(20e6, "fclk_in") 18 | pll = m.submodules.pll = Pll(20e6, 40, 1, input_domain="fclk_in") 19 | pll.output_domain("bitclk", 2) 20 | pll.output_domain("sync", 8) 21 | 22 | clocking = m.submodules.clocking = ClockingDebug("fclk_in", "bitclk", "sync") 23 | 24 | usb3_plugin = platform.request("usb3_plugin") 25 | 26 | if isinstance(platform, MicroR2Platform): 27 | m.submodules.mmio_gpio = MmioGpio([ 28 | usb3_plugin.jtag.tms, 29 | usb3_plugin.jtag.tck, 30 | usb3_plugin.jtag.tdi, 31 | usb3_plugin.jtag.tdo, 32 | 33 | usb3_plugin.jtag_enb, 34 | usb3_plugin.program, 35 | usb3_plugin.init, 36 | usb3_plugin.done, 37 | ]) 38 | 39 | counter = m.submodules.counter = CounterStreamSource(32) 40 | m.submodules.tx = PluginModuleStreamerTx(usb3_plugin.lvds, counter.output, bitclk_domain="bitclk") 41 | 42 | return m 43 | 44 | 45 | if __name__ == "__main__": 46 | cli(Top) 47 | -------------------------------------------------------------------------------- /applets/usb3_plugin_jtag_debug.py: -------------------------------------------------------------------------------- 1 | # An experiment for debugging the JTAG interface on the usb3 plugin module by using the USB3 output 2 | # as a usb3 output. 
3 | from amaranth import * 4 | from naps import * 5 | from naps.vendor.lattice_machxo2 import Osc 6 | 7 | 8 | class Top(Elaboratable): 9 | runs_on = [Usb3PluginPlatform] 10 | soc_platform = JTAGSocPlatform 11 | 12 | def __init__(self): 13 | self.counter = StatusSignal(32) 14 | self.test_reg32 = ControlSignal(32) 15 | 16 | def elaborate(self, platform): 17 | m = Module() 18 | m.submodules.osc = Osc(freq=53.2e6) 19 | m.d.sync += self.counter.eq(self.counter + 1) 20 | m.d.comb += platform.request("led", 0).o.eq(self.counter[24]) 21 | 22 | debug_stream_source = BasicStream(32) 23 | m.d.comb += debug_stream_source.valid.eq(1) 24 | m.d.comb += debug_stream_source.payload.eq(platform.jtag_debug_signals) 25 | 26 | ft601 = platform.request("ft601") 27 | m.submodules.ft601 = FT601StreamSink(ft601, debug_stream_source, domain_name="ft601") 28 | 29 | return m 30 | 31 | 32 | if __name__ == "__main__": 33 | cli(Top) 34 | -------------------------------------------------------------------------------- /doc/NapsPosterFPGAIgnite2023.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/doc/NapsPosterFPGAIgnite2023.pdf -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. 
# If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

import os
import sys
sys.path.insert(0, os.path.abspath('../'))


# -- Project information -----------------------------------------------------

project = 'naps'
copyright = '2021, Robin Heinemann, Jaro Habiger'
author = 'Robin Heinemann, Jaro Habiger'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.todo",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosectionlabel",
    "sphinx_rtd_theme",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
56 | html_static_path = ['_static'] 57 | -------------------------------------------------------------------------------- /doc/getting_started.rst: -------------------------------------------------------------------------------- 1 | Getting started 2 | ================= 3 | 4 | Now that you have a :ref:`working installation of naps and decided on working either 5 | in-tree or out-of-tree`, you can start writing your first design. 6 | 7 | For this, we create a new file named ``blinky.py`` (if you work in-tree in the ``applets/`` directory) 8 | and put the following code in it:: 9 | 10 | from amaranth import * 11 | from naps import * 12 | 13 | class Top(Elaboratable): 14 | runs_on = [Colorlight5a75b70Platform] 15 | 16 | def __init__(self): 17 | pass 18 | 19 | def elaborate(self, platform): 20 | m = Module() 21 | 22 | led = platform.request("user_led") 23 | counter = Signal(16) 24 | with m.If(counter == int(25e6)): 25 | m.d.sync += led.o.eq(~led.o) 26 | m.d.sync += counter.eq(0) 27 | with m.Else(): 28 | m.d.sync += counter.eq(counter + 1) 29 | 30 | return m 31 | 32 | if __name__ == "__main__": 33 | cli(Top) 34 | 35 | 36 | You can now "elaborate" and build this design using the naps cli. "Elaboration" 37 | (``-e``) means that we execute all the python code and generate verilog / rtlil 38 | that then could be fed into the vendor toolchain for building (``-b``):: 39 | 40 | pdm run python blinky.py -s JTAG -e -b 41 | 42 | With the ``-s JTAG`` flag, we specify that we want to use the "JTAGSoc". What that means will be 43 | explained in the :ref:`naps.soc` section. 44 | 45 | To actually program the board, we can add the ``-p`` flag:: 46 | 47 | pdm run python blinky.py -s JTAG -e -b -p 48 | 49 | This should give you a blinking LED on your board. 50 | 51 | The ``Colorlight5a75b70Platform`` class currently assumes that it is connected to a 52 | jlink USB to JTAG adapter. 
If you have a different board, you can create a board description 53 | similiar for it as described in the :ref:`naps.platform` section. 54 | 55 | After this, you have everything set up to start exploring and using naps. 56 | The rest of the documentation is organized by package and you can skip around. 57 | Still, the documentation here is ordered in a somewhat sensible way and it is 58 | recomended that you read at least the documentation for the `naps.soc` package 59 | before you go on. 60 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | naps - The Relaxed Amaranth Packages Collection 2 | ======================================================= 3 | 4 | .. warning:: 5 | This manual is a work in progress and is seriously incomplete! 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | :caption: Contents: 10 | 11 | intro 12 | setup 13 | getting_started 14 | naps.soc 15 | naps.platform -------------------------------------------------------------------------------- /doc/intro.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ------------ 3 | 4 | Welcome to the documentation of `naps` - a collection of cores and infrastructure around Amaranth HDL to help with 5 | rapid prototyping of FPGA based designs on different platforms. It was originally written 6 | for developing gateware for cameras developed by the `Apertus collective `__ but has 7 | since been used (mostly by its authors) for various other projects and experiments. 8 | Generally I think that image processing is a great way to get started with FPGA 9 | design and Amaranth is especially a good start. Probably you will experience 10 | many rough edges and some frustration but also for me Amaranth and this project were 11 | the first big project with reconfigurable hardware :). 
12 | 13 | The docs in here are currently in a very rough state but might be able to give you some impression on 14 | whats included in naps and how to get started. The docs here assume that you are (at least a bit) 15 | familiar with Amaranth HDL specifically and FPGA design in general. If you are not, 16 | I recommend to start reading the `Amaranth HDL documentation `__. 17 | -------------------------------------------------------------------------------- /doc/naps.platform.rst: -------------------------------------------------------------------------------- 1 | naps.platform 2 | ------------- 3 | 4 | The naps.platform package contains board descriptions for various boards. 5 | The board descriptions are plain ``amaranth-boards`` descriptions with sometimes extra functionality. 6 | 7 | ``JTAGSoc`` requires platform classes to implement the ``generate_jtag_conf`` method 8 | (an `example can be found here `__). 9 | 10 | If you have a board that is not supported at the moment, you can create a file that is similiar 11 | to one of the existing ones - possibly also by just inheriting from one of the boards from 12 | `amaranth-boards `__. 13 | -------------------------------------------------------------------------------- /naps/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | try: 3 | from importlib import metadata as importlib_metadata # py3.8+ stdlib 4 | except ImportError: 5 | import importlib_metadata # py3.7- shim 6 | __version__ = importlib_metadata.version(__package__) 7 | except ImportError: 8 | # No importlib_metadata. This shouldn't normally happen, but some people prefer not installing 9 | # packages via pip at all, instead using PYTHONPATH directly or copying the package files into 10 | # `lib/pythonX.Y/site-packages`. Although not a recommended way, we still try to support it. 
11 | __version__ = "unknown" # :nocov: 12 | 13 | from .util import * 14 | from .data_structure import * 15 | from .stream import * 16 | from .vendor import * 17 | from .soc import * 18 | from .platform import * 19 | from .cores import * 20 | -------------------------------------------------------------------------------- /naps/cores/__init__.py: -------------------------------------------------------------------------------- 1 | from .stream import * 2 | from .compression import * 3 | from .video import * 4 | from .debug import * 5 | from .axi import * 6 | from .dram_packet_ringbuffer import * 7 | from .serdes import * 8 | from .jtag import * 9 | from .peripherals import * 10 | from .cmv12k import * 11 | from .hispi import * 12 | from .mipi import * 13 | from .hdmi import * 14 | from .ft601 import * 15 | from .plugin_module_streamer import * 16 | -------------------------------------------------------------------------------- /naps/cores/axi/__init__.py: -------------------------------------------------------------------------------- 1 | from .axi_endpoint import * 2 | from .full_to_lite import * 3 | from .interconnect import * 4 | from .peripheral_connector import * 5 | from .sim_util import * 6 | from .stream_reader import * 7 | from .stream_writer import * 8 | from .zynq_util import * 9 | -------------------------------------------------------------------------------- /naps/cores/axi/full_to_lite.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from . 
import AxiEndpoint 3 | 4 | __all__ = ["AxiFullToLiteBridge"] 5 | 6 | 7 | class AxiFullToLiteBridge(Elaboratable): 8 | def __init__(self, full_master: AxiEndpoint): 9 | assert not full_master.is_lite 10 | self._full_master = full_master 11 | self.lite_master = AxiEndpoint.like(full_master, lite=True, name="axi_lite_bridge_master") 12 | 13 | def elaborate(self, platform): 14 | m = Module() 15 | 16 | m.d.comb += self._full_master.connect_downstream(self.lite_master, allow_partial=True) 17 | 18 | # fake the id tracking 19 | read_id = Signal.like(self._full_master.read_data.id) 20 | write_id = Signal.like(self._full_master.write_data.id) 21 | 22 | with m.If(self._full_master.read_address.valid): 23 | m.d.comb += self._full_master.read_data.id.eq(self._full_master.read_address.id) 24 | m.d.sync += read_id.eq(self._full_master.read_address.id) 25 | with m.Else(): 26 | m.d.comb += self._full_master.read_data.id.eq(read_id) 27 | 28 | with m.If(self._full_master.write_address.valid): 29 | m.d.comb += self._full_master.write_response.id.eq(self._full_master.write_address.id) 30 | m.d.sync += write_id.eq(self._full_master.write_address.id) 31 | with m.Else(): 32 | m.d.comb += self._full_master.write_response.id.eq(write_id) 33 | 34 | m.d.comb += self._full_master.read_data.last.eq(1) 35 | 36 | return m 37 | -------------------------------------------------------------------------------- /naps/cores/axi/interconnect.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from amaranth import * 3 | from naps.util.amaranth_misc import iterator_with_if_elif, nAny 4 | from . import AxiEndpoint 5 | 6 | __all__ = ["AxiInterconnect"] 7 | 8 | 9 | class AxiInterconnect(Elaboratable): 10 | def __init__(self, upstream): 11 | """ 12 | A simple single master to many slaves AXI interconnect. 13 | 14 | :type upstream: AxiEndpoint 15 | :param upstream: The axi master to which the interconnect is connected. 
16 | """ 17 | assert upstream.is_lite, "AXI interconnect only supports AXI lite atm" 18 | self._upstream = upstream 19 | self._downstream_ports: List[AxiEndpoint] = [] 20 | 21 | def get_port(self): 22 | """ 23 | Gets a AXI master port connected to the master via this interconnect. 24 | 25 | :return: A new AxiInterface shaped after the upstream port. 26 | """ 27 | downstream_master = AxiEndpoint.like(self._upstream, name="axi_interconnect_downstream") 28 | self._downstream_ports.append(downstream_master) 29 | return downstream_master 30 | 31 | def elaborate(self, platform): 32 | m = Module() 33 | 34 | for downstream_port in self._downstream_ports: 35 | m.d.comb += downstream_port.read_address.connect_upstream(self._upstream.read_address) 36 | m.d.comb += downstream_port.write_address.connect_upstream(self._upstream.write_address) 37 | m.d.comb += downstream_port.write_data.connect_upstream(self._upstream.write_data) 38 | 39 | # wait until at least one peripherals is ready when writing the addresses 40 | m.d.comb += self._upstream.read_address.ready.eq(nAny(d.read_address.ready for d in self._downstream_ports)) 41 | m.d.comb += self._upstream.write_address.ready.eq(nAny(d.write_address.ready for d in self._downstream_ports)) 42 | 43 | # only one peripheral has to accept written data 44 | m.d.comb += self._upstream.write_data.ready.eq(nAny(d.write_data.ready for d in self._downstream_ports)) 45 | 46 | # we are creating priority encoders here: When multiple peripherals want to answer, we take the answer of the 47 | # first added peripheral 48 | for conditional, downstream_port in iterator_with_if_elif(self._downstream_ports, m): 49 | with conditional(downstream_port.read_data.valid): 50 | m.d.comb += self._upstream.read_data.connect_upstream(downstream_port.read_data) 51 | for conditional, downstream_port in iterator_with_if_elif(self._downstream_ports, m): 52 | with conditional(downstream_port.write_response.valid): 53 | m.d.comb += 
self._upstream.write_response.connect_upstream(downstream_port.write_response) 54 | 55 | return m 56 | -------------------------------------------------------------------------------- /naps/cores/axi/specification.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/naps/cores/axi/specification.pdf -------------------------------------------------------------------------------- /naps/cores/axi/stream_reader_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from amaranth.sim import Passive 3 | from naps import SimPlatform, BasicStream, write_to_stream, read_from_stream 4 | from naps.stream.formal_util import verify_stream_output_contract, LegalStreamSource 5 | from naps.cores.axi import AxiEndpoint, answer_read_burst 6 | from .stream_reader import AxiReader, AxiReaderBurster 7 | 8 | 9 | class TestAxiReader(unittest.TestCase): 10 | def test_basic(self): 11 | platform = SimPlatform() 12 | 13 | memory = {i: i + 100 for i in range(1000)} 14 | read_sequence = [ 15 | 0, 16 | 100, 17 | 108, 18 | 116, 19 | 2, 20 | 7, 21 | *[i + 100 for i in range(0, 400, 8)], 22 | 0, 23 | 100, 24 | 108, 25 | 116, 26 | 2, 27 | 7, 28 | ] 29 | golden_read_result = [memory[addr] for addr in read_sequence] 30 | 31 | axi = AxiEndpoint(addr_bits=32, data_bits=64, lite=False, id_bits=12) 32 | address_stream = BasicStream(32) 33 | dut = AxiReader(address_stream, axi) 34 | 35 | def write_address_process(): 36 | for addr in read_sequence: 37 | yield from write_to_stream(address_stream, payload=addr) 38 | platform.add_process(write_address_process, "sync") 39 | 40 | def read_data_process(): 41 | read_result = [] 42 | while len(read_result) < len(golden_read_result): 43 | read = yield from read_from_stream(dut.output) 44 | read_result.append(read) 45 | self.assertEqual(read_result, golden_read_result) 
46 | platform.add_process(read_data_process, "sync") 47 | 48 | def axi_answer_process(): 49 | yield Passive() 50 | while True: 51 | yield from answer_read_burst(axi, memory) 52 | platform.add_process(axi_answer_process, "sync") 53 | 54 | platform.add_sim_clock("sync", 100e6) 55 | platform.sim(dut) 56 | 57 | def test_reader_stream_output(self): 58 | axi = AxiEndpoint(addr_bits=32, data_bits=64, lite=False, id_bits=12) 59 | verify_stream_output_contract( 60 | AxiReader(BasicStream(32), axi), 61 | support_modules=(LegalStreamSource(axi.read_data),) 62 | ) 63 | 64 | def test_burster_stream_output(self): 65 | i = BasicStream(32) 66 | verify_stream_output_contract( 67 | AxiReaderBurster(i), 68 | support_modules=(LegalStreamSource(i),) 69 | ) 70 | -------------------------------------------------------------------------------- /naps/cores/axi/zynq_util.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from . import AxiEndpoint 3 | 4 | __all__ = ["if_none_get_zynq_hp_port"] 5 | 6 | 7 | def if_none_get_zynq_hp_port(maybe_axi_port, m, platform) -> AxiEndpoint: 8 | """If `maybe_axi_port` is None, grab an AXI HP port from the zynq and return it. 
Otherwise returns the passed in AXI port.""" 9 | if maybe_axi_port is not None: 10 | assert not maybe_axi_port.is_lite 11 | axi = maybe_axi_port 12 | else: 13 | clock_signal = Signal() 14 | m.d.comb += clock_signal.eq(ClockSignal()) 15 | axi = platform.ps7.get_axi_hp_slave(clock_signal) 16 | return axi 17 | -------------------------------------------------------------------------------- /naps/cores/cmv12k/__init__.py: -------------------------------------------------------------------------------- 1 | from .pixel_remapper import * 2 | from .cmv12k_rx import * 3 | from .cmv12k_spi import * -------------------------------------------------------------------------------- /naps/cores/cmv12k/pixel_remapper.py: -------------------------------------------------------------------------------- 1 | # remaps the cmv12k output lanes into an ImageStream with subsequent pixels 2 | # the raw lane output of the cmv12k is a bit wired 3 | 4 | from typing import List 5 | 6 | from amaranth import * 7 | from amaranth.lib import data 8 | from naps import BasicStream 9 | from naps.cores import ImageStream, BufferedSyncStreamFIFO 10 | 11 | __all__ = ["Cmv12kPixelRemapper"] 12 | 13 | class ControlChannelWord(data.Struct): 14 | data_valid: 1 15 | line_valid: 1 16 | frame_valid: 1 17 | fot: 1 18 | integration_1: 1 19 | integration_2: 1 20 | 21 | 22 | class Cmv12kPixelRemapper(Elaboratable): 23 | """Remaps one output port (if top and bottom outputs of the cmv12k are used, instantiate this twice!) to provide a linear output. 24 | 25 | Generally the CMV12000 sends its data out in bursts. 26 | The output format of the CMV12000 (and therefore the input for this core) depends on two factors: 27 | the number of lanes used and the presence of subsampling / binning. The burst length is described by 28 | line_length / n_lanes_per_side so for the non line_lengh of 4096 and 16 lanes per side 29 | (the default beta configuration) this equals to a burst_length of 256. 
30 | One Lane always sends adjacent pixels of bust_length out. 31 | 32 | """ 33 | def __init__(self, top_lanes: List[Signal], bottom_lanes: List[Signal], control_lane: Signal): 34 | self.lanes = top_lanes 35 | self.bottom_lanes = bottom_lanes 36 | self.control_lane = control_lane 37 | assert len(top_lanes) == len(bottom_lanes) 38 | 39 | self.lines_to_buffer = 64 // (len(top_lanes) + len(bottom_lanes)) 40 | self.n_bits = len(top_lanes[0]) 41 | self.n_lanes_per_side = len(top_lanes) 42 | 43 | self.output = ImageStream(self.n_bits) 44 | 45 | def elaborate(self, platform): 46 | m = Module() 47 | 48 | cw = ControlChannelWord(self.control_lane) 49 | 50 | lane_top_streams_buffered = [] 51 | for i, signal in enumerate(self.top_lanes): 52 | stream = BasicStream(self.n_bits) 53 | m.d.comb += stream.payload.eq(signal) 54 | m.d.comb += stream.valid.eq(cw.data_valid) 55 | 56 | fifo = m.submodules[f"fifo_top{i}"] = BufferedSyncStreamFIFO(stream, depth=128 * (self.lines_to_buffer + 1)) 57 | lane_top_streams_buffered.append(fifo.output) 58 | 59 | return m 60 | -------------------------------------------------------------------------------- /naps/cores/compression/__init__.py: -------------------------------------------------------------------------------- 1 | from .bit_stuffing import * 2 | from .encoding_space import * 3 | from .huffman_encoder import * 4 | from .rle import * 5 | -------------------------------------------------------------------------------- /naps/cores/compression/encoding_space.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ["EncodingSpace"] 3 | 4 | 5 | class EncodingSpace: 6 | @property 7 | def numeric_range(self): 8 | raise NotImplementedError() 9 | -------------------------------------------------------------------------------- /naps/cores/compression/huffman_encoder.py: -------------------------------------------------------------------------------- 1 | import huffman 2 | from amaranth import * 3 | 
from amaranth.lib.memory import Memory 4 | from naps import PacketizedStream, stream_transformer 5 | from . import VariableWidthStream 6 | 7 | __all__ = ["HuffmanEncoder"] 8 | 9 | 10 | class HuffmanEncoder(Elaboratable): 11 | def __init__(self, input: PacketizedStream, distribution): 12 | self.input = input 13 | 14 | self.distribution = distribution 15 | self.table = {k: v[::-1] for k, v in huffman.codebook(self.distribution.items()).items()} 16 | 17 | self.max_input_word = max(self.distribution.keys()) + 1 18 | self.max_code_len = max([len(v) for v in self.table.values()]) 19 | 20 | self.output = VariableWidthStream(self.max_code_len + 1) 21 | 22 | def elaborate(self, platform): 23 | m = Module() 24 | 25 | # this code is kind of similar to the StreamMemoryReader but not quite the same as it operates two memories at the same time 26 | stream_transformer(self.input, self.output, m, latency=1, allow_partial_out_of_band=True) 27 | input_transaction = self.input.ready & self.input.valid 28 | 29 | code_memory = m.submodules.code_memory = Memory(shape=self.max_code_len, depth=self.max_input_word, init=[int(self.table.get(i, '0'), 2) for i in range(self.max_input_word)]) 30 | code_port = code_memory.read_port(domain="sync") 31 | m.d.comb += code_port.en.eq(input_transaction) 32 | m.d.comb += code_port.addr.eq(self.input.payload) 33 | m.d.comb += self.output.payload.eq(code_port.data) 34 | 35 | m.submodules.code_len_memory = code_len_memory = Memory(shape=self.max_code_len, depth=self.max_input_word, init=[len(self.table.get(i, '')) for i in range(self.max_input_word)]) 36 | len_port = code_len_memory.read_port(domain="sync") 37 | m.d.comb += len_port.en.eq(input_transaction) 38 | m.d.comb += len_port.addr.eq(self.input.payload) 39 | m.d.comb += self.output.current_width.eq(len_port.data) 40 | 41 | return m 42 | -------------------------------------------------------------------------------- /naps/cores/compression/rle_test.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | from amaranth import * 3 | from naps import SimPlatform 4 | from naps.stream import PacketizedStream, verify_stream_output_contract, LegalStreamSource, write_to_stream, read_from_stream 5 | from . import ZeroRleEncoder, RleEncodingSpace 6 | 7 | 8 | class RleTest(unittest.TestCase): 9 | def test_basic(self): 10 | platform = SimPlatform() 11 | m = Module() 12 | 13 | input = PacketizedStream(8) 14 | input_data = [1, 0, 1, *([0] * 14), 1] 15 | run_length_options = [3, 10, 27, 80, 160] 16 | 17 | rle = m.submodules.rle = ZeroRleEncoder(input, RleEncodingSpace(range(0, 255), run_length_options, zero_value=0)) 18 | 19 | def write_process(): 20 | for x in input_data: 21 | yield from write_to_stream(input, payload=x) 22 | 23 | def read_process(): 24 | received = [] 25 | while True: 26 | try: 27 | received.append((yield from read_from_stream(rle.output))) 28 | except TimeoutError: 29 | break 30 | decoded = [] 31 | for x in received: 32 | if x < 256: 33 | decoded.append(x) 34 | else: 35 | decoded += ([0] * run_length_options[x - 256]) 36 | self.assertEqual(input_data, decoded) 37 | 38 | platform.add_sim_clock("sync", 100e6) 39 | platform.add_process(write_process, "sync") 40 | platform.sim(m, read_process) 41 | 42 | def test_output_stream_properties(self): 43 | input = PacketizedStream(8) 44 | encoding_space = RleEncodingSpace(range(0, 255), [3, 10, 27, 80, 160], zero_value=0) 45 | verify_stream_output_contract(ZeroRleEncoder(input, encoding_space), support_modules=(LegalStreamSource(input),)) 46 | -------------------------------------------------------------------------------- /naps/cores/debug/__init__.py: -------------------------------------------------------------------------------- 1 | from .blink_debug import * 2 | from .clocking_debug import * 3 | from .packet_console import * 4 | from .ila import * 5 | from .tracer import * 6 | from .fsm_status_reg import * 7 | 
-------------------------------------------------------------------------------- /naps/cores/debug/blink_debug.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | 3 | __all__ = ["BlinkDebug"] 4 | 5 | 6 | class BlinkDebug(Elaboratable): 7 | def __init__(self, led, divider=20, max_value=8): 8 | self.led = led 9 | self.max_value = max_value 10 | self.divider = divider 11 | 12 | self.value = Signal(range(max_value)) 13 | 14 | def elaborate(self, platform): 15 | m = Module() 16 | 17 | div_counter = Signal(self.divider) 18 | m.d.sync += div_counter.eq(div_counter + 1) 19 | 20 | def next_div(m, next_state): 21 | with m.If(div_counter == 0): 22 | m.next = next_state 23 | 24 | with m.FSM(): 25 | for i in range(self.max_value): 26 | with m.State("ON_{}".format(i)): 27 | m.d.comb += self.led.eq(1) 28 | next_div(m, "OFF_{}".format(i)) 29 | 30 | with m.State("OFF_{}".format(i)): 31 | m.d.comb += self.led.eq(0) 32 | if i != self.max_value - 1: 33 | with m.If(self.value > i): 34 | next_div(m, "ON_{}".format(i + 1)) 35 | with m.Else(): 36 | next_div(m, "IDLE_0") 37 | else: 38 | next_div(m, "IDLE_0") 39 | 40 | idle_states = 4 41 | for i in range(idle_states): 42 | with m.State("IDLE_{}".format(i)): 43 | m.d.comb += self.led.eq(0) 44 | if i != idle_states - 1: 45 | next_div(m, "IDLE_{}".format(i + 1)) 46 | else: 47 | next_div(m, "ON_0") 48 | 49 | return m 50 | -------------------------------------------------------------------------------- /naps/cores/debug/clocking_debug.py: -------------------------------------------------------------------------------- 1 | # reports the clock frequencies of the design using csr infrastructure 2 | 3 | from amaranth import * 4 | from naps import StatusSignal, driver_property 5 | 6 | __all__ = ["ClockingDebug", "ClockDebug"] 7 | 8 | 9 | class ClockingDebug(Elaboratable): 10 | def __init__(self, *args): 11 | self.clockdomains = args 12 | 13 | def elaborate(self, platform): 14 | m = 
Module() 15 | 16 | for cd in self.clockdomains: 17 | if isinstance(cd, tuple): 18 | cd, reset_less = cd 19 | else: 20 | reset_less = False 21 | m.submodules[cd] = ClockDebug(cd, reset_less) 22 | 23 | return m 24 | 25 | 26 | class ClockDebug(Elaboratable): 27 | def __init__(self, domain_name, reset_less=False): 28 | self.domain_name = domain_name 29 | self.reset_less = reset_less 30 | 31 | self.counter = StatusSignal(32) 32 | if not self.reset_less: 33 | self.is_reset = StatusSignal() 34 | 35 | def elaborate(self, platform): 36 | m = Module() 37 | 38 | m.d[self.domain_name] += self.counter.eq(self.counter + 1) 39 | if not self.reset_less: 40 | m.d.comb += self.is_reset.eq(ResetSignal(self.domain_name)) 41 | 42 | return m 43 | 44 | @driver_property 45 | def mhz(self): 46 | from time import sleep, time 47 | initial_counter = self.counter 48 | start = time() 49 | sleep(0.1) 50 | counter_difference = (self.counter - initial_counter) 51 | return counter_difference * (1 / (time() - start)) / 1e6 52 | -------------------------------------------------------------------------------- /naps/cores/debug/fsm_status_reg.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from amaranth import * 4 | 5 | from naps import StatusSignal, SocPlatform 6 | 7 | __all__ = ["fsm_status_reg"] 8 | 9 | 10 | def fsm_status_reg(platform, m, fsm): 11 | if isinstance(platform, SocPlatform): 12 | fsm_state = StatusSignal(name=f"{fsm.state.name}_reg") # TODO: use meaningful shape value here (needs deferring) 13 | def signal_fixup_hook(platform, top_fragment: Fragment): 14 | fsm_state.width = fsm.state.width 15 | fsm_state.decoder = fsm.state.decoder 16 | platform.prepare_hooks.insert(0, signal_fixup_hook) 17 | m.d.comb += fsm_state.eq(fsm.state) 18 | -------------------------------------------------------------------------------- /naps/cores/debug/ila_test.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from amaranth import * 4 | from naps import SimPlatform, do_nothing, SimSocPlatform, StatusSignal, probe, trigger, add_ila 5 | 6 | 7 | class IlaTest(unittest.TestCase): 8 | def test_with_driver(self): 9 | platform = SimSocPlatform(SimPlatform()) 10 | 11 | class Top(Elaboratable): 12 | def __init__(self): 13 | self.up_counter = StatusSignal(16) 14 | self.down_counter = StatusSignal(16, init=1000) 15 | 16 | def elaborate(self, platform): 17 | m = Module() 18 | m.d.sync += self.up_counter.eq(self.up_counter + 1) 19 | m.d.sync += self.down_counter.eq(self.down_counter - 1) 20 | 21 | add_ila(platform, trace_length=100) 22 | probe(m, self.up_counter) 23 | probe(m, self.down_counter) 24 | trigger(m, self.up_counter > 200) 25 | return m 26 | 27 | def driver(design): 28 | design.ila.arm() 29 | yield from do_nothing(1000) 30 | ila_trace = list(design.ila.get_values()) 31 | last_up = 150 32 | last_down = 1000 - 150 33 | assert len(ila_trace) == 100 34 | for up, down in ila_trace: 35 | assert up == last_up + 1, (up, last_up) 36 | last_up = up 37 | assert down == last_down - 1, (down, last_down) 38 | last_down = down 39 | platform.add_driver(driver) 40 | 41 | platform.add_sim_clock('sync', 10e6) 42 | platform.sim(Top()) 43 | -------------------------------------------------------------------------------- /naps/cores/debug/packet_console_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from amaranth import * 4 | 5 | from naps import SimPlatform, verify_stream_output_contract, do_nothing, SimSocPlatform 6 | from naps.cores.debug.packet_console import ConsolePacketSource, ConsolePacketSink 7 | 8 | 9 | class PacketConsoleTest(unittest.TestCase): 10 | def check_roundtrip_complex(self, test_packet): 11 | platform = SimSocPlatform(SimPlatform()) 12 | 13 | m = Module() 14 | 15 | source = m.submodules.source = 
ConsolePacketSource() 16 | sink = m.submodules.sink = ConsolePacketSink(source.output) 17 | 18 | def driver(design): 19 | 20 | design.source.write_packet(test_packet) 21 | yield from do_nothing(20) 22 | design.source.write_packet(test_packet) 23 | yield from do_nothing(20) 24 | self.assertEqual(test_packet, design.sink.read_packet()) 25 | yield from do_nothing(20) 26 | self.assertEqual(test_packet, design.sink.read_packet()) 27 | platform.add_driver(driver) 28 | 29 | platform.add_sim_clock("sync", 100e6) 30 | platform.sim(m) 31 | 32 | def test_roundtrip_complex(self): 33 | self.check_roundtrip_complex(test_packet=[10, 20, 30, 40, 0]) 34 | 35 | def test_roundtrip_one_byte(self): 36 | self.check_roundtrip_complex(test_packet=[0x42]) 37 | 38 | def test_source_output_stream_contract(self): 39 | def dut(): 40 | dut = ConsolePacketSource() 41 | return (dut, dut.output, []) 42 | verify_stream_output_contract(dut) 43 | -------------------------------------------------------------------------------- /naps/cores/debug/tracer.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | 3 | from naps import driver_method, StatusSignal, Changed 4 | from ..peripherals import SocMemory 5 | 6 | __all__ = ["Tracer"] 7 | 8 | 9 | class Tracer(Elaboratable): 10 | def __init__(self, fsm, trace_length=128): 11 | self.fsm = fsm 12 | self.trace_length = trace_length 13 | self.write_ptr = StatusSignal(range(trace_length)) 14 | self.trace_decoder = {} 15 | 16 | def elaborate(self, platform): 17 | m = Module() 18 | 19 | mem = SocMemory(shape=len(self.fsm.state), depth=self.trace_length, soc_write=False) 20 | write_port = mem.write_port(domain="sync") 21 | with m.If(Changed(m, self.fsm.state)): 22 | m.d.comb += write_port.en.eq(1) 23 | m.d.comb += write_port.data.eq(self.fsm.state) 24 | m.d.comb += write_port.addr.eq(self.write_ptr) 25 | with m.If(self.write_ptr < self.trace_length): 26 | m.d.sync += self.write_ptr.eq(self.write_ptr + 1) 
27 | with m.Else(): 28 | m.d.sync += self.write_ptr.eq(0) 29 | 30 | self.trace_decoder.update(self.fsm.decoding) 31 | m.submodules.mem = mem 32 | 33 | return m 34 | 35 | @driver_method 36 | def print_trace(self): 37 | r = list(range(self.trace_length)) 38 | for i in r[self.write_ptr:] + r[:self.write_ptr]: 39 | print(self.trace_decoder[self.mem[i]]) 40 | -------------------------------------------------------------------------------- /naps/cores/dram_packet_ringbuffer/__init__.py: -------------------------------------------------------------------------------- 1 | from .cpu_if import * 2 | from .stream_if import * 3 | -------------------------------------------------------------------------------- /naps/cores/dram_packet_ringbuffer/cpu_if.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from naps import ControlSignal, StatusSignal, driver_method 3 | from .stream_if import DramPacketRingbufferStreamWriter 4 | 5 | __all__ = ["DramPacketRingbufferCpuReader", "DramPacketRingbufferCpuWriter"] 6 | 7 | 8 | class DramPacketRingbufferCpuWriter(Elaboratable): 9 | def __init__( 10 | self, 11 | max_packet_size, n_buffers, base_address=0x0f80_0000, default_packet_size=0 12 | ): 13 | self.max_packet_size = max_packet_size 14 | self.base_address = base_address 15 | self.n_buffers = n_buffers 16 | 17 | self.buffer_base_list = Array([base_address + max_packet_size * i for i in range(n_buffers)]) 18 | self.buffer_level_list = Array([ControlSignal(range(max_packet_size), init=default_packet_size) for _ in range(n_buffers)]) 19 | self.current_write_buffer = ControlSignal(range(n_buffers)) 20 | 21 | for i, signal in enumerate(self.buffer_level_list): 22 | setattr(self, f"buffer{i}_level", signal) 23 | 24 | def elaborate(self, platform): 25 | return Module() 26 | 27 | # TODO: implement driver methods 28 | 29 | 30 | class DramPacketRingbufferCpuReader(Elaboratable): 31 | def __init__(self, writer: 
DramPacketRingbufferStreamWriter): 32 | self.writer = writer 33 | self.n_buffers = writer.n_buffers 34 | 35 | self.current_write_buffer = StatusSignal(range(self.n_buffers)) 36 | 37 | # note the base and level StatusSignals generated in elaborate() 38 | 39 | def elaborate(self, platform): 40 | m = Module() 41 | 42 | m.d.sync += self.current_write_buffer.eq(self.writer.current_write_buffer) 43 | 44 | for i, (base, level) in enumerate(zip(self.writer.buffer_base_list, self.writer.buffer_level_list)): 45 | setattr(self, f"buffer{i}_base", base) # base addresses are constant 46 | 47 | buffer_level = StatusSignal(level.shape()) 48 | m.d.sync += buffer_level.eq(level) 49 | setattr(self, f"buffer{i}_level", buffer_level) 50 | 51 | return m 52 | 53 | @driver_method 54 | def read_packet_to_file(self, filename="packet.bin"): 55 | import os, mmap 56 | 57 | buf = (self.current_write_buffer - 1) % self.n_buffers 58 | base = getattr(self, f"buffer{buf}_base") 59 | length = getattr(self, f"buffer{buf}_level") 60 | 61 | fd = os.open("/dev/mem", os.O_RDONLY) 62 | with mmap.mmap(fd, length, prot=mmap.PROT_READ, offset=base) as mm: 63 | with open(filename, "wb") as f: 64 | f.write(mm) 65 | os.close(fd) 66 | -------------------------------------------------------------------------------- /naps/cores/dram_packet_ringbuffer/stream_if_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from amaranth import * 3 | from naps import SimPlatform 4 | from naps.cores.axi import axi_ram_sim_model 5 | from naps.stream import PacketizedStream, write_packet_to_stream, read_packet_from_stream 6 | from . 
class StreamIfTest(unittest.TestCase):
    def test_integration(self):
        """End-to-end test: stream writer -> simulated AXI DRAM -> stream
        reader, with a CPU reader observing the buffer bookkeeping."""
        plat = SimPlatform()
        m = Module()
        # two independent AXI ports onto the same simulated memory
        writer_axi_port, reader_axi_port = axi_ram_sim_model(plat)

        input_stream = PacketizedStream(64)
        writer = m.submodules.writer = DramPacketRingbufferStreamWriter(input_stream, base_address=0, max_packet_size=10000, n_buffers=4, axi=writer_axi_port)
        reader = m.submodules.reader = DramPacketRingbufferStreamReader(writer, axi=reader_axi_port)
        cpu_reader = m.submodules.cpu_reader = DramPacketRingbufferCpuReader(writer)

        def testbench():
            test_packets = [
                [0 for _ in range(100)],
                [i for i in range(100)]
            ]

            # write both packets, then read them back through the ringbuffer
            for p in test_packets:
                yield from write_packet_to_stream(input_stream, p)

            read_packets = []
            while len(read_packets) < len(test_packets):
                read = yield from read_packet_from_stream(reader.output)
                read_packets.append(read)

            self.assertEqual(read_packets, test_packets)
            # 100 words of the 64 bit stream = 800 bytes per buffer
            self.assertEqual((yield writer.buffer_level_list[0]), 800)
            self.assertEqual((yield writer.buffer_level_list[1]), 800)

            # the CPU reader mirrors the writer's levels
            self.assertEqual((yield cpu_reader.buffer0_level), 800)
            self.assertEqual((yield cpu_reader.buffer1_level), 800)

        plat.add_sim_clock("sync", 100e6)
        plat.sim(m, testbench)
# a helper for diagnosing ft601 performance

from amaranth import *

__all__ = ["FT601PerfDebug"]


class FT601PerfDebug(Elaboratable):
    """Drives the FT601 write interface whenever it signals fifo space and
    streams out two counters (current burst length / preceding idle length)
    so throughput behaviour can be inspected on the host side."""

    def __init__(self, ft601_resource):
        self.ft_601_resource = ft601_resource
        self.burst_counter = Signal(16)  # cycles written in the current burst
        self.idle_counter = Signal(16)   # cycles since the last write

    def elaborate(self, platform):
        m = Module()

        ft = self.ft_601_resource

        # run all logic on the clock provided by the FT601 chip
        m.domains += ClockDomain("ft")
        m.d.comb += ClockSignal("ft").eq(ft.clk.i)

        # elaboration-time knob: cycles to wait before starting a new burst;
        # with 0 the pause branch below is effectively disabled
        pause_cycles = 0
        in_transaction = Signal()
        pause_counter = Signal(range(pause_cycles + 1))

        with m.If((ft.txe.i & in_transaction) | (ft.txe.i & (pause_counter == pause_cycles))):  # we have space in the transmit fifo
            m.d.ft += self.burst_counter.eq(self.burst_counter + 1)
            m.d.comb += ft.write.o.eq(1)
            m.d.ft += self.idle_counter.eq(0)
            m.d.ft += in_transaction.eq(1)
            m.d.ft += pause_counter.eq(0)
        with m.Else():
            # not writing: count idle time and run down the pause counter
            m.d.ft += self.burst_counter.eq(0)
            m.d.ft += self.idle_counter.eq(self.idle_counter + 1)
            m.d.ft += in_transaction.eq(0)
            with m.If(pause_counter < pause_cycles):
                m.d.ft += pause_counter.eq(pause_counter + 1)

        m.d.comb += ft.be.o.eq(0b1111)  # everything we write is valid
        m.d.comb += ft.oe.o.eq(0)  # we are driving the data bits all the time
        m.d.comb += ft.data.o.eq(Cat(self.burst_counter, self.idle_counter))

        return m
class TestFt601StreamSink(TestCase):
    def test_smoke(self):
        """Feeds a counter stream into the FT601 sink and emulates the FT601
        handshake: txe is raised, a full 2048-word burst is expected, and the
        written data must form one contiguous counter sequence."""
        m = Module()

        platform = SimPlatform()
        platform.add_sim_clock("sync", 50e6)
        platform.add_sim_clock("ft601_outer", 100e6)

        ft601 = Ft601FakeResource()
        stream_counter = m.submodules.stream_counter = CounterStreamSource(32, count_if_not_ready=True)
        m.d.comb += ft601.clk.i.eq(ClockSignal("ft601_outer"))
        m.submodules.dut = FT601StreamSink(ft601, stream_counter.output)

        def testbench():
            read = []
            for i in range(3):
                # signal fifo space; the dut may start a burst now
                yield ft601.txe.i.eq(1)
                written = 0
                began = False
                while True:
                    if not began:
                        if (yield ft601.write.o):
                            began = True
                    if began:
                        if (yield ft601.write.o):
                            written += 1
                            read.append((yield ft601.data.o))
                        else:
                            # write deasserted mid-burst: end this round
                            yield ft601.txe.i.eq(0)
                            break
                    if written == 2048:
                        # a full burst was transferred; withdraw txe
                        yield ft601.txe.i.eq(0)
                        break
                    yield
                yield
                assert written == 2048
                # with txe low the dut must not write
                for i in range(200):
                    yield
                    assert (yield ft601.write.o) == 0, "write was high in idle cycle {}".format(i)

            # validate the received data
            print(read)
            last = 0
            for v in read:
                assert v == last
                last += 1

        import sys
        sys.setrecursionlimit(1500)  # this test compiles a rather large memory and fails with the standard recursion limit
        platform.sim(m, (testbench, "ft601"))
import shlex
from dataclasses import dataclass

__all__ = ['VideoTiming', 'parse_modeline']


@dataclass
class VideoTiming:
    """Video timing parameters of a display mode (xorg modeline semantics)."""
    pxclk: float  # pixel clock in MHz
    hres: int
    hsync_start: int
    hsync_end: int
    hscan: int
    vres: int
    vsync_start: int
    vsync_end: int
    vscan: int


def parse_modeline(modeline: str) -> VideoTiming:
    """Parse an xorg-style ``Modeline`` string into a :class:`VideoTiming`.

    Example input::

        Modeline "640x480_60.00" 23.75 640 664 720 800 480 483 487 500 -hsync +vsync

    :raises ValueError: if the string is not a modeline or does not contain
        all eight timing values.
    """
    # a bare ``assert`` here would be silently skipped under ``python -O``
    if not modeline.startswith("Modeline"):
        raise ValueError(f"not a modeline: {modeline!r}")
    tokens = shlex.split(modeline)

    names = ["hres", "hsync_start", "hsync_end", "hscan", "vres", "vsync_start", "vsync_end", "vscan"]
    # take exactly the eight timing values after the pixel clock; unlike the
    # former `[3:-2]` slice this does not depend on exactly two sync-polarity
    # flags trailing the numbers
    values = tokens[3:3 + len(names)]
    if len(values) != len(names):
        raise ValueError(f"modeline is missing timing values: {modeline!r}")

    return_dict = dict(zip(names, map(int, values)))
    return_dict["pxclk"] = float(tokens[2])

    return VideoTiming(**return_dict)
from amaranth import *
from naps import iterator_with_if_elif
from ..tmds import tmds_control_tokens


class TmdsDecoder(Elaboratable):
    """
    Decodes tmds signals.

    If the 10-bit input matches one of the four TMDS control tokens, the token
    index is presented on `control` and `data_enable` is low; otherwise the
    input is decoded as a video data byte on `data` with `data_enable` high.
    """
    def __init__(self, input: Signal):
        self.input = input  # raw 10 bit TMDS character

        self.data_enable = Signal()  # high while `data` carries a decoded byte
        self.data = Signal(8)
        self.control = Signal(2)  # only meaningful while data_enable is low

    def elaborate(self, platform):
        m = Module()

        # build an if/elif chain over the four control tokens; the final
        # m.Else() attaches to the last comparison and handles video data
        for cond, (i, token) in iterator_with_if_elif(enumerate(tmds_control_tokens), m):
            with cond(self.input == token):
                m.d.comb += self.data_enable.eq(0)
                m.d.comb += self.control.eq(i)
            with m.Else():
                # bit 9 signals that the payload bits were inverted by the encoder
                inverted = Signal(8)
                m.d.comb += inverted.eq(Mux(self.input[9], ~self.input[0:8], self.input[0:8]))
                # undo the transition-minimising chain: each decoded bit is the
                # xor of two adjacent transmitted bits
                xored = Signal(8)
                m.d.comb += xored[0].eq(inverted[0])
                m.d.comb += xored[1].eq(inverted[1] ^ inverted[0])
                m.d.comb += xored[2].eq(inverted[2] ^ inverted[1])
                m.d.comb += xored[3].eq(inverted[3] ^ inverted[2])
                m.d.comb += xored[4].eq(inverted[4] ^ inverted[3])
                m.d.comb += xored[5].eq(inverted[5] ^ inverted[4])
                m.d.comb += xored[6].eq(inverted[6] ^ inverted[5])
                m.d.comb += xored[7].eq(inverted[7] ^ inverted[6])
                # bit 8 selects whether the encoder used XOR or XNOR chaining
                with m.If(self.input[8]):  # XOR encoding
                    m.d.comb += self.data.eq(xored)
                with m.Else():  # XNOR encoding
                    m.d.comb += self.data.eq(Cat(xored[0], ~xored[1:]))
                m.d.comb += self.data_enable.eq(1)

        return m
["tmds_control_tokens"] 2 | 3 | tmds_control_tokens = [0b1101010100, 0b0010101011, 0b0101010100, 0b1010101011] -------------------------------------------------------------------------------- /naps/cores/hdmi/tmds_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import random 3 | from amaranth import * 4 | from naps import SimPlatform 5 | from naps.cores.hdmi.tx.tmds_encoder import TmdsEncoder 6 | from naps.cores.hdmi.rx.tmds_decoder import TmdsDecoder 7 | 8 | 9 | class TestTmds(unittest.TestCase): 10 | def test_roundtrip(self): 11 | # connect a tmds encoder to a tmds decoder and verify that we receive the characters we have sent. 12 | platform = SimPlatform() 13 | m = Module() 14 | 15 | data = Signal(8) 16 | control = Signal(2) 17 | data_enable = Signal(8) 18 | encoder = m.submodules.encoder = TmdsEncoder(data, control, data_enable) 19 | decoder = m.submodules.decoder = TmdsDecoder(encoder.out) 20 | 21 | random.seed(0) 22 | test_sequence = [10, *[random.randrange(0, 255 + 4) for _ in range(0, 1000)]] 23 | 24 | def writer(): 25 | for x in test_sequence: 26 | if x < 256: 27 | yield data.eq(x) 28 | yield data_enable.eq(1) 29 | else: 30 | yield data_enable.eq(0) 31 | yield control.eq(x >> 8) 32 | yield 33 | platform.add_process(writer, "sync") 34 | 35 | def reader(): 36 | active = False 37 | seq = [*test_sequence] 38 | while len(seq): 39 | x = seq[0] 40 | if active: 41 | seq.pop(0) 42 | 43 | if active: 44 | if x < 256: 45 | self.assertEqual(x, (yield data)) 46 | self.assertEqual(1, (yield data_enable)) 47 | else: 48 | self.assertEqual(0, (yield data_enable)) 49 | self.assertEqual(x >> 8, (yield control)) 50 | elif (yield data) == x: 51 | active = True 52 | seq.pop(0) 53 | yield 54 | platform.add_process(reader, "sync") 55 | 56 | 57 | 58 | platform.add_sim_clock("sync", 100e6) 59 | platform.sim(m) 60 | -------------------------------------------------------------------------------- 
class BertlPatternGenerator(Elaboratable):
    """Generates a static x/y-gradient test pattern for a `width` x `height`
    frame (red follows x, green follows y, blue mixes the high bits)."""

    def __init__(self, width, height):
        # bugfix / consistency: `Signal.like(width)` sized the counters to the
        # shape of the integer constant itself (one bit too wide for exact
        # powers of two); `Signal(range(...))` sizes them for the coordinate
        # range 0..width-1 / 0..height-1, matching DimmingPatternGenerator.
        self.x = Signal(range(width))
        self.y = Signal(range(height))
        self.out = RGB24()

    def elaborate(self, platform):
        m = Module()

        m.d.comb += self.out.r.eq(self.x[0:8])
        m.d.comb += self.out.g.eq(self.y[0:8])
        m.d.comb += self.out.b.eq(Cat(Signal(3), self.y[8:10], self.x[8:11]))

        return m
class DimmingPatternGenerator(Elaboratable):
    """Generates horizontal r/g/b gradient ramps in the first 3*256 pixel
    columns of a `width` x `height` frame."""

    def __init__(self, width, height):
        self.x = Signal(range(width))
        self.y = Signal(range(height))
        self.out = RGB24()

    def elaborate(self, platform):
        m = Module()

        # counts whole frames (increments once per frame at x==y==0) and
        # wraps after 3*256 frames.
        # NOTE(review): frame_counter is never used in the output logic below
        # — looks like an unfinished dimming feature; confirm intent.
        frame_counter = Signal(range(256 * 3 + 1))
        with m.If((self.x == 0) & (self.y == 0) & (frame_counter < 256 * 3)):
            m.d.sync += frame_counter.eq(frame_counter + 1)
        with m.Elif((self.x == 0) & (self.y == 0)):
            m.d.sync += frame_counter.eq(0)

        # red ramp in columns 0..255, green in 256..511, blue in 512..767
        # (the assignments truncate x to the channel width)
        with m.If(self.x < 256 * 1):
            m.d.comb += self.out.r.eq(self.x)
        with m.Elif(self.x < 256 * 2):
            m.d.comb += self.out.g.eq(self.x)
        with m.Elif(self.x < 256 * 3):
            m.d.comb += self.out.b.eq(self.x)

        return m
0 32 | line_ctr = 0 33 | for i in range(200000): 34 | try: 35 | if (yield dut.do_bitslip): 36 | next(generator) 37 | word = int("".join(reversed([next(generator) for _ in range(12)])), 38 | 2) # TODO: figure out, how this reversed() affects the real world 39 | yield input_data.eq(word) 40 | except RuntimeError: # this is raised when we are done instead of StopIteration 41 | break 42 | 43 | if (last_valid == 0) and ((yield dut.output.valid) == 1): 44 | last_valid = 1 45 | line_ctr = 0 46 | elif (last_valid == 1) and ((yield dut.output.valid) == 1): 47 | line_ctr += 1 48 | elif (last_valid == 1) and ((yield dut.output.valid) == 0): 49 | last_valid = 0 50 | assert (line_ctr + 1) * 4 == 2304 51 | 52 | yield 53 | assert (yield dut.is_aligned) == 1, "dut is not aligned" 54 | 55 | platform.add_sim_clock("sync", 100e6) 56 | platform.sim(dut, testbench) 57 | 58 | def test_hispi_lane_manager_old_data(self): 59 | self.check_hispi_lane_manager("test_data_old.txt.lzma") 60 | 61 | def test_hispi_lane_manager_new_data(self): 62 | self.check_hispi_lane_manager("test_data_new.txt.lzma") -------------------------------------------------------------------------------- /naps/cores/hispi/test_data_new.txt.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/naps/cores/hispi/test_data_new.txt.lzma -------------------------------------------------------------------------------- /naps/cores/hispi/test_data_old.txt.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/naps/cores/hispi/test_data_old.txt.lzma -------------------------------------------------------------------------------- /naps/cores/jtag/__init__.py: -------------------------------------------------------------------------------- 1 | from 
from functools import reduce

from amaranth import *
from amaranth.lib import data

__all__ = ["DataIdentifier", "calculate_ecc", "PacketHeader"]


class DataIdentifier(data.Struct):
    """The Data Identifier (DI) byte of a MIPI packet header: a 6 bit data
    type plus a 2 bit virtual channel identifier."""
    data_type: 6
    virtual_channel_identifier: 2

    def is_long_packet(self):
        # Data types 0x00..0x0F are *short* packets, everything above is a
        # long packet (MIPI CSI-2). The previous `<= 0x0F` had this inverted,
        # which classified e.g. RAW8 (0x2A) as a short packet so payloads
        # were never tracked.
        return self.data_type > 0x0F


def calculate_ecc(header):
    """Compute the ECC byte over the first 24 bits of a packet header.

    Each table row lists the header bit positions that are XOR-folded into
    one ECC bit; the table is given MSB-first in the MIPI specification and
    therefore reversed here. The top two ECC bits are always zero.
    """
    ecc_table = reversed([
        [],
        [],
        [          10, 11, 12, 13, 14, 15, 16, 17, 18, 19,     21, 22, 23],
        [  4, 5, 6, 7, 8, 9,           16, 17, 18, 19, 20,         22, 23],
        [1, 2, 3,       7, 8, 9,       13, 14, 15,         19, 20, 21, 23],
        [0,    2, 3,    5, 6,    9,    11, 12,     15,     18, 20, 21, 22],
        [0, 1,    3, 4,    6,    8,    10,     12,     14, 17, 20, 21, 22, 23],
        [0, 1, 2,    4, 5,    7,       10, 11,     13, 16, 20, 21, 22, 23]
    ])
    return Cat(reduce(lambda a, b: a ^ b, (header[i] for i in row), 0) for row in ecc_table)


class PacketHeader(data.Struct):
    """The 32 bit MIPI packet header: DI byte, 16 bit word count, 8 bit ECC."""
    data_id: DataIdentifier
    word_count: 16
    ecc: 8

    def calculate_ecc(self):
        """ECC recomputed from the header contents as received."""
        return calculate_ecc(self.as_value())

    def is_packet_valid(self):
        # one could also recover bit errors using the ecc; maybe do this here
        return self.calculate_ecc() == self.ecc
from amaranth import *
from naps.cores.video import ImageStream
from .. import PacketHeader
from .aligner import CsiWordAligner

__all__ = ["CsiPacketLayer"]


class CsiPacketLayer(Elaboratable):
    """Interprets the word-aligned CSI byte stream as packets: validates
    candidate packet headers via their ECC and tracks the extent of long
    packets so the aligner knows when it is inside a packet."""

    def __init__(self, lane_word_aligner: CsiWordAligner):
        self.lane_word_aligner = lane_word_aligner

        self.output = ImageStream(32)  # we currently only support raw8 with 4 pixels / cycle

    def elaborate(self, platform):
        m = Module()

        # remaining payload words of the long packet currently being received
        packet_ctr = Signal(16)
        with m.FSM():
            with m.State("IDLE"):
                with m.If(self.lane_word_aligner.maybe_first_packet_byte):
                    # reinterpret the aligned word as a packet header
                    packet_header = PacketHeader(self.lane_word_aligner.output)
                    with m.If(packet_header.is_packet_valid()):  # ECC check
                        m.d.comb += self.lane_word_aligner.in_packet.eq(1)
                        with m.If(packet_header.data_id.is_long_packet()):
                            # long packets carry word_count payload words
                            m.next = "LONG_PACKET"
                            m.d.sync += packet_ctr.eq(packet_header.word_count)

            with m.State("LONG_PACKET"):
                m.d.comb += self.lane_word_aligner.in_packet.eq(1)
                with m.If(packet_ctr > 4):  # we always process 4 words per cycle
                    m.d.sync += packet_ctr.eq(packet_ctr - 4)
                with m.Else():
                    m.d.sync += packet_ctr.eq(0)
                    m.next = "IDLE"
        # TODO: steal crc from https://gitlab.com/harmoninstruments/harmon-instruments-open-hdl/-/blob/master/Ethernet/CRC.py
        # think about how to extract the packet footer

        return m
__all__ = ["MipiClockRxPhy", "MipiLaneRxPhy"]


class MipiClockRxPhy(Elaboratable):
    """Drives the sync domain with the word clock and produces a ddr bit clock derived from the clock lane at `pin`"""
    def __init__(self, pin, ddr_domain):
        self.pin = pin
        self.ddr_domain = ddr_domain

    def elaborate(self, platform):
        m = Module()

        m.domains += ClockDomain(self.ddr_domain)

        # the bit clock is the lane clock itself, distributed through a BUFIO
        bufio = m.submodules.bufio = BufIO(self.pin)
        m.d.comb += ClockSignal(self.ddr_domain).eq(bufio.o)

        # word clock = lane clock / 4 (8 bit words deserialized at DDR rate)
        divider = m.submodules.divider = ClockDivider(self.pin, divider=4)
        m.d.comb += ClockSignal().eq(divider.o)
        # NOTE(review): assumes a maximum lane clock of 350 MHz — confirm
        platform.add_clock_constraint(divider.o, 350e6 / 4)

        return m


class MipiLaneRxPhy(Elaboratable):
    """Deserializes one MIPI data lane into bytes; `bitslip` shifts the word
    alignment by one bit per pulse."""
    def __init__(self, pin, ddr_domain):
        self.pin = pin
        self.ddr_domain = ddr_domain

        self.bitslip = ControlSignal()

        self.output = Signal(8)

    def elaborate(self, platform):
        m = Module()

        # IDELAY in front of the deserializer allows input timing adjustment
        delay = m.submodules.delay = IDelay(self.pin)
        serdes = m.submodules.serdes = DDRDeserializer(delay.output, self.ddr_domain, bit_width=8, msb_first=False)
        m.d.comb += serdes.bitslip.eq(self.bitslip)
        m.d.comb += self.output.eq(serdes.output)

        return m
from amaranth import *
from amaranth.build import Platform

from naps import PacketizedStream, ControlSignal
from .d_phy_lane import DPhyDataLane, DPhyClockLane


class DsiPhy(Elaboratable):
    """A MIPI DSI PHY built from one clock lane and `num_lanes` data lanes.

    Low-power (escape mode) communication is only wired to lane 0; high speed
    data from `hs_input` is distributed byte-wise across all lanes.
    """

    def __init__(self, resource, num_lanes, ddr_domain, ck_domain):
        self.resource = resource
        self.num_lanes = num_lanes
        self.ddr_domain = ddr_domain
        self.ck_domain = ck_domain

        self.control_input = PacketizedStream(8)
        self.control_output = PacketizedStream(8)
        self.hs_input = PacketizedStream(8 * num_lanes)  # one byte per lane per cycle
        self.request_hs = ControlSignal()

    def elaborate(self, platform: Platform):
        resource = platform.request(*self.resource, xdr={"hs_ck": 2, **{f"hs_d{i}": 2 for i in range(self.num_lanes)}})

        m = Module()

        lanes = []
        # bugfix: the lane count was hard-coded to `range(2)` here, silently
        # ignoring `num_lanes` (which already sizes hs_input and the xdr
        # request above); now all requested lanes are instantiated
        for i in range(self.num_lanes):
            lane = DPhyDataLane(
                lp_pins=getattr(resource, f"lp_d{i}"),
                hs_pins=getattr(resource, f"hs_d{i}"),
                can_lp=(i == 0),  # only lane 0 participates in low-power signalling
                ddr_domain=self.ddr_domain
            )
            m.submodules[f"lane_d{i}"] = lane
            lanes.append(lane)

        lane0 = lanes[0]
        m.d.comb += lane0.control_input.connect_upstream(self.control_input)
        m.d.comb += self.control_output.connect_upstream(lane0.control_output)

        # all lanes consume data in lockstep, so lane 0's ready is representative
        m.d.comb += self.hs_input.ready.eq(lane0.hs_input.ready)
        for i, lane in enumerate(lanes):
            # slice the per-lane byte out of the wide hs payload
            m.d.comb += lane.hs_input.payload.eq(self.hs_input.payload[i * 8: (i + 1) * 8])
            m.d.comb += lane.hs_input.valid.eq(self.hs_input.valid)
            m.d.comb += lane.hs_input.last.eq(self.hs_input.last)

        lane_ck = m.submodules.lane_ck = DPhyClockLane(resource.lp_ck, resource.hs_ck, ck_domain=self.ck_domain)
        m.d.comb += lane_ck.request_hs.eq(self.request_hs)

        return m
class DsiErrorResponse(data.Struct):
    """Bit layout of the error flags reported by a DSI peripheral in an
    Acknowledge & Error Report packet (one flag per bit, LSB first)."""
    SOT_ERROR: 1
    SOT_SYNC_ERROR: 1
    EOT_SYNC_ERROR: 1
    ESCAPE_MODE_ENTRY_COMMAND_ERROR: 1
    LOW_POWER_TRANSMIT_SYNC_ERROR: 1
    PERIPHERAL_TIMEOUT_ERROR: 1
    FALSE_CONTROL_ERROR: 1
    CONTENTION_DETECTED: 1
    ECC_ERROR_SINGLE_BIT_CORRECTED: 1
    ECC_ERROR_MULTI_BIT_NOT_CORRECTED: 1
    CHECKSUM_ERROR: 1
    DSI_DATA_TYPE_NOT_RECOGNIZED: 1
    DSI_VC_ID_INVALID: 1
    INVALID_TRANSMISSION_LENGTH: 1
    RESERVED: 1
    DSI_PROTOCOL_VIOLATION: 1
from amaranth import *

from naps.cores.peripherals.mmio_gpio import MmioGpio
from naps.soc.devicetree_overlay import devicetree_overlay
from naps.soc.soc_platform import SocPlatform

__all__ = ["BitbangI2c"]


class BitbangI2c(Elaboratable):
    """Exposes an I2C bus to Linux via the `i2c-gpio` bitbang driver, backed
    by an MmioGpio peripheral that drives the scl/sda pins."""

    def __init__(self, pins, name_suffix=""):
        self.devicetree_name = "bitbang_i2c" + name_suffix
        # gpio 0 = scl, gpio 1 = sda (must match the overlay below)
        self.mmio_gpio = MmioGpio(pads=(pins.scl, pins.sda), name_suffix="_" + self.devicetree_name)

    def elaborate(self, platform: SocPlatform):
        m = Module()
        m.submodules.mmio_gpio = self.mmio_gpio

        # register a devicetree overlay so the kernel i2c-gpio driver binds
        # to our MmioGpio controller
        overlay_content = """
            %overlay_name%: i2c@0 {
                compatible = "i2c-gpio";
                sda-gpios = <&%mmio_gpio% 1 6>;
                scl-gpios = <&%mmio_gpio% 0 6>;
                i2c-gpio,delay-us = <2>; /* ~100 kHz */
                #address-cells = <1>;
                #size-cells = <0>;
            };
        """
        devicetree_overlay(platform, self.devicetree_name, overlay_content, {"mmio_gpio": self.mmio_gpio.devicetree_name})

        return m
thus we invert CS here to make the pin active high as defined 21 | ncs = Signal.like(self.pins.cs.o) 22 | m.d.comb += self.pins.cs.o.eq(~ncs) 23 | 24 | self.mmio_gpio = MmioGpio(pads=(self.pins.clk, self.pins.copi, self.pins.cipo, ncs), name_suffix="_" + self.devicetree_name) 25 | m.submodules.mmio_gpio = self.mmio_gpio 26 | 27 | overlay_content = """ 28 | %overlay_name%: spi@0 { 29 | compatible = "spi-gpio"; 30 | #address-cells = <1>; 31 | #size-cells = <0>; 32 | ranges; 33 | 34 | sck-gpios = <&%mmio_gpio% 0 0>; 35 | mosi-gpios = <&%mmio_gpio% 1 0>; 36 | miso-gpios = <&%mmio_gpio% 2 0>; 37 | cs-gpios = <&%mmio_gpio% 3 0>; 38 | num_chipselects = <1>; 39 | status = "ok"; 40 | 41 | spidev1 { 42 | compatible = "spidev"; 43 | reg = <0>; 44 | #address-cells = <1>; 45 | #size-cells = <0>; 46 | spi-max-frequency = <30000000>; 47 | }; 48 | }; 49 | """ 50 | devicetree_overlay(platform, self.devicetree_name, overlay_content, {"mmio_gpio": self.mmio_gpio.devicetree_name}) 51 | 52 | return m 53 | -------------------------------------------------------------------------------- /naps/cores/peripherals/csr_bank_zynq_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from naps import AxiEndpoint, axil_read, axil_write, CsrBank, ControlSignal, ZynqSocPlatform, SimPlatform, do_nothing 3 | 4 | 5 | class TestAxiSlave(unittest.TestCase): 6 | def check_csr_bank(self, num_csr=10, testdata=0x12345678, use_axi_interconnect=False): 7 | platform = ZynqSocPlatform(SimPlatform(), use_axi_interconnect) 8 | csr_bank = CsrBank("test") 9 | for i in range(num_csr): 10 | csr_bank.reg("csr#{}".format(i), ControlSignal(32)) 11 | 12 | def testbench(): 13 | axi = platform.axi_lite_master 14 | for addr in [0x4000_0000 + (i * 4) for i in range(num_csr)]: 15 | yield from axil_read(axi, addr) 16 | yield from axil_write(axi, addr, testdata) 17 | self.assertEqual(testdata, (yield from axil_read(axi, addr))) 18 | 19 | platform.sim(csr_bank, 
(testbench, "axi_lite")) 20 | 21 | def test_csr_bank_aggregator(self): 22 | self.check_csr_bank(use_axi_interconnect=False) 23 | 24 | def test_csr_bank_interconnect(self): 25 | self.check_csr_bank(use_axi_interconnect=True) 26 | 27 | def test_simple_test_csr_bank(self): 28 | platform = ZynqSocPlatform(SimPlatform()) 29 | csr_bank = CsrBank("test") 30 | csr_bank.reg("csr", ControlSignal(32)) 31 | 32 | def testbench(): 33 | axi: AxiEndpoint = platform.axi_lite_master 34 | yield axi.read_address.payload.eq(0x4000_0000) 35 | yield axi.read_address.valid.eq(1) 36 | yield from do_nothing() 37 | 38 | platform.sim(csr_bank, (testbench, "axi_lite")) 39 | -------------------------------------------------------------------------------- /naps/cores/peripherals/drp_bridge.py: -------------------------------------------------------------------------------- 1 | # TODO: add tests 2 | 3 | from amaranth import * 4 | from naps.soc import SocPlatform, Response 5 | 6 | 7 | __all__ = ["DrpInterface", "DrpBridge"] 8 | 9 | 10 | # TODO: let this be an `Interface` 11 | class DrpInterface: 12 | def __init__(self, DWE, DEN, DADDR, DI, DO, DRDY, DCLK): 13 | self.data_write_enable: Signal = DWE 14 | self.data_enable: Signal = DEN 15 | self.address: Signal = DADDR 16 | self.data_in: Signal = DI 17 | self.data_out: Signal = DO 18 | self.ready: Signal = DRDY 19 | self.clk : Signal = DCLK 20 | 21 | 22 | class DrpBridge(Elaboratable): 23 | def __init__(self, drp_interface): 24 | """ 25 | A bridge for the xilinx dynamic reconfiguration port. This is for example used in the Xilinx 7 series MMCM and 26 | PLL vendor. 
27 | 28 | :param drp_interface: the drp bus of the drp slave 29 | """ 30 | self.drp_interface: DrpInterface = drp_interface 31 | 32 | def elaborate(self, platform: SocPlatform): 33 | m = Module() 34 | 35 | def handle_read(m, addr, data, read_done): 36 | m.d.comb += self.drp_interface.clk.eq(ClockSignal()) 37 | m.d.sync += self.drp_interface.address.eq(addr) 38 | m.d.sync += self.drp_interface.data_enable.eq(1) 39 | with m.If(self.drp_interface.ready): 40 | m.d.sync += self.drp_interface.data_enable.eq(0) 41 | m.d.sync += data.eq(self.drp_interface.data_out) 42 | read_done(Response.OK) 43 | 44 | def handle_write(m, addr, data, write_done): 45 | m.d.comb += self.drp_interface.clk.eq(ClockSignal()) 46 | m.d.sync += self.drp_interface.address.eq(addr) 47 | m.d.sync += self.drp_interface.data_enable.eq(1) 48 | m.d.sync += self.drp_interface.data_write_enable.eq(1) 49 | m.d.sync += self.drp_interface.data_in.eq(data) 50 | with m.If(self.drp_interface.ready): 51 | m.d.sync += self.drp_interface.data_enable.eq(0) 52 | m.d.sync += self.drp_interface.data_write_enable.eq(0) 53 | write_done(Response.OK) 54 | 55 | # TODO: fix drp bridge 56 | # memorymap = MemoryMap() 57 | # memorymap.allocate("drp", writable=True, bits=2**len(self.drp_interface.address) * 8) 58 | # 59 | # m.submodules += Peripheral( 60 | # handle_read, 61 | # handle_write, 62 | # memorymap 63 | # ) 64 | 65 | return m 66 | -------------------------------------------------------------------------------- /naps/cores/peripherals/mmio_gpio.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from naps.soc import ControlSignal, StatusSignal, devicetree_overlay 3 | 4 | __all__ = ["MmioGpio"] 5 | 6 | 7 | class MmioGpio(Elaboratable): 8 | def __init__(self, pads, name_suffix=""): 9 | """ A simple gpio peripheral, that is compatible with the gpio-mmio.c linux kernel pydriver. 
10 | see https://github.com/torvalds/linux/blob/master/drivers/gpio/gpio-mmio.c 11 | """ 12 | self._pads = pads 13 | 14 | # see https://github.com/torvalds/linux/blob/master/drivers/gpio/gpio-mmio.c#L473 15 | # we are using a configuration with one output one input and one direction register 16 | w = len(self._pads) 17 | self.set = ControlSignal(w) 18 | self.dat = StatusSignal(w) 19 | self.dirout = ControlSignal(w) 20 | 21 | self.devicetree_name = "mmio_gpio" + name_suffix 22 | 23 | def elaborate(self, platform): 24 | m = Module() 25 | 26 | overlay_content = """ 27 | %overlay_name%: %overlay_name%@40000000 { 28 | compatible = "brcm,bcm6345-gpio"; 29 | reg-names = "set", "dat", "dirout"; 30 | reg = <%set% 1>, <%dat% 1>, <%dirout% 1>; 31 | 32 | #gpio-cells = <2>; 33 | gpio-controller; 34 | }; 35 | """ 36 | devicetree_overlay(platform, self.devicetree_name, overlay_content, {"set": self.set, "dat": self.dat, "dirout": self.dirout}) 37 | 38 | for i, pad in enumerate(self._pads): 39 | if hasattr(pad, "i"): 40 | m.d.comb += self.dat[i].eq(pad.i) 41 | if hasattr(pad, "oe"): 42 | m.d.comb += pad.oe.eq(self.dirout[i]) 43 | if hasattr(pad, "o"): 44 | m.d.comb += pad.o.eq(self.set[i]) 45 | 46 | return m 47 | -------------------------------------------------------------------------------- /naps/cores/peripherals/soc_memory_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from naps import SimPlatform, SocMemory, axil_read, axil_write, do_nothing, SimSocPlatform 4 | from naps.soc.platform.zynq import ZynqSocPlatform 5 | 6 | 7 | class SocMemoryTest(unittest.TestCase): 8 | def test_smoke(self): 9 | platform = ZynqSocPlatform(SimPlatform()) 10 | memory_depth = 128 11 | dut = SocMemory(shape=32, init=[], depth=memory_depth) 12 | 13 | def testbench(): 14 | axi = platform.axi_lite_master 15 | memorymap = platform.memorymap 16 | for addr in range(memory_depth): 17 | yield from axil_write(axi, 4*addr + 0x40000000, 
addr) 18 | for addr in range(memory_depth): 19 | self.assertEqual(addr, (yield from axil_read(axi, 4*addr + 0x40000000))) 20 | 21 | platform.sim(dut, (testbench, "axi_lite")) 22 | 23 | def test_with_driver(self): 24 | platform = SimSocPlatform(SimPlatform()) 25 | 26 | memory_depth = 128 27 | dut = SocMemory(shape=64, depth=memory_depth, init=[]) 28 | 29 | def driver(design): 30 | for i in range(memory_depth): 31 | design[i] = i * i << 30 32 | yield from do_nothing(10) 33 | for i in reversed(range(memory_depth)): 34 | self.assertEqual(design[i], i * i << 30) 35 | yield from do_nothing(10) 36 | platform.add_driver(driver) 37 | 38 | platform.sim(dut) 39 | 40 | 41 | def test_with_driver_simple(self): 42 | platform = SimSocPlatform(SimPlatform()) 43 | 44 | memory_depth = 2 45 | dut = SocMemory(shape=32, depth=memory_depth, init=[]) 46 | 47 | def driver(design): 48 | for i in range(memory_depth): 49 | design[i] = i 50 | yield from do_nothing(10) 51 | for i in reversed(range(memory_depth)): 52 | self.assertEqual(design[i], i) 53 | yield from do_nothing(10) 54 | platform.add_driver(driver) 55 | 56 | platform.sim(dut) 57 | -------------------------------------------------------------------------------- /naps/cores/plugin_module_streamer/PROTOCOL.md: -------------------------------------------------------------------------------- 1 | 2 | # Protocol 3 | * 1 Clock lane (TODO: which lane?) 4 | * 4 Data lanes (TODO: which lanes) 5 | * 0x00 and 0xFF codes are disallowed 6 | * in idle (when no data is to be sent) the transmitter transmits 0x00 and 0xFF alternating 7 | * the 0x00 0xFF pattern is used for training (both bit alignment and word alignment) 8 | * 12 consecutive bits are sent one one lane. 
the next 12 bits are on the next lane 9 | 10 | ``` 11 | bit number 12 | lvds0: 01 02 03 04 05 06 07 08 13 | lvds1: 09 10 11 12 13 14 15 16 14 | lvds2: 17 28 19 20 21 22 23 24 15 | lvds3: 25 26 27 28 29 30 31 32 16 | lane5 is clock 17 | ``` -------------------------------------------------------------------------------- /naps/cores/plugin_module_streamer/__init__.py: -------------------------------------------------------------------------------- 1 | from .tx import * 2 | from .rx import * 3 | -------------------------------------------------------------------------------- /naps/cores/plugin_module_streamer/tx.py: -------------------------------------------------------------------------------- 1 | # The PluginModuleStreamer{Sink,Source} tuple allows us to stream data from the main camera FPGA (Zynq) to some 2 | # plugin module FPGA (i.e. the usb3 plugin module machxo2) 3 | 4 | from amaranth import * 5 | from naps import BasicStream, ControlSignal 6 | from naps.cores import InflexibleSinkDebug 7 | from naps.vendor.xilinx_s7 import DDRSerializer 8 | 9 | __all__ = ["PluginModuleStreamerTx"] 10 | 11 | 12 | class PluginModuleStreamerTx(Elaboratable): 13 | def __init__(self, plugin_resource, input: BasicStream, bitclk_domain, training_pattern=0b00010110): 14 | self.bitclk_domain = bitclk_domain 15 | self.plugin_resource = plugin_resource 16 | self.input = input 17 | 18 | self.training_pattern = ControlSignal(8, init=training_pattern) 19 | self.do_training = ControlSignal(init=1) 20 | 21 | def elaborate(self, platform): 22 | m = Module() 23 | 24 | m.d.comb += self.input.ready.eq(~self.do_training) 25 | m.submodules.inflexible_sink_debug = InflexibleSinkDebug(self.input) 26 | 27 | valid = Signal() 28 | m.d.comb += valid.eq(self.input.valid & ~self.do_training) 29 | m.submodules.lane_clock = DDRSerializer(0b00001111, self.plugin_resource.clk_word.o, ddr_domain=self.bitclk_domain, msb_first=True) 30 | m.submodules.lane_valid = DDRSerializer(valid.replicate(8), 
self.plugin_resource.valid.o, ddr_domain=self.bitclk_domain, msb_first=True) 31 | for i in range(4): 32 | value = Signal(8) 33 | m.submodules["lane{}".format(i)] = DDRSerializer(value, getattr(self.plugin_resource, "lane{}".format(i)).o, ddr_domain=self.bitclk_domain, msb_first=True) 34 | with m.If(valid): 35 | m.d.comb += value.eq(self.input.payload[0+(i*8):8+(i*8)]) 36 | with m.Else(): 37 | m.d.comb += value.eq(self.training_pattern) 38 | 39 | return m 40 | -------------------------------------------------------------------------------- /naps/cores/serdes/__init__.py: -------------------------------------------------------------------------------- 1 | from .serializer import * 2 | -------------------------------------------------------------------------------- /naps/cores/serdes/serializer.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from amaranth.lib.cdc import FFSynchronizer 3 | from naps import BasicStream 4 | from ...util.amaranth_misc import fake_differential 5 | from ..stream import BufferedAsyncStreamFIFO 6 | 7 | __all__ = ["Serializer"] 8 | 9 | 10 | class Serializer(Elaboratable): 11 | def __init__(self, pins, width: int, ddr_domain, reset): 12 | self.pins = pins 13 | self.reset = reset 14 | self.input = BasicStream(width) 15 | self.idle = Signal(width) 16 | self.is_idle = Signal() 17 | self.ddr_domain = ddr_domain 18 | 19 | def elaborate(self, platform): 20 | m = Module() 21 | 22 | ddr_reset = Signal() 23 | m.submodules += FFSynchronizer(self.reset, ddr_reset, o_domain=self.ddr_domain) 24 | 25 | m.d.comb += self.pins.o_clk.eq(ClockSignal(self.ddr_domain)) 26 | m.d.comb += self.pins.oe.eq(~self.reset) 27 | 28 | hs_fifo = m.submodules.hs_fifo = BufferedAsyncStreamFIFO(self.input, 8, o_domain=self.ddr_domain) 29 | hs_payload = Signal(8) 30 | 31 | was_valid = Signal() 32 | m.submodules += FFSynchronizer(~was_valid, self.is_idle) 33 | 34 | with m.FSM(domain=self.ddr_domain): 35 | for i in 
range(4): 36 | with m.State(f"{i}"): 37 | if i == 3: 38 | with m.If(hs_fifo.output.valid): 39 | m.d[self.ddr_domain] += hs_payload.eq(hs_fifo.output.payload) 40 | m.d[self.ddr_domain] += self.idle.eq(hs_fifo.output.payload[7].replicate(8)) 41 | m.d[self.ddr_domain] += was_valid.eq(1) 42 | with m.Else(): 43 | m.d[self.ddr_domain] += hs_payload.eq(self.idle) 44 | m.d[self.ddr_domain] += was_valid.eq(0) 45 | m.d.comb += hs_fifo.output.ready.eq(was_valid) 46 | m.d.comb += self.pins.o0.eq(fake_differential(hs_payload[i * 2 + 0])) 47 | m.d.comb += self.pins.o1.eq(fake_differential(hs_payload[i * 2 + 1])) 48 | m.next = f"{(i + 1) % 4}" 49 | 50 | return ResetInserter({self.ddr_domain: ddr_reset, "sync": self.reset})(m) 51 | -------------------------------------------------------------------------------- /naps/cores/stream/__init__.py: -------------------------------------------------------------------------------- 1 | from .buffer import * 2 | from .tee import * 3 | from .counter_source import * 4 | from .debug import * 5 | from .fifo import * 6 | from .gearbox import * 7 | from .metadata_wrapper import * 8 | from .repacking import * 9 | from .stream_memory import * 10 | -------------------------------------------------------------------------------- /naps/cores/stream/buffer.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from naps.stream import BasicStream, stream_transformer 3 | 4 | __all__ = ["StreamBuffer"] 5 | 6 | 7 | class StreamBuffer(Elaboratable): 8 | """Basically a 1 deep Stream FIFO. 
Can be used to improve timing or to make outputs compliant with the Stream contract""" 9 | def __init__(self, input: BasicStream): 10 | self.input = input 11 | self.output = input.clone() 12 | 13 | def elaborate(self, platform): 14 | m = Module() 15 | 16 | stream_transformer(self.input, self.output, m, latency=1) 17 | with m.If(self.input.ready & self.input.valid): 18 | m.d.sync += self.output.connect_upstream(self.input, exclude=["ready", "valid"]) 19 | 20 | return m 21 | -------------------------------------------------------------------------------- /naps/cores/stream/buffer_test.py: -------------------------------------------------------------------------------- 1 | import random 2 | import unittest 3 | 4 | from naps.cores.stream.buffer import StreamBuffer 5 | from naps.stream.formal_util import verify_stream_output_contract 6 | from naps.stream.sim_util import write_to_stream, read_from_stream 7 | from naps.stream import BasicStream 8 | from naps.util.sim import SimPlatform 9 | 10 | 11 | class TestBuffer(unittest.TestCase): 12 | def test_basic(self): 13 | platform = SimPlatform() 14 | input_stream = BasicStream(32) 15 | dut = StreamBuffer(input_stream) 16 | 17 | random.seed(0) 18 | test_data = [random.randrange(0, 2**32) for _ in range(100)] 19 | 20 | def write_process(): 21 | for d in test_data: 22 | yield from write_to_stream(input_stream, payload=d) 23 | platform.add_process(write_process, "sync") 24 | 25 | def read_process(): 26 | for expected in test_data: 27 | read = yield from read_from_stream(dut.output) 28 | self.assertEqual(read, expected) 29 | platform.add_process(read_process, "sync") 30 | 31 | platform.add_sim_clock("sync", 100e6) 32 | platform.sim(dut) 33 | 34 | def test_output_properties(self): 35 | input = BasicStream(32) 36 | # important: we prove here that ANY input produces a contract obeying output 37 | verify_stream_output_contract(StreamBuffer(input)) 38 | -------------------------------------------------------------------------------- 
/naps/cores/stream/counter_source.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from naps import BasicStream, ControlSignal 3 | 4 | __all__ = ["CounterStreamSource"] 5 | 6 | 7 | class CounterStreamSource(Elaboratable): 8 | def __init__(self, width, count_if_not_ready=False): 9 | self.output = BasicStream(width, name="counter_stream") 10 | 11 | self.count_if_not_ready = ControlSignal(init=count_if_not_ready) 12 | 13 | def elaborate(self, platform): 14 | m = Module() 15 | 16 | # we initialize our counter with one to compensate for the 2 cycle delay 17 | # from it to the payload 18 | counter = Signal(self.output.payload.shape(), init=1) 19 | 20 | m.d.comb += self.output.valid.eq(1) 21 | with m.If(self.output.ready | self.count_if_not_ready): 22 | m.d.sync += counter.eq(counter + 1) 23 | 24 | with m.If(self.output.ready): 25 | m.d.sync += self.output.payload.eq(counter) 26 | 27 | return m 28 | -------------------------------------------------------------------------------- /naps/cores/stream/counter_source_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from . 
import CounterStreamSource 4 | from naps import verify_stream_output_contract 5 | 6 | 7 | class CounterStreamSourceTest(unittest.TestCase): 8 | def test_stream_contract(self): 9 | verify_stream_output_contract(CounterStreamSource(32)) 10 | -------------------------------------------------------------------------------- /naps/cores/stream/fifo.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from amaranth.lib.fifo import SyncFIFOBuffered, SyncFIFO, AsyncFIFOBuffered, AsyncFIFO 3 | from naps import Stream, StatusSignal 4 | 5 | __all__ = ["BufferedAsyncStreamFIFO", "UnbufferedAsyncStreamFIFO", "BufferedSyncStreamFIFO", "UnbufferedSyncStreamFIFO"] 6 | 7 | 8 | class StreamFIFO(Elaboratable): 9 | def __init__(self, input: Stream, fifo_type, output_stream_name="fifo_out", **fifo_args): 10 | self.input = input 11 | self.output = input.clone(name=output_stream_name) 12 | if "r_domain" in fifo_args: 13 | self.output_domain = fifo_args["r_domain"] 14 | self.fifo = fifo_type(width=len(Cat(self.input.payload_signals.values())), **fifo_args) 15 | self.depth = fifo_args['depth'] 16 | 17 | self.r_level = StatusSignal(range(self.fifo.depth + 1)) 18 | self.w_level = StatusSignal(range(self.fifo.depth + 1)) 19 | 20 | def elaborate(self, platform): 21 | m = Module() 22 | fifo = m.submodules.fifo = self.fifo 23 | 24 | if self.depth == 0: 25 | m.d.comb += self.output.connect_upstream(self.input) 26 | else: 27 | m.d.comb += self.r_level.eq(fifo.r_level) 28 | m.d.comb += self.w_level.eq(fifo.w_level) 29 | 30 | m.d.comb += self.input.ready.eq(fifo.w_rdy) 31 | m.d.comb += fifo.w_data.eq(Cat(self.input.payload_signals.values())) 32 | m.d.comb += fifo.w_en.eq(self.input.valid) 33 | 34 | m.d.comb += Cat(self.output.payload_signals.values()).eq(fifo.r_data) 35 | m.d.comb += self.output.valid.eq(fifo.r_rdy) 36 | m.d.comb += fifo.r_en.eq(self.output.ready) 37 | 38 | return m 39 | 40 | 41 | def BufferedSyncStreamFIFO(input: Stream, 
depth, **kwargs): 42 | return StreamFIFO(input, SyncFIFOBuffered, depth=depth, **kwargs) 43 | 44 | 45 | def UnbufferedSyncStreamFIFO(input: Stream, depth, **kwargs): 46 | return StreamFIFO(input, SyncFIFO, depth=depth, **kwargs) 47 | 48 | 49 | def BufferedAsyncStreamFIFO(input, depth, i_domain="sync", o_domain="sync", exact_depth=False, **kwargs): 50 | return StreamFIFO( 51 | input, AsyncFIFOBuffered, depth=depth, r_domain=o_domain, w_domain=i_domain, exact_depth=exact_depth, 52 | **kwargs 53 | ) 54 | 55 | 56 | def UnbufferedAsyncStreamFIFO(input, depth, i_domain="sync", o_domain="sync", exact_depth=False, **kwargs): 57 | return StreamFIFO( 58 | input, AsyncFIFO, depth=depth, r_domain=o_domain, w_domain=i_domain, exact_depth=exact_depth, 59 | **kwargs 60 | ) 61 | -------------------------------------------------------------------------------- /naps/cores/stream/fifo_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import pytest 4 | 5 | from naps import SimPlatform, do_nothing 6 | from naps.stream import BasicStream, verify_stream_output_contract, write_to_stream, read_from_stream 7 | from . 
import UnbufferedAsyncStreamFIFO, BufferedAsyncStreamFIFO, UnbufferedSyncStreamFIFO, \ 8 | BufferedSyncStreamFIFO 9 | 10 | 11 | class TestFifo(unittest.TestCase): 12 | def check_fifo_basic(self, fifo_generator): 13 | input = BasicStream(32) 14 | fifo = fifo_generator(input, 1024) 15 | 16 | def testbench(): 17 | for i in range(10): 18 | yield from write_to_stream(input, payload=i) 19 | 20 | # async fifos need some time due to cdc 21 | yield from do_nothing() 22 | 23 | assert (yield fifo.r_level) == 10 24 | 25 | for i in range(10): 26 | assert (yield from read_from_stream(fifo.output)) == i, "read data doesnt match written data" 27 | 28 | platform = SimPlatform() 29 | platform.add_sim_clock("sync", 100e6) 30 | platform.sim(fifo, testbench) 31 | 32 | def test_sim_async_stream_fifo(self): 33 | fifo_gen = lambda input, depth: UnbufferedAsyncStreamFIFO(input, depth, o_domain="sync", i_domain="sync") 34 | self.check_fifo_basic(fifo_gen) 35 | 36 | def test_async_stream_fifo_buffered(self): 37 | fifo_gen = lambda input, depth: BufferedAsyncStreamFIFO(input, depth, o_domain="sync", i_domain="sync") 38 | self.check_fifo_basic(fifo_gen) 39 | 40 | def test_sync_stream_fifo(self): 41 | fifo_gen = lambda input, depth: UnbufferedSyncStreamFIFO(input, depth) 42 | self.check_fifo_basic(fifo_gen) 43 | 44 | def test_sync_stream_fifo_buffered(self): 45 | fifo_gen = lambda input, depth: BufferedSyncStreamFIFO(input, depth) 46 | self.check_fifo_basic(fifo_gen) 47 | 48 | @pytest.mark.skip("this can not be proven at the moment because a FFSyncronizer in the async FIFO is resetless") 49 | def test_async_stream_fifo_output_properties(self): 50 | input = BasicStream(32) 51 | verify_stream_output_contract(UnbufferedAsyncStreamFIFO(input, 10, o_domain="sync", i_domain="sync")) 52 | 53 | def test_async_stream_fifo_buffered_output_properties(self): 54 | input = BasicStream(32) 55 | verify_stream_output_contract(BufferedAsyncStreamFIFO(input, 10, o_domain="sync", i_domain="sync")) 56 | 57 | def 
test_sync_stream_fifo_output_properties(self): 58 | input = BasicStream(32) 59 | verify_stream_output_contract(UnbufferedSyncStreamFIFO(input, 10)) 60 | 61 | def test_sync_stream_fifo_buffered_output_properties(self): 62 | input = BasicStream(32) 63 | verify_stream_output_contract(BufferedSyncStreamFIFO(input, 10)) 64 | -------------------------------------------------------------------------------- /naps/cores/stream/repacking.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from naps import BasicStream, stream_transformer 3 | 4 | __all__ = ["Repack12BitStream"] 5 | 6 | 7 | class Repack12BitStream(Elaboratable): 8 | """Repacks a packed 12 bit little endian stream to a packed 12 bit big endian stream 9 | This core is probably not what you want unless you want to hand of your data to a computer. 10 | Then do this as your very last step because everything beyond this is pure confusion. 11 | """ 12 | def __init__(self, input: BasicStream, inverse=False): 13 | self.input = input 14 | assert len(self.input.payload) % 24 == 0 15 | self.inverse = inverse 16 | 17 | self.output = self.input.clone() 18 | 19 | def elaborate(self, platform): 20 | m = Module() 21 | 22 | stream_transformer(self.input, self.output, m, latency=0) 23 | for i in range(len(self.input.payload) // 24): 24 | input_slice = self.input.payload[i * 24:(i + 1) * 24] 25 | output_slice = self.output.payload[i * 24:(i + 1) * 24] 26 | 27 | # (assuming the least significant bit is written right and the byte with the lowest address is the rightmost byte) 28 | # input: NMLKJIHG FEDCBA98 76543210 29 | # output: JIHGFEDC 3210NMLK BA987654 30 | 31 | if not self.inverse: 32 | m.d.comb += output_slice[0:4].eq(input_slice[4:8]) 33 | m.d.comb += output_slice[4:8].eq(input_slice[8:12]) 34 | m.d.comb += output_slice[8:12].eq(input_slice[20:24]) 35 | m.d.comb += output_slice[12:16].eq(input_slice[0:4]) 36 | m.d.comb += 
output_slice[16:20].eq(input_slice[12:16]) 37 | m.d.comb += output_slice[20:24].eq(input_slice[16:20]) 38 | else: 39 | m.d.comb += output_slice[4:8].eq(input_slice[0:4]) 40 | m.d.comb += output_slice[8:12].eq(input_slice[4:8]) 41 | m.d.comb += output_slice[20:24].eq(input_slice[8:12]) 42 | m.d.comb += output_slice[0:4].eq(input_slice[12:16]) 43 | m.d.comb += output_slice[12:16].eq(input_slice[16:20]) 44 | m.d.comb += output_slice[16:20].eq(input_slice[20:24]) 45 | 46 | return m 47 | -------------------------------------------------------------------------------- /naps/cores/stream/stream_memory.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from amaranth.lib.memory import Memory 3 | from amaranth.utils import bits_for 4 | 5 | from naps import BasicStream, stream_transformer 6 | 7 | __all__ = ["StreamMemoryReader"] 8 | 9 | 10 | class StreamMemoryReader(Elaboratable): 11 | def __init__(self, address_input: BasicStream, memory: Memory): 12 | assert len(address_input.payload) == bits_for(memory.depth) 13 | self.address_input = address_input 14 | self.memory = memory 15 | 16 | self.output = address_input.clone() 17 | self.output.payload = Signal(memory.shape) 18 | 19 | def elaborate(self, platform): 20 | m = Module() 21 | 22 | stream_transformer(self.address_input, self.output, m, latency=1, allow_partial_out_of_band=True) 23 | port = self.memory.read_port(domain="sync") 24 | m.d.comb += port.en.eq(self.address_input.ready & self.address_input.valid) 25 | m.d.comb += port.addr.eq(self.address_input.payload) 26 | m.d.comb += self.output.payload.eq(port.data) 27 | 28 | return m 29 | -------------------------------------------------------------------------------- /naps/cores/stream/stream_memory_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from amaranth import * 4 | from amaranth.sim import Passive 5 | from amaranth.lib.memory import 
Memory 6 | 7 | from naps import SimPlatform, PacketizedStream, write_to_stream, read_from_stream, verify_stream_output_contract, LegalStreamSource 8 | from naps.cores.stream.stream_memory import StreamMemoryReader 9 | 10 | 11 | class StreamMemoryTest(unittest.TestCase): 12 | def test_hello_world(self): 13 | platform = SimPlatform() 14 | m = Module() 15 | 16 | address_stream = PacketizedStream(8) 17 | mem = Memory(shape=32, depth=128, init=[i + 2 for i in range(128)]) 18 | reader = m.submodules.reader = StreamMemoryReader(address_stream, mem) 19 | m.submodules.memory = mem 20 | 21 | def write_process(): 22 | for i in range(128): 23 | yield from write_to_stream(address_stream, payload=i, last=(i % 8) == 0) 24 | yield Passive() 25 | 26 | def read_process(): 27 | for i in range(128): 28 | data, last = (yield from read_from_stream(reader.output, extract=("payload", "last"))) 29 | assert data == i + 2 30 | assert last == ((i % 8) == 0) 31 | yield Passive() 32 | 33 | platform.add_sim_clock("sync", 100e6) 34 | platform.add_process(write_process, "sync") 35 | platform.sim(m, read_process) 36 | 37 | def test_output_stream_contract(self): 38 | def dut(): 39 | input_stream = PacketizedStream(8) 40 | mem = Memory(shape=32, depth=128, init=[i + 2 for i in range(128)]) 41 | dut = StreamMemoryReader(input_stream, mem) 42 | return (dut, dut.output, [LegalStreamSource(input_stream), mem]) 43 | 44 | verify_stream_output_contract(dut) -------------------------------------------------------------------------------- /naps/cores/stream/tee_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from naps.stream import PacketizedStream, LegalStreamSource, verify_stream_output_contract 3 | from .tee import StreamTee, StreamCombiner 4 | 5 | 6 | class StreamTeeTest(unittest.TestCase): 7 | def test_tee_output_stream_contract(self): 8 | input_stream = PacketizedStream(32) 9 | dut = StreamTee(input_stream) 10 | output1, output2 = 
dut.get_output(), dut.get_output() 11 | verify_stream_output_contract(dut, stream_output=output1, support_modules=(LegalStreamSource(input_stream),)) 12 | verify_stream_output_contract(dut, stream_output=output2, support_modules=(LegalStreamSource(input_stream),)) 13 | 14 | def test_stream_combiner_output_stream_contract(self): 15 | input1, input2 = PacketizedStream(32), PacketizedStream(32) 16 | dut = StreamCombiner(input1, input2, merge_payload=True) 17 | verify_stream_output_contract(dut, support_modules=(LegalStreamSource(input1), LegalStreamSource(input2))) 18 | -------------------------------------------------------------------------------- /naps/cores/video/__init__.py: -------------------------------------------------------------------------------- 1 | from .rgb import * 2 | from .image_stream import * 3 | from .image_convoluter import * 4 | from .adapters import * 5 | from .debayer import * 6 | from .demo_source import * 7 | from .focus_peeking import * 8 | from .gamma_corrector import * 9 | from .rearrange import * 10 | from .resizer import * 11 | from .test_util import * 12 | -------------------------------------------------------------------------------- /naps/cores/video/adapters.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from naps import StatusSignal, PacketizedStream 3 | from . 
class ImageStream2PacketizedStream(Elaboratable):
    """Convert an ImageStream to a packetized Stream by producing one packet per frame"""

    def __init__(self, input: ImageStream):
        self.input = input
        # same payload shape as the input; only the sideband markers change
        self.output = PacketizedStream(self.input.payload.shape(), name="packetized_image_stream")

    def elaborate(self, platform):
        m = Module()

        # Forward payload and handshake. The image-specific markers
        # (frame_last / line_last) and the packet `last` are excluded here
        # because they are remapped explicitly below.
        m.d.comb += self.output.connect_upstream(self.input, exclude=["last", "frame_last", "line_last"])
        # one packet per frame: the packet boundary is the frame boundary;
        # line boundaries are dropped
        m.d.comb += self.output.last.eq(self.input.frame_last)

        return m
class AdaptersTest(unittest.TestCase):
    def test_PacketizedStream2ImageStream(self):
        # Feed 10 packets of 100 words each into the adapter configured with
        # width=10; each packet must come out as one 10-line frame of
        # 10 pixels per line.
        platform = SimPlatform()
        input_stream = PacketizedStream(32)
        dut = PacketizedStream2ImageStream(input_stream, width=10)

        def write_process():
            for frame in range(10):
                yield from write_packet_to_stream(input_stream, [0 for _ in range(100)])
        platform.add_process(write_process, "sync")

        def read_process():
            for frame in range(10):
                # read_frame_from_stream returns a list of lines (lists of pixels)
                frame = yield from read_frame_from_stream(dut.output)
                self.assertEqual(len(frame), 10)
                self.assertTrue(all(len(l) == 10 for l in frame))
        platform.add_process(read_process, "sync")

        platform.add_sim_clock("sync", 100e6)
        platform.sim(dut)
debayerer_gen(input) 19 | image = imageio.imread(join(dirname(__file__), "test_bayer.png")) 20 | 21 | def write_process(): 22 | yield from write_frame_to_stream(input, image, pause=False) 23 | yield from write_frame_to_stream(input, image, pause=False) 24 | yield from write_frame_to_stream(input, image, pause=False) 25 | yield Passive() 26 | while True: 27 | yield from write_to_stream(input, line_last=0, frame_last=0, payload=0) 28 | 29 | def read_process(): 30 | (yield from read_frame_from_stream(transformer.output, timeout=1000, pause=False)) 31 | first = crop(to_8bit_rgb((yield from read_frame_from_stream(transformer.output, timeout=1000, pause=False))), 1, 1, 1, 1) 32 | second = crop(to_8bit_rgb((yield from read_frame_from_stream(transformer.output, timeout=1000, pause=False))), 1, 1, 1, 1) 33 | imageio.imsave(platform.output_filename_base + "_first.png", np.array(first, dtype=np.uint8)) 34 | imageio.imsave(platform.output_filename_base + "_second.png", np.array(second, dtype=np.uint8)) 35 | self.assertEqual(first, second) 36 | 37 | platform.add_sim_clock("sync", 100e6) 38 | platform.add_process(write_process, "sync") 39 | platform.sim(m, read_process) 40 | 41 | def test_output_stable_recoloring_debayerer(self): 42 | self.check_output_stable(RecoloringDebayerer) 43 | 44 | def test_output_stable_simple_interpolating_debayerer(self): 45 | self.check_output_stable(lambda input: SimpleInterpolatingDebayerer(input, 70, 48)) 46 | -------------------------------------------------------------------------------- /naps/cores/video/focus_peeking.py: -------------------------------------------------------------------------------- 1 | from itertools import chain 2 | from amaranth import * 3 | from naps import ControlSignal, nAbsDifference 4 | from . 
class FocusPeeking(Elaboratable):
    """Adds a focus peeking overlay to the image.

    Pixels whose 3x3 neighbourhood shows a summed per-channel deviation above
    `threshold` are replaced with the configured highlight color.
    """

    def __init__(self, input: ImageStream, width=3000, height=3000):
        self.input = input
        self.output = ImageStream(24)  # RGB24 output stream

        # maximum frame dimensions; presumably used by ImageConvoluter to size
        # its line buffers — TODO confirm
        self.width = width
        self.height = height

        # runtime-tunable control registers
        self.threshold = ControlSignal(16, init=255)
        self.highlight_r = ControlSignal(8, init=255)  # default highlight: pure red
        self.highlight_g = ControlSignal(8)
        self.highlight_b = ControlSignal(8)

    def elaborate(self, platform):
        m = Module()

        def transformer_function(x, y, image_proxy):
            # center pixel and its full 3x3 neighbourhood (the center itself is
            # included in other_rgbs; its self-difference contributes 0)
            self_rgb = RGB24(image_proxy[x, y])
            other_rgbs = [
                RGB24(image_proxy[x + dx, y + dy])
                for dx in range(-1, 2)
                for dy in range(-1, 2)
            ]

            # sum of absolute per-channel differences against all neighbours;
            # a large sum indicates high local contrast, i.e. an in-focus edge
            deviations = [[nAbsDifference(self_rgb.r, o.r), nAbsDifference(self_rgb.g, o.g), nAbsDifference(self_rgb.b, o.b)] for o in other_rgbs]
            total_deviation = sum(chain(*deviations))

            # pass the pixel through unchanged, overriding it with the
            # highlight color when the deviation exceeds the threshold
            output = RGB24()
            m.d.comb += output.eq(RGB24(image_proxy[x, y]))
            with m.If(total_deviation > self.threshold):
                m.d.comb += output.r.eq(self.highlight_r)
                m.d.comb += output.g.eq(self.highlight_g)
                m.d.comb += output.b.eq(self.highlight_b)

            return output

        video_transformer = m.submodules.video_transformer = ImageConvoluter(self.input, transformer_function,
                                                                             self.width, self.height)
        m.d.comb += self.output.connect_upstream(video_transformer.output)

        return m
dut.highlight_r, dut.highlight_g, dut.highlight_b]) 64 | ) 65 | -------------------------------------------------------------------------------- /naps/cores/video/focus_peeking_cxxrtl_test/.gitignore: -------------------------------------------------------------------------------- 1 | main 2 | focus_test.vcd -------------------------------------------------------------------------------- /naps/cores/video/focus_peeking_cxxrtl_test/cat512.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/naps/cores/video/focus_peeking_cxxrtl_test/cat512.jpg -------------------------------------------------------------------------------- /naps/cores/video/focus_peeking_cxxrtl_test/focus_peak_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/naps/cores/video/focus_peeking_cxxrtl_test/focus_peak_test.png -------------------------------------------------------------------------------- /naps/cores/video/gamma_corrector.py: -------------------------------------------------------------------------------- 1 | from amaranth import * 2 | from amaranth.lib.memory import Memory 3 | 4 | from naps import ControlSignal, stream_transformer 5 | from . 
class TableGammaCorrector(Elaboratable):
    """Apply gamma correction to a monochrome image using a pre-computed lookup table"""

    def __init__(self, input: ImageStream, gamma: float):
        self.input = input
        self.bpp = len(input.payload)  # bits per pixel; also determines the LUT depth
        self.output = ImageStream(self.bpp)

        self.gamma = gamma

    def elaborate(self, platform):
        m = Module()

        # latency=1 matches the one-cycle delay of the synchronous LUT read port
        stream_transformer(self.input, self.output, m, latency=1)
        input_transaction = self.input.ready & self.input.valid

        # compute the gamma lookup table with the formula
        # out = in ^ gamma, where in and out are 0-1 and ^ is exponentiation
        # (the +0.5 rounds to nearest instead of truncating)
        max_pix = 2**self.bpp - 1
        lut = list(int(max_pix*((v/max_pix)**self.gamma)+0.5) for v in range(max_pix+1))

        lut_mem = m.submodules.lut_mem = Memory(shape=self.bpp, depth=2**self.bpp, init=lut)
        lut_port = lut_mem.read_port(domain="sync")

        # only clock a new LUT value through when a word is actually transferred,
        # so the output payload stays stable while the stream stalls
        m.d.comb += lut_port.en.eq(input_transaction)
        m.d.comb += lut_port.addr.eq(self.input.payload)
        m.d.comb += self.output.payload.eq(lut_port.data)

        return m
imageio.imread(join(dirname(__file__), "wavelet", "che_32.png")) 19 | 20 | # correct the image ourselves to check the corrector's work 21 | if gamma != 1: 22 | bpp = 8 23 | max_pix = 2**bpp - 1 24 | lut = list(int(max_pix*((v/max_pix)**gamma)+0.5) for v in range(max_pix+1)) 25 | image_corrected = list(list(lut[pixel] for pixel in line) for line in image) 26 | else: # gamma = 1 should not change image at all 27 | image_corrected = list(list(pixel for pixel in line) for line in image) 28 | 29 | def write_process(): 30 | yield from write_frame_to_stream(input, image, pause=False) 31 | yield Passive() 32 | 33 | def read_process(): 34 | result = yield from read_frame_from_stream(transformer.output, timeout=1000, pause=False) 35 | imageio.imsave(platform.output_filename_base + "_result.png", np.array(result, dtype=np.uint8)) 36 | self.assertEqual(result, image_corrected) 37 | 38 | platform.add_sim_clock("sync", 100e6) 39 | platform.add_process(write_process, "sync") 40 | platform.sim(m, read_process) 41 | 42 | def test_output_table_gamma_corrector_encode(self): 43 | self.check_output(TableGammaCorrector, 1/2.2) 44 | 45 | def test_output_table_gamma_corrector_decode(self): 46 | self.check_output(TableGammaCorrector, 2.2) 47 | 48 | def test_output_table_gamma_corrector_nop(self): 49 | self.check_output(TableGammaCorrector, 1) 50 | -------------------------------------------------------------------------------- /naps/cores/video/image_stream.py: -------------------------------------------------------------------------------- 1 | from amaranth import Signal 2 | from naps import BasicStream, DOWNWARDS 3 | 4 | __all__ = ["ImageStream"] 5 | 6 | 7 | class ImageStream(BasicStream): 8 | """ 9 | A stream that can be used to transfer image data. 
10 | """ 11 | 12 | def __init__(self, payload_shape, name=None, src_loc_at=1): 13 | super().__init__(payload_shape, name, src_loc_at=1 + src_loc_at) 14 | self.line_last = Signal() @ DOWNWARDS 15 | self.frame_last = Signal() @ DOWNWARDS 16 | -------------------------------------------------------------------------------- /naps/cores/video/rearrange_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from os.path import join, dirname 3 | import imageio.v2 as imageio 4 | from amaranth import * 5 | from amaranth.sim import Passive 6 | from naps import SimPlatform, ImageStream, write_frame_to_stream, read_frame_from_stream, write_to_stream, do_nothing 7 | from naps.cores.video.rearrange import ImageSplitter2 8 | import numpy as np 9 | 10 | 11 | class TestImageSplitter2(unittest.TestCase): 12 | def test_image(self): 13 | platform = SimPlatform() 14 | m = Module() 15 | 16 | input = ImageStream(8) 17 | transformer = m.submodules.transformer = ImageSplitter2(input, 16, 4, 80) 18 | image = imageio.imread(join(dirname(__file__), "wavelet/che_64.png")) 19 | 20 | def write_process(): 21 | for i in range(2): 22 | yield from write_frame_to_stream(input, image, pause=False) 23 | yield Passive() 24 | yield from do_nothing(100) 25 | platform.add_process(write_process, "sync") 26 | 27 | 28 | for i in range(4): 29 | def makefunc(i): 30 | def read_process(): 31 | for n in range(2): 32 | frame = (yield from read_frame_from_stream(transformer.outputs[i], timeout=1000, pause=False)) 33 | imageio.imsave(platform.output_filename_base + f"_{i}_{n}.png", np.array(frame, dtype=np.uint8)) 34 | return read_process 35 | platform.add_process(makefunc(i), "sync") 36 | 37 | platform.add_sim_clock("sync", 100e6) 38 | platform.sim(m) 39 | -------------------------------------------------------------------------------- /naps/cores/video/rgb.py: -------------------------------------------------------------------------------- 1 | from amaranth 
class RGB(data.StructLayout):
    """An RGB pixel struct layout with configurable per-channel bit widths."""

    # Fix: the annotations previously listed the channels as r, b, g — an
    # order inconsistent with the actual layout (r, g, b) defined below.
    r: Signal
    g: Signal
    b: Signal

    def __init__(self, r_bits, g_bits, b_bits):
        """
        :param r_bits: width of the red channel in bits
        :param g_bits: width of the green channel in bits
        :param b_bits: width of the blue channel in bits
        """
        super().__init__({
            "r": r_bits,
            "g": g_bits,
            "b": b_bits,
        })

    def brightness(self):
        # unnormalized brightness: plain channel sum, no perceptual weighting
        return self.r + self.g + self.b


RGB24 = RGB(8, 8, 8)    # 24-bit truecolor
RGB565 = RGB(5, 6, 5)   # 16-bit packed RGB
def to_8bit_rgb(image_24bit):
    """Split each packed 24-bit pixel into a list of three 8-bit channels.

    Bits 0-7 become the first channel, 8-15 the second, 16-23 the third.
    """
    def split_pixel(px):
        return [(px >> shift) & 0xff for shift in (0, 8, 16)]

    return [[split_pixel(px) for px in row] for row in image_24bit]


def crop(frame, left=0, right=0, top=0, bottom=0):
    """Return `frame` (a list of pixel rows) with the given number of
    pixels removed from each edge."""
    kept_rows = frame[top:len(frame) - bottom]
    return [row[left:len(row) - right] for row in kept_rows]
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/naps/cores/video/wavelet/che_64.png -------------------------------------------------------------------------------- /naps/cores/video/wavelet/che_full.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apertus-open-source-cinema/naps/f39373a808e6005dd2c154360b5eac045b015bb2/naps/cores/video/wavelet/che_full.png -------------------------------------------------------------------------------- /naps/cores/video/wavelet/dng.py: -------------------------------------------------------------------------------- 1 | import rawpy 2 | import numpy as np 3 | from pidng.core import RAW2DNG, DNGTags, Tag 4 | from .py_wavelet import ty 5 | 6 | positions = [(0, 0), (0, 1), (1, 0), (1, 1)] 7 | 8 | 9 | def read_dng(filename): 10 | image = rawpy.imread(filename) 11 | raw_image = np.array(image.raw_image, dtype=ty) 12 | arrays = [raw_image[y::2, x::2] for x, y in positions] 13 | color_desc = image.color_desc.decode("utf-8") 14 | g = 1 15 | colors = {} 16 | for i, (x, y) in enumerate(positions): 17 | c = color_desc[image.raw_color(x, y)] 18 | if c == "G": 19 | c += str(g) 20 | g += 1 21 | colors[c] = arrays[i] 22 | 23 | estimated_bit_depth = 12 if np.max(raw_image) > 255 else 8 24 | return colors['R'], colors['G1'], colors['G2'], colors['B'], list(colors.keys()), estimated_bit_depth 25 | 26 | 27 | def write_dng(filename, red, green1, green2, blue, bit_depth, order=('G1', 'R', 'B', 'G2')): 28 | h, w = red.shape 29 | result = np.empty((h * 2, w * 2), dtype=np.uint16) 30 | 31 | colors = {'R': red, 'G1': green1, 'G2': green2, 'B': blue} 32 | for (x, y), color in zip(positions, order): 33 | result[y::2, x::2] = colors[color] 34 | 35 | # set DNG tags. 
def interleave(*args):
    """Row-interleave the given equally-shaped 2D arrays.

    Row i of the k-th argument becomes row i * len(args) + k of the result.
    The result is float64 (np.zeros default), matching the original behavior.
    """
    count = len(args)
    rows, cols = args[0].shape
    out = np.zeros((rows * count, cols))
    for offset, arr in enumerate(args):
        out[offset::count] = arr
    return out


def full_width(w, stages):
    """Width of the packed wavelet representation of a `w`-wide image."""
    return int(width_factor(stages) * w)


def width_factor(stages, factor=1.0):
    """Recursive width multiplier of the packed wavelet layout.

    A single stage doubles the width; each additional stage contributes
    3/2 of its (halved) factor for the three high-frequency bands.
    """
    if stages == 1:
        return 2 * factor
    return factor * 3 / 2 + width_factor(stages - 1, factor / 2)


def real_width(encoded_width, stages):
    """Inverse of `full_width`: recover the original image width."""
    return int(encoded_width // width_factor(stages))
def unpack(image, levels):
    # Inverse of `pack`: scatter the packed wavelet bands back into the
    # quadrant layout (LL top-left, HF bands in the other three quadrants).
    h, w = image.shape
    # recover the original (unpacked) width from the packed width
    w = real_width(w, levels)
    result = np.zeros((h, w), dtype=image.dtype)
    orig_result = result

    for level in reversed(range(levels)):
        # the three high-frequency bands occupy the rightmost 3*(w/2) columns
        # of the packed image, in order hf1, hf2, hf3
        hf1 = image[:h // 2, -w * 3 // 2: -w * 2 // 2]
        hf2 = image[:h // 2, -w * 2 // 2: -w * 1 // 2]
        hf3 = image[:h // 2, -w * 1 // 2:]
        result[:h // 2, w // 2:] = hf1
        result[h // 2:, :w // 2] = hf2
        result[h // 2:, w // 2:] = hf3

        if level == 0:
            # deepest level reached: the low-frequency band is stored verbatim
            result[:h // 2, :w // 2] = image[:h // 2, :w // 2]
        else:
            # descend into the low-frequency quadrant of the output ...
            result = result[:h // 2, :w // 2]
            # ... and drop the consumed bands from the packed input.
            # NOTE(review): the `5::2` row start offset mirrors `pack`'s
            # interleaving but the constant 5 looks suspicious — confirm it
            # matches the row layout `pack` produces.
            image = image[5::2, :-w * 3 // 2]
            w //= 2
            h //= 2

    return orig_result
range(max_input_word)}, 18 | **{k: 10 for k in range(max_input_word, max_input_word + len(self.possible_run_lengths))} 19 | } 20 | 21 | self.output = PacketizedStream(self.input.payload.shape()) 22 | 23 | def elaborate(self, platform): 24 | m = Module() 25 | 26 | wavelet = m.submodules.wavelet = MultiStageWavelet2D(self.input, self.width, self.height, stages=3) 27 | packetizer = m.submodules.packetizer = ImageStream2PacketizedStream(wavelet.output) 28 | 29 | bit_stuffing_input = VariableWidthStream(self.input.payload.shape(), init_width=len(self.input.payload)) 30 | with m.If(packetizer.output.is_hf): 31 | rle_input = PacketizedStream() 32 | m.d.comb += rle_input.connect_upstream(packetizer.output) 33 | rle = m.submodules.rle = ZeroRleEncoder(rle_input, self.possible_run_lengths) 34 | huffman = m.submodules.huffman = HuffmanEncoder(rle.output) 35 | m.d.comb += bit_stuffing_input.connect_upstream(huffman.output) 36 | with m.Else(): 37 | m.d.comb += bit_stuffing_input.connect_upstream(packetizer.output) 38 | 39 | bit_stuffing = m.submodules.bit_stuffing = BitStuffer(bit_stuffing_input, len(self.output.payload)) 40 | m.d.comb += self.output.connect_upstream(bit_stuffing.output) 41 | 42 | return m 43 | -------------------------------------------------------------------------------- /naps/data_structure/__init__.py: -------------------------------------------------------------------------------- 1 | from .bundle import * -------------------------------------------------------------------------------- /naps/platform/__init__.py: -------------------------------------------------------------------------------- 1 | from .plugins import * 2 | 3 | from .beta_platform import * 4 | from .hdmi_digitizer_platform import * 5 | from .micro_r2_platform import * 6 | from .usb3_plugin_platform import * 7 | from .zybo_platform import * 8 | from .colorlight_5a_75b_7_0 import * 9 | -------------------------------------------------------------------------------- 
class HdmiDigitizerPlatform(TE0714_03_50_2IPlatform):
    """TE0714 module on the HDMI digitizer baseboard.

    Adds the FT601 USB3 FIFO resource and eight active-low LEDs, all wired
    through board connector JM2.
    """

    def __init__(self):
        super().__init__()
        self.add_resources([
            Resource("ft601", 0,
                     Subsignal("reset", PinsN("39", dir="o", conn=("JM2", 0))),
                     # 32-bit FIFO data bus
                     Subsignal("data", Pins("34 32 30 28 26 24 22 20 16 14 12 10 8 6 4 2"
                                            " 1 3 5 7 9 11 13 15 19 21 23 25 27 29 31 33",
                                            dir="io", conn=("JM2", 0))),
                     # byte-enable lines, one per data byte
                     Subsignal("be", Pins("45 51 44 48", dir="io", conn=("JM2", 0))),
                     Subsignal("oe", PinsN("41", dir="o", conn=("JM2", 0))),
                     Subsignal("read", PinsN("43", dir="o", conn=("JM2", 0))),
                     Subsignal("write", PinsN("47", dir="o", conn=("JM2", 0))),
                     Subsignal("siwu", PinsN("46", dir="o", conn=("JM2", 0))),
                     Subsignal("rxf", PinsN("42", dir="i", conn=("JM2", 0))),
                     Subsignal("txe", PinsN("49", dir="i", conn=("JM2", 0))),
                     Subsignal("wakeup", PinsN("37", dir="io", conn=("JM2", 0))),
                     # Fix: the FT601 FIFO clock is 100 MHz; the original
                     # `Clock(100e63)` was a typo for `Clock(100e6)`.
                     Subsignal("clk", Pins("40", dir="i", conn=("JM2", 0)), Clock(100e6)),

                     Attrs(IOSTANDARD="LVCMOS33"),
                     ),
        ])
        # eight active-low LEDs, exposed as resources led1..led8
        for i, p in enumerate([59, 61, 63, 65, 85, 87, 89, 91]):
            self.add_resources([Resource("led", i + 1, Pins(str(p), dir='o', invert=True, conn=("JM2", 0)), Attrs(IOSTANDARD="LVCMOS18"))])

    # openocd invocation notes for JTAG access to this board:
    # 'source [find bus/ftdi/digilent_jtag_hs3.cfg];'
    # 'source [find cpld/xilinx-xc7.cfg]; transport select jtag;'
    # 'adapter_khz 20000;'
    # 'init;'
-------------------------------------------------------------------------------- /naps/platform/plugins/hdmi_plugin_resource.py: -------------------------------------------------------------------------------- 1 | # the 1x hdmi plugin module 2 | # see: https://wiki.apertus.org/index.php/Beta_HDMI_Plugin_Module 3 | 4 | from amaranth.build import Resource, Subsignal, Pins, PinsN, Attrs 5 | from .plugin_connector import PluginDiffPair 6 | 7 | __all__ = ["hdmi_plugin_connect"] 8 | 9 | 10 | def hdmi_plugin_connect(platform, plugin_slot, resource_number=0): 11 | if "plugin_{}:gpio0".format(plugin_slot) in platform._conn_pins: 12 | lowspeed_signals = [ 13 | # i2c to read edid data from the monitor 14 | Subsignal("sda", Pins("lvds5_n", dir='io', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS25")), 15 | Subsignal("scl", Pins("lvds5_p", dir='io', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS25")), 16 | 17 | # hdmi plugin-module specific signals 18 | Subsignal("output_enable", PinsN("gpio6", dir='o', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS33")), 19 | Subsignal("equalizer", Pins("gpio1 gpio4", dir='o', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS33")), 20 | Subsignal("dcc_enable", Pins("gpio5", dir='o', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS33")), 21 | Subsignal("vcc_enable", Pins("gpio7", dir='o', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS33")), 22 | Subsignal("ddet", Pins("gpio3", dir='o', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS33")), 23 | Subsignal("ihp", Pins("gpio2", dir='i', conn=("plugin", plugin_slot)), Attrs(IOSTANDARD="LVCMOS33")), 24 | ] 25 | else: 26 | lowspeed_signals = [] 27 | 28 | platform.add_resources([ 29 | Resource("hdmi", resource_number, 30 | Subsignal("clock", PluginDiffPair(platform, plugin_slot, pin=3, dir='o', serdes=True), Attrs(IOSTANDARD="LVDS_25")), 31 | Subsignal("b", PluginDiffPair(platform, plugin_slot, pin=2, dir='o', serdes=True), 
Attrs(IOSTANDARD="LVDS_25")), 32 | Subsignal("g", PluginDiffPair(platform, plugin_slot, pin=1, dir='o', serdes=True), Attrs(IOSTANDARD="LVDS_25")), 33 | Subsignal("r", PluginDiffPair(platform, plugin_slot, pin=0, dir='o', serdes=True), Attrs(IOSTANDARD="LVDS_25")), 34 | *lowspeed_signals 35 | ) 36 | ]) 37 | -------------------------------------------------------------------------------- /naps/platform/plugins/usb3_plugin_resource.py: -------------------------------------------------------------------------------- 1 | # the usb3 plugin module plugin module resource 2 | # (not the platform definition for building gateware for the fpga _on_ the plugin module itself) 3 | # see: https://wiki.apertus.org/index.php/1x_USB_3.0_Plugin_Module 4 | 5 | from amaranth.build import * 6 | from .plugin_connector import PluginDiffPair 7 | 8 | __all__ = ["usb3_plugin_connect"] 9 | 10 | 11 | def usb3_plugin_connect(platform, plugin_slot, resource_number=0, gpio=True, lvds=True, gpio_attrs=dict(IOSTANDARD="LVCMOS25")): 12 | if gpio: 13 | gpio_signals = [ 14 | Subsignal( 15 | "jtag", 16 | Subsignal("tms", Pins("gpio0", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 17 | Subsignal("tck", Pins("gpio1", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 18 | Subsignal("tdi", Pins("gpio2", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 19 | Subsignal("tdo", Pins("gpio3", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 20 | ), 21 | Subsignal("jtag_enb", Pins("gpio4", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 22 | Subsignal("program", PinsN("gpio5", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 23 | Subsignal("init", PinsN("gpio6", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 24 | Subsignal("done", PinsN("gpio7", dir="io", conn=("plugin", plugin_slot)), Attrs(**gpio_attrs)), 25 | ] 26 | else: 27 | gpio_signals = [] 28 | 29 | if lvds: 30 | lvds_signals = [ 31 | Subsignal( 32 | 
from amaranth.build import *
from amaranth.vendor import XilinxPlatform

__all__ = ["ZyboPlatform"]


class ZyboPlatform(XilinxPlatform):
    """Platform definition for the Digilent Zybo board (Zynq-7010, CLG400)."""
    device = "xc7z010"
    package = "clg400"
    speed = "1"
    resources = [
        Resource("hdmi", 0,
                 # high speed serial lanes
                 Subsignal("clock", DiffPairs("H16", "H17", dir='o'), Attrs(IOSTANDARD="TMDS_33")),
                 Subsignal("b", DiffPairs("D19", "D20", dir='o'), Attrs(IOSTANDARD="TMDS_33")),
                 Subsignal("g", DiffPairs("C20", "B20", dir='o'), Attrs(IOSTANDARD="TMDS_33")),
                 Subsignal("r", DiffPairs("B19", "A20", dir='o'), Attrs(IOSTANDARD="TMDS_33")),
                 # NOTE(review): presumably enables the hdmi output buffer on the
                 # board — confirm against the Zybo schematic
                 Subsignal("out_en", Pins("F17", dir='o'), Attrs(IOSTANDARD="LVCMOS33")),
                 )
    ]
    connectors = []
-------------------------------------------------------------------------------- 1 | from .csr_types import * 2 | 3 | from .fatbitstream import * 4 | from .program_fatbitstream_local import * 5 | from .program_fatbitstream_ssh import * 6 | from .memorymap import * 7 | from .peripheral import * 8 | from .peripherals_aggregator import * 9 | from .pydriver import * 10 | from .devicetree_overlay import * 11 | from .soc_platform import * 12 | from .platform import * 13 | from .cli import * 14 | -------------------------------------------------------------------------------- /naps/soc/peripheral.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Callable 3 | 4 | from amaranth import * 5 | 6 | from .memorymap import MemoryMap 7 | 8 | __all__ = ["Peripheral", "Response"] 9 | 10 | 11 | class Response(Enum): 12 | OK = 0 13 | ERR = 1 14 | 15 | 16 | HandleRead = Callable[[Module, Signal, Signal, Callable[[Response], None]], None] 17 | HandleWrite = Callable[[Module, Signal, Signal, Callable[[Response], None]], None] 18 | 19 | 20 | class Peripheral(Elaboratable): 21 | def __init__( 22 | self, 23 | handle_read: HandleRead, 24 | handle_write: HandleWrite, 25 | memorymap: MemoryMap, 26 | name: str | None = None 27 | ): 28 | """ 29 | A `Peripheral` is a thing that is memorymaped in the SOC. 30 | It gets collected and wired up automatically to a platform dependent `Controller` implementation (e.g. an 31 | `AxiLiteController`) by the concrete SOCPlatform (e.g. the `ZynqSocPlatform`). 32 | 33 | :param handle_read: a function with the signature handle_read(m, addr, data, read_done) that is used to insert 34 | logic to the read path. Read_done is a function that gets a Response as an argument 35 | :param handle_write: a function with the signature handle_write(m, addr, data, write_done) that is used to 36 | insert logic to the write path. 
class PeripheralsAggregator:
    def __init__(self):
        """
        A helper class that behaves like a Peripheral but proxies its read/write request to downstream peripherals
        based on their memorymap.
        """
        self.downstream_peripherals = []

    def add_peripheral(self, peripheral):
        """Register a downstream peripheral; it must expose callable handle_read / handle_write and a range()."""
        assert callable(peripheral.handle_read) and callable(peripheral.handle_write)
        assert isinstance(peripheral.range(), range)
        self.downstream_peripherals.append(peripheral)

    def range(self):
        """The smallest address range covering all downstream peripherals (O(n) scan)."""
        return range(
            min(p.range().start for p in self.downstream_peripherals),
            max(p.range().stop for p in self.downstream_peripherals)
        )

    def handle_read(self, m, addr, data, read_done_callback):
        self._handle(m, addr, data, read_done_callback, is_write=False)

    def handle_write(self, m, addr, data, write_done_callback):
        self._handle(m, addr, data, write_done_callback, is_write=True)

    def _handle(self, m, addr, data, done_callback, is_write):
        """Shared dispatch: route the access to the peripheral whose (base-relative)
        address range contains `addr`, or signal Response.ERR if none matches.

        Previously this logic was duplicated in handle_read/handle_write and
        re-invoked self.range() (itself an O(n) scan) twice per peripheral
        inside the loop; the base address is now computed once up front.
        """
        # guard: range() raises on an empty peripheral list, and the loop body
        # never runs in that case anyway
        base = self.range().start if self.downstream_peripherals else 0
        for cond, peripheral in iterator_with_if_elif(self.downstream_peripherals, m):
            address_range = peripheral.memorymap.absolute_range_of_direct_children.range()
            translated_address_range = range(
                address_range.start - base,
                address_range.stop - base,
            )
            with cond((addr >= translated_address_range.start) & (addr < translated_address_range.stop)):
                if is_write:
                    peripheral.handle_write(m, addr - translated_address_range.start, data, done_callback)
                else:
                    peripheral.handle_read(m, addr - translated_address_range.start, data, done_callback)
        if self.downstream_peripherals:
            with m.Else():
                done_callback(Response.ERR)
        else:
            done_callback(Response.ERR)
/naps/soc/platform/__init__.py: -------------------------------------------------------------------------------- 1 | from .jtag import * 2 | from .zynq import * 3 | from .sim import * 4 | -------------------------------------------------------------------------------- /naps/soc/platform/jtag/__init__.py: -------------------------------------------------------------------------------- 1 | from .jtag_soc_platform import * 2 | -------------------------------------------------------------------------------- /naps/soc/platform/jtag/jtag_soc_platform.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from amaranth import Fragment, Signal, Module, ClockSignal, ClockDomain 3 | from amaranth.build.run import BuildProducts 4 | from amaranth.vendor import LatticePlatform, XilinxPlatform 5 | 6 | from ... import SocPlatform, Address, PeripheralsAggregator, PERIPHERAL_DOMAIN, program_fatbitstream_local 7 | 8 | __all__ = ["JTAGSocPlatform"] 9 | 10 | from ...fatbitstream import File 11 | 12 | 13 | class JTAGSocPlatform(SocPlatform): 14 | base_address = Address(address=0x0000_0000, bit_offset=0, bit_len=0xFFFF_FFFF * 8) 15 | 16 | @staticmethod 17 | def can_wrap(platform): 18 | return ( 19 | isinstance(platform, LatticePlatform) and platform.family == "ecp5" or 20 | isinstance(platform, LatticePlatform) and platform.family == "machxo2" or 21 | isinstance(platform, XilinxPlatform) and platform.family == "series7" 22 | ) 23 | 24 | def __init__(self, platform): 25 | super().__init__(platform) 26 | 27 | self.jtag_active = Signal() 28 | self.jtag_debug_signals = Signal(32) 29 | 30 | def peripherals_connect_hook(platform, top_fragment: Fragment): 31 | from naps import JTAGPeripheralConnector 32 | if platform.peripherals: 33 | aggregator = PeripheralsAggregator() 34 | for peripheral in platform.peripherals: 35 | aggregator.add_peripheral(peripheral) 36 | 37 | m = Module() 38 | m.submodules.jtag_controller = 
JTAGPeripheralConnector(aggregator, jtag_domain="jtag") 39 | 40 | m.domains += ClockDomain(PERIPHERAL_DOMAIN) 41 | m.d.comb += ClockSignal(PERIPHERAL_DOMAIN).eq(ClockSignal("jtag")) 42 | 43 | platform.to_inject_subfragments.append((m, "jtag")) 44 | 45 | self.prepare_hooks.append(peripherals_connect_hook) 46 | 47 | def pack_bitstream_fatbitstream(self, name: str, build_products: BuildProducts): 48 | if isinstance(self, LatticePlatform) and self.toolchain == "Diamond": 49 | yield File("bitstream_jtag.svf", build_products.get(f"{name}_sram.svf")) 50 | else: 51 | yield File("bitstream_jtag.svf", build_products.get(f"{name}.svf")) 52 | yield from self._wrapped_platform.generate_openocd_conf() 53 | yield 'openocd -f openocd.cfg -c "svf -tap dut.tap -quiet -progress bitstream_jtag.svf; shutdown"' 54 | 55 | def program_fatbitstream(self, name, **kwargs): 56 | program_fatbitstream_local(name, **kwargs) 57 | 58 | def pydriver_memory_accessor(self, _memorymap): 59 | return (Path(__file__).parent / "memory_accessor_openocd.py").read_text() 60 | -------------------------------------------------------------------------------- /naps/soc/platform/sim/__init__.py: -------------------------------------------------------------------------------- 1 | from .sim_soc_platform import * 2 | -------------------------------------------------------------------------------- /naps/soc/platform/zynq/__init__.py: -------------------------------------------------------------------------------- 1 | from .zynq_soc_platform import * 2 | -------------------------------------------------------------------------------- /naps/soc/platform/zynq/memory_accessor_devmem.py: -------------------------------------------------------------------------------- 1 | import mmap 2 | import os 3 | import struct 4 | from math import ceil 5 | 6 | 7 | class DevMemAccessor: 8 | word = 4 9 | mask = ~(word - 1) 10 | 11 | def __init__(self, base_addr=0x4000_0000, bytes=None, filename='/dev/mem'): 12 | if bytes is None: 13 | 
def flip32(data):
    """Byte-swap every 32-bit word of `data`.

    Used when converting a Xilinx .bit file to a raw .bin: each configuration
    word's byte order has to be reversed for the zynq configuration interface.

    Bug fix: as previously written, `sb` was used but never defined (NameError
    on the first word) and `sl` used the native struct format 'I'; both structs
    are now defined with explicit endianness so unpacking little-endian and
    re-packing big-endian performs the per-word byte flip.

    :param data: bytes-like object whose length is a multiple of 4
    :return: a bytearray of the same length with each 32-bit word byte-reversed
    """
    sl = struct.Struct('<I')  # read words little-endian
    sb = struct.Struct('>I')  # write words big-endian (net effect: byte flip)
    d = bytearray(len(data))
    for offset in range(0, len(data), 4):
        sb.pack_into(d, offset, sl.unpack_from(data, offset)[0])
    return d
# a decorator to mark methods in elaboratables that should end up in the pydriver
# all DriverMethod instances are collected and shipped with the pydriver

__all__ = ["driver_method", "driver_property", "driver_init", "DriverData"]


class DriverItem:
    """Common base for everything that is collected into the pydriver."""
    pass


class DriverMethod(DriverItem):
    """A python function shipped to the pydriver, flavored as a plain method,
    a property, or an init hook (run once when the driver is set up)."""

    def __init__(self, function, is_property=False, is_init=False):
        self.is_property = is_property
        self.is_init = is_init
        self.function = function

    def __repr__(self):
        # the repr doubles as the human-readable placeholder in memorymap dumps
        if self.is_property:
            return "driver_property"
        if self.is_init:
            return "driver_init()"
        return "driver_method()"


def driver_method(function):
    """Mark `function` as a method of the generated pydriver."""
    return DriverMethod(function)


def driver_property(function):
    """Mark `function` as a (read-only) property of the generated pydriver."""
    return DriverMethod(function, is_property=True)


def driver_init(function):
    """Mark `function` to be executed when the pydriver is initialized."""
    return DriverMethod(function, is_init=True)


class DriverData(DriverItem):
    """Static payload data shipped alongside the pydriver code."""

    def __init__(self, data):
        self.data = data

    def __repr__(self):
        return ""
histfile=os.path.expanduser("~/.pydriver-history")): 7 | code.InteractiveConsole.__init__(self, locals, filename) 8 | self.init_history(histfile) 9 | 10 | def init_history(self, histfile): 11 | readline.parse_and_bind("tab: complete") # readline 12 | readline.parse_and_bind("bind ^I rl_complete") # editline (on macOS) 13 | if hasattr(readline, "read_history_file"): 14 | try: 15 | readline.read_history_file(histfile) 16 | except FileNotFoundError: 17 | pass 18 | atexit.register(self.save_history, histfile) 19 | 20 | def save_history(self, histfile): 21 | readline.set_history_length(1000) 22 | readline.write_history_file(histfile) 23 | 24 | shell = PydriverConsole(locals()) 25 | 26 | # setup the design variable 27 | design = Design(MemoryAccessor()) 28 | self = design # this is a hack to be able to copy & paste driver code directly 29 | 30 | print("welcome to the python shell to interact with the fpga") 31 | print("interact with it over the `design` variable and tab completion") 32 | 33 | import rlcompleter # this import is not really unused but rather monkey-patches tab-completion into the shell 34 | shell.interact(banner="") 35 | -------------------------------------------------------------------------------- /naps/soc/smoke_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from amaranth import * 3 | from naps import ZynqSocPlatform, SimPlatform, OutputIo 4 | from naps.cores.axi import axil_read 5 | from naps.cores.hdmi import generate_modeline, HdmiTx 6 | from naps.soc.pydriver.driver_items import DriverItem 7 | 8 | 9 | class SocSmokeTest(unittest.TestCase): 10 | def test_hdmi_registers(self, testdata=0x1): 11 | platform = ZynqSocPlatform(SimPlatform()) 12 | 13 | class Pins: 14 | def __init__(self): 15 | self.r = OutputIo() 16 | self.g = OutputIo() 17 | self.b = OutputIo() 18 | self.clock = OutputIo() 19 | 20 | dut = HdmiTx(Pins(), generate_clocks=False, modeline=generate_modeline(640, 480, 60)) 21 | 
22 | platform.add_sim_clock("pix", 117.5e6) 23 | 24 | def testbench(): 25 | axi = platform.axi_lite_master 26 | memorymap = platform.memorymap 27 | for name, addr in memorymap.flattened.items(): 28 | if not isinstance(addr, DriverItem): 29 | yield from axil_read(axi, addr.address) 30 | 31 | platform.sim(dut, (testbench, "axi_lite")) 32 | -------------------------------------------------------------------------------- /naps/soc/soc_platform.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | from amaranth import Fragment 4 | 5 | from .hooks import csr_and_driver_item_hook, address_assignment_hook, peripherals_collect_hook 6 | from .pydriver.generate import pydriver_hook 7 | 8 | __all__ = ["SocPlatform", "soc_platform_name", "PERIPHERAL_DOMAIN"] 9 | 10 | PERIPHERAL_DOMAIN = "peripheral_domain" 11 | 12 | class SocPlatform(ABC): 13 | base_address = None 14 | _wrapped_platform = None 15 | 16 | # we build a new type that combines the soc and the real platform class 17 | def __new__(cls, platform, *args, **kwargs): 18 | return super(SocPlatform, cls).__new__(type(cls.__name__, (cls, platform.__class__), vars(platform))) 19 | 20 | # we pass through all platform methods, because we pretend to be a platform 21 | def __getattr__(self, item): 22 | return getattr(self._wrapped_platform, item) 23 | 24 | def __init__(self, platform): 25 | platform._soc_platform = self 26 | 27 | self._wrapped_platform = platform 28 | 29 | self.prepare_hooks = [] 30 | self.to_inject_subfragments = [] 31 | self.final_to_inject_subfragments = [] 32 | 33 | self.prepare_hooks.append(csr_and_driver_item_hook) 34 | self.prepare_hooks.append(address_assignment_hook) 35 | self.prepare_hooks.append(peripherals_collect_hook) 36 | self.prepare_hooks.append(pydriver_hook) 37 | 38 | # we override the prepare method of the real platform to be able to inject stuff into the design 39 | def prepare_soc(self, elaboratable): 40 | print("# ELABORATING 
def soc_platform_name(obj):
    """Return a short human-readable name for a SocPlatform class or instance.

    `None` maps to the string "None"; anything else is resolved to its class
    and the "SocPlatform" suffix is stripped from the class name
    (e.g. ZynqSocPlatform -> "Zynq").
    """
    if obj is None:
        return "None"
    cls = obj if isinstance(obj, type) else type(obj)
    return cls.__name__.replace("SocPlatform", "")
def get_elaboratable(frag: Fragment):
    """Return the Elaboratable that produced `frag`, or None.

    Instances carry no origin we care about. Otherwise the first origin that
    is an Elaboratable (but not a plain Module) is returned, mirroring the
    sibling get_module() below.

    Bug fix: the previous version returned the generator expression itself
    instead of its first element, so callers never received the Elaboratable
    and the exception handler was dead code (building a generator cannot raise
    StopIteration). The bare `except:` is also narrowed to StopIteration,
    matching get_module().
    """
    if isinstance(frag, Instance):
        return None
    try:
        return next(o for o in frag.origins
                    if isinstance(o, Elaboratable)
                    and not isinstance(o, Module))
    except StopIteration:
        print(f"no elaboratable found for {frag}")
        return None
| """ 10 | A stream that carries a payload and can separate Packets via a first signal that is asserted on the 11 | first word of a packet 12 | """ 13 | 14 | def __init__(self, payload_shape, name=None, src_loc_at=1): 15 | super().__init__(payload_shape, name, src_loc_at=1 + src_loc_at) 16 | self.first = Signal() @ DOWNWARDS 17 | -------------------------------------------------------------------------------- /naps/stream/formal_util_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from amaranth import * 4 | 5 | from naps.stream.formal_util import verify_stream_output_contract, LegalStreamSource 6 | from naps import BasicStream 7 | 8 | 9 | class BrokenStreamSource(Elaboratable): 10 | def __init__(self, mode): 11 | self.output = BasicStream(32) 12 | self.mode = mode 13 | 14 | def elaborate(self, platform): 15 | m = Module() 16 | 17 | if self.mode == "valid_is_ready": 18 | m.d.comb += self.output.valid.eq(self.output.ready) 19 | elif self.mode == "payload_unsteady": 20 | m.d.sync += self.output.payload.eq(self.output.payload + 1) 21 | m.d.comb += self.output.valid.eq(1) 22 | elif self.mode == "dont_wait_for_ready": 23 | m.d.sync += self.output.valid.eq(~self.output.valid) 24 | 25 | return m 26 | 27 | 28 | class FormalUtilTestCase(unittest.TestCase): 29 | def test_catch_valid_depends_on_ready(self): 30 | with self.assertRaises(AssertionError): 31 | verify_stream_output_contract(BrokenStreamSource("valid_is_ready")) 32 | 33 | def test_catch_changing_payload(self): 34 | with self.assertRaises(AssertionError): 35 | verify_stream_output_contract(BrokenStreamSource("payload_unsteady")) 36 | 37 | def test_catch_dont_wait_for_ready(self): 38 | with self.assertRaises(AssertionError): 39 | verify_stream_output_contract(BrokenStreamSource("dont_wait_for_ready")) 40 | 41 | def test_legal_stream_source(self): 42 | verify_stream_output_contract(LegalStreamSource(BasicStream(32))) 43 | 
from itertools import count

from amaranth import *
from naps.util.python_misc import camel_to_snake

__all__ = ["Pipeline"]


class Pipeline:
    """A helper (syntactic sugar) to easier write pipelines of stream cores"""
    def __init__(self, m, prefix="", start_domain="sync"):
        # m: the Module that pipeline members are added to as submodules
        # prefix: optional prefix prepended to every submodule name
        # start_domain: clock domain the first pipeline member is renamed into
        self.m = m
        self.prefix = prefix
        self.next_domain = start_domain
        self.pipeline_members = {}

    def __setitem__(self, key, value):
        """Add `value` as a named pipeline stage.

        The stage is DomainRenamer'd into the current `next_domain`, registered
        as a submodule of `m`, and — if it declares an `output_domain` — moves
        the pipeline's clock domain forward for the following stages.
        """
        value = DomainRenamer(self.next_domain)(value)
        self.pipeline_members[key] = value
        self.m.submodules[key if not self.prefix else f'{self.prefix}_{key}'] = value

        # TODO: this concept breaks when a DomainRenamer is in the game;
        # rethink how we handle this in that case
        if hasattr(value, "output_domain"):
            self.next_domain = value.output_domain

    def __iadd__(self, other):
        """Append a stage whose name is derived from its class name
        (snake_case), deduplicated with a numeric suffix if needed."""
        name = camel_to_snake(other.__class__.__name__)
        for i in count():
            n = name if i == 0 else f'{name}_{i}'
            if n not in self.pipeline_members:
                name = n
                break
        self.__setitem__(name, other)
        return self

    def __getitem__(self, item):
        # look up a previously added stage by name
        return self.pipeline_members[item]

    @property
    def last(self):
        # the most recently added pipeline member
        return list(self.pipeline_members.values())[-1]

    @property
    def output(self):
        # the output stream of the last pipeline member
        return self.last.output
def write_to_stream(stream: Stream, timeout=100, **kwargs):
    """Simulation helper: drive one word onto `stream` (fields given as kwargs) and wait until it is accepted."""
    for field, value in kwargs.items():
        yield stream[field].eq(value)
    yield stream.valid.eq(1)
    yield from wait_for(stream.ready, timeout)
    yield stream.valid.eq(0)


def read_from_stream(stream: Stream, extract="payload", timeout=100):
    """Simulation helper: accept one word from `stream`.

    `extract` is either a single field name (returns that value) or an iterable
    of field names (returns a tuple of values, in order).
    """
    yield stream.ready.eq(1)
    yield from wait_for(stream.valid, timeout)
    if isinstance(extract, str):
        result = (yield stream[extract])
    elif isinstance(extract, Iterable):
        values = []
        for field in extract:
            values.append((yield stream[field]))
        result = tuple(values)
    else:
        raise TypeError("extract must be either a string or an iterable of strings")

    yield stream.ready.eq(0)
    return result


def write_packet_to_stream(stream: PacketizedStream, payload_array, timeout=100):
    """Simulation helper: write a whole packet, asserting `last` on the final word only."""
    final_index = len(payload_array) - 1
    for i, p in enumerate(payload_array):
        yield from write_to_stream(stream, timeout, payload=p, last=(1 if i == final_index else 0))


def read_packet_from_stream(stream: PacketizedStream, timeout=100, allow_pause=True, pause_after_word=0):
    """Simulation helper: read words until `last` is seen and return them as a list.

    With allow_pause=False, every word after the first must arrive within one cycle.
    """
    packet = []
    is_first_word = True
    while True:
        word_timeout = timeout if (is_first_word or allow_pause) else 1
        payload, last = yield from read_from_stream(stream, extract=("payload", "last"), timeout=word_timeout)
        yield from do_nothing(pause_after_word)
        is_first_word = False
        packet.append(payload)
        if last:
            return packet
def stream_transformer(input_stream: BasicStream, output_stream: BasicStream, m: Module, *, latency: int, handle_out_of_band=True, allow_partial_out_of_band=False):
    """
    A utility to help you writing fixed latency stream ip that converts one input word to one output word.

    :warning:
    You have to make sure that you only sample the input when ready and valid of it are high for transformers with latency
    otherwise you are not going to comply to the stream contract. In this case you MUST place a StreamBuffer after your core.

    @param handle_out_of_band: determines if this core should connect the out of bands signals or if it is done manually
    @param allow_partial_out_of_band: allow the out of band signals of the streams to differ
    @param input_stream: the input stream
    @param output_stream: the output stream
    @param m: an amaranth HDL Module
    @param latency: the latency of the transform data path in cycles (only 0 and 1 are implemented)
    """
    if latency == 0:
        # combinational pass-through: the handshake is wired straight through;
        # the payload transformation itself is done by the caller
        m.d.comb += output_stream.connect_upstream(input_stream, only=["ready", "valid"])
        if handle_out_of_band:
            if not allow_partial_out_of_band:
                assert list(input_stream.out_of_band_signals.keys()) == list(output_stream.out_of_band_signals.keys())
            # forward every out-of-band signal present on both streams combinationally
            for k in input_stream.out_of_band_signals.keys():
                if k in output_stream.out_of_band_signals:
                    m.d.comb += output_stream[k].eq(input_stream[k])

    elif latency == 1:
        input_transaction = input_stream.ready & input_stream.valid
        output_transaction = output_stream.ready & output_stream.valid

        with m.If(input_transaction):
            if handle_out_of_band:
                if not allow_partial_out_of_band:
                    assert list(input_stream.out_of_band_signals.keys()) == list(output_stream.out_of_band_signals.keys())
                # out-of-band signals are registered so they stay aligned with the
                # one-cycle data path of the transformer
                for k in input_stream.out_of_band_signals.keys():
                    if k in output_stream.out_of_band_signals:
                        m.d.sync += output_stream[k].eq(input_stream[k])

        # output_produce is high one cycle after an input transaction, i.e. exactly
        # when the transformed word appears at the output registers
        output_produce = Signal()
        m.d.sync += output_produce.eq(input_transaction)

        # has_output tracks whether a produced word is still waiting to be accepted
        # downstream (occupancy of the single pipeline stage)
        has_output = Signal()
        with m.If(has_output | output_produce):
            m.d.comb += output_stream.valid.eq(1)
        # occupancy arithmetic: +1 when a word is produced, -1 when one is accepted
        m.d.sync += has_output.eq(has_output + output_produce - output_transaction > 0)
        # accept new input when downstream is ready or the stage is (and will stay) empty
        m.d.comb += input_stream.ready.eq(output_stream.ready | (~has_output & ~output_produce))

    else:
        raise NotImplementedError()
class TestNMigenMisc(unittest.TestCase):
    """Tests for the small amaranth helper functions in naps.util.amaranth_misc."""

    def test_nMin(self):
        self.assertEqual(3, resolve(nMin(3, 7)))
        self.assertEqual(3, resolve(nMin(7, 3)))

    def test_nMax(self):
        self.assertEqual(7, resolve(nMax(3, 7)))
        self.assertEqual(7, resolve(nMax(7, 3)))

    def test_nAny(self):
        self.assertEqual(1, resolve(nAny([Const(1), Const(0), Const(0), Const(0)])))
        self.assertEqual(1, resolve(nAny([Const(0), Const(0), Const(1), Const(1)])))
        self.assertEqual(0, resolve(nAny([Const(0), Const(0), Const(0), Const(0)])))

    def test_nAll(self):
        self.assertEqual(1, resolve(nAll([Const(1), Const(1), Const(1), Const(1)])))
        self.assertEqual(0, resolve(nAll([Const(0), Const(0), Const(1), Const(1)])))
        self.assertEqual(0, resolve(nAll([Const(0), Const(0), Const(0), Const(0)])))

    def test_is_pot(self):
        assert_is_pot(2)
        assert_is_pot(4)
        assert_is_pot(64)
        assert_is_pot(512)
        for non_pot in (7, 42, 196):
            with self.assertRaisesRegex(AssertionError, "is not a power of two"):
                assert_is_pot(non_pot)

    def test_log2(self):
        self.assertEqual(1, log2(2))
        self.assertEqual(2, log2(4))
        self.assertEqual(10, log2(1024))
        # BUG FIX: this previously called assert_is_pot(196) (copy-pasted from
        # test_is_pot above) and therefore never exercised log2's error path
        with self.assertRaisesRegex(AssertionError, "is not a power of two"):
            log2(196)

    def test_ends_with(self):
        self.assertTrue(resolve(ends_with(Const(0b0001, 4), "01")))
        self.assertFalse(resolve(ends_with(Const(0b0001, 4), "10")))
        # NOTE(review): the width of 4 truncates 0b10001011 down to 0b1011 —
        # presumably a width of 8 was intended; the expected results are the
        # same either way. TODO confirm and widen the constants.
        self.assertTrue(resolve(ends_with(Const(0b10001011, 4), "1011")))
        self.assertTrue(resolve(ends_with(Const(0b10001011, 4), "011")))
        self.assertFalse(resolve(ends_with(Const(0b10001011, 4), "0110")))

    def test_nAbsDifference(self):
        self.assertEqual(7, resolve(nAbsDifference(10, 3)))
        self.assertEqual(7, resolve(nAbsDifference(3, 10)))
        self.assertEqual(3, resolve(nAbsDifference(12, 9)))
        self.assertEqual(3, resolve(nAbsDifference(9, 12)))
# Workarounds for https://github.com/amaranth-lang/amaranth/issues/372
# (DomainRenamer and Sample do not work together)

from amaranth import *
from .amaranth_misc import delay_by


__all__ = ["Sample", "Rose", "Fell", "Changed", "NewHere"]


def Sample(m, signal: Signal, clocks=1, domain="sync"):
    """Return `signal` delayed by `clocks` cycles of `domain` (the signal itself for clocks == 0)."""
    if clocks == 0:
        return signal
    inner_module = Module()  # we create our own module to be free of all conditional statements
    m.submodules += DomainRenamer(domain)(inner_module)
    return delay_by(signal, clocks, inner_module)


def Rose(m, expr: Signal, domain="sync", clocks=0):
    """Expression that is high when `expr` transitioned 0 -> 1, `clocks` cycles ago."""
    return ~Sample(m, expr, clocks + 1, domain) & Sample(m, expr, clocks, domain)


def Fell(m, expr: Signal, domain="sync", clocks=0):
    """Expression that is high when `expr` transitioned 1 -> 0, `clocks` cycles ago."""
    return Sample(m, expr, clocks + 1, domain) & ~Sample(m, expr, clocks, domain)


def Changed(m, expr: Signal, domain="sync", clocks=0):
    """Expression that is high when `expr` changed value, `clocks` cycles ago."""
    return Sample(m, expr, clocks + 1, domain) != Sample(m, expr, clocks, domain)


def NewHere(m):
    """Expression that is high on the first cycle the calling context becomes active.

    The comb assignment below inherits the conditional context in which NewHere
    is called (e.g. an FSM state), so Rose() on it detects entry into that context.
    """
    _new_here_is_here = Signal()
    m.d.comb += _new_here_is_here.eq(1)
    return Rose(m, _new_here_is_here)
import unittest
from amaranth import *
from naps import SimPlatform, Process, process_delay, do_nothing


class TestProcess(unittest.TestCase):
    """Exercises the Process helper: a linear sequence of FSM segments separated by delays and barriers."""

    def test_basic(self):
        platform = SimPlatform()
        m = Module()

        # comb flags marking which segment of the process is currently active
        stage1 = Signal()
        stage2 = Signal()
        stage3 = Signal()
        end = Signal()
        stage3_barrier = Signal()
        with m.FSM():
            with Process(m, "INITIAL", to="END") as p:
                m.d.comb += stage1.eq(1)
                p += process_delay(m, 10)  # segment boundary: wait 10 cycles
                m.d.comb += stage2.eq(1)
                p += m.If(stage3_barrier)  # segment boundary: wait for the barrier signal
                m.d.comb += stage3.eq(1)  # this will be ignored because we jump directly to the END state
            with m.State("END"):
                m.d.comb += end.eq(1)

        def testbench():
            # right after reset, the first segment is active
            self.assertEqual(1, (yield stage1))
            yield from do_nothing(10)
            # after the 10-cycle delay, the second segment is active
            self.assertEqual(0, (yield stage1))
            self.assertEqual(1, (yield stage2))
            # releasing the barrier lets the process run straight to the END state
            yield stage3_barrier.eq(1)
            yield
            yield
            self.assertEqual(1, (yield end))

        platform.add_sim_clock("sync", 100e6)
        platform.sim(m, testbench)
# utilities for serializing objects into python code. Useful eg. for pydriver

__all__ = ["is_py_serializable", "py_serialize"]


def is_py_serializable(obj):
    """Return True if `obj` can be turned into equivalent python source via repr().

    Exact type checks are used on purpose: subclasses of the builtin types may
    repr() differently and would not round-trip faithfully.
    """
    if obj is None:
        return True
    kind = type(obj)
    if kind in (str, int, float, complex, bool, bytes, bytearray, range):
        return True
    if kind in (list, tuple, set, frozenset):
        # a container is serializable iff all its elements are
        return all(map(is_py_serializable, obj))
    if kind is dict:
        # a dict is serializable iff all keys and values are
        return all(is_py_serializable(k) and is_py_serializable(v) for k, v in obj.items())
    return False


def py_serialize(obj):
    """Serialize `obj` to python source code (its repr); asserts serializability first."""
    assert is_py_serializable(obj)
    return repr(obj)
@lru_cache()
def _parse_yosys_json_cached(verilog_paths: tuple):
    # actual cached worker; takes a hashable tuple of paths
    json = yosys_script(["read_verilog {}".format(path) for path in verilog_paths] + ["write_json"])
    return loads(json)


def parse_yosys_json(verilog_paths):
    """Parse the yosys JSON netlist for the given verilog file(s) (cached).

    :param verilog_paths: a single path string or an iterable of path strings
    :return: the parsed yosys JSON as nested dicts/lists

    BUG FIX: previously @lru_cache was applied directly to this function, which
    raised TypeError for list arguments (lists are unhashable) even though the
    body explicitly handled non-string input. Caching now happens on a
    normalized tuple, so str, list and tuple inputs all work and share the cache.
    """
    if isinstance(verilog_paths, str):
        verilog_paths = (verilog_paths,)
    return _parse_yosys_json_cached(tuple(verilog_paths))


def get_verilog_module_ports(verilog_paths, module_name):
    """Get the ports of a verilog module via yosys.

    :param verilog_paths: a single path string or an iterable of path strings
    :param module_name: the name of the module whose ports are queried
    :return: dict mapping port name -> {"width": int, "direction": "i"|"o"|"io"}
    """

    parsed_json = parse_yosys_json(verilog_paths)
    module = parsed_json["modules"][module_name]
    # copy each port dict before reformatting: the parsed json is shared through
    # the cache and must not be mutated in place
    ports = {name: dict(desc) for name, desc in module["ports"].items()}

    # do some reformatting of the data:
    def abbrev_direction(long_form):
        if long_form in ("output", "o"):
            return "o"
        elif long_form in ("input", "i"):
            return "i"
        elif long_form in ("inout", "io"):
            return "io"
        else:
            raise Exception("Bad direction: {}".format(long_form))

    for k, v in ports.items():
        if "bits" in v:
            v["width"] = len(v["bits"])
            del v["bits"]
        assert "width" in v

        v["direction"] = abbrev_direction(v["direction"])

    return ports
from amaranth import *

from naps import StatusSignal, ControlSignal, driver_method


class DelayF(Elaboratable):
    """Wraps the Lattice ECP5 DELAYF dynamic input-delay primitive.

    The delay is stepped tap-by-tap at runtime through the move/direction
    control signals; the driver methods below provide a host-side API for that.
    """

    def __init__(self, i):
        self.i = i  # the input signal to be delayed

        self.move = ControlSignal()       # strobed to step the delay by one tap
        self.direction = ControlSignal()  # 1 = forward, 0 = backward (see forward()/backward())
        self.at_limit = StatusSignal()    # CFLAG output of the primitive

        self.o = Signal()  # the delayed output signal

    def elaborate(self, platform):
        m = Module()

        m.submodules.instance = Instance(
            "DELAYF",

            i_A=self.i,
            # LOADN held high — presumably this disables loading a fixed delay
            # value so only MOVE stepping is used; confirm with the ECP5 docs
            i_LOADN=Const(1),
            i_MOVE=self.move,
            i_DIRECTION=self.direction,

            o_Z=self.o,
            o_CFLAG=self.at_limit,
        )

        return m

    @driver_method
    def forward(self, count=1):
        # step the delay `count` taps in the forward direction
        self.direction = 1
        for i in range(count):
            self.do_move()

    @driver_method
    def backward(self, count=1):
        # step the delay `count` taps in the backward direction
        self.direction = 0
        for i in range(count):
            self.do_move()

    @driver_method
    def do_move(self):
        # pulse MOVE to advance the delay chain by a single tap
        self.move = 1
        self.move = 0

    @driver_method
    def reset_delay(self):
        # NOTE(review): assumes the delay chain has at most 128 taps, so 128
        # backward steps always reach tap 0 — confirm against the ECP5 sysIO docs
        self.backward(128)

    @driver_method
    def set_delay(self, delay):
        # go to a known position (tap 0) first, then step forward to `delay`
        self.reset_delay()
        self.forward(delay)


class IDDRX2F(Elaboratable):
    """1:4 DDR input deserializer (ECP5 IDDRX2F primitive) clocked by an edge clock."""

    def __init__(self, pin, eclk_domain):
        self.pin = pin
        self.eclk_domain = eclk_domain  # the high-speed edge clock (ECLK) domain

        self.output = Signal(4)  # the four bits captured per SCLK cycle

    def elaborate(self, platform):
        m = Module()

        m.submodules.instance = Instance(
            "IDDRX2F",

            i_D=self.pin,
            i_SCLK=ClockSignal(),
            i_RST=ResetSignal(),
            i_ECLK=ClockSignal(self.eclk_domain),
            # i_ALIGNWD=Const(0, 1),

            o_Q0=self.output[0],
            o_Q1=self.output[1],
            o_Q2=self.output[2],
            o_Q3=self.output[3],
        )

        return m


class IDDRX1F(Elaboratable):
    """1:2 DDR input register (ECP5 IDDRX1F primitive) in the sync domain."""

    def __init__(self, pin):
        self.pin = pin

        self.output = Signal(2)  # the two bits captured per clock cycle (one per edge)

    def elaborate(self, platform):
        m = Module()

        m.submodules.instance = Instance(
            "IDDRX1F",

            i_D=self.pin,
            i_SCLK=ClockSignal(),
            i_RST=ResetSignal(),

            o_Q0=self.output[0],
            o_Q1=self.output[1],
        )

        return m
from amaranth import *

__all__ = ["ISerdes8"]


class ISerdes8(Elaboratable):
    """1:8 DDR input deserializer for MachXO2, wrapping the IDDRX4B primitive."""

    def __init__(self, input, ddr_domain, word_domain, invert=False):
        self.input = input
        self.output = Signal(8)   # the deserialized word; Q0 is mapped to output[7]
        self.bitslip = Signal()   # strobed to shift the word alignment (ALIGNWD)

        self.invert = invert      # invert all bits (e.g. for a swapped differential pair — presumably; confirm)
        self.ddr_domain = ddr_domain    # high-speed DDR bit clock domain (ECLK)
        self.word_domain = word_domain  # word clock domain (SCLK)

    def elaborate(self, platform):
        m = Module()

        iddr_output = Signal(8)
        # replicate() expands the single invert flag to the full word width so the
        # xor either passes all bits through or inverts them all
        m.d.comb += self.output.eq(iddr_output ^ Const(self.invert).replicate(8))
        m.submodules.iddr = Instance(
            "IDDRX4B",

            i_D=self.input,
            i_ECLK=ClockSignal(self.ddr_domain),
            i_SCLK=ClockSignal(self.word_domain),
            i_RST=ResetSignal(self.word_domain),
            i_ALIGNWD=self.bitslip,

            # NOTE(review): Q0..Q7 are wired in reversed order to the output word —
            # presumably Q0 is the earliest-sampled bit and should end up at the
            # MSB; confirm against the Lattice IDDRX4B documentation
            o_Q0=iddr_output[7],
            o_Q1=iddr_output[6],
            o_Q2=iddr_output[5],
            o_Q3=iddr_output[4],
            o_Q4=iddr_output[3],
            o_Q5=iddr_output[2],
            o_Q6=iddr_output[1],
            o_Q7=iddr_output[0],
        )

        return m
type("{}Implementation".format(self.__name__), (), {}) 16 | return self._marker_type 17 | 18 | 19 | class PlatformAgnosticElaboratable(Elaboratable, metaclass=ImplementationMarkerMetaclass): 20 | """ 21 | A helper to write Platform agnostic code. Searches in the vendor directories for the real elaboratable. 22 | """ 23 | 24 | @classmethod 25 | def _search_in_path(cls, path): 26 | marker_type = cls.implementation 27 | basepath = __name__.split(".")[:-1] + [path] 28 | files = [split(p)[-1].replace(".py", "") for p in glob(join(dirname(__file__), path) + "/*.py")] 29 | for file in files: 30 | module = import_module(".".join(basepath + [file])) 31 | for candidate in [getattr(module, k) for k in dir(module)]: 32 | if isinstance(candidate, type) and issubclass(candidate, marker_type): 33 | return candidate 34 | raise PrimitiveNotSupportedByPlatformError() 35 | 36 | def elaborate(self, platform): 37 | if isinstance(platform, XilinxPlatform) and platform.family == "series7": 38 | elaboratable = self._search_in_path("xilinx_s7") 39 | elif isinstance(platform, LatticePlatform) and platform.family == "machxo2": 40 | elaboratable = self._search_in_path("lattice_machxo2") 41 | elif isinstance(platform, LatticePlatform) and platform.family == "ecp5": 42 | elaboratable = self._search_in_path("lattice_ecp5") 43 | elif isinstance(platform, SimPlatform): 44 | return Module() 45 | else: 46 | raise PlatformNotSupportedError() 47 | 48 | return elaboratable.elaborate(self, platform) 49 | 50 | 51 | class PrimitiveNotSupportedByPlatformError(ValueError): 52 | pass 53 | 54 | 55 | class PlatformNotSupportedError(ValueError): 56 | pass 57 | -------------------------------------------------------------------------------- /naps/vendor/xilinx_s7/__init__.py: -------------------------------------------------------------------------------- 1 | from .clocking import * 2 | from .io import * 3 | from .ps7 import * 4 | -------------------------------------------------------------------------------- 
def format_version(version: SCMVersion) -> str:
    """Derive the package version string from the SCM (git) version.

    Only the first two release components are used; a minor of 0 is bumped to 1.
    Commits after a tag get a ".devN" suffix where N is the commit distance.

    BUG FIX: the original sliced up to three version components ([:3]) but
    unpacked them into two names, raising ValueError for tags like "1.2.3";
    it also left a debug print() polluting the build output.
    """
    major, minor = (int(n) for n in str(version.version).split(".")[:2])
    if minor == 0:
        minor = 1
    if version.distance is None:
        return f"{major}.{minor}"
    else:
        return f"{major}.{minor}.dev{version.distance}"


def pdm_build_initialize(context):
    """PDM build hook: inject the documentation URL and strip git dependencies."""
    if "DOC_SHA" in os.environ:
        context.config.metadata["urls"]["Documentation"] = f"https://docs.niemo.de/naps/commit/{os.environ['DOC_SHA']}".strip()

    # we cannot depend on git dependencies for pypi so we filter out amaranth-boards as it is not published at the time I write this
    context.config.metadata["dependencies"] = [dep for dep in context.config.metadata["dependencies"] if "git" not in dep]
keywords = ["amaranth", "apertus", "fpga", "gateware", "video", "camera"]
filterwarnings = ["ignore::cryptography.utils.CryptographyDeprecationWarning"] 70 | --------------------------------------------------------------------------------