├── litevideo ├── __init__.py ├── csc │ ├── __init__.py │ ├── test │ │ ├── lena.png │ │ ├── Makefile │ │ ├── rgb16f2rgb_tb.py │ │ ├── rgb2ycbcr_tb.py │ │ ├── ycbcr2rgb_tb.py │ │ ├── ycbcr_resampling_tb.py │ │ ├── rgb2rgb16f_tb.py │ │ ├── ycbcr422to444_tb.py │ │ └── common.py │ ├── common.py │ ├── rgb16f2rgb.py │ ├── ycbcr422to444.py │ ├── ycbcr444to422.py │ ├── ycbcr2rgb.py │ ├── rgb2ycbcr.py │ └── rgb2rgb16f.py ├── output │ ├── hdmi │ │ ├── __init__.py │ │ ├── encoder.py │ │ ├── s7.py │ │ └── s6.py │ ├── test │ │ ├── Makefile │ │ └── core_tb.py │ ├── common.py │ ├── driver.py │ ├── pattern.py │ ├── __init__.py │ └── core.py ├── terminal │ ├── __init__.py │ ├── cp437.bin │ ├── ctest.jpg │ ├── circuit.png │ ├── terminal.bin │ ├── screen-init.bin │ ├── screenshot.jpg │ ├── README.md │ └── core.py ├── float_arithmetic │ ├── __init__.py │ ├── test │ │ ├── Makefile │ │ ├── float_conv.py │ │ ├── floatadd_tb.py │ │ ├── floatmult_tb.py │ │ └── common.py │ ├── common.py │ ├── floatmult.py │ └── floatadd.py └── input │ ├── common.py │ ├── charsync.py │ ├── wer.py │ ├── chansync.py │ ├── dma.py │ ├── __init__.py │ ├── edid.py │ ├── clocking.py │ ├── analysis.py │ └── decoding.py ├── MANIFEST.in ├── setup.py ├── .gitignore ├── LICENSE └── README.md /litevideo/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /litevideo/csc/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /litevideo/output/hdmi/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /litevideo/terminal/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /litevideo/float_arithmetic/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include litevideo/terminal/cp437.bin 2 | include litevideo/terminal/screen-init.bin 3 | -------------------------------------------------------------------------------- /litevideo/csc/test/lena.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litex-hub/litevideo/HEAD/litevideo/csc/test/lena.png -------------------------------------------------------------------------------- /litevideo/terminal/cp437.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litex-hub/litevideo/HEAD/litevideo/terminal/cp437.bin -------------------------------------------------------------------------------- /litevideo/terminal/ctest.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litex-hub/litevideo/HEAD/litevideo/terminal/ctest.jpg -------------------------------------------------------------------------------- /litevideo/terminal/circuit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litex-hub/litevideo/HEAD/litevideo/terminal/circuit.png -------------------------------------------------------------------------------- /litevideo/terminal/terminal.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litex-hub/litevideo/HEAD/litevideo/terminal/terminal.bin -------------------------------------------------------------------------------- 
/litevideo/terminal/screen-init.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litex-hub/litevideo/HEAD/litevideo/terminal/screen-init.bin -------------------------------------------------------------------------------- /litevideo/terminal/screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/litex-hub/litevideo/HEAD/litevideo/terminal/screenshot.jpg -------------------------------------------------------------------------------- /litevideo/output/test/Makefile: -------------------------------------------------------------------------------- 1 | HDLDIR = ../../../ 2 | PYTHON = python3 3 | 4 | CMD = PYTHONPATH=$(HDLDIR) $(PYTHON) 5 | 6 | core_tb: 7 | $(CMD) core_tb.py 8 | 9 | clean: 10 | rm -rf *.vcd 11 | 12 | .PHONY: clean -------------------------------------------------------------------------------- /litevideo/float_arithmetic/test/Makefile: -------------------------------------------------------------------------------- 1 | HDLDIR = ../../../ 2 | PYTHON = python3 3 | 4 | CMD = PYTHONPATH=$(HDLDIR) $(PYTHON) 5 | 6 | floatmult_tb: 7 | $(CMD) floatmult_tb.py 8 | 9 | floatadd_tb: 10 | $(CMD) floatadd_tb.py 11 | 12 | clean: 13 | rm -rf *_*.png *.vvp *.v *.vcd 14 | 15 | .PHONY: clean -------------------------------------------------------------------------------- /litevideo/csc/test/Makefile: -------------------------------------------------------------------------------- 1 | HDLDIR = ../../../ 2 | PYTHON = python3 3 | 4 | CMD = PYTHONPATH=$(HDLDIR) $(PYTHON) 5 | 6 | rgb2ycbcr_tb: 7 | $(CMD) rgb2ycbcr_tb.py 8 | 9 | ycbcr2rgb_tb: 10 | $(CMD) ycbcr2rgb_tb.py 11 | 12 | rgb2rgb16f_tb: 13 | $(CMD) rgb2rgb16f_tb.py 14 | 15 | rgb16f2rgb_tb: 16 | $(CMD) rgb16f2rgb_tb.py 17 | 18 | ycbcr_resampling_tb: 19 | $(CMD) ycbcr_resampling_tb.py 20 | 21 | clean: 22 | rm -rf *_*.png *.vvp *.v *.vcd 23 | 24 | .PHONY: clean 
-------------------------------------------------------------------------------- /litevideo/input/common.py: -------------------------------------------------------------------------------- 1 | control_tokens = [ 2 | # Control tokens are designed to have a large number (7) of transitions to 3 | # help the receiver synchronize its clock with the transmitter clock. 4 | # Control tokens are encoded using the values in the table below. 5 | # 9........0 C1 C0 6 | 0b1101010100, # 0 0 7 | 0b0010101011, # 0 1 8 | 0b0101010100, # 1 0 9 | 0b1010101011, # 1 1 10 | ] 11 | 12 | channel_layout = [("raw", 10), ("d", 8), ("c", 2), ("de", 1)] 13 | -------------------------------------------------------------------------------- /litevideo/float_arithmetic/test/float_conv.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def float2binint(f): 4 | x = int(bin(np.float16(f).view('H'))[2:].zfill(16),2) 5 | return x 6 | 7 | 8 | def binint2float(x): 9 | xs = bin(x)[2:].zfill(16) 10 | frac = '1'+xs[6:16] 11 | fracn = int(frac,2) 12 | exp = xs[1:6] 13 | expn = int(exp,2) -15 14 | 15 | if expn == -15 : 16 | expn = -14 17 | frac = '0'+xs[6:16] 18 | fracn = int(frac,2) 19 | 20 | sign = xs[0] 21 | signv = int(sign,2) 22 | 23 | y = ((-1)**signv)*(2**(expn))*fracn*(2**(-10)) 24 | return y 25 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from setuptools import setup 4 | from setuptools import find_packages 5 | 6 | setup( 7 | name="litevideo", 8 | description="Small footprint and configurable video cores", 9 | author="Florent Kermarrec", 10 | author_email="florent@enjoy-digital.fr", 11 | url="http://enjoy-digital.fr", 12 | download_url="https://github.com/enjoy-digital/litevideo", 13 | test_suite="test", 14 | license="BSD", 15 | python_requires="~=3.6", 16 | 
packages=find_packages(exclude=("test*", "sim*", "doc*", "examples*")), 17 | include_package_data=True, 18 | ) 19 | -------------------------------------------------------------------------------- /litevideo/float_arithmetic/common.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect import stream 4 | 5 | def in_layout(dw): 6 | return [("in1", dw), ("in2", dw)] 7 | 8 | def out_layout(dw): 9 | return [("out", dw)] 10 | 11 | 12 | class LeadOne(Module): 13 | """ 14 | This return the position of leading one of the Signal Object datai, as the 15 | leadone Signal object. Function input dw defines the data width of datai 16 | Signal object. 17 | """ 18 | def __init__(self, dw): 19 | self.datai = Signal(dw) 20 | self.leadone = Signal(max=dw) 21 | for j in range(dw): 22 | self.comb += If(self.datai[j], self.leadone.eq(dw - j - 1)) 23 | -------------------------------------------------------------------------------- /litevideo/csc/common.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect import stream 4 | 5 | 6 | def saturate(i, o, minimum, maximum): 7 | return [ 8 | If(i > maximum, 9 | o.eq(maximum) 10 | ).Elif(i < minimum, 11 | o.eq(minimum) 12 | ).Else( 13 | o.eq(i) 14 | ) 15 | ] 16 | 17 | 18 | def coef(value, cw=None): 19 | return int(value * 2**cw) if cw is not None else value 20 | 21 | def rgb_layout(dw): 22 | return [("r", dw), ("g", dw), ("b", dw)] 23 | 24 | def rgb16f_layout(dw): 25 | return [("rf", dw), ("gf", dw), ("bf", dw)] 26 | 27 | def ycbcr444_layout(dw): 28 | return [("y", dw), ("cb", dw), ("cr", dw)] 29 | 30 | def ycbcr422_layout(dw): 31 | return [("y", dw), ("cb_cr", dw)] 32 | 33 | def pix_layout(dw): 34 | return [("pix", dw)] 35 | 36 | def pixf_layout(dw): 37 | return [("pixf", dw)] 38 | 39 | -------------------------------------------------------------------------------- 
/litevideo/output/common.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect import stream 4 | 5 | hbits = 12 6 | vbits = 12 7 | 8 | def list_signals(layout): 9 | return [f[0] for f in layout] 10 | 11 | frame_parameter_layout = [ 12 | ("hres", hbits), 13 | ("hsync_start", hbits), 14 | ("hsync_end", hbits), 15 | ("hscan", hbits), 16 | ("vres", vbits), 17 | ("vsync_start", vbits), 18 | ("vsync_end", vbits), 19 | ("vscan", vbits) 20 | ] 21 | 22 | frame_dma_layout = [ 23 | ("base", 32), 24 | ("length", 32), 25 | ] 26 | 27 | frame_timing_layout = [ 28 | ("hsync", 1), 29 | ("vsync", 1), 30 | ("de", 1) 31 | ] 32 | 33 | color_bar_parameter_layout = [("hres", hbits)] 34 | 35 | def video_out_layout(dw): 36 | param_layout = frame_timing_layout 37 | payload_layout = [("data", dw)] 38 | return stream.EndpointDescription(payload_layout, param_layout) 39 | 40 | def phy_layout(mode): 41 | if mode == "raw": 42 | param_layout = frame_timing_layout # not used 43 | payload_layout = [("c0", 10), ("c1", 10), ("c2", 11)] 44 | return stream.EndpointDescription(payload_layout, param_layout) 45 | else: 46 | param_layout = frame_timing_layout 47 | payload_layout = [("r", 8), ("g", 8), ("b", 8)] 48 | return stream.EndpointDescription(payload_layout, param_layout) 49 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # 
before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | -------------------------------------------------------------------------------- /litevideo/float_arithmetic/test/floatadd_tb.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | from litex.soc.interconnect.stream_sim import * 5 | 6 | from litevideo.float_arithmetic.common import * 7 | from litevideo.float_arithmetic.floatadd import FloatAdd 8 | from litevideo.float_arithmetic.test.common import * 9 | 10 | class TB(Module): 11 | def __init__(self): 12 | self.submodules.streamer = PacketStreamer(EndpointDescription([("data", 32)])) 13 | self.submodules.floatadd = FloatAdd() 14 | self.submodules.logger = PacketLogger(EndpointDescription([("data", 16)])) 15 | 16 | self.comb += [ 17 | self.streamer.source.connect(self.floatadd.sink, omit=["data"]), 18 | self.floatadd.sink.payload.in1.eq(self.streamer.source.data[16:32]), 19 | 
self.floatadd.sink.payload.in2.eq(self.streamer.source.data[0:16]), 20 | 21 | self.floatadd.source.connect(self.logger.sink, omit=["out"]), 22 | self.logger.sink.data[0:16].eq(self.floatadd.source.out) 23 | ] 24 | 25 | def main_generator(dut): 26 | 27 | for i in range(48): 28 | yield 29 | 30 | raw_image = RAWImage() 31 | raw_image.pack_mult_in() 32 | packet = Packet(raw_image.data) 33 | dut.streamer.send(packet) 34 | yield from dut.logger.receive() 35 | raw_image.set_data(dut.logger.packet) 36 | raw_image.unpack_mult_in() 37 | 38 | 39 | if __name__ == "__main__": 40 | tb = TB() 41 | generators = {"sys" : [main_generator(tb)]} 42 | generators = { 43 | "sys" : [main_generator(tb), 44 | tb.streamer.generator(), 45 | tb.logger.generator()] 46 | } 47 | clocks = {"sys": 10} 48 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 49 | -------------------------------------------------------------------------------- /litevideo/float_arithmetic/test/floatmult_tb.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | from litex.soc.interconnect.stream_sim import * 5 | 6 | from litevideo.float_arithmetic.common import * 7 | from litevideo.float_arithmetic.floatmult import FloatMult 8 | from litevideo.float_arithmetic.test.common import * 9 | 10 | class TB(Module): 11 | def __init__(self): 12 | self.submodules.streamer = PacketStreamer(EndpointDescription([("data", 32)])) 13 | self.submodules.floatmult = FloatMult() 14 | self.submodules.logger = PacketLogger(EndpointDescription([("data", 16)])) 15 | 16 | self.comb += [ 17 | self.streamer.source.connect(self.floatmult.sink, omit=["data"]), 18 | self.floatmult.sink.payload.in1.eq(self.streamer.source.data[16:32]), 19 | self.floatmult.sink.payload.in2.eq(self.streamer.source.data[0:16]), 20 | 21 | self.floatmult.source.connect(self.logger.sink, omit=["out"]), 22 | 
self.logger.sink.data[0:16].eq(self.floatmult.source.out) 23 | ] 24 | 25 | def main_generator(dut): 26 | 27 | for i in range(4): 28 | yield 29 | 30 | raw_image = RAWImage() 31 | raw_image.pack_mult_in() 32 | packet = Packet(raw_image.data) 33 | # print (raw_image.data) 34 | # print (packet) 35 | # print( type(packet[0])) 36 | dut.streamer.send(packet) 37 | yield from dut.logger.receive() 38 | raw_image.set_data(dut.logger.packet) 39 | raw_image.unpack_mult_in() 40 | 41 | 42 | if __name__ == "__main__": 43 | tb = TB() 44 | generators = {"sys" : [main_generator(tb)]} 45 | generators = { 46 | "sys" : [main_generator(tb), 47 | tb.streamer.generator(), 48 | tb.logger.generator()] 49 | } 50 | clocks = {"sys": 10} 51 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 52 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Unless otherwise noted, LiteVideo is Copyright 2016-2018 / EnjoyDigital 2 | Unless otherwise noted, LiteVideo is Copyright 2016-2018 / TimVideos.us 3 | 4 | 5 | Initial development is based on MiXXeo Gateware / Copyright 2007-2016 / M-Labs 6 | 7 | Redistribution and use in source and binary forms, with or without modification, 8 | are permitted provided that the following conditions are met: 9 | 10 | 1. Redistributions of source code must retain the above copyright notice, this 11 | list of conditions and the following disclaimer. 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 20 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 23 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | 27 | 28 | Other authors retain ownership of their contributions. If a submission can 29 | reasonably be considered independently copyrightable, it's yours and we 30 | encourage you to claim it with appropriate copyright notices. This submission 31 | then falls under the "otherwise noted" category. All submissions are strongly 32 | encouraged to use the two-clause BSD license reproduced above. 33 | -------------------------------------------------------------------------------- /litevideo/output/driver.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect import stream 4 | from litex.soc.interconnect.csr import * 5 | 6 | from litevideo.output.common import * 7 | from litevideo.output.hdmi.s6 import S6HDMIOutClocking, S6HDMIOutPHY 8 | from litevideo.output.hdmi.s7 import S7HDMIOutClocking, S7HDMIOutPHY 9 | 10 | 11 | clocking_cls = { 12 | "xc6" : S6HDMIOutClocking, 13 | "xc7" : S7HDMIOutClocking, 14 | } 15 | 16 | hdmi_phy_cls = { 17 | "xc6" : S6HDMIOutPHY, 18 | "xc7" : S7HDMIOutPHY 19 | } 20 | 21 | class VGAPHY(Module): 22 | def __init__(self, pads, mode): 23 | assert mode != "raw" 24 | self.sink = stream.Endpoint(phy_layout(mode)) 25 | 26 | # # # 27 | 28 | self.comb += [ 29 | self.sink.ready.eq(1), 30 | pads.hsync_n.eq(~self.sink.hsync), 31 | pads.vsync_n.eq(~self.sink.vsync), 32 | pads.r.eq(self.sink.r[8-len(pads.r):]), 33 | 
pads.g.eq(self.sink.g[8-len(pads.g):]), 34 | pads.b.eq(self.sink.b[8-len(pads.b):]), 35 | pads.psave_n.eq(1) 36 | ] 37 | 38 | 39 | class Driver(Module, AutoCSR): 40 | """Driver 41 | 42 | Low level video interface module. 43 | """ 44 | def __init__(self, device, pads, mode, external_clocking=None): 45 | self.sink = sink = stream.Endpoint(phy_layout(mode)) 46 | 47 | # # # 48 | 49 | family = device[:3] 50 | 51 | # clocking 52 | self.submodules.clocking = clocking_cls[family](pads, external_clocking) 53 | 54 | # phy 55 | vga = hasattr(pads, "hsync_n") 56 | if vga: 57 | self.submodules.vga_phy = VGAPHY(pads, mode) 58 | self.comb += sink.connect(self.vga_phy.sink) 59 | else: 60 | self.submodules.hdmi_phy = hdmi_phy_cls[family](pads, mode) 61 | if hasattr(self.hdmi_phy, "serdesstrobe"): 62 | self.comb += self.hdmi_phy.serdesstrobe.eq(self.clocking.serdesstrobe) 63 | self.comb += sink.connect(self.hdmi_phy.sink) 64 | -------------------------------------------------------------------------------- /litevideo/input/charsync.py: -------------------------------------------------------------------------------- 1 | from functools import reduce 2 | from operator import or_ 3 | 4 | from migen import * 5 | from migen.genlib.cdc import MultiReg 6 | 7 | from litex.soc.interconnect.csr import * 8 | 9 | from litevideo.input.common import control_tokens 10 | 11 | 12 | class CharSync(Module, AutoCSR): 13 | def __init__(self, required_controls=8): 14 | self.raw_data = Signal(10) 15 | self.synced = Signal() 16 | self.data = Signal(10) 17 | 18 | self._char_synced = CSRStatus() 19 | self._ctl_pos = CSRStatus(bits_for(9)) 20 | 21 | # # # 22 | 23 | raw_data1 = Signal(10) 24 | self.sync.pix += raw_data1.eq(self.raw_data) 25 | raw = Signal(20) 26 | self.comb += raw.eq(Cat(raw_data1, self.raw_data)) 27 | 28 | found_control = Signal() 29 | control_position = Signal(max=10) 30 | self.sync.pix += found_control.eq(0) 31 | for i in range(10): 32 | self.sync.pix += If(reduce(or_, [raw[i:i+10] == t for 
t in control_tokens]), 33 | found_control.eq(1), 34 | control_position.eq(i) 35 | ) 36 | 37 | control_counter = Signal(max=required_controls) 38 | previous_control_position = Signal(max=10) 39 | word_sel = Signal(max=10) 40 | self.sync.pix += [ 41 | If(found_control & (control_position == previous_control_position), 42 | If(control_counter == (required_controls - 1), 43 | control_counter.eq(0), 44 | self.synced.eq(1), 45 | word_sel.eq(control_position) 46 | ).Else( 47 | control_counter.eq(control_counter + 1) 48 | ) 49 | ).Else( 50 | control_counter.eq(0) 51 | ), 52 | previous_control_position.eq(control_position) 53 | ] 54 | self.specials += [ 55 | MultiReg(self.synced, self._char_synced.status), 56 | MultiReg(word_sel, self._ctl_pos.status) 57 | ] 58 | 59 | self.sync.pix += self.data.eq(raw >> word_sel) 60 | -------------------------------------------------------------------------------- /litevideo/float_arithmetic/test/common.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | 3 | import random 4 | import copy 5 | import numpy as np 6 | 7 | 8 | from migen import * 9 | 10 | from litex.soc.interconnect.stream import * 11 | 12 | 13 | class RAWImage: 14 | def __init__(self): 15 | self.a = None 16 | self.b = None 17 | self.c = None 18 | 19 | self.data = [] 20 | 21 | self.length = None 22 | 23 | self.set_value() 24 | 25 | 26 | def set_value(self): 27 | 28 | v1 = 0 29 | v2 = 0.24142 30 | print ("Add out" , v1+v2) 31 | print ("Mult out" , v1*v2) 32 | print( "Add bin", bin(float2binint(v1+v2))[2:].zfill(16) ) 33 | print( "Mult bin", bin(float2binint(v1*v2))[2:].zfill(16) ) 34 | a, b = ([float2binint(v1)]*5,[float2binint(v2)]*5) 35 | self.set_mult_in(a, b) 36 | 37 | def set_mult_in(self, a, b): 38 | self.a = a 39 | self.b = b 40 | self.length = len(a) 41 | 42 | def set_data(self, data): 43 | self.data = data 44 | 45 | def pack_mult_in(self): 46 | self.data = [] 47 | for i in range(self.length): 48 | data = 
(self.a[i] & 0xffff) << 16 49 | data |= (self.b[i] & 0xffff) << 0 50 | self.data.append(data) 51 | q = bin(data)[2:].zfill(32) 52 | print( q[:16] ) 53 | print( q[16:32] ) 54 | return self.data 55 | 56 | def unpack_mult_in(self): 57 | self.c = [] 58 | for data in self.data: 59 | self.c.append((data >> 0) & 0xffff) 60 | print(bin(self.c[1])[2:].zfill(16) ) 61 | print(binint2float(self.c[1])) 62 | return self.c 63 | 64 | 65 | def float2binint(f): 66 | x = int(np.float16(f).view('H')) 67 | return x 68 | 69 | 70 | def binint2float(x): 71 | xs = bin(x)[2:].zfill(16) 72 | frac = '1'+xs[6:16] 73 | fracn = int(frac,2) 74 | exp = xs[1:6] 75 | expn = int(exp,2) -15 76 | 77 | if expn == -15 : #subnormal numbers 78 | expn = -14 79 | frac = '0'+xs[6:16] 80 | fracn = int(frac,2) 81 | 82 | sign = xs[0] 83 | signv = int(sign,2) 84 | 85 | y = ((-1)**signv)*(2**(expn))*fracn*(2**(-10)) 86 | return y 87 | -------------------------------------------------------------------------------- /litevideo/csc/rgb16f2rgb.py: -------------------------------------------------------------------------------- 1 | # rgb16f2rgb 2 | 3 | from migen import * 4 | 5 | from litex.soc.interconnect.stream import * 6 | 7 | from litevideo.csc.common import * 8 | 9 | 10 | @CEInserter() 11 | class PIXF2PIXDatapath(Module): 12 | """ 13 | Converts a 16 bit half precision floating point 14 | number defined in the range [0-1] to 8 bit unsigned 15 | int represented by a pixel in the range [0-255] 16 | """ 17 | latency = 2 18 | def __init__(self, pixf_w, pix_w): 19 | self.sink = sink = Record(pixf_layout(pixf_w)) 20 | self.source = source = Record(pix_layout(pix_w)) 21 | 22 | # # # 23 | 24 | # delay pixf signals 25 | pixf_delayed = [sink] 26 | for i in range(self.latency): 27 | pixf_n = Record(pixf_layout(pixf_w)) 28 | self.sync += getattr(pixf_n, "pixf").eq(getattr(pixf_delayed[-1], "pixf")) 29 | pixf_delayed.append(pixf_n) 30 | 31 | 32 | # Hardware implementation: 33 | 34 | # Stage 1 35 | # Unpack frac and exp 
components 36 | # Correct exponent offset for shifting later 37 | frac = Signal(11) 38 | exp = Signal(5) 39 | exp_offset = Signal(5) 40 | 41 | self.sync += [ 42 | exp_offset.eq(15 - sink.pixf[10:15] - 1), 43 | frac[:10].eq(sink.pixf[:10]), 44 | frac[10].eq(1) 45 | ] 46 | 47 | # Stage 2 48 | # Right shift frac by exp_offset 49 | # Most significant 8 bits of frac assigned to uint8 pix 50 | self.sync += source.pix.eq( (frac >> exp_offset)[3:]) 51 | 52 | 53 | class RGB16f2RGB(PipelinedActor, Module): 54 | def __init__(self, rgb_w=8, rgb16f_w=16): 55 | self.sink = sink = stream.Endpoint(EndpointDescription(rgb16f_layout(rgb16f_w))) 56 | self.source = source = stream.Endpoint(EndpointDescription(rgb_layout(rgb_w))) 57 | 58 | # # # 59 | 60 | for name in ["r", "g", "b"]: 61 | self.submodules.datapath = PIXF2PIXDatapath(rgb16f_w, rgb_w) 62 | PipelinedActor.__init__(self, self.datapath.latency) # FIXME 63 | self.comb += self.datapath.ce.eq(self.pipe_ce) 64 | self.comb += getattr(self.datapath.sink, "pixf").eq(getattr(sink, name +"f")) 65 | self.comb += getattr(source, name).eq(getattr(self.datapath.source, "pix")) 66 | -------------------------------------------------------------------------------- /litevideo/csc/test/rgb16f2rgb_tb.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | from litex.soc.interconnect.stream_sim import * 5 | 6 | from litevideo.csc.common import * 7 | from litevideo.csc.rgb16f2rgb import RGB16f2RGB 8 | 9 | from litevideo.csc.test.common import * 10 | 11 | class TB(Module): 12 | def __init__(self): 13 | self.submodules.streamer = PacketStreamer(EndpointDescription([("data", 48)])) 14 | self.submodules.rgb16f2rgb = RGB16f2RGB() 15 | self.submodules.logger = PacketLogger(EndpointDescription([("data", 24)])) 16 | 17 | self.comb += [ 18 | self.streamer.source.connect(self.rgb16f2rgb.sink, omit=["data"]), 19 | 
self.rgb16f2rgb.sink.payload.rf.eq(self.streamer.source.data[32:48]), 20 | self.rgb16f2rgb.sink.payload.gf.eq(self.streamer.source.data[16:32]), 21 | self.rgb16f2rgb.sink.payload.bf.eq(self.streamer.source.data[0:16]), 22 | 23 | self.rgb16f2rgb.source.connect(self.logger.sink, omit=["r", "g", "b"]), 24 | self.logger.sink.data[16:24].eq(self.rgb16f2rgb.source.r), 25 | self.logger.sink.data[8:16].eq(self.rgb16f2rgb.source.g), 26 | self.logger.sink.data[0:8].eq(self.rgb16f2rgb.source.b) 27 | ] 28 | 29 | def main_generator(dut): 30 | # convert image using rgb16f2rgb model 31 | raw_image = RAWImage(None, "lena.png", 64) 32 | raw_image.rgb2rgb16f_model() 33 | raw_image.rgb16f2rgb_model() 34 | raw_image.save("lena_rgb16f2rgb_reference.png") 35 | 36 | for i in range(24): 37 | yield 38 | 39 | # convert image using rgb16f2rgb implementation 40 | raw_image = RAWImage(None, "lena.png", 64) 41 | raw_image.rgb2rgb16f_model() 42 | raw_image.pack_rgb16f() 43 | packet = Packet(raw_image.data) 44 | dut.streamer.send(packet) 45 | yield from dut.logger.receive() 46 | raw_image.set_data(dut.logger.packet) 47 | raw_image.unpack_rgb() 48 | raw_image.save("lena_rgb16f2rgb.png") 49 | 50 | 51 | if __name__ == "__main__": 52 | tb = TB() 53 | generators = {"sys" : [main_generator(tb)]} 54 | generators = { 55 | "sys" : [main_generator(tb), 56 | tb.streamer.generator(), 57 | tb.logger.generator()] 58 | } 59 | clocks = {"sys": 10} 60 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 61 | -------------------------------------------------------------------------------- /litevideo/csc/test/rgb2ycbcr_tb.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | from litex.soc.interconnect.stream_sim import * 5 | 6 | from litevideo.csc.common import * 7 | from litevideo.csc.rgb2ycbcr import rgb2ycbcr_coefs, RGB2YCbCr 8 | 9 | from litevideo.csc.test.common import * 10 | 11 | 12 | class 
TB(Module): 13 | def __init__(self): 14 | self.submodules.streamer = PacketStreamer(EndpointDescription([("data", 24)])) 15 | self.submodules.rgb2ycbcr = RGB2YCbCr() 16 | self.submodules.logger = PacketLogger(EndpointDescription([("data", 24)])) 17 | 18 | self.comb += [ 19 | self.streamer.source.connect(self.rgb2ycbcr.sink, omit=["data"]), 20 | self.rgb2ycbcr.sink.payload.r.eq(self.streamer.source.data[16:24]), 21 | self.rgb2ycbcr.sink.payload.g.eq(self.streamer.source.data[8:16]), 22 | self.rgb2ycbcr.sink.payload.b.eq(self.streamer.source.data[0:8]), 23 | 24 | self.rgb2ycbcr.source.connect(self.logger.sink, omit=["y", "cb", "cr"]), 25 | self.logger.sink.data[16:24].eq(self.rgb2ycbcr.source.y), 26 | self.logger.sink.data[8:16].eq(self.rgb2ycbcr.source.cb), 27 | self.logger.sink.data[0:8].eq(self.rgb2ycbcr.source.cr) 28 | ] 29 | 30 | 31 | def main_generator(dut): 32 | # convert image using rgb2ycbcr model 33 | raw_image = RAWImage(rgb2ycbcr_coefs(8), "lena.png", 64) 34 | raw_image.rgb2ycbcr_model() 35 | raw_image.ycbcr2rgb() 36 | raw_image.save("lena_rgb2ycbcr_reference.png") 37 | 38 | for i in range(16): 39 | yield 40 | 41 | # convert image using rgb2ycbcr implementation 42 | raw_image = RAWImage(rgb2ycbcr_coefs(8), "lena.png", 64) 43 | raw_image.pack_rgb() 44 | packet = Packet(raw_image.data) 45 | dut.streamer.send(packet) 46 | yield from dut.logger.receive() 47 | raw_image.set_data(dut.logger.packet) 48 | raw_image.unpack_ycbcr() 49 | raw_image.ycbcr2rgb() 50 | raw_image.save("lena_rgb2ycbcr.png") 51 | 52 | if __name__ == "__main__": 53 | tb = TB() 54 | generators = {"sys" : [main_generator(tb)]} 55 | generators = { 56 | "sys" : [main_generator(tb), 57 | tb.streamer.generator(), 58 | tb.logger.generator()] 59 | } 60 | clocks = {"sys": 10} 61 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 62 | -------------------------------------------------------------------------------- /litevideo/csc/test/ycbcr2rgb_tb.py: 
-------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | from litex.soc.interconnect.stream_sim import * 5 | 6 | from litevideo.csc.common import * 7 | from litevideo.csc.ycbcr2rgb import ycbcr2rgb_coefs, YCbCr2RGB 8 | 9 | from litevideo.csc.test.common import * 10 | 11 | class TB(Module): 12 | def __init__(self): 13 | self.submodules.streamer = PacketStreamer(EndpointDescription([("data", 24)])) 14 | self.submodules.ycbcr2rgb = YCbCr2RGB() 15 | self.submodules.logger = PacketLogger(EndpointDescription([("data", 24)])) 16 | 17 | self.comb += [ 18 | self.streamer.source.connect(self.ycbcr2rgb.sink, omit=["data"]), 19 | self.ycbcr2rgb.sink.payload.y.eq(self.streamer.source.data[16:24]), 20 | self.ycbcr2rgb.sink.payload.cb.eq(self.streamer.source.data[8:16]), 21 | self.ycbcr2rgb.sink.payload.cr.eq(self.streamer.source.data[0:8]), 22 | 23 | self.ycbcr2rgb.source.connect(self.logger.sink, omit=["r", "g", "b"]), 24 | self.logger.sink.data[16:24].eq(self.ycbcr2rgb.source.r), 25 | self.logger.sink.data[8:16].eq(self.ycbcr2rgb.source.g), 26 | self.logger.sink.data[0:8].eq(self.ycbcr2rgb.source.b) 27 | ] 28 | 29 | 30 | def main_generator(dut): 31 | # convert image using ycbcr2rgb model 32 | raw_image = RAWImage(ycbcr2rgb_coefs(8), "lena.png", 64) 33 | raw_image.rgb2ycbcr() 34 | raw_image.ycbcr2rgb_model() 35 | raw_image.save("lena_ycbcr2rgb_reference.png") 36 | 37 | for i in range(16): 38 | yield 39 | 40 | # convert image using ycbcr2rgb implementation 41 | raw_image = RAWImage(ycbcr2rgb_coefs(8), "lena.png", 64) 42 | raw_image.rgb2ycbcr() 43 | raw_image.pack_ycbcr() 44 | packet = Packet(raw_image.data) 45 | dut.streamer.send(packet) 46 | yield from dut.logger.receive() 47 | raw_image.set_data(dut.logger.packet) 48 | raw_image.unpack_rgb() 49 | raw_image.save("lena_ycbcr2rgb.png") 50 | 51 | 52 | if __name__ == "__main__": 53 | tb = TB() 54 | generators = {"sys" : 
[main_generator(tb)]} 55 | generators = { 56 | "sys" : [main_generator(tb), 57 | tb.streamer.generator(), 58 | tb.logger.generator()] 59 | } 60 | clocks = {"sys": 10} 61 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 62 | -------------------------------------------------------------------------------- /litevideo/csc/test/ycbcr_resampling_tb.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | from litex.soc.interconnect.stream_sim import * 5 | 6 | from litevideo.csc.common import * 7 | from litevideo.csc.ycbcr444to422 import YCbCr444to422 8 | from litevideo.csc.ycbcr422to444 import YCbCr422to444 9 | 10 | from litevideo.csc.test.common import * 11 | 12 | 13 | class TB(Module): 14 | def __init__(self): 15 | self.submodules.streamer = PacketStreamer(EndpointDescription([("data", 24)])) 16 | self.submodules.ycbcr444to422 = YCbCr444to422() 17 | self.submodules.ycbcr422to444 = YCbCr422to444() 18 | self.submodules.logger = PacketLogger(EndpointDescription([("data", 24)])) 19 | 20 | self.comb += [ 21 | self.streamer.source.connect(self.ycbcr444to422.sink, omit=["data"]), 22 | self.ycbcr444to422.sink.payload.y.eq(self.streamer.source.data[16:24]), 23 | self.ycbcr444to422.sink.payload.cb.eq(self.streamer.source.data[8:16]), 24 | self.ycbcr444to422.sink.payload.cr.eq(self.streamer.source.data[0:8]), 25 | 26 | self.ycbcr444to422.source.connect(self.ycbcr422to444.sink), 27 | 28 | self.ycbcr422to444.source.connect(self.logger.sink, omit=["y", "cb", "cr"]), 29 | self.logger.sink.data[16:24].eq(self.ycbcr422to444.source.y), 30 | self.logger.sink.data[8:16].eq(self.ycbcr422to444.source.cb), 31 | self.logger.sink.data[0:8].eq(self.ycbcr422to444.source.cr) 32 | ] 33 | 34 | 35 | def main_generator(dut): 36 | for i in range(16): 37 | yield 38 | 39 | # chain ycbcr444to422 and ycbcr422to444 40 | raw_image = RAWImage(None, "lena.png", 64) 41 | raw_image.rgb2ycbcr() 
42 | raw_image.pack_ycbcr() 43 | packet = Packet(raw_image.data) 44 | dut.streamer.send(packet) 45 | yield from dut.logger.receive() 46 | raw_image.set_data(dut.logger.packet) 47 | raw_image.unpack_ycbcr() 48 | raw_image.ycbcr2rgb() 49 | raw_image.save("lena_resampling.png") 50 | 51 | if __name__ == "__main__": 52 | tb = TB() 53 | generators = {"sys" : [main_generator(tb)]} 54 | generators = { 55 | "sys" : [main_generator(tb), 56 | tb.streamer.generator(), 57 | tb.logger.generator()] 58 | } 59 | clocks = {"sys": 10} 60 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 61 | -------------------------------------------------------------------------------- /litevideo/csc/test/rgb2rgb16f_tb.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | from litex.soc.interconnect.stream_sim import * 5 | 6 | from litevideo.csc.common import * 7 | from litevideo.csc.rgb2rgb16f import RGB2RGB16f 8 | 9 | from litevideo.csc.test.common import * 10 | 11 | class TB(Module): 12 | def __init__(self): 13 | self.submodules.streamer = PacketStreamer(EndpointDescription([("data", 24)])) 14 | self.submodules.rgb2rgb16f = RGB2RGB16f() 15 | self.submodules.logger = PacketLogger(EndpointDescription([("data", 48)])) 16 | 17 | self.comb += [ 18 | self.streamer.source.connect(self.rgb2rgb16f.sink, omit=["data"]), 19 | self.rgb2rgb16f.sink.payload.r.eq(self.streamer.source.data[16:24]), 20 | self.rgb2rgb16f.sink.payload.g.eq(self.streamer.source.data[8:16]), 21 | self.rgb2rgb16f.sink.payload.b.eq(self.streamer.source.data[0:8]), 22 | 23 | self.rgb2rgb16f.source.connect(self.logger.sink, omit=["rf", "gf", "bf"]), 24 | self.logger.sink.data[32:48].eq(self.rgb2rgb16f.source.rf), 25 | self.logger.sink.data[16:32].eq(self.rgb2rgb16f.source.gf), 26 | self.logger.sink.data[ 0:16].eq(self.rgb2rgb16f.source.bf) 27 | ] 28 | 29 | 30 | def main_generator(dut): 31 | # convert image using 
rgb2ycbcr model 32 | raw_image = RAWImage(None, "lena.png", 64) 33 | raw_image.rgb2rgb16f_model() 34 | raw_image.rgb16f2rgb_model() 35 | raw_image.save("lena_rgb2rgb16f_reference.png") 36 | 37 | for i in range(24): 38 | yield 39 | 40 | # convert image using rgb2ycbcr implementation 41 | raw_image = RAWImage(None, "lena.png", 64) 42 | raw_image.pack_rgb() 43 | packet = Packet(raw_image.data) 44 | dut.streamer.send(packet) 45 | yield from dut.logger.receive() 46 | raw_image.set_data(dut.logger.packet) 47 | raw_image.unpack_rgb16f() 48 | raw_image.rgb16f2rgb_model() 49 | raw_image.save("lena_rgb2rgb16f.png") 50 | 51 | if __name__ == "__main__": 52 | tb = TB() 53 | generators = {"sys" : [main_generator(tb)]} 54 | generators = { 55 | "sys" : [main_generator(tb), 56 | tb.streamer.generator(), 57 | tb.logger.generator()] 58 | } 59 | clocks = {"sys": 10} 60 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 61 | -------------------------------------------------------------------------------- /litevideo/csc/ycbcr422to444.py: -------------------------------------------------------------------------------- 1 | # ycbcr422to444 2 | 3 | from migen import * 4 | 5 | from litex.soc.interconnect.stream import * 6 | 7 | from litevideo.csc.common import * 8 | 9 | @ResetInserter() 10 | class YCbCr422to444(Module): 11 | """YCbCr 422 to 444 12 | 13 | Input: Output: 14 | Y0 Y1 Y2 Y3 Y0 Y1 Y2 Y3 15 | Cb01 Cr01 Cb23 Cr23 --> Cb01 Cb01 Cb23 Cb23 16 | Cr01 Cr01 Cr23 Cr23 17 | """ 18 | latency = 2 19 | def __init__(self, dw=8): 20 | self.sink = sink = stream.Endpoint(EndpointDescription(ycbcr422_layout(dw))) 21 | self.source = source = stream.Endpoint(EndpointDescription(ycbcr444_layout(dw))) 22 | 23 | # # # 24 | 25 | y_fifo = stream.SyncFIFO([("data", dw)], 4) 26 | cb_fifo = stream.SyncFIFO([("data", dw)], 4) 27 | cr_fifo = stream.SyncFIFO([("data", dw)], 4) 28 | self.submodules += y_fifo, cb_fifo, cr_fifo 29 | 30 | # input 31 | parity_in = Signal() 32 | self.sync += If(sink.valid & 
sink.ready, parity_in.eq(~parity_in)) 33 | self.comb += [ 34 | If(~parity_in, 35 | y_fifo.sink.valid.eq(sink.valid & sink.ready), 36 | y_fifo.sink.data.eq(sink.y), 37 | cb_fifo.sink.valid.eq(sink.valid & sink.ready), 38 | cb_fifo.sink.data.eq(sink.cb_cr), 39 | sink.ready.eq(y_fifo.sink.ready & cb_fifo.sink.ready) 40 | ).Else( 41 | y_fifo.sink.valid.eq(sink.valid & sink.ready), 42 | y_fifo.sink.data.eq(sink.y), 43 | cr_fifo.sink.valid.eq(sink.valid & sink.ready), 44 | cr_fifo.sink.data.eq(sink.cb_cr), 45 | sink.ready.eq(y_fifo.sink.ready & cr_fifo.sink.ready) 46 | ) 47 | ] 48 | 49 | 50 | # output 51 | parity_out = Signal() 52 | self.sync += If(source.valid & source.ready, parity_out.eq(~parity_out)) 53 | self.comb += [ 54 | source.valid.eq(y_fifo.source.valid & 55 | cb_fifo.source.valid & 56 | cr_fifo.source.valid), 57 | source.y.eq(y_fifo.source.data), 58 | source.cb.eq(cb_fifo.source.data), 59 | source.cr.eq(cr_fifo.source.data), 60 | y_fifo.source.ready.eq(source.valid & source.ready), 61 | cb_fifo.source.ready.eq(source.valid & source.ready & parity_out), 62 | cr_fifo.source.ready.eq(source.valid & source.ready & parity_out) 63 | ] 64 | -------------------------------------------------------------------------------- /litevideo/csc/test/ycbcr422to444_tb.py: -------------------------------------------------------------------------------- 1 | import random 2 | import unittest 3 | 4 | from migen import * 5 | from litex.soc.interconnect.stream import * 6 | 7 | from litevideo.csc.common import * 8 | from litevideo.csc.ycbcr422to444 import YCbCr422to444 9 | 10 | 11 | prng = random.Random(42) 12 | reference_y = [prng.randrange(256) for i in range(32)] 13 | reference_cb = [] 14 | reference_cr = [] 15 | reference_cb_cr = [prng.randrange(20, 200) for i in range(32)] 16 | for i in range(len(reference_cb_cr)//2): 17 | cb = reference_cb_cr[2*i + 0] 18 | reference_cb.append(cb) 19 | reference_cb.append(cb) 20 | for i in range(len(reference_cb_cr)//2): 21 | cr = 
reference_cb_cr[2*i + 1] 22 | reference_cr.append(cr) 23 | reference_cr.append(cr) 24 | 25 | sink_y = reference_y 26 | sink_cb_cr = reference_cb_cr 27 | 28 | source_y = [] 29 | source_cb = [] 30 | source_cr = [] 31 | 32 | 33 | def sink_generator(sink, rand_threshold=100): 34 | prng = random.Random(42) 35 | for i in range(len(sink_cb_cr)): 36 | valid = 0 37 | while True: 38 | valid = (prng.randrange(100) < rand_threshold) 39 | if valid: 40 | yield sink.valid.eq(1) 41 | yield sink.y.eq(sink_y[i]) 42 | yield sink.cb_cr.eq(sink_cb_cr[i]) 43 | yield 44 | while not (yield sink.ready): 45 | yield 46 | yield sink.valid.eq(0) 47 | break 48 | else: 49 | yield sink.valid.eq(0) 50 | yield 51 | # yield for processing latency 52 | for i in range(128): 53 | yield 54 | 55 | @passive 56 | def source_generator(source, rand_threshold=100): 57 | prng = random.Random(42) 58 | while True: 59 | if (yield source.ready) & (yield source.valid): 60 | source_y.append((yield source.y)) 61 | source_cb.append((yield source.cb)) 62 | source_cr.append((yield source.cr)) 63 | ready = (prng.randrange(100) < rand_threshold) 64 | yield source.ready.eq(ready) 65 | yield 66 | 67 | if __name__ == "__main__": 68 | tb = YCbCr422to444() 69 | generators = {"sys" : [source_generator(tb.source), 70 | sink_generator(tb.sink)]} 71 | clocks = {"sys": 10} 72 | 73 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 74 | 75 | testcase = unittest.TestCase() 76 | testcase.assertEqual(source_y, reference_y) 77 | testcase.assertEqual(source_cb, reference_cb) 78 | testcase.assertEqual(source_cr, reference_cr) 79 | -------------------------------------------------------------------------------- /litevideo/csc/ycbcr444to422.py: -------------------------------------------------------------------------------- 1 | # ycbcr444to422 2 | 3 | from migen import * 4 | 5 | from litex.soc.interconnect.stream import * 6 | 7 | from litevideo.csc.common import * 8 | 9 | 10 | @CEInserter() 11 | class 
YCbCr444to422Datapath(Module): 12 | """YCbCr 444 to 422 13 | 14 | Input: Output: 15 | Y0 Y1 Y2 Y3 Y0 Y1 Y2 Y3 16 | Cb0 Cb1 Cb2 Cb3 --> Cb01 Cr01 Cb23 Cr23 17 | Cr0 Cr1 Cr2 Cr3 18 | """ 19 | latency = 3 20 | 21 | def __init__(self, dw): 22 | self.sink = sink = Record(ycbcr444_layout(dw)) 23 | self.source = source = Record(ycbcr422_layout(dw)) 24 | self.first = Signal() 25 | 26 | # # # 27 | 28 | # delay data signals 29 | ycbcr_delayed = [sink] 30 | for i in range(self.latency): 31 | ycbcr_n = Record(ycbcr444_layout(dw)) 32 | for name in ["y", "cb", "cr"]: 33 | self.sync += getattr(ycbcr_n, name).eq(getattr(ycbcr_delayed[-1], name)) 34 | ycbcr_delayed.append(ycbcr_n) 35 | 36 | # parity 37 | parity = Signal() 38 | self.sync += \ 39 | If(self.first | ~parity, 40 | parity.eq(1) 41 | ).Else( 42 | parity.eq(0) 43 | ) 44 | 45 | # compute mean of cb and cr compoments 46 | cb_sum = Signal(dw+1) 47 | cr_sum = Signal(dw+1) 48 | cb_mean = Signal(dw) 49 | cr_mean = Signal(dw) 50 | 51 | self.comb += [ 52 | cb_mean.eq(cb_sum[1:]), 53 | cr_mean.eq(cr_sum[1:]) 54 | ] 55 | 56 | self.sync += \ 57 | If(parity, 58 | cb_sum.eq(sink.cb + ycbcr_delayed[1].cb), 59 | cr_sum.eq(sink.cr + ycbcr_delayed[1].cr) 60 | ) 61 | 62 | # output 63 | self.sync += \ 64 | If(parity, 65 | self.source.y.eq(ycbcr_delayed[2].y), 66 | self.source.cb_cr.eq(cr_mean) 67 | ).Else( 68 | self.source.y.eq(ycbcr_delayed[2].y), 69 | self.source.cb_cr.eq(cb_mean) 70 | ) 71 | 72 | 73 | class YCbCr444to422(PipelinedActor, Module): 74 | def __init__(self, dw=8): 75 | self.sink = sink = stream.Endpoint(EndpointDescription(ycbcr444_layout(dw))) 76 | self.source = source = stream.Endpoint(EndpointDescription(ycbcr422_layout(dw))) 77 | 78 | # # # 79 | 80 | self.submodules.datapath = YCbCr444to422Datapath(dw) 81 | PipelinedActor.__init__(self, self.datapath.latency) 82 | self.comb += self.datapath.ce.eq(self.pipe_ce) 83 | for name in ["y", "cb", "cr"]: 84 | self.comb += getattr(self.datapath.sink, name).eq(getattr(sink, name)) 85 
| for name in ["y", "cb_cr"]: 86 | self.comb += getattr(source, name).eq(getattr(self.datapath.source, name)) 87 | -------------------------------------------------------------------------------- /litevideo/input/wer.py: -------------------------------------------------------------------------------- 1 | from functools import reduce 2 | from operator import or_, add 3 | 4 | from migen import * 5 | from migen.genlib.cdc import PulseSynchronizer 6 | 7 | from litex.soc.interconnect.csr import * 8 | 9 | from litevideo.input.common import control_tokens 10 | 11 | 12 | class WER(Module, AutoCSR): 13 | """Word Error Rate calculation module. 14 | 15 | https://en.wikipedia.org/wiki/Transition-minimized_differential_signaling 16 | 17 | """ 18 | 19 | def __init__(self, period_bits=24): 20 | self.data = Signal(10) 21 | self._update = CSR() 22 | self._value = CSRStatus(period_bits) 23 | 24 | ### 25 | # (pipeline stage 1) 26 | # We ignore the 10th (inversion) bit, as it is independent of the 27 | # transition minimization. 28 | data_r = Signal(9) 29 | self.sync.pix += data_r.eq(self.data[:9]) 30 | 31 | # (pipeline stage 2) 32 | # Count the number of transitions in the TMDS word. 33 | transitions = Signal(8) 34 | self.comb += [transitions[i].eq(data_r[i] ^ data_r[i+1]) for i in range(8)] 35 | transition_count = Signal(max=9) 36 | self.sync.pix += transition_count.eq(reduce(add, [transitions[i] for i in range(8)])) 37 | 38 | # Control data characters are designed to have a large number (7) of 39 | # transitions to help the receiver synchronize its clock with the 40 | # transmitter clock. 41 | is_control = Signal() 42 | self.sync.pix += is_control.eq(reduce(or_, [data_r == ct for ct in control_tokens])) 43 | 44 | # (pipeline stage 3) 45 | # The TMDS characters selected to represent pixel data contain five or 46 | # fewer transitions. 
47 | is_error = Signal() 48 | self.sync.pix += is_error.eq((transition_count > 4) & ~is_control) 49 | 50 | # counter 51 | period_counter = Signal(period_bits) 52 | period_done = Signal() 53 | self.sync.pix += Cat(period_counter, period_done).eq(period_counter + 1) 54 | 55 | wer_counter = Signal(period_bits) 56 | wer_counter_r = Signal(period_bits) 57 | wer_counter_r_updated = Signal() 58 | self.sync.pix += [ 59 | wer_counter_r_updated.eq(period_done), 60 | If(period_done, 61 | wer_counter_r.eq(wer_counter), 62 | wer_counter.eq(0) 63 | ).Elif(is_error, 64 | wer_counter.eq(wer_counter + 1) 65 | ) 66 | ] 67 | 68 | # sync to system clock domain 69 | wer_counter_sys = Signal(period_bits) 70 | self.submodules.ps_counter = PulseSynchronizer("pix", "sys") 71 | self.comb += self.ps_counter.i.eq(wer_counter_r_updated) 72 | self.sync += If(self.ps_counter.o, wer_counter_sys.eq(wer_counter_r)) 73 | 74 | # register interface 75 | self.sync += If(self._update.re, self._value.status.eq(wer_counter_sys)) 76 | -------------------------------------------------------------------------------- /litevideo/output/hdmi/encoder.py: -------------------------------------------------------------------------------- 1 | from functools import reduce 2 | from operator import add 3 | 4 | from migen import * 5 | 6 | control_tokens = [0b1101010100, 0b0010101011, 0b0101010100, 0b1010101011] 7 | 8 | 9 | class Encoder(Module): 10 | def __init__(self): 11 | self.d = Signal(8) 12 | self.c = Signal(2) 13 | self.de = Signal() 14 | 15 | self.out = Signal(10) 16 | 17 | # # # 18 | 19 | # stage 1 - count number of 1s in data 20 | d = Signal(8) 21 | n1d = Signal(max=9) 22 | self.sync += [ 23 | n1d.eq(reduce(add, [self.d[i] for i in range(8)])), 24 | d.eq(self.d) 25 | ] 26 | 27 | # stage 2 - add 9th bit 28 | q_m = Signal(9) 29 | q_m8_n = Signal() 30 | self.comb += q_m8_n.eq((n1d > 4) | ((n1d == 4) & ~d[0])) 31 | for i in range(8): 32 | if i: 33 | curval = curval ^ d[i] ^ q_m8_n 34 | else: 35 | curval = d[0] 36 | 
self.sync += q_m[i].eq(curval) 37 | self.sync += q_m[8].eq(~q_m8_n) 38 | 39 | # stage 3 - count number of 1s and 0s in q_m[:8] 40 | q_m_r = Signal(9) 41 | n0q_m = Signal(max=9) 42 | n1q_m = Signal(max=9) 43 | self.sync += [ 44 | n0q_m.eq(reduce(add, [~q_m[i] for i in range(8)])), 45 | n1q_m.eq(reduce(add, [q_m[i] for i in range(8)])), 46 | q_m_r.eq(q_m) 47 | ] 48 | 49 | # stage 4 - final encoding 50 | cnt = Signal((6, True)) 51 | 52 | s_c = self.c 53 | s_de = self.de 54 | for p in range(3): 55 | new_c = Signal(2) 56 | new_de = Signal() 57 | self.sync += new_c.eq(s_c), new_de.eq(s_de) 58 | s_c, s_de = new_c, new_de 59 | 60 | self.sync += \ 61 | If(s_de, 62 | If((cnt == 0) | (n1q_m == n0q_m), 63 | self.out[9].eq(~q_m_r[8]), 64 | self.out[8].eq(q_m_r[8]), 65 | If(q_m_r[8], 66 | self.out[:8].eq(q_m_r[:8]), 67 | cnt.eq(cnt + n1q_m - n0q_m) 68 | ).Else( 69 | self.out[:8].eq(~q_m_r[:8]), 70 | cnt.eq(cnt + n0q_m - n1q_m) 71 | ) 72 | ).Else( 73 | If((~cnt[5] & (n1q_m > n0q_m)) | (cnt[5] & (n0q_m > n1q_m)), 74 | self.out[9].eq(1), 75 | self.out[8].eq(q_m_r[8]), 76 | self.out[:8].eq(~q_m_r[:8]), 77 | cnt.eq(cnt + Cat(0, q_m_r[8]) + n0q_m - n1q_m) 78 | ).Else( 79 | self.out[9].eq(0), 80 | self.out[8].eq(q_m_r[8]), 81 | self.out[:8].eq(q_m_r[:8]), 82 | cnt.eq(cnt - Cat(0, ~q_m_r[8]) + n1q_m - n0q_m) 83 | ) 84 | ) 85 | ).Else( 86 | self.out.eq(Array(control_tokens)[s_c]), 87 | cnt.eq(0) 88 | ) 89 | -------------------------------------------------------------------------------- /litevideo/input/chansync.py: -------------------------------------------------------------------------------- 1 | from functools import reduce 2 | from operator import or_, and_ 3 | 4 | from migen import * 5 | from migen.genlib.cdc import MultiReg 6 | from migen.genlib.fifo import _inc 7 | from migen.genlib.record import Record, layout_len 8 | 9 | from litex.soc.interconnect.csr import * 10 | 11 | from litevideo.input.common import channel_layout 12 | 13 | 14 | class _SyncBuffer(Module): 15 | def 
__init__(self, width, depth): 16 | self.din = Signal(width) 17 | self.dout = Signal(width) 18 | self.re = Signal() 19 | 20 | # # # 21 | 22 | produce = Signal(max=depth) 23 | consume = Signal(max=depth) 24 | storage = Memory(width, depth) 25 | self.specials += storage 26 | 27 | wrport = storage.get_port(write_capable=True) 28 | self.specials += wrport 29 | self.comb += [ 30 | wrport.adr.eq(produce), 31 | wrport.dat_w.eq(self.din), 32 | wrport.we.eq(1) 33 | ] 34 | self.sync += _inc(produce, depth) 35 | 36 | rdport = storage.get_port(async_read=True) 37 | self.specials += rdport 38 | self.comb += [ 39 | rdport.adr.eq(consume), 40 | self.dout.eq(rdport.dat_r) 41 | ] 42 | self.sync += If(self.re, _inc(consume, depth)) 43 | 44 | 45 | class ChanSync(Module, AutoCSR): 46 | def __init__(self, nchan=3, depth=8): 47 | self.valid_i = Signal() 48 | self.chan_synced = Signal() 49 | 50 | self._channels_synced = CSRStatus() 51 | 52 | lst_control = [] 53 | all_control = Signal() 54 | for i in range(nchan): 55 | name = "data_in" + str(i) 56 | data_in = Record(channel_layout, name=name) 57 | setattr(self, name, data_in) 58 | name = "data_out" + str(i) 59 | data_out = Record(channel_layout, name=name) 60 | setattr(self, name, data_out) 61 | 62 | # # # 63 | 64 | syncbuffer = _SyncBuffer(layout_len(channel_layout), depth) 65 | syncbuffer = ClockDomainsRenamer("pix")(syncbuffer) 66 | self.submodules += syncbuffer 67 | self.comb += [ 68 | syncbuffer.din.eq(data_in.raw_bits()), 69 | data_out.raw_bits().eq(syncbuffer.dout) 70 | ] 71 | is_control = Signal() 72 | self.comb += [ 73 | is_control.eq(~data_out.de), 74 | syncbuffer.re.eq(~is_control | all_control) 75 | ] 76 | lst_control.append(is_control) 77 | 78 | some_control = Signal() 79 | self.comb += [ 80 | all_control.eq(reduce(and_, lst_control)), 81 | some_control.eq(reduce(or_, lst_control)) 82 | ] 83 | self.sync.pix += \ 84 | If(~self.valid_i, 85 | self.chan_synced.eq(0) 86 | ).Else( 87 | If(some_control, 88 | If(all_control, 89 | 
self.chan_synced.eq(1) 90 | ).Else( 91 | self.chan_synced.eq(0) 92 | ) 93 | ) 94 | ) 95 | self.specials += MultiReg(self.chan_synced, self._channels_synced.status) 96 | -------------------------------------------------------------------------------- /litevideo/output/test/core_tb.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect.stream import * 4 | 5 | from litedram.common import LiteDRAMPort 6 | 7 | from litevideo.output.core import VideoOutCore 8 | 9 | 10 | class TB(Module): 11 | def __init__(self): 12 | self.dram_port = LiteDRAMPort(mode="read", aw=32, dw=32, cd="video") 13 | self.submodules.core = VideoOutCore(self.dram_port) 14 | self.sync += \ 15 | self.core.source.ready.eq(~self.core.source.ready) 16 | 17 | 18 | class DRAMMemory: 19 | def __init__(self, width, depth, init=[]): 20 | self.width = width 21 | self.depth = depth 22 | self.mem = [] 23 | for d in init: 24 | self.mem.append(d) 25 | for _ in range(depth-len(init)): 26 | self.mem.append(0) 27 | 28 | @passive 29 | def read_generator(self, dram_port): 30 | address = 0 31 | pending = 0 32 | while True: 33 | yield dram_port.cmd.ready.eq(0) 34 | yield dram_port.rdata.valid.eq(0) 35 | if pending: 36 | yield dram_port.rdata.valid.eq(1) 37 | yield dram_port.rdata.data.eq(self.mem[address%self.depth]) 38 | yield 39 | yield dram_port.rdata.valid.eq(0) 40 | yield dram_port.rdata.data.eq(0) 41 | pending = 0 42 | elif (yield dram_port.cmd.valid): 43 | pending = not (yield dram_port.cmd.we) 44 | address = (yield dram_port.cmd.adr) 45 | yield 46 | yield dram_port.cmd.ready.eq(1) 47 | yield 48 | 49 | 50 | video_data = [] 51 | 52 | @passive 53 | def video_capture_generator(dut): 54 | while True: 55 | if ((yield dut.core.source.valid) and 56 | (yield dut.core.source.ready) and 57 | (yield dut.core.source.de)): 58 | video_data.append((yield dut.core.source.data)) 59 | yield 60 | 61 | def main_generator(dut): 62 | for i in 
range(100): 63 | yield 64 | # init video 65 | yield dut.core.initiator.hres.storage.eq(16) 66 | yield dut.core.initiator.hsync_start.storage.eq(18) 67 | yield dut.core.initiator.hsync_end.storage.eq(20) 68 | yield dut.core.initiator.hscan.storage.eq(24) 69 | 70 | yield dut.core.initiator.vres.storage.eq(16) 71 | yield dut.core.initiator.vsync_start.storage.eq(18) 72 | yield dut.core.initiator.vsync_end.storage.eq(20) 73 | yield dut.core.initiator.vscan.storage.eq(24) 74 | 75 | yield dut.core.initiator.base.storage.eq(0) 76 | yield dut.core.initiator.length.storage.eq(16*16*4) 77 | 78 | yield 79 | yield dut.core.initiator.enable.storage.eq(1) 80 | yield 81 | 82 | # delay 83 | for i in range(4096): 84 | yield 85 | 86 | # check video data 87 | errors = 0 88 | last = -1 89 | for data in video_data: 90 | if (data != (last + 1)%256): 91 | errors += 1 92 | print(data) 93 | last = data 94 | print(video_data) 95 | print("errors: {:d}".format(errors)) 96 | 97 | 98 | if __name__ == "__main__": 99 | for video_clk_ns in [20, 10, 5]: 100 | tb = TB() 101 | mem = DRAMMemory(32, 1024, [i for i in range(256)]) 102 | generators = { 103 | "sys": [main_generator(tb)], 104 | "video": [video_capture_generator(tb), 105 | mem.read_generator(tb.dram_port)], 106 | } 107 | clocks = {"sys": 10, 108 | "video": video_clk_ns} 109 | video_data = [] 110 | run_simulation(tb, generators, clocks, vcd_name="sim.vcd") 111 | -------------------------------------------------------------------------------- /litevideo/output/pattern.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect import stream 4 | 5 | from litevideo.output.common import * 6 | 7 | 8 | class ColorBarsPattern(Module): 9 | """Color Bars Pattern 10 | """ 11 | def __init__(self): 12 | self.sink = sink = stream.Endpoint(color_bar_parameter_layout) 13 | self.source = source = stream.Endpoint([("r", 8), ("g", 8), ("b", 8)]) 14 | 15 | # # # 16 | 17 | # 
ctrl 18 | pix = Signal(hbits) 19 | bar = Signal(3) 20 | self.sync += [ 21 | If(sink.valid, 22 | source.valid.eq(1), 23 | If(source.ready, 24 | If(pix == (sink.hres[3:]-1), 25 | pix.eq(0), 26 | bar.eq(bar + 1) 27 | ).Else( 28 | pix.eq(pix + 1) 29 | ) 30 | ) 31 | ) 32 | ] 33 | 34 | # data 35 | color_bar = [ 36 | # r , g , b 37 | [255, 255, 255], 38 | [255, 255, 0], 39 | [0, 255, 255], 40 | [0, 255, 0], 41 | [255, 0, 255], 42 | [255, 0, 0], 43 | [0, 0, 255], 44 | [0, 0, 0], 45 | ] 46 | cases = {} 47 | for i in range(8): 48 | cases[i] = [ 49 | source.r.eq(color_bar[i][0]), 50 | source.g.eq(color_bar[i][1]), 51 | source.b.eq(color_bar[i][2]) 52 | ] 53 | self.sync += Case(bar, cases) 54 | 55 | 56 | class VerticalLinesPattern(Module): 57 | """Vertical Lines Pattern 58 | """ 59 | def __init__(self): 60 | self.sink = sink = stream.Endpoint(color_bar_parameter_layout) 61 | self.source = source = stream.Endpoint([("r", 8), ("g", 8), ("b", 8)]) 62 | 63 | # # # 64 | 65 | parity = Signal() 66 | self.sync += [ 67 | If(sink.valid, 68 | source.valid.eq(1), 69 | If(source.ready, 70 | parity.eq(~parity) 71 | ), 72 | If(parity, 73 | source.r.eq(255), 74 | source.g.eq(255), 75 | source.b.eq(255) 76 | ).Else( 77 | source.r.eq(0), 78 | source.g.eq(0), 79 | source.b.eq(0) 80 | ) 81 | ) 82 | ] 83 | 84 | 85 | class DotsPattern(Module): 86 | """Dots Pattern 87 | """ 88 | def __init__(self): 89 | self.sink = sink = stream.Endpoint(color_bar_parameter_layout) 90 | self.source = source = stream.Endpoint([("r", 8), ("g", 8), ("b", 8)]) 91 | 92 | # # # 93 | 94 | h = Signal(hbits) 95 | parity = Signal() 96 | self.sync += [ 97 | If(sink.valid, 98 | source.valid.eq(1), 99 | If(source.ready, 100 | If(h == (sink.hres-1), 101 | # don't change parity: 102 | # next line pixel will 103 | # be swapped. 
104 | h.eq(0) 105 | ).Else( 106 | h.eq(h+1), 107 | parity.eq(~parity) 108 | ) 109 | ) 110 | ), 111 | If(parity, 112 | source.r.eq(255), 113 | source.g.eq(255), 114 | source.b.eq(255) 115 | ).Else( 116 | source.r.eq(0), 117 | source.g.eq(0), 118 | source.b.eq(0) 119 | ) 120 | ] 121 | -------------------------------------------------------------------------------- /litevideo/output/__init__.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect import stream 4 | from litex.soc.interconnect.csr import * 5 | 6 | from litevideo.output.common import * 7 | from litevideo.output.core import VideoOutCore 8 | from litevideo.output.driver import Driver 9 | 10 | from litevideo.csc.ycbcr2rgb import YCbCr2RGB 11 | from litevideo.csc.ycbcr422to444 import YCbCr422to444 12 | 13 | 14 | class TimingDelay(Module): 15 | def __init__(self, latency): 16 | self.sink = stream.Endpoint(frame_timing_layout) 17 | self.source = stream.Endpoint(frame_timing_layout) 18 | 19 | # # # 20 | 21 | for name in list_signals(frame_timing_layout): 22 | s = getattr(self.sink, name) 23 | for i in range(latency): 24 | next_s = Signal() 25 | self.sync += next_s.eq(s) 26 | s = next_s 27 | self.comb += getattr(self.source, name).eq(s) 28 | 29 | 30 | class VideoOut(Module, AutoCSR): 31 | """Video out 32 | 33 | Generates a video from memory. 
34 | """ 35 | def __init__(self, device, pads, dram_port, 36 | mode="rgb", 37 | fifo_depth=512, 38 | external_clocking=None): 39 | cd = dram_port.cd 40 | 41 | self.submodules.core = core = VideoOutCore(dram_port, mode, fifo_depth) 42 | self.submodules.driver = driver = Driver(device, pads, mode, external_clocking) 43 | 44 | if mode == "raw": 45 | self.comb += [ 46 | core.source.connect(driver.sink, omit=["data"]), 47 | driver.sink.c0.eq(core.source.data[0:10]), 48 | driver.sink.c1.eq(core.source.data[10:20]), 49 | driver.sink.c2.eq(core.source.data[20:30]) 50 | ] 51 | elif mode == "rgb": 52 | self.comb += [ 53 | core.source.connect(driver.sink, omit=["data"]), 54 | driver.sink.r.eq(core.source.data[0:8]), 55 | driver.sink.g.eq(core.source.data[8:16]), 56 | driver.sink.b.eq(core.source.data[16:24]) 57 | ] 58 | elif mode == "ycbcr422": 59 | ycbcr422to444 = ClockDomainsRenamer(cd)(YCbCr422to444()) 60 | ycbcr2rgb = ClockDomainsRenamer(cd)(YCbCr2RGB()) 61 | timing_delay = TimingDelay(ycbcr422to444.latency + ycbcr2rgb.latency) 62 | timing_delay = ClockDomainsRenamer(cd)(timing_delay) 63 | self.submodules += ycbcr422to444, ycbcr2rgb, timing_delay 64 | 65 | # data / control 66 | de_r = Signal() 67 | core_source_valid_d = Signal() 68 | core_source_data_d = Signal(16) 69 | sync_cd = getattr(self.sync, cd) 70 | sync_cd += [ 71 | de_r.eq(core.source.de), 72 | core_source_valid_d.eq(core.source.valid), 73 | core_source_data_d.eq(core.source.data), 74 | ] 75 | 76 | self.comb += [ 77 | core.source.ready.eq(1), # always ready, no flow control 78 | ycbcr422to444.reset.eq(core.source.de & ~de_r), 79 | ycbcr422to444.sink.valid.eq(core_source_valid_d), 80 | ycbcr422to444.sink.y.eq(core_source_data_d[:8]), 81 | ycbcr422to444.sink.cb_cr.eq(core_source_data_d[8:]), 82 | 83 | ycbcr422to444.source.connect(ycbcr2rgb.sink), 84 | 85 | ycbcr2rgb.source.connect(driver.sink) 86 | ] 87 | # timing 88 | self.comb += [ 89 | timing_delay.sink.de.eq(core.source.de), 90 | 
timing_delay.sink.vsync.eq(core.source.vsync), 91 | timing_delay.sink.hsync.eq(core.source.hsync), 92 | 93 | driver.sink.de.eq(timing_delay.source.de), 94 | driver.sink.vsync.eq(timing_delay.source.vsync), 95 | driver.sink.hsync.eq(timing_delay.source.hsync) 96 | ] 97 | else: 98 | raise ValueError("Video mode {} not supported".format(mode)) 99 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ``` 2 | __ _ __ _ ___ __ 3 | / / (_) /____| | / (_)__/ /__ ___ 4 | / /__/ / __/ -_) |/ / / _ / -_) _ \ 5 | /____/_/\__/\__/|___/_/\_,_/\__/\___/ 6 | 7 | Copyright 2016-2020 / EnjoyDigital 8 | Copyright 2016-2020 / TimVideos.us 9 | 10 | Small footprint and configurable video cores 11 | powered by LiteX 12 | ``` 13 | 14 | [![](https://travis-ci.com/enjoy-digital/litevideo.svg?branch=master)](https://travis-ci.com/enjoy-digital/litevideo) ![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg) 15 | 16 | 17 | [> Intro 18 | -------- 19 | LiteVideo provides small footprint and configurable video cores. 20 | 21 | LiteVideo is part of LiteX libraries whose aims are to lower entry level of 22 | complex FPGA cores by providing simple, elegant and efficient implementations 23 | of components used in today's SoC such as Ethernet, SATA, PCIe, SDRAM Controller... 24 | 25 | Using Migen to describe the HDL allows the core to be highly and easily configurable. 26 | 27 | LiteVideo can be used as LiteX library or can be integrated with your standard 28 | design flow by generating the verilog rtl that you will use as a standard core. 
29 | 30 | [> Features 31 | ----------- 32 | PHY: 33 | - HDMI input (Spartan6, 7-Series) 34 | - HDMI output (Spartan6, 7-Series) 35 | 36 | Core: 37 | - DMA (input/output) 38 | - Triple buffering (output) 39 | - Color space conversion (RGB <--> YCbCr) 40 | - Chroma resampling 41 | - Floating point arithmetic (WIP) 42 | 43 | [> FPGA Proven 44 | -------------- 45 | LiteVideo is already used in commercial and open-source designs: 46 | - HDMI2USB: http://hdmi2usb.tv/home/ 47 | - and others commercial designs... 48 | 49 | [> Possible improvements 50 | ------------------------ 51 | - add standardized interfaces (AXI, Avalon-ST) 52 | - add Display Port support 53 | - add more documentation 54 | - ... See below Support and consulting :) 55 | 56 | If you want to support these features, please contact us at florent [AT] 57 | enjoy-digital.fr. 58 | 59 | [> Getting started 60 | ------------------ 61 | 1. Install Python 3.6+ and FPGA vendor's development tools. 62 | 2. Install Migen/LiteX and the LiteX's cores: 63 | 64 | ```sh 65 | $ wget https://raw.githubusercontent.com/enjoy-digital/litex/master/litex_setup.py 66 | $ chmod +x litex_setup.py 67 | $ ./litex_setup.py init install --user (--user to install to user directory) 68 | ``` 69 | Later, if you need to update all repositories: 70 | ```sh 71 | $ ./litex_setup.py update 72 | ``` 73 | 74 | 3. TODO: add/describe examples 75 | 76 | [> Tests 77 | -------- 78 | Unit tests are available in ./test/. 79 | To run all the unit tests: 80 | ```sh 81 | $ ./setup.py test 82 | ``` 83 | 84 | Tests can also be run individually: 85 | ```sh 86 | $ python3 -m unittest test.test_name 87 | ``` 88 | 89 | [> License 90 | ---------- 91 | LiteVideo is released under the very permissive two-clause BSD license. Under the 92 | terms of this license, you are authorized to use LiteVideo for closed-source 93 | proprietary designs. 
94 | Even though we do not require you to do so, those things are awesome, so please 95 | do them if possible: 96 | - tell us that you are using LiteVideo 97 | - cite LiteVideo in publications related to research it has helped 98 | - send us feedback and suggestions for improvements 99 | - send us bug reports when something goes wrong 100 | - send us the modifications and improvements you have done to LiteVideo. 101 | 102 | [> Support and consulting 103 | ------------------------- 104 | We love open-source hardware and like sharing our designs with others. 105 | 106 | LiteVideo is developed and maintained by EnjoyDigital. 107 | 108 | If you would like to know more about LiteVideo or if you are already a happy user 109 | and would like to extend it for your needs, EnjoyDigital can provide standard 110 | commercial support as well as consulting services. 111 | 112 | So feel free to contact us, we'd love to work with you! (and eventually shorten 113 | the list of the possible improvements :) 114 | 115 | [> Contact 116 | ---------- 117 | E-mail: florent [AT] enjoy-digital.fr 118 | -------------------------------------------------------------------------------- /litevideo/terminal/README.md: -------------------------------------------------------------------------------- 1 | # VGA Terminal emulation 2 | 3 | This is a simple terminal emulation with a 640x480 VGA output signal, generated from a 80x30 characters text screen buffer, useful for debugging and terminal applications. It uses standard VGA fonts, with 8x16 pixels per character. The default font uses code page 437, which is used to initialize the RAM and which can be changed with a constructor argument. 4 | 5 | The RAM initialization also contains a test image. 
If all VGA signals are wired correctly, it should look like this on a real monitor, without any initialization by a CPU:

![Screenshot with init file](screenshot.jpg "Screenshot")

The VGA standard requires a 25.175 MHz clock with +/- 0.5% accuracy (which you have to provide with a clock domain called `vga`), for a framerate of 60 Hz, but most displays don't have problems with 25 MHz. The VGA clock is independent of the system clock and can be higher or lower. Internally it uses dual-port block RAM for the text and font.

You can specify the text initialization and the font file with the `screen_init_filename` and `font_filename` constructor arguments. If an empty string is passed, it is initialized with 0. The VGA palette is hard-coded in the terminal entity and is the same as the VGA palette in 16 color text mode.

The VGA output signals are 24 bit true color, but you don't need to connect all bits. Example usage with only 6 bits per color component:

```
from litevideo.terminal.core import Terminal
.
.
.
# VGA clock domain
self.clock_domains.cd_vga = ClockDomain()
self.comb += self.cd_vga.clk.eq(clk_outs[2])

# Create VGA terminal
self.mem_map["terminal"] = 0x30000000
self.submodules.terminal = terminal = Terminal()
self.add_wb_slave(mem_decoder(self.mem_map["terminal"]), self.terminal.bus)
self.add_memory_region("terminal", self.mem_map["terminal"], 0x10000)

# Connect VGA pins
vga = platform.request("vga", 0)
self.comb += [
    vga.vsync.eq(terminal.vsync),
    vga.hsync.eq(terminal.hsync),
    vga.red.eq(terminal.red[2:8]),
    vga.green.eq(terminal.green[2:8]),
    vga.blue.eq(terminal.blue[2:8])
]
.
.
.
```

For the VGA output, a passive R2R network can be used for the DACs for the RGB color signals.
The output above was created with the MAX1000 board and this circuit:

![circuit diagram](circuit.png "circuit diagram")

For a full example with this board and circuit see [here](https://github.com/FrankBuss/NetHack/blob/NetHack-3.6/fpga/litex/targets/max1000.py).

This also creates a memory mapped region at 0x30000000 for a CPU core, which can be accessed at 4 byte aligned addresses with 32 bit read/write accesses, but only the lowest byte is used (needs less logic compared to a full memory interface with uint8, uint16 and uint32 access). A C example program showing how to use it:

```
typedef unsigned int uint32_t;

int main(void) {
    int d = 0;
    while (1) {
        volatile uint32_t* vga = (volatile uint32_t*) (0x30000000 | 0x80000000);
        for (int y = 0; y < 30; y++) {
            for (int x = 0; x < 80; x++) {
                vga[0] = d++;
                vga[1] = 0x40;
                vga += 2;
            }
        }
    }
    return 0;
}

```

This increments all characters in the text buffer in an infinite loop; they are displayed as black characters on a red background and look like this:

![C test](ctest.jpg "C test")

The memory organization is 4800 bytes for the text, line by line, first the character code, then the color code for each character, followed by 4096 bytes for the font. The color byte uses the lower nibble for the foreground color and the higher nibble for the background color. See [here](https://en.wikipedia.org/wiki/Video_Graphics_Array#Color_palette) for the palette.

The font file is from [this](https://github.com/perillamint/dkbfnts/blob/master/fntcol16/) repository, which also has font files for other code pages. You can use any `.f16` file.
If you don't want to initialze the RAM with a font or init text, you can create the object like this, which results in a black screen on startup: 79 | 80 | ``` 81 | Terminal(crg.cd_vga.clk, screen_init_filename='', font_filename='') 82 | ``` 83 | 84 | Copyright (c) 2019 Frank Buss , License: BSD. 85 | -------------------------------------------------------------------------------- /litevideo/csc/ycbcr2rgb.py: -------------------------------------------------------------------------------- 1 | # ycbcr2rgb 2 | 3 | from migen import * 4 | 5 | from litex.soc.interconnect.stream import * 6 | 7 | from litevideo.csc.common import * 8 | 9 | def ycbcr2rgb_coefs(dw, cw=None): 10 | ca = 0.1819 11 | cb = 0.0618 12 | cc = 0.5512 13 | cd = 0.6495 14 | xcoef_w = None if cw is None else cw-2 15 | return { 16 | "ca" : coef(ca, cw), 17 | "cb" : coef(cb, cw), 18 | "cc" : coef(cc, cw), 19 | "cd" : coef(cd, cw), 20 | "yoffset" : 2**(dw-4), 21 | "coffset" : 2**(dw-1), 22 | "ymax" : 2**dw-1, 23 | "cmax" : 2**dw-1, 24 | "ymin" : 0, 25 | "cmin" : 0, 26 | "acoef": coef(1/cd, xcoef_w), 27 | "bcoef": coef(-cb/(cc*(1-ca-cb)), xcoef_w), 28 | "ccoef": coef(-ca/(cd*(1-ca-cb)), xcoef_w), 29 | "dcoef": coef(1/cc, xcoef_w) 30 | } 31 | 32 | @CEInserter() 33 | class YCbCr2RGBDatapath(Module): 34 | latency = 4 35 | 36 | def __init__(self, ycbcr_w, rgb_w, coef_w): 37 | self.sink = sink = Record(ycbcr444_layout(ycbcr_w)) 38 | self.source = source = Record(rgb_layout(rgb_w)) 39 | 40 | # # # 41 | 42 | coefs = ycbcr2rgb_coefs(rgb_w, coef_w) 43 | 44 | # delay ycbcr signals 45 | ycbcr_delayed = [sink] 46 | for i in range(self.latency): 47 | ycbcr_n = Record(ycbcr444_layout(ycbcr_w)) 48 | for name in ["y", "cb", "cr"]: 49 | self.sync += getattr(ycbcr_n, name).eq(getattr(ycbcr_delayed[-1], name)) 50 | ycbcr_delayed.append(ycbcr_n) 51 | 52 | # Hardware implementation: 53 | # (Equation from XAPP931) 54 | # r = y - yoffset + (cr - coffset)*acoef 55 | # b = y - yoffset + (cb - coffset)*bcoef + (cr - coffset)*ccoef 
56 | # g = y - yoffset + (cb - coffset)*dcoef 57 | 58 | # stage 1 59 | # (cr - coffset) & (cr - coffset) 60 | cb_minus_coffset = Signal((ycbcr_w + 1, True)) 61 | cr_minus_coffset = Signal((ycbcr_w + 1, True)) 62 | self.sync += [ 63 | cb_minus_coffset.eq(sink.cb - coefs["coffset"]), 64 | cr_minus_coffset.eq(sink.cr - coefs["coffset"]) 65 | ] 66 | 67 | # stage 2 68 | # (y - yoffset) 69 | # (cr - coffset)*acoef 70 | # (cb - coffset)*bcoef 71 | # (cr - coffset)*ccoef 72 | # (cb - coffset)*dcoef 73 | y_minus_yoffset = Signal((ycbcr_w + 1, True)) 74 | cr_minus_coffset_mult_acoef = Signal((ycbcr_w + coef_w + 4, True)) 75 | cb_minus_coffset_mult_bcoef = Signal((ycbcr_w + coef_w + 4, True)) 76 | cr_minus_coffset_mult_ccoef = Signal((ycbcr_w + coef_w + 4, True)) 77 | cb_minus_coffset_mult_dcoef = Signal((ycbcr_w + coef_w + 4, True)) 78 | self.sync += [ 79 | y_minus_yoffset.eq(ycbcr_delayed[1].y - coefs["yoffset"]), 80 | cr_minus_coffset_mult_acoef.eq(cr_minus_coffset * coefs["acoef"]), 81 | cb_minus_coffset_mult_bcoef.eq(cb_minus_coffset * coefs["bcoef"]), 82 | cr_minus_coffset_mult_ccoef.eq(cr_minus_coffset * coefs["ccoef"]), 83 | cb_minus_coffset_mult_dcoef.eq(cb_minus_coffset * coefs["dcoef"]) 84 | ] 85 | 86 | # stage 3 87 | # line addition for all component 88 | r = Signal((ycbcr_w + 4, True)) 89 | g = Signal((ycbcr_w + 4, True)) 90 | b = Signal((ycbcr_w + 4, True)) 91 | self.sync += [ 92 | r.eq(y_minus_yoffset + cr_minus_coffset_mult_acoef[coef_w-2:]), 93 | g.eq(y_minus_yoffset + cb_minus_coffset_mult_bcoef[coef_w-2:] + 94 | cr_minus_coffset_mult_ccoef[coef_w-2:]), 95 | b.eq(y_minus_yoffset + cb_minus_coffset_mult_dcoef[coef_w-2:]) 96 | ] 97 | 98 | # stage 4 99 | # saturate 100 | self.sync += [ 101 | saturate(r, source.r, 0, 2**rgb_w-1), 102 | saturate(g, source.g, 0, 2**rgb_w-1), 103 | saturate(b, source.b, 0, 2**rgb_w-1) 104 | ] 105 | 106 | 107 | class YCbCr2RGB(PipelinedActor, Module): 108 | def __init__(self, ycbcr_w=8, rgb_w=8, coef_w=8): 109 | self.sink = sink = 
stream.Endpoint(EndpointDescription(ycbcr444_layout(ycbcr_w))) 110 | self.source = source = stream.Endpoint(EndpointDescription(rgb_layout(rgb_w))) 111 | 112 | # # # 113 | 114 | self.submodules.datapath = YCbCr2RGBDatapath(ycbcr_w, rgb_w, coef_w) 115 | PipelinedActor.__init__(self, self.datapath.latency) 116 | self.comb += self.datapath.ce.eq(self.pipe_ce) 117 | for name in ["y", "cb", "cr"]: 118 | self.comb += getattr(self.datapath.sink, name).eq(getattr(sink, name)) 119 | for name in ["r", "g", "b"]: 120 | self.comb += getattr(source, name).eq(getattr(self.datapath.source, name)) 121 | -------------------------------------------------------------------------------- /litevideo/csc/rgb2ycbcr.py: -------------------------------------------------------------------------------- 1 | # rgb2ycbcr 2 | 3 | from migen import * 4 | 5 | from litex.soc.interconnect.stream import * 6 | 7 | from litevideo.csc.common import * 8 | 9 | 10 | def rgb2ycbcr_coefs(dw, cw=None): 11 | return { 12 | "ca" : coef(0.1819, cw), 13 | "cb" : coef(0.0618, cw), 14 | "cc" : coef(0.5512, cw), 15 | "cd" : coef(0.6495, cw), 16 | "yoffset" : 2**(dw-4), 17 | "coffset" : 2**(dw-1), 18 | "ymax" : 2**dw-1, 19 | "cmax" : 2**dw-1, 20 | "ymin" : 0, 21 | "cmin" : 0 22 | } 23 | 24 | 25 | @CEInserter() 26 | class RGB2YCbCrDatapath(Module): 27 | latency = 8 28 | 29 | def __init__(self, rgb_w, ycbcr_w, coef_w): 30 | self.sink = sink = Record(rgb_layout(rgb_w)) 31 | self.source = source = Record(ycbcr444_layout(ycbcr_w)) 32 | 33 | # # # 34 | 35 | coefs = rgb2ycbcr_coefs(ycbcr_w, coef_w) 36 | 37 | # delay rgb signals 38 | rgb_delayed = [sink] 39 | for i in range(self.latency): 40 | rgb_n = Record(rgb_layout(rgb_w)) 41 | for name in ["r", "g", "b"]: 42 | self.sync += getattr(rgb_n, name).eq(getattr(rgb_delayed[-1], name)) 43 | rgb_delayed.append(rgb_n) 44 | 45 | # Hardware implementation: 46 | # (Equation from XAPP930) 47 | # y = ca*(r-g) + g + cb*(b-g) + yoffset 48 | # cb = cc*(r-y) + coffset 49 | # cr = cd*(b-y) + 
coffset 50 | 51 | # stage 1 52 | # (r-g) & (b-g) 53 | r_minus_g = Signal((rgb_w + 1, True)) 54 | b_minus_g = Signal((rgb_w + 1, True)) 55 | self.sync += [ 56 | r_minus_g.eq(sink.r - sink.g), 57 | b_minus_g.eq(sink.b - sink.g) 58 | ] 59 | 60 | # stage 2 61 | # ca*(r-g) & cb*(b-g) 62 | ca_mult_rg = Signal((rgb_w + coef_w + 1, True)) 63 | cb_mult_bg = Signal((rgb_w + coef_w + 1, True)) 64 | self.sync += [ 65 | ca_mult_rg.eq(r_minus_g * coefs["ca"]), 66 | cb_mult_bg.eq(b_minus_g * coefs["cb"]) 67 | ] 68 | 69 | # stage 3 70 | # ca*(r-g) + cb*(b-g) 71 | carg_plus_cbbg = Signal((rgb_w + coef_w + 9, True)) # XXX 72 | self.sync += [ 73 | carg_plus_cbbg.eq(ca_mult_rg + cb_mult_bg) 74 | ] 75 | 76 | # stage 4 77 | # yraw = ca*(r-g) + cb*(b-g) + g 78 | yraw = Signal((rgb_w + 3, True)) 79 | self.sync += [ 80 | yraw.eq(carg_plus_cbbg[coef_w:] + rgb_delayed[3].g) 81 | ] 82 | 83 | # stage 5 84 | # r - yraw 85 | # b - yraw 86 | b_minus_yraw = Signal((rgb_w + 4, True)) 87 | r_minus_yraw = Signal((rgb_w + 4, True)) 88 | yraw_r0 = Signal((rgb_w + 3, True)) 89 | self.sync += [ 90 | b_minus_yraw.eq(rgb_delayed[4].b - yraw), 91 | r_minus_yraw.eq(rgb_delayed[4].r - yraw), 92 | yraw_r0.eq(yraw) 93 | ] 94 | 95 | # stage 6 96 | # cc*yraw 97 | # cd*yraw 98 | cc_mult_ryraw = Signal((rgb_w + coef_w + 4, True)) 99 | cd_mult_byraw = Signal((rgb_w + coef_w + 4, True)) 100 | yraw_r1 = Signal((rgb_w + 3, True)) 101 | self.sync += [ 102 | cc_mult_ryraw.eq(b_minus_yraw * coefs["cc"]), 103 | cd_mult_byraw.eq(r_minus_yraw * coefs["cd"]), 104 | yraw_r1.eq(yraw_r0) 105 | ] 106 | 107 | # stage 7 108 | # y = (yraw + yoffset) 109 | # cb = (cc*(r - yraw) + coffset) 110 | # cr = (cd*(b - yraw) + coffset) 111 | y = Signal((rgb_w + 3, True)) 112 | cb = Signal((rgb_w + 4, True)) 113 | cr = Signal((rgb_w + 4, True)) 114 | self.sync += [ 115 | y.eq(yraw_r1 + coefs["yoffset"]), 116 | cb.eq(cc_mult_ryraw[coef_w:] + coefs["coffset"]), 117 | cr.eq(cd_mult_byraw[coef_w:] + coefs["coffset"]) 118 | ] 119 | 120 | # stage 8 
121 | # saturate 122 | self.sync += [ 123 | saturate(y, source.y, coefs["ymin"], coefs["ymax"]), 124 | saturate(cb, source.cb, coefs["cmin"], coefs["cmax"]), 125 | saturate(cr, source.cr, coefs["cmin"], coefs["cmax"]) 126 | ] 127 | 128 | 129 | class RGB2YCbCr(PipelinedActor, Module): 130 | def __init__(self, rgb_w=8, ycbcr_w=8, coef_w=8): 131 | self.sink = sink = stream.Endpoint(EndpointDescription(rgb_layout(rgb_w))) 132 | self.source = source = stream.Endpoint(EndpointDescription(ycbcr444_layout(ycbcr_w))) 133 | 134 | # # # 135 | 136 | self.submodules.datapath = RGB2YCbCrDatapath(rgb_w, ycbcr_w, coef_w) 137 | PipelinedActor.__init__(self, self.datapath.latency) 138 | self.comb += self.datapath.ce.eq(self.pipe_ce) 139 | for name in ["r", "g", "b"]: 140 | self.comb += getattr(self.datapath.sink, name).eq(getattr(sink, name)) 141 | for name in ["y", "cb", "cr"]: 142 | self.comb += getattr(source, name).eq(getattr(self.datapath.source, name)) 143 | -------------------------------------------------------------------------------- /litevideo/csc/rgb2rgb16f.py: -------------------------------------------------------------------------------- 1 | # rgb2rgb16f 2 | 3 | from migen import * 4 | 5 | from litex.soc.interconnect.stream import * 6 | 7 | from litevideo.csc.common import * 8 | from litevideo.float_arithmetic.common import LeadOne 9 | 10 | 11 | def lookup_table(pix_val): 12 | ''' 13 | Contents of lut list generated using int2float functions from 14 | litex.csc.test.common 15 | ''' 16 | lut = [ 17 | 0, 7168, 8192, 8704, 9216, 9472, 9728, 9984, 18 | 10240, 10368, 10496, 10624, 10752, 10880, 11008, 11136, 19 | 11264, 11328, 11392, 11456, 11520, 11584, 11648, 11712, 20 | 11776, 11840, 11904, 11968, 12032, 12096, 12160, 12224, 21 | 12288, 12320, 12352, 12384, 12416, 12448, 12480, 12512, 22 | 12544, 12576, 12608, 12640, 12672, 12704, 12736, 12768, 23 | 12800, 12832, 12864, 12896, 12928, 12960, 12992, 13024, 24 | 13056, 13088, 13120, 13152, 13184, 13216, 13248, 13280, 25 | 
13312, 13328, 13344, 13360, 13376, 13392, 13408, 13424, 26 | 13440, 13456, 13472, 13488, 13504, 13520, 13536, 13552, 27 | 13568, 13584, 13600, 13616, 13632, 13648, 13664, 13680, 28 | 13696, 13712, 13728, 13744, 13760, 13776, 13792, 13808, 29 | 13824, 13840, 13856, 13872, 13888, 13904, 13920, 13936, 30 | 13952, 13968, 13984, 14000, 14016, 14032, 14048, 14064, 31 | 14080, 14096, 14112, 14128, 14144, 14160, 14176, 14192, 32 | 14208, 14224, 14240, 14256, 14272, 14288, 14304, 14320, 33 | 14336, 14344, 14352, 14360, 14368, 14376, 14384, 14392, 34 | 14400, 14408, 14416, 14424, 14432, 14440, 14448, 14456, 35 | 14464, 14472, 14480, 14488, 14496, 14504, 14512, 14520, 36 | 14528, 14536, 14544, 14552, 14560, 14568, 14576, 14584, 37 | 14592, 14600, 14608, 14616, 14624, 14632, 14640, 14648, 38 | 14656, 14664, 14672, 14680, 14688, 14696, 14704, 14712, 39 | 14720, 14728, 14736, 14744, 14752, 14760, 14768, 14776, 40 | 14784, 14792, 14800, 14808, 14816, 14824, 14832, 14840, 41 | 14848, 14856, 14864, 14872, 14880, 14888, 14896, 14904, 42 | 14912, 14920, 14928, 14936, 14944, 14952, 14960, 14968, 43 | 14976, 14984, 14992, 15000, 15008, 15016, 15024, 15032, 44 | 15040, 15048, 15056, 15064, 15072, 15080, 15088, 15096, 45 | 15104, 15112, 15120, 15128, 15136, 15144, 15152, 15160, 46 | 15168, 15176, 15184, 15192, 15200, 15208, 15216, 15224, 47 | 15232, 15240, 15248, 15256, 15264, 15272, 15280, 15288, 48 | 15296, 15304, 15312, 15320, 15328, 15336, 15344, 15352 49 | ] 50 | return lut[pix_val] 51 | 52 | 53 | @CEInserter() 54 | class PIX2PIXFLUT(Module): 55 | """ 56 | Converts a 8 bit unsigned int represented by a pixel in 57 | the range [0-255] to a 16 bit half precision floating point 58 | pix_number defined in the range [0-1], using a look table 59 | """ 60 | latency = 1 61 | def __init__(self, pix_w, pixf_w): 62 | self.sink = sink = Record(pix_layout(pix_w)) 63 | self.source = source = Record(pixf_layout(pixf_w)) 64 | 65 | # # # 66 | 67 | # delay pix signal 68 | pix_delayed = [sink] 69 | 
for i in range(self.latency): 70 | pix_n = Record(pix_layout(pix_w)) 71 | self.sync += getattr(pix_n, "pix").eq(getattr(pix_delayed[-1], "pix")) 72 | pix_delayed.append(pix_n) 73 | 74 | # Hardware implementation: 75 | 76 | # Stage 1 77 | for j in range(256): 78 | self.sync += If(sink.pix == j, source.pixf.eq(lookup_table(j))) # FIXME (use case or memory) 79 | 80 | @CEInserter() 81 | class PIX2PIXFDatapath(Module): 82 | """ 83 | Converts a 8 bit unsigned int represented by a pixel in 84 | the range [0-255] to a 16 bit half precision floating point 85 | pix_number defined in the range [0-1] 86 | """ 87 | latency = 2 88 | def __init__(self, pix_w, pixf_w): 89 | self.sink = sink = Record(pix_layout(pix_w)) 90 | self.source = source = Record(pixf_layout(pixf_w)) 91 | 92 | # # # 93 | 94 | # delay pix signal 95 | pix_delayed = [sink] 96 | for i in range(self.latency): 97 | pix_n = Record(pix_layout(pix_w)) 98 | self.sync += getattr(pix_n, "pix").eq(getattr(pix_delayed[-1], "pix")) 99 | pix_delayed.append(pix_n) 100 | 101 | # Hardware implementation: 102 | 103 | # Stage 1 104 | # Leading one detector 105 | 106 | lshift = Signal(4) 107 | frac_val = Signal(10) 108 | 109 | self.submodules.l1 = LeadOne(12) 110 | self.comb += self.l1.datai.eq(sink.pix) 111 | 112 | self.sync += [ 113 | lshift.eq(self.l1.leadone), 114 | frac_val[3:].eq(sink.pix[:7]), 115 | frac_val[:3].eq(0) 116 | ] 117 | 118 | # Stage 2 119 | # Adjust frac and exp components as per lshift 120 | # Pack in 16bit float 121 | 122 | self.sync += [ 123 | source.pixf[:10].eq(frac_val << lshift), 124 | source.pixf[10:15].eq(15 - 1 - lshift), 125 | source.pixf[15].eq(1) 126 | ] 127 | 128 | class RGB2RGB16f(PipelinedActor, Module): 129 | def __init__(self, rgb_w=8, rgb16f_w=16): 130 | self.sink = sink = stream.Endpoint(EndpointDescription(rgb_layout(rgb_w))) 131 | self.source = source = stream.Endpoint(EndpointDescription(rgb16f_layout(rgb16f_w))) 132 | 133 | # # # 134 | 135 | for name in ["r", "g", "b"]: 136 | 
self.submodules.datapath = PIX2PIXFDatapath(rgb_w, rgb16f_w) 137 | PipelinedActor.__init__(self, self.datapath.latency) # FIXME 138 | self.comb += self.datapath.ce.eq(self.pipe_ce) 139 | self.comb += getattr(self.datapath.sink, "pix").eq(getattr(sink, name)) 140 | self.comb += getattr(source, name + "f").eq(getattr(self.datapath.source, "pixf")) 141 | -------------------------------------------------------------------------------- /litevideo/input/dma.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | from migen.genlib.fsm import FSM, NextState 3 | 4 | from litex.soc.interconnect.csr import * 5 | from litex.soc.interconnect.csr_eventmanager import * 6 | from litex.soc.interconnect import stream 7 | 8 | from litedram.frontend.dma import LiteDRAMDMAWriter 9 | 10 | # Slot status: EMPTY=0 LOADED=1 PENDING=2 11 | class _Slot(Module, AutoCSR): 12 | def __init__(self, addr_bits, alignment_bits): 13 | self.ev_source = EventSourceLevel() 14 | self.address = Signal(addr_bits) 15 | self.address_reached = Signal(addr_bits) 16 | self.address_valid = Signal() 17 | self.address_done = Signal() 18 | 19 | self._status = CSRStorage(2, write_from_dev=True) 20 | self._address = CSRStorage(addr_bits + alignment_bits, write_from_dev=True) 21 | 22 | # # # 23 | 24 | self.comb += [ 25 | self.address.eq(self._address.storage[alignment_bits:]), 26 | self.address_valid.eq(self._status.storage[0]), 27 | self._status.dat_w.eq(2), 28 | self._status.we.eq(self.address_done), 29 | self._address.dat_w[alignment_bits:].eq(self.address_reached), 30 | self._address.we.eq(self.address_done), 31 | self.ev_source.trigger.eq(self._status.storage[1]) 32 | ] 33 | 34 | 35 | class _SlotArray(Module, AutoCSR): 36 | def __init__(self, nslots, addr_bits, alignment_bits): 37 | self.submodules.ev = EventManager() 38 | self.address = Signal(addr_bits) 39 | self.address_reached = Signal(addr_bits) 40 | self.address_valid = Signal() 41 | self.address_done 
= Signal() 42 | 43 | # # # 44 | 45 | slots = [_Slot(addr_bits, alignment_bits) for i in range(nslots)] 46 | for n, slot in enumerate(slots): 47 | setattr(self.submodules, "slot"+str(n), slot) 48 | setattr(self.ev, "slot"+str(n), slot.ev_source) 49 | self.ev.finalize() 50 | 51 | change_slot = Signal() 52 | current_slot = Signal(max=nslots) 53 | self.sync += If(change_slot, [If(slot.address_valid, current_slot.eq(n)) 54 | for n, slot in reversed(list(enumerate(slots)))]) 55 | self.comb += change_slot.eq(~self.address_valid | self.address_done) 56 | 57 | self.comb += [ 58 | self.address.eq(Array(slot.address for slot in slots)[current_slot]), 59 | self.address_valid.eq(Array(slot.address_valid for slot in slots)[current_slot]) 60 | ] 61 | self.comb += [slot.address_reached.eq(self.address_reached) for slot in slots] 62 | self.comb += [slot.address_done.eq(self.address_done & (current_slot == n)) 63 | for n, slot in enumerate(slots)] 64 | 65 | 66 | class DMA(Module): 67 | def __init__(self, dram_port, nslots): 68 | bus_aw = dram_port.aw 69 | bus_dw = dram_port.dw 70 | alignment_bits = bits_for(bus_dw//8) - 1 71 | 72 | fifo_word_width = bus_dw 73 | self.frame = stream.Endpoint([("sof", 1), ("pixels", fifo_word_width)]) 74 | self._frame_size = CSRStorage(bus_aw + alignment_bits) 75 | self.submodules._slot_array = _SlotArray(nslots, bus_aw, alignment_bits) 76 | self.ev = self._slot_array.ev 77 | 78 | # # # 79 | 80 | # address generator + maximum memory word count to prevent DMA buffer 81 | # overrun 82 | reset_words = Signal() 83 | count_word = Signal() 84 | last_word = Signal() 85 | current_address = Signal(bus_aw) 86 | mwords_remaining = Signal(bus_aw) 87 | self.comb += [ 88 | self._slot_array.address_reached.eq(current_address), 89 | last_word.eq(mwords_remaining == 1) 90 | ] 91 | self.sync += [ 92 | If(reset_words, 93 | current_address.eq(self._slot_array.address), 94 | mwords_remaining.eq(self._frame_size.storage[alignment_bits:]) 95 | ).Elif(count_word, 96 | 
current_address.eq(current_address + 1), 97 | mwords_remaining.eq(mwords_remaining - 1) 98 | ) 99 | ] 100 | 101 | memory_word = Signal(bus_dw) 102 | pixbits = [] 103 | for i in range(bus_dw//16): 104 | pixbits.append(self.frame.pixels) 105 | self.comb += memory_word.eq(Cat(*pixbits)) 106 | 107 | # bus accessor 108 | self.submodules._bus_accessor = LiteDRAMDMAWriter(dram_port) 109 | self.comb += [ 110 | self._bus_accessor.sink.address.eq(current_address), 111 | self._bus_accessor.sink.data.eq(memory_word) 112 | ] 113 | 114 | # control FSM 115 | fsm = FSM() 116 | self.submodules += fsm 117 | 118 | fsm.act("WAIT_SOF", 119 | reset_words.eq(1), 120 | self.frame.ready.eq(~self._slot_array.address_valid | 121 | ~self.frame.sof), 122 | If(self._slot_array.address_valid & 123 | self.frame.sof & 124 | self.frame.valid, 125 | NextState("TRANSFER_PIXELS") 126 | ) 127 | ) 128 | fsm.act("TRANSFER_PIXELS", 129 | self.frame.ready.eq(self._bus_accessor.sink.ready), 130 | If(self.frame.valid, 131 | self._bus_accessor.sink.valid.eq(1), 132 | If(self._bus_accessor.sink.ready, 133 | count_word.eq(1), 134 | If(last_word, 135 | NextState("EOF") 136 | ) 137 | ) 138 | ) 139 | ) 140 | fsm.act("EOF", 141 | If(~dram_port.wdata.valid, 142 | self._slot_array.address_done.eq(1), 143 | NextState("WAIT_SOF") 144 | ) 145 | ) 146 | 147 | def get_csrs(self): 148 | return [self._frame_size] + self._slot_array.get_csrs() 149 | -------------------------------------------------------------------------------- /litevideo/float_arithmetic/floatmult.py: -------------------------------------------------------------------------------- 1 | ''' 2 | FloatMultDatapath class: Multiply two floating point numbers a and b, returns 3 | their output c in the same float16 format. 4 | 5 | FloatMult class: Use the FloatMultDatapath above and generates a modules 6 | implemented using five stage pipeline. 
7 | ''' 8 | 9 | from migen import * 10 | 11 | from litex.soc.interconnect.stream import * 12 | from litex.soc.interconnect.csr import * 13 | 14 | from litevideo.float_arithmetic.common import * 15 | 16 | 17 | @CEInserter() 18 | class FloatMultDatapath(Module): 19 | """ 20 | This adds a floating point multiplication unit. 21 | Inputs: in1 and in2 22 | Output: out 23 | Implemented as a 5 stage pipeline, design is based on float16 design doc. 24 | Google Docs Link: https://goo.gl/Rvx2B7 25 | """ 26 | latency = 5 27 | def __init__(self, dw): 28 | self.sink = sink = Record(in_layout(dw)) 29 | self.source = source = Record(out_layout(dw)) 30 | 31 | # delay input a/b signals 32 | in_delayed = [sink] 33 | for i in range(self.latency): 34 | in_n = Record(in_layout(dw)) 35 | for name in ["in1", "in2"]: 36 | self.sync += getattr(in_n, name).eq(getattr(in_delayed[-1], name)) 37 | in_delayed.append(in_n) 38 | 39 | # stage 1 40 | # Unpack 41 | # Look for special cases 42 | 43 | in1_frac = Signal(10) 44 | in2_frac = Signal(10) 45 | in1_mant = Signal(11) 46 | in2_mant = Signal(11) 47 | 48 | in1_exp = Signal(5) 49 | in2_exp = Signal(5) 50 | in1_exp1 = Signal(5) 51 | in2_exp1 = Signal(5) 52 | 53 | in1_sign = Signal() 54 | in2_sign = Signal() 55 | 56 | out_status1 = Signal(2) 57 | # 00-0 Zero 58 | # 01-1 Inf 59 | # 10-2 Nan 60 | # 11-3 Normal 61 | 62 | self.comb += [ 63 | in1_frac.eq(sink.in1[:10]), 64 | in2_frac.eq(sink.in2[:10]), 65 | 66 | in1_exp.eq(sink.in1[10:15]), 67 | in2_exp.eq(sink.in2[10:15]), 68 | 69 | in1_sign.eq(sink.in1[15]), 70 | in2_sign.eq(sink.in2[15]) 71 | ] 72 | 73 | self.sync += [ 74 | If(in1_exp == 0, 75 | in1_mant.eq(Cat(in1_frac, 0)), 76 | in1_exp1.eq(in1_exp + 1) 77 | ).Else( 78 | in1_mant.eq(Cat(in1_frac, 1)), 79 | in1_exp1.eq(in1_exp) 80 | ), 81 | 82 | If(in2_exp == 0, 83 | in2_mant.eq(Cat(in2_frac, 0)), 84 | in2_exp1.eq(in2_exp + 1) 85 | ).Else( 86 | in2_mant.eq(Cat(in2_frac, 1)), 87 | in2_exp1.eq(in2_exp) 88 | ), 89 | 90 | If(((in1_exp == 0) & (in1_frac 
== 0)), 91 | out_status1.eq(0) 92 | ).Elif(((in2_exp == 0) & (in2_frac == 0)), 93 | out_status1.eq(0) 94 | ).Else( 95 | out_status1.eq(3) 96 | ) 97 | ] 98 | 99 | # stage 2 100 | # Multiply fractions and add exponents 101 | out_mult = Signal(22) 102 | out_exp = Signal((7, True)) 103 | out_status2 = Signal(2) 104 | 105 | self.sync += [ 106 | out_mult.eq(in1_mant * in2_mant), 107 | out_exp.eq(in1_exp1 + in2_exp1 - 15), 108 | out_status2.eq(out_status1), 109 | ] 110 | 111 | # stage 3 112 | # Leading one detector 113 | one_ptr = Signal(5) 114 | out_status3 = Signal(2) 115 | out_mult3 = Signal(22) 116 | out_exp3 = Signal((7, True)) 117 | 118 | lead_one_ptr = Signal(5) 119 | self.submodules.leadone = LeadOne(22) 120 | self.comb += [ 121 | self.leadone.datai.eq(out_mult), 122 | lead_one_ptr.eq(self.leadone.leadone) 123 | ] 124 | 125 | self.sync += [ 126 | out_status3.eq(out_status2), 127 | out_mult3.eq(out_mult), 128 | out_exp3.eq(out_exp), 129 | one_ptr.eq(lead_one_ptr) 130 | ] 131 | 132 | # stage 4 133 | # Shift and Adjust 134 | out_exp_adjust = Signal((7, True)) 135 | out_mult_shift = Signal(22) 136 | out_status4 = Signal(2) 137 | 138 | self.sync += [ 139 | out_status4.eq(out_status3), 140 | If((out_exp3 - one_ptr) < 1, 141 | out_exp_adjust.eq(0), 142 | out_mult_shift.eq(((out_mult >> (0 - out_exp3)) << 1)) 143 | ).Else( 144 | out_exp_adjust.eq(out_exp3 + 1 - one_ptr), 145 | out_mult_shift.eq(out_mult << one_ptr + 1) 146 | ) 147 | ] 148 | 149 | # stage 5 150 | # Normalize and pack 151 | self.sync += [ 152 | If(out_status4 == 0, 153 | source.out.eq(0) 154 | ).Elif(out_status4 == 3, 155 | source.out.eq(Cat(out_mult_shift[12:], out_exp_adjust[:5],0)) 156 | ) 157 | ] 158 | 159 | class FloatMult(PipelinedActor, Module, AutoCSR): 160 | def __init__(self, dw=16): 161 | self.sink = sink = stream.Endpoint(EndpointDescription(in_layout(dw))) 162 | self.source = source = stream.Endpoint(EndpointDescription(out_layout(dw))) 163 | 164 | # # # 165 | 166 | self.submodules.datapath = 
FloatMultDatapath(dw) 167 | PipelinedActor.__init__(self, self.datapath.latency) 168 | self.comb += self.datapath.ce.eq(self.pipe_ce) 169 | for name in ["in1", "in2"]: 170 | self.comb += getattr(self.datapath.sink, name).eq(getattr(sink, name)) 171 | self.comb += getattr(source, "out").eq(getattr(self.datapath.source, "out")) 172 | 173 | # Comment this out when simulating (why?) 174 | 175 | # self._float_in1 = CSRStorage(dw) 176 | # self._float_in2 = CSRStorage(dw) 177 | # self._float_out = CSRStatus(dw) 178 | 179 | # self.comb += [ 180 | # getattr(sink, "in1").eq(self._float_in1.storage), 181 | # getattr(sink, "in2").eq(self._float_in2.storage), 182 | # self._float_out.status.eq(getattr(source, "out")) 183 | # ] 184 | -------------------------------------------------------------------------------- /litevideo/output/hdmi/s7.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | 3 | from litex.soc.interconnect import stream 4 | from litex.soc.interconnect.csr import * 5 | 6 | from litevideo.output.common import * 7 | from litevideo.output.hdmi.encoder import Encoder 8 | 9 | # Serializer and Clocking initial configurations come 10 | # from http://hamsterworks.co.nz/. 
class S7HDMIOutEncoderSerializer(Module):
    """TMDS encoder + 10:1 serializer for one 7-series HDMI channel.

    Serializes a 10-bit symbol per "pix" clock onto a differential pad pair
    using two chained OSERDESE2 primitives (master/slave) clocked at 5x the
    pixel clock in DDR mode (2 bits per "pix5x" edge pair -> 10 bits/pixel).

    If bypass_encoder is False, a TMDS Encoder (in the "pix" domain) produces
    the symbol from self.d (pixel data), self.c (control) and self.de (data
    enable). If True, the caller drives the raw 10-bit self.data directly
    (used e.g. for the clock channel).
    """
    def __init__(self, pad_p, pad_n, bypass_encoder=False):
        if not bypass_encoder:
            self.submodules.encoder = ClockDomainsRenamer("pix")(Encoder())
            self.d, self.c, self.de = self.encoder.d, self.encoder.c, self.encoder.de
            self.data = self.encoder.out
        else:
            self.data = Signal(10)

        # # #

        # Honor pad polarity: if the platform marks the pad "inverted",
        # pre-invert the symbol instead of swapping p/n at the buffer.
        data = Signal(10)
        if hasattr(pad_p, "inverted"):
            self.comb += data.eq(~self.data)
        else:
            self.comb += data.eq(self.data)

        # Clock-enable released one cycle after "pix" reset deasserts.
        ce = Signal()
        self.sync.pix += ce.eq(~ResetSignal("pix"))

        # Bit-expansion chain between slave and master OSERDESE2.
        shift = Signal(2)
        pad_se = Signal()  # single-ended serial output before OBUFDS

        # OSERDESE2 master
        # Master handles symbol bits 0-7; slave supplies bits 8-9 through
        # the SHIFTIN/SHIFTOUT expansion ports (10:1 needs two primitives).
        self.specials += [
            Instance("OSERDESE2",
                p_DATA_WIDTH=10, p_TRISTATE_WIDTH=1,
                p_DATA_RATE_OQ="DDR", p_DATA_RATE_TQ="DDR",
                p_SERDES_MODE="MASTER",

                o_OQ=pad_se,
                i_OCE=ce,
                i_TCE=0,
                i_RST=ResetSignal("pix"),
                i_CLK=ClockSignal("pix5x"), i_CLKDIV=ClockSignal("pix"),
                i_D1=data[0], i_D2=data[1],
                i_D3=data[2], i_D4=data[3],
                i_D5=data[4], i_D6=data[5],
                i_D7=data[6], i_D8=data[7],

                i_SHIFTIN1=shift[0], i_SHIFTIN2=shift[1],
                #o_SHIFTOUT1=, o_SHIFTOUT2=,
            ),
            Instance("OSERDESE2",
                p_DATA_WIDTH=10, p_TRISTATE_WIDTH=1,
                p_DATA_RATE_OQ="DDR", p_DATA_RATE_TQ="DDR",
                p_SERDES_MODE="SLAVE",

                i_OCE=ce,
                i_TCE=0,
                i_RST=ResetSignal("pix"),
                i_CLK=ClockSignal("pix5x"), i_CLKDIV=ClockSignal("pix"),
                i_D1=0, i_D2=0,
                # Slave feeds the two high symbol bits on D3/D4 per the
                # 7-series width-expansion convention.
                i_D3=data[8], i_D4=data[9],
                i_D5=0, i_D6=0,
                i_D7=0, i_D8=0,

                i_SHIFTIN1=0, i_SHIFTIN2=0,
                o_SHIFTOUT1=shift[0], o_SHIFTOUT2=shift[1]
            ),
            # Differential output buffer driving the pad pair.
            Instance("OBUFDS", i_I=pad_se, o_O=pad_p, o_OB=pad_n)
        ]


# This assumes a 100MHz base clock
class S7HDMIOutClocking(Module, AutoCSR):
    """Pixel-clock generation for the 7-series HDMI output.

    An MMCME2_ADV generates the "pix" and "pix5x" (serializer) clocks from a
    100MHz input ("clk100" domain). CSRs expose the MMCM reset and its DRP
    (dynamic reconfiguration port) so software can retune the pixel clock.
    """
    def __init__(self, pads, external_clocking):
        # TODO: implement external clocking
        self.clock_domains.cd_pix = ClockDomain("pix")
        self.clock_domains.cd_pix5x = ClockDomain("pix5x", reset_less=True)

        # CSR interface to the MMCM DRP.
        self._mmcm_reset = CSRStorage()
        self._mmcm_read = CSR()
        self._mmcm_write = CSR()
        self._mmcm_drdy = CSRStatus()
        self._mmcm_adr = CSRStorage(7)
        self._mmcm_dat_w = CSRStorage(16)
        self._mmcm_dat_r = CSRStatus(16)

        # # #

        mmcm_locked = Signal()
        mmcm_fb = Signal()      # feedback loop (CLKFBOUT -> CLKFBIN)
        mmcm_clk0 = Signal()    # pix clock, pre-BUFG
        mmcm_clk1 = Signal()    # pix5x clock, pre-BUFG
        mmcm_drdy = Signal()

        self.specials += [
            Instance("MMCME2_ADV",
                p_BANDWIDTH="OPTIMIZED",
                i_RST=self._mmcm_reset.storage, o_LOCKED=mmcm_locked,

                # VCO
                # 100MHz * 30 / 2 = 1.5GHz VCO.
                p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=10.0,
                p_CLKFBOUT_MULT_F=30.0, p_CLKFBOUT_PHASE=0.000, p_DIVCLK_DIVIDE=2,
                i_CLKIN1=ClockSignal("clk100"), i_CLKFBIN=mmcm_fb, o_CLKFBOUT=mmcm_fb,

                # CLK0
                # 1.5GHz / 10 = 150MHz initial pix clock (retuned via DRP).
                p_CLKOUT0_DIVIDE_F=10.0, p_CLKOUT0_PHASE=0.000, o_CLKOUT0=mmcm_clk0,
                # CLK1
                # 1.5GHz / 2 = 750MHz initial pix5x clock.
                p_CLKOUT1_DIVIDE=2, p_CLKOUT1_PHASE=0.000, o_CLKOUT1=mmcm_clk1,

                # DRP
                i_DCLK=ClockSignal(),
                i_DWE=self._mmcm_write.re,
                i_DEN=self._mmcm_read.re | self._mmcm_write.re,
                o_DRDY=mmcm_drdy,
                i_DADDR=self._mmcm_adr.storage,
                i_DI=self._mmcm_dat_w.storage,
                o_DO=self._mmcm_dat_r.status
            ),
            Instance("BUFG", i_I=mmcm_clk0, o_O=self.cd_pix.clk),
            Instance("BUFG", i_I=mmcm_clk1, o_O=self.cd_pix5x.clk)
        ]
        # DRDY handshake: clear the status flag when a new DRP access
        # starts, latch it once the MMCM signals completion.
        self.sync += [
            If(self._mmcm_read.re | self._mmcm_write.re,
                self._mmcm_drdy.status.eq(0)
            ).Elif(mmcm_drdy,
                self._mmcm_drdy.status.eq(1)
            )
        ]
        # Hold the pix domain in reset until the MMCM locks.
        self.comb += self.cd_pix.rst.eq(~mmcm_locked)
        if hasattr(pads, "clk_p"):
            # Differential clock pad: reuse the serializer (encoder bypassed)
            # with the fixed 0b0000011111 pattern, i.e. the pixel clock
            # itself expressed as a 10-bit symbol.
            self.submodules.clk_gen = S7HDMIOutEncoderSerializer(pads.clk_p, pads.clk_n, bypass_encoder=True)
            self.comb += self.clk_gen.data.eq(Signal(10, reset=0b0000011111))
        else:
            self.comb += pads.clk.eq(ClockSignal("pix")) # FIXME: use primitive (ODDR2?)
class S7HDMIOutPHY(Module):
    """7-series HDMI output PHY: three encoder/serializer channels.

    Consumes a stream of video words (layout selected by `mode`) and drives
    the three HDMI data lanes. The sink is always ready (no backpressure:
    video timing is fixed by the pixel clock).

    mode == "raw": the TMDS encoders are bypassed and the stream carries
    pre-encoded 10-bit symbols c0/c1/c2, one per lane.
    Otherwise: the stream carries r/g/b + hsync/vsync/de; per the HDMI/DVI
    lane mapping, blue goes to lane 0 (with hsync/vsync as its control
    bits), green to lane 1 and red to lane 2.
    """
    def __init__(self, pads, mode):
        self.sink = sink = stream.Endpoint(phy_layout(mode))

        # # #

        # One encoder+serializer per data lane; encoder bypassed in raw mode.
        self.submodules.es0 = S7HDMIOutEncoderSerializer(pads.data0_p, pads.data0_n, mode == "raw")
        self.submodules.es1 = S7HDMIOutEncoderSerializer(pads.data1_p, pads.data1_n, mode == "raw")
        self.submodules.es2 = S7HDMIOutEncoderSerializer(pads.data2_p, pads.data2_n, mode == "raw")

        if mode == "raw":
            self.comb += [
                sink.ready.eq(1),
                self.es0.data.eq(sink.c0),
                self.es1.data.eq(sink.c1),
                self.es2.data.eq(sink.c2)
            ]
        else:
            self.comb += [
                sink.ready.eq(1),
                self.es0.d.eq(sink.b),
                self.es1.d.eq(sink.g),
                self.es2.d.eq(sink.r),
                # Sync pulses ride on channel 0's control bits only.
                self.es0.c.eq(Cat(sink.hsync, sink.vsync)),
                self.es1.c.eq(0),
                self.es2.c.eq(0),
                self.es0.de.eq(sink.de),
                self.es1.de.eq(sink.de),
                self.es2.de.eq(sink.de)
            ]
@CEInserter()
class FloatAddDatapath(Module):
    """Add two float16 bit patterns over a 5-stage pipeline.

    sink.in1/sink.in2 carry IEEE half-precision words; source.out carries
    their sum in the same format, ``latency`` cycles later, advancing only
    while ``ce`` (inserted by CEInserter) is high.

    Stage 1: unpack sign/exponent/fraction, restore the implicit mantissa
             bit, and subtract the (denormal-adjusted) exponents.
    Stage 2: shift the mantissa of the smaller operand so both share the
             larger exponent.
    Stage 3: add the aligned mantissas.
    Stage 4: leading-one detection, mantissa shift and exponent adjust.
    Stage 5: pack the result back into a float16 word.
    """
    latency = 5
    def __init__(self, dw):
        self.sink = sink = Record(in_layout(dw))
        self.source = source = Record(out_layout(dw))

        # delay input signals
        # NOTE(review): these delayed copies are never consumed downstream in
        # this file; they look like leftover scaffolding — confirm before
        # removing, they only cost registers.
        in_delayed = [sink]
        for i in range(self.latency):
            in_n = Record(in_layout(dw))
            for name in ["in1", "in2"]:
                self.sync += getattr(in_n, name).eq(getattr(in_delayed[-1], name))
            in_delayed.append(in_n)

        # Hardware implementation:

        # Stage 1
        # Unpack
        # Substract Exponents

        in1_frac = Signal(10)   # raw fraction fields
        in2_frac = Signal(10)

        in1_mant = Signal(11)   # fraction with implicit leading bit restored
        in2_mant = Signal(11)

        in1_exp = Signal(5)     # raw exponent fields
        in2_exp = Signal(5)

        # Signed exponent difference used for stage-2 alignment.
        in1_minus_in2_exp = Signal((6, True))

        # Effective exponents (denormals use exponent+1 with no hidden bit).
        in1_exp1 = Signal(5)
        in2_exp1 = Signal(5)

        in1_sign = Signal()
        in2_sign = Signal()


        out_status1 = Signal(2)
        # 00-0 Zero
        # 01-1 Inf
        # 10-2 Nan
        # 11-3 Normal

        in1_stage1 = Signal(16)
        in2_stage1 = Signal(16)

        self.comb += [
            in1_frac.eq(sink.in1[:10]),
            in2_frac.eq(sink.in2[:10]),

            in1_exp.eq(sink.in1[10:15]),
            in2_exp.eq(sink.in2[10:15]),

            in1_sign.eq(sink.in1[15]),
            # BUGFIX: was sink.in1[15] (copy-paste) — operand 2's sign must
            # come from operand 2.
            in2_sign.eq(sink.in2[15])
        ]

        # Restore the implicit mantissa bit: denormals (exp == 0) have no
        # hidden one and an effective exponent of exp + 1.
        self.comb += [
            If(in1_exp == 0,
                in1_mant.eq(Cat(sink.in1[:10], 0)),
                in1_exp1.eq(sink.in1[10:15] + 1)
            ).Else(
                in1_mant.eq(Cat(sink.in1[:10], 1)),
                in1_exp1.eq(sink.in1[10:15])
            ),

            If(in2_exp == 0,
                in2_mant.eq(Cat(sink.in2[:10], 0)),
                in2_exp1.eq(sink.in2[10:15] + 1)
            ).Else(
                in2_mant.eq(Cat(sink.in2[:10], 1)),
                in2_exp1.eq(sink.in2[10:15])
            )
        ]

        in1_frac_stage1 = Signal(11)
        in2_frac_stage1 = Signal(11)
        in1_sign_stage1 = Signal(11)
        in2_sign_stage1 = Signal(11)
        in1_exp_stage1 = Signal(5)
        in2_exp_stage1 = Signal(5)

        self.sync += [
            # BUGFIX: subtract the adjusted exponent of in2 (in2_exp1, the
            # value actually pipelined below), not the raw in2_exp — they
            # differ for denormal in2, which would misalign stage 2.
            in1_minus_in2_exp.eq(in1_exp1 - in2_exp1),
            in1_frac_stage1.eq(in1_mant),
            in2_frac_stage1.eq(in2_mant),
            in1_exp_stage1.eq(in1_exp1),
            in2_exp_stage1.eq(in2_exp1),
            in1_sign_stage1.eq(in1_sign),
            in2_sign_stage1.eq(in2_sign),
            out_status1.eq(3)
        ]

        # Stage 2
        # Adjust both the input fracs to common exponent
        in1_frac_stage2 = Signal(11)
        in2_frac_stage2 = Signal(11)
        in1_sign_stage2 = Signal(11)
        in2_sign_stage2 = Signal(11)
        in1_minus_in2_exp_stage2 = Signal(5)
        out_2 = Signal(16)

        self.sync += [
            # Sign bit of the difference selects which mantissa is shifted
            # right; the common exponent is the larger operand's.
            If(~in1_minus_in2_exp[5],
                in2_frac_stage2.eq(in2_frac_stage1 >> in1_minus_in2_exp),
                in1_frac_stage2.eq(in1_frac_stage1),
                in1_minus_in2_exp_stage2.eq(in1_exp_stage1)
            ).Else (
                in1_frac_stage2.eq(in1_frac_stage1 >> (-1)*in1_minus_in2_exp),
                in1_minus_in2_exp_stage2.eq(in2_exp_stage1),
                in2_frac_stage2.eq(in2_frac_stage1)
            ),
            in1_sign_stage2.eq(in1_sign_stage1),
            in2_sign_stage2.eq(in2_sign_stage1)
        ]

        # Stage 3
        # Adder Unit
        in1_plus_in2_frac = Signal(12)
        in1_plus_in2_sign = Signal(1)   # carry out of the 11-bit sum
        in1_minus_in2_exp_stage3 = Signal(5)
        out_3 = Signal(16)

        self.sync += [
            Cat(in1_plus_in2_frac, in1_plus_in2_sign).eq(in1_frac_stage2 + in2_frac_stage2),
            in1_minus_in2_exp_stage3.eq(in1_minus_in2_exp_stage2),
            out_3.eq(out_2)
        ]

        # Stage 4
        # Shift and Adjust
        # Leading-one detector drives the normalization shift.
        leadone = Signal(4)
        self.submodules.l1 = LeadOne(12)
        self.comb += [
            self.l1.datai.eq(in1_plus_in2_frac),
            leadone.eq(self.l1.leadone)
        ]
        out_sign_stage4 = Signal(1)
        out_frac_stage4 = Signal(12)
        out_exp_stage4 = Signal(5)
        out_4 = Signal(16)
        self.sync += [
            out_frac_stage4.eq(in1_plus_in2_frac << (leadone)),
            out_exp_stage4.eq(in1_minus_in2_exp_stage3 - leadone + 1),
            out_sign_stage4.eq(in1_plus_in2_sign),
            out_4.eq(out_frac_stage4)
        ]

        # stage 5
        # Normalize and pack
        self.sync += [
            source.out.eq(Cat(out_frac_stage4[1:11], out_exp_stage4, out_sign_stage4))
        ]
class TimingDelayChannel(Module):
    """Delay every field of a channel_layout record by `latency` cycles.

    Builds a per-field shift register in the "pix" clock domain so all
    signals of the channel arrive `latency` pixel clocks later. Used to
    realign data channels with control signals that took extra cycles to
    derive.
    """
    def __init__(self, latency):
        self.sink = Record(channel_layout)  # inputs
        self.source = Record(channel_layout)  # outputs

        # # #

        for name in list_signals(channel_layout):
            s = getattr(self.sink, name)
            for i in range(latency):
                next_s = Signal(len(s))  # without len(s), this makes only one-bit wide delay lines
                self.sync.pix += next_s.eq(s)
                s = next_s
            # Last stage of the shift register feeds the output field.
            self.comb += getattr(self.source, name).eq(s)
class HDMIIn(Module, AutoCSR):
    """Top-level HDMI/DVI input pipeline.

    Per data lane: capture (device-specific SERDES) -> character alignment
    (CharSync) -> word-error monitoring (WER) -> TMDS decoding. The three
    decoded lanes are then inter-channel aligned (ChanSync), sync polarity
    is normalized (SyncPolarity), resolution is measured, and — when a
    `dram_port` is given — frames are extracted and DMA'd to memory.

    hdmi=True additionally decodes TERC4 data islands to derive a precise
    de signal, and delays the pixel channels by one cycle to match it.
    """
    def __init__(self, pads, dram_port=None, n_dma_slots=2, fifo_depth=512, device="xc6",
        default_edid=_default_edid, clkin_freq=148.5e6, split_mmcm=False, mode="ycbcr422", hdmi=False):
        # EDID ROM is only instantiated when the DDC (I2C) pads exist.
        if hasattr(pads, "scl"):
            self.submodules.edid = EDID(pads, default_edid)
        self.submodules.clocking = clocking_cls[device](pads, clkin_freq, split_mmcm)

        # Per-lane capture/align/monitor/decode chain.
        for datan in range(3):
            name = "data" + str(datan)

            cap = datacapture_cls[device](getattr(pads, name + "_p"),
                                          getattr(pads, name + "_n"))
            setattr(self.submodules, name + "_cap", cap)
            # Spartan-6 capture needs the shared serdesstrobe from clocking.
            if hasattr(cap, "serdesstrobe"):
                self.comb += cap.serdesstrobe.eq(self.clocking.serdesstrobe)

            charsync = CharSync()
            setattr(self.submodules, name + "_charsync", charsync)
            self.comb += charsync.raw_data.eq(cap.d)

            wer = WER()
            setattr(self.submodules, name + "_wer", wer)
            self.comb += wer.data.eq(charsync.data)

            decoding = Decoding()
            setattr(self.submodules, name + "_decod", decoding)
            self.comb += [
                decoding.valid_i.eq(charsync.synced),
                decoding.input.eq(charsync.data)
            ]

        # Inter-channel alignment: valid only when all three lanes decode.
        self.submodules.chansync = ChanSync()
        self.comb += [
            self.chansync.valid_i.eq(self.data0_decod.valid_o &
                                     self.data1_decod.valid_o &
                                     self.data2_decod.valid_o),
            self.chansync.data_in0.eq(self.data0_decod.output),
            self.chansync.data_in1.eq(self.data1_decod.output),
            self.chansync.data_in2.eq(self.data2_decod.output)
        ]

        if hdmi:
            decode_terc4 = DecodeTERC4()
            self.submodules.decode_terc4 = ClockDomainsRenamer("pix")(decode_terc4) # rename so state machine is in pix domain, not default sys domain
            self.comb += [
                self.decode_terc4.valid_i.eq(self.chansync.chan_synced),
                self.decode_terc4.data_in0.eq(self.chansync.data_out0),
                self.decode_terc4.data_in1.eq(self.chansync.data_out1),
                self.decode_terc4.data_in2.eq(self.chansync.data_out2),
            ]

            self.submodules.syncpol = SyncPolarity(hdmi)
            self.comb += self.syncpol.de_int.eq(self.decode_terc4.de_o) # manually wire up the fancy de signal in case hdmi is True

            # delay the rest of the signals to match the time it took to derive the fancy de signal
            #for datan in range(3):
            #    name = "data" + str(datan)

            #    timingdelay = TimingDelayChannel(1)
            #    timingdelay = ClockDomainsRenamer("pix")(timingdelay)
            #    setattr(self.submodules, name + "_timingdelay", timingdelay)
            #    self.comb += timingdelay.sink.eq(getattr(self, "self.chansync.data_out" + str(datan))) # this code doesn't work. why???
            self.submodules.data0_timingdelay = TimingDelayChannel(1)
            self.submodules.data1_timingdelay = TimingDelayChannel(1)
            self.submodules.data2_timingdelay = TimingDelayChannel(1)

            self.comb += [
                self.data0_timingdelay.sink.eq(self.chansync.data_out0),
                self.data1_timingdelay.sink.eq(self.chansync.data_out1),
                self.data2_timingdelay.sink.eq(self.chansync.data_out2),
            ]
            self.comb += [
                self.syncpol.data_in0.eq(self.data0_timingdelay.source),
                self.syncpol.data_in1.eq(self.data1_timingdelay.source),
                self.syncpol.data_in2.eq(self.data2_timingdelay.source),
                self.syncpol.valid_i.eq(self.chansync.chan_synced) # OK not to delay because it's a "once in a blue moon" transition that sorts itself out on the next VSYNC
            ]

        else:
            # DVI path: sync polarity fed straight from chansync.
            self.submodules.syncpol = SyncPolarity()
            self.comb += [
                self.syncpol.valid_i.eq(self.chansync.chan_synced),
                self.syncpol.data_in0.eq(self.chansync.data_out0),
                self.syncpol.data_in1.eq(self.chansync.data_out1),
                self.syncpol.data_in2.eq(self.chansync.data_out2)
            ]

        # Measure active resolution from de/vsync.
        self.submodules.resdetection = ResolutionDetection()
        self.comb += [
            self.resdetection.valid_i.eq(self.syncpol.valid_o),
            self.resdetection.de.eq(self.syncpol.de),
            self.resdetection.vsync.eq(self.syncpol.vsync)
        ]

        if dram_port is not None:
            # Frame extraction + DMA to main memory.
            self.submodules.frame = FrameExtraction(dram_port.dw, fifo_depth, mode)
            self.comb += [
                self.frame.valid_i.eq(self.syncpol.valid_o),
                self.frame.de.eq(self.syncpol.de),
                self.frame.vsync.eq(self.syncpol.vsync),
                self.frame.r.eq(self.syncpol.r),
                self.frame.g.eq(self.syncpol.g),
                self.frame.b.eq(self.syncpol.b)
            ]


            self.submodules.dma = DMA(dram_port, n_dma_slots)
            self.comb += self.frame.frame.connect(self.dma.frame)
            self.ev = self.dma.ev

    # ev is exposed manually; keep AutoCSR from scanning it.
    autocsr_exclude = {"ev"}
y 53 | self.cb = cb 54 | self.cr = cr 55 | self.length = len(y) 56 | 57 | 58 | def set_data(self, data): 59 | self.data = data 60 | 61 | 62 | def pack_rgb(self): 63 | self.data = [] 64 | for i in range(self.length): 65 | data = (self.r[i] & 0xff) << 16 66 | data |= (self.g[i] & 0xff) << 8 67 | data |= (self.b[i] & 0xff) << 0 68 | self.data.append(data) 69 | return self.data 70 | 71 | 72 | def pack_ycbcr(self): 73 | self.data = [] 74 | for i in range(self.length): 75 | data = (self.y[i] & 0xff) << 16 76 | data |= (self.cb[i] & 0xff) << 8 77 | data |= (self.cr[i] & 0xff) << 0 78 | self.data.append(data) 79 | return self.data 80 | 81 | def pack_rgb16f(self): 82 | self.data = [] 83 | for i in range(self.length): 84 | data = (self.rf[i] & 0xffff) << 32 85 | data |= (self.gf[i] & 0xffff) << 16 86 | data |= (self.bf[i] & 0xffff) << 0 87 | self.data.append(data) 88 | return self.data 89 | 90 | 91 | def unpack_rgb(self): 92 | self.r = [] 93 | self.g = [] 94 | self.b = [] 95 | for data in self.data: 96 | self.r.append((data >> 16) & 0xff) 97 | self.g.append((data >> 8) & 0xff) 98 | self.b.append((data >> 0) & 0xff) 99 | return self.r, self.g, self.b 100 | 101 | 102 | def unpack_ycbcr(self): 103 | self.y = [] 104 | self.cb = [] 105 | self.cr = [] 106 | for data in self.data: 107 | self.y.append((data >> 16) & 0xff) 108 | self.cb.append((data >> 8) & 0xff) 109 | self.cr.append((data >> 0) & 0xff) 110 | return self.y, self.cb, self.cr 111 | 112 | def unpack_rgb16f(self): 113 | self.rf = [] 114 | self.gf = [] 115 | self.bf = [] 116 | for data in self.data: 117 | self.rf.append((data >> 32) & 0xffff) 118 | self.gf.append((data >> 16) & 0xffff) 119 | self.bf.append((data >> 0 ) & 0xffff) 120 | return self.rf, self.gf, self.bf 121 | 122 | # Model for our implementation 123 | def rgb2ycbcr_model(self): 124 | self.y = [] 125 | self.cb = [] 126 | self.cr = [] 127 | for r, g, b in zip(self.r, self.g, self.b): 128 | yraw = self.coefs["ca"]*(r-g) + self.coefs["cb"]*(b-g) + g 129 | 
self.y.append(int(yraw + self.coefs["yoffset"])) 130 | self.cb.append(int(self.coefs["cc"]*(b-yraw) + self.coefs["coffset"])) 131 | self.cr.append(int(self.coefs["cd"]*(r-yraw) + self.coefs["coffset"])) 132 | return self.y, self.cb, self.cr 133 | 134 | 135 | # Wikipedia implementation used as reference 136 | def rgb2ycbcr(self): 137 | self.y = [] 138 | self.cb = [] 139 | self.cr = [] 140 | for r, g, b in zip(self.r, self.g, self.b): 141 | self.y.append(int(0.299*r + 0.587*g + 0.114*b)) 142 | self.cb.append(int(-0.1687*r - 0.3313*g + 0.5*b + 128)) 143 | self.cr.append(int(0.5*r - 0.4187*g - 0.0813*b + 128)) 144 | return self.y, self.cb, self.cr 145 | 146 | 147 | # Model for our implementation 148 | def ycbcr2rgb_model(self): 149 | self.r = [] 150 | self.g = [] 151 | self.b = [] 152 | for y, cb, cr in zip(self.y, self.cb, self.cr): 153 | self.r.append(int(y - self.coefs["yoffset"] + (cr - self.coefs["coffset"])*self.coefs["acoef"])) 154 | self.g.append(int(y - self.coefs["yoffset"] + (cb - self.coefs["coffset"])*self.coefs["bcoef"] + (cr - self.coefs["coffset"])*self.coefs["ccoef"])) 155 | self.b.append(int(y - self.coefs["yoffset"] + (cb - self.coefs["coffset"])*self.coefs["dcoef"])) 156 | return self.r, self.g, self.b 157 | 158 | 159 | # Wikipedia implementation used as reference 160 | def ycbcr2rgb(self): 161 | self.r = [] 162 | self.g = [] 163 | self.b = [] 164 | for y, cb, cr in zip(self.y, self.cb, self.cr): 165 | self.r.append(int(y + (cr - 128) * 1.402)) 166 | self.g.append(int(y + (cb - 128) * -0.34414 + (cr - 128) * -0.71414)) 167 | self.b.append(int(y + (cb - 128) * 1.772)) 168 | return self.r, self.g, self.b 169 | 170 | # Convert 16 bit float to 8 bit pixel 171 | def rgb16f2rgb_model(self): 172 | self.r = [] 173 | self.g = [] 174 | self.b = [] 175 | for rf, gf, bf in zip(self.rf, self.gf, self.bf): 176 | self.r.append(float2int(rf)) 177 | self.g.append(float2int(gf)) 178 | self.b.append(float2int(bf)) 179 | return self.r, self.g, self.b 180 | 181 | # 
def int2float(x):
    """Convert an 8-bit unsigned int to half-precision (binary16) bits.

    The input (range [0, 255]) is encoded as the float value x/256, mapping
    the whole pixel range onto [0, 255/256] — the high-precision part of the
    float16 range. Returns the 16-bit pattern as a plain int; 0 maps to +0.0.
    """
    if x == 0:
        return 0
    msb = x.bit_length() - 1                 # position of the leading one (0..7)
    exp = 7 + msb                            # biased exponent encoding x/256
    frac = (x ^ (1 << msb)) << (10 - msb)    # drop hidden bit, left-align to 10 bits
    return (exp << 10) | frac


def float2int(x):
    """Convert half-precision (binary16) bits in [0, 1) to an 8-bit int.

    Inverse of int2float: the 16-bit pattern x, interpreted as a float in
    [0, 1), yields an unsigned int in [0, 255]. 0 maps to 0.
    """
    if x == 0:
        return 0
    exp = (x >> 10) & 0x1f          # biased exponent field
    frac = (1 << 10) | (x & 0x3ff)  # fraction with the hidden one restored
    # Value is frac * 2^(exp - 15 - 10); scaling by 256 gives frac >> (17 - exp).
    return frac >> (17 - exp)
class EDID(Module, AutoCSR):
    """I2C-slave EDID ROM plus hot-plug-detect plumbing.

    Implements the DDC slave side of an HDMI/DVI sink: samples SCL/SDA,
    answers I2C address 0x50 and serves the `default` EDID bytes from a
    block RAM, with an auto-incrementing offset counter. SDA is only ever
    pulled low (open-drain); a Tristate or explicit pu/pd pads are used
    depending on what the platform provides.
    """
    def __init__(self, pads, default=_default_edid):
        self._hpd_notif = CSRStatus()   # hot-plug detect state, as seen on the pad
        self._hpd_en = CSRStorage()     # software enable for asserting HPD
        mem_size = len(default)
        # EDID is defined in 128-byte blocks.
        assert mem_size%128 == 0
        self.specials.mem = Memory(8, mem_size, init=default)

        # # #

        # HPD
        if hasattr(pads, "hpd_notif"):
            if hasattr(getattr(pads, "hpd_notif"), "inverted"):
                hpd_notif_n = Signal()
                self.comb += hpd_notif_n.eq(~pads.hpd_notif)
                self.specials += MultiReg(hpd_notif_n, self._hpd_notif.status)
            else:
                self.specials += MultiReg(pads.hpd_notif, self._hpd_notif.status)
        else:
            # No pad: report "always plugged".
            self.comb += self._hpd_notif.status.eq(1)
        if hasattr(pads, "hpd_en"):
            self.comb += pads.hpd_en.eq(self._hpd_en.storage)

        # EDID
        scl_raw = Signal()
        sda_i = Signal()
        sda_raw = Signal()
        sda_drv = Signal()       # combinatorial "drive SDA low" request
        _sda_drv_reg = Signal()  # registered version actually driving the pad
        _sda_i_async = Signal()
        self.sync += _sda_drv_reg.eq(sda_drv)

        pad_scl = getattr(pads, "scl")
        if hasattr(pad_scl, "inverted"):
            self.specials += MultiReg(~pads.scl, scl_raw)
        else:
            self.specials += MultiReg(pads.scl, scl_raw)

        if hasattr(pads, "sda_pu") and hasattr(pads, "sda_pd"):
            # Separate pull-up/pull-down control pads instead of a tristate.
            pad_sda = getattr(pads, "sda")
            if hasattr(pad_sda, "inverted"):
                self.specials += MultiReg(~pads.sda, sda_raw)
            else:
                self.specials += MultiReg(pads.sda, sda_raw)

            self.comb += [
                pads.sda_pu.eq(0),
                pads.sda_pd.eq(_sda_drv_reg),
            ]
        else:
            # Open-drain emulation: output is constant 0, enable pulls low.
            self.specials += [
                Tristate(pads.sda, 0, _sda_drv_reg, _sda_i_async),
                MultiReg(_sda_i_async, sda_raw),
            ]

        # for debug
        self.scl = scl_raw
        self.sda_i = sda_i
        self.sda_o = Signal()
        self.comb += self.sda_o.eq(~_sda_drv_reg)
        self.sda_oe = _sda_drv_reg

        # Periodic resampling of SCL/SDA: take a sample every 64 sys clocks
        # (when the 6-bit counter wraps) to filter the asynchronous inputs.
        scl_i = Signal()
        samp_count = Signal(6)
        samp_carry = Signal()
        self.sync += [
            Cat(samp_count, samp_carry).eq(samp_count + 1),
            If(samp_carry,
                scl_i.eq(scl_raw),
                sda_i.eq(sda_raw)
            )
        ]

        # Edge detection on the sampled lines.
        scl_r = Signal()
        sda_r = Signal()
        scl_rising = Signal()
        sda_rising = Signal()
        sda_falling = Signal()
        self.sync += [
            scl_r.eq(scl_i),
            sda_r.eq(sda_i)
        ]
        self.comb += [
            scl_rising.eq(scl_i & ~scl_r),
            sda_rising.eq(sda_i & ~sda_r),
            sda_falling.eq(~sda_i & sda_r)
        ]

        # I2C START condition: SDA falls while SCL is high.
        start = Signal()
        self.comb += start.eq(scl_i & sda_falling)

        # Shift in 8 data bits, MSB first; counter counts bits 0..8
        # (8 = byte complete / ACK slot).
        din = Signal(8)
        counter = Signal(max=9)
        self.sync += [
            If(start, counter.eq(0)),
            If(scl_rising,
                If(counter == 8,
                    counter.eq(0)
                ).Else(
                    counter.eq(counter + 1),
                    din.eq(Cat(sda_i, din[:7]))
                )
            )
        ]

        self.din = din
        self.counter = counter

        # R/W bit latched from the address byte.
        is_read = Signal()
        update_is_read = Signal()
        self.sync += If(update_is_read, is_read.eq(din[0]))

        # EDID byte offset: loaded from the offset byte, incremented after
        # each byte read out.
        offset_counter = Signal(max=mem_size)
        oc_load = Signal()
        oc_inc = Signal()
        self.sync += \
            If(oc_load,
                offset_counter.eq(din)
            ).Elif(oc_inc,
                offset_counter.eq(offset_counter + 1)
            )

        rdport = self.mem.get_port()
        self.specials += rdport
        self.comb += rdport.adr.eq(offset_counter)
        data_bit = Signal()

        # SDA is pulled low either to ACK (zero_drv) or to transmit a 0
        # data bit (data_drv with data_bit == 0).
        zero_drv = Signal()
        data_drv = Signal()
        self.comb += \
            If(zero_drv,
                sda_drv.eq(1)
            ).Elif(data_drv,
                sda_drv.eq(~data_bit)
            )

        data_drv_en = Signal()
        data_drv_stop = Signal()
        self.sync += \
            If(data_drv_en,
                data_drv.eq(1)
            ).Elif(data_drv_stop,
                data_drv.eq(0)
            )
        # Select the current bit of the memory byte (MSB first).
        self.sync += \
            If(data_drv_en,
                chooser(rdport.dat_r, counter, data_bit, 8, reverse=True)
            )

        self.submodules.fsm = fsm = FSM()

        # Idle until a START is seen (handled by the catch-all below).
        fsm.act("WAIT_START")
        # Address byte received: answer only 7-bit address 0x50 (DDC).
        fsm.act("RCV_ADDRESS",
            If(counter == 8,
                If(din[1:] == 0x50,
                    update_is_read.eq(1),
                    NextState("ACK_ADDRESS0")
                ).Else(
                    NextState("WAIT_START")
                )
            )
        )
        # ACK sequence: wait for SCL low, hold SDA low through the SCL
        # high phase, release after SCL falls again.
        fsm.act("ACK_ADDRESS0",
            If(~scl_i, NextState("ACK_ADDRESS1"))
        )
        fsm.act("ACK_ADDRESS1",
            zero_drv.eq(1),
            If(scl_i, NextState("ACK_ADDRESS2"))
        )
        fsm.act("ACK_ADDRESS2",
            zero_drv.eq(1),
            If(~scl_i,
                If(is_read,
                    NextState("READ")
                ).Else(
                    NextState("RCV_OFFSET")
                )
            )
        )

        # Write transaction: the single data byte is the EDID offset.
        fsm.act("RCV_OFFSET",
            If(counter == 8,
                oc_load.eq(1),
                NextState("ACK_OFFSET0")
            )
        )
        fsm.act("ACK_OFFSET0",
            If(~scl_i,
                NextState("ACK_OFFSET1")
            )
        )
        fsm.act("ACK_OFFSET1",
            zero_drv.eq(1),
            If(scl_i,
                NextState("ACK_OFFSET2")
            )
        )
        fsm.act("ACK_OFFSET2",
            zero_drv.eq(1),
            If(~scl_i,
                NextState("RCV_ADDRESS")
            )
        )

        # Read transaction: shift out one byte, then sample the master's
        # ACK/NACK to decide whether to continue.
        fsm.act("READ",
            If(~scl_i,
                If(counter == 8,
                    data_drv_stop.eq(1),
                    NextState("ACK_READ")
                ).Else(
                    data_drv_en.eq(1)
                )
            )
        )
        fsm.act("ACK_READ",
            If(scl_rising,
                oc_inc.eq(1),
                If(sda_i,
                    NextState("WAIT_START")
                ).Else(
                    NextState("READ")
                )
            )
        )

        # Global overrides: any START restarts address reception, and
        # deasserted HPD enable parks the FSM.
        for state in fsm.actions.keys():
            fsm.act(state, If(start, NextState("RCV_ADDRESS")))
            if hasattr(pads, "hpd_en"):
                fsm.act(state, If(~self._hpd_en.storage, NextState("WAIT_START")))
If empty, init with 0"""
    if filename == '':
        # No filename given: return a zero-filled buffer of the requested size.
        return [0] * size
    else:
        with open(get_path(filename), "rb") as file:
            data = list(file.read())
            # The initialization blob must match the RAM region exactly.
            if len(data) != size:
                raise ValueError("Invalid size for file {}. Expected size: {}, actual size: {}".format(
                    filename, size, len(data)))
            return data

# Terminal -----------------------------------------------------------------------------------------

class Terminal(Module):
    """Text-mode VGA terminal (80x30 characters, 8x16 font, 640x480 output).

    A single byte-wide block RAM holds the screen text buffer (char/color byte
    pairs) followed by the font bitmap. The CPU accesses the RAM through a
    byte-wide Wishbone slave (sys clock domain); the scan-out logic reads it
    from the "vga" clock domain.

    Parameters
    ----------
    pads : Record or None
        VGA pads (red/green/blue/hsync/vsync). If None, plain Signals are
        created instead (useful for simulation).
    font_filename : str
        4096-byte font bitmap (256 chars x 16 lines).
    screen_init_filename : str
        4800-byte initial screen contents (80x30 char/color pairs).
    """
    def __init__(self, pads=None, font_filename="cp437.bin", screen_init_filename="screen-init.bin"):
        # Wishbone interface (byte-wide slave).
        self.bus = bus = wishbone.Interface(data_width=8)

        # Acknowledge immediately: every cycle completes in one clock.
        self.sync += [
            bus.ack.eq(0),
            If (bus.cyc & bus.stb & ~bus.ack, bus.ack.eq(1))
        ]

        # RAM initialization: screen text buffer first, then the font.
        screen_init = read_ram_init_file(screen_init_filename, 4800)
        font = read_ram_init_file(font_filename, 4096)
        ram_init = screen_init + font

        # Create RAM. depth = 4800 (screen) + 4096 (font) = 8896 bytes.
        mem = Memory(width=8, depth=8896, init=ram_init)
        self.specials += mem
        # Write port in the sys domain (Wishbone side)...
        wrport = mem.get_port(write_capable=True, clock_domain="sys")
        self.specials += wrport
        # ...read port in the vga domain (scan-out side).
        rdport = mem.get_port(write_capable=False, clock_domain="vga")
        self.specials += rdport

        # Memory map internal block RAM to Wishbone interface.
        self.sync += [
            wrport.we.eq(0),
            If (bus.cyc & bus.stb & bus.we & ~bus.ack,
                wrport.we.eq(1),
                wrport.dat_w.eq(bus.dat_w),
            ),
        ]

        self.comb += [
            wrport.adr.eq(bus.adr),
            bus.dat_r.eq(wrport.dat_r)
        ]

        # Display resolution
        WIDTH = 640
        HEIGHT = 480

        # Offset to font data in RAM (80 cols x 30 rows x 2 bytes/char = 4800).
        FONT_ADDR = 80 * 30 * 2

        # VGA output (plain Signals when no pads are supplied).
        self.red = red = Signal(8) if pads is None else pads.red
        self.green = green = Signal(8) if pads is None else pads.green
        self.blue = blue = Signal(8) if pads is None else pads.blue
        self.hsync = hsync = Signal() if pads is None else pads.hsync
        self.vsync = vsync = Signal() if pads is None else pads.vsync

        # VGA timings (cumulative pixel/line positions, standard 640x480@60).
        H_SYNC_PULSE = 96
        H_BACK_PORCH = 48 + H_SYNC_PULSE
        H_DATA = WIDTH + H_BACK_PORCH
        H_FRONT_PORCH = 16 + H_DATA

        V_SYNC_PULSE = 2
        V_BACK_PORCH = 29 + V_SYNC_PULSE
        V_DATA = HEIGHT + V_BACK_PORCH
        V_FRONT_PORCH = 10 + V_DATA

        pixel_counter = Signal(10)
        line_counter = Signal(10)

        # Read address in text RAM
        text_addr = Signal(16)

        # Read address in text RAM at line start
        text_addr_start = Signal(16)

        # Current line within a character, 0 to 15
        fline = Signal(4)

        # Current x position within a character, 0 to 7
        fx = Signal(3)

        # Current and next byte for a character line
        fbyte = Signal(8)
        next_byte = Signal(8)

        # Current foreground color
        fgcolor = Signal(24)
        next_fgcolor = Signal(24)

        # Current background color
        bgcolor = Signal(24)

        # Current fg/bg color index byte from RAM (low nibble fg, high nibble bg).
        color = Signal(8)

        # Color index and lookup
        color_index = Signal(4)
        color_lookup = Signal(24)

        # VGA palette (classic 16-color text-mode palette, 0xRRGGBB).
        palette = [
            0x000000, 0x0000aa, 0x00aa00, 0x00aaaa, 0xaa0000, 0xaa00aa, 0xaa5500, 0xaaaaaa,
            0x555555, 0x5555ff, 0x55ff55, 0x55ffff, 0xff5555, 0xff55ff, 0xffff55, 0xffffff
        ]
        cases = {}
        for i in range(16):
            cases[i] = color_lookup.eq(palette[i])
        self.comb += Case(color_index, cases)

        self.sync.vga += [
            # Default values (black outside the active area).
            red.eq(0),
            green.eq(0),
            blue.eq(0),

            # Show pixels: shift fbyte MSB-first, picking fg or bg color per bit.
            If((line_counter >= V_BACK_PORCH) & (line_counter < V_DATA),
                If((pixel_counter >= H_BACK_PORCH) & (pixel_counter < H_DATA),
                    If(fbyte[7],
                        red.eq(fgcolor[16:24]),
                        green.eq(fgcolor[8:16]),
                        blue.eq(fgcolor[0:8])
                    ).Else(
                        red.eq(bgcolor[16:24]),
                        green.eq(bgcolor[8:16]),
                        blue.eq(bgcolor[0:8])
                    ),
                    # Shift left by one, filling with 0.
                    fbyte.eq(Cat(Signal(), fbyte[:-1]))
                )
            ),

            # Load next character code, font line and color.
            # The RAM read port has one cycle of latency, so requests issued
            # at fx==1/2/3 are consumed at fx==3/4/5 (pipelined prefetch).
            If(fx == 1,
                # schedule reading the character code
                rdport.adr.eq(text_addr),
                text_addr.eq(text_addr + 1)
            ),
            If(fx == 2,
                # Schedule reading the color
                rdport.adr.eq(text_addr),
                text_addr.eq(text_addr + 1)
            ),
            If(fx == 3,
                # Read character code, and set address for font line
                # (char code * 16 via Cat(Signal(4), ...) + current font line).
                rdport.adr.eq(FONT_ADDR + Cat(Signal(4), rdport.dat_r) + fline)
            ),
            If(fx == 4,
                # Read color
                color.eq(rdport.dat_r)
            ),
            If(fx == 5,
                # Read font line, and set color index to get foreground color
                next_byte.eq(rdport.dat_r),
                color_index.eq(color[0:4]),
            ),
            If(fx == 6,
                # Get next foreground color, and set color index for background color
                next_fgcolor.eq(color_lookup),
                color_index.eq(color[4:8])
            ),
            If(fx == 7,
                # Set background color and everything for the next 8 pixels
                bgcolor.eq(color_lookup),
                fgcolor.eq(next_fgcolor),
                fbyte.eq(next_byte)
            ),
            # Wrap fx 0..7; the If overrides the increment (migen:
            # last assignment in the list wins).
            fx.eq(fx + 1),
            If(fx == 7, fx.eq(0)),

            # Horizontal timing for one line (hsync active low during pulse).
            pixel_counter.eq(pixel_counter + 1),
            If(pixel_counter < H_SYNC_PULSE,
                hsync.eq(0)
            ).Elif (pixel_counter < H_BACK_PORCH,
                hsync.eq(1)
            ),
            If(pixel_counter == H_BACK_PORCH - 9,
                # Prepare reading first character of next line
                # (start prefetch 9 pixels early to fill the fx pipeline).
                fx.eq(0),
                text_addr.eq(text_addr_start)
            ),
            If(pixel_counter == H_FRONT_PORCH,
                # Initialize next line
                pixel_counter.eq(0),
                line_counter.eq(line_counter + 1),

                # Font height is 16 pixels
                fline.eq(fline + 1),
                If(fline == 15,
                    fline.eq(0),
                    # Advance one text row (80 chars x 2 bytes).
                    text_addr_start.eq(text_addr_start + 2 * 80)
                )
            ),

            # Vertical timing for one screen (vsync active low during pulse).
            If(line_counter < V_SYNC_PULSE,
                vsync.eq(0)
            ).Elif(line_counter < V_BACK_PORCH,
                vsync.eq(1)
            ),
            If(line_counter == V_FRONT_PORCH,
                # End of image
                line_counter.eq(0)
            ),
            If(line_counter == V_BACK_PORCH - 1,
                # Prepare generating next image data
                fline.eq(0),
                text_addr_start.eq(0)
            )
        ]

# --------------------------------------------------------------------------------------------------
# /litevideo/input/clocking.py
# --------------------------------------------------------------------------------------------------

from migen import *
from migen.genlib.cdc import MultiReg
from migen.genlib.resetsync import AsyncResetSynchronizer

from litex.soc.interconnect.csr import *


class S6Clocking(Module, AutoCSR):
    """HDMI input clocking for Spartan-6.

    Recovers the pixel clock from the HDMI clock pair and derives the
    pix / pix2x / pix10x domains with a PLL_ADV + BUFPLL. The PLL is
    runtime-reconfigurable through its DRP port, exposed via CSRs.
    split_clocking is not supported on this family.
    """
    def __init__(self, pads, clkin_freq=None, split_clocking=None):
        assert not bool(split_clocking), "Can't use split_clocking with S6Clocking"
        self._pll_reset = CSRStorage(reset=1)
        self._locked = CSRStatus()

        # DRP (dynamic reconfiguration port) CSRs.
        self._pll_adr = CSRStorage(5)
        self._pll_dat_r = CSRStatus(16)
        self._pll_dat_w = CSRStorage(16)
        self._pll_read = CSR()
        self._pll_write = CSR()
        self._pll_drdy = CSRStatus()

        self.locked = Signal()
        self.serdesstrobe = Signal()
        self.clock_domains._cd_pix = ClockDomain()
        self.clock_domains._cd_pix_o = ClockDomain()
        self.clock_domains._cd_pix2x = ClockDomain()
        self.clock_domains._cd_pix10x = ClockDomain(reset_less=True)

        # # #

        # Differential HDMI clock input buffer.
        self.clk_input = Signal()
        self.specials += Instance("IBUFDS", name="hdmi_in_ibufds",
            i_I=pads.clk_p, i_IB=pads.clk_n,
            o_O=self.clk_input)

        clkfbout = Signal()
        pll_locked = Signal()
        pll_clk0 = Signal()
        pll_clk1 = Signal()
        pll_clk2 = Signal()
        pll_drdy = Signal()
        # DRDY handshake: clear on a new DRP access, set when the PLL reports
        # the access complete.
        self.sync += If(self._pll_read.re | self._pll_write.re,
                self._pll_drdy.status.eq(0)
            ).Elif(pll_drdy,
                self._pll_drdy.status.eq(1)
            )
# (continuation of S6Clocking.__init__ — PLL instantiation and buffering)
        # PLL: VCO = clk_input x10; outputs pix10x (bit clock), pix2x, pix.
        self.specials += [
            Instance("PLL_ADV",
                name="hdmi_in_pll_adv",
                p_CLKFBOUT_MULT=10,
                p_CLKOUT0_DIVIDE=1,   # pix10x
                p_CLKOUT1_DIVIDE=5,   # pix2x
                p_CLKOUT2_DIVIDE=10,  # pix
                p_COMPENSATION="INTERNAL",

                i_CLKINSEL=1,
                i_CLKIN1=self.clk_input,
                o_CLKOUT0=pll_clk0, o_CLKOUT1=pll_clk1, o_CLKOUT2=pll_clk2,
                o_CLKFBOUT=clkfbout, i_CLKFBIN=clkfbout,
                o_LOCKED=pll_locked, i_RST=self._pll_reset.storage,

                # DRP wiring, driven from the sys clock domain.
                i_DADDR=self._pll_adr.storage,
                o_DO=self._pll_dat_r.status,
                i_DI=self._pll_dat_w.storage,
                i_DEN=self._pll_read.re | self._pll_write.re,
                i_DWE=self._pll_write.re,
                o_DRDY=pll_drdy,
                i_DCLK=ClockSignal())
        ]

        locked_async = Signal()
        self.specials += [
            # BUFPLL drives the 10x serdes IO clock and the SERDESSTROBE.
            Instance("BUFPLL", name="hdmi_in_bufpll", p_DIVIDE=5,
                i_PLLIN=pll_clk0, i_GCLK=ClockSignal("pix2x"), i_LOCKED=pll_locked,
                o_IOCLK=self._cd_pix10x.clk, o_LOCK=locked_async, o_SERDESSTROBE=self.serdesstrobe),
            Instance("BUFG", name="hdmi_in_pix2x_bufg", i_I=pll_clk1, o_O=self._cd_pix2x.clk),
            Instance("BUFG", name="hdmi_in_pix_bufg", i_I=pll_clk2, o_O=self._cd_pix.clk),
            # Synchronize the asynchronous lock indication into sys.
            MultiReg(locked_async, self.locked, "sys")
        ]
        self.comb += self._locked.status.eq(self.locked)

        # Hold the pixel domains in reset until the clocking is locked.
        self.specials += [
            AsyncResetSynchronizer(self._cd_pix, ~locked_async),
            AsyncResetSynchronizer(self._cd_pix2x, ~locked_async),
        ]
        # No split clocking on S6: the output pixel domain is the input one.
        self.comb += self._cd_pix_o.clk.eq(self._cd_pix.clk)


class S7Clocking(Module, AutoCSR):
    """HDMI input clocking for 7-Series.

    Recovers the pixel clock from the HDMI clock pair and derives the
    pix / pix1p25x / pix5x domains with an MMCME2_ADV (DRP-reconfigurable
    via CSRs). With split_clocking, a second PLLE2_ADV generates separate
    pix_o / pix5x_o output-side domains phase-related to the input side.
    """
    def __init__(self, pads, clkin_freq=148.5e6, split_clocking=False):
        self._mmcm_reset = CSRStorage(reset=1)
        self._locked = CSRStatus()

        # DRP (dynamic reconfiguration port) CSRs.
        self._mmcm_read = CSR()
        self._mmcm_write = CSR()
        self._mmcm_drdy = CSRStatus()
        self._mmcm_adr = CSRStorage(7)
        self._mmcm_dat_w = CSRStorage(16)
        self._mmcm_dat_r = CSRStatus(16)

        self.locked = Signal()
        self.clock_domains.cd_pix = ClockDomain()
# (continuation of S7Clocking.__init__)
        self.clock_domains.cd_pix_o = ClockDomain()
        self.clock_domains.cd_pix1p25x = ClockDomain()
        self.clock_domains.cd_pix5x = ClockDomain(reset_less=True)
        self.clock_domains.cd_pix5x_o = ClockDomain(reset_less=True)

        if split_clocking:
            # Second DRP port (output-side PLL) shares the address/data CSRs.
            self._mmcm_write_o = CSR()
            self._mmcm_read_o = CSR()
            self._mmcm_dat_o_r = CSRStatus(16)
            self._mmcm_drdy_o = CSRStatus()

        # # #

        assert clkin_freq in [74.25e6, 148.5e6]
        self.clk_input = Signal()
        clk_input_bufr = Signal()
        # Use the complementary output when the clock pair is board-inverted.
        if hasattr(pads.clk_p, "inverted"):
            self.specials += Instance("IBUFDS_DIFF_OUT",
                name="hdmi_in_ibufds",
                i_I=pads.clk_p, i_IB=pads.clk_n,
                o_OB=self.clk_input)
        else:
            self.specials += Instance("IBUFDS_DIFF_OUT",
                name="hdmi_in_ibufds",
                i_I=pads.clk_p, i_IB=pads.clk_n,
                o_O=self.clk_input)
        self.specials += Instance("BUFR", i_I=self.clk_input, o_O=clk_input_bufr)

        mmcm_fb = Signal()
        mmcm_locked = Signal()
        mmcm_clk0 = Signal()
        mmcm_clk1 = Signal()
        mmcm_clk2 = Signal()
        mmcm_drdy = Signal()
        mmcm_fb_o = Signal()  # this should be harmless in single domain, but essential for split

        # NOTE(review): CLKIN1_PERIOD is fixed at 6.734ns (148.5MHz) even when
        # clkin_freq == 74.25e6 — presumably the MMCM is reprogrammed at
        # runtime via DRP; confirm against the driver.
        self.specials += [
            Instance("MMCME2_ADV",
                p_BANDWIDTH="OPTIMIZED", i_RST=self._mmcm_reset.storage, o_LOCKED=mmcm_locked,

                # VCO
                p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=6.734,
                p_CLKFBOUT_MULT_F=5.0, p_CLKFBOUT_PHASE=0.000, p_DIVCLK_DIVIDE=1,
                # p_SS_EN="TRUE", p_SS_MODE="CENTER_LOW",
                i_CLKIN1=clk_input_bufr, i_CLKFBIN=mmcm_fb_o, o_CLKFBOUT=mmcm_fb,

                # pix clk
                p_CLKOUT0_DIVIDE_F=5, p_CLKOUT0_PHASE=0.000, o_CLKOUT0=mmcm_clk0,
                # pix1p25x clk
                p_CLKOUT1_DIVIDE=4, p_CLKOUT1_PHASE=0.000, o_CLKOUT1=mmcm_clk1,
                # pix5x clk
                p_CLKOUT2_DIVIDE=1, p_CLKOUT2_PHASE=0.000, o_CLKOUT2=mmcm_clk2,

                # DRP
                i_DCLK=ClockSignal(),
                i_DWE=self._mmcm_write.re,
                i_DEN=self._mmcm_read.re | self._mmcm_write.re,
                o_DRDY=mmcm_drdy,
                i_DADDR=self._mmcm_adr.storage,
                i_DI=self._mmcm_dat_w.storage,
                o_DO=self._mmcm_dat_r.status
            ),
            Instance("BUFG", i_I=mmcm_clk0, o_O=self.cd_pix.clk),
            Instance("BUFG", i_I=mmcm_clk1, o_O=self.cd_pix1p25x.clk),
            Instance("BUFG", i_I=mmcm_clk2, o_O=self.cd_pix5x.clk),
            Instance("BUFG", i_I=mmcm_fb, o_O=mmcm_fb_o),  # compensate this delay to minimize phase offset with slave
        ]

        # DRDY handshake: clear on a new DRP access, set on completion.
        self.sync += [
            If(self._mmcm_read.re | self._mmcm_write.re,
                self._mmcm_drdy.status.eq(0)
            ).Elif(mmcm_drdy,
                self._mmcm_drdy.status.eq(1)
            )
        ]

        if split_clocking:
            mmcm_fb2_o = Signal()
            mmcm_locked_o = Signal()
            mmcm_clk0_o = Signal()
            mmcm_clk2_o = Signal()
            mmcm_drdy_o = Signal()

            self.specials += [
                Instance("PLLE2_ADV",
                    p_BANDWIDTH="LOW", i_RST=self._mmcm_reset.storage, o_LOCKED=mmcm_locked_o,

                    # VCO
                    p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=6.734,
                    p_CLKFBOUT_MULT=10, p_CLKFBOUT_PHASE=0.000, p_DIVCLK_DIVIDE=1,  # PLL range is 800-1866 MHz, unlike MMCM which is 600-1440 MHz
                    i_CLKIN1=mmcm_clk0,  # uncompensated delay for best phase match between master/slave
                    i_CLKFBIN=mmcm_fb2_o, o_CLKFBOUT=mmcm_fb2_o,

                    # pix clk
                    p_CLKOUT0_DIVIDE=10, p_CLKOUT0_PHASE=0.000, o_CLKOUT0=mmcm_clk0_o,
                    p_CLKOUT2_DIVIDE=2, p_CLKOUT2_PHASE=0.000, o_CLKOUT2=mmcm_clk2_o,

                    # DRP
                    i_DCLK=ClockSignal(),
                    i_DWE=self._mmcm_write_o.re,
                    i_DEN=self._mmcm_read_o.re | self._mmcm_write_o.re,
                    o_DRDY=mmcm_drdy_o,
                    i_DADDR=self._mmcm_adr.storage,
                    i_DI=self._mmcm_dat_w.storage,
                    o_DO=self._mmcm_dat_o_r.status
                ),
                Instance("BUFG", i_I=mmcm_clk0_o, o_O=self.cd_pix_o.clk),
                Instance("BUFG", i_I=mmcm_clk2_o, o_O=self.cd_pix5x_o.clk),  # was BUFIO...
            ]

            # DRDY handshake for the output-side PLL DRP.
            self.sync += [
                If(self._mmcm_read_o.re | self._mmcm_write_o.re,
                    self._mmcm_drdy_o.status.eq(0)
                ).Elif(mmcm_drdy_o,
                    self._mmcm_drdy_o.status.eq(1)
                )
            ]
        else:
            # Single-domain mode: output clocks mirror the input clocks.
            self.comb += [
                self.cd_pix_o.clk.eq(self.cd_pix.clk),
                self.cd_pix5x_o.clk.eq(self.cd_pix5x.clk)
            ]

        self.specials += MultiReg(mmcm_locked, self.locked, "sys")
        self.comb += self._locked.status.eq(self.locked)

        # Hold the pixel domains in reset until the MMCM locks.
        self.specials += [
            AsyncResetSynchronizer(self.cd_pix, ~mmcm_locked),
            AsyncResetSynchronizer(self.cd_pix1p25x, ~mmcm_locked),
        ]

        if split_clocking:
            self.specials += AsyncResetSynchronizer(self.cd_pix_o, ~mmcm_locked_o)
        else:
            self.comb += self.cd_pix_o.rst.eq(self.cd_pix.rst)

# --------------------------------------------------------------------------------------------------
# /litevideo/input/analysis.py
# --------------------------------------------------------------------------------------------------

from migen import *
from migen.genlib.cdc import MultiReg, PulseSynchronizer

from litex.soc.interconnect.csr import *
from litex.soc.interconnect import stream

from litevideo.input.common import channel_layout
from litevideo.csc.rgb2ycbcr import RGB2YCbCr
from litevideo.csc.ycbcr444to422 import YCbCr444to422


class SyncPolarity(Module):
    """Normalizes hsync/vsync polarity of the decoded DVI/HDMI channels.

    The control bits are captured when DE changes so that the sync outputs
    are always active-high regardless of the source's polarity. Raw 10-bit
    channel data is also re-registered into the pix_o domain.
    """
    def __init__(self, hdmi=False):
        self.valid_i = Signal()
        self.data_in0 = Record(channel_layout)
        self.data_in1 = Record(channel_layout)
        self.data_in2 = Record(channel_layout)

        self.valid_o = Signal()
        self.de = Signal()
        self.hsync = Signal()
        self.vsync = Signal()
        self.r = Signal(8)
        self.g = Signal(8)
        self.b = Signal(8)
        self.c0 = Signal(10)
        self.c1 = Signal(10)
        self.c2 = Signal(10)
        self.de_rising = Signal()

        # # #

        if hdmi:
            self.de_int = Signal()  # we assume de_int is assigned externally
        else:
            self.de_int = self.data_in0.de

        self.de_r = Signal()
        self.c = self.data_in0.c
        self.c_polarity = Signal(2)
        self.c_out = Signal(2)

        self.comb += [
            self.de.eq(self.de_r),
            self.hsync.eq(self.c_out[0]),
            self.vsync.eq(self.c_out[1]),
            # NOTE(review): despite its name, this is old & ~new (de_r is the
            # registered copy of de_int), i.e. a *falling*-edge detect of DE.
            # A rising-edge detect would be de_int & ~de_r — confirm intent.
            self.de_rising.eq(self.de_r & ~self.de_int),
        ]

        self.sync.pix += [
            self.valid_o.eq(self.valid_i),
            self.r.eq(self.data_in2.d),
            self.g.eq(self.data_in1.d),
            self.b.eq(self.data_in0.d),

            self.de_r.eq(self.de_int),

            # Latch polarity at the DE edge, then XOR it out of the control
            # bits during blanking to get active-high syncs.
            If(self.de_rising,
                self.c_polarity.eq(self.c),
                self.c_out.eq(0)
            ).Else(
                self.c_out.eq(self.c ^ self.c_polarity)
            )
        ]

        # move this to a retimed output domain
        self.sync.pix_o += [
            self.c0.eq(self.data_in0.raw),
            self.c1.eq(self.data_in1.raw),
            self.c2.eq(self.data_in2.raw),
        ]


class ResolutionDetection(Module, AutoCSR):
    """Measures the incoming frame resolution.

    Counts pixels while DE is high (hres) and DE falling edges between
    vsync pulses (vres); results are synchronized to sys and exposed as
    CSR status registers.
    """
    def __init__(self, nbits=11):
        self.valid_i = Signal()
        self.vsync = Signal()
        self.de = Signal()

        self._hres = CSRStatus(nbits)
        self._vres = CSRStatus(nbits)

        # # #

        # Detect DE transitions (pn_de pulses on the falling edge of DE).
        de_r = Signal()
        pn_de = Signal()
        self.sync.pix += de_r.eq(self.de)
        self.comb += pn_de.eq(~self.de & de_r)

        # HRES: count active pixels in a line, sample at end of line.
        hcounter = Signal(nbits)
        self.sync.pix += \
            If(self.valid_i & self.de,
                hcounter.eq(hcounter + 1)
            ).Else(
                hcounter.eq(0)
            )

        hcounter_st = Signal(nbits)
        self.sync.pix += \
            If(self.valid_i,
                If(pn_de, hcounter_st.eq(hcounter))
            ).Else(
                hcounter_st.eq(0)
            )
        self.specials += MultiReg(hcounter_st, self._hres.status)

        # VRES: count lines (DE falling edges), sample at start of vsync.
        vsync_r = Signal()
        p_vsync = Signal()
        self.sync.pix += vsync_r.eq(self.vsync),
        self.comb += p_vsync.eq(self.vsync & ~vsync_r)

        vcounter = Signal(nbits)
        self.sync.pix += \
            If(self.valid_i & p_vsync,
                vcounter.eq(0)
            ).Elif(pn_de,
                vcounter.eq(vcounter + 1)
            )

        vcounter_st = Signal(nbits)
        self.sync.pix += \
            If(self.valid_i,
                If(p_vsync, vcounter_st.eq(vcounter))
            ).Else(
                vcounter_st.eq(0)
            )
        self.specials += MultiReg(vcounter_st, self._vres.status)


class FrameExtraction(Module, AutoCSR):
    """Packs incoming pixels into memory words and crosses them to sys.

    In "ycbcr422" mode pixels go through RGB2YCbCr + 4:4:4->4:2:2
    downsampling (16 bits/pixel); otherwise raw RGB is padded to 32
    bits/pixel. Words are tagged with a start-of-frame flag and pushed
    through an AsyncFIFO; an overflow flag is exposed via CSR.
    """
    def __init__(self, word_width, fifo_depth, mode="ycbcr422"):
        # in pix clock domain
        self.valid_i = Signal()
        self.vsync = Signal()
        self.de = Signal()
        self.r = Signal(8)
        self.g = Signal(8)
        self.b = Signal(8)

        # in sys clock domain
        word_layout = [("sof", 1), ("pixels", word_width)]
        self.frame = stream.Endpoint(word_layout)
        self.busy = Signal()

        self._overflow = CSR()

        # # #

        # start of frame detection (rising edge of vsync)
        vsync = self.vsync
        vsync_r = Signal()
        self.new_frame = new_frame = Signal()
        self.comb += new_frame.eq(vsync & ~vsync_r)
        self.sync.pix += vsync_r.eq(vsync)

        if mode == "ycbcr422":
            de_r = Signal()
            self.sync.pix += de_r.eq(self.de)

            # Color-space conversion + chroma downsampling pipeline.
            rgb2ycbcr = RGB2YCbCr()
            self.submodules += ClockDomainsRenamer("pix")(rgb2ycbcr)
            chroma_downsampler = YCbCr444to422()
            self.submodules += ClockDomainsRenamer("pix")(chroma_downsampler)
            self.comb += [
                rgb2ycbcr.sink.valid.eq(self.valid_i),
                rgb2ycbcr.sink.r.eq(self.r),
                rgb2ycbcr.sink.g.eq(self.g),
                rgb2ycbcr.sink.b.eq(self.b),
                rgb2ycbcr.source.connect(chroma_downsampler.sink),
                chroma_downsampler.source.ready.eq(1),
                chroma_downsampler.datapath.first.eq(self.de & ~de_r)  # XXX need clean up
            ]
            # XXX need clean up
            # Delay de/vsync to match the conversion pipeline latency.
            de = self.de
            for i in range(rgb2ycbcr.latency + chroma_downsampler.latency):
                next_de = Signal()
                next_vsync = Signal()
                self.sync.pix += [
                    next_de.eq(de),
                    next_vsync.eq(vsync)
                ]
# (continuation of FrameExtraction.__init__ — pixel packing)
                de = next_de
                vsync = next_vsync

            # pack pixels into words (16 bits per YCbCr 4:2:2 pixel)
            self.cur_word = cur_word = Signal(word_width)
            self.cur_word_valid = cur_word_valid = Signal()
            encoded_pixel = Signal(16)
            self.comb += encoded_pixel.eq(Cat(chroma_downsampler.source.y,
                                              chroma_downsampler.source.cb_cr)),
            pack_factor = word_width//16
            assert(pack_factor & (pack_factor - 1) == 0)  # only support powers of 2
            self.pack_counter = pack_counter = Signal(max=pack_factor)
            self.sync.pix += [
                cur_word_valid.eq(0),
                If(new_frame,
                    # Flush a completed word and realign packing on frame start.
                    cur_word_valid.eq(pack_counter == (pack_factor - 1)),
                    pack_counter.eq(0),
                ).Elif(chroma_downsampler.source.valid & de,
                    # Place the pixel into its slot (first pixel in the MSBs).
                    [If(pack_counter == (pack_factor-i-1),
                        cur_word[16*i:16*(i+1)].eq(encoded_pixel)) for i in range(pack_factor)],
                    cur_word_valid.eq(pack_counter == (pack_factor - 1)),
                    pack_counter.eq(pack_counter + 1)
                )
            ]
        else:  # rgb case...should probably rewrite to call out unsupported modes instead of defaulting to rgb
            de = self.de

            # pack pixels into words (RGB padded to 32 bits per pixel)
            self.cur_word = cur_word = Signal(word_width)
            self.cur_word_valid = cur_word_valid = Signal()
            encoded_pixel = Signal(32)
            dummy8 = Signal(8)
            self.comb += encoded_pixel.eq(Cat(self.b, self.g, self.r, dummy8)),
            pack_factor = word_width//32
            assert(pack_factor & (pack_factor - 1) == 0)  # only support powers of 2
            self.pack_counter = pack_counter = Signal(max=pack_factor)
            self.sync.pix += [
                cur_word_valid.eq(0),
                If(new_frame,
                    # Flush a completed word and realign packing on frame start.
                    cur_word_valid.eq(pack_counter == (pack_factor - 1)),
                    pack_counter.eq(0),
                ).Elif(self.valid_i & de,
                    [If(pack_counter == (pack_factor-i-1),
                        cur_word[32*i:32*(i+1)].eq(encoded_pixel)) for i in range(pack_factor)],
                    cur_word_valid.eq(pack_counter == (pack_factor - 1)),
                    pack_counter.eq(pack_counter + 1)
                )
            ]

        # FIFO
fifo = stream.AsyncFIFO(word_layout, fifo_depth) 238 | fifo = ClockDomainsRenamer({"write": "pix", "read": "sys"})(fifo) 239 | self.submodules += fifo 240 | self.fifo = fifo 241 | self.comb += [ 242 | fifo.sink.pixels.eq(cur_word), 243 | fifo.sink.valid.eq(cur_word_valid) 244 | ] 245 | self.sync.pix += \ 246 | If(new_frame, 247 | fifo.sink.sof.eq(1) 248 | ).Elif(cur_word_valid, 249 | fifo.sink.sof.eq(0) 250 | ) 251 | 252 | self.comb += [ 253 | fifo.source.connect(self.frame), 254 | self.busy.eq(0) 255 | ] 256 | 257 | # overflow detection 258 | pix_overflow = Signal() 259 | pix_overflow_reset = Signal() 260 | self.sync.pix += [ 261 | If(fifo.sink.valid & ~fifo.sink.ready, 262 | pix_overflow.eq(1) 263 | ).Elif(pix_overflow_reset, 264 | pix_overflow.eq(0) 265 | ) 266 | ] 267 | 268 | sys_overflow = Signal() 269 | self.specials += MultiReg(pix_overflow, sys_overflow) 270 | self.submodules.overflow_reset = PulseSynchronizer("sys", "pix") 271 | self.submodules.overflow_reset_ack = PulseSynchronizer("pix", "sys") 272 | self.comb += [ 273 | pix_overflow_reset.eq(self.overflow_reset.o), 274 | self.overflow_reset_ack.i.eq(pix_overflow_reset) 275 | ] 276 | 277 | overflow_mask = Signal() 278 | self.comb += [ 279 | self._overflow.w.eq(sys_overflow & ~overflow_mask), 280 | self.overflow_reset.i.eq(self._overflow.re) 281 | ] 282 | self.sync += \ 283 | If(self._overflow.re, 284 | overflow_mask.eq(1) 285 | ).Elif(self.overflow_reset_ack.o, 286 | overflow_mask.eq(0) 287 | ) 288 | -------------------------------------------------------------------------------- /litevideo/output/hdmi/s6.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | from migen.genlib.cdc import MultiReg 3 | 4 | from litex.soc.interconnect import stream 5 | from litex.soc.interconnect.csr import * 6 | 7 | from litevideo.output.common import * 8 | from litevideo.output.hdmi.encoder import Encoder 9 | 10 | 11 | # This assumes a 50MHz base clock 12 | class 
S6HDMIOutClocking(Module, AutoCSR): 13 | def __init__(self, pads, external_clocking, max_pix_clk=100e6): 14 | if external_clocking is None: 15 | self._cmd_data = CSRStorage(10) 16 | self._send_cmd_data = CSR() 17 | self._send_go = CSR() 18 | self._status = CSRStatus(4) 19 | self._max_pix_clk = CSRConstant(max_pix_clk) 20 | 21 | self.clock_domains.cd_pix = ClockDomain(reset_less=True) 22 | self._pll_reset = CSRStorage() 23 | self._pll_adr = CSRStorage(5) 24 | self._pll_dat_r = CSRStatus(16) 25 | self._pll_dat_w = CSRStorage(16) 26 | self._pll_read = CSR() 27 | self._pll_write = CSR() 28 | self._pll_drdy = CSRStatus() 29 | 30 | self.clock_domains.cd_pix2x = ClockDomain(reset_less=True) 31 | self.clock_domains.cd_pix10x = ClockDomain(reset_less=True) 32 | self.serdesstrobe = Signal() 33 | 34 | # # # 35 | 36 | # Generate 1x pixel clock 37 | clk_pix_unbuffered = Signal() 38 | pix_progdata = Signal() 39 | pix_progen = Signal() 40 | pix_progdone = Signal() 41 | 42 | pix_locked = Signal() 43 | 44 | clkfx_md_max = max(2.0/4.0, max_pix_clk/50e6) 45 | self._clkfx_md_max_1000 = CSRConstant(clkfx_md_max*1000.0) 46 | self.specials += Instance("DCM_CLKGEN", 47 | name="hdmi_out_dcm_clkgen", 48 | 49 | # parameters 50 | p_SPREAD_SPECTRUM="NONE", 51 | p_STARTUP_WAIT="FALSE", 52 | 53 | # reset 54 | i_FREEZEDCM=0, 55 | i_RST=ResetSignal(), 56 | 57 | # input 58 | i_CLKIN=ClockSignal("base50"), 59 | p_CLKIN_PERIOD=20.0, 60 | 61 | # output 62 | p_CLKFXDV_DIVIDE=2, 63 | p_CLKFX_MULTIPLY=2, 64 | p_CLKFX_DIVIDE=4, 65 | p_CLKFX_MD_MAX=clkfx_md_max, 66 | o_CLKFX=clk_pix_unbuffered, 67 | o_LOCKED=pix_locked, 68 | 69 | # programming interface 70 | i_PROGCLK=ClockSignal(), 71 | i_PROGDATA=pix_progdata, 72 | i_PROGEN=pix_progen, 73 | o_PROGDONE=pix_progdone 74 | ) 75 | 76 | remaining_bits = Signal(max=11) 77 | transmitting = Signal() 78 | self.comb += transmitting.eq(remaining_bits != 0) 79 | sr = Signal(10) 80 | self.sync += [ 81 | If(self._send_cmd_data.re, 82 | remaining_bits.eq(10), 83 | 
sr.eq(self._cmd_data.storage) 84 | ).Elif(transmitting, 85 | remaining_bits.eq(remaining_bits - 1), 86 | sr.eq(sr[1:]) 87 | ) 88 | ] 89 | self.comb += [ 90 | pix_progdata.eq(transmitting & sr[0]), 91 | pix_progen.eq(transmitting | self._send_go.re) 92 | ] 93 | 94 | # enforce gap between commands 95 | busy_counter = Signal(max=14) 96 | busy = Signal() 97 | self.comb += busy.eq(busy_counter != 0) 98 | self.sync += If(self._send_cmd_data.re, 99 | busy_counter.eq(13) 100 | ).Elif(busy, 101 | busy_counter.eq(busy_counter - 1) 102 | ) 103 | 104 | mult_locked = Signal() 105 | self.comb += self._status.status.eq(Cat(busy, pix_progdone, pix_locked, mult_locked)) 106 | 107 | # Clock multiplication and buffering 108 | # Route unbuffered 1x pixel clock to PLL 109 | # Generate 1x, 2x and 10x IO pixel clocks 110 | clkfbout = Signal() 111 | pll_locked = Signal() 112 | pll0_pix10x = Signal() 113 | pll1_pix2x = Signal() 114 | pll2_pix = Signal() 115 | locked_async = Signal() 116 | pll_drdy = Signal() 117 | self.sync += If(self._pll_read.re | self._pll_write.re, 118 | self._pll_drdy.status.eq(0) 119 | ).Elif(pll_drdy, 120 | self._pll_drdy.status.eq(1) 121 | ) 122 | self.specials += [ 123 | Instance("PLL_ADV", 124 | name="hdmi_out_pll_adv", 125 | p_CLKFBOUT_MULT=10, 126 | p_CLKOUT0_DIVIDE=1, # pix10x 127 | p_CLKOUT1_DIVIDE=5, # pix2x 128 | p_CLKOUT2_DIVIDE=10, # pix 129 | p_COMPENSATION="INTERNAL", 130 | 131 | i_CLKINSEL=1, 132 | i_CLKIN1=clk_pix_unbuffered, 133 | o_CLKOUT0=pll0_pix10x, o_CLKOUT1=pll1_pix2x, o_CLKOUT2=pll2_pix, 134 | o_CLKFBOUT=clkfbout, i_CLKFBIN=clkfbout, 135 | o_LOCKED=pll_locked, 136 | i_RST=~pix_locked | self._pll_reset.storage, 137 | 138 | i_DADDR=self._pll_adr.storage, 139 | o_DO=self._pll_dat_r.status, 140 | i_DI=self._pll_dat_w.storage, 141 | i_DEN=self._pll_read.re | self._pll_write.re, 142 | i_DWE=self._pll_write.re, 143 | o_DRDY=pll_drdy, 144 | i_DCLK=ClockSignal()), 145 | Instance("BUFPLL", name="hdmi_out_bufpll", p_DIVIDE=5, 146 | i_PLLIN=pll0_pix10x, 
i_GCLK=ClockSignal("pix2x"), i_LOCKED=pll_locked, 147 | o_IOCLK=self.cd_pix10x.clk, o_LOCK=locked_async, o_SERDESSTROBE=self.serdesstrobe), 148 | Instance("BUFG", name="hdmi_out_pix2x_bufg", i_I=pll1_pix2x, o_O=self.cd_pix2x.clk), 149 | Instance("BUFG", name="hdmi_out_pix_bufg", i_I=pll2_pix, o_O=self.cd_pix.clk), 150 | MultiReg(locked_async, mult_locked, "sys") 151 | ] 152 | 153 | self.pll0_pix10x = pll0_pix10x 154 | self.pll1_pix2x = pll1_pix2x 155 | self.pll2_pix = pll2_pix 156 | self.pll_locked = pll_locked 157 | 158 | else: 159 | self.clock_domains.cd_pix = ClockDomain(reset_less=True) 160 | self.specials += Instance("BUFG", name="hdmi_out_pix_bufg", i_I=external_clocking.pll2_pix, o_O=self.cd_pix.clk) 161 | self.clock_domains.cd_pix2x = ClockDomain(reset_less=True) 162 | self.clock_domains.cd_pix10x = ClockDomain(reset_less=True) 163 | self.serdesstrobe = Signal() 164 | self.specials += [ 165 | Instance("BUFG", name="hdmi_out_pix2x_bufg", i_I=external_clocking.pll1_pix2x, o_O=self.cd_pix2x.clk), 166 | Instance("BUFPLL", name="hdmi_out_bufpll", p_DIVIDE=5, 167 | i_PLLIN=external_clocking.pll0_pix10x, i_GCLK=self.cd_pix2x.clk, i_LOCKED=external_clocking.pll_locked, 168 | o_IOCLK=self.cd_pix10x.clk, o_SERDESSTROBE=self.serdesstrobe), 169 | ] 170 | 171 | # Drive HDMI clock pads 172 | hdmi_clk_se = Signal() 173 | self.specials += Instance("ODDR2", 174 | p_DDR_ALIGNMENT="NONE", p_INIT=0, p_SRTYPE="SYNC", 175 | o_Q=hdmi_clk_se, 176 | i_C0=ClockSignal("pix"), 177 | i_C1=~ClockSignal("pix"), 178 | i_CE=1, 179 | i_D0=not hasattr(pads.clk_p, "inverted"), 180 | i_D1=hasattr(pads.clk_p, "inverted"), 181 | i_R=0, i_S=0) 182 | if hasattr(pads, "clk_p"): 183 | self.specials += Instance("OBUFDS", i_I=hdmi_clk_se, 184 | o_O=pads.clk_p, o_OB=pads.clk_n) 185 | else: 186 | self.comb += pads.clk.eq(hdmi_clk_se) 187 | 188 | 189 | class _S6HDMIOutEncoderSerializer(Module): 190 | def __init__(self, serdesstrobe, pad_p, pad_n, bypass_encoder=False): 191 | if not bypass_encoder: 192 | 
self.submodules.encoder = ClockDomainsRenamer("pix")(Encoder()) 193 | self.d, self.c, self.de = self.encoder.d, self.encoder.c, self.encoder.de 194 | self.data = self.encoder.out 195 | else: 196 | self.data = Signal(10) 197 | 198 | # # # 199 | 200 | # 2X soft serialization 201 | ed_2x_pol = Signal(5) 202 | ed_2x = Signal(5) 203 | self.sync.pix2x += ed_2x_pol.eq(Mux(ClockSignal("pix"), self.data[:5], self.data[5:])) 204 | if hasattr(pad_p, "inverted"): 205 | self.comb += ed_2x.eq(~ed_2x_pol) 206 | else: 207 | self.comb += ed_2x.eq(ed_2x_pol) 208 | 209 | # 5X hard serialization 210 | cascade_di = Signal() 211 | cascade_do = Signal() 212 | cascade_ti = Signal() 213 | cascade_to = Signal() 214 | pad_se = Signal() 215 | self.specials += [ 216 | Instance("OSERDES2", 217 | p_DATA_WIDTH=5, p_DATA_RATE_OQ="SDR", p_DATA_RATE_OT="SDR", 218 | p_SERDES_MODE="MASTER", p_OUTPUT_MODE="DIFFERENTIAL", 219 | 220 | o_OQ=pad_se, 221 | i_OCE=1, i_IOCE=serdesstrobe, i_RST=0, 222 | i_CLK0=ClockSignal("pix10x"), i_CLK1=0, i_CLKDIV=ClockSignal("pix2x"), 223 | i_D1=ed_2x[4], i_D2=0, i_D3=0, i_D4=0, 224 | i_T1=0, i_T2=0, i_T3=0, i_T4=0, 225 | i_TRAIN=0, i_TCE=1, 226 | i_SHIFTIN1=1, i_SHIFTIN2=1, 227 | i_SHIFTIN3=cascade_do, i_SHIFTIN4=cascade_to, 228 | o_SHIFTOUT1=cascade_di, o_SHIFTOUT2=cascade_ti), 229 | Instance("OSERDES2", 230 | p_DATA_WIDTH=5, p_DATA_RATE_OQ="SDR", p_DATA_RATE_OT="SDR", 231 | p_SERDES_MODE="SLAVE", p_OUTPUT_MODE="DIFFERENTIAL", 232 | 233 | i_OCE=1, i_IOCE=serdesstrobe, i_RST=0, 234 | i_CLK0=ClockSignal("pix10x"), i_CLK1=0, i_CLKDIV=ClockSignal("pix2x"), 235 | i_D1=ed_2x[0], i_D2=ed_2x[1], i_D3=ed_2x[2], i_D4=ed_2x[3], 236 | i_T1=0, i_T2=0, i_T3=0, i_T4=0, 237 | i_TRAIN=0, i_TCE=1, 238 | i_SHIFTIN1=cascade_di, i_SHIFTIN2=cascade_ti, 239 | i_SHIFTIN3=1, i_SHIFTIN4=1, 240 | o_SHIFTOUT3=cascade_do, o_SHIFTOUT4=cascade_to), 241 | Instance("OBUFDS", i_I=pad_se, o_O=pad_p, o_OB=pad_n) 242 | ] 243 | 244 | 245 | class S6HDMIOutPHY(Module): 246 | def __init__(self, pads, mode): 
247 | self.serdesstrobe = Signal() 248 | self.sink = sink = stream.Endpoint(phy_layout(mode)) 249 | 250 | # # # 251 | 252 | self.submodules.es0 = _S6HDMIOutEncoderSerializer(self.serdesstrobe, pads.data0_p, pads.data0_n, mode == "raw") 253 | self.submodules.es1 = _S6HDMIOutEncoderSerializer(self.serdesstrobe, pads.data1_p, pads.data1_n, mode == "raw") 254 | self.submodules.es2 = _S6HDMIOutEncoderSerializer(self.serdesstrobe, pads.data2_p, pads.data2_n, mode == "raw") 255 | 256 | if mode == "raw": 257 | self.comb += [ 258 | sink.ready.eq(1), 259 | self.es0.data.eq(sink.c0), 260 | self.es1.data.eq(sink.c1), 261 | self.es2.data.eq(sink.c2) 262 | ] 263 | else: 264 | self.comb += [ 265 | sink.ready.eq(1), 266 | self.es0.d.eq(sink.b), 267 | self.es1.d.eq(sink.g), 268 | self.es2.d.eq(sink.r), 269 | self.es0.c.eq(Cat(sink.hsync, sink.vsync)), 270 | self.es1.c.eq(0), 271 | self.es2.c.eq(0), 272 | self.es0.de.eq(sink.de), 273 | self.es1.de.eq(sink.de), 274 | self.es2.de.eq(sink.de) 275 | ] 276 | -------------------------------------------------------------------------------- /litevideo/output/core.py: -------------------------------------------------------------------------------- 1 | from migen import * 2 | from migen.genlib.cdc import MultiReg, PulseSynchronizer 3 | 4 | from litex.soc.interconnect import stream 5 | from litex.soc.interconnect.csr import * 6 | 7 | from litedram.frontend.dma import LiteDRAMDMAReader 8 | 9 | from litevideo.output.common import * 10 | from litevideo.output.hdmi.s6 import S6HDMIOutClocking, S6HDMIOutPHY 11 | from litevideo.output.hdmi.s7 import S7HDMIOutClocking, S7HDMIOutPHY 12 | 13 | 14 | class Initiator(Module, AutoCSR): 15 | """Initiator 16 | 17 | Generates the H/V and DMA parameters of a frame. 18 | 19 | CSR -> local clock domain via 4-deep FIFO. The FIFO is only read when the timing generator "pulls" it 20 | which I think allows for "intelligent" queueing of new values coming in (e.g. 
no mid-frame timing changes) 21 | """ 22 | def __init__(self, cd): # CD is the clock domain of the dram port 23 | self.source = stream.Endpoint(frame_parameter_layout + 24 | frame_dma_layout) # outputs are a cd-synchronized set of parameter from CSRs 25 | 26 | # # # 27 | 28 | cdc = stream.AsyncFIFO(self.source.description, 4) 29 | cdc = ClockDomainsRenamer({"write": "sys", 30 | "read": cd})(cdc) 31 | self.submodules += cdc 32 | 33 | self.enable = CSRStorage() 34 | for name, width in frame_parameter_layout + frame_dma_layout: 35 | setattr(self, name, CSRStorage(width, name=name, atomic_write=True)) # builds the CSR list 36 | self.comb += getattr(cdc.sink, name).eq(getattr(self, name).storage) # assigns them to the sink 37 | self.comb += cdc.sink.valid.eq(self.enable.storage) # I don't quite get this line, seems source.valid should be assigned here?? 38 | self.comb += cdc.source.connect(self.source) # FIFO's output ("source") is now our output 39 | 40 | 41 | class DMAReader(Module, AutoCSR): 42 | """DMA reader 43 | 44 | Generates the data stream of a frame. 45 | """ 46 | def __init__(self, dram_port, fifo_depth=512, genlock_stream=None): 47 | self.sink = sink = stream.Endpoint(frame_dma_layout) # "inputs" are the DMA frame parameters 48 | self.source = source = stream.Endpoint([("data", dram_port.dw)]) # "output" is the data stream 49 | 50 | # # # 51 | 52 | self.submodules.dma = LiteDRAMDMAReader(dram_port, fifo_depth, True) 53 | self.submodules.fsm = fsm = FSM(reset_state="IDLE") 54 | 55 | shift = log2_int(dram_port.dw//8) 56 | base = Signal(dram_port.aw) 57 | length = Signal(dram_port.aw) 58 | offset = Signal(dram_port.aw) 59 | self.delay_base = CSRStorage(32) 60 | self.comb += [ 61 | base.eq(sink.base[shift:]), # ignore the lower bits of the base + length to match the DMA's expectations 62 | length.eq(sink.length[shift:]), # need to noodle on what that expectation is, exactly... 
class DMAReader(Module, AutoCSR):
    """DMA reader

    Generates the data stream of a frame.

    Frame parameters (base/length) are received on ``sink``
    (frame_dma_layout) and the frame data is produced on ``source``. When
    ``genlock_stream`` is provided, the reader waits for its vsync rising
    edge (start of frame) before accepting new parameters, and the first
    read offset can be adjusted with the ``delay_base`` CSR.
    """
    def __init__(self, dram_port, fifo_depth=512, genlock_stream=None):
        self.sink = sink = stream.Endpoint(frame_dma_layout)              # frame parameters in
        self.source = source = stream.Endpoint([("data", dram_port.dw)])  # frame data out

        # # #

        self.submodules.dma = LiteDRAMDMAReader(dram_port, fifo_depth, True)
        self.submodules.fsm = fsm = FSM(reset_state="IDLE")

        # base/length are byte quantities; the DMA is addressed in dram
        # words, so drop the low byte-address bits
        shift = log2_int(dram_port.dw//8)
        base = Signal(dram_port.aw)
        length = Signal(dram_port.aw)
        offset = Signal(dram_port.aw)
        self.delay_base = CSRStorage(32)
        self.comb += [
            base.eq(sink.base[shift:]),
            length.eq(sink.length[shift:]),
        ]

        genlocked = genlock_stream is not None
        if genlocked:
            # detect the rising edge of the genlock vsync (start of frame)
            self.v = Signal()
            self.v_r = Signal()
            self.sof = Signal()
            self.sync += [
                self.v.eq(genlock_stream.vsync),
                self.v_r.eq(self.v),
                self.sof.eq(self.v & ~self.v_r),
            ]

        # Both modes share the same IDLE/READ sequencing; they only differ
        # in the offset loaded in IDLE and in the state entered when the
        # frame completes (genlock additionally waits for start of frame).
        initial_offset = self.delay_base.storage if genlocked else 0
        done_state = "WAIT_SOF" if genlocked else "IDLE"
        fsm.act("IDLE",
            NextValue(offset, initial_offset),
            If(sink.valid,                     # parameters valid: start reading
                NextState("READ")
            ).Else(
                dram_port.flush.eq(1),
            )
        )
        fsm.act("READ",
            self.dma.sink.valid.eq(1),         # present a valid address to the DMA
            If(self.dma.sink.ready,            # DMA accepted the current address
                NextValue(offset, offset + 1),
                If(offset == (length - 1),     # last word of the frame
                    self.sink.ready.eq(1),     # acknowledge the parameters
                    NextState(done_state)
                )
            )
        )
        if genlocked:
            fsm.act("WAIT_SOF",                # wait for vsync/start of frame
                If(self.sof,
                    NextState("IDLE")
                )
            )

        self.comb += [
            self.dma.sink.address.eq(base + offset),  # DMA reads base + running offset
            self.dma.source.connect(self.source)      # DMA output is our output
        ]
108 | self.sink.ready.eq(1), # indicate we're ready for more parameters 109 | NextState("WAIT_SOF") 110 | ) 111 | ) 112 | ) 113 | fsm.act("WAIT_SOF", # wait till vsync/start of frame 114 | If(self.sof, 115 | NextState("IDLE") 116 | ) 117 | ) 118 | 119 | self.comb += [ 120 | self.dma.sink.address.eq(base + offset), # input to the DMA is an address of base + offset 121 | self.dma.source.connect(self.source) # connect the DMA's output to the output of this module 122 | ] 123 | 124 | 125 | class TimingGenerator(Module): 126 | """Timing Generator 127 | 128 | Generates the H/V timings of a frame. 129 | """ 130 | def __init__(self, genlock_stream=None): 131 | self.sink = sink = stream.Endpoint(frame_parameter_layout) # "inputs" are the parameter layout (via CSR via initiator) 132 | self.source = source = stream.Endpoint(frame_timing_layout) # "outputs" are a frame timing layout 133 | 134 | # # # 135 | if genlock_stream == None: 136 | hactive = Signal() 137 | vactive = Signal() 138 | active = Signal() 139 | 140 | hcounter = Signal(hbits) 141 | vcounter = Signal(vbits) 142 | 143 | self.comb += [ 144 | If(sink.valid, # if the frame parameters are valid... 145 | active.eq(hactive & vactive), # go ahead and let the logic update for active, valid 146 | source.valid.eq(1), 147 | If(active, 148 | source.de.eq(1), 149 | ) 150 | ), ### but else...what? they don't revert to 0, so they stay "stuck" on when sink is invalid??? 151 | sink.ready.eq(source.ready & source.last) 152 | ] 153 | 154 | self.sync += \ 155 | If(~sink.valid, # if our parameters aren't valid, reset everything 156 | hactive.eq(0), 157 | vactive.eq(0), 158 | hcounter.eq(0), 159 | vcounter.eq(0) 160 | ).Elif(source.ready, # otherwise, if the thing downstream from us is ready... 
161 | source.last.eq(0), # self.sync is blocking, so this will get over-ridden later as needed 162 | hcounter.eq(hcounter + 1), 163 | 164 | If(hcounter == 0, hactive.eq(1)), 165 | If(hcounter == sink.hres, hactive.eq(0)), # sink is our "input" of parameters 166 | If(hcounter == sink.hsync_start, source.hsync.eq(1)), 167 | If(hcounter == sink.hsync_end, source.hsync.eq(0)), 168 | If(hcounter == sink.hscan, # if we hit the end of the line 169 | hcounter.eq(0), # reset the counter, overriding the +1 earlier coz this is a "blocking" syntax 170 | If(vcounter == sink.vscan, 171 | vcounter.eq(0), 172 | source.last.eq(1) 173 | ).Else( 174 | vcounter.eq(vcounter + 1) 175 | ) 176 | ), 177 | 178 | If(vcounter == 0, vactive.eq(1)), 179 | If(vcounter == sink.vres, vactive.eq(0)), 180 | If(vcounter == sink.vsync_start, source.vsync.eq(1)), 181 | If(vcounter == sink.vsync_end, source.vsync.eq(0)) 182 | ) 183 | else: 184 | self.comb += genlock_stream.connect(self.source) 185 | 186 | 187 | 188 | modes_dw = { 189 | "raw": 32, 190 | "rgb": 24, 191 | "ycbcr422": 16 192 | } 193 | 194 | 195 | class VideoOutCore(Module, AutoCSR): 196 | """Video out core 197 | 198 | Generates a video stream from memory. 
class VideoOutCore(Module, AutoCSR):
    """Video out core

    Generates a video stream from memory.

    The Initiator distributes the CSR-programmed frame parameters to the
    TimingGenerator (h/v timings) and the DMAReader (pixel data); both are
    combined here into the output ``source`` stream. An underflow counter,
    readable through CSRs, counts the cycles where the output is not valid
    while enabled.

    Raises ValueError if ``mode`` is not one of ``modes_dw``.
    """
    def __init__(self, dram_port, mode="rgb", fifo_depth=512, genlock_stream=None):
        try:
            dw = modes_dw[mode]
        except KeyError:  # only catch an unknown mode, not arbitrary errors
            raise ValueError("Unsupported {} video mode".format(mode))
        assert dram_port.dw >= dw
        assert dram_port.dw == 2**log2_int(dw, need_pow2=False)
        self.source = source = stream.Endpoint(video_out_layout(dw))  # output video stream, dw wide

        self.underflow_enable = CSRStorage()
        self.underflow_update = CSR()
        self.underflow_counter = CSRStatus(32)

        # # #

        cd = dram_port.cd

        self.submodules.initiator = initiator = Initiator(cd)
        # TimingGenerator itself handles genlock_stream=None (internal timings)
        self.submodules.timing = timing = ClockDomainsRenamer(cd)(TimingGenerator(genlock_stream))
        self.submodules.dma = dma = ClockDomainsRenamer(cd)(DMAReader(dram_port, fifo_depth, genlock_stream))

        # ctrl path
        self.comb += timing.sink.valid.eq(initiator.source.valid)  # parameters valid: timing may proceed

        self.comb += [
            # dispatch initiator parameters to timing & dma
            dma.sink.valid.eq(initiator.source.valid),    # parameters are "pushed" to the DMA
            initiator.source.ready.eq(timing.sink.ready), # but "pulled" by the timing generator

            # combine timing and dma: output is valid when timings are valid
            # and, during active video (de), when dma data is available
            source.valid.eq(timing.source.valid & (~timing.source.de | dma.source.valid)),

            # flush dma/timing when disabled
            If(~initiator.source.valid,          # parameters invalid (core disabled)
                timing.source.ready.eq(1),       # drain both streams
                dma.source.ready.eq(1)
            ).Elif(source.valid & source.ready,  # downstream consumed a word
                timing.source.ready.eq(1),       # advance the timing generator
                # consume dma data only during active video (always in raw mode)
                dma.source.ready.eq(timing.source.de | (mode == "raw"))
            )
        ]

        # data path
        self.comb += [
            # dispatch initiator parameters to timing & dma; initiator is a
            # compound source, so "keep" selects each consumer's signals
            initiator.source.connect(timing.sink, keep=list_signals(frame_parameter_layout)),
            initiator.source.connect(dma.sink, keep=list_signals(frame_dma_layout)),

            # combine timing and dma outputs into the video stream
            source.de.eq(timing.source.de),
            source.hsync.eq(timing.source.hsync),
            source.vsync.eq(timing.source.vsync),
            source.data.eq(dma.source.data)
        ]

        # underflow detection (counter runs in the dram clock domain, CSRs in sys)
        underflow_enable = Signal()
        underflow_update = Signal()
        underflow_counter = Signal(32)
        self.specials += MultiReg(self.underflow_enable.storage, underflow_enable)
        underflow_update_synchronizer = PulseSynchronizer("sys", cd)
        self.submodules += underflow_update_synchronizer
        self.comb += [
            underflow_update_synchronizer.i.eq(self.underflow_update.re),
            underflow_update.eq(underflow_update_synchronizer.o)
        ]
        sync = getattr(self.sync, cd)
        sync += [
            If(underflow_enable,
                If(~source.valid,  # count cycles where the output is not valid
                    underflow_counter.eq(underflow_counter + 1)
                )
            ).Else(
                underflow_counter.eq(0)
            ),
            If(underflow_update,   # latch the counter into the status CSR
                self.underflow_counter.status.eq(underflow_counter)
            )
        ]
class Decoding(Module):
    """TMDS 10-bit to 8-bit decoder for a single channel.

    Registers (pix domain) the de/c control decode, the raw 10-bit word and
    the 8-bit data decode of ``input``; ``valid_o`` is ``valid_i`` delayed
    by the same one-cycle latency.
    """
    def __init__(self):
        self.valid_i = Signal()
        self.input = Signal(10)
        self.valid_o = Signal()
        self.output = Record(channel_layout)

        # # #

        # de defaults to 1; a matching control token overrides it below
        # (later statements in a sync block take precedence)
        stmts = [self.output.de.eq(1)]
        stmts += [If(self.input == token,
                      self.output.de.eq(0),
                      self.output.c.eq(index)
                  ) for index, token in enumerate(control_tokens)]
        stmts.append(self.output.raw.eq(self.input))
        # data path: undo the xor/xnor transition-minimization encoding
        stmts.append(self.output.d[0].eq(self.input[0] ^ self.input[9]))
        stmts += [self.output.d[bit].eq(self.input[bit] ^
                                        self.input[bit-1] ^
                                        ~self.input[8])
                  for bit in range(1, 8)]
        stmts.append(self.valid_o.eq(self.valid_i))
        self.sync.pix += stmts
class DecodeTERC4Channel(Module):
    """Decode one TMDS channel into a TERC4 value and control/guardband flags.

    Outputs on ``decval`` (terc4_layout): the 4-bit TERC4 data decode plus
    de/dgb/vgb/c_valid flags derived from control, data-guardband and
    video-guardband tokens. On channel 1 (green) the guardband tokens are
    ambiguous, so both dgb and vgb are raised for either token.
    """
    def __init__(self, channel):
        self.decval = stream.Endpoint(terc4_layout)  # decoded values output
        self.data_in = Record(channel_layout)        # data input from chansync
        self.valid_in = Signal()                     # valid input from chansync

        # # #

        # data path: map each TERC4 token to its 4-bit value
        # NOTE: this data decode path is untested
        for value, token in enumerate(terc4_tokens):
            self.sync.pix += If(self.data_in.raw == token,
                self.decval.d.eq(value)
            )

        def _flags(de, dgb, vgb, c_valid, c=None):
            # Assignment list for the control outputs. ``c`` is left
            # unassigned (holds its previous value) when None, matching the
            # behavior of the unrecognized-token branch.
            stmts = [] if c is None else [self.decval.c.eq(c)]
            return stmts + [
                self.decval.de.eq(de),
                self.decval.dgb.eq(dgb),
                self.decval.vgb.eq(vgb),
                self.decval.c_valid.eq(c_valid),
            ]

        # channel 1 (green) guardband tokens are ambiguous: the same token
        # serves as data and video guardband, so both flags are raised
        if channel != 1:
            data_gb_flags = _flags(de=0, dgb=1, vgb=0, c_valid=0, c=0)
            video_gb_flags = _flags(de=0, dgb=0, vgb=1, c_valid=0, c=0)
        else:
            data_gb_flags = _flags(de=0, dgb=1, vgb=1, c_valid=0, c=0)
            video_gb_flags = _flags(de=0, dgb=1, vgb=1, c_valid=0, c=0)

        # decode the control signals: control tokens, then guardbands,
        # otherwise the token is treated as active video
        raw = self.data_in.raw
        decode = If(raw == control_tokens[0],
                    *_flags(de=0, dgb=0, vgb=0, c_valid=1, c=0))
        for code in range(1, 4):
            decode = decode.Elif(raw == control_tokens[code],
                                 *_flags(de=0, dgb=0, vgb=0, c_valid=1, c=code))
        decode = decode.Elif(raw == data_gb_tokens[0], *data_gb_flags)
        decode = decode.Elif(raw == video_gb_tokens[channel], *video_gb_flags)
        decode = decode.Else(
            # unrecognized token: active video (c keeps its previous value)
            *_flags(de=1, dgb=0, vgb=0, c_valid=0)
        )

        self.sync.pix += [
            If(self.valid_in,
                decode
            ).Else(
                *_flags(de=0, dgb=0, vgb=0, c_valid=0, c=0)
            )
        ]
        self.de_hdmi = Signal()              # "de" derived from the HDMI period FSM below
        self.encoding_terc4 = Signal() # 1 if encoding terc4, 0 if encoding hdmi
        self.encrypting_video = Signal()     # high during the video period
        self.encrypting_data = Signal()      # high during the TERC4 data-island period

        self.dvimode = CSRStorage() # a bit to select DVI mode "de" detection
        dvimode_bit = Signal()
        self.specials += MultiReg(self.dvimode.storage, dvimode_bit)
        self.de_r = Signal()
        self.sync.pix += [
            self.de_r.eq(self.data_in0.de) # delay one clock to match the HDMI pipe latency
        ]
        # DVI has no data islands/guardbands: in DVI mode take "de" straight
        # from channel 0, otherwise use the FSM-derived de_hdmi
        self.comb += [
            If(dvimode_bit,
                self.de_o.eq(self.de_r)
            ).Else(
                self.de_o.eq(self.de_hdmi)
            )
        ]

        # derive video, data guardbands and control codes:
        # one TERC4 channel decoder per TMDS channel (data0/1/2_dect4)
        for datan in range(3):
            name = "data" + str(datan)
            dect4 = DecodeTERC4Channel(datan)
            setattr(self.submodules, name + "_dect4", dect4)
            self.comb += [
                dect4.valid_in.eq(self.valid_i),
                dect4.data_in.eq(getattr(self, "data_in" + str(datan))) # N=0..2, dataN_dect4.eq(self.data_inN)
            ]

        self.submodules.fsm = fsm = FSM(reset_state="INIT")

        # preamble code {ch2.c1, ch2.c0, ch1.c1, ch1.c0};
        # the first Cat() argument occupies the LSBs
        self.ctl_code = Signal(4)
        self.comb += self.ctl_code.eq(Cat(self.data1_dect4.decval.c,self.data2_dect4.decval.c))

        all_vgb = Signal()     # video guardband present on all three channels
        any_cvalid = Signal()  # valid control token on any channel
        c2c1_dgb = Signal()    # data guardband on channels 1 and 2
        self.comb += [
            all_vgb.eq(self.data0_dect4.decval.vgb & self.data1_dect4.decval.vgb & self.data2_dect4.decval.vgb),
            any_cvalid.eq(self.data0_dect4.decval.c_valid | self.data1_dect4.decval.c_valid | self.data2_dect4.decval.c_valid),
            c2c1_dgb.eq(self.data2_dect4.decval.dgb & self.data1_dect4.decval.dgb) # because c0 can't have a dgb
        ]
        # HDMI period tracker: control period -> preamble -> guardband ->
        # data island (TERC4) or video period, as signalled by the stream
        fsm.act("INIT", # control period, waiting for a preamble
            If(all_vgb,
                NextState("GOING_VID")
            ).Elif(self.ctl_code == 0b0101, # data-island preamble
                NextState("PREAM_T4")
            ).Elif(self.ctl_code == 0b0001, # video preamble
                NextState("PREAM_VID")
            ).Else(
                NextState("INIT")
            ),
            self.encoding_terc4.eq(0),
            self.encrypting_data.eq(0),
            self.encrypting_video.eq(0),
            self.de_hdmi.eq(0)
        )
        fsm.act("PREAM_T4", # data-island preamble in progress
            If(all_vgb,
                NextState("GOING_VID")
            ).Elif(c2c1_dgb, # leading data guardband
                NextState("GOING_T4")
            ).Elif(self.ctl_code == 0b0101,
                NextState("PREAM_T4")
            ).Else(
                NextState("INIT")
            ),
            self.encoding_terc4.eq(0),
            self.encrypting_data.eq(0),
            self.encrypting_video.eq(0),
            self.de_hdmi.eq(0)
        )
        fsm.act("GOING_T4", # inside the leading data guardband
            If(c2c1_dgb,
                NextState("GOING_T4")
            ).Else(
                NextState("TERC4")
            ),
            self.encoding_terc4.eq(1),
            self.encrypting_data.eq(0),
            self.encrypting_video.eq(0),
            self.de_hdmi.eq(0)
        )
        fsm.act("TERC4", # data island payload
            If(any_cvalid,
                NextState("INIT")
            ).Elif(all_vgb,
                NextState("GOING_VID")
            ).Elif(c2c1_dgb, # trailing data guardband
                NextState("LEAVE_T4")
            ).Else(
                NextState("TERC4")
            ),
            self.encoding_terc4.eq(1),
            self.encrypting_data.eq(1),
            self.encrypting_video.eq(0),
            self.de_hdmi.eq(0)
        )
        fsm.act("LEAVE_T4", # inside the trailing data guardband
            If(c2c1_dgb,
                NextState("LEAVE_T4")
            ).Else(
                NextState("INIT")
            ),
            self.encoding_terc4.eq(1),
            self.encrypting_data.eq(0),
            self.encrypting_video.eq(0),
            self.de_hdmi.eq(0)
        )
        fsm.act("PREAM_VID", # video preamble in progress
            If(self.ctl_code == 0b0001,
                NextState("PREAM_VID")
            ).Elif(all_vgb, # video guardband follows the preamble
                NextState("GOING_VID")
            ).Else(
                NextState("INIT")
            ),
            self.encoding_terc4.eq(0),
            self.encrypting_data.eq(0),
            self.encrypting_video.eq(0),
            self.de_hdmi.eq(0)
        )
        fsm.act("GOING_VID", # inside the video guardband
            If(all_vgb,
                NextState("GOING_VID")
            ).Else(
                NextState("VIDEO")
            ),
            self.encoding_terc4.eq(0),
            self.encrypting_data.eq(0),
            self.encrypting_video.eq(0),
            self.de_hdmi.eq(0)
        )
        fsm.act("VIDEO", # active video period until the next control token
            If(any_cvalid,
                NextState("INIT")
            ).Else(
                NextState("VIDEO")
            ),
            self.encoding_terc4.eq(0),
            self.encrypting_data.eq(0),
            self.encrypting_video.eq(1),
            self.de_hdmi.eq(1)
        )