├── testData ├── test1.bin ├── timer_packets.bin └── strip_packets_from_wireshark.sh ├── internal ├── ljh │ ├── demo_chan11.ljh │ └── ljh_test.go ├── lancero │ ├── test_data │ │ └── cringeGlobals.json │ ├── no_hardware_test.go │ ├── cmd │ │ ├── oddashtx │ │ │ └── oddashtx.go │ │ └── acquire │ │ │ └── acquire.go │ ├── lancero_collector.go │ ├── no_hardware.go │ └── lancero_test.go ├── asyncbufio │ ├── asyncbufio_test.go │ └── asyncbufio.go ├── getbytes │ ├── getbytes_test.go │ └── getbytes.go └── off │ ├── off_test.go │ └── off.go ├── cmd ├── udp_example │ ├── publisher │ │ └── publisher.go │ └── subscriber │ │ └── subscriber.go ├── vipertest │ └── vtest.go ├── udpdump │ └── udpdump.go ├── bahama │ └── bahama_test.go ├── writetester │ └── writetester.go ├── acquire │ └── acquire.go.txt └── dastard │ └── dastard.go ├── update-path.sh ├── .gitignore ├── binary_format_test.go ├── map_test.go ├── publish_data_slices.go ├── .github └── workflows │ └── go.yml ├── LICENSE ├── go.mod ├── doc ├── Inverted_Channels.md ├── BINARY_FORMATS.md ├── PORTS.md └── LJH.md ├── global_config.go ├── Makefile ├── writing_state_test.go ├── Dockerfile ├── mix.go ├── DOCKER.md ├── map.go ├── phase_unwrap.go ├── phase_unwrap_test.go ├── writing_state.go ├── group_trigger_test.go ├── roach_test.go ├── data_source_test.go ├── segment_test.go ├── go.sum ├── client_updater.go ├── maps ├── dastard_raven_map.txt └── ar14_30rows_map.cfg ├── edge_multi_test.go └── publish_data_test.go /testData/test1.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/usnistgov/dastard/HEAD/testData/test1.bin -------------------------------------------------------------------------------- /testData/timer_packets.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/usnistgov/dastard/HEAD/testData/timer_packets.bin 
-------------------------------------------------------------------------------- /internal/ljh/demo_chan11.ljh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/usnistgov/dastard/HEAD/internal/ljh/demo_chan11.ljh -------------------------------------------------------------------------------- /internal/lancero/test_data/cringeGlobals.json: -------------------------------------------------------------------------------- 1 | { 2 | "SETT": 18, 3 | "seqln": 28, 4 | "lsync": 30, 5 | "testpattern": 2, 6 | "propagationdelay": 9, 7 | "NSAMP": 10, 8 | "carddelay": 7, 9 | "XPT": 3 10 | } -------------------------------------------------------------------------------- /cmd/udp_example/publisher/publisher.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "time" 7 | ) 8 | 9 | // From https://jameshfisher.com/2016/11/17/udp-in-go.html 10 | func main() { 11 | Conn, _ := net.DialUDP("udp", nil, &net.UDPAddr{IP: []byte{127, 0, 0, 1}, Port: 12321, Zone: ""}) 12 | defer Conn.Close() 13 | for i := 1; i <= 60; i++ { 14 | Conn.Write(fmt.Appendf(nil, "hello %d", i)) 15 | time.Sleep(time.Second) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /cmd/udp_example/subscriber/subscriber.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "net" 4 | import "fmt" 5 | 6 | // From https://jameshfisher.com/2016/11/17/udp-in-go.html 7 | func main() { 8 | ServerConn, _ := net.ListenUDP("udp", &net.UDPAddr{IP: []byte{0, 0, 0, 0}, Port: 12321, Zone: ""}) 9 | defer ServerConn.Close() 10 | buf := make([]byte, 1024) 11 | for { 12 | n, addr, _ := ServerConn.ReadFromUDP(buf) 13 | fmt.Println("Received ", string(buf[0:n]), " from ", addr) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- 
/update-path.sh: -------------------------------------------------------------------------------- 1 | # Check that you have your go path in the bash PATH. 2 | # Use this by sourcing the current file: 3 | # source update-path.sh 4 | 5 | dir=`go env GOPATH`/bin 6 | BASHRC=$HOME/.bashrc 7 | 8 | if [ `echo :$PATH: | grep -F :$dir:` ]; then 9 | echo "$dir is already in the UNIX path" 10 | else 11 | echo "$dir is not in the UNIX path. Updating \$PATH and $BASHRC" 12 | echo >> $BASHRC 13 | echo "# Updated by Dastard package update-path.sh" >> $BASHRC 14 | echo "export PATH=\$PATH:$dir" >> $BASHRC 15 | export PATH=$PATH:$dir 16 | echo $PATH 17 | fi 18 | 19 | unset dir 20 | unset BASHRC 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | *.o 7 | lib*.a 8 | 9 | # Test binary, build with `go test -c` 10 | *.test 11 | 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | 15 | # Delve debugger output 16 | __debug_bin 17 | 18 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 19 | .glide/ 20 | 21 | cmd/dastard/dastard 22 | lancero/cmd/acquire/acquire 23 | lancero/cmd/oddashtx/oddashtx 24 | 25 | # Some test files 26 | dastardtestlogfile 27 | writertest.ljh* 28 | TestPublishData.ljh* 29 | tstfile 30 | *.off 31 | 32 | dastard 33 | !dastard/ 34 | .idea 35 | .vscode 36 | -------------------------------------------------------------------------------- /internal/lancero/no_hardware_test.go: -------------------------------------------------------------------------------- 1 | package lancero 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestNoHardware(t *testing.T) { 8 | var ncolsSet, nrowsSet, linePeriodSet int 9 | ncolsSet = 8 10 | nrowsSet = 8 11 | linePeriodSet = 20 12 | lan, err := 
NewNoHardware(ncolsSet, nrowsSet, linePeriodSet) 13 | if err != nil { 14 | t.Error(err) 15 | } 16 | ncols, nrows, linePeriod, err := testLanceroerSubroutine(lan, t) 17 | if err != nil { 18 | t.Error(err) 19 | } 20 | if ncols != ncolsSet { 21 | t.Errorf("want %v, have %v", ncolsSet, ncols) 22 | } 23 | if nrows != nrowsSet { 24 | t.Errorf("want %v, have %v", nrowsSet, nrows) 25 | } 26 | if linePeriod != linePeriodSet { 27 | t.Errorf("want %v, have %v", linePeriodSet, linePeriod) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /testData/strip_packets_from_wireshark.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | # Take a wireshark packet sniffer data file ($INF) and generate an output 4 | # file $OUTF containing only the raw UDP packet contents for the first 4 packets. 5 | # I happen to know that the first 3 packets are external triggers, packets of length 6 | # 0x260 with some wireshark junk of length 0x4c between them, and the first starting at 7 | # 0x14a. Then the 4th packet is a longer packet (length 0x1060) of µMUX data. 8 | # Use this knowledge to surgically remove these 4 packets and store in $OUTF. 9 | 10 | # This script probably isn't good for re-use, but it can document how the test 11 | # data file timer_packets.bin was generated. 
12 | 13 | INF=~/Downloads/timestamp_enabled_with_data.pcapng 14 | OUTF=timer_packets.bin 15 | rm -f $OUTF 16 | dd bs=1 skip=0x14a count=0x260 if=$INF of=$OUTF 17 | dd bs=1 skip=0x3f6 count=0x260 seek=0x260 if=$INF of=$OUTF 18 | dd bs=1 skip=0x6a2 count=0x260 seek=0x4c0 if=$INF of=$OUTF 19 | dd bs=1 skip=0x94e count=0x1060 seek=0x720 if=$INF of=$OUTF 20 | -------------------------------------------------------------------------------- /binary_format_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "io" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | // TestPublishRecord checks packet(DataRecord) makes a reasonable header and message. 12 | func TestPublishRecord(t *testing.T) { 13 | data := []RawType{1, 2, 3, 4, 5, 4, 3, 2, 1} 14 | rec := &DataRecord{data: data, trigTime: time.Now()} 15 | 16 | fullMessage := messageRecords(rec) 17 | header := fullMessage[0] 18 | message := fullMessage[1] 19 | buf := bytes.NewReader(header) 20 | if buf.Len() != len(header) { 21 | t.Errorf("bytes.Reader has length %d, want %d", buf.Len(), len(header)) 22 | } 23 | var b uint8 24 | for range header { 25 | if err := binary.Read(buf, binary.LittleEndian, &b); err != nil { 26 | t.Errorf("binary.Read failed: %v", err) 27 | } 28 | } 29 | if err := binary.Read(buf, binary.LittleEndian, &b); err != io.EOF { 30 | t.Errorf("binary.Read should have failed, but did not") 31 | } 32 | 33 | if len(message)/2 != len(data) { 34 | t.Errorf("packet generated message of %d samples, want %d", len(message)/2, len(data)) 35 | } 36 | 37 | } 38 | -------------------------------------------------------------------------------- /map_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "path/filepath" 5 | "testing" 6 | ) 7 | 8 | func TestMap(t *testing.T) { 9 | fname := filepath.Join("maps", "ar14_30rows_map.cfg") 10 | m, err := 
readMap(fname) 11 | if err != nil { 12 | t.Fatalf("Could not read map %q: %v", fname, err) 13 | } 14 | if m.Spacing != 520 { 15 | t.Errorf("map.Spacing=%d, want 520", m.Spacing) 16 | } 17 | if len(m.Pixels) != 240 { 18 | t.Errorf("map has %d pixels, want 240", len(m.Pixels)) 19 | } else { 20 | x := []int{290, 290, 290, 290} 21 | y := []int{-3470, -4510, -1910, -870} 22 | for i, p := range m.Pixels { 23 | if p.X != x[i] { 24 | t.Errorf("map.Pixels[%d]=%d, want %d", i, p.X, x[i]) 25 | } 26 | if p.Y != y[i] { 27 | t.Errorf("map.Pixels[%d]=%d, want %d", i, p.Y, y[i]) 28 | } 29 | if i == 3 { 30 | break 31 | } 32 | } 33 | } 34 | if _, err1 := readMap("doesnotexist.map.1234"); err1 == nil { 35 | t.Error("readMap() on nonexistent file should error") 36 | } 37 | if _, err1 := readMap("map_test.go"); err1 == nil { 38 | t.Error("readMap() on non-map file should error") 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /cmd/vipertest/vtest.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/spf13/viper" 9 | ) 10 | 11 | type newdata struct { 12 | Figgie string 13 | NChildren int 14 | } 15 | 16 | func main() { 17 | viper.SetDefault("Verbose", false) 18 | 19 | viper.SetConfigName("testconfig") 20 | viper.AddConfigPath(filepath.FromSlash("/etc/dastard")) 21 | HOME, err := os.UserHomeDir() 22 | if err != nil { // Handle errors reading the config file 23 | fmt.Printf("Error finding User Home Dir: %s\n", err) 24 | } 25 | viper.AddConfigPath(filepath.Join(HOME, ".dastard")) 26 | viper.AddConfigPath(".") 27 | err = viper.ReadInConfig() // Find and read the config file 28 | if err != nil { // Handle errors reading the config file 29 | fmt.Printf("Error reading config file: %s \n", err) 30 | } 31 | 32 | fmt.Printf("Verbose: %t\n", viper.Get("Verbose")) 33 | fmt.Printf("Figgie: %v\n", viper.Get("Figgie")) 34 | 
fmt.Printf("sfffd: %v\n", viper.Get("sdffd")) 35 | fmt.Printf("cur time: %v\n", viper.Get("currenttime")) 36 | 37 | d := newdata{"black cat", 3} 38 | viper.Set("newdata", d) 39 | viper.WriteConfig() 40 | } 41 | -------------------------------------------------------------------------------- /publish_data_slices.go: -------------------------------------------------------------------------------- 1 | // These functions use unsafe.Slice, which is available only from Go version 1.17+. 2 | // Dastard version 0.2.16 showed how to use conditional compilation to handle that. 3 | 4 | package dastard 5 | 6 | import ( 7 | "unsafe" 8 | ) 9 | 10 | // rawTypeToBytes convert a []RawType to []byte using unsafe.Slice 11 | func rawTypeToBytes(slice_in []RawType) []byte { 12 | if len(slice_in) == 0 { 13 | return []byte{} 14 | } 15 | outlength := uintptr(len(slice_in)) * unsafe.Sizeof(slice_in[0]) / unsafe.Sizeof(byte(0)) 16 | return unsafe.Slice((*byte)(unsafe.Pointer(&slice_in[0])), outlength) 17 | } 18 | 19 | // rawTypeToUint16convert a []RawType to []uint16 using unsafe 20 | func rawTypeToUint16(slice_in []RawType) []uint16 { 21 | if len(slice_in) == 0 { 22 | return []uint16{} 23 | } 24 | outlength := uintptr(len(slice_in)) * unsafe.Sizeof(slice_in[0]) / unsafe.Sizeof(uint16(0)) 25 | return unsafe.Slice((*uint16)(unsafe.Pointer(&slice_in[0])), outlength) 26 | } 27 | 28 | func bytesToRawType(slice_in []byte) []RawType { 29 | if len(slice_in) == 0 { 30 | return []RawType{} 31 | } 32 | outlength := uintptr(len(slice_in)) * unsafe.Sizeof(slice_in[0]) / unsafe.Sizeof(RawType(0)) 33 | return unsafe.Slice((*RawType)(unsafe.Pointer(&slice_in[0])), outlength) 34 | } 35 | -------------------------------------------------------------------------------- /internal/lancero/cmd/oddashtx/oddashtx.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "os/signal" 7 | 8 | 
"github.com/usnistgov/dastard/internal/lancero" 9 | ) 10 | 11 | func main() { 12 | // Start the adapter 13 | lan, err := lancero.NewLancero(0) 14 | defer lan.Close() 15 | 16 | if err != nil { 17 | log.Println("ERROR: ", err) 18 | return 19 | } 20 | 21 | const timeoutSec = 2 22 | const verbosity = 0 23 | err = lan.StartAdapter(timeoutSec, verbosity) 24 | defer lan.StopAdapter() 25 | if err != nil { 26 | log.Println("Could not start adapter: ", err) 27 | return 28 | } 29 | lan.InspectAdapter() 30 | 31 | // Configure and start the collector 32 | err = lan.CollectorConfigure(1, 1, 0xFFFF, 1) 33 | if err != nil { 34 | return 35 | } 36 | err = lan.StartCollector(false) 37 | if err != nil { 38 | return 39 | } 40 | defer lan.StopCollector() 41 | 42 | var buffer []byte 43 | 44 | // Trap interrupts so we can cleanly exit the program 45 | interruptCatcher := make(chan os.Signal, 1) 46 | signal.Notify(interruptCatcher, os.Interrupt) 47 | 48 | var bytesRead int 49 | for bytesRead < 1000000 { 50 | select { 51 | case <-interruptCatcher: 52 | log.Println("caught interrupt") 53 | return 54 | default: 55 | _, _, err = lan.Wait() 56 | if err != nil { 57 | return 58 | } 59 | buffer, _, err = lan.AvailableBuffer() 60 | bytesRead += len(buffer) 61 | if err != nil { 62 | return 63 | } 64 | log.Println(lancero.OdDashTX(buffer, 10)) 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build a golang project 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go 3 | 4 | name: Go Build+Test 5 | 6 | on: 7 | push: 8 | pull_request: 9 | branches: [ "master" ] 10 | release: 11 | types: [created] 12 | 13 | jobs: 14 | 15 | build: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | go-version: [ '1.22', 'stable' ] 20 | 21 | steps: 22 | - uses: 
actions/checkout@v4 23 | 24 | - name: Install deps (can we eliminate libsodium?) 25 | run: | 26 | sudo apt-get --yes update 27 | sudo apt-get --yes install libsodium-dev libzmq3-dev 28 | 29 | - name: Setup Go ${{ matrix.go-version }} 30 | uses: actions/setup-go@v5 31 | with: 32 | go-version: ${{ matrix.go-version }} 33 | cache: true 34 | 35 | # You can test your matrix by printing the current Go version 36 | # (see https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go) 37 | - name: Display Go version 38 | run: go version 39 | 40 | - name: Build with "make build" (to automate linking in correct build date and git hash) 41 | run: make build && ls -l 42 | 43 | - name: Test 44 | run: go test -timeout 30s -v ./... 45 | 46 | - name: Publish GitHub Release Artifacts 47 | if: github.event_name == 'release' && github.event.action == 'created' 48 | uses: SierraSoftworks/gh-releases@v1.0.7 49 | with: 50 | token: ${{ secrets.GITHUB_TOKEN }} 51 | overwrite: 'true' 52 | files: dastard 53 | 54 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This software was developed by employees of the National Institute of Standards and Technology (NIST), 2 | an agency of the Federal Government and is being made available as a public service. Pursuant to title 3 | 17 United States Code Section 105, works of NIST employees are not subject to copyright protection in 4 | the United States. This software may be subject to foreign copyright. Permission in the United States 5 | and in foreign countries, to the extent that NIST may hold copyright, to use, copy, modify, create 6 | derivative works, and distribute this software and its documentation without fee is hereby granted 7 | on a non-exclusive basis, provided that this notice and disclaimer of warranty appears in all copies. 
8 | 9 | THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND, EITHER EXPRESSED, IMPLIED, OR 10 | STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY THAT THE SOFTWARE WILL CONFORM TO 11 | SPECIFICATIONS, ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND 12 | FREEDOM FROM INFRINGEMENT, AND ANY WARRANTY THAT THE DOCUMENTATION WILL CONFORM TO THE SOFTWARE, OR 13 | ANY WARRANTY THAT THE SOFTWARE WILL BE ERROR FREE. IN NO EVENT SHALL NIST BE LIABLE FOR ANY DAMAGES, 14 | INCLUDING, BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, 15 | RESULTING FROM, OR IN ANY WAY CONNECTED WITH THIS SOFTWARE, WHETHER OR NOT BASED UPON WARRANTY, 16 | CONTRACT, TORT, OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY PERSONS OR PROPERTY OR OTHERWISE, 17 | AND WHETHER OR NOT LOSS WAS SUSTAINED FROM, OR AROSE OUT OF THE RESULTS OF, OR USE OF, THE SOFTWARE 18 | OR SERVICES PROVIDED HEREUNDER. 19 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/usnistgov/dastard 2 | 3 | go 1.24.3 4 | 5 | require ( 6 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 7 | github.com/lorenzosaino/go-sysctl v0.3.1 8 | github.com/pebbe/zmq4 v1.4.0 9 | github.com/sbinet/npyio v0.9.0 10 | github.com/spf13/viper v1.21.0 11 | github.com/stretchr/testify v1.11.1 12 | gonum.org/v1/gonum v0.16.0 13 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 14 | ) 15 | 16 | require ( 17 | github.com/BurntSushi/toml v1.5.0 // indirect 18 | github.com/fsnotify/fsnotify v1.9.0 // indirect 19 | github.com/go-viper/mapstructure/v2 v2.4.0 // indirect 20 | github.com/nlpodyssey/gopickle v0.3.0 // indirect 21 | github.com/pelletier/go-toml/v2 v2.2.4 // indirect 22 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 23 | github.com/sagikazarmark/locafero v0.12.0 // indirect 
24 | github.com/spf13/afero v1.15.0 // indirect 25 | github.com/spf13/cast v1.10.0 // indirect 26 | github.com/spf13/pflag v1.0.10 // indirect 27 | github.com/subosito/gotenv v1.6.0 // indirect 28 | go.yaml.in/yaml/v3 v3.0.4 // indirect 29 | golang.org/x/exp/typeparams v0.0.0-20251209150349-8475f28825e9 // indirect 30 | golang.org/x/lint v0.0.0-20241112194109-818c5a804067 // indirect 31 | golang.org/x/mod v0.31.0 // indirect 32 | golang.org/x/sync v0.19.0 // indirect 33 | golang.org/x/sys v0.39.0 // indirect 34 | golang.org/x/text v0.32.0 // indirect 35 | golang.org/x/tools v0.40.0 // indirect 36 | golang.org/x/tools/go/expect v0.1.1-deprecated // indirect 37 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 38 | gopkg.in/yaml.v3 v3.0.1 // indirect 39 | honnef.co/go/tools v0.6.1 // indirect 40 | ) 41 | -------------------------------------------------------------------------------- /doc/Inverted_Channels.md: -------------------------------------------------------------------------------- 1 | # Inverting an arbitrary subset of µMUX (Abaco) channels 2 | 3 | In February 2024, for the HEATES experiment at J-PARC, we needed the ability in software to invert the sign of an arbitrary subset of channels ([issue #330](https://github.com/usnistgov/dastard/issues/330)). 4 | 5 | This is now available in Dastard through the `AbacoUnwrapOptions` configuration object, specifically the `InvertChan` slice. How can you set channels to be in this list? 6 | 7 | ## Method 1: use the dastard-commander GUI 8 | 9 | In the main window, under the Data Sources tab, if you select Abaco µMUX as the source type, you can find a large text edit box labeled Inverted Channels. This box is generally disabled (cannot be edited). If you want to add or remove channels from the inverted-channels list, you first need to enable the box. You can find the control in the Expert menu. Select the menu item `Change Inverted Chans` to enable editing in the Inverted Channels text box.
10 | 11 | When you Start Data for an Abaco source, this list is "normalized": 12 | * Words are split by whitespace and/or commas. 13 | * Words are converted to integers (or ignored when they cannot be). 14 | * Duplicates are removed. 15 | * The numbers are sorted. 16 | 17 | Then the normalized list is sent to the Dastard server (and used to change the entries in the text edit box). At this, the `Change Inverted Chans` expert menu item becomes un-checked. This is intended as a way to make it difficult to change the list of inverted channels (because it will not need frequent changes). 18 | 19 | ## Method 2: send an RPC request to a running Dastard. 20 | 21 | I'm still working on a minimal python script that will connect to Dastard and make the change to the configuration... 22 | -------------------------------------------------------------------------------- /internal/asyncbufio/asyncbufio_test.go: -------------------------------------------------------------------------------- 1 | package asyncbufio 2 | 3 | import ( 4 | "crypto/md5" 5 | "fmt" 6 | "io" 7 | "log" 8 | "os" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | func md5sum(fname string) string { 14 | f, err := os.Open(fname) 15 | if err != nil { 16 | log.Fatal(err) 17 | } 18 | defer f.Close() 19 | 20 | h := md5.New() 21 | if _, err := io.Copy(h, f); err != nil { 22 | log.Fatal(err) 23 | } 24 | return fmt.Sprintf("%x", h.Sum(nil)) 25 | } 26 | 27 | func TestWrite(t *testing.T) { 28 | f, err := os.CreateTemp("", "example") 29 | if err != nil { 30 | t.Error(err) 31 | } 32 | defer os.Remove(f.Name()) // clean up 33 | 34 | w := NewWriter(f, 100, time.Second) 35 | for i := range 100 { 36 | sometext := fmt.Appendf(nil, "Line of text %3d\n", i) 37 | w.Write(sometext) 38 | if i%25 == 19 { 39 | w.Flush() 40 | } 41 | } 42 | w.Write([]byte("Last line\n")) 43 | w.Close() 44 | 45 | // Verify exact file contents 46 | actual := md5sum(f.Name()) 47 | expected := "49c3d3dc6d2929a997016c9509010333" 48 | if actual != expected { 49 | 
t.Errorf("example file md5=%s, want %s", actual, expected) 50 | } 51 | 52 | // Tricky way to test for an expected panic: 53 | defer func() { recover() }() 54 | w.Flush() 55 | t.Errorf("asyncbufio.Writer.Flush() after .Close() did not panic") 56 | } 57 | 58 | func TestCloseTwice(t *testing.T) { 59 | f, err := os.CreateTemp("", "example") 60 | if err != nil { 61 | t.Error(err) 62 | } 63 | defer os.Remove(f.Name()) // clean up 64 | 65 | w := NewWriter(f, 100, time.Second) 66 | w.Close() 67 | 68 | // Tricky way to test for an expected panic: 69 | defer func() { recover() }() 70 | w.Close() 71 | t.Errorf("asyncbufio.Writer.Flush() after .Close() did not panic") 72 | } 73 | -------------------------------------------------------------------------------- /global_config.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "time" 7 | ) 8 | 9 | // Portnumbers structs can contain all TCP port numbers used by Dastard. 10 | type Portnumbers struct { 11 | RPC int 12 | Status int 13 | Trigs int 14 | SecondaryTrigs int 15 | Summaries int 16 | } 17 | 18 | // Ports globally holds all TCP port numbers used by Dastard. 
19 | var Ports Portnumbers 20 | 21 | const BasePort = 5500 22 | 23 | func setPortnumbers(base int) { 24 | Ports.RPC = base 25 | Ports.Status = base + 1 26 | Ports.Trigs = base + 2 27 | Ports.SecondaryTrigs = base + 3 28 | Ports.Summaries = base + 4 29 | } 30 | 31 | // BuildInfo can contain compile-time information about the build 32 | type BuildInfo struct { 33 | Version string // version stored in global_config.go 34 | Githash string // 7-character git commit hash 35 | Gitdate string // Date/time in the git commit 36 | Date string // Date/time of the go build command 37 | Host string 38 | Summary string // A summary to enter into file database 39 | } 40 | 41 | // Build is a global holding compile-time information about the build 42 | var Build = BuildInfo{ 43 | Version: "0.3.9pre2", 44 | Githash: "no git hash computed", 45 | Gitdate: "no git commit date entered", 46 | Date: "no build date computed", 47 | Host: "no host found", 48 | Summary: "DASTARD Version x.y.z (git commit ....... of date time)", 49 | } 50 | 51 | // DastardStartTime is a global holding the time init() was run 52 | var DastardStartTime time.Time 53 | 54 | // ProblemLogger will log warning messages to a file 55 | var ProblemLogger *log.Logger 56 | 57 | // UpdateLogger will log client updates to a file 58 | var UpdateLogger *log.Logger 59 | 60 | func init() { 61 | setPortnumbers(BasePort) 62 | DastardStartTime = time.Now() 63 | 64 | // Dastard main program will override this, but at least initialize with a sensible value 65 | ProblemLogger = log.New(os.Stderr, "", log.LstdFlags) 66 | UpdateLogger = log.New(os.Stdout, "", log.LstdFlags) 67 | } 68 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GOCMD=go 2 | GOFMT=$(GOCMD) fmt 3 | GOGET=$(GOCMD) get 4 | GOBUILD=$(GOCMD) build 5 | GOCLEAN=$(GOCMD) clean 6 | GOTEST=$(GOCMD) test 7 | BINARY_NAME=dastard 8 | 
STATIC_NAME=dastard_static 9 | 10 | # The following uses the pure-Go "net" package netgo, instead of the usual link against C libraries. 11 | # Added March 7, 2023 to make the Dastard binary more portable. But you can change to "NETGO=" to 12 | # go back to the old way, if it seems useful. 13 | BUILDTAGS=netgo 14 | TAGS = -tags "$(BUILDTAGS)" 15 | 16 | .PHONY: all build install test clean run deps static 17 | 18 | BUILDDATE := $(shell date '+%a, %e %b %Y %H:%M:%S %z') 19 | GITHASH := $(shell git rev-parse --short HEAD) 20 | GITDATE := $(shell git log -1 --pretty=format:"%ad" --date=format:"%a, %e %b %Y %H:%M:%S %z") 21 | 22 | GLOBALVARIABLES=-X 'main.buildDate=$(BUILDDATE)' -X main.githash=$(GITHASH) -X 'main.gitdate=$(GITDATE)' 23 | GOLINKFLAGS=-ldflags "$(GLOBALVARIABLES)" 24 | build: $(BINARY_NAME) 25 | all: test build install 26 | 27 | $(BINARY_NAME): Makefile *.go cmd/dastard/dastard.go */*.go internal/*/*.go 28 | $(GOBUILD) $(GOLINKFLAGS) $(TAGS) -o $(BINARY_NAME) cmd/dastard/dastard.go 29 | 30 | test: 31 | $(GOFMT) 32 | $(GOTEST) $(NETGO) -v ./... 33 | 34 | clean: 35 | $(GOCLEAN) 36 | rm -f $(BINARY_NAME) 37 | 38 | run: build 39 | ./$(BINARY_NAME) 40 | 41 | deps: 42 | $(GOGET) -v -t ./... 43 | 44 | install: build 45 | cp -p $(BINARY_NAME) `go env GOPATH`/bin/ 46 | 47 | # EXPERIMENTAL: build a statically linked dastard binary with "make static". 48 | # make static will _always_ rebuild the binary, and always with static linking 49 | # The magic below won't make static binaries on Mac OS X (Darwin) at this time, so error on Macs. 
50 | STATICLDFLAGS=-ldflags "-linkmode external -extld g++ -extldflags '-static -lsodium' $(GLOBALVARIABLES)" 51 | OS_NAME := $(shell uname -s | tr A-Z a-z) 52 | static: $(STATIC_NAME) 53 | 54 | $(STATIC_NAME): Makefile *.go cmd/dastard/dastard.go */*.go 55 | ifeq ($(OS_NAME),darwin) 56 | $(error Cannot build static binary on Mac OS) 57 | endif 58 | $(GOBUILD) $(STATICLDFLAGS) $(NETGO) -o $(BINARY_NAME) cmd/dastard/dastard.go 59 | -------------------------------------------------------------------------------- /internal/getbytes/getbytes_test.go: -------------------------------------------------------------------------------- 1 | package getbytes 2 | 3 | import ( 4 | "encoding/hex" 5 | "testing" 6 | ) 7 | 8 | func TestFromGetBytes(t *testing.T) { 9 | var byteslicetests = []struct { 10 | byteslice []byte 11 | expect string 12 | }{ 13 | {FromSliceUint8([]uint8{0xAB, 0xCD, 0xEF, 0x01, 0x23, 0x45, 0x67, 0x89}), "abcdef0123456789"}, 14 | {FromSliceUint16([]uint16{0xABCD, 0xEF01, 0x2345, 0x6789}), "cdab01ef45238967"}, 15 | {FromSliceUint32([]uint32{0xABCDEF01, 0x23456789}), "01efcdab89674523"}, 16 | {FromSliceUint64([]uint64{0xABCDEF0123456789}), "8967452301efcdab"}, 17 | {FromSliceInt8([]int8{0x00, 0x0A, 0x0B, 0x0C, 0x0D, 0x0F, 0x01, 0x02}), "000a0b0c0d0f0102"}, 18 | {FromSliceInt16([]int16{1, 2, 3, 4}), "0100020003000400"}, 19 | {FromSliceInt32([]int32{1, 2}), "0100000002000000"}, 20 | {FromSliceInt64([]int64{1}), "0100000000000000"}, 21 | {FromSliceFloat32([]float32{1, 2}), "0000803f00000040"}, 22 | {FromSliceFloat64([]float64{2, 4}), "00000000000000400000000000001040"}, 23 | {FromSliceUint8([]uint8{}), ""}, 24 | {FromSliceUint16([]uint16{}), ""}, 25 | {FromSliceUint32([]uint32{}), ""}, 26 | {FromSliceUint64([]uint64{}), ""}, 27 | {FromSliceInt8([]int8{}), ""}, 28 | {FromSliceInt16([]int16{}), ""}, 29 | {FromSliceInt32([]int32{}), ""}, 30 | {FromSliceInt64([]int64{}), ""}, 31 | {FromSliceFloat32([]float32{}), ""}, 32 | {FromSliceFloat64([]float64{}), ""}, 33 | } 34 | 
for _, test := range byteslicetests { 35 | encodedStr := hex.EncodeToString(test.byteslice) 36 | if expectStr := test.expect; encodedStr != expectStr { 37 | t.Errorf("want %v, have %v", expectStr, encodedStr) 38 | } 39 | } 40 | 41 | var sizetests = []struct { 42 | dlen int 43 | want int 44 | }{ 45 | {len(FromUint8(1)), 1}, 46 | {len(FromUint16(1)), 2}, 47 | {len(FromUint32(1)), 4}, 48 | {len(FromUint64(1)), 8}, 49 | {len(FromInt8(1)), 1}, 50 | {len(FromInt16(1)), 2}, 51 | {len(FromInt32(1)), 4}, 52 | {len(FromInt64(1)), 8}, 53 | {len(FromFloat32(1)), 4}, 54 | {len(FromFloat64(1)), 8}, 55 | } 56 | for _, test := range sizetests { 57 | if test.dlen != test.want { 58 | t.Errorf("wrong length: %d, want %d", test.dlen, test.want) 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /writing_state_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | ) 8 | 9 | // See also data_source_test.go, which contains several implicit tests of WritingState. 
10 | 11 | func TestWriteControl(t *testing.T) { 12 | tmp, err1 := os.MkdirTemp("", "dastardTest") 13 | if err1 != nil { 14 | t.Errorf("could not make TempDir") 15 | return 16 | } 17 | defer os.RemoveAll(tmp) 18 | 19 | ds := AnySource{nchan: 4} 20 | ds.rowColCodes = make([]RowColCode, ds.nchan) 21 | ds.PrepareChannels() 22 | ds.PrepareRun(256, 1024) 23 | defer ds.Stop() 24 | config := &WriteControlConfig{Request: "Pause", Path: tmp, WriteLJH22: true} 25 | for _, request := range []string{"Pause", "Unpause", "Stop"} { 26 | config.Request = request 27 | if err := ds.WriteControl(config); err != nil { 28 | t.Errorf("WriteControl request %s failed on a non-writing file: %v", request, err) 29 | } 30 | } 31 | config.Request = "notvalid" 32 | if err := ds.WriteControl(config); err == nil { 33 | t.Errorf("WriteControl request %s should fail, but didn't", config.Request) 34 | } 35 | config.Request = "Start" 36 | config.WriteLJH22 = false 37 | if err := ds.WriteControl(config); err == nil { 38 | t.Errorf("WriteControl request Start with no valid filetype should fail, but didn't") 39 | } 40 | config.WriteLJH22 = true 41 | config.Path = filepath.FromSlash("/notvalid/because/permissions") 42 | if err := ds.WriteControl(config); err == nil { 43 | t.Errorf("WriteControl request Start with nonvalid path should fail, but didn't") 44 | } 45 | 46 | config.Path = tmp 47 | if err := ds.WriteControl(config); err != nil { 48 | t.Errorf("WriteControl request %s failed: %v", config.Request, err) 49 | } 50 | for _, request := range []string{"Pause", "Unpause", "Stop", "Start"} { 51 | config.Request = request 52 | if err := ds.WriteControl(config); err != nil { 53 | t.Errorf("WriteControl request %s failed on a writing file: %v", request, err) 54 | } 55 | } 56 | 57 | // The Stop step in the following tests that the bug given in issue #239 is fixed. 
58 | ds.HandleDataDrop(5, 10) 59 | for _, request := range []string{"Pause", "Unpause", "Stop"} { 60 | config.Request = request 61 | if err := ds.WriteControl(config); err != nil { 62 | t.Errorf("WriteControl request %s failed on a writing file: %v", request, err) 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | # 3 | # Here is a demonstration Dockerfile for building Dastard and installing its variants. 4 | # Not sure we want to use Docker, but this can help us get started. 5 | # 6 | FROM golang:1.24.5 AS build-stage 7 | # RUN apk add --no-cache libzmq zeromq-dev 8 | RUN apt-get update && apt-get install -y libsodium-dev libczmq-dev 9 | 10 | # Set destination for COPY 11 | WORKDIR /app 12 | 13 | # Download Go modules 14 | COPY go.mod go.sum ./ 15 | RUN go mod download 16 | 17 | # Copy the source code. Note the slash at the end, as explained in 18 | # https://docs.docker.com/reference/dockerfile/#copy 19 | COPY . ./ 20 | 21 | # Build 22 | # To do: set time zone and host name in the container from the host. 
23 | RUN GOARCH=$(uname -m) GOOS=linux && go build -o /bahama cmd/bahama/bahama.go 24 | RUN export DATE_VAR=$(date -u '+%a, %e %b %Y %H:%M:%S %z') && \ 25 | export GITHASH=$(git rev-parse --short HEAD) && \ 26 | export GITDATE=$(git log -1 --pretty=format:"%ad" --date=format:"%a, %e %b %Y %H:%M:%S %z") && \ 27 | GOARCH=$(uname -m) GOOS=linux && \ 28 | go build -ldflags "-X 'main.buildDate=${DATE_VAR}' -X main.githash=${GITHASH} -X 'main.gitdate=${GITDATE}'" -tags netgo -o /dastard cmd/dastard/dastard.go 29 | 30 | 31 | FROM build-stage AS run-test-stage 32 | COPY internal/lancero/test_data/* ./internal/lancero/test_data/ 33 | COPY maps/* ./maps/ 34 | RUN groupadd -r myuser && useradd -r -g myuser myuser 35 | RUN mkdir /home/myuser && chown -R myuser:myuser /home/myuser 36 | RUN chown -R myuser: /app 37 | USER myuser 38 | RUN go test ./... 39 | ENTRYPOINT ["/bin/bash"] 40 | 41 | 42 | FROM debian:latest AS build-release-stage 43 | RUN apt-get update && apt-get install -y libsodium-dev libczmq-dev file less 44 | 45 | # Couldn't get the compiled binary to work on Alpine linux. The instructions would have been: 46 | # FROM alpine AS build-release-stage 47 | # RUN apk add --no-cache libzmq zeromq-dev 48 | 49 | WORKDIR /app 50 | COPY --from=build-stage /dastard . 51 | COPY --from=build-stage /bahama . 52 | 53 | # Optional: 54 | # To bind to a TCP port, runtime parameters must be supplied to the docker command. 55 | # But we can document in the Dockerfile what ports 56 | # the application is going to listen on by default. 
57 | # https://docs.docker.com/reference/dockerfile/#expose 58 | EXPOSE 5500 5501 5502 5503 5504 59 | 60 | ENTRYPOINT ["./dastard"] 61 | -------------------------------------------------------------------------------- /mix.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "math" 5 | ) 6 | 7 | // Mix performns the mix for lancero data and retards the raw data stream by 8 | // one sample so it can be mixed with the appropriate error sample. This corrects 9 | // for a poor choice in the TDM firmware design, but so it goes. 10 | // 11 | // fb_physical[n] refers to the feedback signal applied during tick [n] 12 | // err_physical[n] refers to the error signal measured during tick [n] 13 | // 14 | // Unfortunately, the data stream pairs them up differently: 15 | // fb_data[n] = fb_physical[n+1] 16 | // err_data[n] = err_physical[n] 17 | // 18 | // At frame [n] we get data for the error measured during frame [n] 19 | // and the feedback computed based on it, which is the feedback 20 | // that will be _applied_ during frame [n+1]. 21 | // 22 | // We want 23 | // mix[n] = fb_physical[n] + errorScale * err_physical[n], so 24 | // mix[n] = fb_data[n-1] + errorScale * err_data[n], or 25 | // mix[n+1] = fb_data[n] + errorScale * err_data[n+1] 26 | // 27 | // Second issue: the error signal we work with is a sum of NSAMP samples from 28 | // the ADC, but autotune's values assume that we work with the _mean_ (because it 29 | // lets autotune communicate an NSAMP-agnostic value). So we store NOT the auto- 30 | // tune value but the value that actually multiplies the error sum. 31 | type Mix struct { 32 | errorScale float64 // Multiply this by raw error data. NSAMP is scaled out. 33 | lastFb RawType 34 | } 35 | 36 | // MixRetardFb mixes err into fbs, alters fbs in place to contain the mixed values 37 | // consecutive calls must be on consecutive data. 
38 | // The following ASSUMES that error signals are signed. That holds for Lancero 39 | // TDM systems, at least, and that is the only source that uses Mix. 40 | func (m *Mix) MixRetardFb(fbs *[]RawType, errs *[]RawType) { 41 | const mask = ^RawType(0x03) 42 | if m.errorScale == 0.0 { 43 | for j := 0; j < len(*fbs); j++ { 44 | fb := m.lastFb 45 | m.lastFb = (*fbs)[j] & mask 46 | (*fbs)[j] = fb 47 | } 48 | return 49 | } 50 | for j := 0; j < len(*fbs); j++ { 51 | fb := m.lastFb 52 | mixAmount := float64(int16((*errs)[j])) * m.errorScale 53 | // Be careful not to overflow! 54 | floatMixResult := mixAmount + float64(fb) 55 | m.lastFb = (*fbs)[j] & mask 56 | if floatMixResult >= math.MaxUint16 { 57 | (*fbs)[j] = math.MaxUint16 58 | } else if floatMixResult < 0 { 59 | (*fbs)[j] = 0 60 | } else { 61 | (*fbs)[j] = RawType(roundint(floatMixResult)) 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /internal/lancero/lancero_collector.go: -------------------------------------------------------------------------------- 1 | package lancero 2 | 3 | const ( 4 | colRegisterBase int64 = 0x100 // collector register base address 5 | colRegisterIDV int64 = colRegisterBase + 0x00 // collector register offset for id and version numbers 6 | colRegisterCtrl int64 = colRegisterBase + 0x04 // collector register offset for control register 7 | colRegisterLP int64 = colRegisterBase + 0x08 // collector register offset for line sync period 8 | colRegisterDD int64 = colRegisterBase + 0x0c // collector register offset for data delay 9 | colRegisterMask int64 = colRegisterBase + 0x10 // collector register offset for channel masks 10 | colRegisterFL int64 = colRegisterBase + 0x14 // collector register offset for frame length 11 | 12 | bitsCtrlRun uint32 = 0x1 // collector control register bit for running acquisition 13 | bitsCtrlSim uint32 = 0x2 // collector control register bit for sim mode 14 | ) 15 | 16 | // collector is the interface to 
the component that combines several optical 17 | // fibers onto one serial stream. Must be started and stopped; also controls 18 | // fiber-reading mode vs simulated data mode. 19 | type collector struct { 20 | device *lanceroDevice 21 | simulated bool 22 | } 23 | 24 | func (c *collector) configure(linePeriod, dataDelay, channelMask, frameLength uint32) error { 25 | if err := c.device.writeRegister(colRegisterLP, linePeriod); err != nil { 26 | return err 27 | } 28 | if err := c.device.writeRegister(colRegisterDD, dataDelay); err != nil { 29 | return err 30 | } 31 | if err := c.device.writeRegister(colRegisterMask, channelMask); err != nil { 32 | return err 33 | } 34 | if err := c.device.writeRegisterFlush(colRegisterFL, frameLength); err != nil { 35 | return err 36 | } 37 | return nil 38 | } 39 | 40 | func (c *collector) start(simulate bool) error { 41 | c.simulated = simulate 42 | runCmd := bitsCtrlRun 43 | if simulate { 44 | // Start the simulator 45 | if err := c.device.writeRegisterFlush(colRegisterCtrl, bitsCtrlSim); err != nil { 46 | return err 47 | } 48 | runCmd |= bitsCtrlSim 49 | } 50 | // Now enable the clock (with sim still enabled or not) 51 | if err := c.device.writeRegisterFlush(colRegisterCtrl, runCmd); err != nil { 52 | return err 53 | } 54 | return nil 55 | } 56 | 57 | func (c *collector) stop() error { 58 | if c.simulated { 59 | if err := c.device.writeRegisterFlush(colRegisterCtrl, bitsCtrlSim); err != nil { 60 | return err 61 | } 62 | } 63 | c.simulated = false 64 | return c.device.writeRegisterFlush(colRegisterCtrl, 0) 65 | } 66 | -------------------------------------------------------------------------------- /cmd/udpdump/udpdump.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "flag" 6 | "fmt" 7 | "github.com/usnistgov/dastard/packets" 8 | "net" 9 | "strconv" 10 | "strings" 11 | ) 12 | 13 | func probe(npack int, endpoint string) error { 14 | 
fmt.Printf("Probing %s for the first %d packets received...\n", endpoint, npack) 15 | address, err := net.ResolveUDPAddr("udp", endpoint) 16 | if err != nil { 17 | return err 18 | } 19 | ServerConn, _ := net.ListenUDP("udp", address) 20 | defer ServerConn.Close() 21 | 22 | buf := make([]byte, 8192) 23 | for range npack { 24 | if _, _, err := ServerConn.ReadFromUDP(buf); err != nil { 25 | return err 26 | } 27 | if pack, err := packets.ReadPacket(bytes.NewReader(buf)); err != nil { 28 | return err 29 | } else { 30 | nchan, chan0 := pack.ChannelInfo() 31 | fmt.Printf("%s with %d frames for %d channels [%d-%d]\n", pack.String(), 32 | pack.Frames(), nchan, chan0, chan0+nchan-1) 33 | } 34 | } 35 | return nil 36 | } 37 | 38 | func main() { 39 | var npack int 40 | var port int 41 | const default_host = "localhost" 42 | const default_port = 4000 43 | host := default_host 44 | flag.IntVar(&npack, "n", 10, "Number of packets to dump") 45 | flag.IntVar(&port, "port", default_port, "Port to monitor") 46 | flag.IntVar(&port, "p", default_port, "Port to monitor (shorthand)") 47 | 48 | flag.Usage = func() { 49 | fmt.Printf("udpdump, for dumping the first N packet headers, by default those from localhost:%d\n", 50 | default_port) 51 | fmt.Println("Usage: udpdump [flags] [host][:port]") 52 | flag.PrintDefaults() 53 | } 54 | flag.Parse() 55 | if flag.NArg() > 0 { 56 | host = flag.Arg(0) 57 | 58 | // If host ends in :portnum, split that off and update the port value 59 | if pieces := strings.Split(host, ":"); len(pieces) > 1 { 60 | if len(pieces) > 2 { 61 | fmt.Printf("Cannot parse host '%s' with %d colon separators\n", host, len(pieces)-1) 62 | return 63 | } 64 | attachedport, err := strconv.Atoi(pieces[1]) 65 | if err != nil { 66 | fmt.Printf("Cannot convert port '%s' to integer\n", pieces[1]) 67 | return 68 | } 69 | if port != default_port && port != attachedport { 70 | fmt.Printf("Cannot use -p argument and a conflicting host:port pair\n") 71 | return 72 | } 73 | if len(pieces[0]) 
== 0 { 74 | host = default_host 75 | } else { 76 | host = pieces[0] 77 | } 78 | port = attachedport 79 | } 80 | } 81 | 82 | endpoint := fmt.Sprintf("%s:%4.4d", host, port) 83 | if err := probe(npack, endpoint); err != nil { 84 | fmt.Printf("error: %v\n", err) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /DOCKER.md: -------------------------------------------------------------------------------- 1 | # Building and running DASTARD in a Docker container 2 | 3 | Joe Fowler. July 18, 2025 4 | 5 | The included `Dockerfile` allows you to build, test, and run DASTARD within a Docker container. As 6 | of this writing (July 18, 2025), Docker builds are experimental, but they were shown to work on 7 | both Mac OS 15 (with Apple processors) and Ubuntu Linux. 8 | 9 | It is still possible to install DASTARD outside of Docker, in which case you'd want to ignore this 10 | file and the `Dockerfile`. 11 | 12 | 13 | ## Install Docker 14 | 15 | The instructions for installing [Docker Desktop on Ubuntu Linux](https://docs.docker.com/desktop/setup/install/linux/ubuntu/) worked on Ubuntu 22.04. 16 | 17 | 1. Go to [Docker Desktop release notes](https://docs.docker.com/desktop/release-notes/) and find the latest Debian. Download the `docker-debian-amd64.deb` file. 18 | 2. Then install it: 19 | 20 | ```bash 21 | sudo apt-get update 22 | sudo apt-get install ./docker-desktop-amd64.deb 23 | ``` 24 | 25 | ## Build an image for Dastard 26 | 27 | From the DASTARD main directory: 28 | 29 | ```bash 30 | docker build --tag dastard . 31 | ``` 32 | 33 | To run the tests instead 34 | 35 | ```bash 36 | docker build --tag dastard-tests --target run-test-stage . 37 | ``` 38 | 39 | In docker desktop, go to Settings / Resources and select _Enable host netowrking_. (You'll have to restart Docker.) 40 | 41 | ## Run Dastard 42 | 43 | ```bash 44 | # To run the usual way. 
45 | # The --rm means the container will be automatically deleted when it's done running. 46 | # The --net=host will make the container share an IP address space with the host system. 47 | # The --mount type=bind... will bind the directory ~/.dastard on the host to the container. 48 | docker run --rm --net=host --mount type=bind,src=${HOME}/.dastard,dst=/root/.dastard dastard 49 | 50 | # To run the version or help 51 | docker run dastard -version 52 | docker run dastard -help 53 | 54 | # To run a bash shell within the container, you must override the default entrypoint 55 | docker run -it --entrypoint /bin/bash dastard 56 | ``` 57 | 58 | The run command's `--mount` argument will mount a host directory (`~/.dastard/`) into the container. 59 | This binding ensures that any dastard configuration changes will both persist AND be available 60 | for examination and change outside the container. They will even be shared between Dastard instances run in a 61 | container and outside of it. 62 | 63 | ### To do 64 | 65 | * The command arguments required to run Dastard in a Docker container could be stored in a `docker-compose.yml` file. Docker 66 | compose is generally designed for using a bunch of containers in parallel, but we could set it up even for one container. 67 | * Figure out how to interact with our Python programs, specifically dastard-commander and microscope. 
68 | -------------------------------------------------------------------------------- /map.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | ) 8 | 9 | // Pixel represents the physical location of a TES 10 | type Pixel struct { 11 | X, Y int 12 | Name string 13 | } 14 | 15 | // Map represents an entire array of pixel locations 16 | type Map struct { 17 | Spacing int 18 | Pixels []Pixel 19 | Filename string 20 | } 21 | 22 | func readMap(filename string) (*Map, error) { 23 | m := new(Map) 24 | m.Pixels = make([]Pixel, 0) 25 | m.Filename = filename 26 | 27 | file, err := os.Open(filename) 28 | if err != nil { 29 | return nil, err 30 | } 31 | defer file.Close() 32 | 33 | if _, err := fmt.Fscanf(file, "spacing: %d\n", &m.Spacing); err != nil { 34 | return nil, err 35 | } 36 | var matterChannums bool 37 | for { 38 | var chnum int 39 | var p Pixel 40 | _, err := fmt.Fscanf(file, "%d %d %d %s", &chnum, &p.X, &p.Y, &p.Name) 41 | if err == io.EOF { 42 | break 43 | } 44 | if err != nil { 45 | fmt.Println(m) 46 | fmt.Println(chnum, p) 47 | return m, err 48 | } 49 | m.Pixels = append(m.Pixels, p) 50 | if len(m.Pixels) == 2 { 51 | if chnum == 3 { 52 | matterChannums = true 53 | // fmt.Println("reading map with matter style channel numbers in legacy mode") 54 | } 55 | } 56 | if len(m.Pixels) > 2 { 57 | if matterChannums { 58 | matterChannum := 2*len(m.Pixels) - 1 59 | if chnum != matterChannum { 60 | return nil, fmt.Errorf("readMap: have chnum %v, want matterChannum %v (matter channel number legacy mode)", chnum, matterChannum) 61 | } 62 | } else { 63 | channelNumber := len(m.Pixels) 64 | if chnum != channelNumber { 65 | return nil, fmt.Errorf("readMap: have chnum %v, want channelNumber %v", chnum, channelNumber) 66 | 67 | } 68 | } 69 | } 70 | 71 | } 72 | return m, nil 73 | } 74 | 75 | // MapServer is the RPC service that loads and broadcasts TES maps 76 | type MapServer struct 
{ 77 | Map *Map 78 | clientUpdates chan<- ClientUpdate 79 | } 80 | 81 | func newMapServer() *MapServer { 82 | return new(MapServer) 83 | } 84 | 85 | // Load reads a map file and broadcasts it to clients 86 | func (ms *MapServer) Load(filename *string, reply *bool) error { 87 | m, err := readMap(*filename) 88 | *reply = err == nil 89 | if err != nil { 90 | return err 91 | } 92 | ms.Map = m 93 | ms.broadcastMap() 94 | return nil 95 | } 96 | 97 | // Unload forgets the current map file 98 | func (ms *MapServer) Unload(zero *int, reply *bool) error { 99 | ms.Map = nil 100 | ms.broadcastMap() 101 | *reply = true 102 | return nil 103 | } 104 | 105 | func (ms *MapServer) broadcastMap() { 106 | if ms.Map == nil { 107 | ms.clientUpdates <- ClientUpdate{"TESMAPFILE", "no map file"} 108 | ms.clientUpdates <- ClientUpdate{"TESMAP", "no map loaded"} 109 | } else { 110 | ms.clientUpdates <- ClientUpdate{"TESMAPFILE", ms.Map.Filename} 111 | ms.clientUpdates <- ClientUpdate{"TESMAP", ms.Map} 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /doc/BINARY_FORMATS.md: -------------------------------------------------------------------------------- 1 | # Binary Serialization Formats for DASTARD Data Structures 2 | 3 | ## Binary format for triggered data records 4 | 5 | 12/21/2017 (updated 9/2/2020) 6 | 7 | Triggered records are published on a ZMQ PUB socket. Primary triggers appear on 8 | port *BASE*+2, and secondary (cross-talk) triggers appear on port *BASE*+3. 9 | 10 | ### Message Version 0 11 | 12 | Dated 4/19/2018. Triggered data records go into a 2-frame ZMQ message. The first frame contains 13 | the header, which is 36 bytes long. The second frame is the raw record data, which 14 | is of variable length and packed in little-endian byte order. 
15 | The header also contains little-endian values: 16 | 17 | * Byte 0 (2 bytes): channel number 18 | * Byte 2 (1 byte): header version number (0 in this version) 19 | * Byte 3 (1 byte): data type code (see below) 20 | * Byte 4 (4 bytes): samples before trigger 21 | * Byte 8 (4 bytes): samples in record 22 | * Byte 12 (4 bytes): sample period in seconds (float) 23 | * Byte 16 (4 bytes): volts per arb (float) 24 | * Byte 20 (8 bytes): trigger time (nanoseconds since 1 Jan 1970) 25 | * Byte 28 (8 bytes): trigger frame index 26 | 27 | Because the channel number makes up the first 2 bytes, ZMQ subscriber sockets can 28 | subscribe selectively to only certain channels. 29 | 30 | Data type code: so far, only uint16 and int16 are allowed. 31 | 32 | * 0 = int8 33 | * 1 = uint8 34 | * 2 = int16 35 | * 3 = uint16 36 | * 4 = int32 37 | * 5 = uint32 38 | * 6 = int64 39 | * 7 = uint64 40 | 41 | 42 | ## Binary format for triggered data summaries 43 | 44 | 45 | Summaries of triggered records are published on a ZMQ PUB socket on port *BASE*+4. The summaries 46 | assume that the data have been projected onto a low-dimensional linear subspace (a basis). 47 | 48 | ### Message Version 0 49 | 50 | Dated 9/2/2020. Triggered data records go into a 2-frame ZMQ message. The first frame contains 51 | the header, which is 36 bytes long. The second frame is the raw record data, which 52 | is of variable length and packed in little-endian byte order. 
53 | The header also contains little-endian values: 54 | 55 | * Byte 0 (2 bytes): channel number 56 | * Byte 2 (2 byte): header version number (0 in this version) 57 | * Byte 4 (4 bytes): samples before trigger 58 | * Byte 8 (4 bytes): samples in record 59 | * Byte 12 (4 bytes): pretrigger mean value 60 | * Byte 16 (4 bytes): peak value (pretrigger mean subtracted first) 61 | * Byte 20 (4 bytes): pulse RMS (pretrigger mean subtracted first) 62 | * Byte 24 (4 bytes): pulse average (pretrigger mean subtracted first) 63 | * Byte 28 (4 bytes): residual standard deviation (basis vectors projected out first) 64 | * Byte 32 (8 bytes): trigger time (nanoseconds since 1 Jan 1970) 65 | * Byte 40 (8 bytes): trigger frame index 66 | 67 | Because the channel number makes up the first 2 bytes, ZMQ subscriber sockets can 68 | subscribe selectively to only certain channels. 69 | 70 | The second frame consists of the projection coefficients, from the linear projection into the basis. 71 | The coefficients are float64, and the size of the second frame should be 8 times the number of coefficients. 
72 | -------------------------------------------------------------------------------- /internal/off/off_test.go: -------------------------------------------------------------------------------- 1 | package off 2 | 3 | import ( 4 | "encoding/json" 5 | "os" 6 | "testing" 7 | 8 | "gonum.org/v1/gonum/mat" 9 | ) 10 | 11 | // func matPrint(X mat.Matrix, t *testing.T) { 12 | // fa := mat.Formatted(X, mat.Prefix(""), mat.Squeeze()) 13 | // t.Logf("%v\n", fa) 14 | // fmt.Println(fa) 15 | // } 16 | 17 | func TestOff(t *testing.T) { 18 | 19 | // assign the projectors and basis 20 | nbases := 3 21 | nsamples := 4 22 | projectors := mat.NewDense(nbases, nsamples, 23 | []float64{1.124, 0, 1.124, 0, 24 | 0, 1, 0, 0, 25 | 0, 0, 1, 0}) 26 | basis := mat.NewDense(nsamples, nbases, 27 | []float64{1, 0, 0, 28 | 0, 1, 0, 29 | 0, 0, 1, 30 | 0, 0, 0}) 31 | 32 | const maxpre = 100 33 | const maxsamp = 200 34 | w := NewWriter("off_test.off", 0, "chan1", 1, maxpre, maxsamp, 9.6e-6, projectors, basis, "dummy model for testing", 35 | "DastardVersion Placeholder", "GitHash Placeholder", "SourceName Placeholder", TimeDivisionMultiplexingInfo{}, 36 | PixelInfo{}) 37 | if err := w.CreateFile(); err != nil { 38 | t.Fatal(err) 39 | } 40 | if w.headerWritten { 41 | t.Error("headerWritten should be false, have", w.headerWritten) 42 | } 43 | if err := w.WriteHeader(); err != nil { 44 | t.Error(err) 45 | } 46 | if !w.headerWritten { 47 | t.Error("headerWritten should be true, have", w.headerWritten) 48 | } 49 | if err := w.WriteHeader(); err == nil { 50 | t.Errorf("expect error from writing header again") 51 | } 52 | w.Flush() 53 | stat, _ := os.Stat("off_test.off") 54 | sizeHeader := stat.Size() 55 | if err := w.WriteRecord(0, 0, 123456, 0, 0, 0, .123456, make([]float32, 3)); err != nil { 56 | t.Error(err) 57 | } 58 | w.Flush() 59 | stat, _ = os.Stat("off_test.off") 60 | expectSize := sizeHeader + 36 + 4*3 61 | if stat.Size() != expectSize { 62 | t.Errorf("wrong size, want %v, have %v", expectSize, 
stat.Size()) 63 | } 64 | if w.recordsWritten != 1 { 65 | t.Error("wrong number of records written, want 1, have", w.recordsWritten) 66 | } 67 | if err := w.WriteRecord(0, 0, 0, 0, 0, 0, 0, make([]float32, 10)); err == nil { 68 | t.Error("should have complained about wrong number of bases") 69 | } 70 | w.Close() 71 | if w.RecordsWritten() != w.recordsWritten { 72 | t.Error() 73 | } 74 | if w.HeaderWritten() != w.headerWritten { 75 | t.Error() 76 | } 77 | 78 | // Check the OFF file contents, at least certain header info 79 | fp, err := os.Open("off_test.off") 80 | if err != nil { 81 | t.Error("Could not open off test file") 82 | } 83 | defer fp.Close() 84 | offtext := make([]byte, 8192) 85 | _, err = fp.Read(offtext) 86 | if err != nil { 87 | t.Error("Could not read off test file") 88 | } 89 | // Scan through the text read, and stop before first non-printing ASCII byte. 90 | for i, b := range offtext { 91 | if b > 127 { 92 | offtext = offtext[:i] 93 | break 94 | } 95 | } 96 | var x Writer 97 | json.Unmarshal(offtext, &x) 98 | if x.MaxPresamples != maxpre { 99 | t.Errorf("OFF file says MaxPresamples=%d, want %d", x.MaxPresamples, maxpre) 100 | } 101 | if x.MaxSamples != maxsamp { 102 | t.Errorf("OFF file says MaxPresamples=%d, want %d", x.MaxSamples, maxsamp) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /internal/asyncbufio/asyncbufio.go: -------------------------------------------------------------------------------- 1 | package asyncbufio 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "time" 7 | ) 8 | 9 | // Writer provides asynchronous writing to an underlying io.Writer using buffered channels. 
10 | type Writer struct { 11 | writer *bufio.Writer // Buffered writer: this does the writing 12 | flushNow chan struct{} // Channel to signal the underlying writer to flush itself 13 | flushComplete chan struct{} // Channel to signal underlying writer flush is complete 14 | datachannel chan []byte // Channel to hold data before writing it 15 | flushInterval time.Duration // Interval for flushing the writer periodically 16 | } 17 | 18 | // NewWriter creates a new Writer instance. 19 | func NewWriter(w io.Writer, channelDepth int, flushInterval time.Duration) *Writer { 20 | aw := &Writer{ 21 | writer: bufio.NewWriter(w), 22 | datachannel: make(chan []byte, channelDepth), 23 | flushNow: make(chan struct{}), 24 | flushComplete: make(chan struct{}), 25 | flushInterval: flushInterval, // Set the flush interval 26 | } 27 | 28 | go aw.writeLoop() 29 | return aw 30 | } 31 | 32 | // Write sends data to the Writer's channel, storing it for later writing. 33 | func (aw *Writer) Write(p []byte) (int, error) { 34 | select { 35 | case aw.datachannel <- p: 36 | return len(p), nil 37 | default: 38 | return 0, io.ErrShortWrite // Return an error if channel is full 39 | } 40 | } 41 | 42 | // WriteString sends a string to the channel for later writing (with an annoying copy--sorry!) 43 | func (aw *Writer) WriteString(s string) (int, error) { 44 | return aw.Write([]byte(s)) 45 | } 46 | 47 | // Flush flushes any remaining data in the channel to the underlying writer. 48 | // Blocks until the flush is complete. 49 | func (aw *Writer) Flush() error { 50 | aw.flushNow <- struct{}{} 51 | <-aw.flushComplete 52 | return nil 53 | } 54 | 55 | // Close closes the Writer, flushing remaining data and waiting for the writeLoop to finish. 56 | // It will cause a panic to call Write(p) or Flush() after Close()--we don't 57 | // test for that case. 
58 | func (aw *Writer) Close() { 59 | close(aw.flushNow) // Closing the flushNow channel signals the writeLoop to exit 60 | <-aw.flushComplete // Wait until writing is complete 61 | } 62 | 63 | // writeLoop is a goroutine that continuously moves data from the channel to the writer. 64 | func (aw *Writer) writeLoop() { 65 | ticker := time.NewTicker(aw.flushInterval) // Ticker to flush periodically 66 | defer ticker.Stop() // Stop the ticker when the writeLoop exits 67 | 68 | for { 69 | select { 70 | case data := <-aw.datachannel: 71 | aw.writer.Write(data) // Write data from the channel to the writer 72 | 73 | case _, ok := <-aw.flushNow: 74 | aw.flush() 75 | // Signal whoever requested this that flushing is done 76 | aw.flushComplete <- struct{}{} 77 | if !ok { 78 | return 79 | } 80 | 81 | case <-ticker.C: 82 | aw.flush() 83 | } 84 | } 85 | } 86 | 87 | func (aw *Writer) flush() { 88 | // This loop empties the aw.datachannel channel before finally 89 | // calling the underlying writer's Flush() method 90 | for { 91 | select { 92 | case data := <-aw.datachannel: 93 | aw.writer.Write(data) 94 | default: 95 | aw.writer.Flush() 96 | return 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /cmd/bahama/bahama_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestHelpers(t *testing.T) { 10 | f := 1.0 11 | j := 1 12 | mins := []float64{0, -10, 10} 13 | maxs := []float64{2, 0, 20} 14 | expect := []float64{1, 0, 10} 15 | for i := range mins { 16 | coerceFloat(&f, mins[i], maxs[i]) 17 | if f != expect[i] { 18 | t.Errorf("coerceInt made f=%.4f, want %.4f", f, expect[i]) 19 | } 20 | coerceInt(&j, int(mins[i]), int(maxs[i])) 21 | e := int(expect[i]) 22 | if j != e { 23 | t.Errorf("coerceInt made f=%d, want %d", j, e) 24 | } 25 | } 26 | } 27 | 28 | func TestInterleave(t *testing.T) { 29 | const 
ngroup = 3 30 | const npackets = 28 31 | 32 | // First test interleaved, non-staggered packets 33 | c1 := make([]chan []byte, ngroup) 34 | p := make([][]byte, ngroup) 35 | for i := range ngroup { 36 | c1[i] = make(chan []byte) 37 | p[i] = []byte{byte(i)} 38 | } 39 | c2 := make(chan []byte) 40 | go interleavePackets(c2, c1, false) 41 | 42 | for j := range ngroup { 43 | go func(cid int) { 44 | for range npackets { 45 | c1[cid] <- p[cid] 46 | } 47 | close(c1[cid]) 48 | }(j) 49 | } 50 | for i := range npackets * ngroup { 51 | pi, ok := <-c2 52 | if !ok { 53 | t.Errorf("Expected %d non-staggered packets before output channel closed, got %d", npackets*ngroup, i) 54 | } 55 | expect := byte((i / 4) % ngroup) 56 | if pi[0] != expect { 57 | t.Errorf("Non-staggered interleave packet %3d source is %d, want %d", i, pi[0], expect) 58 | } 59 | } 60 | if _, ok := <-c2; ok { 61 | t.Errorf("Expected interleavePackets to close output channel.") 62 | } 63 | 64 | // Now test staggered, interleaved packets 65 | c1 = make([]chan []byte, ngroup) 66 | for i := range ngroup { 67 | c1[i] = make(chan []byte) 68 | } 69 | c2 = make(chan []byte) 70 | go interleavePackets(c2, c1, true) 71 | for j := range ngroup { 72 | go func(cid int) { 73 | for range npackets { 74 | c1[cid] <- p[cid] 75 | } 76 | close(c1[cid]) 77 | }(j) 78 | } 79 | expectby4 := []byte{0, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 1, 1, 2} 80 | for i := range npackets * ngroup { 81 | pi, ok := <-c2 82 | if !ok { 83 | t.Errorf("Expected %d staggered packets before output channel closed, got %d", npackets*ngroup, i) 84 | } 85 | expect := expectby4[i/4] 86 | if pi[0] != expect { 87 | t.Errorf("Staggered interleave packet %3d source is %d, want %d", i, pi[0], expect) 88 | } 89 | } 90 | if _, ok := <-c2; ok { 91 | t.Errorf("Expected interleavePackets to close output channel.") 92 | } 93 | } 94 | 95 | func TestGenerate(t *testing.T) { 96 | cancel := make(chan os.Signal) 97 | go func() { 98 | time.Sleep(40 * time.Millisecond) 
99 | close(cancel) 100 | }() 101 | control := BahamaControl{Nchan: 4, Ngroups: 1, sinusoid: true, sawtooth: true, pulses: true, 102 | noiselevel: 5.0, samplerate: 100000} 103 | 104 | // Keep the data channel drained... 105 | ch := make(chan []byte) 106 | go func() { 107 | for { 108 | <-ch 109 | } 110 | }() 111 | err := generateData(control.Nchan, 0, ch, cancel, control) 112 | if err != nil { 113 | t.Errorf("generateData() returned %s", err.Error()) 114 | } 115 | 116 | } 117 | 118 | func BenchmarkUDPGenerate(b *testing.B) { 119 | cancel := make(chan os.Signal) 120 | go func() { 121 | time.Sleep(60 * time.Second) 122 | close(cancel) 123 | }() 124 | 125 | packetchan := make(chan []byte) 126 | 127 | control := BahamaControl{Nchan: 64, Ngroups: 1, Nsources: 1, pulses: true, 128 | noiselevel: 5.0, samplerate: 244140, port: 4000} 129 | if err := udpwriter(control.port, packetchan); err != nil { 130 | b.Errorf("udpwriter(%d,...) failed: %v\n", control.port, err) 131 | } 132 | if err := generateData(control.Nchan, 0, packetchan, cancel, control); err != nil { 133 | b.Errorf("generateData() returned %s", err.Error()) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /cmd/writetester/writetester.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "path/filepath" 9 | "sync" 10 | "time" 11 | ) 12 | 13 | const flushWithinBlock = true 14 | const flushAfterBlocks = true 15 | 16 | type Writer struct { 17 | FileName string 18 | headerWritten bool 19 | writer *bufio.Writer 20 | } 21 | 22 | func (w *Writer) writeHeader() error { 23 | file, err := os.Create(w.FileName) 24 | if err != nil { 25 | return err 26 | } 27 | w.writer = bufio.NewWriterSize(file, 32768) 28 | w.writer.WriteString("HEADER\n") 29 | w.headerWritten = true 30 | return nil 31 | } 32 | 33 | func (w *Writer) writeRecord(nBytes int) error { 34 | data := 
make([]byte, nBytes) 35 | nWritten, err := w.writer.Write(data) 36 | if nWritten != nBytes { 37 | return fmt.Errorf("wrong number of bytes written") 38 | } 39 | return err 40 | } 41 | 42 | func main() { 43 | dirname, err0 := os.MkdirTemp("", "") 44 | if err0 != nil { 45 | panic(err0) 46 | } 47 | fmt.Println(dirname) 48 | recordLength := 500 49 | numberOfChannels := 240 50 | recordsPerChanPerTick := 5 51 | writers := make([]*Writer, numberOfChannels) 52 | abortChan := make(chan struct{}) 53 | for i := range writers { 54 | fname := filepath.Join(dirname, fmt.Sprintf("%v.ljh", i)) 55 | writers[i] = &Writer{FileName: fname} 56 | } 57 | go func() { 58 | signalChan := make(chan os.Signal) 59 | signal.Notify(signalChan, os.Interrupt) 60 | <-signalChan 61 | close(abortChan) 62 | }() 63 | 64 | tickDuration := 50 * time.Millisecond 65 | ticker := time.NewTicker(tickDuration) 66 | z := 0 67 | tLast := time.Now() 68 | fmt.Printf("recordsPerChanPerTick %v, Chans %v, tickDuration %v\n", recordsPerChanPerTick, numberOfChannels, tickDuration) 69 | fmt.Printf("records/second/chan %v, records/second total %v\n", float64(recordsPerChanPerTick)/tickDuration.Seconds(), float64(recordsPerChanPerTick*numberOfChannels)/tickDuration.Seconds()) 70 | fmt.Printf("megabytes/second total %v\n", float64(recordLength)*float64(recordsPerChanPerTick*numberOfChannels)/tickDuration.Seconds()*1e-6) 71 | fmt.Printf("flushWithinBlock %v, flushAfterBlocks %v\n", flushWithinBlock, flushAfterBlocks) 72 | for { 73 | z++ 74 | // 1. here we would get data from data source 75 | select { 76 | case <-abortChan: 77 | fmt.Println("clean exit") 78 | return 79 | case <-ticker.C: 80 | var wg sync.WaitGroup 81 | writeDurations := make([]time.Duration, numberOfChannels) 82 | flushDurations := make([]time.Duration, numberOfChannels) 83 | for i, w := range writers { 84 | wg.Add(1) 85 | go func(w *Writer, i int) { 86 | // 2. 
here we would process data, we launch one goroutine per channel to parallelize this processing 87 | tStart := time.Now() 88 | defer wg.Done() 89 | // 3. here we write data to disk, still within the same goroutines that did the processing 90 | for range recordsPerChanPerTick { 91 | if !w.headerWritten { 92 | err := w.writeHeader() 93 | if err != nil { 94 | panic(fmt.Sprintf("failed create file and write header: %v\n", err)) 95 | } 96 | } 97 | w.writeRecord(recordLength) 98 | } 99 | tWrite := time.Now() 100 | if flushWithinBlock { 101 | w.writer.Flush() 102 | } 103 | writeDurations[i] = tWrite.Sub(tStart) 104 | flushDurations[i] = time.Since(tWrite) 105 | }(w, i) 106 | } 107 | wg.Wait() 108 | for _, w := range writers { 109 | if flushAfterBlocks { 110 | w.writer.Flush() 111 | } 112 | } 113 | var writeSum time.Duration 114 | var flushSum time.Duration 115 | var writeMax time.Duration 116 | var flushMax time.Duration 117 | for i := range writeDurations { 118 | writeSum += writeDurations[i] 119 | flushSum += flushDurations[i] 120 | if writeDurations[i] > writeMax { 121 | writeMax = writeDurations[i] 122 | } 123 | if flushDurations[i] > flushMax { 124 | flushMax = flushDurations[i] 125 | } 126 | } 127 | if z%100 == 0 || time.Since(tLast) > 75*time.Millisecond { 128 | fmt.Printf("z %v, time.Since(tLast) %v\n", z, time.Since(tLast)) 129 | fmt.Printf("writeMean %v, flushMean %v, writeMax %v, flushMax %v\n", writeSum/time.Duration(numberOfChannels), flushSum/time.Duration(numberOfChannels), writeMax, flushMax) 130 | } 131 | tLast = time.Now() 132 | } 133 | } 134 | 135 | } 136 | -------------------------------------------------------------------------------- /doc/PORTS.md: -------------------------------------------------------------------------------- 1 | # DASTARD TCP PORTS 2 | 3 | (Last revision: 17 March 2021.) 
4 | 5 | DASTARD uses a series of TCP ports for communications with its GUI control 6 | clients (such as [DASTARD-Commander](https://github.com/usnistgov/dastard-commander)) and to publish triggered pulse data for plotting 7 | ([Microscope](https://github.com/usnistgov/microscope)). See also the [UDP ports](#DASTARD-UDP-PORTS). 8 | 9 | These ports are numbered sequentially from a *base port* number. By default, 10 | this number is **BASE=5500**. We might allow this number to be set at the 11 | DASTARD command-line, but for now it's a constant. The TCP ports are: 12 | 13 | * **5500** (base+0): **Control**. JSON-RPC port for controlling DASTARD. (JSON-RPC = "Remote Procedure Calls" specified by JSON data format). Message format is defined by [json-rpc version 1.0](http://www.jsonrpc.org/specification_v1). 14 | * **5501** (base+1): **Status**. ZMQ PUB port where DASTARD reports its status to all control GUIs. 15 | * **5502** (base+2): **Pulses**. ZMQ PUB port where DASTARD puts all pulse records. Subscribe by 4-byte channel number. These are for Microscope to use, so it can plot data. 16 | * **5503** (base+3): **Secondary records**. ZMQ PUB port, same as BASE+2, except that here we put only the secondary triggered records (i.e. from a group trigger). 17 | * **5504** (base+4): **Pulse summaries**. ZMQ PUB port. Just has summary info and model fit coefficients. 18 | 19 | ### JSON-RPC commands (BASE+0) 20 | 21 | Hmm. Should document these. 22 | 23 | ### Status messages (BASE+1) 24 | Format is a text message-key (as a ZMQ frame) then a status block in JSON format. The messages are meant to be adequate to inform all Dastard control clients (the `dastard-commander` GUI, or others) everything they need to know about the Dastard internal state. Message keys include: 25 | 26 | * **ALIVE**: a "heartbeat" message, whether data source is active, time and MB of data since previous ALIVE message. Expect <5 seconds apart.
27 | * **STATUS**: what data source is active; idling or running; What # of rows, columns, channels, samples, and pre-trigger samples. 28 | * **TRIGGER**: contains the complete trigger configuration (publish only when it changes). An efficiency: send only 1 copy of each unique state, along with a list of the channel numbers that are in that specific state. 29 | * **TRIGCOUPLING**: whether FB->Error or Error->FB trigger coupling is active, or neither. 30 | * **STATELABEL**: the current "experiment state". 31 | * **SIMPULSE**: contains the configuration of the Simulated Pulse data source. 32 | * **TRIANGLE**: contains the configuration of the Triangle Wave data source. 33 | * **LANCERO**: contains the configuration of the Lancero data source (e.g., which cards to use, fiber mask, etc.). 34 | * **ABACO**: contains the configuration of the Abaco data source (e.g., which UDP host:ports to use). 35 | * **TRIGGERRATE**: how many triggers have been counted (array-wide) over some duration, plus the clock time of last checked sample. 36 | * **NUMBERWRITTEN**: counts how many records have been written to file. 37 | * **DATADROP**: counts data frames dropped from an active data source (since previous message). 38 | * **EXTERNALTRIGGER**: counts how many external triggers have been seen (since previous message). 39 | * **TESMAP**: characterizes the entire TES array geometry. 40 | * **TESMAPFILE**: names the TES array map file being used. 41 | * **MIX**: TDM mixing state. Like TRIGGER, publish all values that match as a block of identically mixed channels. 42 | * **WRITING**: contains output file information (type, filename, writing status stop/go/pause) (publish on change). 43 | * **CHANNELNAMES**: a list of the unique channel names. 44 | 45 | ### Primary and secondary pulse records (BASE+2 and BASE+3) 46 | 47 | Each message on these ports consists of a single pulse record. The first 2 bytes are an int16 channel number, so that programs can subscribe to specific channels. 
package dastard

import "fmt"

// PhaseUnwrapper makes phase values continuous by adding integer multiples of 2π phase as needed.
// It also optionally inverts channels' data.
// All unwrap arithmetic is performed on uint16 values and deliberately relies on
// modular (mod 2^16) wraparound.
type PhaseUnwrapper struct {
	lastVal       uint16  // previous sample, after masking and bit-dropping
	offset        uint16  // current unwrap offset, in output-LSB units
	fractionBits  uint    // Before unwrapping, this many low bits are fractional ϕ0
	lowBitsToDrop uint    // Drop this many least significant bits in each value
	upperStepLim  int16   // a step larger than this implies a wrap downward
	lowerStepLim  int16   // a step smaller than this implies a wrap upward
	twoPi         uint16  // output-scale value equal to one ϕ0, i.e. 2π of phase
	resetCount    int     // consecutive samples with offset != resetOffset
	resetAfter    int     // jump back to near 0 after this many
	resetOffset   uint16  // the "home" offset that long-term unwrapping returns to
	signMask      RawType // Mask off the upper bits of raw: effect = convert signed->unsigned.
	enable        bool    // are we even unwrapping at all?
	invertData    bool    // do we invert all samples from this channel?
}

// NewPhaseUnwrapper creates a new PhaseUnwrapper object.
// fractionBits: how many low bits of each raw value are fractional ϕ0.
// lowBitsToDrop: how many least-significant bits to discard from each value.
// enable: whether unwrapping is performed at all.
// biasLevel: recenters the step limits, shifting the no-unwrap window.
// resetAfter: samples to wait before snapping offset back to resetOffset.
// pulseSign: +1 or -1; selects the sign of resetOffset.
// invertData: whether all samples from this channel are bit-inverted first.
// Panics (by design; checked by tests) if enable is true with lowBitsToDrop==0
// or with resetAfter<=0.
func NewPhaseUnwrapper(fractionBits, lowBitsToDrop uint, enable bool, biasLevel, resetAfter, pulseSign int,
	invertData bool) *PhaseUnwrapper {

	// Subtle point here: if no bits are to be dropped, then it makes no sense to perform
	// phase unwrapping. When lowBitsToDrop==0, we cannot allow enable==true (because where would you
	// put the bits set in the unwrapping process when there are no dropped bits?)
	if lowBitsToDrop == 0 && enable {
		panic("NewPhaseUnwrapper is enabled but with lowBitsToDrop=0, must be >0.")
	}

	u := new(PhaseUnwrapper)
	// data bytes representing a 2s complement integer
	// where 2^fractionBits = ϕ0 of phase.
	// so int(data[i])/2^fractionBits is a number from -0.5 to 0.5 ϕ0
	// after this function we want 2^(fractionBits-lowBitsToDrop) to be
	// exactly one single ϕ0, or 2π of phase.
	//
	// As of Jan 2021, we decided to let fractionBits = all bits for Abaco sources, so 16
	// or 32 for int16 or int32, but leave that parameter here--ROACH2 sources need it.
	u.fractionBits = fractionBits
	u.lowBitsToDrop = lowBitsToDrop
	u.signMask = ^(RawType(0xffff) << fractionBits)
	u.enable = enable
	u.invertData = invertData

	if lowBitsToDrop > 0 && enable {
		u.twoPi = uint16(1) << (fractionBits - lowBitsToDrop)
		onePi := int16(1) << (fractionBits - lowBitsToDrop - 1)
		// The bias shifts the [-π, +π) step window to [bias-π, bias+π).
		bias := int16(biasLevel>>lowBitsToDrop) % int16(u.twoPi)
		u.upperStepLim = bias + onePi
		u.lowerStepLim = bias - onePi

		// resetOffset for negative pulses is uint16(-2*twoPi): mod-2^16
		// arithmetic makes this act as a negative offset of two ϕ0 units.
		if pulseSign > 0 {
			u.resetOffset = u.twoPi
		} else {
			u.resetOffset = uint16(-2 * int(u.twoPi))
		}
		u.offset = u.resetOffset

		u.resetAfter = resetAfter

		if resetAfter <= 0 && enable {
			panic(fmt.Sprintf("NewPhaseUnwrapper is enabled but with resetAfter=%d, expect positive", resetAfter))
		}
	}
	return u
}

// UnwrapInPlace unwraps in place.
// Processing order matters: (1) optional inversion, (2) mask+drop low bits,
// (3) if enabled, short-term unwrap (step detection) and long-term unwrap
// (snapping offset back to resetOffset after resetAfter samples).
func (u *PhaseUnwrapper) UnwrapInPlace(data *[]RawType) {

	// Invert raw data for all channels that require it, before all other operations.
	if u.invertData {
		for i, rawVal := range *data {
			(*data)[i] = rawVal ^ 0xffff
		}
	}

	drop := u.lowBitsToDrop
	if drop == 0 {
		return
	}

	// When unwrapping is disabled, simply drop the low bits.
	if !u.enable {
		u.resetCount = 0
		for i, rawVal := range *data {
			(*data)[i] = (rawVal & u.signMask) >> drop
		}
		return
	}

	// Enter this loop only if unwrapping is enabled
	for i, rawVal := range *data {
		v := uint16(rawVal&u.signMask) >> drop
		// uint16 subtraction wraps mod 2^16; reinterpreting as int16 yields
		// the signed step between consecutive samples.
		thisstep := int16(v - u.lastVal)
		u.lastVal = v

		// Short-term unwrapping: a step outside (lowerStepLim, upperStepLim)
		// is taken to be a wrap, compensated by adjusting the running offset.
		if thisstep > u.upperStepLim {
			u.offset -= u.twoPi
		} else if thisstep < u.lowerStepLim {
			u.offset += u.twoPi
		}

		// Long-term unwrapping means keeping baseline at same ϕ0.
		// So if the offset is unequal to the resetOffset for a long time, set it to resetOffset.
		// This will cause a one-time jump by an integer number of ϕ0 units (an integer
		// multiple of 2π in phase angle).
		if u.offset == u.resetOffset {
			u.resetCount = 0
		} else {
			u.resetCount++
			if u.resetCount > u.resetAfter {
				u.offset = u.resetOffset
				u.resetCount = 0
			}
		}
		(*data)[i] = RawType(v + u.offset)
	}
}
50 | var fd *os.File 51 | saveData := len(opt.output) > 0 52 | if saveData { 53 | fd, err = os.Create(opt.output) 54 | if err != nil { 55 | return 56 | } 57 | defer fd.Close() 58 | } else { 59 | fd = nil 60 | } 61 | 62 | // Start something?? 63 | 64 | var buffer []byte 65 | var nbytes int 66 | bufsize := 1024*1024 - 16384*2 67 | 68 | // Trap interrupts so we can cleanly exit the program 69 | interruptCatcher := make(chan os.Signal, 1) 70 | signal.Notify(interruptCatcher, os.Interrupt) 71 | 72 | var bufReader *bufio.Reader 73 | if opt.useBuffer { 74 | bufReader = bufio.NewReaderSize(abaco.File, bufsize) 75 | } 76 | 77 | for { 78 | select { 79 | case <-interruptCatcher: 80 | return 81 | default: 82 | buffer = make([]byte, bufsize) 83 | nreads := 0 84 | if opt.useBuffer { 85 | for bytesConsumed := 0; bytesConsumed < bufsize; { 86 | // log.Printf("There are %d bytes ready to read", bufReader.Buffered()) 87 | nbytes, err = bufReader.Read(buffer) 88 | bytesConsumed += nbytes 89 | nreads++ 90 | time.Sleep(1 * time.Millisecond) 91 | 92 | if err == io.EOF { 93 | return 94 | } else if err != nil { 95 | log.Printf("ERROR %v", err) 96 | return 97 | } 98 | } 99 | if err == io.EOF { 100 | return 101 | } else if err != nil { 102 | log.Printf("ERROR %v", err) 103 | return 104 | } 105 | } else { 106 | for bytesConsumed := 0; bytesConsumed < bufsize; { 107 | nbytes, err = abaco.File.Read(buffer[bytesConsumed:]) 108 | bytesConsumed += nbytes 109 | nreads++ 110 | time.Sleep(1 * time.Millisecond) 111 | 112 | if err == io.EOF { 113 | return 114 | } else if err != nil { 115 | log.Printf("ERROR %v", err) 116 | return 117 | } 118 | } 119 | } 120 | 121 | log.Printf("%x %x %x %x\n", buffer[0:4], buffer[4:8], buffer[8:12], buffer[12:16]) 122 | totalBytes := len(buffer) 123 | log.Printf("Filled a buffer of full size %d in %2d reads", len(buffer), nreads) 124 | log.Println() 125 | 126 | if saveData { 127 | bytesWritten := bytesRead 128 | if len(buffer) > 0 { 129 | var n int 130 | if 
len(buffer)+bytesWritten <= opt.nSamples*4 { 131 | n, err = fd.Write(buffer) 132 | } else { 133 | nwrite := opt.nSamples*4 - bytesWritten 134 | n, err = fd.Write(buffer[:nwrite]) 135 | } 136 | if err != nil { 137 | return 138 | } 139 | if n != len(buffer) { 140 | err = fmt.Errorf("Wrote %d bytes, expected %d", n, len(buffer)) 141 | return 142 | } 143 | } 144 | } 145 | 146 | // Quit when read enough samples. 147 | bytesRead += totalBytes 148 | if opt.nSamples > 0 && opt.nSamples <= bytesRead/4 { 149 | return 150 | } 151 | 152 | log.Println() 153 | } 154 | } 155 | 156 | } 157 | 158 | func main() { 159 | err := parseOptions() 160 | if err != nil { 161 | log.Println("ERROR: ", err) 162 | return 163 | } 164 | 165 | abaco, err := dastard.NewAbacoDevice(opt.devnum) 166 | if err != nil { 167 | log.Println("ERROR: ", err) 168 | return 169 | } 170 | // defer abaco.Delete() 171 | 172 | bytesRead, _ := acquire(abaco) 173 | log.Printf("Read %d bytes.\n", bytesRead) 174 | } 175 | -------------------------------------------------------------------------------- /internal/getbytes/getbytes.go: -------------------------------------------------------------------------------- 1 | // These functions use unsafe.Slice, which is available only from Go version 1.17+. 2 | // Dastard version 0.2.16 showed how to use conditional compilation to handle that. 
// fromSlice reinterprets the memory backing any slice as a []byte without
// copying, using unsafe.Slice (Go 1.17+). The result ALIASES d: mutating one
// mutates the other, and the []byte is valid only while d's backing array is.
// An empty or nil input yields an empty (non-nil) []byte.
func fromSlice[T any](d []T) []byte {
	if len(d) == 0 {
		return []byte{}
	}
	n := uintptr(len(d)) * unsafe.Sizeof(d[0])
	return unsafe.Slice((*byte)(unsafe.Pointer(&d[0])), n)
}

// FromSliceUint8 convert a []uint8 to []byte using unsafe
func FromSliceUint8(d []uint8) []byte { return fromSlice(d) }

// FromSliceUint16 convert a []uint16 to []byte using unsafe
func FromSliceUint16(d []uint16) []byte { return fromSlice(d) }

// FromSliceUint32 convert a []uint32 to []byte using unsafe
func FromSliceUint32(d []uint32) []byte { return fromSlice(d) }

// FromSliceUint64 convert a []uint64 to []byte using unsafe
func FromSliceUint64(d []uint64) []byte { return fromSlice(d) }

// FromSliceInt8 convert a []int8 to []byte using unsafe
func FromSliceInt8(d []int8) []byte { return fromSlice(d) }

// FromSliceInt16 convert a []int16 to []byte using unsafe
func FromSliceInt16(d []int16) []byte { return fromSlice(d) }

// FromSliceInt32 convert a []int32 to []byte using unsafe
func FromSliceInt32(d []int32) []byte { return fromSlice(d) }

// FromSliceInt64 convert a []int64 to []byte using unsafe
func FromSliceInt64(d []int64) []byte { return fromSlice(d) }

// FromSliceFloat32 convert a []float32 to []byte using unsafe
func FromSliceFloat32(d []float32) []byte { return fromSlice(d) }

// FromSliceFloat64 convert a []float64 to []byte using unsafe
func FromSliceFloat64(d []float64) []byte { return fromSlice(d) }

// FromUint8 converts a uint8 to []byte using unsafe
func FromUint8(d uint8) []byte { return fromSlice([]uint8{d}) }

// FromUint16 converts a uint16 to []byte using unsafe
func FromUint16(d uint16) []byte { return fromSlice([]uint16{d}) }

// FromUint32 converts a uint32 to []byte using unsafe
func FromUint32(d uint32) []byte { return fromSlice([]uint32{d}) }

// FromUint64 converts a uint64 to []byte using unsafe
func FromUint64(d uint64) []byte { return fromSlice([]uint64{d}) }
int8) []byte { 122 | return FromSliceInt8([]int8{d}) 123 | } 124 | 125 | // FromInt16 converts a int16 to []byte using unsafe 126 | func FromInt16(d int16) []byte { 127 | return FromSliceInt16([]int16{d}) 128 | } 129 | 130 | // FromInt32 converts a int32 to []byte using unsafe 131 | func FromInt32(d int32) []byte { 132 | return FromSliceInt32([]int32{d}) 133 | } 134 | 135 | // FromInt64 converts a int64 to []byte using unsafe 136 | func FromInt64(d int64) []byte { 137 | return FromSliceInt64([]int64{d}) 138 | } 139 | 140 | // FromFloat32 converts a float32 to []byte using unsafe 141 | func FromFloat32(d float32) []byte { 142 | return FromSliceFloat32([]float32{d}) 143 | } 144 | 145 | // FromFloat64 converts a float65 to []byte using unsafe 146 | func FromFloat64(d float64) []byte { 147 | return FromSliceFloat64([]float64{d}) 148 | } 149 | -------------------------------------------------------------------------------- /internal/lancero/no_hardware.go: -------------------------------------------------------------------------------- 1 | package lancero 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | // NoHardware is a drop in replacement for Lancero (implements Lanceroer) 10 | // that requires no hardware. Use it for testing the interface. 11 | type NoHardware struct { 12 | ncols int 13 | nrows int 14 | linePeriod int 15 | nsPerLinePeriod int 16 | isOpen bool 17 | isStarted bool 18 | collectorStarted bool 19 | bytesReleased int 20 | lastReadTime time.Time 21 | minTimeBetweenReads time.Duration 22 | rowCount int 23 | idNum int 24 | } 25 | 26 | // String implements Stringer for NoHardware, aka controls how Println output looks 27 | func (lan NoHardware) String() string { 28 | return fmt.Sprintf("lancero.NoHardware: idNum %v", lan.idNum) 29 | } 30 | 31 | var idNumCounter int 32 | 33 | // NewNoHardware generates and returns a new Lancero object in test mode, 34 | // meaning it emulate a lancero without any hardware. 
Here linePeriod is 35 | // the equivalent of LSYNC: how many 8 ns clocks per row of readout. 36 | func NewNoHardware(ncols int, nrows int, linePeriod int) (*NoHardware, error) { 37 | const nsPerLinePeriod = 8 38 | lan := NoHardware{ncols: ncols, nrows: nrows, linePeriod: linePeriod, 39 | nsPerLinePeriod: nsPerLinePeriod, isOpen: true, lastReadTime: time.Now(), 40 | minTimeBetweenReads: 10 * time.Millisecond, idNum: idNumCounter} 41 | idNumCounter++ 42 | return &lan, nil 43 | } 44 | 45 | // ChangeRingBuffer doesn't error 46 | func (lan *NoHardware) ChangeRingBuffer(length, threshold int) error { 47 | return nil 48 | } 49 | 50 | // Close errors if already closed 51 | func (lan *NoHardware) Close() error { 52 | if !lan.isOpen { 53 | return fmt.Errorf("NoHardware.Close: already closed: id %v", lan.idNum) 54 | } 55 | lan.isOpen = false 56 | return nil 57 | } 58 | 59 | // StartAdapter errors if already started 60 | func (lan *NoHardware) StartAdapter(waitSeconds, verbosity int) error { 61 | if lan.isStarted { 62 | return fmt.Errorf("NoHardware.StartAdapter: already started: id %v", lan.idNum) 63 | } 64 | lan.isStarted = true 65 | return nil 66 | } 67 | 68 | // StopAdapter errors if not started 69 | func (lan *NoHardware) StopAdapter() error { 70 | if !lan.isStarted { 71 | return fmt.Errorf("NoHardware.StopAdapter: not started: id %v", lan.idNum) 72 | } 73 | lan.isStarted = false 74 | return nil 75 | } 76 | 77 | // CollectorConfigure returns nil 78 | func (lan *NoHardware) CollectorConfigure(linePeriod, dataDelay int, channelMask uint32, 79 | frameLength int) error { 80 | return nil 81 | } 82 | 83 | // StartCollector errors if Collector Already Started 84 | func (lan *NoHardware) StartCollector(simulate bool) error { 85 | if lan.collectorStarted { 86 | return fmt.Errorf("NoHardware.StartCollector: collector started already: id %v", lan.idNum) 87 | } 88 | lan.collectorStarted = true 89 | return nil 90 | } 91 | 92 | // StopCollector errors if Collector not started 93 | func 
(lan *NoHardware) StopCollector() error { 94 | if !lan.collectorStarted { 95 | return fmt.Errorf("NoHardware.StopCollector: collector stopped already: id %v", lan.idNum) 96 | } 97 | lan.collectorStarted = false 98 | return nil 99 | } 100 | 101 | // Wait sleeps until lastReadTime + minTimeBetweenReads 102 | func (lan *NoHardware) Wait() (time.Time, time.Duration, error) { 103 | sleepDuration := time.Until(lan.lastReadTime.Add(lan.minTimeBetweenReads)) 104 | time.Sleep(sleepDuration) 105 | now := time.Now() 106 | return now, now.Sub(lan.lastReadTime), nil 107 | } 108 | 109 | // AvailableBuffer returns some simulated data 110 | // size matches what you should get in 1 millisecond 111 | // all entries other than frame bits are zeros 112 | func (lan *NoHardware) AvailableBuffer() ([]byte, time.Time, error) { 113 | var buf bytes.Buffer 114 | now := time.Now() 115 | if !lan.isStarted { 116 | return buf.Bytes(), now, fmt.Errorf("err in NoHardware.AvailableBuffers: not started: id %v", lan.idNum) 117 | } 118 | if !lan.collectorStarted { 119 | return buf.Bytes(), now, fmt.Errorf("err in NoHardware.AvailableBuffers: collector not started: id %v", lan.idNum) 120 | } 121 | if !lan.isOpen { 122 | return buf.Bytes(), now, fmt.Errorf("err in NoHardware.AvailableBuffers: not open: id %v", lan.idNum) 123 | } 124 | sinceLastRead := now.Sub(lan.lastReadTime) 125 | lan.lastReadTime = now 126 | frameDurationNanoseconds := lan.linePeriod * lan.nsPerLinePeriod * lan.nrows 127 | frames := int(sinceLastRead.Nanoseconds()) / frameDurationNanoseconds 128 | // log.Printf("id %v read at %v\n", lan.idNum, time.Now()) 129 | if sinceLastRead > 100*lan.minTimeBetweenReads { 130 | return buf.Bytes(), now, fmt.Errorf("reads were %v apart, want < %v", sinceLastRead, 50*lan.minTimeBetweenReads) 131 | } 132 | 133 | for range frames { // i counts frames 134 | for row := 0; row < lan.nrows; row++ { 135 | for col := 0; col < lan.ncols; col++ { 136 | v := byte(uint8(lan.rowCount)) 137 | lan.rowCount++ 138 | 
if row == 0 { 139 | // first row has frame bit 140 | buf.Write([]byte{0x00, v, 0x01, v}) 141 | } else { 142 | // all data is zeros 143 | buf.Write([]byte{0x00, v, 0x00, v}) 144 | } 145 | } 146 | } 147 | 148 | } 149 | return buf.Bytes(), now, nil 150 | } 151 | 152 | // ReleaseBytes increments bytesReleased 153 | func (lan *NoHardware) ReleaseBytes(nBytes int) error { 154 | lan.bytesReleased += nBytes 155 | return nil 156 | } 157 | 158 | // InspectAdapter does nothing and returns 0 159 | func (lan *NoHardware) InspectAdapter() uint32 { 160 | return uint32(0) 161 | } 162 | -------------------------------------------------------------------------------- /phase_unwrap_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func assertPanic(t *testing.T, f func()) { 8 | defer func() { 9 | if r := recover(); r == nil { 10 | t.Errorf("The code did not panic") 11 | } 12 | }() 13 | f() 14 | } 15 | 16 | func TestUnwrap(t *testing.T) { 17 | const bits2drop = 2 18 | var biaslevel int 19 | const pulsesign = 1 20 | const dontInvert = false 21 | enables := []bool{true, false} 22 | 23 | shouldFail1 := func() { 24 | NewPhaseUnwrapper(13, bits2drop, true, biaslevel, -1, pulsesign, dontInvert) 25 | } 26 | shouldFail2 := func() { 27 | NewPhaseUnwrapper(13, 0, true, biaslevel, -1, pulsesign, dontInvert) 28 | } 29 | assertPanic(t, shouldFail1) 30 | assertPanic(t, shouldFail2) 31 | 32 | NewPhaseUnwrapper(13, bits2drop, false, biaslevel, -1, pulsesign, dontInvert) 33 | NewPhaseUnwrapper(13, bits2drop, true, biaslevel, 100, pulsesign, dontInvert) 34 | 35 | expectA := map[uint]RawType{13: 0x1fff, 14: 0x3fff, 15: 0x7fff, 16: 0xffff} 36 | for fractionbits, expectMask := range expectA { 37 | pu := NewPhaseUnwrapper(fractionbits, bits2drop, true, 0, 20000, pulsesign, dontInvert) 38 | if expectMask != pu.signMask { 39 | t.Errorf("PhaseUnwrapper.signMask=%x, want %x", pu.signMask, expectMask) 40 | } 
41 | } 42 | 43 | // Check inversion happens, even with unwrap disabled and 0 bits to drop 44 | puInverter := NewPhaseUnwrapper(14, 0, false, 0, 20000, pulsesign, true) 45 | data := make([]RawType, 0xffff) 46 | for i := range RawType(0xffff) { 47 | data[i] = i 48 | } 49 | puInverter.UnwrapInPlace(&data) 50 | for i := range RawType(0xffff) { 51 | if data[i]+i != 0xffff { 52 | t.Errorf("unwrapping with inversion 0x%4.4x -> 0x%4.4x, want 0x%4.4x", i, 53 | data[i], 0xffff-i) 54 | } 55 | } 56 | 57 | for fractionbits := uint(13); fractionbits <= 16; fractionbits++ { 58 | for _, enable := range enables { 59 | const resetAfter = 20000 60 | resetValue := RawType(0) 61 | if enable { 62 | resetValue = RawType(1) << (fractionbits - bits2drop) 63 | } 64 | pu := NewPhaseUnwrapper(fractionbits, bits2drop, enable, biaslevel, resetAfter, pulsesign, dontInvert) 65 | const ndata = 16 66 | data := make([]RawType, ndata) 67 | target := make([]RawType, ndata) 68 | 69 | // Test unwrap when no change is expected 70 | pu.UnwrapInPlace(&data) 71 | for i := range ndata { 72 | if data[i] != resetValue { 73 | t.Errorf("data[%d] = %d, want %d", i, data[i], resetValue) 74 | } 75 | } 76 | // Test basic unwrap 77 | twopi := RawType(1) << fractionbits // this is a jump of 2π 78 | baseline := RawType(100) 79 | for i := range ndata { 80 | data[i] = baseline 81 | if i > 5 && i < 10 { 82 | data[i] += twopi 83 | } 84 | target[i] = (baseline >> bits2drop) + resetValue 85 | } 86 | pu.UnwrapInPlace(&data) 87 | 88 | for i, want := range target { 89 | if data[i] != want { 90 | t.Errorf("unwrap fb=%d error: enable=%t, data[%d] = %d, want %d", fractionbits, enable, i, data[i], want) 91 | } 92 | } 93 | // Test unwrap on sawtooth of 4 steps 94 | // Result should be a line. 
95 | step := 1 << (fractionbits - 2) 96 | mod := step * 4 97 | for i := range ndata { 98 | data[i] = RawType((i * step) % mod) 99 | if enable { 100 | target[i] = RawType(i*(step>>bits2drop)) + resetValue 101 | } else { 102 | target[i] = data[i] >> bits2drop 103 | } 104 | } 105 | pu.UnwrapInPlace(&data) 106 | for i, want := range target { 107 | if data[i] != want { 108 | t.Errorf("unwrap: %t, (%d,%d) data[%d] = %d, want %d", enable, fractionbits, 109 | bits2drop, i, data[i], want) 110 | } 111 | } 112 | } 113 | } 114 | 115 | // Test biased unwrapping. Range that does NOT trigger an unwrap should be [-22768,42768] 116 | biasX := 10000 117 | pu1 := NewPhaseUnwrapper(16, bits2drop, true, 0, 100, pulsesign, dontInvert) 118 | pu2 := NewPhaseUnwrapper(16, bits2drop, true, biasX, 100, pulsesign, dontInvert) 119 | // In order, have big steps that overflow both, overflow just the unbiased, negative that overflows just 120 | // the biased, and then negative that overflows both. 121 | steps := []int{80, 40, -20, 0, 44000, 0, 40000, 0, -28000, 0, -40000} 122 | expectsteps1 := []int{20, 10, -5, 0, 11000 - 16384, 0, 10000 - 16384, 0, -7000, 0, 6384} 123 | expectsteps2 := []int{20, 10, -5, 0, 11000 - 16384, 0, 10000, 0, 16384 - 7000, 0, 6384} 124 | 125 | input1 := make([]RawType, 1+len(steps)) 126 | input2 := make([]RawType, 1+len(steps)) 127 | input1[0] = 20000 128 | input2[0] = 20000 129 | for i, val := range steps { 130 | input1[i+1] = input1[i] + RawType(val) 131 | input2[i+1] = input2[i] + RawType(val) 132 | } 133 | pu1.UnwrapInPlace(&input1) 134 | pu2.UnwrapInPlace(&input2) 135 | for i, expect := range expectsteps1 { 136 | step := input1[i+1] - input1[i] 137 | if step != RawType(expect) { 138 | t.Errorf("step[%d]=0x%x-0x%x step %d with bias=0, want %d", i, input1[i+1], input1[i], step, RawType(expect)) 139 | } 140 | } 141 | for i, expect := range expectsteps2 { 142 | step := input2[i+1] - input2[i] 143 | if step != RawType(expect) { 144 | t.Errorf("step[%d]=0x%x-0x%x step %d 
with bias=10000, want %d", i, input2[i+1], input2[i], step, RawType(expect)) 145 | } 146 | } 147 | } 148 | 149 | func BenchmarkPhaseUnwrap(b *testing.B) { 150 | Nsamples := 5000000 151 | data := make([]RawType, Nsamples) 152 | for i := range Nsamples { 153 | data[i] = RawType(i % 50000) 154 | } 155 | 156 | const bits2drop = 2 157 | const bias = 0 158 | const pulsesign = +1 159 | const invertData = false 160 | for fractionbits := uint(13); fractionbits <= 16; fractionbits++ { 161 | const enable = true 162 | const resetAfter = 20000 163 | pu := NewPhaseUnwrapper(fractionbits, bits2drop, enable, bias, resetAfter, pulsesign, invertData) 164 | for b.Loop() { 165 | pu.UnwrapInPlace(&data) 166 | } 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /cmd/dastard/dastard.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "os" 8 | "path" 9 | "path/filepath" 10 | "runtime" 11 | "runtime/pprof" 12 | "strings" 13 | 14 | "github.com/spf13/viper" 15 | "github.com/usnistgov/dastard" 16 | "gopkg.in/natefinch/lumberjack.v2" 17 | ) 18 | 19 | var githash = "githash not computed" 20 | var gitdate = "git date not computed" 21 | var buildDate = "build date not computed" 22 | 23 | // makeFileExist checks that dir/filename exists, and creates the directory 24 | // and file if it doesn't. 25 | func makeFileExist(dir, filename string) (string, error) { 26 | // Replace 1 instance of "$HOME" in the path with the actual home directory. 
27 | if strings.Contains(dir, "$HOME") { 28 | home, err := os.UserHomeDir() 29 | if err != nil { 30 | return "", err 31 | } 32 | dir = strings.Replace(dir, "$HOME", home, 1) 33 | } 34 | 35 | // Create directory , if needed 36 | if _, err := os.Stat(dir); err != nil { 37 | if !os.IsNotExist(err) { 38 | return "", err 39 | } 40 | err2 := os.MkdirAll(dir, 0775) 41 | if err2 != nil { 42 | return "", err2 43 | } 44 | } 45 | 46 | // Create an empty file path/filename, if it doesn't exist. 47 | fullname := path.Join(dir, filename) 48 | _, err := os.Stat(fullname) 49 | if os.IsNotExist(err) { 50 | f, err2 := os.OpenFile(fullname, os.O_WRONLY|os.O_CREATE, 0664) 51 | if err2 != nil { 52 | return "", err2 53 | } 54 | f.Close() 55 | } 56 | return fullname, nil 57 | } 58 | 59 | // setupViper sets up the viper configuration manager: says where to find config 60 | // files and the filename and suffix. Sets some defaults. 61 | func setupViper() error { 62 | viper.SetDefault("Verbose", false) 63 | 64 | HOME, err := os.UserHomeDir() 65 | if err != nil { // Handle errors reading the config file 66 | fmt.Printf("Error finding User Home Dir: %s\n", err) 67 | } 68 | dotDastard := filepath.Join(HOME, ".dastard") 69 | const filename string = "config" 70 | const suffix string = ".yaml" 71 | if _, err := makeFileExist(dotDastard, filename+suffix); err != nil { 72 | return err 73 | } 74 | 75 | viper.SetConfigName(filename) 76 | viper.AddConfigPath(filepath.FromSlash("/etc/dastard")) 77 | viper.AddConfigPath(dotDastard) 78 | viper.AddConfigPath(".") 79 | err = viper.ReadInConfig() // Find and read the config file 80 | if err != nil { // Handle errors reading the config file 81 | return fmt.Errorf("error reading config file: %s", err) 82 | } 83 | return nil 84 | } 85 | 86 | func startLogger(pfname string) *log.Logger { 87 | probFile, err := os.OpenFile(pfname, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) 88 | if err != nil { 89 | msg := fmt.Sprintf("Could not open log file '%s'", pfname) 90 | 
panic(msg) 91 | } 92 | probLogger := log.New(probFile, "", log.LstdFlags) 93 | probLogger.SetOutput(&lumberjack.Logger{ 94 | Filename: pfname, 95 | MaxSize: 10, // megabytes after which new file is created 96 | MaxBackups: 4, // number of backups 97 | MaxAge: 180, // days 98 | Compress: true, // whether to gzip the backups 99 | }) 100 | return probLogger 101 | } 102 | 103 | func main() { 104 | dastard.Build.Date = buildDate 105 | dastard.Build.Githash = githash 106 | dastard.Build.Gitdate = gitdate 107 | dastard.Build.Summary = fmt.Sprintf("DASTARD version %s (git commit %s of %s)", dastard.Build.Version, githash, gitdate) 108 | if host, err := os.Hostname(); err == nil { 109 | dastard.Build.Host = host 110 | } else { 111 | dastard.Build.Host = "host not detected" 112 | } 113 | 114 | printVersion := flag.Bool("version", false, "print version and quit") 115 | cpuprofile := flag.String("cpuprofile", "", "write CPU profile to this file") 116 | memprofile := flag.String("memprofile", "", "write memory profile to this file") 117 | flag.Parse() 118 | 119 | if *printVersion { 120 | fmt.Printf("This is DASTARD version %s\n", dastard.Build.Version) 121 | fmt.Printf("Hostname: %s\n", dastard.Build.Host) 122 | fmt.Printf("Git commit hash: %s\n", githash) 123 | fmt.Printf("Git commit date: %s\n", gitdate) 124 | fmt.Printf("Build date+time: %s\n", buildDate) 125 | fmt.Printf("Built on go version %s\n", runtime.Version()) 126 | fmt.Printf("Running on %d CPUs.\n", runtime.NumCPU()) 127 | os.Exit(0) 128 | } 129 | banner := fmt.Sprintf("\nThis is DASTARD version %s (git commit %s)\n", dastard.Build.Version, githash) 130 | fmt.Print(banner) 131 | 132 | if *cpuprofile != "" { 133 | f, err := os.Create(*cpuprofile) 134 | if err != nil { 135 | log.Fatal(err) 136 | } 137 | pprof.StartCPUProfile(f) 138 | defer pprof.StopCPUProfile() 139 | } 140 | 141 | // Start logging problems and updates to 2 log files. 
142 | HOME, err := os.UserHomeDir() 143 | if err != nil { 144 | panic(err) 145 | } 146 | logdir := filepath.Join(HOME, ".dastard", "logs") 147 | problemname, err := makeFileExist(logdir, "problems.log") 148 | if err != nil { 149 | panic(err) 150 | } 151 | logname, err := makeFileExist(logdir, "updates.log") 152 | if err != nil { 153 | panic(err) 154 | } 155 | dastard.ProblemLogger = startLogger(problemname) 156 | dastard.UpdateLogger = startLogger(logname) 157 | fmt.Printf("Logging problems to %s\n", problemname) 158 | fmt.Printf("Logging client updates to %s\n\n", logname) 159 | dastard.UpdateLogger.Printf("\n\n\n\n%s", banner) 160 | 161 | // Find config file, creating it if needed, and read it. 162 | if err := setupViper(); err != nil { 163 | panic(err) 164 | } 165 | 166 | abort := make(chan struct{}) 167 | go dastard.RunClientUpdater(dastard.Ports.Status, abort) 168 | dastard.RunRPCServer(dastard.Ports.RPC, true) 169 | close(abort) 170 | 171 | if *memprofile != "" { 172 | f, err := os.Create(*memprofile) 173 | if err != nil { 174 | log.Fatal("could not create memory profile: ", err) 175 | } 176 | defer f.Close() // error handling omitted for example 177 | runtime.GC() // get up-to-date statistics 178 | if err := pprof.WriteHeapProfile(f); err != nil { 179 | log.Fatal("could not write memory profile: ", err) 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /writing_state.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | // WritingState monitors the state of file writing. 
12 | type WritingState struct { 13 | Active bool 14 | Paused bool 15 | BasePath string 16 | FilenamePattern string 17 | WriteLJH22 bool // which file formats are active 18 | WriteOFF bool 19 | WriteLJH3 bool 20 | FlushAlsoSyncs bool 21 | experimentStateFile *os.File 22 | ExperimentStateFilename string 23 | ExperimentStateLabel string 24 | ExperimentStateLabelUnixNano int64 25 | ExternalTriggerFilename string 26 | externalTriggerNumberObserved int 27 | externalTriggerFileBufferedWriter *bufio.Writer 28 | externalTriggerTicker *time.Ticker 29 | externalTriggerFile *os.File 30 | DataDropFilename string 31 | dataDropsObserved int 32 | dataDropFileBufferedWriter *bufio.Writer 33 | dataDropTicker *time.Ticker 34 | dataDropFile *os.File 35 | dataDropHaveSentAMessage bool 36 | sync.Mutex 37 | } 38 | 39 | // IsActive will return ws.Active, with proper locking 40 | func (ws *WritingState) IsActive() bool { 41 | ws.Lock() 42 | defer ws.Unlock() 43 | return ws.Active 44 | } 45 | 46 | // ComputeState will return a property-by-property copy of the WritingState. 47 | // It will not copy the "active" features like open files, tickers, etc. 
48 | func (ws *WritingState) ComputeState() *WritingState { 49 | ws.Lock() 50 | defer ws.Unlock() 51 | var copyState WritingState 52 | copyState.Active = ws.Active 53 | copyState.Paused = ws.Paused 54 | copyState.BasePath = ws.BasePath 55 | copyState.FilenamePattern = ws.FilenamePattern 56 | copyState.ExperimentStateFilename = ws.ExperimentStateFilename 57 | copyState.ExperimentStateLabel = ws.ExperimentStateLabel 58 | copyState.ExperimentStateLabelUnixNano = ws.ExperimentStateLabelUnixNano 59 | copyState.ExternalTriggerFilename = ws.ExternalTriggerFilename 60 | copyState.externalTriggerNumberObserved = ws.externalTriggerNumberObserved 61 | copyState.WriteLJH22 = ws.WriteLJH22 62 | copyState.WriteLJH3 = ws.WriteLJH3 63 | copyState.WriteOFF = ws.WriteOFF 64 | copyState.FlushAlsoSyncs = ws.FlushAlsoSyncs 65 | return ©State 66 | } 67 | 68 | // Start will set the WritingState to begin writing 69 | func (ws *WritingState) Start(filenamePattern, path string, config *WriteControlConfig) error { 70 | ws.Lock() 71 | defer ws.Unlock() 72 | ws.Active = true 73 | ws.Paused = false 74 | ws.BasePath = path 75 | ws.WriteLJH22 = config.WriteLJH22 76 | ws.WriteLJH3 = config.WriteLJH3 77 | ws.WriteOFF = config.WriteOFF 78 | ws.FlushAlsoSyncs = config.FlushAlsoSyncs 79 | ws.FilenamePattern = filenamePattern 80 | ws.ExperimentStateFilename = fmt.Sprintf(filenamePattern, "experiment_state", "txt") 81 | ws.ExternalTriggerFilename = fmt.Sprintf(filenamePattern, "external_trigger", "bin") 82 | ws.DataDropFilename = fmt.Sprintf(filenamePattern, "data_drop", "txt") 83 | return ws.setExperimentStateLabel(time.Now(), "START") 84 | } 85 | 86 | // Stop will set the WritingState to be completely stopped 87 | func (ws *WritingState) Stop() error { 88 | ws.Lock() 89 | defer ws.Unlock() 90 | ws.Active = false 91 | ws.Paused = false 92 | ws.FilenamePattern = "" 93 | if ws.experimentStateFile != nil { 94 | if err := ws.setExperimentStateLabel(time.Now(), "STOP"); err != nil { 95 | return err 96 | } 
97 | 98 | if err := ws.experimentStateFile.Close(); err != nil { 99 | return fmt.Errorf("failed to close experimentStatefile, err: %v", err) 100 | } 101 | } 102 | ws.experimentStateFile = nil 103 | ws.ExperimentStateFilename = "" 104 | ws.ExperimentStateLabel = "" 105 | ws.ExperimentStateLabelUnixNano = 0 106 | if ws.externalTriggerFile != nil { 107 | if err := ws.externalTriggerFileBufferedWriter.Flush(); err != nil { 108 | return fmt.Errorf("failed to flush externalTriggerFileBufferedWriter, err: %v", err) 109 | } 110 | if err := ws.externalTriggerFile.Close(); err != nil { 111 | return fmt.Errorf("failed to close externalTriggerFile, err: %v", err) 112 | } 113 | ws.externalTriggerFileBufferedWriter = nil 114 | ws.externalTriggerFile = nil 115 | } 116 | if ws.dataDropFile != nil { 117 | if err := ws.dataDropFileBufferedWriter.Flush(); err != nil { 118 | return fmt.Errorf("failed to flush externalTriggerFileBufferedWriter, err: %v", err) 119 | } 120 | if err := ws.dataDropFile.Close(); err != nil { 121 | return fmt.Errorf("failed to close dataDropFile, err: %v", err) 122 | } 123 | ws.dataDropFileBufferedWriter = nil 124 | ws.dataDropFile = nil 125 | } 126 | ws.externalTriggerNumberObserved = 0 127 | ws.ExternalTriggerFilename = "" 128 | ws.DataDropFilename = "" 129 | return nil 130 | } 131 | 132 | // SetExperimentStateLabel writes to a file with name like XXX_experiment_state.txt 133 | // The file is created upon the first call to this function for a given file writing. 134 | // This exported version locks the WritingState object. 
135 | func (ws *WritingState) SetExperimentStateLabel(timestamp time.Time, stateLabel string) error { 136 | ws.Lock() 137 | defer ws.Unlock() 138 | if !ws.Active { 139 | return fmt.Errorf("cannot set experiment state label when writing is not active") 140 | } 141 | return ws.setExperimentStateLabel(timestamp, stateLabel) 142 | } 143 | 144 | func (ws *WritingState) setExperimentStateLabel(timestamp time.Time, stateLabel string) error { 145 | if ws.experimentStateFile == nil { 146 | // create state file if neccesary 147 | var err error 148 | ws.experimentStateFile, err = os.Create(ws.ExperimentStateFilename) 149 | if err != nil { 150 | return fmt.Errorf("%v, filename: <%v>", err, ws.ExperimentStateFilename) 151 | } 152 | // write header 153 | _, err1 := ws.experimentStateFile.WriteString("# unix time in nanoseconds, state label\n") 154 | if err1 != nil { 155 | return err 156 | } 157 | } 158 | ws.ExperimentStateLabel = stateLabel 159 | ws.ExperimentStateLabelUnixNano = timestamp.UnixNano() 160 | _, err := ws.experimentStateFile.WriteString(fmt.Sprintf("%v, %v\n", ws.ExperimentStateLabelUnixNano, stateLabel)) 161 | if err != nil { 162 | return err 163 | } 164 | return nil 165 | } 166 | -------------------------------------------------------------------------------- /group_trigger_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | // TestBrokerConnections checks that we can connect/disconnect group triggers 10 | // from the broker and the coupling of err and FB into each other for LanceroSources. 11 | func TestBrokerConnections(t *testing.T) { 12 | N := 4 13 | broker := NewTriggerBroker(N) 14 | 15 | // First be sure there are no connections, initially. 
16 | for i := 0; i < N+1; i++ { 17 | for j := 0; j < N+1; j++ { 18 | if broker.isConnected(i, j) { 19 | t.Errorf("New TriggerBroker.isConnected(%d,%d)==true, want false", i, j) 20 | } 21 | } 22 | } 23 | 24 | gts := broker.computeGroupTriggerState() 25 | assert.Equal(t, len(gts.Connections), 0, "TriggerBroker should have 0 connections") 26 | 27 | // Add 2 connections and make sure they are completed, but others aren't. 28 | broker.AddConnection(0, 2) 29 | broker.AddConnection(2, 0) 30 | if !broker.isConnected(0, 2) { 31 | t.Errorf("TriggerBroker.isConnected(0,2)==false, want true") 32 | } 33 | if !broker.isConnected(2, 0) { 34 | t.Errorf("TriggerBroker.isConnected(2,0)==false, want true") 35 | } 36 | i := 1 37 | for j := 0; j < N+1; j++ { 38 | if broker.isConnected(i, j) { 39 | t.Errorf("TriggerBroker.isConnected(%d,%d)==true, want false after connecting 0->2", i, j) 40 | } 41 | } 42 | gts = broker.computeGroupTriggerState() 43 | var expected = [][]int{ 44 | {0, 2}, 45 | {2, 0}, 46 | } 47 | for _, want := range expected { 48 | rx, ok := gts.Connections[want[0]] 49 | assert.Equal(t, ok, true, "Expect connection %d->something", want[0]) 50 | assert.Equal(t, rx[0], want[1], "Expect connection %d->%d", want[0], want[1]) 51 | } 52 | 53 | // Now break the connections and check that they are disconnected 54 | broker.DeleteConnection(0, 2) 55 | broker.DeleteConnection(2, 0) 56 | for i := 0; i < N+1; i++ { 57 | for j := 0; j < N+1; j++ { 58 | if broker.isConnected(i, j) { 59 | t.Errorf("TriggerBroker.isConnected(%d,%d)==true, want false after disconnecting all", i, j) 60 | } 61 | } 62 | } 63 | gts = broker.computeGroupTriggerState() 64 | assert.Equal(t, len(gts.Connections), 0, "TriggerBroker should have 0 connections") 65 | 66 | // Try Add/Delete/check on channel numbers that should fail 67 | if err := broker.AddConnection(0, N); err == nil { 68 | t.Errorf("TriggerBroker.AddConnection(%d,0) should fail but didn't", N) 69 | } 70 | if err := broker.DeleteConnection(0, N); err 
== nil { 71 | t.Errorf("TriggerBroker.DeleteConnection(%d,0) should fail but didn't", N) 72 | } 73 | 74 | // Check the SourcesForReceiver method 75 | for i := -1; i < 1; i++ { 76 | con := broker.SourcesForReceiver(i) 77 | if len(con) > 0 { 78 | t.Errorf("TriggerBroker.SourcesForReceiver(%d)) has length %d, want 0", i, len(con)) 79 | } 80 | } 81 | broker.AddConnection(1, 0) 82 | broker.AddConnection(2, 0) 83 | broker.AddConnection(3, 0) 84 | broker.AddConnection(2, 0) 85 | broker.AddConnection(3, 0) 86 | sources := broker.SourcesForReceiver(0) 87 | if len(sources) != 3 { 88 | t.Errorf("TriggerBroker.SourcesForReceiver(0) has length %d, want 3", len(sources)) 89 | } 90 | if sources[0] { 91 | t.Errorf("TriggerBroker.SourcesForReceiver(0)[0]==true, want false") 92 | } 93 | for i := 1; i < 4; i++ { 94 | if !sources[i] { 95 | t.Errorf("TriggerBroker.SourcesForReceiver(0)[%d]==false, want true", i) 96 | } 97 | } 98 | gts = broker.computeGroupTriggerState() 99 | expected = [][]int{ 100 | {1, 0}, 101 | {2, 0}, 102 | {3, 0}, 103 | } 104 | for _, want := range expected { 105 | rx, ok := gts.Connections[want[0]] 106 | assert.Equal(t, ok, true, "Expect connection %d->something", want[0]) 107 | assert.Equal(t, rx[0], want[1], "Expect connection %d->%d", want[0], want[1]) 108 | } 109 | 110 | // Test StopTriggerCoupling 111 | broker.StopTriggerCoupling() 112 | gts = broker.computeGroupTriggerState() 113 | assert.Equal(t, len(gts.Connections), 0, "TriggerBroker should have 0 connections") 114 | 115 | // Now test FB <-> err coupling. This works when broker is embedded in a 116 | // LanceroSource. 
117 | broker = NewTriggerBroker(N) 118 | var ls LanceroSource 119 | ls.nchan = N 120 | ls.broker = broker 121 | 122 | // FBToErr 123 | if err := ls.SetCoupling(FBToErr); err != nil { 124 | t.Errorf("SetCoupling(FBToErr) failed: %v", err) 125 | } else { 126 | for src := range N { 127 | for rx := range N { 128 | expect := (src-rx) == 1 && src%2 == 1 129 | c := broker.isConnected(src, rx) 130 | if c != expect { 131 | t.Errorf("After FB->Error isConnected(src=%d, rx=%d) is %t, want %t", 132 | src, rx, c, expect) 133 | } 134 | } 135 | } 136 | } 137 | 138 | // ErrToFB 139 | if err := ls.SetCoupling(ErrToFB); err != nil { 140 | t.Errorf("SetCoupling(ErrToFB) failed: %v", err) 141 | } else { 142 | for src := range N { 143 | for rx := range N { 144 | expect := (rx-src) == 1 && src%2 == 0 145 | c := broker.isConnected(src, rx) 146 | if c != expect { 147 | t.Errorf("After Error->Fb isConnected(src=%d, rx=%d) is %t, want %t", 148 | src, rx, c, expect) 149 | } 150 | } 151 | } 152 | } 153 | 154 | // None 155 | if err := ls.SetCoupling(NoCoupling); err != nil { 156 | t.Errorf("SetCoupling(NoCoupling) failed: %v", err) 157 | } else { 158 | for src := range N { 159 | for rx := range N { 160 | expect := false 161 | c := broker.isConnected(src, rx) 162 | if c != expect { 163 | t.Errorf("After NoCoupling isConnected(src=%d, rx=%d) is %t, want %t", 164 | src, rx, c, expect) 165 | } 166 | } 167 | } 168 | } 169 | } 170 | 171 | // TestBrokering checks the group trigger brokering operations. 
172 | func TestBrokering(t *testing.T) { 173 | N := 4 174 | broker := NewTriggerBroker(N) 175 | broker.AddConnection(0, 3) 176 | broker.AddConnection(2, 3) 177 | 178 | for range 3 { 179 | allTrigs := make(map[int]triggerList) 180 | for i := range N { 181 | trigs := triggerList{channelIndex: i, frames: []FrameIndex{FrameIndex(i) + 10, FrameIndex(i) + 20, 30}} 182 | allTrigs[i] = trigs 183 | } 184 | secondaryMap, _ := broker.Distribute(allTrigs) 185 | t0 := secondaryMap[0] 186 | t1 := secondaryMap[1] 187 | t2 := secondaryMap[2] 188 | t3 := secondaryMap[3] 189 | for i, tn := range [][]FrameIndex{t0, t1, t2} { 190 | if len(tn) > 0 { 191 | t.Errorf("TriggerBroker chan %d received %d secondary triggers, want 0", i, len(tn)) 192 | } 193 | } 194 | expected := []FrameIndex{10, 12, 20, 22, 30, 30} 195 | if len(t3) != len(expected) { 196 | t.Errorf("TriggerBroker chan %d received %d secondary triggers, want %d", 3, len(t3), len(expected)) 197 | } 198 | for i := range expected { 199 | if t3[i] != expected[i] { 200 | t.Errorf("TriggerBroker chan %d secondary trig[%d]=%d, want %d", 3, i, t2[i], expected[i]) 201 | } 202 | } 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /roach_test.go: -------------------------------------------------------------------------------- 1 | //go:build !ci 2 | // +build !ci 3 | 4 | // Don't run these Roach tests on CI platforms. There were UDP problems 10/23/2020 on Travis. 
5 | 6 | package dastard 7 | 8 | import ( 9 | "bytes" 10 | "encoding/binary" 11 | "fmt" 12 | "net" 13 | "testing" 14 | "time" 15 | ) 16 | 17 | func newBuffer(nchan, nsamp uint16, sampnum uint64) []byte { 18 | buf := new(bytes.Buffer) 19 | 20 | header := []any{ 21 | uint8(0), // unused 22 | uint8(1), // flag: 1=flux-ramp demodulation 23 | nchan, 24 | nsamp, 25 | uint16(1), // flag: 1 means 2-byte words 26 | sampnum, 27 | } 28 | for _, v := range header { 29 | if err := binary.Write(buf, binary.BigEndian, v); err != nil { 30 | fmt.Println("binary.Write failed:", err) 31 | return buf.Bytes() 32 | } 33 | } 34 | for i := uint16(0); i < nchan*nsamp; i++ { 35 | if err := binary.Write(buf, binary.BigEndian, i); err != nil { 36 | fmt.Println("binary.Write failed:", err) 37 | break 38 | } 39 | } 40 | return buf.Bytes() 41 | } 42 | 43 | func publishRoachPackets(port int, nchan uint16) (closer chan struct{}, err error) { 44 | 45 | host := fmt.Sprintf("127.0.0.1:%d", port) 46 | raddr, err := net.ResolveUDPAddr("udp", host) 47 | if err != nil { 48 | return nil, err 49 | } 50 | conn, err := net.DialUDP("udp", nil, raddr) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | nsamp := 8000 / (2 * nchan) 56 | 57 | closer = make(chan struct{}) 58 | go func() { 59 | defer conn.Close() 60 | for i := uint64(0); ; i += uint64(nsamp) { 61 | select { 62 | case <-closer: 63 | return 64 | default: 65 | buffer := newBuffer(nchan, nsamp, i) 66 | conn.Write(buffer) 67 | // fmt.Printf("Wrote buffer iteration %4d of size %d: %v\n", i, len(buffer), buffer[0:20]) 68 | time.Sleep(100 * time.Millisecond) 69 | } 70 | } 71 | }() 72 | return closer, nil 73 | } 74 | 75 | // TestDevice checks that the raw RoachDevice can receive and parse a header 76 | func TestRoachDevice(t *testing.T) { 77 | // Start generating Roach packets, until closer is closed. 
78 | port := 60001 79 | var nchan uint16 = 40 80 | packetSourceCloser, err := publishRoachPackets(port, nchan) 81 | defer close(packetSourceCloser) 82 | if err != nil { 83 | t.Errorf("publishRoachPackets returned %v", err) 84 | } 85 | 86 | host := fmt.Sprintf("localhost:%d", port) 87 | dev, err := NewRoachDevice(host, 40000.0) 88 | if err != nil { 89 | t.Errorf("NewRoachDevice returned %v", err) 90 | } 91 | dev.unwrapOpts.PulseSign = +1 92 | dev.unwrapOpts.Bias = false 93 | dev.unwrapOpts.RescaleRaw = true 94 | dev.unwrapOpts.Unwrap = true 95 | time.Sleep(50 * time.Millisecond) 96 | err = dev.samplePacket() 97 | if err != nil { 98 | t.Errorf("samplePacket returned %v", err) 99 | } 100 | if dev.nchan != int(nchan) { 101 | t.Errorf("parsed packet header says nchan=%d, want %d", dev.nchan, nchan) 102 | } 103 | 104 | nextBlock := make(chan *dataBlock) 105 | go dev.readPackets(nextBlock) 106 | timeout := time.NewTimer(time.Second) 107 | select { 108 | case <-timeout.C: 109 | t.Errorf("RoachDevice.readPackets launched but no data received after timeout") 110 | case block := <-nextBlock: 111 | if len(block.segments) != dev.nchan { 112 | t.Errorf("RoachDevice block has %d data segments, want %d", len(block.segments), dev.nchan) 113 | } 114 | for i, seg := range block.segments { 115 | want := RawType(i/4) + 4096 116 | if seg.rawData[0] != want { 117 | t.Errorf("RoachDevice block.segments[%d][0] = %d, want %d", 118 | i, seg.rawData[0], want) 119 | } 120 | if len(seg.rawData) != block.nSamp { 121 | t.Errorf("RoachDevice block.segments[%d] length=%d, want %d", i, len(seg.rawData), block.nSamp) 122 | } 123 | } 124 | if block.nSamp < 10 { 125 | t.Errorf("RoachDevice block.nSamp = %d, want at least 10", block.nSamp) 126 | } 127 | } 128 | } 129 | 130 | // TestDevice checks that the full RoachSource can receive and parse a header 131 | func TestRoachSource(t *testing.T) { 132 | // Start generating Roach packets, until closer is closed. 
133 | port := 60005 134 | var nchan uint16 = 40 135 | packetSourceCloser, err := publishRoachPackets(port, nchan) 136 | defer close(packetSourceCloser) 137 | 138 | if err != nil { 139 | t.Errorf("publishRoachPackets returned %v", err) 140 | } 141 | 142 | rs, err := NewRoachSource() 143 | if err != nil { 144 | t.Errorf("NewRoachSource returned %v", err) 145 | } 146 | host := fmt.Sprintf("localhost:%d", port) 147 | config := RoachSourceConfig{ 148 | HostPort: []string{host}, 149 | Rates: []float64{40000.0, 50000.0}, // 2 Rates will be an error 150 | AbacoUnwrapOptions: AbacoUnwrapOptions{ 151 | PulseSign: +1, 152 | Unwrap: false, 153 | }, 154 | } 155 | err = rs.Configure(&config) 156 | if err == nil { 157 | t.Errorf("RoachSource.Configure should fail when HostPort and Rates are of unequal length") 158 | } 159 | 160 | config.Rates = config.Rates[0:1] // Fix rates so it's now of length 1 161 | err = rs.Configure(&config) 162 | if err != nil { 163 | t.Errorf("RoachSource.Configure returned %v", err) 164 | } 165 | if len(rs.active) != 1 { 166 | t.Errorf("RoachSource.active has length %d, want 1", len(rs.active)) 167 | } 168 | dev := rs.active[0] 169 | if dev.conn == nil { 170 | t.Errorf("RoachSource[0].conn is nil, should be connected") 171 | } 172 | if dev.nchan != 0 { 173 | t.Errorf("RoachSource[0].nchan before Sample is %d, should be 0", dev.nchan) 174 | } 175 | 176 | err = rs.Sample() 177 | if err != nil { 178 | t.Errorf("RoachSource.Sample returned %v", err) 179 | } 180 | if dev.nchan != int(nchan) { 181 | t.Errorf("RoachSource[0].nchan after Sample is %d, should be %d", dev.nchan, nchan) 182 | } 183 | 184 | queuedRequests := make(chan func()) 185 | npre := 300 186 | nsamp := 1000 187 | err = Start(rs, queuedRequests, npre, nsamp) 188 | if err != nil { 189 | t.Errorf("Start(RoachSource,...) 
returned %v", err) 190 | } 191 | err = rs.Configure(&config) 192 | if err == nil { 193 | t.Errorf("RoachSource.Configure should fail when source is Active, but it didn't") 194 | } 195 | 196 | timeout := time.NewTimer(500 * time.Millisecond) // this was 200 Millisecond, but tests hung sometimes so I made it bigger 197 | select { 198 | case <-timeout.C: 199 | t.Errorf("RoachDevice.readPackets launched but no data received after timeout") 200 | 201 | case block := <-rs.nextBlock: 202 | if len(block.segments) != dev.nchan { 203 | t.Errorf("RoachSource block has %d data segments, want %d", len(block.segments), dev.nchan) 204 | } 205 | for i, seg := range block.segments { 206 | want := RawType(i/4) + 4096 207 | if seg.rawData[0] != want { 208 | t.Errorf("RoachSource block.segments[%d][0] = %d, want %d", 209 | i, seg.rawData[0], want) 210 | } 211 | if len(seg.rawData) != block.nSamp { 212 | t.Errorf("RoachSource block.segments[%d] length=%d, want %d", i, len(seg.rawData), block.nSamp) 213 | } 214 | } 215 | if block.nSamp < 10 { 216 | t.Errorf("RoachSource block.nSamp = %d, want at least 10", block.nSamp) 217 | } 218 | <-timeout.C 219 | } 220 | 221 | err = rs.Stop() 222 | if err != nil { 223 | t.Errorf("RoachSource.Stop returned %v", err) 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /data_source_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | "testing" 7 | 8 | "gonum.org/v1/gonum/mat" 9 | ) 10 | 11 | func TestTriggerCoupling(t *testing.T) { 12 | ds := AnySource{nchan: 8} 13 | ds.PrepareChannels() 14 | ds.PrepareRun(100, 1000) 15 | var err error 16 | if err = ds.SetCoupling(NoCoupling); err != nil { 17 | t.Errorf("ds.SetCoupling(NoCoupling) should be allowed") 18 | } 19 | if err = ds.SetCoupling(FBToErr); err == nil { 20 | t.Errorf("ds.SetCoupling(FBToErr) should not be allowed (for non-Lancero source)") 21 | } 
22 | 23 | // Make a GTS object with 5 connections 24 | connections := make(map[int][]int) 25 | connections[1] = []int{2, 3, 4} 26 | connections[5] = []int{6, 7} 27 | gts := &GroupTriggerState{Connections: connections} 28 | 29 | // Turn on the 5 connections and check some of them. 30 | ds.ChangeGroupTrigger(true, gts) 31 | if ds.broker.nconnections != 5 { 32 | t.Errorf("Broker has %d connections, want 5", ds.broker.nconnections) 33 | } 34 | for _, rx := range connections[1] { 35 | if !ds.broker.isConnected(1, rx) { 36 | t.Errorf("Broker 1->%d not connected (%v)", rx, ds.broker.sources) 37 | } 38 | } 39 | 40 | // Turn them on again; should not be an error. 41 | ds.ChangeGroupTrigger(true, gts) 42 | if ds.broker.nconnections != 5 { 43 | t.Errorf("Broker has %d connections, want 5", ds.broker.nconnections) 44 | } 45 | 46 | // Turn off the 5 connections and check some of them. 47 | ds.ChangeGroupTrigger(false, gts) 48 | if ds.broker.nconnections != 0 { 49 | t.Errorf("Broker has %d connections, want 0", ds.broker.nconnections) 50 | } 51 | for _, rx := range connections[1] { 52 | if ds.broker.isConnected(1, rx) { 53 | t.Errorf("Broker 1->%d is still connected (%v)", rx, ds.broker.sources) 54 | } 55 | } 56 | 57 | // Turn them off again; should not be an error. 
58 | ds.ChangeGroupTrigger(false, gts) 59 | if ds.broker.nconnections != 0 { 60 | t.Errorf("Broker has %d connections, want 0", ds.broker.nconnections) 61 | } 62 | } 63 | 64 | func TestChannelNames(t *testing.T) { 65 | ds := AnySource{nchan: 4} 66 | ds.PrepareChannels() 67 | expect := []string{"chan0", "chan1", "chan2", "chan3"} 68 | if len(ds.chanNames) != 4 { 69 | t.Errorf("ds.chanNames length = %d, want 4", len(ds.chanNames)) 70 | } else { 71 | for i, n := range ds.chanNames { 72 | if n != expect[i] { 73 | t.Errorf("ds.chanNames[%d]=%q, want %q", i, n, expect[i]) 74 | } 75 | } 76 | } 77 | 78 | row := 3 79 | col := 4 80 | nrows := 5 81 | ncols := 10 82 | code := rcCode(row, col, nrows, ncols) 83 | if code.row() != row { 84 | t.Errorf("rcCode(%d,%d,%d,%d).row() = %d, want %d", row, col, nrows, ncols, code.row(), row) 85 | } 86 | if code.col() != col { 87 | t.Errorf("rcCode(%d,%d,%d,%d).col() = %d, want %d", row, col, nrows, ncols, code.col(), col) 88 | } 89 | if code.rows() != nrows { 90 | t.Errorf("rcCode(%d,%d,%d,%d).rows() = %d, want %d", row, col, nrows, ncols, code.rows(), nrows) 91 | } 92 | if code.cols() != ncols { 93 | t.Errorf("rcCode(%d,%d,%d,%d).cols() = %d, want %d", row, col, nrows, ncols, code.cols(), ncols) 94 | } 95 | } 96 | 97 | func TestWritingFiles(t *testing.T) { 98 | tmp, err1 := os.MkdirTemp("", "dastardTest") 99 | if err1 != nil { 100 | t.Errorf("could not make TempDir") 101 | return 102 | } 103 | defer os.RemoveAll(tmp) 104 | 105 | dir, err2 := makeDirectory(tmp) 106 | if err2 != nil { 107 | t.Error(err2) 108 | } else if !strings.HasPrefix(dir, tmp) { 109 | t.Errorf("Writing in path %s, which should be a prefix of %s", tmp, dir) 110 | } 111 | dir2, err2 := makeDirectory(tmp) 112 | if err2 != nil { 113 | t.Error(err2) 114 | } else if !strings.HasPrefix(dir2, tmp) { 115 | t.Errorf("Writing in path %s, which should be a prefix of %s", tmp, dir2) 116 | } else if !strings.HasSuffix(dir2, "run0001_%s.%s") { 117 | t.Errorf("makeDirectory produces 
%s, of which %q should be a suffix", dir2, "run0001_%s.%s") 118 | } 119 | 120 | if _, err := makeDirectory("/notallowed"); err == nil { 121 | t.Errorf("makeDirectory(%s) should have failed", "/notallowed") 122 | } 123 | 124 | ds := AnySource{nchan: 4} 125 | ds.rowColCodes = make([]RowColCode, ds.nchan) 126 | ds.PrepareChannels() 127 | ds.PrepareRun(256, 1024) 128 | defer ds.Stop() 129 | config := &WriteControlConfig{Request: "Pause", Path: tmp, WriteLJH22: true} 130 | 131 | // set projectors so that we can use WriterOFF = true 132 | config.WriteOFF = true 133 | config.Request = "start" 134 | if err := ds.WriteControl(config); err == nil { 135 | t.Errorf("expected error for asking to WriteOFF with no projectors set\n%v", config.Request) 136 | } 137 | nbases := 1 138 | nsamples := 1024 139 | projectors := mat.NewDense(nbases, nsamples, make([]float64, nbases*nsamples)) 140 | basis := mat.NewDense(nsamples, nbases, make([]float64, nbases*nsamples)) 141 | if err1 := ds.processors[0].SetProjectorsBasis(projectors, basis, "test model"); err1 != nil { 142 | t.Error(err1) 143 | } 144 | config.Request = "Start" 145 | config.WriteLJH22 = true 146 | config.WriteOFF = true 147 | config.WriteLJH3 = true 148 | if err := ds.WriteControl(config); err != nil { 149 | t.Errorf("%v\n%v", err, config.Request) 150 | } 151 | if !ds.processors[0].DataPublisher.HasLJH22() { 152 | t.Error("WriteLJH22 did not result in HasLJH22") 153 | } 154 | if !ds.processors[0].DataPublisher.HasOFF() { 155 | t.Error("WriteOFF did not result in HasOFF") 156 | } 157 | if ds.processors[1].DataPublisher.HasOFF() { 158 | t.Error("WriteOFF resulting in HasOFF for a channel without projectors") 159 | } 160 | if !ds.processors[0].DataPublisher.HasLJH3() { 161 | t.Error("WriteLJH3 did not result in HasLJH3") 162 | } 163 | config.Request = "PAUSE" 164 | if err := ds.WriteControl(config); err != nil { 165 | t.Errorf("%v\n%v", err, config.Request) 166 | } 167 | config.Request = "UnPAUSE " 168 | if err := 
ds.WriteControl(config); err == nil { 169 | t.Errorf("expected error for length==8, %v", config.Request) 170 | } 171 | config.Request = "UnPAUSEZZZZ" 172 | if err := ds.WriteControl(config); err == nil { 173 | t.Errorf("expected error for 8th character not equal to a space, %v", config.Request) 174 | } 175 | config.Request = "UnPAUSE AQ7" 176 | if err := ds.WriteControl(config); err != nil { 177 | t.Error(err) 178 | } 179 | experimentStateFilename := ds.writingState.ExperimentStateFilename 180 | config.Request = "Stop" 181 | if err := ds.WriteControl(config); err != nil { 182 | t.Errorf("%v\n%v", err, config.Request) 183 | } 184 | if ds.processors[0].DataPublisher.HasLJH22() { 185 | t.Error("Stop did not result in !HasLJH22") 186 | } 187 | if ds.processors[0].DataPublisher.HasOFF() { 188 | t.Error("Stop did not result in !HasOFF") 189 | } 190 | if ds.processors[0].DataPublisher.HasLJH3() { 191 | t.Error("Stop did not result in !HasLJH3") 192 | } 193 | 194 | fileContents, err2 := os.ReadFile(experimentStateFilename) 195 | fileContentsStr := string(fileContents) 196 | if err2 != nil { 197 | t.Error(err2) 198 | } 199 | expectFileContentsStr := "# unix time in nanoseconds, state label\n1538424162462127037, START\n1538174046828690465, AQ7\n1538424428433771969, STOP\n" 200 | if !strings.HasPrefix(fileContentsStr, "# unix time in nanoseconds, state label\n") || 201 | !strings.Contains(fileContentsStr, ", START\n") || 202 | !strings.Contains(fileContentsStr, ", AQ7\n") || 203 | !strings.HasSuffix(fileContentsStr, ", STOP\n") || 204 | len(expectFileContentsStr) != len(fileContentsStr) { 205 | t.Errorf("have\n%v\nwant (except timestamps should disagree)\n%v\n", fileContentsStr, expectFileContentsStr) 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /segment_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | "time" 7 
| ) 8 | 9 | // TestSegment checks that DataSegment works as expected 10 | func TestSegment(t *testing.T) { 11 | seg0 := new(DataSegment) 12 | if len(seg0.rawData) > 0 { 13 | t.Errorf(`new(DataSegment) length = %d, want 0`, len(seg0.rawData)) 14 | } 15 | 16 | for _, n := range []int{0, 1, 5, 100} { 17 | data := make([]RawType, n) 18 | segN := NewDataSegment(data, 1, 0, time.Now(), time.Millisecond) 19 | if len(segN.rawData) != n { 20 | t.Errorf("new(DataSegment) length = %d, want %d", len(segN.rawData), n) 21 | } 22 | } 23 | } 24 | 25 | // TestStream checks that DataStream works as expected 26 | func TestStream(t *testing.T) { 27 | str0 := new(DataStream) 28 | if len(str0.rawData) > 0 { 29 | t.Errorf(`new(DataStream) length = %d, want 0`, len(str0.rawData)) 30 | } 31 | 32 | for _, n := range []int{0, 1, 10, 100} { 33 | data := make([]RawType, n) 34 | strN := NewDataStream(data, 1, 0, time.Now(), time.Microsecond) 35 | if len(strN.rawData) != n { 36 | t.Errorf("new(DataStream) length = %d, want %d", len(strN.rawData), n) 37 | } 38 | } 39 | 40 | // Test DataStream.AppendSegment(DataSegment) 41 | ftime := 5 * time.Second 42 | dA := []RawType{0, 1, 2, 3, 4, 5, 6} 43 | dB := []RawType{10, 7, 8, 9, 10} 44 | dC := append(dA, dB...) 
45 | tA := time.Now() 46 | tB := tA.Add(ftime * time.Duration(len(dA))) 47 | strA := NewDataStream(dA, 1, 0, tA, ftime) 48 | strB := NewDataSegment(dB, 1, FrameIndex(len(dA)), tB, ftime) 49 | 50 | strA.AppendSegment(strB) 51 | if len(strA.rawData) != len(dC) { 52 | t.Errorf("DataStream.AppendSegment result was length %d, want %d", len(strA.rawData), len(dC)) 53 | } 54 | if strA.samplesSeen != len(dC) { 55 | t.Errorf("DataStream.AppendSegment samplesSeen was length %d, want %d", strA.samplesSeen, len(dC)) 56 | } 57 | if !reflect.DeepEqual(strA.rawData, dC) { 58 | t.Errorf("DataStream.AppendSegment result was %v, want %v", strA.rawData, dC) 59 | } 60 | expectf1 := strB.firstFrameIndex - FrameIndex(len(dA)) 61 | if strA.firstFrameIndex != expectf1 { 62 | t.Errorf("DataStream.AppendSegment firstFrameIndex = %d, want %d", strA.firstFrameIndex, 63 | expectf1) 64 | } 65 | if strA.firstTime != tA { 66 | t.Errorf("DataStream.AppendSegment firstTime = %v, want %v", strA.firstTime, tA) 67 | } 68 | 69 | // Test DataStream.TrimKeepingN(int) 70 | trimN := []int{100, 11, 10, 9, 8, 5, 8, 5, 2, 1, 0} 71 | for _, N := range trimN { 72 | trimmedLength := len(strA.rawData) 73 | if trimmedLength > N { 74 | trimmedLength = N 75 | } 76 | 77 | strA.TrimKeepingN(N) 78 | expectedData := dC[len(dC)-trimmedLength:] 79 | if len(strA.rawData) != len(expectedData) { 80 | t.Errorf("DataStream.TrimKeepingN result was length %d, want %d", len(strA.rawData), len(expectedData)) 81 | } 82 | if strA.samplesSeen != len(dC) { 83 | t.Errorf("DataStream.TrimKeepingN samplesSeen was length %d, want %d", strA.samplesSeen, len(dC)) 84 | } 85 | if !reflect.DeepEqual(strA.rawData, expectedData) { 86 | t.Errorf("DataStream.TrimKeepingN result was %v, want %v", strA.rawData, expectedData) 87 | } 88 | if cap(strA.rawData) < len(dC) { 89 | t.Errorf("DataStream.TrimKeepingN left cap(rawData)=%d, want at least %d", cap(strA.rawData), len(dC)) 90 | } 91 | expectf1 := FrameIndex(len(dC) - len(strA.rawData)) 92 | if 
strA.firstFrameIndex != expectf1 { 93 | t.Errorf("DataStream.TrimKeepingN firstFrameIndex = %d, want %d", strA.firstFrameIndex, 94 | expectf1) 95 | } 96 | expectt1 := tA.Add(time.Duration(len(dC)-len(strA.rawData)) * ftime) 97 | if strA.firstTime != expectt1 { 98 | t.Errorf("DataStream.TrimKeepingN firstTime = %v, want %v", strA.firstTime, expectt1) 99 | } 100 | } 101 | } 102 | 103 | // TestStreamGap checks that DataStream appends as expected, even when time/frame number 104 | // aren't consistent 105 | func TestStreamGap(t *testing.T) { 106 | ftime := 5 * time.Second 107 | dA := []RawType{6, 4, 2, 5, 1, 0} 108 | dB := []RawType{10, 7, 8, 9, 10} 109 | dC := append(dA, dB...) 110 | gap := 10 111 | fA := FrameIndex(100) 112 | fB := FrameIndex(len(dA)+gap) + fA 113 | tA := time.Now() 114 | tB := tA.Add(ftime * time.Duration(len(dA)+gap)) 115 | strA := NewDataStream(dA, 1, fA, tA, ftime) 116 | strB := NewDataSegment(dB, 1, fB, tB, ftime) 117 | 118 | strA.AppendSegment(strB) 119 | if len(strA.rawData) != len(dC) { 120 | t.Errorf("DataStream.AppendSegment result was length %d, want %d", len(strA.rawData), len(dC)) 121 | } 122 | if !reflect.DeepEqual(strA.rawData, dC) { 123 | t.Errorf("DataStream.AppendSegment result was %v, want %v", strA.rawData, dC) 124 | } 125 | expectf1 := fA + FrameIndex(gap) 126 | if strA.firstFrameIndex != expectf1 { 127 | t.Errorf("DataStream.AppendSegment firstFrameIndex = %d, want %d", strA.firstFrameIndex, 128 | expectf1) 129 | } 130 | expecttA := tA.Add(time.Duration(gap) * ftime) 131 | if strA.TimeOf(0) != expecttA { 132 | t.Errorf("DataStream.AppendSegment firstTime = %v, want %v", strA.firstTime, expecttA) 133 | } 134 | } 135 | 136 | func TestStreamDecimated(t *testing.T) { 137 | ftime := time.Second 138 | dA := []RawType{6, 4, 2, 5, 1, 0} 139 | dB := []RawType{10, 7, 8, 9, 10} 140 | // dC := append(dA, dB...) 
141 | fA := FrameIndex(100) 142 | tA := time.Now() 143 | 144 | decimations := []int{1, 2, 3, 5} 145 | for _, decimationA := range decimations { 146 | aframes := len(dA) * decimationA 147 | for _, decimationB := range decimations { 148 | strA := NewDataStream(dA, decimationA, fA, tA, ftime) 149 | fB := fA + FrameIndex(aframes) 150 | tB := tA.Add(ftime * time.Duration(aframes)) 151 | strB := NewDataSegment(dB, decimationB, fB, tB, ftime) 152 | 153 | strA.AppendSegment(strB) 154 | expectf1 := fB - FrameIndex(len(dA)*decimationB) 155 | if strA.firstFrameIndex != expectf1 { 156 | t.Errorf("DataStream.AppendSegment firstFrameIndex = %d, want %d with dec %d, %d", 157 | strA.firstFrameIndex, expectf1, decimationA, decimationB) 158 | } 159 | expecttA := tA.Add(ftime * time.Duration(len(dA)*(decimationA-decimationB))) 160 | if strA.TimeOf(0) != expecttA { 161 | t.Errorf("DataStream.AppendSegment firstTime = %v, want %v %d, %d", 162 | strA.firstTime, expecttA, decimationA, decimationB) 163 | } 164 | } 165 | 166 | } 167 | } 168 | 169 | func TestDecimation(t *testing.T) { 170 | N := 100 171 | data := make([]RawType, N) 172 | for i := range N { 173 | data[i] = RawType(i * 2) 174 | } 175 | 176 | for _, useAvg := range []bool{true, false} { 177 | for _, decimation := range []int{1, 2, 3, 4, 6} { 178 | ch := new(DataStreamProcessor) 179 | ch.DecimateLevel = decimation 180 | ch.Decimate = decimation > 1 181 | ch.DecimateAvgMode = useAvg 182 | dcopy := make([]RawType, N) 183 | copy(dcopy, data) 184 | seg := &DataSegment{rawData: dcopy, framesPerSample: 1, signed: false} 185 | ch.DecimateData(seg) 186 | if seg.framesPerSample != decimation { 187 | t.Errorf("DataChannel.DecimateData did not alter framesPerSample = %d, want %d", 188 | seg.framesPerSample, decimation) 189 | } 190 | expect := (len(data) + decimation - 1) / decimation 191 | if len(seg.rawData) != expect { 192 | t.Errorf("DataChannel.DecimateData data length = %d, want %d", 193 | len(seg.rawData), expect) 194 | } 195 | for 
i := 0; i < N/decimation; i++ { 196 | expect := i * 2 * decimation 197 | if useAvg { 198 | expect += decimation - 1 199 | } 200 | if seg.rawData[i] != RawType(expect) { 201 | t.Errorf("DataChannel.DecimateData (avg=%v, dec=%d) data[%d] = %d, want %d", 202 | useAvg, decimation, i, seg.rawData[i], expect) 203 | } 204 | } 205 | } 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= 2 | github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= 3 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 4 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 6 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 7 | github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= 8 | github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 9 | github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= 10 | github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= 11 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 12 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 13 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 14 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 15 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 16 | github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 17 | github.com/lorenzosaino/go-sysctl v0.3.1 h1:3phX80tdITw2fJjZlwbXQnDWs4S30beNcMbw0cn0HtY= 18 | github.com/lorenzosaino/go-sysctl v0.3.1/go.mod h1:5grcsBRpspKknNS1qzt1eIeRDLrhpKZAtz8Fcuvs1Rc= 19 | github.com/nlpodyssey/gopickle v0.3.0 h1:BLUE5gxFLyyNOPzlXxt6GoHEMMxD0qhsE4p0CIQyoLw= 20 | github.com/nlpodyssey/gopickle v0.3.0/go.mod h1:f070HJ/yR+eLi5WmM1OXJEGaTpuJEUiib19olXgYha0= 21 | github.com/pebbe/zmq4 v1.4.0 h1:gO5P92Ayl8GXpPZdYcD62Cwbq0slSBVVQRIXwGSJ6eQ= 22 | github.com/pebbe/zmq4 v1.4.0/go.mod h1:nqnPueOapVhE2wItZ0uOErngczsJdLOGkebMxaO8r48= 23 | github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= 24 | github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= 25 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 26 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 27 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= 28 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 29 | github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= 30 | github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= 31 | github.com/sbinet/npyio v0.9.0 h1:A7h8OyYsOsc+NPRtynRMSf70xSgATZNpamNp8nQ8Tjc= 32 | github.com/sbinet/npyio v0.9.0/go.mod h1:vgjQEMRTS9aMS9GdXhr+5jounCmGqjDO2JI+IpSokns= 33 | github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= 34 | github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= 35 | github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= 36 | github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= 37 | github.com/spf13/pflag v1.0.10 
h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= 38 | github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 39 | github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= 40 | github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= 41 | github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 42 | github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= 43 | github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= 44 | github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 45 | go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= 46 | go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= 47 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 48 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 49 | golang.org/x/exp/typeparams v0.0.0-20251209150349-8475f28825e9 h1:DXiKAjbw2KpfWz1Bq2YqF/dBDPEZGJsl3IA2JuVzy8U= 50 | golang.org/x/exp/typeparams v0.0.0-20251209150349-8475f28825e9/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= 51 | golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA= 52 | golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 53 | golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= 54 | golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= 55 | golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= 56 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 57 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 58 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 59 | golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= 60 | golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= 61 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 62 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 63 | golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= 64 | golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 65 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 66 | golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= 67 | golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= 68 | golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 69 | golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= 70 | golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= 71 | golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= 72 | golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= 73 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 74 | gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= 75 | gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= 76 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 77 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 78 | gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 79 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= 80 | gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= 81 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 82 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 83 | honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= 84 | honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= 85 | -------------------------------------------------------------------------------- /internal/lancero/lancero_test.go: -------------------------------------------------------------------------------- 1 | package lancero 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "log" 7 | "math" 8 | "os" 9 | "os/signal" 10 | "strings" 11 | "testing" 12 | 13 | "github.com/davecgh/go-spew/spew" 14 | ) 15 | 16 | // lanceroFBOffset gives the location of the frame bit is in bytes 2, 6, 10... 
17 | const lanceroFBOffset int = 2 18 | 19 | func TestLancero(t *testing.T) { 20 | // call flag.Parse() here if TestMain uses flags 21 | devs, err := EnumerateLanceroDevices() 22 | if err != nil { 23 | t.Error(err) 24 | } 25 | if len(devs) < 1 { 26 | log.Println("found zero lancero devices") 27 | return 28 | } 29 | log.Printf("Found lancero devices %v\n", devs) 30 | devnum := devs[0] 31 | lan, err := NewLancero(devnum) 32 | if err != nil { 33 | t.Error(err) 34 | } 35 | defer lan.Close() 36 | testLanceroerSubroutine(lan, t) 37 | } 38 | 39 | func testLanceroerSubroutine(lan Lanceroer, t *testing.T) (int, int, int, error) { 40 | // devs, err := EnumerateLanceroDevices() 41 | // t.Logf("Lancero devices: %v\n", devs) 42 | // if err != nil { 43 | // t.Errorf("EnumerateLanceroDevices() failed with err=%s", err.Error()) 44 | // } 45 | // 46 | // defer lan.Close() 47 | // if err != nil { 48 | // t.Errorf("%v", err) 49 | // } 50 | var nrows, ncols, linePeriod int 51 | const timeoutSec = 2 52 | const verbosityIsIgnored = 0 53 | if err := lan.StartAdapter(timeoutSec, verbosityIsIgnored); err != nil { 54 | t.Error("failed to start lancero (driver problem):", err) 55 | } 56 | lan.InspectAdapter() 57 | defer lan.StopAdapter() 58 | linePeriodSet := 1 // use dummy values for things we will learn 59 | dataDelay := 1 60 | channelMask := uint32(0xffff) 61 | frameLength := 1 62 | err := lan.CollectorConfigure(linePeriodSet, dataDelay, channelMask, frameLength) 63 | if err != nil { 64 | t.Errorf("CollectorConfigure err, %v", err) 65 | } 66 | simulate := false 67 | err = lan.StartCollector(simulate) 68 | defer lan.StopCollector() 69 | if err != nil { 70 | t.Errorf("StartCollector err, %v", err) 71 | } 72 | interruptCatcher := make(chan os.Signal, 1) 73 | signal.Notify(interruptCatcher, os.Interrupt) 74 | 75 | var bytesRead int 76 | for { 77 | if 100000000 <= bytesRead { 78 | break 79 | } 80 | select { 81 | case <-interruptCatcher: 82 | return 0, 0, 0, fmt.Errorf("interruptCatcher") 83 | 
default: 84 | _, waittime, err := lan.Wait() 85 | if err != nil { 86 | return 0, 0, 0, fmt.Errorf("lan.Wait: %v", err) 87 | } 88 | buffer, _, err := lan.AvailableBuffer() 89 | totalBytes := len(buffer) 90 | // log.Printf("waittime: %v\n", waittime) 91 | if err != nil { 92 | return 0, 0, 0, fmt.Errorf("lan.AvailableBuffers: %v", err) 93 | } 94 | // log.Printf("Found buffers with %9d total bytes, bytes read previously=%10d\n", totalBytes, bytesRead) 95 | if totalBytes > 0 { 96 | q, p, n, err := FindFrameBits(buffer, lanceroFBOffset) 97 | bytesPerFrame := 4 * (p - q) 98 | if err != nil { 99 | log.Println("Error in findFrameBits:", err) 100 | spew.Println(buffer) 101 | log.Println(q, p, n) 102 | return 0, 0, 0, err 103 | } 104 | // log.Println(q, p, bytesPerFrame, n, err) 105 | nrows = (p - q) / n 106 | ncols = n 107 | // log.Println("cols=", n, "rows=", nrows) 108 | periodNS := waittime.Nanoseconds() / (int64(totalBytes) / int64(bytesPerFrame)) 109 | linePeriod = roundint(float64(periodNS) / float64(nrows*8)) // 8 is nanoseconds per row 110 | // log.Printf("frame period %5d ns, linePeriod=%d\n", periodNS, linePeriod) 111 | } 112 | // Quit when read enough samples. 
113 | bytesRead += totalBytes 114 | 115 | lan.ReleaseBytes(totalBytes) 116 | } 117 | } 118 | return ncols, nrows, linePeriod, nil 119 | } 120 | 121 | func TestOdDashTX(t *testing.T) { 122 | b := make([]byte, 10000) 123 | expect := 354 124 | if s := OdDashTX(b, 15); len(s) != expect { 125 | t.Errorf("have %v\n\n WRONG LENGTH, have %v, want %d", s, len(s), expect) 126 | } 127 | for i := range b { 128 | b[i] = byte(i) 129 | } 130 | expect = 1353 131 | if s := OdDashTX(b, 15); len(s) != expect { 132 | t.Errorf("have %v\n\n WRONG LENGTH, have %v, want %d", s, len(s), expect) 133 | } 134 | b = make([]byte, 0) 135 | expect = 254 136 | if s := OdDashTX(b, 15); len(s) != expect { 137 | t.Errorf("have %v\n\n WRONG LENGTH, have %v, want %d", s, len(s), expect) 138 | } 139 | b = []byte{0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 140 | 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde} 141 | if s := OdDashTX(b, 15); strings.Compare("deadbeef", s[255:263]) != 0 { 142 | t.Errorf("have %v, want %v", s[255:263], "deadbeef") 143 | } 144 | } 145 | 146 | // used in TestFindFrameBits 147 | type frameBitFindTestDataMaker struct { 148 | frames int 149 | nrows int 150 | ncols int 151 | leadingWords int 152 | } 153 | 154 | // used in TestFindFrameBits 155 | func (f frameBitFindTestDataMaker) bytes() []byte { 156 | var buf bytes.Buffer 157 | for i := 0; i < f.leadingWords; i++ { 158 | buf.Write([]byte{0x00, 0x00, 0x00, 0x00}) 159 | } 160 | for i := 0; i < f.frames; i++ { // i counts frames 161 | for row := 0; row < f.nrows; row++ { 162 | for col := 0; col < f.ncols; col++ { 163 | if row == 0 { 164 | // first row has frame bit 165 | buf.Write([]byte{0x00, 0x00, 0x01, 0x00}) 166 | } else { 167 | // all data is zeros 168 | buf.Write([]byte{0x00, 0x00, 0x00, 0x00}) 169 | } 170 | } 171 | } 172 | } 173 | 
return buf.Bytes() 174 | } 175 | 176 | func TestFindFrameBits(t *testing.T) { 177 | b := frameBitFindTestDataMaker{frames: 100, nrows: 8, ncols: 2, leadingWords: 0}.bytes() 178 | firstWord, secondWord, nConsecutive, err := FindFrameBits(b, lanceroFBOffset) 179 | if err != nil { 180 | t.Error(err) 181 | } 182 | if firstWord != 16 { 183 | t.Errorf("have %v, want 16", firstWord) 184 | } 185 | if secondWord != 16+16 { 186 | t.Errorf("have %v, want 32", secondWord) 187 | } 188 | if nConsecutive != 2 { 189 | t.Errorf("have %v, want 2", nConsecutive) 190 | } 191 | b = frameBitFindTestDataMaker{frames: 100, nrows: 8, ncols: 2, leadingWords: 0}.bytes() 192 | firstWord, secondWord, nConsecutive, err = FindFrameBits(b[4:len(b)-1], lanceroFBOffset) 193 | // start mid frame bits 194 | if err != nil { 195 | t.Error(err) 196 | } 197 | if firstWord != 15 { 198 | t.Errorf("have %v, want 15", firstWord) 199 | } 200 | if secondWord != 15+16 { 201 | t.Errorf("have %v, want 31", secondWord) 202 | } 203 | if nConsecutive != 2 { 204 | t.Errorf("have %v, want 2", nConsecutive) 205 | } 206 | b = frameBitFindTestDataMaker{frames: 100, nrows: 8, ncols: 2, leadingWords: 10}.bytes() 207 | firstWord, secondWord, nConsecutive, err = FindFrameBits(b, lanceroFBOffset) 208 | if err != nil { 209 | t.Error(err) 210 | } 211 | if firstWord != 10 { 212 | t.Errorf("have %v, want 10", firstWord) 213 | } 214 | if secondWord != 10+16 { 215 | t.Errorf("have %v, want 26", secondWord) 216 | } 217 | if nConsecutive != 2 { 218 | t.Errorf("have %v, want 2", nConsecutive) 219 | } 220 | b = frameBitFindTestDataMaker{frames: 100, nrows: 8, ncols: 8, leadingWords: 0}.bytes() 221 | firstWord, secondWord, nConsecutive, err = FindFrameBits(b, lanceroFBOffset) 222 | if err != nil { 223 | t.Error(err) 224 | } 225 | if firstWord != 64 { 226 | t.Errorf("have %v, want 10", firstWord) 227 | } 228 | if secondWord != 64+64 { 229 | t.Errorf("have %v, want 26", secondWord) 230 | } 231 | if nConsecutive != 8 { 232 | t.Errorf("have 
%v, want 2", nConsecutive) 233 | } 234 | } 235 | 236 | // Imperfect round to nearest integer 237 | func roundint(x float64) int { 238 | return int(x + math.Copysign(0.5, x)) 239 | } 240 | -------------------------------------------------------------------------------- /client_updater.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | // Contain the ClientUpdater object, which publishes JSON-encoded messages 4 | // giving the latest DASTARD state. Most of these messages are saved to 5 | // disk with viper. 6 | 7 | import ( 8 | "encoding/json" 9 | "fmt" 10 | "log" 11 | "os" 12 | "reflect" 13 | "strings" 14 | "time" 15 | 16 | "github.com/pebbe/zmq4" 17 | "github.com/spf13/viper" 18 | ) 19 | 20 | // ClientUpdate carries the messages to be published on the status port. 21 | type ClientUpdate struct { 22 | tag string 23 | state any 24 | } 25 | 26 | // nopublishMessages is a set of message names that you don't send to clients, because they 27 | // contain no configuration that makes sense for clients to hear. 28 | var nopublishMessages = map[string]struct{}{ 29 | "CURRENTTIME": {}, 30 | "___1": {}, 31 | "___2": {}, 32 | "___3": {}, 33 | "___4": {}, 34 | "___5": {}, 35 | } 36 | 37 | // nologMessages is a set of message names that you don't log to the terminal, because they 38 | // are too long or too frequent to bother with. 39 | var nologMessages = map[string]struct{}{ 40 | "TRIGGERRATE": {}, 41 | "CHANNELNAMES": {}, 42 | "ALIVE": {}, 43 | "NUMBERWRITTEN": {}, 44 | "EXTERNALTRIGGER": {}, 45 | } 46 | 47 | // var messageSerial int 48 | 49 | // publish sends to all clients of the status update socket a 2-part message, with 50 | // the `update.tag` as the first part and `message` as the second. The latter should be 51 | // decodable as JSON. 
52 | func publish(pubSocket *zmq4.Socket, update ClientUpdate, message []byte) { 53 | updateType := reflect.TypeOf(update.state).String() 54 | tag := update.tag 55 | if _, ok := nopublishMessages[tag]; ok { 56 | return 57 | } 58 | if _, ok := nologMessages[tag]; !ok { 59 | UpdateLogger.Printf("SEND %v %v\n-> message body: %v\n", tag, updateType, string(message)) 60 | } 61 | // Send the 2-part message to all subscribers (clients). 62 | // If there are errors, retry up to `maxSendAttempts` times with a sleep between. 63 | fullmessage := [][]byte{[]byte(tag), message} 64 | const maxSendAttempts = 5 65 | var err error 66 | for range maxSendAttempts { 67 | if _, err = pubSocket.SendMessage(fullmessage); err == nil { 68 | break 69 | } 70 | time.Sleep(time.Millisecond) 71 | } 72 | if err != nil { 73 | fmt.Printf("Could not send a %s message even with %d attempts in client_updater.publish", tag, maxSendAttempts) 74 | panic(err) 75 | } 76 | } 77 | 78 | var clientMessageChan chan ClientUpdate 79 | 80 | func init() { 81 | clientMessageChan = make(chan ClientUpdate, 10) 82 | } 83 | 84 | // RunClientUpdater forwards any message from its input channel to the ZMQ publisher socket 85 | // to publish any information that clients need to know. 86 | func RunClientUpdater(statusport int, abort <-chan struct{}) { 87 | hostname := fmt.Sprintf("tcp://*:%d", statusport) 88 | pubSocket, err := zmq4.NewSocket(zmq4.PUB) 89 | if err != nil { 90 | panicmsg := fmt.Errorf("could not create client updater port %d\n\terr=%v", statusport, err) 91 | panic(panicmsg) 92 | } 93 | defer pubSocket.Close() 94 | // pubSocket.SetSndhwm(100) 95 | if err = pubSocket.Bind(hostname); err != nil { 96 | panicmsg := fmt.Errorf("could not bind client updater port %d\n\terr=%v", statusport, err) 97 | panic(panicmsg) 98 | } 99 | 100 | // The ZMQ middleware will need some time for existing SUBscribers (and their 101 | // subscription topics) to be hooked up to this new PUBlisher. 
102 | // The result is that the first few messages will be dropped, including the 103 | // NEWDASTARD one. By sleeping a fraction of a second, we can avoid this 104 | // dropped-message problem most of the time (though there's no guarantee). 105 | time.Sleep(250 * time.Millisecond) 106 | 107 | // Save the state to the standard saved-state file this often. 108 | savePeriod := time.Minute 109 | saveStateRegularlyTicker := time.NewTicker(savePeriod) 110 | defer saveStateRegularlyTicker.Stop() 111 | 112 | // And also save state every time it's changed, but after a delay of this long. 113 | saveDelayAfterChange := time.Second * 2 114 | saveStateOnceTimer := time.NewTimer(saveDelayAfterChange) 115 | 116 | // Here, store the last message of each type seen. Use when storing state. 117 | lastMessages := make(map[string]any) 118 | lastMessageStrings := make(map[string]string) 119 | 120 | for { 121 | select { 122 | case <-abort: 123 | return 124 | 125 | case update := <-clientMessageChan: 126 | if update.tag == "SENDALL" { 127 | for k, v := range lastMessages { 128 | publish(pubSocket, ClientUpdate{tag: k, state: v}, []byte(lastMessageStrings[k])) 129 | } 130 | continue 131 | } 132 | 133 | // Send state to clients now. 134 | message, err := json.Marshal(update.state) 135 | if err == nil { 136 | publish(pubSocket, update, message) 137 | } 138 | 139 | // Don't save NEWDASTARD messages--they don't contain state 140 | if update.tag == "NEWDASTARD" { 141 | continue 142 | } 143 | 144 | // Check if the state has changed; if so, remember the message for later 145 | // (we'll need to broadcast it when a new client asks for a SENDALL). 146 | // If it's also NOT on the no-save list, save to Viper config file after a delay. 147 | // The delay allows us to accumulate many near-simultaneous changes then 148 | // save only once. 
149 | updateString := string(message) 150 | if lastMessageStrings[update.tag] != updateString { 151 | lastMessages[update.tag] = update.state 152 | lastMessageStrings[update.tag] = updateString 153 | 154 | if _, ok := nosaveMessages[strings.ToLower((update.tag))]; !ok { 155 | saveStateOnceTimer.Stop() 156 | saveStateOnceTimer = time.NewTimer(saveDelayAfterChange) 157 | } 158 | } 159 | 160 | case <-saveStateRegularlyTicker.C: 161 | saveState(lastMessages) 162 | 163 | case <-saveStateOnceTimer.C: 164 | saveState(lastMessages) 165 | } 166 | } 167 | } 168 | 169 | // nosaveMessages is a set of message names that you don't save, because they 170 | // contain no configuration that makes sense to preserve across runs of dastard. 171 | var nosaveMessages = map[string]struct{}{ 172 | "channelnames": {}, 173 | "alive": {}, 174 | "triggerrate": {}, 175 | "numberwritten": {}, 176 | "newdastard": {}, 177 | "tesmap": {}, 178 | "externaltrigger": {}, 179 | } 180 | 181 | // saveState stores server configuration to the standard config file. 182 | func saveState(lastMessages map[string]any) { 183 | 184 | lastMessages["___1"] = "DASTARD configuration file. Written and read by DASTARD." 185 | lastMessages["___2"] = "Human intervention by experts is permitted but not expected." 186 | now := time.Now().Format(time.UnixDate) 187 | lastMessages["CURRENTTIME"] = now 188 | // Note that the nosaveMessages shouldn't get into the lastMessages map. 189 | for k, v := range lastMessages { 190 | if _, ok := nosaveMessages[strings.ToLower(k)]; !ok { 191 | viper.Set(k, v) 192 | } 193 | } 194 | 195 | mainname := viper.ConfigFileUsed() 196 | tmpname := strings.Replace(mainname, ".yaml", ".tmp.yaml", 1) 197 | bakname := mainname + ".bak" 198 | err := viper.WriteConfigAs(tmpname) 199 | if err != nil { 200 | log.Println("Could not store config file ", tmpname, ": ", err) 201 | return 202 | } 203 | 204 | // Move old config file to backup and new file to standard config name. 
205 | err = os.Remove(bakname) 206 | if err != nil && !os.IsNotExist(err) { 207 | log.Println("Could not remove backup file ", bakname, " even though it exists: ", err) 208 | return 209 | } 210 | err = os.Rename(mainname, bakname) 211 | if err != nil && !os.IsNotExist(err) { 212 | log.Println("Could not save backup file: ", err) 213 | return 214 | } 215 | err = os.Rename(tmpname, mainname) 216 | if err != nil { 217 | log.Printf("Could not update dastard config file %s", mainname) 218 | } 219 | 220 | } 221 | -------------------------------------------------------------------------------- /internal/lancero/cmd/acquire/acquire.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "os" 10 | "os/signal" 11 | 12 | "github.com/usnistgov/dastard/internal/lancero" 13 | ) 14 | 15 | type acquireOptions struct { 16 | period, delay int 17 | length, verbosity int 18 | threshold int 19 | nSamples int 20 | mask uint32 21 | output string 22 | simulate bool 23 | verify bool 24 | oddashtx bool 25 | } 26 | 27 | var opt acquireOptions 28 | 29 | func parseOptions() error { 30 | imask := 0 31 | verify := true 32 | flag.IntVar(&opt.period, "p", 32, "line sync period, in clock cycles") 33 | flag.IntVar(&opt.delay, "d", 0, "data delay, in clock cycles") 34 | flag.IntVar(&opt.length, "l", 32, "frame length") 35 | flag.IntVar(&opt.verbosity, "v", 0, "verbosity level") 36 | flag.IntVar(&opt.threshold, "t", 1024, "threshold (in frames), fill level interrupt") 37 | flag.IntVar(&opt.nSamples, "n", 0, "number of samples to acquire (<=0 means run indenfinitely)") 38 | flag.IntVar(&imask, "m", 0xffff, "channel mask for each of 16 channels") 39 | flag.StringVar(&opt.output, "o", "", "output filename") 40 | flag.BoolVar(&opt.simulate, "s", false, "simulate data (if false, read from fibers)") 41 | flag.BoolVar(&verify, "verify", true, "verify simulated data (set false if 
using many channels)") 42 | flag.BoolVar(&opt.oddashtx, "oddashtx", false, "print od -tx like output") 43 | flag.Parse() 44 | opt.mask = uint32(imask) 45 | opt.verify = opt.simulate && verify 46 | 47 | switch { 48 | case opt.period < 16: 49 | return fmt.Errorf("line sync period (%d) must be at least 16", opt.period) 50 | case opt.period >= 1024: 51 | return fmt.Errorf("line sync period (%d) must be < 1024", opt.period) 52 | case opt.mask > 0xffff: 53 | return fmt.Errorf("line sync period (0x%x) must be < 0xffff", opt.mask) 54 | case opt.delay < 0 || opt.delay >= 32: 55 | return fmt.Errorf("line delay (%d) must be in [0,31]", opt.delay) 56 | case opt.threshold < 1: 57 | return fmt.Errorf("threshold (%d) must be at least 1", opt.threshold) 58 | case opt.threshold < 1024: 59 | log.Printf("WARNING: Threshold (%d) is recommended to be at least 1024", opt.threshold) 60 | } 61 | return nil 62 | } 63 | 64 | type verifier struct { 65 | nRows, nColumns uint32 66 | row, column, error uint32 67 | columns []uint32 68 | mask uint32 69 | messages int 70 | } 71 | 72 | func newVerifier(frameLength uint32, mask uint32) *verifier { 73 | v := &verifier{nRows: frameLength, mask: mask} 74 | for i := range 16 { 75 | if mask&1 != 0 { 76 | v.columns = append(v.columns, uint32(i)) 77 | v.nColumns++ 78 | } 79 | mask = mask >> 1 80 | } 81 | return v 82 | } 83 | 84 | func (v *verifier) checkWord(data uint32) bool { 85 | channel := (data >> 28) & 0xf 86 | row := (data >> 18) & 0x3ff 87 | overRange := data&0x20000 != 0 88 | frame := data&0x10000 != 0 89 | errval := data & 0xffff 90 | 91 | expected := (v.columns[v.column] << 28) | (v.row << 18) | (v.error) 92 | 93 | frameExpected := (v.row == 0) 94 | if frameExpected { 95 | expected |= 0x10000 96 | } 97 | if v.messages < 0 { 98 | log.Printf("verify(): saw 0x%08x, expected 0x%08x\n", data, expected) 99 | v.messages++ 100 | } 101 | 102 | ok := (frame == frameExpected) && !overRange && 103 | (channel == v.columns[v.column]) && (row == v.row) && 
(errval == v.error) 104 | if v.messages < 1000 { 105 | if frame != frameExpected { 106 | log.Printf("verify(): The frame bit was %v, expected %v.\n", frame, frameExpected) 107 | } 108 | if overRange { 109 | log.Println("verify(): The over-range bit was 1, expected 0.") 110 | } 111 | if channel != v.columns[v.column] { 112 | log.Printf("verify(): Saw channel %d, expected %d.\n", channel, v.columns[v.column]) 113 | } 114 | if row != v.row { 115 | log.Printf("verify(): Saw row %d, expected %d.\n", row, v.row) 116 | } 117 | if errval != v.error { 118 | log.Printf("verify(): Saw error val 0x%x, expected 0x%x.\n", errval, v.error) 119 | } 120 | v.messages++ 121 | } 122 | 123 | // Update. The simulator firmware proceeds like this: 124 | // 1 column per value; 1 row each time column wraps; and 1 "error" value 125 | // each time that the row wraps (i.e., per frame). 126 | v.column = (v.column + 1) % v.nColumns 127 | if v.column == 0 { 128 | v.row = (v.row + 1) % v.nRows 129 | if v.row == 0 { 130 | v.error = (v.error + 1) % 0x8000 131 | } 132 | } 133 | return ok 134 | } 135 | 136 | func (v *verifier) checkBuffer(b []byte) bool { 137 | ok := true 138 | buf := bytes.NewReader(b) 139 | var val uint32 140 | for { 141 | err := binary.Read(buf, binary.LittleEndian, &val) 142 | if err != nil { 143 | break 144 | } 145 | ok = v.checkWord(val) && ok 146 | } 147 | return ok 148 | } 149 | 150 | func acquire(lan *lancero.Lancero) (bytesRead int, err error) { 151 | var NROWS uint32 = 32 152 | verifier := newVerifier(NROWS, opt.mask) 153 | 154 | // Store output? 
155 | var fd *os.File 156 | saveData := len(opt.output) > 0 157 | if saveData { 158 | fd, err = os.Create(opt.output) 159 | if err != nil { 160 | return 161 | } 162 | defer fd.Close() 163 | } else { 164 | fd = nil 165 | } 166 | 167 | // Start the adapter 168 | const timeoutSec = 2 169 | const verbosity = 0 170 | err = lan.StartAdapter(timeoutSec, verbosity) 171 | if err != nil { 172 | log.Println("Could not start adapter: ", err) 173 | return 174 | } 175 | defer lan.StopAdapter() 176 | 177 | // Configure and start the collector 178 | err = lan.CollectorConfigure(opt.period, opt.delay, opt.mask, opt.length) 179 | if err != nil { 180 | return 181 | } 182 | err = lan.StartCollector(opt.simulate) 183 | if err != nil { 184 | return 185 | } 186 | defer lan.StopCollector() 187 | defer lan.InspectAdapter() 188 | 189 | var buffer []byte 190 | 191 | // Trap interrupts so we can cleanly exit the program 192 | interruptCatcher := make(chan os.Signal, 1) 193 | signal.Notify(interruptCatcher, os.Interrupt) 194 | 195 | for { 196 | select { 197 | case <-interruptCatcher: 198 | return 199 | default: 200 | _, _, err = lan.Wait() 201 | if err != nil { 202 | return 203 | } 204 | buffer, _, err = lan.AvailableBuffer() 205 | if opt.oddashtx { 206 | log.Println(lancero.OdDashTX(buffer, 20)) 207 | } 208 | totalBytes := len(buffer) 209 | if err != nil { 210 | return 211 | } 212 | log.Printf("Found buffer with %d total bytes", totalBytes) 213 | log.Printf(" size %d,", len(buffer)) 214 | log.Println() 215 | lan.InspectAdapter() 216 | 217 | if saveData { 218 | bytesWritten := bytesRead 219 | if len(buffer) > 0 { 220 | var n int 221 | if len(buffer)+bytesWritten <= opt.nSamples*4 { 222 | n, err = fd.Write(buffer) 223 | } else { 224 | nwrite := opt.nSamples*4 - bytesWritten 225 | n, err = fd.Write(buffer[:nwrite]) 226 | } 227 | if err != nil { 228 | return 229 | } 230 | if n != len(buffer) { 231 | err = fmt.Errorf("wrote %d bytes, expected %d", n, len(buffer)) 232 | return 233 | } 234 | } 235 | 
} 236 | 237 | // Quit when read enough samples. 238 | bytesRead += totalBytes 239 | if opt.nSamples > 0 && opt.nSamples <= bytesRead/4 { 240 | 241 | return 242 | } 243 | 244 | // Verify the simulated data, if simulated. 245 | if opt.simulate && opt.verify { 246 | if ok := verifier.checkBuffer(buffer); !ok { 247 | log.Println("Buffer did not verify.") 248 | return 249 | } 250 | } 251 | 252 | lan.ReleaseBytes(totalBytes) 253 | log.Println() 254 | } 255 | } 256 | 257 | } 258 | 259 | func main() { 260 | err := parseOptions() 261 | if err != nil { 262 | log.Println("ERROR: ", err) 263 | return 264 | } 265 | 266 | lan, err := lancero.NewLancero(0) 267 | if err != nil { 268 | log.Println("ERROR: ", err) 269 | return 270 | } 271 | defer lan.Close() 272 | 273 | bytesRead, _ := acquire(lan) 274 | log.Printf("Read %d bytes.\n", bytesRead) 275 | } 276 | -------------------------------------------------------------------------------- /maps/dastard_raven_map.txt: -------------------------------------------------------------------------------- 1 | spacing: 520 2 | 1 2030 4170 crap 3 | 3 2030 3130 crap 4 | 5 2610 2610 crap 5 | 7 2610 3650 crap 6 | 9 870 1450 crap 7 | 11 1450 3530 crap 8 | 13 1450 2490 crap 9 | 15 1450 1970 crap 10 | 17 290 1330 crap 11 | 19 870 3530 crap 12 | 21 870 2490 crap 13 | 23 290 4450 crap 14 | 25 290 3410 crap 15 | 27 2030 3650 crap 16 | 29 2030 2610 crap 17 | 31 2610 3130 crap 18 | 33 3190 3710 crap 19 | 35 1450 4050 crap 20 | 37 1450 3010 crap 21 | 39 1450 1450 crap 22 | 41 870 4570 crap 23 | 43 870 4050 crap 24 | 45 870 3010 crap 25 | 47 870 1970 crap 26 | 49 290 3930 crap 27 | 51 290 3930 crap 28 | 53 290 2890 crap 29 | 55 290 1850 crap 30 | 57 2950 290 crap 31 | 59 3990 290 crap 32 | 61 2430 290 crap 33 | 63 1390 290 crap 34 | 65 2950 870 crap 35 | 67 3990 870 crap 36 | 69 1910 870 crap 37 | 71 1390 870 crap 38 | 73 2030 2030 crap 39 | 75 3070 1450 crap 40 | 77 4110 1450 crap 41 | 79 3190 2610 crap 42 | 81 3070 2030 crap 43 | 83 3470 290 crap 44 | 85 
4510 290 crap 45 | 87 1910 290 crap 46 | 89 870 290 crap 47 | 91 3470 870 crap 48 | 93 4510 870 crap 49 | 95 870 870 crap 50 | 97 2030 1450 crap 51 | 99 2550 1450 crap 52 | 101 3590 1450 crap 53 | 103 2430 870 crap 54 | 105 3710 2610 crap 55 | 107 3710 2610 crap 56 | 109 3590 2030 crap 57 | 111 2550 2030 crap 58 | 113 4170 -2030 crap 59 | 115 3130 -2030 crap 60 | 117 2610 -2610 crap 61 | 119 3650 -2610 crap 62 | 121 1450 -870 crap 63 | 123 3530 -1450 crap 64 | 125 2490 -1450 crap 65 | 127 1970 -1450 crap 66 | 129 1330 -290 crap 67 | 131 3530 -870 crap 68 | 133 2490 -870 crap 69 | 135 4450 -290 crap 70 | 137 3410 -290 crap 71 | 139 3650 -2030 crap 72 | 141 2610 -2030 crap 73 | 143 3130 -2610 crap 74 | 145 3710 -3190 crap 75 | 147 4050 -1450 crap 76 | 149 3010 -1450 crap 77 | 151 1450 -1450 crap 78 | 153 4570 -870 crap 79 | 155 4050 -870 crap 80 | 157 3010 -870 crap 81 | 159 1970 -870 crap 82 | 161 3930 -290 crap 83 | 163 3930 -290 crap 84 | 165 2890 -290 crap 85 | 167 1850 -290 crap 86 | 169 290 -2950 crap 87 | 171 290 -3990 crap 88 | 173 290 -2430 crap 89 | 175 290 -1390 crap 90 | 177 870 -2950 crap 91 | 179 870 -3990 crap 92 | 181 870 -1910 crap 93 | 183 870 -1390 crap 94 | 185 2030 -2030 crap 95 | 187 1450 -3070 crap 96 | 189 1450 -4110 crap 97 | 191 2610 -3190 crap 98 | 193 2030 -3070 crap 99 | 195 290 -3470 crap 100 | 197 290 -4510 crap 101 | 199 290 -1910 crap 102 | 201 290 -870 crap 103 | 203 870 -3470 crap 104 | 205 870 -4510 crap 105 | 207 870 -870 crap 106 | 209 1450 -2030 crap 107 | 211 1450 -2550 crap 108 | 213 1450 -3590 crap 109 | 215 870 -2430 crap 110 | 217 2610 -3710 crap 111 | 219 2610 -3710 crap 112 | 221 2030 -3590 crap 113 | 223 2030 -2550 crap 114 | 225 -2030 -4170 crap 115 | 227 -2030 -3130 crap 116 | 229 -2610 -2610 crap 117 | 231 -2610 -3650 crap 118 | 233 -870 -1450 crap 119 | 235 -1450 -3530 crap 120 | 237 -1450 -2490 crap 121 | 239 -1450 -1970 crap 122 | 241 -290 -1330 crap 123 | 243 -870 -3530 crap 124 | 245 -870 -2490 crap 125 | 247 
-290 -4450 crap 126 | 249 -290 -3410 crap 127 | 251 -2030 -3650 crap 128 | 253 -2030 -2610 crap 129 | 255 -2610 -3130 crap 130 | 257 -3190 -3710 crap 131 | 259 -1450 -4050 crap 132 | 261 -1450 -3010 crap 133 | 263 -1450 -1450 crap 134 | 265 -870 -4570 crap 135 | 267 -870 -4050 crap 136 | 269 -870 -3010 crap 137 | 271 -870 -1970 crap 138 | 273 -290 -3930 crap 139 | 275 -290 -3930 crap 140 | 277 -290 -2890 crap 141 | 279 -290 -1850 crap 142 | 281 -2950 -290 crap 143 | 283 -3990 -290 crap 144 | 285 -2430 -290 crap 145 | 287 -1390 -290 crap 146 | 289 -2950 -870 crap 147 | 291 -3990 -870 crap 148 | 293 -1910 -870 crap 149 | 295 -1390 -870 crap 150 | 297 -2030 -2030 crap 151 | 299 -3070 -1450 crap 152 | 301 -4110 -1450 crap 153 | 303 -3190 -2610 crap 154 | 305 -3070 -2030 crap 155 | 307 -3470 -290 crap 156 | 309 -4510 -290 crap 157 | 311 -1910 -290 crap 158 | 313 -870 -290 crap 159 | 315 -3470 -870 crap 160 | 317 -4510 -870 crap 161 | 319 -870 -870 crap 162 | 321 -2030 -1450 crap 163 | 323 -2550 -1450 crap 164 | 325 -3590 -1450 crap 165 | 327 -2430 -870 crap 166 | 329 -3710 -2610 crap 167 | 331 -3710 -2610 crap 168 | 333 -3590 -2030 crap 169 | 335 -2550 -2030 crap 170 | 337 -4170 2030 crap 171 | 339 -3130 2030 crap 172 | 341 -2610 2610 crap 173 | 343 -3650 2610 crap 174 | 345 -1450 870 crap 175 | 347 -3530 1450 crap 176 | 349 -2490 1450 crap 177 | 351 -1970 1450 crap 178 | 353 -1330 290 crap 179 | 355 -3530 870 crap 180 | 357 -2490 870 crap 181 | 359 -4450 290 crap 182 | 361 -3410 290 crap 183 | 363 -3650 2030 crap 184 | 365 -2610 2030 crap 185 | 367 -3130 2610 crap 186 | 369 -3710 3190 crap 187 | 371 -4050 1450 crap 188 | 373 -3010 1450 crap 189 | 375 -1450 1450 crap 190 | 377 -4570 870 crap 191 | 379 -4050 870 crap 192 | 381 -3010 870 crap 193 | 383 -1970 870 crap 194 | 385 -3930 290 crap 195 | 387 -3930 290 crap 196 | 389 -2890 290 crap 197 | 391 -1850 290 crap 198 | 393 -290 2950 crap 199 | 395 -290 3990 crap 200 | 397 -290 2430 crap 201 | 399 -290 1390 crap 202 | 
401 -870 2950 crap 203 | 403 -870 3990 crap 204 | 405 -870 1910 crap 205 | 407 -870 1390 crap 206 | 409 -2030 2030 crap 207 | 411 -1450 3070 crap 208 | 413 -1450 4110 crap 209 | 415 -2610 3190 crap 210 | 417 -2030 3070 crap 211 | 419 -290 3470 crap 212 | 421 -290 4510 crap 213 | 423 -290 1910 crap 214 | 425 -290 870 crap 215 | 427 -870 3470 crap 216 | 429 -870 4510 crap 217 | 431 -870 870 crap 218 | 433 -1450 2030 crap 219 | 435 -1450 2550 crap 220 | 437 -1450 3590 crap 221 | 439 -870 2430 crap 222 | 441 -2610 3710 crap 223 | 443 -2610 3710 crap 224 | 445 -2030 3590 crap 225 | 447 -2030 2550 crap 226 | -------------------------------------------------------------------------------- /edge_multi_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestEdgeMultiParts1(t *testing.T) { 10 | raw := [21]RawType{0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0} 11 | iFirst := int32(1) 12 | iLastMax := int32(len(raw) - 1) 13 | threshold := int32(1) 14 | nmonotone := int32(1) 15 | maxNmonotone := int32(1) 16 | enableZeroThreshold := false 17 | iLast := iLastMax - maxNmonotone 18 | result := edgeMultiFindNextTriggerInd(raw[:], iFirst, iLast, threshold, nmonotone, maxNmonotone, enableZeroThreshold) 19 | assert.Equal(t, NextTriggerIndResult{8, true, 10}, result, "basic edgeMultiFindNextTriggerInd usage 1") 20 | result = edgeMultiFindNextTriggerInd(raw[:], result.nextIFirst, iLast, threshold, nmonotone, maxNmonotone, enableZeroThreshold) 21 | assert.Equal(t, NextTriggerIndResult{12, true, 14}, result, "basic edgeMultiFindNextTriggerInd usage 2") 22 | result = edgeMultiFindNextTriggerInd(raw[:], result.nextIFirst, iLast, threshold, nmonotone, maxNmonotone, enableZeroThreshold) 23 | assert.Equal(t, NextTriggerIndResult{0, false, iLast + 1}, result, "basic edgeMultiFindNextTriggerInd usage 3") 24 | 25 | s := EMTState{ 
26 | threshold: 1, mode: EMTRecordsFullLengthIsolated, 27 | nmonotone: 1, npre: 1, nsamp: 2} 28 | recordSpecs := s.edgeMultiComputeRecordSpecs(raw[:], 0) 29 | expectRecordSpecs := [2]RecordSpec{{firstRisingFrameIndex: 8, npre: 1, nsamp: 2}, 30 | {firstRisingFrameIndex: 12, npre: 1, nsamp: 2}} 31 | assert.Equal(t, expectRecordSpecs[:], recordSpecs, "edgeMultiComputeAppendRecordSpecs usage 1") 32 | assert.Equal(t, FrameIndex(12), s.u, "EMTState should have u==v, to indicated that u has has been recordized") 33 | assert.Equal(t, FrameIndex(12), s.v, "EMTState should have u==v, to indicated that u has has been recordized") 34 | 35 | // a record right at the boundary 36 | rawF := [21]RawType{0, 0, 0, 0, 10, 20, 0, 10, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 37 | sF := EMTState{ 38 | threshold: 1, mode: EMTRecordsVariableLength, 39 | nmonotone: 1, npre: 2, nsamp: 4} 40 | expectRecordSpecsF := [2]RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 4}, 41 | {firstRisingFrameIndex: 7, npre: 1, nsamp: 3}} 42 | recordSpecsF1 := sF.edgeMultiComputeRecordSpecs(rawF[0:8], 0) 43 | assert.Equal(t, expectRecordSpecsF[:0], recordSpecsF1, "no triggers first go F2") 44 | assert.Equal(t, FrameIndex(7), sF.nextFrameIndexToInspect, "edgeMultiComputeAppendRecordSpecs usage F2") 45 | n0F := len(rawF) - sF.NToKeepOnTrim() 46 | recordSpecsF2 := sF.edgeMultiComputeRecordSpecs(rawF[n0F:], FrameIndex(n0F)) 47 | assert.Equal(t, expectRecordSpecsF[:], recordSpecsF2, "both triggers 2nd go F2") 48 | 49 | // two records, boundary such that first trigger in firt go, 2nd in 2nd go 50 | rawG := [21]RawType{0, 0, 0, 0, 10, 20, 0, 10, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 51 | sG := EMTState{ 52 | threshold: 1, mode: EMTRecordsVariableLength, 53 | nmonotone: 1, npre: 2, nsamp: 4} 54 | expectRecordSpecsG := [2]RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 4}, 55 | {firstRisingFrameIndex: 7, npre: 1, nsamp: 3}} 56 | recordSpecsG1 := sG.edgeMultiComputeRecordSpecs(rawG[0:10], 0) 57 | 
assert.Equal(t, expectRecordSpecsG[:1], recordSpecsG1, "first record appears edgeMultiComputeAppendRecordSpecs usage G2") 58 | assert.Equal(t, FrameIndex(10), sG.nextFrameIndexToInspect, "edgeMultiComputeAppendRecordSpecs usage G2") 59 | n0G := len(rawG) - sG.NToKeepOnTrim() 60 | recordSpecsG2 := sG.edgeMultiComputeRecordSpecs(rawG[n0G:], FrameIndex(n0G)) 61 | assert.Equal(t, expectRecordSpecsG[1:], recordSpecsG2, "2nd record appears") 62 | 63 | var tests = []struct { 64 | raw []RawType 65 | state EMTState 66 | want []RecordSpec 67 | label string 68 | }{ 69 | { 70 | []RawType{0, 0, 0, 0, 10, 20, 0, 0, 10, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 71 | EMTState{threshold: 1, mode: EMTRecordsFullLengthIsolated, 72 | nmonotone: 1, npre: 2, nsamp: 4}, 73 | []RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 4}, 74 | {firstRisingFrameIndex: 8, npre: 2, nsamp: 4}}, 75 | "edgeMultiComputeAppendRecordSpecs: 2 records far enough apart to both trigger", 76 | }, 77 | { 78 | []RawType{0, 0, 0, 0, 10, 20, 0, 10, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 79 | EMTState{threshold: 1, mode: EMTRecordsFullLengthIsolated, 80 | nmonotone: 1, npre: 2, nsamp: 4}, 81 | []RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 4}}, 82 | "edgeMultiComputeAppendRecordSpecs: 2 records too close so only first should recordize", 83 | }, 84 | { 85 | []RawType{0, 0, 0, 0, 10, 20, 0, 10, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 86 | EMTState{threshold: 1, mode: EMTRecordsTwoFullLength, 87 | nmonotone: 1, npre: 2, nsamp: 4}, 88 | []RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 4}, 89 | {firstRisingFrameIndex: 7, npre: 2, nsamp: 4}}, 90 | "edgeMultiComputeAppendRecordSpecs: 2 records should yield two full-length records with TwoFullLength", 91 | }, 92 | { 93 | []RawType{0, 0, 0, 0, 10, 20, 0, 10, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 94 | EMTState{threshold: 1, mode: EMTRecordsOneFullLength, 95 | nmonotone: 1, npre: 2, nsamp: 4}, 96 | []RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 
4}}, 97 | "edgeMultiComputeAppendRecordSpecs: 2 records should yield one full-length records with OneFullLength", 98 | }, 99 | { 100 | []RawType{0, 0, 0, 0, 10, 20, 0, 10, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 101 | EMTState{threshold: 1, mode: EMTRecordsVariableLength, 102 | nmonotone: 1, npre: 2, nsamp: 4}, 103 | []RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 4}, 104 | {firstRisingFrameIndex: 7, npre: 1, nsamp: 3}}, 105 | "edgeMultiComputeAppendRecordSpecs: 2 records should yield one full-length record, one shorter", 106 | }, 107 | { 108 | []RawType{100, 100, 100, 100, 90, 80, 100, 100, 90, 80, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, 109 | EMTState{threshold: -1, mode: EMTRecordsFullLengthIsolated, 110 | nmonotone: 1, npre: 2, nsamp: 4}, 111 | []RecordSpec{{firstRisingFrameIndex: 4, npre: 2, nsamp: 4}, 112 | {firstRisingFrameIndex: 8, npre: 2, nsamp: 4}}, 113 | "edgeMultiComputeAppendRecordSpecs: 2 records far enough apart to trigger both, negative going", 114 | }, 115 | { 116 | []RawType{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10}, 117 | EMTState{threshold: 1, mode: EMTRecordsTwoFullLength, 118 | nmonotone: 1, npre: 2, nsamp: 4}, 119 | []RecordSpec{{firstRisingFrameIndex: 2, npre: 2, nsamp: 4}, 120 | {firstRisingFrameIndex: 4, npre: 2, nsamp: 4}, 121 | {firstRisingFrameIndex: 6, npre: 2, nsamp: 4}, 122 | {firstRisingFrameIndex: 8, npre: 2, nsamp: 4}, 123 | {firstRisingFrameIndex: 10, npre: 2, nsamp: 4}, 124 | {firstRisingFrameIndex: 12, npre: 2, nsamp: 4}, 125 | {firstRisingFrameIndex: 14, npre: 2, nsamp: 4}, 126 | {firstRisingFrameIndex: 16, npre: 2, nsamp: 4}}, 127 | "edgeMultiComputeAppendRecordSpecs: lots over overlapping full length records", 128 | }, 129 | } 130 | for _, test := range tests { 131 | recordSpecs := test.state.edgeMultiComputeRecordSpecs(test.raw[:], 0) 132 | assert.True(t, test.state.valid(), "EMT state should be valid") 133 | assert.Equal(t, test.want[:], recordSpecs, test.label) 134 | } 135 | 
} 136 | 137 | func TestEdgeMultiShouldRecord(t *testing.T) { 138 | var tests = []struct { 139 | tuv []FrameIndex 140 | mode EMTMode 141 | want []int 142 | valid bool 143 | errmsg string 144 | }{ 145 | {[]FrameIndex{100, 200, 301}, EMTRecordsFullLengthIsolated, []int{200, 50, 100}, true, ""}, 146 | {[]FrameIndex{301, 401, 700}, EMTRecordsFullLengthIsolated, []int{401, 50, 100}, true, ""}, 147 | {[]FrameIndex{55, 55, 700}, EMTRecordsFullLengthIsolated, []int{-1, -1, -1}, false, "u==t is invalid"}, 148 | {[]FrameIndex{55, 700, 700}, EMTRecordsFullLengthIsolated, []int{-1, -1, -1}, false, "u==v is invalid"}, 149 | {[]FrameIndex{401, 460, 500}, EMTRecordsVariableLength, []int{460, 9, 49}, true, 150 | "variable length record are greedy on post trigger, give up pretrigger"}, 151 | } 152 | for _, test := range tests { 153 | T := test.tuv[0] 154 | u := test.tuv[1] 155 | v := test.tuv[2] 156 | rspec, valid := edgeMultiShouldRecord(T, u, v, 50, 100, test.mode) 157 | frfi := FrameIndex(test.want[0]) 158 | npre := int32(test.want[1]) 159 | nsamp := int32(test.want[2]) 160 | assert.Equal(t, RecordSpec{firstRisingFrameIndex: frfi, npre: npre, nsamp: nsamp}, rspec, "1") 161 | assert.Equal(t, test.valid, valid, test.errmsg) 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /internal/off/off.go: -------------------------------------------------------------------------------- 1 | // Package off provides classes that write OFF files 2 | // OFF files store TES pulses projected into a linear basis 3 | // OFF files have a JSON header followed by a single newline 4 | // after the header records are written sequentially in little endian format 5 | // bytes type meaning 6 | // 0-3 int32 recordSamples (could be calculated from nearest neighbor pulses in princple) 7 | // 4-7 int32 recordPreSamples (could be calculated from nearest neighbor pulses in princple) 8 | // 8-15 int64 framecount 9 | // 16-23 int64 timestamp from 
time.Time.UnixNano() 10 | // 24-27 float32 pretriggerMean (from raw data, not from modeled pulse, really shouldn't be neccesary, just in case for now!) 11 | // 28-31 float32 residualStdDev (in raw data space, not Mahalanobis distance) 12 | // 32-Z float32 the NumberOfBases model coefficients of the pulse projected in to the model 13 | // Z = 31+4*NumberOfBases 14 | package off 15 | 16 | import ( 17 | "bufio" 18 | "encoding/json" 19 | "errors" 20 | "fmt" 21 | "os" 22 | "time" 23 | 24 | "github.com/usnistgov/dastard/internal/asyncbufio" 25 | "github.com/usnistgov/dastard/internal/getbytes" 26 | "gonum.org/v1/gonum/mat" 27 | ) 28 | 29 | // The buffer size (bytes) of the bufio.Writer that buffers disk output 30 | const BUFIOSIZE = 65536 31 | 32 | // The capacity of unprocessed pulse records before even the "asynchronous" writes will block. 33 | const WRITECHANCAPACITY = 1000 34 | 35 | // Flush the ouputfile regularly at this interval 36 | const FLUSHINTERVAL = 3 * time.Second 37 | 38 | // Writer writes OFF files 39 | type Writer struct { 40 | ChannelIndex int 41 | ChannelName string 42 | ChannelNumberMatchingName int 43 | MaxPresamples int 44 | MaxSamples int 45 | FramePeriodSeconds float64 46 | FileFormat string 47 | FileFormatVersion string 48 | NumberOfBases int 49 | ModelInfo ModelInfo 50 | CreationInfo CreationInfo 51 | ReadoutInfo TimeDivisionMultiplexingInfo 52 | PixelInfo PixelInfo 53 | 54 | // items not serialized to JSON header 55 | recordsWritten int 56 | fileName string 57 | headerWritten bool 58 | file *os.File 59 | writer *asyncbufio.Writer 60 | syncwithflush bool 61 | } 62 | 63 | // NewWriter creates a new OFF writer. 
No file is created until the first call to WriteRecord 64 | func NewWriter(fileName string, ChannelIndex int, ChannelName string, ChannelNumberMatchingName int, 65 | MaxPresamples int, MaxSamples int, FramePeriodSeconds float64, 66 | Projectors *mat.Dense, Basis *mat.Dense, ModelDescription string, 67 | DastardVersion string, GitHash string, SourceName string, 68 | ReadoutInfo TimeDivisionMultiplexingInfo, pixelInfo PixelInfo) *Writer { 69 | writer := new(Writer) 70 | writer.ChannelIndex = ChannelIndex 71 | writer.ChannelName = ChannelName 72 | writer.ChannelNumberMatchingName = ChannelNumberMatchingName 73 | writer.MaxPresamples = MaxPresamples 74 | writer.MaxSamples = MaxSamples 75 | writer.FramePeriodSeconds = FramePeriodSeconds 76 | writer.FileFormat = "OFF" 77 | writer.FileFormatVersion = "0.3.0" 78 | writer.NumberOfBases, _ = Projectors.Dims() 79 | writer.ModelInfo = ModelInfo{Projectors: *NewArrayJsoner(Projectors), Basis: *NewArrayJsoner(Basis), 80 | Description: ModelDescription, projectors: Projectors, basis: Basis} 81 | writer.CreationInfo = CreationInfo{DastardVersion: DastardVersion, GitHash: GitHash, 82 | SourceName: SourceName, CreationTime: time.Now()} 83 | writer.ReadoutInfo = ReadoutInfo 84 | writer.PixelInfo = pixelInfo 85 | writer.fileName = fileName 86 | return writer 87 | } 88 | 89 | // ModelInfo stores info related to the model (aka basis, aka projectors) for printing to the file header, aids with json formatting 90 | type ModelInfo struct { 91 | Projectors ArrayJsoner 92 | projectors *mat.Dense 93 | Basis ArrayJsoner 94 | basis *mat.Dense 95 | Description string 96 | } 97 | 98 | type PixelInfo struct { 99 | XPosition int 100 | YPosition int 101 | Name string 102 | } 103 | 104 | // ArrayJsoner aids in formatting arrays for writing to JSON 105 | type ArrayJsoner struct { 106 | Rows int 107 | Cols int 108 | SavedAs string 109 | } 110 | 111 | // NewArrayJsoner creates an ArrayJsoner from a mat.Dense 112 | func NewArrayJsoner(array *mat.Dense) 
*ArrayJsoner { 113 | v := new(ArrayJsoner) 114 | v.Rows, v.Cols = array.Dims() 115 | v.SavedAs = "float64 binary data after header and before records. projectors first then basis, nbytes = rows*cols*8 for each projectors and basis" 116 | return v 117 | } 118 | 119 | // CreationInfo stores info related to file creation for printing to the file header, aids with json formatting 120 | type CreationInfo struct { 121 | DastardVersion string 122 | GitHash string 123 | SourceName string 124 | CreationTime time.Time 125 | } 126 | 127 | // TimeDivisionMultiplexingInfo stores info related to tdm readout for printing to the file header, aids with json formatting 128 | type TimeDivisionMultiplexingInfo struct { 129 | NumberOfRows int 130 | NumberOfColumns int 131 | NumberOfChans int 132 | SubframeDivisions int 133 | ColumnNum int 134 | RowNum int 135 | SubframeOffset int 136 | } 137 | 138 | // HeaderWritten returns true if header has been written. 139 | func (w *Writer) HeaderWritten() bool { 140 | return w.headerWritten 141 | } 142 | 143 | // RecordsWritten return the nunber of records written. 
144 | func (w *Writer) RecordsWritten() int { 145 | return w.recordsWritten 146 | } 147 | 148 | // WriteHeader writes a header to the file 149 | func (w *Writer) WriteHeader() error { 150 | if w.headerWritten { 151 | return errors.New("header already written") 152 | } 153 | s, err0 := json.MarshalIndent(w, "", " ") 154 | if err0 != nil { 155 | return err0 156 | } 157 | if _, err := w.writer.Write(s); err != nil { 158 | return err 159 | } 160 | if _, err := w.writer.WriteString("\n"); err != nil { 161 | return err 162 | } 163 | if _, err := w.writer.Write(getbytes.FromSliceFloat64(w.ModelInfo.projectors.RawMatrix().Data)); err != nil { 164 | return err 165 | } 166 | if _, err := w.writer.Write(getbytes.FromSliceFloat64(w.ModelInfo.basis.RawMatrix().Data)); err != nil { 167 | return err 168 | } 169 | w.headerWritten = true 170 | return nil 171 | } 172 | 173 | // WriteRecord writes a record to the file 174 | func (w *Writer) WriteRecord(recordSamples int32, recordPreSamples int32, framecount int64, 175 | timestamp int64, pretriggerMean float32, pretriggerDelta float32, residualStdDev float32, data []float32) error { 176 | if len(data) != w.NumberOfBases { 177 | return fmt.Errorf("wrong number of bases, have %v, want %v", len(data), w.NumberOfBases) 178 | } 179 | if _, err := w.writer.Write(getbytes.FromInt32(int32(recordSamples))); err != nil { 180 | return err 181 | } 182 | if _, err := w.writer.Write(getbytes.FromInt32(int32(recordPreSamples))); err != nil { 183 | return err 184 | } 185 | if _, err := w.writer.Write(getbytes.FromInt64(framecount)); err != nil { 186 | return err 187 | } 188 | if _, err := w.writer.Write(getbytes.FromInt64(timestamp)); err != nil { 189 | return err 190 | } 191 | if _, err := w.writer.Write(getbytes.FromFloat32(pretriggerMean)); err != nil { 192 | return err 193 | } 194 | if _, err := w.writer.Write(getbytes.FromFloat32(pretriggerDelta)); err != nil { 195 | return err 196 | } 197 | if _, err := 
w.writer.Write(getbytes.FromFloat32(residualStdDev)); err != nil { 198 | return err 199 | } 200 | if _, err := w.writer.Write(getbytes.FromSliceFloat32(data)); err != nil { 201 | return err 202 | } 203 | w.recordsWritten++ 204 | return nil 205 | } 206 | 207 | // Flush flushes the write buffer 208 | func (w Writer) Flush() { 209 | if w.writer != nil { 210 | w.writer.Flush() 211 | if w.syncwithflush { 212 | w.file.Sync() 213 | } 214 | } 215 | } 216 | 217 | // Close closes the file, it flushes the bufio.Writer first 218 | func (w Writer) Close() { 219 | if w.writer != nil { 220 | w.writer.Close() 221 | } 222 | w.file.Close() 223 | } 224 | 225 | // SetFlushAlsoSyncs sets whether to call `Sync` with every `Flush` to the output file. 226 | func (w *Writer) SetFlushAlsoSyncs(sync bool) { 227 | w.syncwithflush = sync 228 | } 229 | 230 | // CreateFile creates a file at w.FileName 231 | // must be called before WriteHeader or WriteRecord. 232 | func (w *Writer) CreateFile() error { 233 | if w.file == nil { 234 | file, err := os.Create(w.fileName) 235 | if err != nil { 236 | return err 237 | } 238 | w.file = file 239 | } else { 240 | return errors.New("file already exists") 241 | } 242 | bw := bufio.NewWriterSize(w.file, BUFIOSIZE) 243 | w.writer = asyncbufio.NewWriter(bw, WRITECHANCAPACITY, FLUSHINTERVAL) 244 | return nil 245 | } 246 | -------------------------------------------------------------------------------- /publish_data_test.go: -------------------------------------------------------------------------------- 1 | package dastard 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "path/filepath" 8 | "slices" 9 | "testing" 10 | "time" 11 | 12 | "gonum.org/v1/gonum/mat" 13 | ) 14 | 15 | func TestPublishData(t *testing.T) { 16 | ljh2Testfile := filepath.Join("testData", "TestPublishData.ljh") 17 | ljh3Testfile := filepath.Join("testData", "TestPublishData.ljh3") 18 | offTestfile := filepath.Join("testData", "TestPublishData.off") 19 | 20 | dp := 
DataPublisher{} 21 | d := []RawType{10, 10, 10, 10, 15, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10} 22 | rec := &DataRecord{data: d, presamples: 4, modelCoefs: make([]float64, 3)} 23 | records := []*DataRecord{rec, rec, rec, nil} 24 | // Any errors in the triggering calcultion produce nil values for a record; remove them 25 | records = slices.DeleteFunc(records, func(r *DataRecord) bool { return r == nil }) 26 | 27 | if err := dp.PublishData(records); err != nil { 28 | t.Fail() 29 | } 30 | startTime := time.Now() 31 | dp.SetLJH22(1, 4, len(d), 1, 1, startTime, 8, 1, 16, 8, 3, 0, 3, 32 | ljh2Testfile, "testSource", "chanX", 1, Pixel{}) 33 | if err := dp.PublishData(records); err != nil { 34 | t.Fail() 35 | } 36 | if dp.LJH22.RecordsWritten != 3 { 37 | t.Fail() 38 | } 39 | if dp.numberWritten != 3 { 40 | t.Errorf("expected PublishData numberWritten with LJH22 enabled, (want %d, found %d)", 3, dp.numberWritten) 41 | } 42 | if !dp.HasLJH22() { 43 | t.Error("HasLJH22() false, want true") 44 | } 45 | dp.RemoveLJH22() 46 | if dp.HasLJH22() { 47 | t.Error("HasLJH22() true, want false") 48 | } 49 | if dp.numberWritten != 0 { 50 | t.Errorf("expected RemoveLJH22 to set numberWritten to 0") 51 | } 52 | 53 | if dp.HasPubRecords() { 54 | t.Error("HasPubRecords() true, want false") 55 | } 56 | dp.SetPubRecords() 57 | 58 | if !dp.HasPubRecords() { 59 | t.Error("HasPubRecords() false, want true") 60 | } 61 | 62 | dp.PublishData(records) 63 | if dp.numberWritten != 0 { 64 | t.Errorf("expected PublishData to not increment numberWritten with only PubRecords enabled") 65 | } 66 | dp.RemovePubRecords() 67 | if dp.HasPubRecords() { 68 | t.Error("HasPubRecords() true, want false") 69 | } 70 | 71 | if dp.HasPubSummaries() { 72 | t.Error("HasPubSummaries() true, want false") 73 | } 74 | dp.SetPubSummaries() 75 | 76 | if !dp.HasPubSummaries() { 77 | t.Error("HasPubSummaries() false, want true") 78 | } 79 | 80 | dp.PublishData(records) 81 | 82 | dp.RemovePubSummaries() 83 | if 
dp.HasPubSummaries() { 84 | t.Error("HasPubSummaries() true, want false") 85 | } 86 | 87 | dp.SetLJH3(0, 0, 0, 0, 0, 0, ljh3Testfile) 88 | if err := dp.PublishData(records); err != nil { 89 | t.Error("failed to publish record") 90 | } 91 | if dp.LJH3.RecordsWritten != 3 { 92 | t.Error("wrong number of RecordsWritten, want 3, have", dp.LJH3.RecordsWritten) 93 | } 94 | if dp.numberWritten != 3 { 95 | t.Errorf("expected PublishedData to increment numberWritten, (want %d, found %d)", 3, dp.numberWritten) 96 | } 97 | if !dp.HasLJH3() { 98 | t.Error("HasLJH3() false, want true") 99 | } 100 | dp.RemoveLJH3() 101 | if dp.numberWritten != 0 { 102 | t.Errorf("expected RemoveLJH3 to set numberWritten to 0") 103 | } 104 | if dp.HasLJH3() { 105 | t.Error("HasLJH3() true, want false") 106 | } 107 | 108 | nbases := 3 109 | nsamples := 4 110 | projectors := mat.NewDense(nbases, nsamples, make([]float64, nbases*nsamples)) 111 | basis := mat.NewDense(nsamples, nbases, make([]float64, nbases*nsamples)) 112 | dp.SetOFF(0, 0, 0, 1, 1, time.Now(), 1, 1, 1, 1, 1, 1, 1, offTestfile, "sourceName", 113 | "chanName", 1, projectors, basis, "ModelDescription", Pixel{}) 114 | if err := dp.PublishData(records); err != nil { 115 | t.Error(err) 116 | } 117 | if dp.OFF.RecordsWritten() != 3 { 118 | t.Error("wrong number of RecordsWritten, want 3, have", dp.OFF.RecordsWritten()) 119 | } 120 | if dp.numberWritten != 3 { 121 | t.Errorf("expected PublishedData to increment numberWritten (want %d, found %d)", 3, dp.numberWritten) 122 | } 123 | if !dp.HasOFF() { 124 | t.Error("HasOFF() false, want true") 125 | } 126 | dp.RemoveOFF() 127 | if dp.numberWritten != 0 { 128 | t.Errorf("expected RemoveOFF to set numberWritten to 0") 129 | } 130 | if dp.HasOFF() { 131 | t.Error("HasOFF() true, want false") 132 | } 133 | 134 | if err := configurePubRecordsSocket(); err == nil { 135 | t.Error("it should be an error to configurePubRecordsSocket twice") 136 | } 137 | if err := configurePubSummariesSocket(); err == 
nil { 138 | t.Error("it should be an error to configurePubSummariesSocket twice") 139 | } 140 | 141 | rec = &DataRecord{data: d, presamples: 4} 142 | for i, signed := range []bool{false, true} { 143 | (*rec).signed = signed 144 | msg := messageRecords(rec) 145 | header := msg[0] 146 | dtype := header[3] 147 | expect := []uint8{3, 2} 148 | if dtype != expect[i] { 149 | t.Errorf("messageRecords with signed=%t gives dtype=%d, want %d", 150 | signed, dtype, expect[i]) 151 | } 152 | 153 | } 154 | 155 | } 156 | 157 | func TestRawTypeToX(t *testing.T) { 158 | d := []RawType{0xFFFF, 0x0101, 0xABCD, 0xEF01, 0x2345, 0x6789} 159 | b := rawTypeToBytes(d) 160 | encodedStr := hex.EncodeToString(b) 161 | expectStr := "ffff0101cdab01ef45238967" 162 | if encodedStr != expectStr { 163 | t.Errorf("hex.EncodeToString(rawTypeToBytes(d)) have %v, want %v", encodedStr, expectStr) 164 | } 165 | if len(b) != 2*len(d) { 166 | t.Errorf("rawTypeToBytes giveswrong length, have %v, want %v", len(b), len(d)) 167 | } 168 | c := rawTypeToUint16(d) 169 | expect := []uint16{0xFFFF, 0x0101, 0xABCD, 0xEF01, 0x2345, 0x6789} 170 | if len(c) != len(expect) { 171 | t.Errorf("rawTypeToUint16 length %d, want %d", len(c), len(expect)) 172 | } 173 | for i, v := range expect { 174 | if c[i] != v { 175 | t.Errorf("rawTypeToUint16(b)[%d] = %v, want %v", i, c[i], v) 176 | } 177 | } 178 | 179 | d2 := bytesToRawType(b) 180 | if len(d) != len(d2) { 181 | t.Errorf("bytesToRawType length %d, want %d", len(d2), len(d)) 182 | } 183 | for i, val := range d { 184 | if d2[i] != val { 185 | t.Errorf("bytesToRawType(b)[%d] = 0x%x, want 0x%x", i, d2[i], val) 186 | } 187 | } 188 | } 189 | 190 | func BenchmarkPublish(b *testing.B) { 191 | ljh2Testfile := filepath.Join("testData", "TestPublishData.ljh") 192 | ljh3Testfile := filepath.Join("testData", "TestPublishData.ljh3") 193 | 194 | d := make([]RawType, 1000) 195 | rec := &DataRecord{data: d, presamples: 4} 196 | records := make([]*DataRecord, 1) 197 | for i := range records 
{ 198 | records[i] = rec 199 | } 200 | slowPart := func(b *testing.B, dp DataPublisher, records []*DataRecord) { 201 | for b.Loop() { 202 | dp.PublishData(records) 203 | b.SetBytes(int64(len(d) * 2 * len(records))) 204 | } 205 | } 206 | startTime := time.Now() 207 | 208 | b.Run("PubRecords", func(b *testing.B) { 209 | dp := DataPublisher{} 210 | dp.SetPubRecords() 211 | defer dp.RemovePubRecords() 212 | slowPart(b, dp, records) 213 | }) 214 | b.Run("PubSummaries", func(b *testing.B) { 215 | dp := DataPublisher{} 216 | dp.SetPubSummaries() 217 | defer dp.RemovePubSummaries() 218 | slowPart(b, dp, records) 219 | }) 220 | b.Run("PubLJH22", func(b *testing.B) { 221 | dp := DataPublisher{} 222 | dp.SetLJH22(0, 0, len(d), 1, 0, startTime, 0, 0, 0, 0, 0, 0, 0, 223 | "TestPublishData.ljh", "testSource", "chanX", 1, Pixel{}) 224 | defer dp.RemoveLJH22() 225 | slowPart(b, dp, records) 226 | }) 227 | b.Run("PubLJH3", func(b *testing.B) { 228 | dp := DataPublisher{} 229 | dp.SetLJH3(0, 0, 0, 0, 0, 0, ljh3Testfile) 230 | defer dp.RemoveLJH3() 231 | slowPart(b, dp, records) 232 | }) 233 | b.Run("PubAll", func(b *testing.B) { 234 | dp := DataPublisher{} 235 | dp.SetPubRecords() 236 | defer dp.RemovePubRecords() 237 | dp.SetPubSummaries() 238 | defer dp.RemovePubSummaries() 239 | dp.SetLJH22(0, 0, len(d), 1, 0, startTime, 0, 0, 0, 0, 0, 0, 0, 240 | ljh2Testfile, "testSource", "chanX", 1, Pixel{}) 241 | defer dp.RemoveLJH22() 242 | dp.SetLJH3(0, 0, 0, 0, 0, 0, ljh3Testfile) 243 | defer dp.RemoveLJH3() 244 | slowPart(b, dp, records) 245 | }) 246 | b.Run("PubNone", func(b *testing.B) { 247 | dp := DataPublisher{} 248 | slowPart(b, dp, records) 249 | }) 250 | b.Run("RawTypeToUint16", func(b *testing.B) { 251 | for b.Loop() { 252 | data := make([]uint16, len(rec.data)) 253 | for i, v := range rec.data { 254 | data[i] = uint16(v) 255 | } 256 | b.SetBytes(int64(2 * len(rec.data))) 257 | } 258 | }) 259 | b.Run("binary.Write", func(b *testing.B) { 260 | for b.Loop() { 261 | var buf 
bytes.Buffer 262 | binary.Write(&buf, binary.LittleEndian, rec.data) 263 | b.SetBytes(int64(2 * len(rec.data))) 264 | } 265 | }) 266 | b.Run("rawTypeToBytes", func(b *testing.B) { 267 | for b.Loop() { 268 | data := rawTypeToBytes(rec.data) 269 | b.SetBytes(int64(2 * len(data))) 270 | } 271 | }) 272 | } 273 | -------------------------------------------------------------------------------- /maps/ar14_30rows_map.cfg: -------------------------------------------------------------------------------- 1 | spacing: 520 2 | 1 290 -3470 c0r0 3 | 3 290 -4510 c0r1 4 | 5 290 -1910 c0r2 5 | 7 290 -870 c0r3 6 | 9 870 -2430 c0r4 7 | 11 870 -3470 c0r5 8 | 13 870 -4510 c0r6 9 | 15 870 -870 c0r7 10 | 17 1450 -2030 c0r8 11 | 19 1450 -2550 c0r9 12 | 21 1450 -3590 c0r10 13 | 23 3190 -3190 c0r11 14 | 25 2610 -3710 c0r12 15 | 27 2030 -3590 c0r13 16 | 29 2030 -2550 c0r14 17 | 31 290 -2950 c0r15 18 | 33 290 -3990 c0r16 19 | 35 290 -2430 c0r17 20 | 37 290 -1390 c0r18 21 | 39 870 -3990 c0r19 22 | 41 870 -1910 c0r20 23 | 43 870 -1390 c0r21 24 | 45 2030 -2030 c0r22 25 | 47 1450 -3070 c0r23 26 | 49 1450 -4110 c0r24 27 | 51 870 -2950 c0r25 28 | 53 3190 -3710 c0r26 29 | 55 2610 -3190 c0r27 30 | 57 2030 -3070 c0r28 31 | 59 2030 -4110 c0r29 32 | 61 -2030 -3650 c1r0 33 | 63 -2030 -2610 c1r1 34 | 65 -2610 -3130 c1r2 35 | 67 -3190 -3710 c1r3 36 | 69 -870 -1970 c1r4 37 | 71 -1450 -4050 c1r5 38 | 73 -1450 -3010 c1r6 39 | 75 -1450 -1450 c1r7 40 | 77 -870 -4570 c1r8 41 | 79 -870 -4050 c1r9 42 | 81 -870 -3010 c1r10 43 | 83 -290 -810 c1r11 44 | 85 -290 -3930 c1r12 45 | 87 -290 -2890 c1r13 46 | 89 -290 -1850 c1r14 47 | 91 -2030 -4170 c1r15 48 | 93 -2030 -3130 c1r16 49 | 95 -2610 -2610 c1r17 50 | 97 -2610 -3650 c1r18 51 | 99 -1450 -3530 c1r19 52 | 101 -1450 -2490 c1r20 53 | 103 -1450 -1970 c1r21 54 | 105 -290 -1330 c1r22 55 | 107 -870 -3530 c1r23 56 | 109 -870 -2490 c1r24 57 | 111 -870 -1450 c1r25 58 | 113 -290 -290 c1r26 59 | 115 -290 -4450 c1r27 60 | 117 -290 -3410 c1r28 61 | 119 -290 -2370 c1r29 62 | 121 
-3470 -290 c2r0 63 | 123 -4510 -290 c2r1 64 | 125 -1910 -290 c2r2 65 | 127 -870 -290 c2r3 66 | 129 -2430 -870 c2r4 67 | 131 -3470 -870 c2r5 68 | 133 -4510 -870 c2r6 69 | 135 -870 -870 c2r7 70 | 137 -2030 -1450 c2r8 71 | 139 -2550 -1450 c2r9 72 | 141 -3590 -1450 c2r10 73 | 143 -3190 -3190 c2r11 74 | 145 -3710 -2610 c2r12 75 | 147 -3590 -2030 c2r13 76 | 149 -2550 -2030 c2r14 77 | 151 -2950 -290 c2r15 78 | 153 -3990 -290 c2r16 79 | 155 -2430 -290 c2r17 80 | 157 -1390 -290 c2r18 81 | 159 -3990 -870 c2r19 82 | 161 -1910 -870 c2r20 83 | 163 -1390 -870 c2r21 84 | 165 -2030 -2030 c2r22 85 | 167 -3070 -1450 c2r23 86 | 169 -4110 -1450 c2r24 87 | 171 -2950 -870 c2r25 88 | 173 -3710 -3190 c2r26 89 | 175 -3190 -2610 c2r27 90 | 177 -3070 -2030 c2r28 91 | 179 -4110 -2030 c2r29 92 | 181 -3650 2030 c3r0 93 | 183 -2610 2030 c3r1 94 | 185 -3130 2610 c3r2 95 | 187 -3710 3190 c3r3 96 | 189 -1970 870 c3r4 97 | 191 -4050 1450 c3r5 98 | 193 -3010 1450 c3r6 99 | 195 -1450 1450 c3r7 100 | 197 -4570 870 c3r8 101 | 199 -4050 870 c3r9 102 | 201 -3010 870 c3r10 103 | 203 -810 290 c3r11 104 | 205 -3930 290 c3r12 105 | 207 -2890 290 c3r13 106 | 209 -1850 290 c3r14 107 | 211 -4170 2030 c3r15 108 | 213 -3130 2030 c3r16 109 | 215 -2610 2610 c3r17 110 | 217 -3650 2610 c3r18 111 | 219 -3530 1450 c3r19 112 | 221 -2490 1450 c3r20 113 | 223 -1970 1450 c3r21 114 | 225 -1330 290 c3r22 115 | 227 -3530 870 c3r23 116 | 229 -2490 870 c3r24 117 | 231 -1450 870 c3r25 118 | 233 -290 290 c3r26 119 | 235 -4450 290 c3r27 120 | 237 -3410 290 c3r28 121 | 239 -2370 290 c3r29 122 | 241 -290 3470 c4r0 123 | 243 -290 4510 c4r1 124 | 245 -290 1910 c4r2 125 | 247 -290 870 c4r3 126 | 249 -870 2430 c4r4 127 | 251 -870 3470 c4r5 128 | 253 -870 4510 c4r6 129 | 255 -870 870 c4r7 130 | 257 -1450 2030 c4r8 131 | 259 -1450 2550 c4r9 132 | 261 -1450 3590 c4r10 133 | 263 -3190 3190 c4r11 134 | 265 -2610 3710 c4r12 135 | 267 -2030 3590 c4r13 136 | 269 -2030 2550 c4r14 137 | 271 -290 2950 c4r15 138 | 273 -290 3990 c4r16 139 | 275 -290 
2430 c4r17 140 | 277 -290 1390 c4r18 141 | 279 -870 3990 c4r19 142 | 281 -870 1910 c4r20 143 | 283 -870 1390 c4r21 144 | 285 -2030 2030 c4r22 145 | 287 -1450 3070 c4r23 146 | 289 -1450 4110 c4r24 147 | 291 -870 2950 c4r25 148 | 293 -3190 3710 c4r26 149 | 295 -2610 3190 c4r27 150 | 297 -2030 3070 c4r28 151 | 299 -2030 4110 c4r29 152 | 301 2030 3650 c5r0 153 | 303 2030 2610 c5r1 154 | 305 2610 3130 c5r2 155 | 307 3190 3710 c5r3 156 | 309 870 1970 c5r4 157 | 311 1450 4050 c5r5 158 | 313 1450 3010 c5r6 159 | 315 1450 1450 c5r7 160 | 317 870 4570 c5r8 161 | 319 870 4050 c5r9 162 | 321 870 3010 c5r10 163 | 323 290 810 c5r11 164 | 325 290 3930 c5r12 165 | 327 290 2890 c5r13 166 | 329 290 1850 c5r14 167 | 331 2030 4170 c5r15 168 | 333 2030 3130 c5r16 169 | 335 2610 2610 c5r17 170 | 337 2610 3650 c5r18 171 | 339 1450 3530 c5r19 172 | 341 1450 2490 c5r20 173 | 343 1450 1970 c5r21 174 | 345 290 1330 c5r22 175 | 347 870 3530 c5r23 176 | 349 870 2490 c5r24 177 | 351 870 1450 c5r25 178 | 353 290 290 c5r26 179 | 355 290 4450 c5r27 180 | 357 290 3410 c5r28 181 | 359 290 2370 c5r29 182 | 361 3470 290 c6r0 183 | 363 4510 290 c6r1 184 | 365 1910 290 c6r2 185 | 367 870 290 c6r3 186 | 369 2430 870 c6r4 187 | 371 3470 870 c6r5 188 | 373 4510 870 c6r6 189 | 375 870 870 c6r7 190 | 377 2030 1450 c6r8 191 | 379 2550 1450 c6r9 192 | 381 3590 1450 c6r10 193 | 383 3190 3190 c6r11 194 | 385 3710 2610 c6r12 195 | 387 3590 2030 c6r13 196 | 389 2550 2030 c6r14 197 | 391 2950 290 c6r15 198 | 393 3990 290 c6r16 199 | 395 2430 290 c6r17 200 | 397 1390 290 c6r18 201 | 399 3990 870 c6r19 202 | 401 1910 870 c6r20 203 | 403 1390 870 c6r21 204 | 405 2030 2030 c6r22 205 | 407 3070 1450 c6r23 206 | 409 4110 1450 c6r24 207 | 411 2950 870 c6r25 208 | 413 3710 3190 c6r26 209 | 415 3190 2610 c6r27 210 | 417 3070 2030 c6r28 211 | 419 4110 2030 c6r29 212 | 421 3650 -2030 c7r0 213 | 423 2610 -2030 c7r1 214 | 425 3130 -2610 c7r2 215 | 427 3710 -3190 c7r3 216 | 429 1970 -870 c7r4 217 | 431 4050 -1450 c7r5 218 | 433 
3010 -1450 c7r6 219 | 435 1450 -1450 c7r7 220 | 437 4570 -870 c7r8 221 | 439 4050 -870 c7r9 222 | 441 3010 -870 c7r10 223 | 443 810 -290 c7r11 224 | 445 3930 -290 c7r12 225 | 447 2890 -290 c7r13 226 | 449 1850 -290 c7r14 227 | 451 4170 -2030 c7r15 228 | 453 3130 -2030 c7r16 229 | 455 2610 -2610 c7r17 230 | 457 3650 -2610 c7r18 231 | 459 3530 -1450 c7r19 232 | 461 2490 -1450 c7r20 233 | 463 1970 -1450 c7r21 234 | 465 1330 -290 c7r22 235 | 467 3530 -870 c7r23 236 | 469 2490 -870 c7r24 237 | 471 1450 -870 c7r25 238 | 473 290 -290 c7r26 239 | 475 4450 -290 c7r27 240 | 477 3410 -290 c7r28 241 | 479 2370 -290 c7r29 242 | -------------------------------------------------------------------------------- /doc/LJH.md: -------------------------------------------------------------------------------- 1 | # LJH Memorial File Format 2 | **LJH Version 2.2.0** is the current LJH file format. 3 | 4 | The LJH file format consists of a _human-readable ASCII header_ followed by an arbitrary number of _binary data records_. Information in the header specifies the exact length in bytes of each data record. 5 | 6 | ## Header Information 7 | 8 | The human-readable ASCII header is the start of the LJH file. That means you can say `less myfile_chan5.ljh` at a unix terminal and get meaningful information about the file...before the gibberish starts. Handy, right? 9 | 10 | The header is somewhat fragile (it would have been better written in YAML or TOML or even JSON, but we decided just to live with it). It consists essentially of key-value pairs, with a format of `Key: value`, one pair per line. 11 | 12 | ### Header Notes: 13 | 14 | * Lines begining with `#` are usually ignored. 15 | * `#End of Header` marks the end of the header and the beginning of the binary data. 16 | * `#End of Description` has special meaning if `System description of this File:` has been read. 17 | * Newlines are the newlines of the digitizing computer. The interpreting program must accept LF, CR, or CRLF. 
18 | * Capitalization must be matched. 19 | * One space follows a colon. Additional spaces are treated as part of the value. 20 | * Programs that read LJH files ignore header keys that are unexpected or unneeded. 21 | 22 | ``` 23 | #LJH Memorial File Format 24 | ``` 25 | 26 | This line indicates that the file is based on format described here. 27 | 28 | ```text 29 | Save File Format Version: 2.2.0 30 | Software Version: DASTARD version 0.2.15 31 | Software Git Hash: 85ab821 32 | Data source: Lancero 33 | ``` 34 | 35 | These lines uniquely identify the exact format, so the interpreting program can adapt. While the first line should be sufficient for this purpose, the second and third lines take in the possibility that a particular program may have a bug. The interpreting program may be aware of this bug and compensate. The Data source is meant for later human reference: values include Abaco, Lancero, and Roach. 36 | 37 | ``` 38 | Number of rows: 32 39 | Number of columns: 1 40 | Row number (from 0-31 inclusive): 12 41 | Column number (from 0-0 inclusive): 0 42 | Number of channels: 32 43 | Channel name: chan12 44 | Channel: 12 45 | ChannelIndex (in dastard): 12 46 | Subframe divisions: 32 47 | Subframe offset: 12 48 | ``` 49 | 50 | Dastard inserts this information to help downstream analysis tools understand the array being used when this file was acquired. 51 | 52 | In February 2024, we added the Subframe divisions/offset tags. 53 | * _Subframe divisions_ indicates the speed of the subframe counter, which is used for timing external triggers. It will be the row rate/frame rate ratio (i.e., the number of rows) for Lancero sources, and some arbitrary multiplier like 64 for other sources. 54 | * _Subframe offset_ means how many subframe divisions delayed is THIS channel w.r.t. the frame clock. It will be the row number for Lancero sources, and 0 for other sources that read all channels simultaneously. 
55 | 56 | ``` 57 | Digitized Word Size in Bytes: 2 58 | ``` 59 | 60 | Each sample is stored in this many bytes. 61 | 62 | ``` 63 | Location: LLNL 64 | Cryostat: C3PO 65 | Thermometer: GRT1 66 | Temperature (Ohm or K): 0.1 67 | Bridge range: 20.0E3 68 | Magnetic field (A or Gauss): 0.75 69 | Detector: SnTES#8 70 | Sample: Orange peel 71 | Excitation/Source: none 72 | Operator: Leisure Larry 73 | ``` 74 | 75 | Like the several lines above, most lines are comments for later human use and are not interpreted by general-purpose LJH readers. 76 | 77 | 78 | ``` 79 | System description of this File: 80 | blah 81 | blah 82 | blah 83 | User description of this File: 84 | blah 85 | blah 86 | blah 87 | #End of Description 88 | ``` 89 | This is a multiline comment. Once the ''Description of this File:'' line is read, all following lines are concantenated until ''#End of Description'' is read. 90 | 91 | Again, this is ignored by programs that read LJH files. 92 | 93 | ``` 94 | Number of Digitizers: 1 95 | Number of Active Channels: 2 96 | ``` 97 | 98 | The number of digitizers and channels present in the file are given so that space may be allocated for them by the interpreting program, if necessary. 99 | 100 | ``` 101 | Timestamp offset (s): 3016738980.049000 102 | ``` 103 | 104 | The meaning of this and the means of interpreting it are dependent upon the particular programs creating and reading this file. It was a necessary offset in earlier versions of LJH, where we did not reserve enough bytes per record to record a full timestamp. In LJH 2.2, it serves as a simple zero-time (all records should be no earlier than this "offset"). 105 | 106 | ``` 107 | Server Start Time: 18 Nov 2022, 15:47:34 MST 108 | First Record Time: 18 Nov 2022, 16:54:15 MST 109 | ``` 110 | 111 | These times show when the server (Dastard, in this case) started running, and when the first record was written to this file. 
112 | 113 | ``` 114 | Timebase: 5.000000E-8 115 | Number of samples per point: 1 116 | ``` 117 | 118 | Timebase gives the sampling period (in seconds). Number of samples per point is generally 1, but can be more in special cases where samples are averaged and downsampled before recording. 119 | 120 | ``` 121 | Presamples: 256 122 | Total Samples: 1024 123 | ``` 124 | 125 | Total samples is the actual record length in samples. The trigger point will be located at sample number Presamples. 126 | 127 | ## Binary Information 128 | 129 | If you read an LJH file until the characters `#End of Header`, then the remainder of the file is the binary section. It consists of a sequence of data records. 130 | 131 | Each record starts with a 16-byte time marker. The record's waveform data consists of the next L*M bytes, where L is the number of samples (`Total Samples:` value from the header) and M is the number of bytes per sample (`Digitized Word Size in Bytes:` from the header). M is always 2 bytes per sample, in practice. 132 | * The full record's length is **16+L*M** . 133 | * All values in the data record are little endian. 134 | * The first 8-byte word is the subframe counter. It counts the number of _subframe_ divisions read out since the server started. If the server has to resynchronize on the raw data, then the subframe counter will be incremented by an _estimate_ to account for the time missed. 135 | * The second 8-byte word is the POSIX microsecond time, i.e., the time in microseconds since 1 January 1970 00:00 UT. (Warning: this will overflow in 292,226 years if you interpret it as a signed number.) 136 | * The next L words (of M bytes each) are the data record, as a signed or unsigned integer. (Typically, we use signed for the TDM error signal and unsigned for the TDM feedback, and unsigned for µMUX data.) 137 | 138 | 139 | ### Binary Information (for LJH version 2.1.0) 140 | 141 | **Version 2.1.0 follows. Warning! 
You probably want 2.2.0.** 142 | 143 | Each record starts with a 6-byte time marker. The record's waveform data consists of the next L*M bytes, where L is the number of samples (`Total Samples:` value from the header) and M is the number of bytes per sample (`Digitized Word Size in Bytes:` from the header). M is always 2 bytes per sample, in practice. 144 | * The full record's length is **6+L*M** . 145 | * All values in the data record are little endian. 146 | * The first byte is a "4 microsecond tick". That is, it counts microseconds past the millisecond counter and records the count divided by 4. Beware that previous versions of LJH used the first byte to signify something about the type of data. Igor seems to ignore this byte, though, so I think we're okay to stuff timing information into it. 147 | * The second byte used to signify a channel number N, which corresponds to the Nth channel described in the header. Channel number 255 is reserved for temperature readout with the DMM. Since 2010, this has always been meaningless. 148 | * The next 4 bytes are an unsigned 32-bit number that is the value of a millisecond counter on the digitizing computer. 149 | * The next L words (of M bytes each) are the data record, as a signed or unsigned integer. (Typically, we use signed for the TDM error signal and unsigned for the TDM feedback.) 150 | 151 | ### Changelog 152 | 153 | * **Version 2.2.0 (6 Aug 2015)** Changed the binary definition to include 8 bytes each record for pulse timing (microsecond precision) and frame number. 154 | * **Version 2.1.0 (23 Sep 2011)** Used the first byte of each record to get 4 microsec timing resolution instead of 1 ms. 
155 | * **Version 2.0.0 (27 Mar 2011)** Defined inversion and offset more clearly 156 | * **Version 2.0.0 (5 Jan 2001)** Changed definition of discrimination level 157 | * **Version 2.0.0 (24 May 2000)** since most PCs have least significant byte first, the binary information has been changed to default 158 | * **Version 1.1.0 (8 May 2000)** added a few more user and channel parameters as well as provisions for temperature monitoring 159 | * **Initial 1.0.0 (5 Aug 1999)** definition by Larry J. Hiller 160 | -------------------------------------------------------------------------------- /internal/ljh/ljh_test.go: -------------------------------------------------------------------------------- 1 | package ljh 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestRead(t *testing.T) { 13 | fileName := "demo_chan11.ljh" 14 | r, err := OpenReader(fileName) 15 | if err != nil { 16 | t.Error(err) 17 | } 18 | defer r.Close() 19 | 20 | if r.VersionNumber != Version2_2 { 21 | t.Error(`r.VersionNumber != Version2_2`) 22 | } 23 | var headertests = []struct { 24 | name string 25 | found int 26 | want int 27 | }{ 28 | {"r.ChannelIndex", r.ChannelIndex, 11}, 29 | {"r.Presamples", r.Presamples, 256}, 30 | {"r.Samples", r.Samples, 1024}, 31 | {"r.VersionNumber", int(r.VersionNumber), int(Version2_2)}, 32 | {"r.WordSize", r.WordSize, 2}, 33 | {"r.headerLength", r.headerLength, 1216}, 34 | {"r.recordLength", r.recordLength, 2064}, 35 | } 36 | for _, ht := range headertests { 37 | if ht.found != ht.want { 38 | t.Errorf(`%s = %d, want %d`, ht.name, ht.found, ht.want) 39 | } 40 | } 41 | 42 | expectedTC := []int64{1462566784410601, 1462566784420431} 43 | expectedRC := []int64{25221272465, 25221303185} 44 | for i, tc := range expectedTC { 45 | pr, errnp := r.NextPulse() 46 | if errnp != nil { 47 | t.Error(errnp) 48 | } 49 | if pr.TimeCode != tc { 50 | t.Errorf("r.NextPulse().TimeCode = %d, want %d", pr.TimeCode, tc) 51 | } 
52 | if pr.SubframeCount != expectedRC[i] { 53 | t.Errorf("r.NextPulse().SubframeCount = %d, want %d", pr.SubframeCount, expectedRC[i]) 54 | } 55 | } 56 | _, err = r.NextPulse() 57 | if err == nil { 58 | t.Errorf("r.NextPulse() works after EOF, want error") 59 | } 60 | } 61 | 62 | func TestCannotOpenNonLJH(t *testing.T) { 63 | fileName := "ljh.go" 64 | if _, err := OpenReader(fileName); err == nil { 65 | t.Errorf("Opened non-LJH file '%s' without error, expected error", fileName) 66 | } 67 | 68 | fileName = "doesnt exist and can\not exist" 69 | if _, err := OpenReader(fileName); err == nil { 70 | t.Errorf("Opened non-existent file '%s' without error, expected error", fileName) 71 | } 72 | } 73 | 74 | func TestBadVersionNumbers(t *testing.T) { 75 | tempDir, err := os.MkdirTemp("", "ljh_test") 76 | if err != nil { 77 | t.Error(err) 78 | } 79 | defer os.RemoveAll(tempDir) 80 | tempFile := filepath.Join(tempDir, "t1.ljh") 81 | // t.Logf("Temp dir: %s\n", tempFile) 82 | 83 | var versiontests = []struct { 84 | vnum string 85 | wanterror bool 86 | }{ 87 | {"askdjhf", true}, 88 | {"2.2", true}, 89 | {"1.2.3", true}, 90 | {"2.1.0", true}, 91 | {"2.1.1", false}, 92 | {"2.2.0", false}, 93 | } 94 | for _, vt := range versiontests { 95 | content := 96 | fmt.Appendf(nil, "#LJH Memorial File Format\nSave File Format Version: %s\n#End of Header\n", vt.vnum) 97 | if err = os.WriteFile(tempFile, content, 0666); err != nil { 98 | t.Error(err) 99 | } 100 | _, err = OpenReader(tempFile) 101 | if (err != nil) != vt.wanterror { 102 | t.Errorf(`Version number = %s gives error %v, want %v`, vt.vnum, err, vt.wanterror) 103 | } 104 | } 105 | 106 | } 107 | 108 | func TestWriter(t *testing.T) { 109 | w := Writer{FileName: "writertest.ljh", 110 | Samples: 100, 111 | Presamples: 50, 112 | SubframeDivisions: 2, 113 | SubframeOffset: 1, 114 | } 115 | err := w.CreateFile() 116 | if err != nil { 117 | t.Errorf("file creation error: %v", err) 118 | } 119 | if w.RecordsWritten != 0 { 120 | 
t.Error("RecordsWritten want 0, have", w.RecordsWritten) 121 | } 122 | if w.HeaderWritten { 123 | t.Error("TestWriter: header written should be false") 124 | } 125 | err = w.WriteHeader(time.Now()) 126 | if !w.HeaderWritten { 127 | t.Error("TestWriter: header written should be true") 128 | } 129 | if err != nil { 130 | t.Errorf("WriteHeader Error: %v", err) 131 | } 132 | data := make([]uint16, 100) 133 | w.Flush() 134 | stat, _ := os.Stat("writertest.ljh") 135 | sizeHeader := stat.Size() 136 | err = w.WriteRecord(8888888, 127, data) 137 | if err != nil { 138 | t.Errorf("WriteRecord Error: %v", err) 139 | } 140 | if w.RecordsWritten != 1 { 141 | t.Error("RecordsWritten want 1, have", w.RecordsWritten) 142 | } 143 | w.Flush() 144 | stat, _ = os.Stat("writertest.ljh") 145 | sizeRecord := stat.Size() 146 | expectSize := sizeHeader + 8 + 8 + 2*int64(w.Samples) 147 | if sizeRecord != expectSize { 148 | t.Errorf("ljh file wrong size after writing record, want %v, have %v", expectSize, sizeRecord) 149 | } 150 | // write a record of incorrect size, check for error 151 | wrongData := make([]uint16, 101) 152 | err = w.WriteRecord(0, 0, wrongData) 153 | if err == nil { 154 | t.Errorf("WriterTest: should have non-nil Error") 155 | } 156 | w.Close() 157 | r, err := OpenReader("writertest.ljh") 158 | if err != nil { 159 | t.Errorf("WriterTest, OpenReader Error: %v", err) 160 | } 161 | record, err := r.NextPulse() 162 | if err != nil { 163 | t.Errorf("WriterTest, NextPulse Error: %v", err) 164 | } 165 | if record.TimeCode != 127 { 166 | t.Errorf("WriterTest, TimeCode Wrong, have %v, wand %v", record.TimeCode, 127) 167 | } 168 | // WriteRecord accepts a framecount of 8888888 here, but we write the SubframeCount 8888888*2+1=17777777 169 | // w.NumberOfRows = 2, w.RowNum = 1 170 | if record.SubframeCount != 17777777 { 171 | t.Errorf("WriterTest, SubframeCount Wrong, have %v, want %v", record.SubframeCount, 17777777) 172 | } 173 | w.SourceName = "Lancero" 174 | 
w.WriteHeader(time.Now()) 175 | 176 | if err1 := w.CreateFile(); err1 == nil { 177 | t.Errorf("tried to create an existing file did not raise error") 178 | } 179 | w = Writer{FileName: filepath.FromSlash("/doesnt_exist/notpermitted"), 180 | Samples: 100, 181 | Presamples: 50} 182 | if err1 := w.CreateFile(); err1 == nil { 183 | t.Errorf("creation of unallowed file did not raise error") 184 | } 185 | 186 | } 187 | 188 | func TestWriter3(t *testing.T) { 189 | w := Writer3{FileName: "writertest.ljh3"} 190 | err := w.CreateFile() 191 | if err != nil { 192 | t.Errorf("file creation error: %v", err) 193 | } 194 | if w.RecordsWritten != 0 { 195 | t.Error("RecordsWritten want 0, have", w.RecordsWritten) 196 | } 197 | if w.HeaderWritten { 198 | t.Error("TestWriter: header written should be false") 199 | } 200 | err = w.WriteHeader() 201 | if !w.HeaderWritten { 202 | t.Error("TestWriter: header written should be true") 203 | } 204 | if err != nil { 205 | t.Errorf("WriteHeader Error: %v", err) 206 | } 207 | data := make([]uint16, 100) 208 | w.Flush() 209 | stat, _ := os.Stat("writertest.ljh3") 210 | sizeHeader := stat.Size() 211 | err = w.WriteRecord(0, 0, 0, data) 212 | if err != nil { 213 | t.Errorf("WriteRecord Error: %v", err) 214 | } 215 | if w.RecordsWritten != 1 { 216 | t.Error("RecordsWritten want 1, have", w.RecordsWritten) 217 | } 218 | w.Flush() 219 | stat, _ = os.Stat("writertest.ljh3") 220 | sizeRecord := stat.Size() 221 | expectSize := sizeHeader + 4 + 4 + 8 + 8 + 2*int64(len(data)) 222 | if sizeRecord != expectSize { 223 | t.Errorf("ljh file wrong size after writing record, want %v, have %v", expectSize, sizeRecord) 224 | } 225 | // write a record of different length, should work 226 | otherLengthData := make([]uint16, 101) 227 | err = w.WriteRecord(0, 0, 0, otherLengthData) 228 | if err != nil { 229 | t.Errorf("WriterTest: couldn't write other size") 230 | } 231 | w.Flush() 232 | stat, _ = os.Stat("writertest.ljh3") 233 | sizeRecord = stat.Size() 234 | 
expectSize += 4 + 4 + 8 + 8 + 2*int64(len(otherLengthData)) 235 | if sizeRecord != expectSize { 236 | t.Errorf("ljh file wrong size after writing record, want %v, have %v", expectSize, sizeRecord) 237 | } 238 | w.Close() 239 | } 240 | 241 | func BenchmarkLJH22(b *testing.B) { 242 | w := Writer{FileName: "writertest.ljh", 243 | Samples: 1000, 244 | Presamples: 50} 245 | w.CreateFile() 246 | w.WriteHeader(time.Now()) 247 | data := make([]uint16, 1000) 248 | 249 | for b.Loop() { 250 | err := w.WriteRecord(8888888, 127, data) 251 | if err != nil { 252 | b.Fatal(fmt.Sprint(err)) 253 | } 254 | b.SetBytes(int64(2 * len(data))) 255 | } 256 | } 257 | func BenchmarkLJH3(b *testing.B) { 258 | w := Writer3{FileName: "writertest.ljh"} 259 | w.CreateFile() 260 | w.WriteHeader() 261 | data := make([]uint16, 1000) 262 | 263 | for b.Loop() { 264 | err := w.WriteRecord(0, 0, 0, data) 265 | if err != nil { 266 | b.Fatal(fmt.Sprint(err)) 267 | } 268 | b.SetBytes(int64(2 * len(data))) 269 | } 270 | } 271 | func BenchmarkFileWrite(b *testing.B) { 272 | f, _ := os.Create("benchmark.ljh") 273 | data := make([]byte, 2000) 274 | 275 | for b.Loop() { 276 | _, err := f.Write(data) 277 | if err != nil { 278 | b.Fatal(fmt.Sprint(err)) 279 | } 280 | b.SetBytes(int64(len(data))) 281 | } 282 | } 283 | func BenchmarkBufIOWrite(b *testing.B) { 284 | f, _ := os.Create("benchmark.ljh") 285 | w := bufio.NewWriterSize(f, 65536) 286 | defer w.Flush() 287 | defer f.Close() 288 | data := make([]byte, 2000) 289 | 290 | for b.Loop() { 291 | _, err := w.Write(data) 292 | if err != nil { 293 | b.Fatal(fmt.Sprint(err)) 294 | } 295 | b.SetBytes(int64(len(data))) 296 | } 297 | } 298 | --------------------------------------------------------------------------------