├── sdr ├── build │ └── .gitignore ├── utils.hpp ├── pseudorandom_phase.cpp ├── pseudorandom_phase.hpp ├── rf_settings.hpp ├── utils.cpp ├── pseudorandom_phase_to_file.cpp ├── CMakeLists.txt └── rf_settings.cpp ├── tests ├── .gitignore ├── data │ └── .gitignore ├── Plot Duty Cycle Pickles.ipynb └── error_code_late_command_sweep.py ├── postprocessing ├── notebooks │ ├── .gitignore │ ├── orca_paper │ │ ├── outputs │ │ │ └── .gitignore │ │ ├── Plot Phase Coherence Pickles.ipynb │ │ ├── Plot Power Calibration.ipynb │ │ └── Noise Power Dithering Figure.ipynb │ ├── archived_experiments │ │ ├── 20230629 Summary of Noise Variance.ipynb │ │ └── 20220329 Filtered impacts on cross correlation.ipynb │ ├── Transmit Amplitude.ipynb │ ├── Radar 1D Stacking.ipynb │ ├── Radar 1D File Compare.ipynb │ ├── Radar Spectrogram.ipynb │ └── Dask Demo.ipynb ├── plot_dask.py ├── save_data.py ├── merge_data.py ├── plot_samples.py ├── noise_test.py └── test_scripts │ └── phase_noise_simulation │ └── Simplified Mixer Phase Noise.ipynb ├── manager ├── radar_service.sh └── uav_payload_manager.py ├── .gitignore ├── run_default.sh ├── run_x310.sh ├── data └── README.md ├── environment.yaml ├── environment-rpi.yaml ├── x310_startup.sh ├── LICENSE ├── tips ├── conda.md └── vscode.md ├── config ├── default_x310.yaml ├── orca_paper │ ├── duty_cycle_b205.yaml │ ├── dithering_b205.yaml │ └── phase_noise_b205.yaml ├── synthetic_config.yaml └── default.yaml ├── preprocessing └── generate_chirp.py ├── README.md └── run.py /sdr/build/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /tests/.gitignore: -------------------------------------------------------------------------------- 1 | *.png 2 | -------------------------------------------------------------------------------- /tests/data/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /postprocessing/notebooks/.gitignore: -------------------------------------------------------------------------------- 1 | *-outputs/ -------------------------------------------------------------------------------- /postprocessing/notebooks/orca_paper/outputs/.gitignore: -------------------------------------------------------------------------------- 1 | *.png 2 | *.svg 3 | *.pickle -------------------------------------------------------------------------------- /manager/radar_service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source $HOME/miniconda/etc/profile.d/conda.sh 3 | conda activate uhd 4 | cd /home/ubuntu/uhd_radar 5 | python -u manager/uav_payload_manager.py 6 | 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | sdr/build/ 3 | *.bin 4 | .vscode/**/* 5 | data/**/* 6 | !data/README.md 7 | archive/ 8 | .idea/* 9 | .DS_Store 10 | uhd_stdout.log 11 | calibration/ 12 | postprocessing/*.png 13 | *.tmp 14 | 15 | # Python 16 | 17 | __pycache__/ 18 | 19 | # Notebooks 20 | 21 | .ipynb_checkpoints -------------------------------------------------------------------------------- /run_default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # "$@" passes all 
command line arguments to this script on to the other scripts called here 4 | python preprocessing/generate_chirp.py "$@" 5 | cd sdr/build 6 | cmake .. 7 | make 8 | time ./radar "$@" 9 | cd ../.. 10 | python postprocessing/save_data.py "$@" 11 | -------------------------------------------------------------------------------- /run_x310.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # "$@" passes all command line arguments to this script on to the other scripts called here 4 | python preprocessing/generate_chirp.py "$@" 5 | cd sdr/build 6 | cmake .. 7 | make 8 | ./radar "$@" 2>&1 | tee ../../data/terminal_log.txt 9 | cd ../.. 10 | python postprocessing/save_data.py "$@" 11 | -------------------------------------------------------------------------------- /data/README.md: -------------------------------------------------------------------------------- 1 | This is where all scripts in this repo should save data by default. You can store local copies of your data in this directory. 2 | 3 | `.gitignore` is configured so that everything in this directory except this file is ignored by version control. 4 | 5 | Please don't commit your data to the repo. (But do back it up somewhere else!) -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: orca 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - python>3.8 7 | - boost-cpp 8 | - uhd<=4.4.0 # Due to open bug with 4.5.0 and above: https://github.com/EttusResearch/uhd/issues/751 9 | - scipy 10 | - matplotlib 11 | - yaml-cpp=0.6.3 12 | - xarray 13 | - dask 14 | - zarr 15 | - hvplot 16 | - datashader 17 | -------------------------------------------------------------------------------- /sdr/utils.hpp: -------------------------------------------------------------------------------- 1 | // Created 10/23/2021 2 | 3 | #ifndef UTILS_HPP 4 | #define UTILS_HPP 5 | 6 | #include <string> 7 | 8 | // Change filename, e.g. 
from usrp_samples.dat to usrp_samples.00.dat, 9 | // if multiple filenames should be generated 10 | std::string generate_out_filename( 11 | const std::string& base_fn, size_t n_names, size_t this_num); 12 | 13 | #endif //UTILS_HPP 14 | -------------------------------------------------------------------------------- /environment-rpi.yaml: -------------------------------------------------------------------------------- 1 | name: uhd 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - python>3.8 7 | - boost-cpp 8 | - uhd<=4.4.0 # Due to open bug with 4.5.0 and above: https://github.com/EttusResearch/uhd/issues/751 9 | - scipy 10 | - matplotlib 11 | - yaml-cpp=0.6.3 12 | - xarray 13 | - dask 14 | - zarr 15 | - hvplot 16 | - datashader 17 | - pip 18 | - pip: 19 | - gpiozero 20 | - RPi.GPIO 21 | - sparkfun-qwiic 22 | - pi-ina219 23 | -------------------------------------------------------------------------------- /sdr/pseudorandom_phase.cpp: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | #include "pseudorandom_phase.hpp" 3 | 4 | using namespace std; 5 | 6 | 7 | // Return a single float generated from random_generator 8 | float get_next_phase(bool transmit){ 9 | if (transmit) { 10 | return (float) random_generator_tx(); 11 | } else { 12 | return (float) random_generator_rx(); 13 | } 14 | } 15 | 16 | // Return a vector of the next n phases 17 | vector<float> get_next_n_phases(int n, bool transmit){ 18 | vector<float> ph(n); 19 | for(int i=0;i<n;i++){ 20 | ph[i] = get_next_phase(transmit); 21 | } 22 | return ph; 23 | } -------------------------------------------------------------------------------- /sdr/pseudorandom_phase.hpp: -------------------------------------------------------------------------------- 1 | #ifndef PSEUDORANDOM_PHASE_HPP 2 | #define PSEUDORANDOM_PHASE_HPP 3 | 4 | #include <random> 5 | #include <vector> 6 | using namespace std; 7 | 8 | // Random generators for phase modulation 9 | // Seed is identical (and hard-coded) so they will each produce the same sequence 10 | inline mt19937 random_generator_tx(0); // Used on transmit for phase modulation 11 | inline mt19937 random_generator_rx(0); // Used on receive for inverting phase modulation 12 | 13 | float get_next_phase(bool transmit); // Return a single float generated from random_generator 14 | vector<float> get_next_n_phases(int n, bool transmit); // Return a vector of the next n phases from random_generator 15 | 16 | #endif // PSEUDORANDOM_PHASE_HPP -------------------------------------------------------------------------------- /sdr/rf_settings.hpp: -------------------------------------------------------------------------------- 1 | // Created 10/22/2021 2 | 3 | #ifndef RF_SETTINGS_HPP 4 | #define RF_SETTINGS_HPP 5 | 6 | #include <uhd/usrp/multi_usrp.hpp> 7 | #include "yaml-cpp/yaml.h" 8 | 9 | using namespace uhd; 10 | using namespace std; 11 | 12 | // Set USRP RF parameters for a single channel of operation 13 | bool set_rf_params_single(usrp::multi_usrp::sptr usrp, YAML::Node rf0, 14 | vector<size_t> rx_channels, vector<size_t> tx_channels); 15 | 16 | // Set USRP RF parameters for multi channel operation 17 | bool set_rf_params_multi(usrp::multi_usrp::sptr usrp, YAML::Node rf0, YAML::Node rf1, 18 | vector<size_t> rx_channels, vector<size_t> tx_channels); 19 | 20 | // Check whether requested RF parameters are equal to the reported values 21 | bool rf_error_check(usrp::multi_usrp::sptr usrp, YAML::Node rf, size_t tx_channel, 22 | size_t rx_channel); 23 | 24 | #endif // RF_SETTINGS_HPP -------------------------------------------------------------------------------- /sdr/utils.cpp: -------------------------------------------------------------------------------- 1 | // Created 10/23/2021 2 | 3 | #include <string> 4 | #include <boost/format.hpp> 5 | #include <boost/filesystem.hpp> 6 | #include "utils.hpp" 7 | 8 | using namespace std; 9 | 10 | /** 11 | * Change filename, e.g. 
from usrp_samples.dat to usrp_samples.00.dat, 12 | * but only if multiple names are to be generated. 13 | * 14 | * Inputs: base_fn - base filename (e.g. usrp_samples.dat) 15 | * n_names - number of filenames to be generated, corresponds to 16 | * number of RX channels 17 | * this_num - number corresponding to this filename/channel 18 | * Output: string holding the new filename (e.g. usrp_samples.00.dat) 19 | */ 20 | std::string generate_out_filename( 21 | const std::string& base_fn, size_t n_names, size_t this_num) 22 | { 23 | if (n_names == 1) { 24 | return base_fn; 25 | } 26 | 27 | boost::filesystem::path base_fn_fp(base_fn); 28 | base_fn_fp.replace_extension(boost::filesystem::path( 29 | str(boost::format("%02d%s") % this_num % base_fn_fp.extension().string()))); 30 | return base_fn_fp.string(); 31 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Stanford Radio Glaciology 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tips/conda.md: -------------------------------------------------------------------------------- 1 | Intro tutorial: https://conda.io/projects/conda/en/latest/user-guide/getting-started.html 2 | 3 | The very useful page on managing conda environments: https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html 4 | 5 | As a side note, there are a couple of configuration changes that we recommend considering: 6 | 1. By default, conda will automatically open up the "base" environment every time you open a terminal window. You can disable this if you prefer: `conda config --set auto_activate_base false` 7 | 2. Conda uses "channels" to manage where you get software packages from. There are tons of channels out there, but there are basically only two you need to know about: the default set and `conda-forge`. The default set is a curated set of packaged that's supported and maintained as part of the Anaconda commercial service. `conda-forge` is a community-supported much larger repository of packages. In our case, the up-to-date UHD software we need is only available in `conda-forge`. 
You may wish to prioritize using packages from `conda-forge` when they are available, in which case you can set: `conda config --add channels conda-forge` 8 | 9 | (The `environment.yaml` file provided in this repo internally adds `conda-forge` as the default channel only for the environment it creates.) -------------------------------------------------------------------------------- /sdr/pseudorandom_phase_to_file.cpp: -------------------------------------------------------------------------------- 1 | #include <iostream> 2 | #include <fstream> 3 | #include <vector> 4 | #include "pseudorandom_phase.hpp" 5 | 6 | using namespace std; 7 | 8 | int main(int argc, char *argv[]) { 9 | if (argc != 3) { 10 | cout << "Usage: " << argv[0] << " <n> <filename>" << endl; 11 | cout << "n is the number of random phases to produce" << endl; 12 | cout << "filename is a path to write the phases to. Each phase is a floating point value. The file is a binary file containing each float." << endl; 13 | return 1; 14 | } 15 | 16 | int n = atoi(argv[1]); 17 | if (n <= 0) { 18 | cout << "Invalid value for n. Please provide a positive integer." << endl; 19 | return 1; 20 | } 21 | 22 | vector<float> phases = get_next_n_phases(n, true); 23 | string filename = argv[2]; 24 | 25 | ofstream outputFile(filename, ios::binary | ios::out); 26 | if (!outputFile.is_open()) { 27 | cout << "Error opening the file " << filename << endl; 28 | return 1; 29 | } 30 | 31 | // Writing the phases as binary data 32 | outputFile.write(reinterpret_cast<const char*>(phases.data()), sizeof(float) * phases.size()); 33 | 34 | outputFile.close(); 35 | cout << "Phases written to " << filename << " successfully." << endl; 36 | 37 | return 0; 38 | } 39 | -------------------------------------------------------------------------------- /postprocessing/plot_dask.py: -------------------------------------------------------------------------------- 1 | import xarray as xr 2 | import dask.array as da 3 | 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | def plot_radargram(pulse_compressed, figsize=None, vmin=-70, vmax=-40, ylims=(65,15), sig_speed=None): 8 | duration_s = pulse_compressed.slow_time[-1] - pulse_compressed.slow_time[0] 9 | 10 | if figsize is None: 11 | figsize = (duration_s/10, 5) 12 | 13 | fig, ax = plt.subplots(1,1, figsize=figsize) 14 | 15 | return_power = 20*np.log10(np.abs(pulse_compressed["radar_data"].compute())) 16 | 17 | if sig_speed: 18 | y_axis = pulse_compressed.travel_time * (sig_speed / 2) 19 | y_axis_label = 'Distance to reflector [m]' 20 | else: 21 | y_axis = pulse_compressed.travel_time 22 | y_axis_label = 'Two-way travel time [s]' 23 | 24 | p = ax.pcolormesh(pulse_compressed.slow_time, y_axis, return_power.T, cmap='inferno', vmin=vmin, vmax=vmax, shading='nearest') 25 | clb = fig.colorbar(p, ax=ax) 26 | clb.set_label('Power [dB]') 27 | ax.set_xlabel('Time [s]') 28 | ax.set_ylabel(y_axis_label) 29 | 30 | ax.set_ylim(ylims[0], ylims[1]) 31 | 32 | if 'basename' in pulse_compressed.attrs: 33 | ax.text(0, 1.05, pulse_compressed.basename, horizontalalignment='left', verticalalignment='center', transform=ax.transAxes) 34 | 35 | for item in ([ax.title, ax.xaxis.label, ax.yaxis.label, clb.ax.yaxis.label] + 36 | ax.get_xticklabels() + ax.get_yticklabels() + clb.ax.get_yticklabels()): 37 | item.set_fontsize(18) 38 | item.set_fontfamily('sans-serif') 39 | 40 | fig.tight_layout() 41 | 42 | return fig, ax 43 | -------------------------------------------------------------------------------- /postprocessing/save_data.py: 
-------------------------------------------------------------------------------- 1 | import sys 2 | import shutil 3 | import argparse 4 | import numpy as np 5 | import scipy.signal as sp 6 | import processing as pr 7 | import matplotlib.pyplot as plt 8 | from datetime import datetime 9 | from ruamel.yaml import YAML as ym 10 | 11 | def save_data(yaml_filename, extra_files={}, alternative_rx_samps_loc=None, num_files=1): 12 | # Initialize Constants 13 | yaml = ym() 14 | with open(yaml_filename) as stream: 15 | config = yaml.load(stream) 16 | 17 | file_prefix = datetime.now().strftime("data/%Y%m%d_%H%M%S") 18 | 19 | print(f"Copying data to {file_prefix}...") 20 | 21 | shutil.copy(yaml_filename, file_prefix + "_config.yaml") 22 | if config['FILES']['max_chirps_per_file'] == -1: 23 | shutil.move(config['FILES']['save_loc'], file_prefix + "_rx_samps.bin") 24 | else: 25 | if config['RUN_MANAGER']['save_partial_files']: 26 | base_filename = config['FILES']['save_loc'] 27 | for i in range(num_files): 28 | f = base_filename + "." + str(i) 29 | shutil.copy(f, file_prefix + "_p" + str(i) + "_rx_samps.bin") 30 | if alternative_rx_samps_loc is not None: 31 | shutil.copy(alternative_rx_samps_loc, file_prefix + "_rx_samps.bin") 32 | 33 | for source_file, dest_tag in extra_files.items(): 34 | shutil.copy(source_file, file_prefix + "_" + dest_tag) 35 | 36 | if config['RUN_MANAGER']['save_gps']: 37 | shutil.copy(config['FILES']['gps_loc'], file_prefix + "_gps_log.txt") 38 | 39 | print(f"File copying complete.") 40 | 41 | return file_prefix 42 | 43 | if __name__ == "__main__": 44 | # Check if a YAML file was provided as a command line argument 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument("yaml_file", nargs='?', default='config/default.yaml', 47 | help='Path to YAML configuration file') 48 | args = parser.parse_args() 49 | yaml_filename = args.yaml_file 50 | 51 | save_data(yaml_filename) 52 | 53 | -------------------------------------------------------------------------------- /postprocessing/merge_data.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import glob 3 | import argparse 4 | import re 5 | import os 6 | 7 | if __name__ == "__main__": 8 | # Accept one command line argument, a string called prefix 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument("prefix", 11 | help='Prefix of files to be merged, such as "data/20230713"') 12 | parser.add_argument("--output", required=False, 13 | help='Output file name. 
If left blank, will be prefix + "_rx_samps.bin"') 14 | args = parser.parse_args() 15 | 16 | path = args.prefix + '_p*_rx_samps.bin' 17 | files = glob.glob(path, recursive=False) 18 | file_ordering = {} 19 | for filename in files: 20 | filename_search = re.search('\d{8}_\d{6}_p(\d+)_rx_samps.bin', filename) 21 | if filename_search: 22 | file_ordering[int(filename_search.group(1))] = filename 23 | 24 | file_idxs = list(file_ordering.keys()) 25 | file_idxs.sort() 26 | 27 | print("Found files:") 28 | for idx in file_idxs: 29 | print(f"[{idx}] {file_ordering[idx]}") 30 | 31 | # Sanity check file list to see if we're missing anything, have duplicates, or have no files 32 | if len(set(file_idxs)) != len(file_idxs): 33 | print("Error: duplicate file indices found.") 34 | exit(1) 35 | 36 | if len(file_idxs) == 0: 37 | print("Error: no files found.") 38 | exit(1) 39 | 40 | if len(file_idxs) != file_idxs[-1] + 1: 41 | print("Error: missing file indices.") 42 | exit(1) 43 | 44 | # Generate output filename 45 | if args.output is None: 46 | args.output = args.prefix + '_rx_samps.bin' 47 | 48 | # Check if it already exists 49 | if os.path.exists(args.output): 50 | print(f"Error: output file {args.output} already exists.") 51 | exit(1) 52 | 53 | print(f"\nEverything looks OK. Merging files to {args.output}...") 54 | with open(args.output, 'wb') as outfile: 55 | for idx in file_idxs: 56 | print(f"Copying {file_ordering[idx]}...") 57 | shutil.copyfileobj(open(file_ordering[idx], 'rb'), outfile) 58 | 59 | print(f"Done. Merged data written to {args.output}") -------------------------------------------------------------------------------- /postprocessing/plot_samples.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | import numpy as np 4 | import scipy.signal as sp 5 | import processing as pr 6 | import matplotlib.pyplot as plt 7 | from ruamel.yaml import YAML as ym 8 | 9 | # Check if a YAML file was provided as a command line argument 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument("yaml_file", nargs='?', default='config/default.yaml', 12 | help='Path to YAML configuration file') 13 | args = parser.parse_args() 14 | 15 | # Initialize Constants 16 | yaml = ym() # Always use safe load if not dumping 17 | with open(args.yaml_file) as stream: 18 | config = yaml.load(stream) 19 | rx_params = config["PLOT"] 20 | sample_rate = rx_params["sample_rate"] # Hertz 21 | rx_samps = rx_params["rx_samps"] # Received data to analyze 22 | orig_ch = rx_params["orig_chirp"] # Chirp associated with the received data 23 | direct_start = rx_params["direct_start"] 24 | echo_start = rx_params["echo_start"] 25 | sig_speed = rx_params["sig_speed"] 26 | 27 | print("--- Loaded constants from config.yaml ---") 28 | 29 | # Read and plot RX/TX 30 | rx_sig = pr.extractSig(rx_samps) 31 | print("--- Plotting real samples read from %s ---" % rx_samps) 32 | pr.plotChirpVsTime(rx_sig, 'Received Samples', sample_rate) 33 | 34 | tx_sig = pr.extractSig(orig_ch) 35 | print("--- Plotting transmited chirp, stored in %s ---" % orig_ch) 36 | pr.plotChirpVsTime(tx_sig, 'Transmitted Chirp', sample_rate) 37 | 38 | # Correlate the two chirps to determine time difference 39 | print("--- Match filtering received chirp with transmitted chirp ---") 40 | xcorr_sig = sp.correlate(rx_sig, tx_sig, mode='valid', method='auto') 41 | # as finddirectpath is written right now, it must be called before taking log of the signal 42 | # because if not, negative log values could have a 
greater absolute value than positive log values. 43 | dir_peak = pr.findDirectPath(xcorr_sig, direct_start, True) 44 | xcorr_sig = 20 * np.log10(np.absolute(xcorr_sig)) 45 | 46 | print("--- Plotting result of match filter ---") 47 | xcorr_samps = np.shape(xcorr_sig)[0] 48 | xcorr_time = np.zeros(xcorr_samps) 49 | for x in range (xcorr_samps): 50 | xcorr_time[x] = x * 1e6 /sample_rate 51 | 52 | plt.figure() 53 | plt.plot(xcorr_time, xcorr_sig) 54 | plt.title("Output of Match Filter: Signal") 55 | plt.xlabel('Time (us)') 56 | plt.ylabel('Power [dB]') 57 | plt.grid() 58 | 59 | plt.figure() 60 | plt.plot(range(-10,60), xcorr_sig[dir_peak-10:dir_peak+60]) 61 | plt.title("Output of Match Filter: Peaks") 62 | plt.xlabel('Sample') 63 | plt.ylabel('Power [dB]') 64 | plt.grid() 65 | 66 | [echo_samp, echo_dist] = pr.findEcho(xcorr_sig, sample_rate, dir_peak, echo_start, sig_speed, True) 67 | 68 | sys.stdout.flush() 69 | plt.show() -------------------------------------------------------------------------------- /tips/vscode.md: -------------------------------------------------------------------------------- 1 | # Using VSCode as your editor 2 | 3 | If you want to use Visual Studio Code as your IDE, it'll make your life easier if you tell it about your conda environment. 4 | 5 | Install the `Python`, `C/C++`, and `CMake Tools` extensions. 6 | 7 | ## Python setup 8 | 9 | Open the command palette (Ctrl-Shift-P or F1) and select `Python: Select Interpreter`. Hopefully, you'll see a path corresponding to a python binary in your conda environment. Select that one. 10 | 11 | ## C++ setup 12 | 13 | You'll need to create a script somewhere that activates your choosen conda environment. You can put it anywhere you like. It should be something like this: 14 | 15 | ```bash 16 | #!/bin/bash 17 | source /etc/profile.d/conda.sh 18 | conda activate 19 | ``` 20 | 21 | Open the command palette again and select `CMake: Edit User-Local CMake Kits` 22 | 23 | Copy the existing entry and create a new entry after it. Change the name to something you can recognize. Add a new paramter `environmentSetupScript` with a value of the full path to the activation script you just created. 24 | 25 | ```json 26 | { 27 | "name": "UHD Environment GCC 10.3.0 x86_64-linux-gnu", 28 | "compilers": { 29 | "C": "/bin/x86_64-linux-gnu-gcc-10", # copy from your default configuration 30 | "CXX": "/bin/x86_64-linux-gnu-g++-10" # copy from your default configuration 31 | }, 32 | "environmentSetupScript": "" 33 | } 34 | ``` 35 | 36 | Now the CMake extension will know to activate your conda environment before trying to build your project. 37 | 38 | Open command palette one more time and go to `C/C++: Edit Configurations (UI)`. This should open a file called `c_cpp_properties.json` in the `.vscode` folder. At the start of the file add an environment section with the path to your conda environement, like this: 39 | ```json 40 | { 41 | "env": { 42 | "conda.prefix": "/Users/abroome/opt/miniconda3/envs/srg_uhd_radar" 43 | }, 44 | "configurations:" [ 45 | ... 46 | ] 47 | } 48 | ``` 49 | 50 | Then find "include path" in the configuration section and add these two lines: 51 | 52 | ${env:conda.prefix}/include 53 | ${env:conda.prefix}/lib 54 | 55 | Save it and you should be done. 
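For reference, here is a rough sketch of how the finished `c_cpp_properties.json` might look once the `env` block and the two include-path lines above are in place. Only the `conda.prefix` entry and the two `${env:conda.prefix}` paths come from the steps described here; the configuration `name`, the `${workspaceFolder}/**` entry, and the language standard fields are illustrative defaults that your generated file may already set differently:

```json
{
    "env": {
        "conda.prefix": "/Users/abroome/opt/miniconda3/envs/srg_uhd_radar"
    },
    "configurations": [
        {
            "name": "Linux",
            "includePath": [
                "${workspaceFolder}/**",
                "${env:conda.prefix}/include",
                "${env:conda.prefix}/lib"
            ],
            "cStandard": "c17",
            "cppStandard": "c++17"
        }
    ]
}
```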
56 | 57 | If you don't know the path to your conda environment, you can find it by typing this in a terminal (with your conda environment activated): 58 | 59 | `echo $CONDA_PREFIX` 60 | 61 | The `conda.prefix` line should be all you need to change if you are switching between different conda environments. 62 | 63 | ## Specify a Build Directory 64 | To specify the source file location and build location for your cmake outputs, your `.vscode/settings.json` file would look something like this: 65 | 66 | ```json 67 | { 68 | "cmake.sourceDirectory": "${workspaceFolder}/sdr", 69 | "cmake.buildDirectory": "${workspaceFolder}/sdr/build", 70 | "files.associations": { 71 | "__threading_support": "cpp" 72 | } 73 | } 74 | ``` 75 | -------------------------------------------------------------------------------- /sdr/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014-2015 Ettus Research LLC 3 | # Copyright 2018 Ettus Research, a National Instruments Company 4 | # 5 | # SPDX-License-Identifier: GPL-3.0-or-later 6 | # 7 | 8 | cmake_minimum_required(VERSION 3.5.1) 9 | project(INIT_USRP CXX) 10 | 11 | message(STATUS "conda prefix: $ENV{CONDA_PREFIX}") 12 | set(CMAKE_LIBRARY_PATH "$ENV{CONDA_PREFIX}/lib") 13 | set(CMAKE_INCLUDE_PATH "$ENV{CONDA_PREFIX}/include") 14 | 15 | ### Configure Compiler ######################################################## 16 | set(CMAKE_CXX_STANDARD 17) 17 | 18 | if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" AND ${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") 19 | set(CMAKE_EXE_LINKER_FLAGS "-lthr ${CMAKE_EXE_LINKER_FLAGS}") 20 | set(CMAKE_CXX_FLAGS "-stdlib=libc++ ${CMAKE_CXX_FLAGS}") 21 | endif() 22 | 23 | set(CMAKE_CXX_FLAGS "-Wno-psabi ${CMAKE_CXX_FLAGS}") 24 | 25 | ### Set up build environment ################################################## 26 | # Choose a static or shared-library build (shared is default, and static will 27 | # probably need some special care!) 28 | # Set this to ON in order to link a static build of UHD: 29 | option(UHD_USE_STATIC_LIBS OFF) 30 | 31 | # To add UHD as a dependency to this project, add a line such as this: 32 | find_package(UHD 3.15.0 REQUIRED) 33 | find_package(yaml-cpp REQUIRED) 34 | # The version in ^^^^^ here is a minimum version. 35 | # To specify an exact version: 36 | #find_package(UHD 4.0.0 EXACT REQUIRED) 37 | 38 | # This example also requires Boost. 
39 | # Set components here, then include UHDBoost to do the actual finding 40 | set(UHD_BOOST_REQUIRED_COMPONENTS 41 | program_options 42 | system 43 | thread 44 | filesystem 45 | chrono 46 | ) 47 | set(BOOST_MIN_VERSION 1.58) 48 | include(UHDBoost) 49 | 50 | # need these include and link directories for the build 51 | include_directories( 52 | ${Boost_INCLUDE_DIRS} 53 | ${UHD_INCLUDE_DIRS} 54 | ${YAML_CPP_INCLUDE_DIR} 55 | ) 56 | link_directories(${Boost_LIBRARY_DIRS}) 57 | 58 | ### Make the executables ####################################################### 59 | # Radar executable 60 | add_executable(radar main.cpp rf_settings.cpp rf_settings.hpp utils.cpp utils.hpp pseudorandom_phase.cpp pseudorandom_phase.hpp) 61 | # Psuedorandom phase noise generation for post-processing 62 | add_executable(pseudorandom_phase_codes_to_file pseudorandom_phase_to_file.cpp pseudorandom_phase.cpp pseudorandom_phase.hpp) 63 | 64 | set(CMAKE_BUILD_TYPE "Release") 65 | 66 | # Shared library case: All we need to do is link against the library, and 67 | # anything else we need (in this case, some Boost libraries): 68 | if(NOT UHD_USE_STATIC_LIBS) 69 | message(STATUS "Linking against shared UHD library.") 70 | target_link_libraries(radar ${UHD_LIBRARIES} ${Boost_LIBRARIES} ${YAML_CPP_LIBRARIES}) 71 | # Shared library case: All we need to do is link against the library, and 72 | # anything else we need (in this case, some Boost libraries): 73 | else(NOT UHD_USE_STATIC_LIBS) 74 | message(STATUS "Linking against static UHD library.") 75 | target_link_libraries(radar 76 | # We could use ${UHD_LIBRARIES}, but linking requires some extra flags, 77 | # so we use this convenience variable provided to us 78 | ${UHD_STATIC_LIB_LINK_FLAG} 79 | # Also, when linking statically, we need to pull in all the deps for 80 | # UHD as well, because the dependencies don't get resolved automatically 81 | ${UHD_STATIC_LIB_DEPS} 82 | ) 83 | endif(NOT UHD_USE_STATIC_LIBS) 84 | 85 | ### Once it's built... ######################################################## 86 | # Here, you would have commands to install your program. 87 | # We will skip these in this example. 
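# For reference only: if you did want an install step here, a minimal (untested) sketch might look like the commented-out rule below; the `bin` destination is just an assumed choice, not something this project defines. # install(TARGETS radar pseudorandom_phase_codes_to_file RUNTIME DESTINATION bin)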
88 | -------------------------------------------------------------------------------- /postprocessing/notebooks/archived_experiments/20230629 Summary of Noise Variance.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "69874c58-b409-447e-97a7-bc7805ea136f", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import os\n", 11 | "import xarray as xr\n", 12 | "import hvplot.xarray\n", 13 | "import holoviews as hv\n", 14 | "import numpy as np\n", 15 | "\n", 16 | "import matplotlib\n", 17 | "import matplotlib.pyplot as plt" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": null, 23 | "id": "841d2aa3-6c1d-4edd-a779-631880d003cf", 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "input_base = \"20230628-outputs/\"\n", 28 | "input_suffix = \"-stack.nc\"\n", 29 | "\n", 30 | "files = [\n", 31 | " (0.003, \"20230629_135734\"),\n", 32 | " \n", 33 | " (0.003, \"20230621_173826\"),\n", 34 | " (0.006, \"20230621_181056\"),\n", 35 | " (0.009, \"20230622_101719\"),\n", 36 | " (0.012, \"20230622_104230\"),\n", 37 | " \n", 38 | " (0.03, \"20230629_114710\"),\n", 39 | " (0.3, \"20230629_133038\"),\n", 40 | " (0.3, \"20230629_160608\"), # longer total duration\n", 41 | " (3.0, \"20230629_134549\"),\n", 42 | "]" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "id": "2eb9a737-4f1e-43c9-b8f7-7c3f1b62c94d", 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "cmap = [matplotlib.colors.rgb2hex(c) for c in plt.cm.viridis(np.linspace(0, 1, len(files)))]\n", 53 | "cmap" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "id": "00432bc9-79c9-4d01-8b91-d74a8ba69d40", 60 | "metadata": {}, 61 | "outputs": [], 62 | "source": [ 63 | "plots = []\n", 64 | "\n", 65 | "for idx, (pulse_rep_int, prefix) in enumerate(files):\n", 66 | " ds = xr.open_dataset(os.path.join(input_base, prefix+input_suffix)).swap_dims({'t':'n_stack'})\n", 67 | "\n", 68 | " plots.append(hv.Curve(ds, label=f\"{pulse_rep_int} [{prefix[-2:]}]\").opts(logx=True, logy=True, color=cmap[idx]))\n", 69 | " plots.append(hv.VLine(100.0/pulse_rep_int).opts(color=cmap[idx], alpha=0.5, line_dash=\"dashed\"))\n", 70 | "\n", 71 | "p = hv.Overlay(plots).opts(width=1000, height=500, show_grid=True)\n", 72 | "p" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": null, 78 | "id": "3ef94f2c-f70e-444e-a728-29225ad72882", 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "plots = []\n", 83 | "\n", 84 | "for idx, (pulse_rep_int, prefix) in enumerate(files):\n", 85 | " ds = xr.open_dataset(os.path.join(input_base, prefix+input_suffix))\n", 86 | "\n", 87 | " plots.append(hv.Curve(ds, label=f\"{pulse_rep_int} [{prefix[-2:]}]\").opts(logx=True, logy=True, color=cmap[idx]))\n", 88 | " #plots.append(hv.VLine(10.0/pulse_rep_int).opts(color=cmap[idx], alpha=0.5, line_dash=\"dashed\"))\n", 89 | "\n", 90 | "p = hv.Overlay(plots).opts(width=1000, height=500, show_grid=True)\n", 91 | "p" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "id": "c439298d-0cb6-4a3f-b088-6e34b4c1afa6", 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [] 101 | } 102 | ], 103 | "metadata": { 104 | "kernelspec": { 105 | "display_name": "rg2", 106 | "language": "python", 107 | "name": "python3" 108 | }, 109 | "language_info": { 110 | "codemirror_mode": { 111 | "name": "ipython", 112 | 
"version": 3 113 | }, 114 | "file_extension": ".py", 115 | "mimetype": "text/x-python", 116 | "name": "python", 117 | "nbconvert_exporter": "python", 118 | "pygments_lexer": "ipython3", 119 | "version": "3.11.3" 120 | } 121 | }, 122 | "nbformat": 4, 123 | "nbformat_minor": 5 124 | } 125 | -------------------------------------------------------------------------------- /config/default_x310.yaml: -------------------------------------------------------------------------------- 1 | # This is a configuration file meant for the Ettus X310 USRP 2 | 3 | # ----------------------- Chirp generation ------------------------ # 4 | GENERATE: 5 | sample_rate: &s_rate 20e6 # Of the generated chirp, tx and rx sample rates [sps] 6 | chirp_type: 'linear' # Can be 'linear' 7 | chirp_bandwidth: 20e6 # [Hz] 8 | lo_offset_sw: 0e6 # [Hz] lo offset to be applied digitally in chirp generation 9 | window: 'rectangular' # Window function applied to the chirp 10 | chirp_length: &chirp_len 10e-6 # [s] chirp length without zero padding 11 | pulse_length: &pulse_len 10e-6 # [s] total pulse length (chirp + symmetric zero padding) 12 | out_file: &ch_sent "data/chirp.bin" # The name of the output binary file 13 | show_plot: False # Display a time-domain plot of the generated chirp 14 | 15 | # ----------------- TX/RX for X310 --------------------- # 16 | DEVICE: 17 | device_args: "addr=192.168.10.2,dboard_clock_rate=20e6" # device address 18 | subdev: "A:0 B:0" 19 | clk_ref: "internal" # gpsdo, internal (default), or external 20 | clk_rate: 200e6 # Clock Rate [Hz] 21 | tx_channels: "0" 22 | rx_channels: "1" 23 | cpu_format: "fc32" # CPU-side format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a602a64b4937a85dba84e7f724387e252 24 | # Note: the rest of the processing pipeline supports only the following cpu_format options: fc32, sc16, sc8 25 | otw_format: "sc16" # On the wire format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a0ba0e946d2f83f7ac085f4f4e2ce9578 26 | GPIO: 27 | gpio_bank: "FP0" # which GPIO bank to use (FP0 is front panel and default) 28 | pwr_amp_pin: "-1" # which GPIO pin (on the DB15 connector) to use for external power amplifier control (set to -1 if not using), (normally 8) 29 | ref_out: 0 # whether to turn the 10 MHz reference out signal on (1) or off (0) set to (-1) if SDR does not support 30 | RF0: # parameters for first RF channel 31 | rx_rate: *s_rate # RX Sample Rate [sps] 32 | tx_rate: *s_rate # TX Sample Rate [sps] 33 | freq: 330e6 # Center Frequency (for mixing) 34 | lo_offset: 0e6 # LO offset [Hz] 35 | rx_gain: 10 # RX Gain [dB] 36 | tx_gain: 20 # TX Gain [dB] 37 | bw: 0 # TX/RW Bandwidth [Hz] 38 | tx_ant: "TX/RX" 39 | rx_ant: "RX2" 40 | transmit: true # "true" (or not set) for normal operation, set to "false" to completely disable transmit 41 | tuning_args: "" # set int_n or fractional tuning args, leave as "" to do nothing 42 | RF1: # parameters for second RF channel, only used if set to multiple channels 43 | rx_rate: *s_rate # RX Sample Rate [sps] 44 | tx_rate: *s_rate # TX Sample Rate [sps] 45 | freq: 20e6 # Center Frequency (for mixing) 46 | lo_offset: 0 # LO offset [Hz] 47 | rx_gain: 10 # RX Gain [dB] 48 | tx_gain: 10 # TX Gain [dB] 49 | bw: 0 # TX/RW Bandwidth [Hz] 50 | tx_ant: "TX/RX" 51 | rx_ant: "RX2" 52 | CHIRP: 53 | time_offset: 1 # Time before first receive [s] 54 | tx_duration: *pulse_len # Transmission duration [s] 55 | rx_duration: 60e-6 # Receive duration [s] 56 | tr_on_lead: 0e-6 # Time from GPIO output toggle on to TX [s] 57 | 
tr_off_trail: 0e-6 # Time from TX off to GPIO output off [s] 58 | pulse_rep_int: 500e-6 # Chirp period [s] 59 | tx_lead: 0e-6 # Time between start of TX and RX[s] 60 | num_pulses: &num_pulses 10000 # No. of chirps to TX/RX - set to -1 to continuously transmit pulses until stopped 61 | num_presums: 100 62 | phase_dithering: true 63 | FILES: 64 | chirp_loc: *ch_sent # Chirp to transmit 65 | save_loc: &save_loc "data/rx_samps.bin" # Save rx data here 66 | gps_loc: &gps_save_loc "data/gps_log.txt" # save gps data here (only works if gpsdo is selected as the clock source) 67 | max_chirps_per_file: 50000 # Maximum number of RX from a chirp to write to a single file -- set to -1 to avoid breaking into multiple files 68 | RUN_MANAGER: # These settings are only used by run.py -- not read by main.cpp at all 69 | final_save_loc: "data/rx_samps_merged.bin" # specify the save location for the big final file, leave blank if you don't want to save a big file 70 | save_partial_files: False # set to true if you want individual small files to be copied, set to false if you just want the big merged file to be copied 71 | save_gps: False # set to true if using gps and wanting to save gps location data, set to false otherwise -------------------------------------------------------------------------------- /config/orca_paper/duty_cycle_b205.yaml: -------------------------------------------------------------------------------- 1 | # This is a configuration file meant for the Ettus USRP B205mini-i 2 | 3 | # ----------------------- Chirp generation ------------------------ # 4 | GENERATE: 5 | sample_rate: &s_rate 56e6 # Of the generated chirp, tx and rx sample rates [sps] 6 | chirp_type: 'linear' # Can be 'linear' 7 | chirp_bandwidth: 20e6 # [Hz] 8 | lo_offset_sw: 0e6 # [Hz] lo offset to be applied digitally in chirp generation 9 | window: 'rectangular' # Window function applied to the chirp 10 | chirp_length: &chirp_len 10e-6 # [s] chirp length without zero padding 11 | pulse_length: &pulse_len 10e-6 # [s] total pulse length (chirp + symmetric zero padding) 12 | out_file: &ch_sent "data/chirp.bin" # The name of the output binary file 13 | show_plot: False # Display a time-domain plot of the generated chirp 14 | 15 | # ----------------- TX/RX for USRP_B205mini-i --------------------- # 16 | DEVICE: 17 | device_args: "num_recv_frames=700,num_send_frames=700,recv_frame_size=11000,send_frame_size=11000" # device address 18 | subdev: "A:A" 19 | clk_ref: "internal" 20 | clk_rate: 56e6 # Clock Rate [Hz] 21 | tx_channels: "0" 22 | rx_channels: "0" 23 | cpu_format: "fc32" # CPU-side format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a602a64b4937a85dba84e7f724387e252 24 | # Note: the rest of the processing pipeline supports only the following cpu_format options: fc32, sc16, sc8 25 | otw_format: "sc12" # On the wire format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a0ba0e946d2f83f7ac085f4f4e2ce9578 26 | GPIO: 27 | gpio_bank: "FP0" # which GPIO bank to use (FP0 is front panel and default) 28 | pwr_amp_pin: "-1" # which GPIO pin (on the DB15 connector) to use for external power amplifier control (set to -1 if not using) 29 | ref_out: -1 # whether to turn the 10 MHz reference out signal on (1) or off (0) set to (-1) if SDR does not support 30 | RF0: 31 | rx_rate: *s_rate # RX Sample Rate [sps] 32 | tx_rate: *s_rate # TX Sample Rate [sps] 33 | freq: 340e6 # Center Frequency (for mixing) 34 | lo_offset: 0e6 # LO offset [Hz] 35 | rx_gain: 40 # RX Gain [dB] 36 | tx_gain: 50 # 
TX Gain [dB] - 60.8 is -10 dBm output 37 | bw: 56e6 # TX/RW Bandwidth [Hz] 38 | tx_ant: "TX/RX" 39 | rx_ant: "RX2" 40 | transmit: true # "true" (or not set) for normal operation, set to "false" to completely disable transmit 41 | tuning_args: "" # set int_n or fractional tuning args, leave as "" to do nothing 42 | RF1: # parameters for second RF channel 43 | rx_rate: *s_rate # RX Sample Rate [sps] 44 | tx_rate: *s_rate # TX Sample Rate [sps] 45 | freq: 500e6 # Center Frequency (for mixing) 46 | lo_offset: 0 # LO offset [Hz] 47 | rx_gain: 10 # RX Gain [dB] 48 | tx_gain: 10 # TX Gain [dB] 49 | bw: 0 # TX/RW Bandwidth [Hz] 50 | tx_ant: "TX/RX" 51 | rx_ant: "RX2" 52 | CHIRP: 53 | time_offset: 1 # Time before first receive [s] 54 | tx_duration: *pulse_len # Transmission duration [s] 55 | rx_duration: 100e-6 # Receive duration [s] 56 | tr_on_lead: 0e-6 # Time from GPIO output toggle on to TX [s] 57 | tr_off_trail: 0e-6 # Time from TX off to GPIO output off [s] 58 | pulse_rep_int: 250e-6 # Chirp period [s] 59 | tx_lead: 0e-6 # Time between start of TX and RX[s] 60 | num_pulses: &num_pulses 1000000 # No. of chirps to TX/RX - set to -1 to continuously transmit pulses until stopped 61 | num_presums: 100 62 | phase_dithering: true 63 | FILES: 64 | chirp_loc: *ch_sent # Chirp to transmit 65 | save_loc: &save_loc "data/rx_samps.bin" #"/dev/shm/rx_samps.bin" #"/media/usb/usrp_test/rx_samps.bin" #"data/rx_samps.bin" # Save rx data here 66 | gps_loc: &gps_save_loc "data/gps_log.txt" # save gps data here (only works if gpsdo is selected as the clock source) 67 | max_chirps_per_file: -1 # Maximum number of RX from a chirp to write to a single file -- set to -1 to avoid breaking into multiple files 68 | RUN_MANAGER: # These settings are only used by run.py -- not read by main.cpp at all 69 | # Note: if max_chirps_per_file = -1 (i.e. 
all data will be written directly to a single file, then final_save_loc and save_partial_files will be ignored 70 | final_save_loc: null #"data/rx_samps_merged.bin" # specify the save location for the big final file, set to null if you don't want to save a big file 71 | save_partial_files: False # set to true if you want individual small files to be copied, set to false if you just want the big merged file to be copied 72 | save_gps: False # set to true if using gps and wanting to save gps location data, set to false otherwise` 73 | -------------------------------------------------------------------------------- /config/orca_paper/dithering_b205.yaml: -------------------------------------------------------------------------------- 1 | # This is a configuration file meant for the Ettus USRP B205mini-i 2 | 3 | # ----------------------- Chirp generation ------------------------ # 4 | GENERATE: 5 | sample_rate: &s_rate 56e6 # Of the generated chirp, tx and rx sample rates [sps] 6 | chirp_type: 'linear' # Can be 'linear' 7 | chirp_bandwidth: 20e6 # [Hz] 8 | lo_offset_sw: 12.5e6 # [Hz] lo offset to be applied digitally in chirp generation 9 | window: 'rectangular' # Window function applied to the chirp 10 | chirp_length: &chirp_len 10e-6 # [s] chirp length without zero padding 11 | pulse_length: &pulse_len 10e-6 # [s] total pulse length (chirp + symmetric zero padding) 12 | out_file: &ch_sent "data/chirp.bin" # The name of the output binary file 13 | show_plot: False # Display a time-domain plot of the generated chirp 14 | 15 | # ----------------- TX/RX for USRP_B205mini-i --------------------- # 16 | DEVICE: 17 | device_args: "num_recv_frames=700,num_send_frames=700,recv_frame_size=11000,send_frame_size=11000" # device address 18 | subdev: "A:A" 19 | clk_ref: "internal" 20 | clk_rate: 56e6 # Clock Rate [Hz] 21 | tx_channels: "0" 22 | rx_channels: "0" 23 | cpu_format: "fc32" # CPU-side format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a602a64b4937a85dba84e7f724387e252 24 | # Note: the rest of the processing pipeline supports only the following cpu_format options: fc32, sc16, sc8 25 | otw_format: "sc12" # On the wire format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a0ba0e946d2f83f7ac085f4f4e2ce9578 26 | GPIO: 27 | gpio_bank: "FP0" # which GPIO bank to use (FP0 is front panel and default) 28 | pwr_amp_pin: "-1" # which GPIO pin (on the DB15 connector) to use for external power amplifier control (set to -1 if not using) 29 | ref_out: -1 # whether to turn the 10 MHz reference out signal on (1) or off (0) set to (-1) if SDR does not support 30 | RF0: 31 | rx_rate: *s_rate # RX Sample Rate [sps] 32 | tx_rate: *s_rate # TX Sample Rate [sps] 33 | freq: 330e6 # Center Frequency (for mixing) 34 | lo_offset: 0e6 # LO offset [Hz] 35 | rx_gain: 40 # RX Gain [dB] 36 | tx_gain: 33.5 # TX Gain [dB] - 60.8 is -10 dBm output 37 | bw: 56e6 # TX/RW Bandwidth [Hz] 38 | tx_ant: "TX/RX" 39 | rx_ant: "RX2" 40 | transmit: true # "true" (or not set) for normal operation, set to "false" to completely disable transmit 41 | tuning_args: "" # set int_n or fractional tuning args, leave as "" to do nothing 42 | RF1: # parameters for second RF channel 43 | rx_rate: *s_rate # RX Sample Rate [sps] 44 | tx_rate: *s_rate # TX Sample Rate [sps] 45 | freq: 500e6 # Center Frequency (for mixing) 46 | lo_offset: 0 # LO offset [Hz] 47 | rx_gain: 10 # RX Gain [dB] 48 | tx_gain: 10 # TX Gain [dB] 49 | bw: 0 # TX/RW Bandwidth [Hz] 50 | tx_ant: "TX/RX" 51 | rx_ant: "RX2" 52 | CHIRP: 53 
| time_offset: 1 # Time before first receive [s] 54 | tx_duration: *pulse_len # Transmission duration [s] 55 | rx_duration: 25e-6 # Receive duration [s] 56 | tr_on_lead: 0e-6 # Time from GPIO output toggle on to TX [s] 57 | tr_off_trail: 0e-6 # Time from TX off to GPIO output off [s] 58 | pulse_rep_int: 250e-6 # Chirp period [s] 59 | tx_lead: 0e-6 # Time between start of TX and RX[s] 60 | num_pulses: &num_pulses 1000000 # No. of chirps to TX/RX - set to -1 to continuously transmit pulses until stopped 61 | num_presums: 1 62 | phase_dithering: true 63 | FILES: 64 | chirp_loc: *ch_sent # Chirp to transmit 65 | save_loc: &save_loc "data/rx_samps.bin" #"/dev/shm/rx_samps.bin" #"/media/usb/usrp_test/rx_samps.bin" #"data/rx_samps.bin" # Save rx data here 66 | gps_loc: &gps_save_loc "data/gps_log.txt" # save gps data here (only works if gpsdo is selected as the clock source) 67 | max_chirps_per_file: -1 # Maximum number of RX from a chirp to write to a single file -- set to -1 to avoid breaking into multiple files 68 | RUN_MANAGER: # These settings are only used by run.py -- not read by main.cpp at all 69 | # Note: if max_chirps_per_file = -1 (i.e. all data will be written directly to a single file, then final_save_loc and save_partial_files will be ignored 70 | final_save_loc: null #"data/rx_samps_merged.bin" # specify the save location for the big final file, set to null if you don't want to save a big file 71 | save_partial_files: False # set to true if you want individual small files to be copied, set to false if you just want the big merged file to be copied 72 | save_gps: False # set to true if using gps and wanting to save gps location data, set to false otherwise` 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /config/orca_paper/phase_noise_b205.yaml: -------------------------------------------------------------------------------- 1 | # This is a configuration file meant for the Ettus USRP B205mini-i 2 | 3 | # ----------------------- Chirp generation ------------------------ # 4 | GENERATE: 5 | sample_rate: &s_rate 56e6 # Of the generated chirp, tx and rx sample rates [sps] 6 | chirp_type: 'linear' # Can be 'linear' 7 | chirp_bandwidth: 20e6 # [Hz] 8 | lo_offset_sw: 12.5e6 # [Hz] lo offset to be applied digitally in chirp generation 9 | window: 'rectangular' # Window function applied to the chirp 10 | chirp_length: &chirp_len 10e-6 # [s] chirp length without zero padding 11 | pulse_length: &pulse_len 10e-6 # [s] total pulse length (chirp + symmetric zero padding) 12 | out_file: &ch_sent "data/chirp.bin" # The name of the output binary file 13 | show_plot: False # Display a time-domain plot of the generated chirp 14 | 15 | # ----------------- TX/RX for USRP_B205mini-i --------------------- # 16 | DEVICE: 17 | device_args: "num_recv_frames=700,num_send_frames=700,recv_frame_size=11000,send_frame_size=11000" # device address 18 | subdev: "A:A" 19 | clk_ref: "internal" 20 | clk_rate: 56e6 # Clock Rate [Hz] 21 | tx_channels: "0" 22 | rx_channels: "0" 23 | cpu_format: "fc32" # CPU-side format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a602a64b4937a85dba84e7f724387e252 24 | # Note: the rest of the processing pipeline supports only the following cpu_format options: fc32, sc16, sc8 25 | otw_format: "sc12" # On the wire format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a0ba0e946d2f83f7ac085f4f4e2ce9578 26 | GPIO: 27 | gpio_bank: "FP0" # which GPIO bank to use (FP0 is front panel and 
default) 28 | pwr_amp_pin: "-1" # which GPIO pin (on the DB15 connector) to use for external power amplifier control (set to -1 if not using) 29 | ref_out: -1 # whether to turn the 10 MHz reference out signal on (1) or off (0) set to (-1) if SDR does not support 30 | RF0: 31 | rx_rate: *s_rate # RX Sample Rate [sps] 32 | tx_rate: *s_rate # TX Sample Rate [sps] 33 | freq: 330e6 # Center Frequency (for mixing) 34 | lo_offset: 0e6 # LO offset [Hz] 35 | rx_gain: 40 # RX Gain [dB] 36 | tx_gain: 33.5 # TX Gain [dB] - 60.8 is -10 dBm output 37 | bw: 56e6 # TX/RW Bandwidth [Hz] 38 | tx_ant: "TX/RX" 39 | rx_ant: "RX2" 40 | transmit: true # "true" (or not set) for normal operation, set to "false" to completely disable transmit 41 | tuning_args: "" # set int_n or fractional tuning args, leave as "" to do nothing 42 | RF1: # parameters for second RF channel 43 | rx_rate: *s_rate # RX Sample Rate [sps] 44 | tx_rate: *s_rate # TX Sample Rate [sps] 45 | freq: 500e6 # Center Frequency (for mixing) 46 | lo_offset: 0 # LO offset [Hz] 47 | rx_gain: 10 # RX Gain [dB] 48 | tx_gain: 10 # TX Gain [dB] 49 | bw: 0 # TX/RW Bandwidth [Hz] 50 | tx_ant: "TX/RX" 51 | rx_ant: "RX2" 52 | CHIRP: 53 | time_offset: 1 # Time before first receive [s] 54 | tx_duration: *pulse_len # Transmission duration [s] 55 | rx_duration: 25e-6 # Receive duration [s] 56 | tr_on_lead: 0e-6 # Time from GPIO output toggle on to TX [s] 57 | tr_off_trail: 0e-6 # Time from TX off to GPIO output off [s] 58 | pulse_rep_int: 250e-6 # Chirp period [s] 59 | tx_lead: 0e-6 # Time between start of TX and RX[s] 60 | num_pulses: &num_pulses 6000000 # No. of chirps to TX/RX - set to -1 to continuously transmit pulses until stopped 61 | num_presums: 1 62 | phase_dithering: true 63 | FILES: 64 | chirp_loc: *ch_sent # Chirp to transmit 65 | save_loc: &save_loc "data/rx_samps.bin" #"/dev/shm/rx_samps.bin" #"/media/usb/usrp_test/rx_samps.bin" #"data/rx_samps.bin" # Save rx data here 66 | gps_loc: &gps_save_loc "data/gps_log.txt" # save gps data here (only works if gpsdo is selected as the clock source) 67 | max_chirps_per_file: -1 # Maximum number of RX from a chirp to write to a single file -- set to -1 to avoid breaking into multiple files 68 | RUN_MANAGER: # These settings are only used by run.py -- not read by main.cpp at all 69 | # Note: if max_chirps_per_file = -1 (i.e. 
all data will be written directly to a single file, then final_save_loc and save_partial_files will be ignored 70 | final_save_loc: null #"data/rx_samps_merged.bin" # specify the save location for the big final file, set to null if you don't want to save a big file 71 | save_partial_files: False # set to true if you want individual small files to be copied, set to false if you just want the big merged file to be copied 72 | save_gps: False # set to true if using gps and wanting to save gps location data, set to false otherwise` 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /config/synthetic_config.yaml: -------------------------------------------------------------------------------- 1 | # This is a configuration file meant for the Ettus X310 USRP 2 | 3 | # ----------------------- Chirp generation ------------------------ # 4 | GENERATE: 5 | sample_rate: &s_rate 50e6 # Of the generated chirp, tx and rx sample rates [sps] 6 | chirp_type: 'linear' # Can be 'linear' 7 | chirp_bandwidth: 40e6 # [Hz] 8 | window: 'rectangular' # Window function applied to the chirp 9 | chirp_length: &chirp_len 20e-6 # [s] chirp length without zero padding 10 | pulse_length: &pulse_len 20e-6 # [s] total pulse length (chirp + symmetric zero padding) 11 | out_file: &ch_sent "data/chirp.bin" # The name of the output binary file 12 | show_plot: False # Display a time-domain plot of the generated chirp 13 | 14 | # ----------------- TX/RX for X310 --------------------- # 15 | DEVICE: 16 | device_args: "addr=192.168.10.2,dboard_clock_rate=20e6" # device address 17 | subdev: "A:0 B:0" 18 | clk_ref: "internal" # gpsdo, internal (default), or external 19 | clk_rate: 200e6 # Clock Rate [Hz] 20 | tx_channels: "0" 21 | rx_channels: "1" 22 | otw_format: "sc16" # On the wire format - see https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a0ba0e946d2f83f7ac085f4f4e2ce9578 23 | GPIO: 24 | gpio_bank: "FP0" # which GPIO bank to use (FP0 is front panel and default) 25 | pwr_amp_pin: "-1" # which GPIO pin (on the DB15 connector) to use for external power amplifier control (set to -1 if not using), (normally 8) 26 | ref_out: true # whether to turn the 10 MHz reference out signal on (true) or off (false) 27 | RF0: # parameters for first RF channel 28 | rx_rate: *s_rate # RX Sample Rate [sps] 29 | tx_rate: *s_rate # TX Sample Rate [sps] 30 | freq: 330e6 # Center Frequency (for mixing) 31 | rx_gain: 10 # RX Gain [dB] 32 | tx_gain: 10 # TX Gain [dB] 33 | bw: 0 # TX/RW Bandwidth [Hz] 34 | tx_ant: "TX/RX" 35 | rx_ant: "RX2" 36 | RF1: # parameters for second RF channel, only used if set to multiple channels 37 | rx_rate: *s_rate # RX Sample Rate [sps] 38 | tx_rate: *s_rate # TX Sample Rate [sps] 39 | freq: 20e6 # Center Frequency (for mixing) 40 | rx_gain: 10 # RX Gain [dB] 41 | tx_gain: 10 # TX Gain [dB] 42 | bw: 0 # TX/RW Bandwidth [Hz] 43 | tx_ant: "TX/RX" 44 | rx_ant: "RX2" 45 | CHIRP: 46 | time_offset: 1 # Time before first receive [s] 47 | tx_duration: *pulse_len # Transmission duration [s] 48 | rx_duration: 30e-6 # Receive duration [s] 49 | tr_on_lead: 0e-6 # Time from GPIO output toggle on to TX [s] 50 | tr_off_trail: 0e-6 # Time from TX off to GPIO output off [s] 51 | pulse_rep_int: 5e-3 # Chirp period [s] 52 | tx_lead: 0e-6 # Time between start of TX and RX[s] 53 | num_pulses: &num_pulses 1000 # No. of chirps to TX/RX - set to -1 to continuously transmit pulses until stopped 54 | num_presums: &num_presums 1 # No. 
of chirps to (coherently) presum onboard (must be at least 1) 55 | FILES: 56 | chirp_loc: *ch_sent # Chirp to transmit 57 | save_loc: &save_loc "data/rx_samps.bin" # Save rx data here 58 | gps_loc: &gps_save_loc "data/gps_log.txt" # save gps data here (only works if gpsdo is selected as the clock source) 59 | max_chirps_per_file: 50000 # Maximum number of RX from a chirp to write to a single file -- set to -1 to avoid breaking into multiple files 60 | RUN_MANAGER: # These settings are only used by run.py -- not read by main.cpp at all 61 | final_save_loc: "data/rx_samps_merged.bin" # specify the save location for the big final file, leave blank if you don't want to save a big file 62 | save_partial_files: False # set to true if you want individual small files to be copied, set to false if you just want the big merged file to be copied 63 | save_gps: False # set to true if using gps and wanting to save gps location data, set to false otherwise 64 | # -------------------------- Plot Samples --------------------------- # 65 | PLOT: 66 | rx_samps: *save_loc # Receive data to use 67 | orig_chirp: *ch_sent # Chirp associated with receive data 68 | sample_rate: *s_rate 69 | sig_speed: 1.685e8 # Speed of signal through medium [m/s] 70 | direct_start: &dir_start 0 # Start search for direct path @ this sample 71 | echo_start: 1 # Start search for echo @ this # of samps. after direct path 72 | 73 | # --------------------------- Noise Test -----------------------------# 74 | NOISE: 75 | rx_samps: *save_loc # Receive data to use 76 | orig_chirp: *ch_sent # Chirp associated with receive data 77 | sample_rate: *s_rate 78 | noise_std: 0.1 # Standard deviation for white noise generation 79 | #coherent_sums: *coh_sums # Number of sums to eliminate noise. 80 | direct_start: *dir_start # Sample at which to start search for direct path 81 | show_graphs: False # For debugging 82 | describe: False # For debugging -------------------------------------------------------------------------------- /postprocessing/notebooks/orca_paper/Plot Phase Coherence Pickles.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import pickle\n", 10 | "import numpy as np\n", 11 | "import matplotlib.pyplot as plt\n", 12 | "import matplotlib" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "matplotlib.rcParams.update({\n", 22 | " 'font.size': 16,\n", 23 | " 'legend.fontsize': 10,\n", 24 | " 'lines.linewidth': 2,\n", 25 | " })" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "pickle_paths = [\n", 35 | " (\"B205\", \"/media/thomas/Extreme SSD/orca_paper_data_files/phase_noise/b205/20240222_203345_stacking_stats_20240228_161251_bin187.pickle\"),\n", 36 | " (\"X310\", \"/media/thomas/Extreme SSD/orca_paper_data_files/phase_noise/x310/20240222_143225-phase-noise-stats.pickle\")\n", 37 | "]\n", 38 | "\n", 39 | "phase_stats = []\n", 40 | "\n", 41 | "for name, path in pickle_paths:\n", 42 | " with open(path, \"rb\") as f:\n", 43 | " s = pickle.load(f)\n", 44 | " for k in list(s.keys()):\n", 45 | " s[\"stack_\"+k] = s[k]\n", 46 | " phase_stats.append((name, s))\n" 47 | ] 48 | }, 49 | { 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "### Comparison plot" 54 | ] 
55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "fig, ax_pwr = plt.subplots(figsize=(6, 4))\n", 63 | "\n", 64 | "for (title, data) in phase_stats:\n", 65 | " pwr_normalized = data[\"stack_signal_peak_pwr_mean\"] / data[\"stack_signal_peak_pwr_mean\"][0]\n", 66 | " pwr_db_normalized = 10*np.log10(pwr_normalized)\n", 67 | "\n", 68 | " std_upper = 10*np.log10((data[\"stack_signal_peak_pwr_mean\"] + np.sqrt(data[\"stack_signal_peak_pwr_variance\"]))/data[\"stack_signal_peak_pwr_mean\"][0])\n", 69 | " std_lower = 10*np.log10((data[\"stack_signal_peak_pwr_mean\"] - np.sqrt(data[\"stack_signal_peak_pwr_variance\"]))/data[\"stack_signal_peak_pwr_mean\"][0])\n", 70 | " \n", 71 | " l = ax_pwr.plot(data[\"actual_stack_t\"], pwr_db_normalized, label=title+\" Mean\")\n", 72 | " c = l[0].get_color()\n", 73 | " ax_pwr.plot(data[\"actual_stack_t\"], std_upper, linestyle=\"--\", color=c, linewidth=1, label=\"+/- 1 Std Dev\")\n", 74 | " ax_pwr.plot(data[\"actual_stack_t\"], std_lower, linestyle=\"--\", color=c, linewidth=1)\n", 75 | "\n", 76 | " #ax_pwr.fill_between(data[\"actual_stack_t\"], std_lower, std_upper, alpha=0.5)\n", 77 | "\n", 78 | "ax_pwr.set_ylabel(\"Signal Power [dB]\\n(relative to single pulse)\")\n", 79 | "ax_pwr.set_xlabel(\"Wall Clock Integration Time [s]\")\n", 80 | "ax_pwr.semilogx()\n", 81 | "ax_pwr.legend()\n", 82 | "ax_pwr.grid(True)\n", 83 | "\n", 84 | "fig.tight_layout()\n", 85 | "fig.savefig(\"phase_noise_summary.png\", dpi=300)\n", 86 | "plt.show()" 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "metadata": {}, 92 | "source": [ 93 | "### Single file plot" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "stats = phase_stats[1]\n", 103 | "data = stats[1]\n", 104 | "title = stats[0]\n", 105 | "print(title)\n", 106 | "\n", 107 | "fig, ax_pwr = plt.subplots(figsize=(6, 4))\n", 108 | "\n", 109 | "ax_pwr.plot(data[\"actual_stack_t\"], data[\"stack_signal_peak_pwr_mean\"])\n", 110 | "# Add a shaded region for the variance\n", 111 | "ax_pwr.fill_between(data[\"actual_stack_t\"], data[\"stack_signal_peak_pwr_mean\"] - np.sqrt(data[\"stack_signal_peak_pwr_variance\"]),\n", 112 | " data[\"stack_signal_peak_pwr_mean\"] + np.sqrt(data[\"stack_signal_peak_pwr_variance\"]), alpha=0.4)\n", 113 | "ax_pwr.set_title(\"Signal Power\")\n", 114 | "ax_pwr.loglog()\n", 115 | "ax_pwr.grid(True)\n", 116 | "\n", 117 | "\n", 118 | "plt.show()" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": null, 124 | "metadata": {}, 125 | "outputs": [], 126 | "source": [] 127 | } 128 | ], 129 | "metadata": { 130 | "kernelspec": { 131 | "display_name": "Python 3", 132 | "language": "python", 133 | "name": "python3" 134 | }, 135 | "language_info": { 136 | "codemirror_mode": { 137 | "name": "ipython", 138 | "version": 3 139 | }, 140 | "file_extension": ".py", 141 | "mimetype": "text/x-python", 142 | "name": "python", 143 | "nbconvert_exporter": "python", 144 | "pygments_lexer": "ipython3", 145 | "version": "3.11.3" 146 | } 147 | }, 148 | "nbformat": 4, 149 | "nbformat_minor": 2 150 | } 151 | -------------------------------------------------------------------------------- /postprocessing/notebooks/Transmit Amplitude.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | 
"outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2\n", 11 | "\n", 12 | "from dask.distributed import Client, LocalCluster\n", 13 | "\n", 14 | "client = Client() # Note that `memory_limit` is the limit **per worker**.\n", 15 | "# n_workers=4,\n", 16 | "# threads_per_worker=1,\n", 17 | "# memory_limit='3GB'\n", 18 | "client # If you click the dashboard link in the output, you can monitor real-time progress and get other cool visualizations." 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": null, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "import copy\n", 28 | "import sys\n", 29 | "import xarray as xr\n", 30 | "import numpy as np\n", 31 | "import dask.array as da\n", 32 | "\n", 33 | "import matplotlib.pyplot as plt\n", 34 | "import hvplot.xarray\n", 35 | "import holoviews as hv\n", 36 | "import scipy.constants\n", 37 | "\n", 38 | "sys.path.append(\"..\")\n", 39 | "import processing_dask as pr\n", 40 | "import plot_dask\n", 41 | "import processing as old_processing\n", 42 | "\n", 43 | "sys.path.append(\"../../preprocessing/\")\n", 44 | "from generate_chirp import generate_chirp" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "# file path to data and configs\n", 54 | "\n", 55 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20240510-bench-decay/20240510_234434\"\n", 56 | "\n", 57 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20240510-bench-decay/20240510_234556\" # 20 us rx time\n", 58 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20240510-bench-decay/20240510_234720\" # 60 us rx time\n", 59 | "prefix = \"/home/radioglaciology/thomas/radar_data/20240510-bench-decay/20240510_234720\" # 100 us rx time\n", 60 | "\n", 61 | "# resave data as zarr for dask processing\n", 62 | "zarr_path = pr.save_radar_data_to_zarr(prefix)\n", 63 | "\n", 64 | "# open zarr file, adjust chunk size to be 10 MB - 1 GB based on sample rate/bit depth\n", 65 | "raw = xr.open_zarr(zarr_path, chunks={\"pulse_idx\": 1000})" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "### View raw pulse in time domain to check for clipping" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": null, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "single_pulse_raw = raw.radar_data[{'pulse_idx': 0}].compute()\n", 82 | "plot1 = np.real(single_pulse_raw).hvplot.line(x='fast_time', color='red', label=\"real\") * np.imag(single_pulse_raw).hvplot.line(x='fast_time', color=\"blue\", label=\"imag\")\n", 83 | "\n", 84 | "plot1 = plot1.opts(xlabel='Fast Time (s)', ylabel='Raw Amplitude')\n", 85 | "plot1" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "plot_objs = []\n", 95 | "\n", 96 | "for pulse_idx in np.arange(4):\n", 97 | " single_pulse_raw = raw.radar_data[{'pulse_idx': pulse_idx}].compute()\n", 98 | " plot_objs.append(np.real(single_pulse_raw).hvplot.line(x='fast_time', label=f\"pulse_idx {pulse_idx}\"))\n", 99 | "\n", 100 | "plot1 = hv.Overlay(plot_objs).opts(xlabel='Fast Time (s)', ylabel='Raw Amplitude')\n", 101 | "plot1" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "raw.config['GENERATE']['chirp_length']" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | 
"execution_count": null, 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "radar_data_chirp = raw.radar_data.where(raw.fast_time <= raw.config['GENERATE']['chirp_length'], drop=True)\n", 120 | "(20*np.log10(np.sqrt((np.abs(radar_data_chirp)**2).mean(dim='sample_idx')))).hvplot.scatter(x=\"pulse_idx\").opts(ylabel=\"RX signal mean amplitude [dB]\")" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "#plots = []\n", 130 | "p = (20*np.log10(np.sqrt((np.abs(radar_data_chirp)**2).mean(dim='sample_idx')))).hvplot.scatter(x=\"pulse_idx\", label=\"100 us RX\").opts(ylabel=\"RX signal mean amplitude [dB]\")\n", 131 | "plots.append(p)" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": null, 137 | "metadata": {}, 138 | "outputs": [], 139 | "source": [ 140 | "hv.Overlay(plots)" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": null, 146 | "metadata": {}, 147 | "outputs": [], 148 | "source": [] 149 | } 150 | ], 151 | "metadata": { 152 | "language_info": { 153 | "name": "python" 154 | } 155 | }, 156 | "nbformat": 4, 157 | "nbformat_minor": 2 158 | } 159 | -------------------------------------------------------------------------------- /tests/Plot Duty Cycle Pickles.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import pickle\n", 10 | "from ruamel.yaml import YAML as ym\n", 11 | "import pandas as pd\n", 12 | "import matplotlib\n", 13 | "import matplotlib.pyplot as plt" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "def load_old_style_pickle(pickle_path, config_path):\n", 23 | " yaml = ym()\n", 24 | " config = yaml.load(open(config_path))\n", 25 | "\n", 26 | " # Old data format has a series of pickles, each containing one experiment of data\n", 27 | " data = []\n", 28 | " with open(pickle_path, \"rb\") as f:\n", 29 | " while True:\n", 30 | " try:\n", 31 | " data.append(pickle.load(f))\n", 32 | " except EOFError:\n", 33 | " break\n", 34 | " \n", 35 | " # Convert to a pandas dataframe\n", 36 | " df = pd.DataFrame(data)\n", 37 | " df = df.drop_duplicates()\n", 38 | " \n", 39 | " df[\"duty_cycle\"] = config[\"CHIRP\"][\"rx_duration\"] / df[\"values\"]\n", 40 | " df[\"error_rate\"] = df[\"n_error_list\"] / df[\"n_pulse_attempts\"]\n", 41 | "\n", 42 | " return df\n", 43 | "\n", 44 | "def load_new_style_pickle(pickle_path):\n", 45 | " with open(pickle_path, \"rb\") as f:\n", 46 | " data = pickle.load(f)\n", 47 | " \n", 48 | " config = data[\"config\"]\n", 49 | " data.pop(\"config\")\n", 50 | "\n", 51 | " # Convert to a pandas dataframe\n", 52 | " df = pd.DataFrame(data)\n", 53 | " if \"values\" in df.columns:\n", 54 | " df[\"pri\"] = df[\"values\"]\n", 55 | " \n", 56 | " df[\"duty_cycle\"] = config[\"CHIRP\"][\"rx_duration\"] / df[\"pri\"]\n", 57 | " df[\"error_rate\"] = df[\"n_error_list\"] / df[\"n_pulse_attempts\"]\n", 58 | " # Replace nan error_rates with 1 (100% errors)\n", 59 | " df[\"error_rate\"] = df[\"error_rate\"].fillna(1)\n", 60 | "\n", 61 | " return df\n" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": null, 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "duty_cycle_pickles = [\n", 71 | " # (path to pickle, label, is 
it in the new data format?)\n", 72 | " (\"/media/thomas/Extreme SSD/orca_paper_data_files/duty_cycle/b205/20240227_200503_error_code_late_command.pickle\", \"B205, USB 3, Laptop\", False),\n", 73 | " (\"/media/thomas/Extreme SSD/orca_paper_data_files/duty_cycle/b205/20240227_142644_error_code_late_command.pickle\", \"B205, USB 3, Pi 4\", False),\n", 74 | " (\"/media/thomas/Extreme SSD/orca_paper_data_files/duty_cycle/b205/20240326_231441_error_code_late_command.pickle\", \"B205, USB 3, Pi 5\", True),\n", 75 | " (\"/media/thomas/Extreme SSD/orca_paper_data_files/duty_cycle/x310/20231220_114225_error_code_late_command.pickle\", \"X310, 10 Gbit Ethernet, Laptop\", False),\n", 76 | " (\"/media/thomas/Extreme SSD/orca_paper_data_files/duty_cycle/x310/20231010_044215_error_code_late_command.pickle\", \"X310, 1 Gbit Ethernet, Laptop\", True),\n", 77 | "]\n", 78 | "\n", 79 | "config_path = \"/media/thomas/Extreme SSD/orca_paper_data_files/duty_cycle/b205/duty_cycle_b205.yaml\"\n", 80 | "\n", 81 | "duty_cycle_dfs = {}\n", 82 | "\n", 83 | "for path, label, is_new_format in duty_cycle_pickles:\n", 84 | " if is_new_format:\n", 85 | " df = load_new_style_pickle(path)\n", 86 | " else:\n", 87 | " df = load_old_style_pickle(path, config_path)\n", 88 | " duty_cycle_dfs[label] = df" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "matplotlib.rcParams.update({\n", 98 | " 'font.size': 16,\n", 99 | " 'legend.fontsize': 10,\n", 100 | " 'lines.linewidth': 2,\n", 101 | " })\n", 102 | "\n", 103 | "fig, ax = plt.subplots(figsize=(6, 5))\n", 104 | "\n", 105 | "markers = ['D', 'p', 'o', 's', '*']\n", 106 | "colors = ['C1', 'C3', 'C4', \"C0\", \"C2\"]\n", 107 | "\n", 108 | "for marker, color, (label, df) in zip(markers, colors, duty_cycle_dfs.items()):\n", 109 | " ax.plot(100*df[\"duty_cycle\"], 100*df[\"error_rate\"], label=label, marker=marker, c=color)\n", 110 | "\n", 111 | "ax.set_xlabel(\"Duty cycle [%]\")\n", 112 | "ax.set_ylabel(\"Error rate [%]\")\n", 113 | "ax.grid(True)\n", 114 | "ax.set_ylim(0, 25)\n", 115 | "ax.set_xlim(0, 100)\n", 116 | "\n", 117 | "ax.legend(bbox_to_anchor=(0.5, 1.3), loc=\"upper center\", ncol=2)\n", 118 | "\n", 119 | "fig.tight_layout()\n", 120 | "fig.savefig('duty_cycle_comparison_with_pi5.png', dpi=300)\n", 121 | "plt.show()" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [] 130 | } 131 | ], 132 | "metadata": { 133 | "kernelspec": { 134 | "display_name": "Python 3", 135 | "language": "python", 136 | "name": "python3" 137 | }, 138 | "language_info": { 139 | "codemirror_mode": { 140 | "name": "ipython", 141 | "version": 3 142 | }, 143 | "file_extension": ".py", 144 | "mimetype": "text/x-python", 145 | "name": "python", 146 | "nbconvert_exporter": "python", 147 | "pygments_lexer": "ipython3", 148 | "version": "3.11.3" 149 | } 150 | }, 151 | "nbformat": 4, 152 | "nbformat_minor": 2 153 | } 154 | -------------------------------------------------------------------------------- /postprocessing/notebooks/orca_paper/Plot Power Calibration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import numpy as np\n", 10 | "import matplotlib.pyplot as plt\n", 11 | "import matplotlib\n", 12 | "import pickle\n", 13 | "import os" 14 | ] 15 | 
}, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "# data dictionaries are [gain][frequency][inpt_power] = (output_power OR list of output powers)\n", 23 | "\n", 24 | "def correct_for_attenuation(d, attenuation, min_input_power = None):\n", 25 | " for g in d:\n", 26 | " for f in d[g]:\n", 27 | " new_data = {}\n", 28 | " for inpt_pwr in d[g][f]:\n", 29 | " if min_input_power and (inpt_pwr < min_input_power):\n", 30 | " continue\n", 31 | " new_data[inpt_pwr - attenuation] = d[g][f][inpt_pwr]\n", 32 | " d[g][f] = new_data\n", 33 | "\n", 34 | "def merge_data(cal_data, d):\n", 35 | " for g in d:\n", 36 | " if not g in cal_data:\n", 37 | " cal_data[g] = {}\n", 38 | " for f in d[g]:\n", 39 | " if not f in cal_data[g]:\n", 40 | " cal_data[g][f] = {}\n", 41 | " for inpt_pwr in d[g][f]:\n", 42 | " if not inpt_pwr in cal_data[g][f]:\n", 43 | " cal_data[g][f][inpt_pwr] = []\n", 44 | " cal_data[g][f][inpt_pwr].append(d[g][f][inpt_pwr])\n" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "base_dir = \"/media/thomas/Extreme SSD/orca_paper_data_files/power_calibration/b205/20210513-cal-data-2\"\n", 54 | "input_files_b205 = {\n", 55 | " os.path.join(base_dir, \"power_calibration_data_10_atten_1.pickle\"): 10,\n", 56 | " os.path.join(base_dir, \"power_calibration_data_40_atten_1.pickle\"): 40,\n", 57 | " os.path.join(base_dir, \"power_calibration_data_70_atten_1.pickle\"): 70,\n", 58 | " os.path.join(base_dir, \"power_calibration_data_80_atten_1.pickle\"): 80,\n", 59 | "}\n", 60 | "rigol_min_input_power = -25 # Minimum (uncorrect) input where where the signal generator has sufficient SNR for the measurement\n", 61 | "\n", 62 | "input_files = {'/media/thomas/Extreme SSD/orca_paper_data_files/power_calibration/x310/20231208/power_calibration_data_dbA_rfsiggen.pickle': 0}\n", 63 | "sig_gen_min_input_power = None\n", 64 | "\n", 65 | "def load_cal_data(input_files, min_input_power):\n", 66 | " cal_data = {}\n", 67 | " for idx, path in enumerate(input_files):\n", 68 | " with open(path, 'rb') as f:\n", 69 | " d = pickle.load(f)\n", 70 | " if idx == len(input_files) - 1:\n", 71 | " min_input_power = None # No other data available, so don't enforce a minimum SNR for the highest attenuation measurement\n", 72 | " correct_for_attenuation(d, input_files[path], min_input_power=min_input_power)\n", 73 | " merge_data(cal_data, d)\n", 74 | " return cal_data\n", 75 | "\n", 76 | "cal_data_b205 = load_cal_data(input_files_b205, rigol_min_input_power)[36] # Pick a single gain value to plot\n", 77 | "cal_data_x310 = load_cal_data(input_files, sig_gen_min_input_power)[30]\n", 78 | "\n", 79 | "freqs_to_plot = np.array([50e6, 450e6])" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "matplotlib.rcParams.update({\n", 89 | " 'font.size': 16,\n", 90 | " 'legend.fontsize': 10,\n", 91 | " 'lines.linewidth': 2,\n", 92 | " })" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [ 101 | "fig, ax = plt.subplots(figsize=(6,4), facecolor='white')\n", 102 | "\n", 103 | "linestyles = ['-', ':']\n", 104 | "\n", 105 | "for idx, freq in enumerate(freqs_to_plot):\n", 106 | " inpt_pwrs = list(cal_data_b205[freq].keys())\n", 107 | " inpt_pwrs.sort()\n", 108 | " ax.plot(inpt_pwrs, 
[np.mean(cal_data_b205[freq][x]) for x in inpt_pwrs], label = f'[B205] {freq/1000000} MHz', linestyle=linestyles[idx], color='C0')#, marker='o', markersize=4)\n", 109 | "\n", 110 | " inpt_pwrs = list(cal_data_x310[freq].keys())\n", 111 | " inpt_pwrs.sort()\n", 112 | " ax.plot(inpt_pwrs, [np.mean(cal_data_x310[freq][x]) for x in inpt_pwrs], label = f'[X310] {freq/1000000} MHz', linestyle=linestyles[idx], color='C1')#, marker='o', markersize=4)\n", 113 | "ax.set_xlabel('Input Power [dBm]')\n", 114 | "ax.set_ylabel('SDR Recorded Power [dBFS]')\n", 115 | "ax.legend()\n", 116 | "ax.grid()\n", 117 | "ax.set_xlim(-110, -20)\n", 118 | "fig.tight_layout()\n", 119 | "#fig.savefig('power_calibration.png', dpi=300)\n", 120 | "plt.show()" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [] 129 | } 130 | ], 131 | "metadata": { 132 | "kernelspec": { 133 | "display_name": "Python 3", 134 | "language": "python", 135 | "name": "python3" 136 | }, 137 | "language_info": { 138 | "codemirror_mode": { 139 | "name": "ipython", 140 | "version": 3 141 | }, 142 | "file_extension": ".py", 143 | "mimetype": "text/x-python", 144 | "name": "python", 145 | "nbconvert_exporter": "python", 146 | "pygments_lexer": "ipython3", 147 | "version": "3.11.3" 148 | } 149 | }, 150 | "nbformat": 4, 151 | "nbformat_minor": 2 152 | } 153 | -------------------------------------------------------------------------------- /postprocessing/noise_test.py: -------------------------------------------------------------------------------- 1 | # Assumes that coherent summation is OFF in coherent.cpp 2 | # (This depends on #ifdef and can't be set via yaml. Edit coherent.cpp 3 | # directly--or, if you're doing this a lot, change #ifdef to an if 4 | # statement so that you can set the value of average_before_save via yaml) 5 | # Assumes that SDR configuration is loopback. 
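# A typical invocation (illustrative sketch, assuming the repository root as the working
# directory and that a loopback capture has already been saved to the rx_samps path given
# in the NOISE section of config/default.yaml, which is the config file loaded below):
#   python postprocessing/noise_test.py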
6 | import numpy as np 7 | import numpy.random as rand 8 | import scipy.signal as sp 9 | import matplotlib.pyplot as plt 10 | import processing as pr 11 | from ruamel.yaml import YAML as ym 12 | 13 | # Initialize constants 14 | yaml = ym(typ='safe') # Always use safe load if not dumping 15 | with open("config/default.yaml") as stream: 16 | config = yaml.load(stream) 17 | noise_params = config["NOISE"] 18 | sample_rate = noise_params["sample_rate"] 19 | rx_samps = noise_params["rx_samps"] 20 | orig_chirp = noise_params["orig_chirp"] 21 | noise_std = noise_params["noise_std"] 22 | coh_sums = noise_params["coherent_sums"] 23 | direct_start = noise_params["direct_start"] 24 | show_graphs = noise_params["show_graphs"] 25 | describe = noise_params["describe"] 26 | 27 | print("--- Loaded constants from config.yaml ---") 28 | 29 | # Open received data 30 | print("--- Opening data and determining direct path peak for first signal---") 31 | rx_sig = pr.extractSig(rx_samps) 32 | n_rx_samps = int(np.shape(rx_sig)[0]) 33 | if (show_graphs): pr.plotSignal(rx_sig, 'Loopback Test', sample_rate) 34 | 35 | # Read original chirp 36 | tx_sig = pr.extractSig(orig_chirp) 37 | n_tx_samps = int(np.shape(tx_sig)[0]) 38 | if (show_graphs): pr.plotSignal(tx_sig, 'Original Chirp', sample_rate) 39 | 40 | # Read the first chirp in received data -- this is the "ideal" signal for SNR 41 | samps_per = int(n_rx_samps / coh_sums) 42 | rx_ideal = rx_sig[0:samps_per] 43 | xcorr_ideal = np.abs(sp.correlate(rx_ideal, tx_sig, mode='valid', method='auto')) 44 | dir_peak = pr.findDirectPath(xcorr_ideal, direct_start, describe) 45 | 46 | if (show_graphs): pr.plotChirpVsTime(rx_ideal, "'Ideal' Signal", sample_rate) 47 | 48 | # Split received data into individual signals 49 | print("--- Splitting rx signal into individual chirps ---") 50 | signals = [] 51 | for x in range(coh_sums): 52 | ind_start = x * (samps_per) 53 | this_sig = np.zeros(samps_per, dtype=np.csingle) 54 | for y in range(samps_per): 55 | this_sig[y] = rx_sig[ind_start + y] 56 | signals.append(this_sig) 57 | signals = np.asarray(signals) 58 | 59 | # For each signal: 60 | count = 1 61 | num_misalign = 0 62 | total = np.zeros(samps_per, dtype=np.csingle) 63 | snrs = np.zeros(coh_sums) 64 | for sig in signals: 65 | 66 | # Match filter signal & determine direct path peak to ensure alignment with ideal signal 67 | if (describe): print("\n--- Beginning Processing of Signal %d ---" % count) 68 | xcorr_sig = np.abs(sp.correlate(sig, tx_sig, mode='valid', method='auto')) 69 | 70 | aligned = (dir_peak == pr.findDirectPath(xcorr_sig, direct_start, describe)) 71 | if (describe): print("\tIs Signal %d aligned with the first? 
%r" % (count, aligned)) 72 | if (not aligned): 73 | if describe: print("--- Skipping signal %d due to poor alignment with first signal ---", count) 74 | num_misalign += 1 75 | continue 76 | 77 | # Add white noise 78 | if (describe): print("--- Adding White Noise ---") 79 | white_noise = np.zeros(samps_per, dtype=np.csingle) 80 | rx_noise = np.zeros(samps_per, dtype=np.csingle) 81 | for x in range(samps_per): 82 | noise_r = rand.normal(0, noise_std) 83 | noise_i = rand.normal(0, noise_std) 84 | white_noise[x] = np.csingle(complex(np.float32(noise_r), np.float32(noise_i))) 85 | rx_noise = np.add(white_noise, sig) # Element-wise 86 | 87 | # Plot an example signal with white noise and match filter 88 | if (count == 1): 89 | if (describe): print("--- Plotting an individual signal with added white noise ---") 90 | rx_time = np.zeros(samps_per) 91 | for x in range(samps_per): 92 | rx_time[x] = x/sample_rate 93 | rx_time *= 1e6 94 | 95 | pr.plotChirpVsTime(rx_noise, "Individual Signal With Noise", sample_rate) 96 | 97 | xcorr_noise = np.abs(sp.correlate(rx_noise, tx_sig, mode='valid', method='auto')) 98 | xcorr_noise_samps = np.shape(xcorr_noise)[0] 99 | xcorr_noise_time = np.zeros(xcorr_noise_samps) 100 | for x in range(xcorr_noise_samps): 101 | xcorr_noise_time[x] = x * 1e6 / sample_rate 102 | 103 | plt.figure() 104 | plt.plot(xcorr_noise_time, 20* np.log10(xcorr_noise)) 105 | plt.title("Match-filter of Individual Signal with Noise") 106 | plt.xlabel("Time (ms)") 107 | plt.ylabel("Power [dB]") 108 | plt.show() 109 | 110 | # Coherently sum and calculate SNR 111 | if (describe): print("--- Coherently sum result ---") 112 | for x in range(samps_per): 113 | total[x] += rx_noise[x] 114 | avg_total = np.divide(total, count) 115 | if (show_graphs): pr.plotSignal(avg_total, 'Received signal after %d coherent summations' % (count-num_misalign), sample_rate) 116 | 117 | print("--- Calculating SNR after %d coherent summations ---" % (count - num_misalign)) 118 | this_snr = pr.getSNR(rx_ideal, avg_total) 119 | print("\tSNR: %f" % this_snr) 120 | snrs[count-1]=this_snr 121 | 122 | count += 1 123 | 124 | print("\n--- Plotting total signal after %d coherent summations ---" % coh_sums) 125 | pr.plotChirpVsTime(avg_total, "Total after %d summations" % coh_sums, sample_rate) 126 | 127 | print("--- Plotting match filter of total signal after %d coherent summations ---" % coh_sums) 128 | xcorr_total = np.abs(sp.correlate(avg_total, tx_sig, mode='valid', method='auto')) 129 | plt.figure() 130 | plt.plot(xcorr_noise_time, 20*np.log10(xcorr_total)) # xcorr_noise and xcorr_total are the same shape 131 | plt.title("Match-filter of %d coherent summations" % coh_sums) 132 | plt.xlabel('Time (ms)') 133 | plt.ylabel('Power [dB]') 134 | plt.show() 135 | 136 | print("--- Plotting SNR versus # Sums ---") 137 | plt.figure() 138 | plt.plot(np.add(1, range(coh_sums)), snrs) 139 | plt.xlabel("Number of sums") 140 | plt.ylabel("Calculated SNR") 141 | plt.title("Signal to Noise Ratio versus Number of Coherent Sums") 142 | plt.show() 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- /postprocessing/notebooks/Radar 1D Stacking.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | 
"metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "from dask.distributed import Client, LocalCluster\n", 20 | "\n", 21 | "client = Client() # Note that `memory_limit` is the limit **per worker**.\n", 22 | "# n_workers=4,\n", 23 | "# threads_per_worker=1,\n", 24 | "# memory_limit='3GB'\n", 25 | "client # If you click the dashboard link in the output, you can monitor real-time progress and get other cool visualizations." 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "import os\n", 35 | "import copy\n", 36 | "import sys\n", 37 | "import xarray as xr\n", 38 | "import numpy as np\n", 39 | "import dask.array as da\n", 40 | "\n", 41 | "import matplotlib.pyplot as plt\n", 42 | "import hvplot.xarray\n", 43 | "import holoviews as hv\n", 44 | "import scipy.constants\n", 45 | "\n", 46 | "sys.path.append(\"..\")\n", 47 | "import processing_dask as pr\n", 48 | "import plot_dask\n", 49 | "import processing as old_processing\n", 50 | "\n", 51 | "sys.path.append(\"../../preprocessing/\")\n", 52 | "from generate_chirp import generate_chirp" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "# file path to data and configs\n", 62 | "\n", 63 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230721-summit-day1-bench/20230721_104552\"\n", 64 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_103641\"\n", 65 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_103845\" # 6 db higher tx\n", 66 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104059\"\n", 67 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104248\" # another 6 db\n", 68 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104456\" # 3db higher rx\n", 69 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104742\" # 20 -> 30 mhz chirp bw\n", 70 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105025\" # bw back to 20, 15 mhz lo offset\n", 71 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105509\" # 100k pulses, 30 us transmit, 90 us rx\n", 72 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105921\" # 100k pulses, 10 us transmit, 90 us rx\n", 73 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_110253\" # 100k pulses, 5 us transmit, 60 us rx\n", 74 | "\n", 75 | "#prefix = \"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_151432\" # 20 us transmit\n", 76 | "#prefix = \"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_153254\" # 10 us transmit\n", 77 | "prefix = \"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_152404\" # 5 us transmit\n", 78 | "\n", 79 | "# resave data as zarr for dask processing\n", 80 | "zarr_path = pr.save_radar_data_to_zarr(prefix)\n", 81 | "\n", 82 | "# open zarr file, adjust chunk size to be 10 MB - 1 GB based on sample rate/bit depth\n", 83 | "raw = xr.open_zarr(zarr_path, chunks={\"pulse_idx\": 1000})" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": null, 89 | "metadata": {}, 90 | "outputs": [], 91 | "source": [ 92 | "#zero_sample_idx = 36 # X310, fs 
= 20 MHz\n", 93 | "zero_sample_idx = 63 # X310, fs = 50 MHz\n", 94 | "#zero_sample_idx = 159 # B205mini, fs = 56 MHz\n", 95 | "\n", 96 | "modify_rx_window = True # set to true if you want to window the reference chirp only on receive, false uses ref chirp as transmitted in config file\n", 97 | "rx_window = \"blackman\" # what you want to change the rx window to if modify_rx_window is true\n", 98 | "\n", 99 | "#dielectric_constant = 2.2957\n", 100 | "dielectric_constant = 3.17 # ice (air = 1, 66% velocity coax = 2.2957)\n", 101 | "sig_speed = scipy.constants.c / np.sqrt(dielectric_constant)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "if modify_rx_window:\n", 111 | " config = copy.deepcopy(raw.config)\n", 112 | " config['GENERATE']['window'] = rx_window\n", 113 | "else:\n", 114 | " config = raw.config\n", 115 | "\n", 116 | "chirp_ts, ref_chirp = generate_chirp(config)" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "plots = []\n", 126 | "\n", 127 | "for n_stack in [1, 100, 1000, 10000, len(raw.pulse_idx)]:\n", 128 | "\n", 129 | " stacked = pr.stack(raw, n_stack)\n", 130 | " compressed = pr.pulse_compress(stacked, ref_chirp,\n", 131 | " fs=stacked.config['GENERATE']['sample_rate'],\n", 132 | " zero_sample_idx=zero_sample_idx,\n", 133 | " signal_speed=sig_speed)\n", 134 | "\n", 135 | " compressed_power = xr.apply_ufunc(\n", 136 | " lambda x: 20*np.log10(np.abs(x)),\n", 137 | " compressed,\n", 138 | " dask=\"parallelized\"\n", 139 | " )\n", 140 | "\n", 141 | " plots.append(compressed_power.radar_data[0,:].hvplot.line(label=f\"{n_stack}\"))\n", 142 | "\n", 143 | "plot1D = hv.Overlay(plots)\n", 144 | "plot1D = plot1D.opts(xlabel='Reflection Distance (m)', ylabel='Return Power (dB)', height=600, title=f\"1D Radargram Stacking Comparison [{os.path.basename(raw.prefix)}]\")\n", 145 | "plot1D = plot1D.opts(xlim=(0, 1000), ylim=(-80, 0), show_grid=True)\n", 146 | "plot1D" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "hvplot.save(plot1D, f\"outputs/{os.path.basename(raw.prefix)}-1d-stack-compare-near.png\")" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": null, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [] 164 | } 165 | ], 166 | "metadata": { 167 | "kernelspec": { 168 | "display_name": "uhd_radar_2", 169 | "language": "python", 170 | "name": "python3" 171 | }, 172 | "language_info": { 173 | "codemirror_mode": { 174 | "name": "ipython", 175 | "version": 3 176 | }, 177 | "file_extension": ".py", 178 | "mimetype": "text/x-python", 179 | "name": "python", 180 | "nbconvert_exporter": "python", 181 | "pygments_lexer": "ipython3", 182 | "version": "3.8.17" 183 | }, 184 | "orig_nbformat": 4 185 | }, 186 | "nbformat": 4, 187 | "nbformat_minor": 2 188 | } 189 | -------------------------------------------------------------------------------- /preprocessing/generate_chirp.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | import numpy as np 4 | import scipy.signal 5 | import scipy.fft 6 | import matplotlib.pyplot as plt 7 | from ruamel.yaml import YAML 8 | 9 | def generate_chirp(config): 10 | """ 11 | Generate a chirp according to parameters in the config dictionary, typically 12 | loaded 
from a config YAML file. 13 | 14 | Returns a tuple (ts, chirp_complex), where ts is a numpy array of time 15 | samples, and chirp_complex is a numpy array of complex floating point values 16 | representing the chirp. 17 | 18 | If you're looking for a floating point valued chirp to use in convolution, 19 | this is probably the right function. 20 | 21 | This function does not convert the complex numpy array to the cpu format 22 | expected by the radar code. If you want to produce samples to feed the radar 23 | code, look at `generate_from_yaml_filename` (later in this file) instead. 24 | """ 25 | # Load parameters 26 | gen_params = config["GENERATE"] 27 | chirp_type = gen_params["chirp_type"] 28 | sample_rate = gen_params["sample_rate"] 29 | chirp_bandwidth = gen_params["chirp_bandwidth"] 30 | offset = gen_params.get("lo_offset_sw", 0) 31 | window = gen_params["window"] 32 | chirp_length = gen_params["chirp_length"] 33 | pulse_length = gen_params.get("pulse_length", chirp_length) # default to chirp_length if no pulse_length is specified 34 | 35 | # Build chirp 36 | 37 | end_freq = chirp_bandwidth / 2 # Chirp goes from -BW/2 to BW/2 38 | start_freq = -1 * end_freq 39 | 40 | start_freq += offset 41 | end_freq += offset 42 | 43 | ts = np.arange(0, chirp_length-(1/(2*sample_rate)), 1/(sample_rate)) 44 | ts_zp = np.arange(0, (pulse_length)-(1/(2*sample_rate)), 1/(sample_rate)) 45 | 46 | if chirp_type == 'linear': 47 | ph = 2*np.pi*((start_freq)*ts + (end_freq - start_freq) * ts**2 / (2*chirp_length)) 48 | elif chirp_type == 'hyperbolic': 49 | ph = 2*np.pi*(-1*start_freq*end_freq*chirp_length/(end_freq-start_freq))*np.log(1- (end_freq-start_freq)*ts/(end_freq*chirp_length)) 50 | else: 51 | ph = 2*np.pi*(start_freq*ts + (end_freq - start_freq) * ts**2 / (2*chirp_length)) 52 | print(f"[ERROR] Unrecognized chirp type '{chirp_type}'") 53 | return None, None 54 | 55 | chirp_complex = np.exp(1j*ph) 56 | 57 | if window == "blackman": 58 | chirp_complex = chirp_complex * np.blackman(chirp_complex.size) 59 | elif window == "hamming": 60 | chirp_complex = chirp_complex * np.hamming(chirp_complex.size) 61 | elif window == "kaiser14": 62 | chirp_complex = chirp_complex * np.kaiser(chirp_complex.size, 14.0) 63 | elif window == "kaiser10": 64 | chirp_complex = chirp_complex * np.kaiser(chirp_complex.size, 10.0) 65 | elif window == "kaiser18": 66 | chirp_complex = chirp_complex * np.kaiser(chirp_complex.size, 18.0) 67 | elif window != "rectangular": 68 | print(f"[ERROR] Unrecognized window function '{window}'") 69 | return None, None 70 | 71 | chirp_complex = np.pad(chirp_complex, (int(np.floor(ts_zp.size - ts.size)/2),), 'constant') 72 | 73 | chirp_complex = chirp_complex 74 | 75 | return ts_zp, chirp_complex 76 | 77 | 78 | def generate_from_yaml_filename(yaml_filename): 79 | """ 80 | Generate a chirp and save it to a binary file, according to parameters loaded 81 | from the supplied YAML filename. 82 | 83 | Typically, this function is called to produce a chirp file. The save location 84 | of this file is specified in the YAML file, under the GENERATE section. 85 | 86 | This function also returns the numpy array written to the file. Note that this 87 | is different from the chirp_complex array returned by generate_chirp(), as this 88 | array is in the format that is written to the file (interleaved I/Q samples 89 | with whatever datatype was specified in config['DEVICE']['cpu_format']).
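    A minimal usage sketch (the config path below is simply this script's default and
    could be any ORCA-style YAML file):

        # Writes the chirp file named by GENERATE.out_file and returns the samples written to it.
        samples = generate_from_yaml_filename("config/default.yaml")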
90 | """ 91 | 92 | # Load YAML file 93 | yaml = YAML(typ='safe') 94 | stream = open(yaml_filename) 95 | config = yaml.load(stream) 96 | 97 | # Load some additional paramters needed here 98 | filename = config['GENERATE']["out_file"] 99 | show_plot = config['GENERATE']['show_plot'] 100 | sample_rate = config['GENERATE']['sample_rate'] 101 | 102 | cpu_format = config['DEVICE'].get('cpu_format', 'fc32') 103 | 104 | # Create the chirp 105 | ts, chirp_complex = generate_chirp(config) 106 | 107 | if ts is None: 108 | print("Error occured when generating chirp.") 109 | sys.exit(1) 110 | 111 | 112 | if show_plot: 113 | 114 | fig, axs = plt.subplots(2,1) 115 | 116 | # Time domain plot 117 | axs[0].plot(ts*1e6, np.real(chirp_complex), label='I') 118 | axs[0].plot(ts*1e6, np.imag(chirp_complex), label='Q') 119 | axs[0].set_xlabel('Time [us]') 120 | axs[0].set_ylabel('Samples') 121 | axs[0].set_title('Time Domain') 122 | axs[0].legend() 123 | 124 | # Frequency domain plot 125 | freqs = scipy.fft.fftshift(scipy.fft.fftfreq(chirp_complex.size, d=1/sample_rate)) 126 | ms = 20*np.log10(scipy.fft.fftshift(np.abs(scipy.fft.fft(chirp_complex)))) 127 | axs[1].plot(freqs/1e6, ms) 128 | axs[1].set_xlabel('Frequency [MHz]') 129 | axs[1].set_ylabel('Amplitude [dB]') 130 | axs[1].set_title('Frequency Domain') 131 | axs[1].grid() 132 | 133 | fig.tight_layout() 134 | 135 | plt.show() 136 | 137 | # Convert to file 138 | print("--- Converting Chirp to File ---") 139 | 140 | if cpu_format == 'fc32': 141 | output_dtype = np.float32 142 | scale_factor = 1.0 143 | elif cpu_format == 'sc16': 144 | output_dtype = np.int16 145 | scale_factor = np.iinfo(output_dtype).max - 1 146 | elif cpu_format == 'sc8': 147 | output_dtype = np.int8 148 | scale_factor = np.iinfo(output_dtype).max - 1 149 | else: 150 | raise Exception(f"Unrecognized cpu_format '{cpu_format}'. 
Must be one of 'fc32', 'sc16', or 'sc8'.") 151 | 152 | chirp_floats = np.empty(shape=(2* np.shape(chirp_complex)[0],), dtype=output_dtype) 153 | for x in range(np.shape(chirp_complex)[0]): 154 | chirp_floats[2*x] = scale_factor * chirp_complex[x].real 155 | chirp_floats[2*x+1] = scale_factor * chirp_complex[x].imag 156 | 157 | chirp_floats.tofile(filename, sep='') 158 | 159 | # Read file just to check success 160 | recov_floats = np.fromfile(filename, dtype=output_dtype, count=-1, sep='', offset=0) 161 | if np.array_equiv(recov_floats, chirp_floats): 162 | print("\tChirp successfully stored in %s" % filename) 163 | return chirp_floats 164 | else: 165 | print("\t[ERROR] Chirp was not successfully stored in %s" % filename) 166 | raise Exception("Chirp was not successfully stored") 167 | 168 | 169 | if __name__ == '__main__': 170 | # Check if a YAML file was provided as a command line argument 171 | parser = argparse.ArgumentParser() 172 | parser.add_argument("yaml_file", nargs='?', default='config/default.yaml', 173 | help='Path to YAML configuration file') 174 | args = parser.parse_args() 175 | 176 | try: 177 | generate_from_yaml_filename(args.yaml_file) 178 | except Exception as e: 179 | print(e) 180 | sys.exit(1) 181 | -------------------------------------------------------------------------------- /manager/uav_payload_manager.py: -------------------------------------------------------------------------------- 1 | import gpiozero 2 | import time 3 | import argparse 4 | import os 5 | import sys 6 | import subprocess 7 | import signal 8 | import threading 9 | 10 | sys.path.append("preprocessing") 11 | from generate_chirp import generate_from_yaml_filename 12 | sys.path.append("postprocessing") 13 | from save_data import save_data 14 | 15 | # Nominal flow: 16 | # setup -> ready -[button press]-> starting -> recording -[button press]-> saving -> ready 17 | # >> if issue: error 18 | # 19 | # States: 20 | # setup - Initial one-time setup (includes generating the chirp) 21 | # ready - Waiting for user to press button to begin -- press button to start 22 | # starting - USRP is being setup 23 | # recording - USRP is actively recording -- press button to end 24 | # saving - done recording and saving data 25 | # error - an issue occurred requiring manual intervention 26 | 27 | current_state = "setup" # Current actual state 28 | displayed_state = None # Current state displayed on LED 29 | expected_cwd = "/home/ubuntu/uhd_radar" 30 | yaml_filename = None 31 | uhd_process = None 32 | uhd_output_reader_thread = None 33 | 34 | # Setup button and button LED 35 | button = gpiozero.Button(4, pull_up=False, hold_time=5) 36 | led = gpiozero.PWMLED(18) 37 | 38 | def button_press(): 39 | global current_state 40 | 41 | print("Button pressed") 42 | 43 | if current_state == "ready": 44 | start_recording() 45 | elif current_state == "recording": 46 | stop_recording() 47 | 48 | def button_hold(): 49 | global current_state 50 | 51 | print("Button hold") 52 | 53 | if current_state == "recording": 54 | stop_recording() 55 | 56 | print("Button held down -- asking system to shutdown") 57 | os.system("sudo shutdown -h now") 58 | exit(0) 59 | 60 | def update_led_state(): 61 | global current_state, displayed_state 62 | if current_state != displayed_state: 63 | try: 64 | if current_state == "setup": 65 | led.blink(on_time=0.1, off_time=0.1) 66 | displayed_state = "setup" 67 | elif current_state == "ready": 68 | led.blink(on_time=0.2, off_time=1) 69 | displayed_state = "ready" 70 | elif current_state == "starting": 71 | 
led.blink(on_time=0.1, off_time=0.1) 72 | displayed_state = "starting" 73 | elif current_state == "recording": 74 | led.pulse() 75 | displayed_state = "recording" 76 | elif current_state == "saving": 77 | led.blink(on_time=0.1, off_time=0.1) 78 | displayed_state = "saving" 79 | elif current_state == "error": 80 | led.on() 81 | displayed_state = "error" 82 | except Exception as e: 83 | print("Exception while updating LED state") 84 | print(e) 85 | 86 | # Output logging 87 | def log_output_from_usrp(out, file_out): 88 | global current_state 89 | for line in iter(out.readline, ''): 90 | if (current_state != "saving") and (line.startswith("Received chirp") or line.startswith("[START]")): 91 | current_state = "recording" 92 | file_out.write(f"[{time.time():0.3f}] \t{line}") 93 | print(f"UHD output: \t{line}", end="") 94 | out.close() 95 | file_out.close() 96 | 97 | def start_recording(): 98 | global current_state, uhd_process, uhd_output_reader_thread 99 | 100 | print("Starting UHD process") 101 | current_state = "starting" 102 | update_led_state() 103 | 104 | # Chirp generation 105 | print("Re-generating chirp") 106 | try: 107 | generate_from_yaml_filename(yaml_filename) 108 | except Exception as e: 109 | print(e) 110 | error_and_quit() 111 | 112 | uhd_process = subprocess.Popen(["./radar", yaml_filename], stdout=subprocess.PIPE, bufsize=1, close_fds=True, text=True, cwd="sdr/build") 113 | uhd_output_reader_thread = threading.Thread(target=log_output_from_usrp, args=(uhd_process.stdout, open('uhd_stdout.log', 'w'))) 114 | uhd_output_reader_thread.daemon = True # thread dies with the program 115 | uhd_output_reader_thread.start() 116 | 117 | def stop_recording(): 118 | global current_state, yaml_filename 119 | 120 | was_force_killed = False 121 | 122 | print("Attemping to stop UHD process") 123 | current_state = "saving" 124 | uhd_process.send_signal(signal.SIGINT) 125 | update_led_state() 126 | timeout = 10 127 | print(f"Waiting up to {timeout} seconds for the process to quit") 128 | try: 129 | uhd_process.wait(timeout=timeout) 130 | except subprocess.TimeoutExpired as e: 131 | print(f"UHD process did not terminate within time limit. Killing...") 132 | uhd_process.kill() 133 | was_force_killed = True 134 | 135 | # Save output 136 | print("Copying data files...") 137 | save_data(yaml_filename, extra_files={"uhd_stdout.log": "uhd_stdout.log"}) 138 | print("Finished copying data.") 139 | 140 | if was_force_killed: 141 | print("Copying completed, but the process had to be killed.") 142 | error_and_quit() 143 | else: 144 | current_state = "ready" 145 | 146 | def error_and_quit(): 147 | current_state = "error" 148 | update_led_state() 149 | time.sleep(5) 150 | exit(1) 151 | 152 | # Handle SIGINT 153 | def signal_handler(sig, frame): 154 | print('uav_payload_manager received SIGINT') 155 | if current_state == "recording": 156 | print("Stopping recording in response to SIGINT...") 157 | stop_recording() 158 | sys.exit(0) 159 | 160 | signal.signal(signal.SIGINT, signal_handler) 161 | 162 | # Get ready to run 163 | 164 | update_led_state() 165 | 166 | # Check for correct working directory 167 | if os.getcwd() != expected_cwd: 168 | print(f"This script should ONLY be run from {expected_cwd}. 
Detected CWD {os.getcwd()}") 169 | error_and_quit() 170 | 171 | # Check if a YAML file was provided as a command line argument 172 | parser = argparse.ArgumentParser() 173 | parser.add_argument("yaml_file", nargs='?', default='config/default.yaml', 174 | help='Path to YAML configuration file') 175 | args = parser.parse_args() 176 | yaml_filename = args.yaml_file 177 | 178 | # Chirp generation 179 | try: 180 | generate_from_yaml_filename(yaml_filename) 181 | except Exception as e: 182 | print(e) 183 | error_and_quit() 184 | 185 | # Compile UHD program 186 | def run_and_fail_on_nonzero(cmd): 187 | retval = os.system(cmd) 188 | if retval != 0: 189 | print(f"Running '{cmd}' produced non-zero return value {retval}. Quitting...") 190 | error_and_quit() 191 | 192 | os.chdir("sdr/build") 193 | run_and_fail_on_nonzero("cmake ..") 194 | run_and_fail_on_nonzero("make") 195 | os.chdir("../..") 196 | 197 | # If successful, move on to ready state 198 | time.sleep(1) # TODO: Could remove - helps make it more obvious what's happening 199 | current_state = "ready" 200 | button.when_released = button_press # Toggle recording state on quick button press+release 201 | button.when_held = button_hold # When held, ask system to shutdown 202 | update_led_state() 203 | 204 | # The rest is handled asynchronously 205 | while True: 206 | # Check and updated LED state 207 | update_led_state() 208 | 209 | # Check if UHD process ended on it's own 210 | if (current_state == "recording") and uhd_process: 211 | retval = uhd_process.poll() 212 | if retval == 0: 213 | stop_recording() 214 | elif retval: 215 | print(f"UHD command returned non-zero output {retval}. Quitting...") 216 | stop_recording() 217 | error_and_quit() 218 | 219 | time.sleep(0.5) 220 | -------------------------------------------------------------------------------- /postprocessing/notebooks/Radar 1D File Compare.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "from dask.distributed import Client, LocalCluster\n", 20 | "\n", 21 | "client = Client() # Note that `memory_limit` is the limit **per worker**.\n", 22 | "# n_workers=4,\n", 23 | "# threads_per_worker=1,\n", 24 | "# memory_limit='3GB'\n", 25 | "client # If you click the dashboard link in the output, you can monitor real-time progress and get other cool visualizations." 
26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "import os\n", 35 | "import copy\n", 36 | "import sys\n", 37 | "import xarray as xr\n", 38 | "import numpy as np\n", 39 | "import dask.array as da\n", 40 | "\n", 41 | "import matplotlib.pyplot as plt\n", 42 | "import hvplot.xarray\n", 43 | "import holoviews as hv\n", 44 | "import scipy.constants\n", 45 | "\n", 46 | "sys.path.append(\"..\")\n", 47 | "import processing_dask as pr\n", 48 | "import plot_dask\n", 49 | "import processing as old_processing\n", 50 | "\n", 51 | "sys.path.append(\"../../preprocessing/\")\n", 52 | "from generate_chirp import generate_chirp" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "# file path to data and configs\n", 62 | "\n", 63 | "prefixes = []\n", 64 | "\n", 65 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230721-summit-day1-bench/20230721_104552\"\n", 66 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_103641\"\n", 67 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_103845\" # 6 db higher tx\n", 68 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104059\"\n", 69 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104248\" # another 6 db\n", 70 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104456\" # 3db higher rx\n", 71 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104742\" # 20 -> 30 mhz chirp bw\n", 72 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105025\" # bw back to 20, 15 mhz lo offset\n", 73 | "# prefixes.append(\"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105509\") # 100k pulses, 30 us transmit, 90 us rx\n", 74 | "# prefixes.append(\"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105921\") # 100k pulses, 10 us transmit, 90 us rx\n", 75 | "# prefixes.append(\"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_110253\") # 100k pulses, 5 us transmit, 60 us rx\n", 76 | "\n", 77 | "prefixes.append(\"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_145418\")\n", 78 | "prefixes.append(\"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_150425\")\n", 79 | "prefixes.append(\"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_150853\")\n", 80 | "\n", 81 | "# prefixes.append(\"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_151432\") # 20 us transmit\n", 82 | "# prefixes.append(\"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_153254\") # 10 us transmit\n", 83 | "# prefixes.append(\"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_152404\") # 5 us transmit\n", 84 | "\n", 85 | "raw_datasets = []\n", 86 | "for prefix in prefixes:\n", 87 | " # resave data as zarr for dask processing\n", 88 | " zarr_path = pr.save_radar_data_to_zarr(prefix)\n", 89 | "\n", 90 | " # open zarr file, adjust chunk size to be 10 MB - 1 GB based on sample rate/bit depth\n", 91 | " raw = xr.open_zarr(zarr_path, chunks={\"pulse_idx\": 1000})\n", 92 | " raw_datasets.append(raw)" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": {}, 
99 | "outputs": [], 100 | "source": [ 101 | "zero_sample_idx = 36 # X310, fs = 20 MHz\n", 102 | "#zero_sample_idx = 63 # X310, fs = 50 MHz\n", 103 | "#zero_sample_idx = 159 # B205mini, fs = 56 MHz\n", 104 | "\n", 105 | "modify_rx_window = True # set to true if you want to window the reference chirp only on receive, false uses ref chirp as transmitted in config file\n", 106 | "rx_window = \"blackman\" # what you want to change the rx window to if modify_rx_window is true\n", 107 | "\n", 108 | "#dielectric_constant = 2.2957\n", 109 | "dielectric_constant = 3.17 # ice (air = 1, 66% velocity coax = 2.2957)\n", 110 | "sig_speed = scipy.constants.c / np.sqrt(dielectric_constant)\n", 111 | "\n", 112 | "n_stack = 60000 #900000" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "plots = []\n", 122 | "\n", 123 | "for raw in raw_datasets:\n", 124 | " # Generate reference chirp\n", 125 | " if modify_rx_window:\n", 126 | " config = copy.deepcopy(raw.config)\n", 127 | " config['GENERATE']['window'] = rx_window\n", 128 | " else:\n", 129 | " config = raw.config\n", 130 | "\n", 131 | " chirp_ts, ref_chirp = generate_chirp(config)\n", 132 | "\n", 133 | " # Stack and pulse compress\n", 134 | " stacked = pr.stack(raw, n_stack)\n", 135 | " compressed = pr.pulse_compress(stacked, ref_chirp,\n", 136 | " fs=stacked.config['GENERATE']['sample_rate'],\n", 137 | " zero_sample_idx=zero_sample_idx,\n", 138 | " signal_speed=sig_speed)\n", 139 | "\n", 140 | " compressed_power = xr.apply_ufunc(\n", 141 | " lambda x: 20*np.log10(np.abs(x)),\n", 142 | " compressed,\n", 143 | " dask=\"parallelized\"\n", 144 | " )\n", 145 | "\n", 146 | " plots.append(compressed_power.radar_data[0,:].hvplot.line(label=f\"{os.path.basename(raw.prefix)}\"))" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "plot1D = hv.Overlay(plots)\n", 156 | "plot1D = plot1D.opts(xlabel='Reflection Distance (m)', ylabel='Return Power (dB)', height=600, title=f\"1D Radargram Multi-File Comparison\")\n", 157 | "plot1D = plot1D.opts(xlim=(0, 500), ylim=(-80, 0), show_grid=True)\n", 158 | "plot1D" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": null, 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "#hvplot.save(plot1D, f\"outputs/tmp.png\")" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": null, 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [] 176 | } 177 | ], 178 | "metadata": { 179 | "kernelspec": { 180 | "display_name": "uhd_radar_2", 181 | "language": "python", 182 | "name": "python3" 183 | }, 184 | "language_info": { 185 | "codemirror_mode": { 186 | "name": "ipython", 187 | "version": 3 188 | }, 189 | "file_extension": ".py", 190 | "mimetype": "text/x-python", 191 | "name": "python", 192 | "nbconvert_exporter": "python", 193 | "pygments_lexer": "ipython3", 194 | "version": "3.8.17" 195 | }, 196 | "orig_nbformat": 4 197 | }, 198 | "nbformat": 4, 199 | "nbformat_minor": 2 200 | } 201 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Open Radar Code Architecture (ORCA) 2 | 3 | [![Code - uhd_radar](https://img.shields.io/badge/Code-uhd__radar-2ea44f?logo=github)](https://github.com/radioglaciology/uhd_radar) [![Docs - 
orca-documentation](https://img.shields.io/badge/Docs-orca--documentation-blue)](https://radioglaciology.github.io/orca-documentation/) [![Please Cite Us!](https://img.shields.io/badge/Please_Cite_Us!-blue)](https://radioglaciology.github.io/orca-documentation/about/) 4 | 5 | This repository contains a unified set of code for operating coherent, chirped radar systems using USRP-based software-defined radios. ORCA was developed by Stanford Radio Glaciology to increase the accessibility of radar sounder instruments in the glaciological community. Most of Stanford Radio Glaciology's USRP-based radar instruments, including MAPPERR and PEREGRINE, run on this code. 6 | 7 | For detailed documentation, see the "Docs" badge above. 8 | 9 | ## Repository Organization 10 | 11 | * `config/` contains YAML files encapsulating all of the settings needed to run various experiments on different hardware setups. Ideally, the same code should be able to run for every SDR and every type of measurement, with a different YAML file from this folder determining all of the necessary settings and parameters. 12 | * `data/` is where you can locally store whatever results/outputs your experiment creates. Everything (except the readme) in this folder will be ignored by git. Please don't check your specific results into version control (but do back them up somewhere). 13 | * `preprocessing/` contains any scripts that need to run BEFORE the SDR code but that don't directly interface with the SDR. (For example: generation of a chirp waveform.) 14 | * `sdr/` contains any code that directly controls the SDR. 15 | * `postprocessing/` contains any code that processes or plots data recorded by the SDR without directly interfacing with the SDR. 16 | * `run.py` is a utility that manages the whole process of generating your output chirp, compiling the C++ code, running the radar, and collecting your results. This is the recommended way to run the radar system. 17 | 18 | ## Configuring your environment 19 | 20 | The easiest way to make sure you have the right dependencies to run everything here is to use conda. If you're new to conda, there are some notes on setting things up [here](tips/conda.md). 21 | 22 | Dependencies for this project are managed through the `environment.yaml` file. You can create a conda environment with everything you need to run this code like this: 23 | 24 | `conda env create -n myenvironmentname -f environment.yaml` 25 | 26 | (Specifying `-n myenvironmentname` is optional. The default, as specified in `environment.yaml`, is `uhd`.) 27 | 28 | If you are setting up an environment on a Raspberry Pi, we recommend using `environment-rpi.yaml` instead. This version includes additional dependencies used by `manager/uav_payload_manager.py`, a helper script designed to run only on Raspberry Pi-based radar instruments. 29 | 30 | Then activate it like this: 31 | 32 | `conda activate myenvironmentname` 33 | 34 | And you're good to go. This installs UHD and all the other necessary dependencies. 35 | 36 | For directly interacting with the SDRs, you will need to download the FPGA images. After activating the environment, you can do this by running: `uhd_images_downloader` 37 | 38 | ### Running the code 39 | 40 | Everything about the experiment you want to run is defined by a configuration YAML file. You can take a look at the examples in the `config/` directory, 41 | but you'll likely need to create your own file for whatever you want to do specifically.
The `config/default.yaml` file contains comments explaining roughly what each parameter does. 42 | 43 | The recommended way of running everything is by using the `run.py` utility. In general, you run it like this: 44 | 45 | `python run.py config/your_config_file.yaml` 46 | 47 | This utility handles creating the chirp that will be transmitted, compiling and running the C++ code that interacts with the SDR, and collecting the output data. 48 | All of the configuration for this is contained within your configuration YAML file. 49 | 50 | ## Information for developers and troubleshooting tips 51 | 52 | ### Adding a dependency 53 | 54 | If you need to add a new package, you can update `environment.yaml`. It's probably easiest to just do this manually; however, if you really want, you can also update it by exporting your existing environment: 55 | 56 | `conda env export --from-history > environment.yaml` 57 | 58 | Please check that the changes you made are what you expected before committing them. Also, please do not do this without the `--from-history` flag. The full environment export is a nightmare to debug across multiple platforms. When in doubt, manually update it. And always test it out by using the new `environment.yaml` to create a new environment. 59 | 60 | ### Using Visual Studio Code 61 | 62 | It takes a few extra steps to tell Visual Studio Code that you're using the conda environment. For setup instructions, [see here](tips/vscode.md). 63 | 64 | ## Adding features, git conventions 65 | 66 | The basic workflow for adding features should be something like this: 67 | 68 | 1. Create your own branch of this repository: `git checkout -b thomas/cool-new-feature` 69 | 70 | The `name/` thing is just a convention, but it's a nice way of keeping track of who is working on what. Also, many git GUIs (such as Sublime Merge) will sort different branches into folders so you can easily see all of your (or someone else's) branches. 71 | 72 | 2. Prototype whatever changes you want to make. Do whatever it takes to make it work. 73 | 74 | 3. (Optional) You might want to commit these changes (`git add `, `git commit -m "what I did"`) and then push the changes to GitHub (`git push --set-upstream origin thomas/cool-new-feature`). This will push your changes to GitHub, still in a separate branch, allowing others to see your branch along with the `main` branch and all the others. 75 | 76 | 4. Figure out how to cleanly integrate whatever you're doing with the rest of the code. This will probably mean making it possible to configure your feature or disable it completely with only changes to a YAML config file. Then make sure that the defaults are to not use it (if the feature won't be needed by others), so your changes don't break someone else's instrument. 77 | 78 | 5. Commit again. Push your changes. (See #3.) 79 | 80 | 6. Go to GitHub and you'll be prompted to create a [pull request](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests). Pull requests are a GitHub feature that lets you and others preview what happens if you merge your branch into the `main` branch. You can send others a link so we can all review the changes you're proposing, including making comments on them and discussing if there are any issues. 
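If you find yourself re-running the radar many times while developing a feature (for example, sweeping a parameter across several config files), you can also drive the same pipeline from your own Python script rather than invoking `run.py` by hand each time. Below is a minimal sketch based on the `RadarProcessRunner` class documented in `run.py`; the config path is a placeholder, and, like `run.py` itself, it assumes you launch the script from the root of the repository.

```
# Minimal sketch (not a drop-in tool): drive the chirp generation / build / run /
# data collection pipeline from Python using RadarProcessRunner from run.py.
# "config/your_config_file.yaml" is a placeholder for your own config file.
from run import RadarProcessRunner

runner = RadarProcessRunner("config/your_config_file.yaml")
runner.setup()               # generate the chirp and compile the C++ radar binary
runner.run()                 # start the radar process
runner.wait()                # returns when the process exits (call runner.stop() yourself if num_pulses is -1)
file_prefix = runner.stop()  # always call stop() -- it saves the data and returns the output file prefix
```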
81 | 82 | 83 | ## Miscellaneous Notes 84 | ### Symbol Errors when Compiling 85 | If you get errors like this: 86 | ``` 87 | dyld: lazy symbol binding failed: Symbol not found: __ZN5boost24scoped_static_mutex_lockC1ERNS_12static_mutexEb 88 | Referenced from: /opt/local/lib/libuhd.3.15.0.dylib 89 | Expected in: /opt/local/lib/libboost_regex-mt.dylib 90 | 91 | dyld: Symbol not found: __ZN5boost24scoped_static_mutex_lockC1ERNS_12static_mutexEb 92 | Referenced from: /opt/local/lib/libuhd.3.15.0.dylib 93 | Expected in: /opt/local/lib/libboost_regex-mt.dylib 94 | 95 | ./run_default.sh: line 8: 60892 Abort trap: 6 96 | ``` 97 | you should invalidate (clear/clean/delete) your `CMakeCache.txt` file (located in your `build/` directory) and double check that in `CMakeLists.txt` you have the include and library paths set to your conda installation: 98 | 99 | ``` 100 | set(CMAKE_LIBRARY_PATH "/Users/abroome/opt/miniconda3/envs/srg_uhd_radar/lib") 101 | set(CMAKE_INCLUDE_PATH "/Users/abroome/opt/miniconda3/envs/srg_uhd_radar/include") 102 | ``` 103 | 104 | ### Right Shift Operator Warnings 105 | If you get warnings like this: 106 | ``` 107 | space required between adjacent '>' delimeters of nested template argument lists ('>>' is the right shift operator) 108 | ``` 109 | check your `.vscode/c_cpp_properties.json` file and make sure that the C++ standard is set to at least 11: `"cppStandard": "c++11"`. 110 | 111 | ### Running on X310 112 | The `x310_startup.sh` script should be run once when first connecting to the X310. 113 | 114 | 115 | ## Potentially Helpful Ettus/NI Application Notes 116 | * [Timed commands](https://kb.ettus.com/Synchronizing_USRP_Events_Using_Timed_Commands_in_UHD) 117 | -------------------------------------------------------------------------------- /tests/error_code_late_command_sweep.py: -------------------------------------------------------------------------------- 1 | import time 2 | import argparse 3 | import os 4 | import sys 5 | import subprocess 6 | import signal 7 | import threading 8 | from ruamel.yaml import YAML 9 | import matplotlib.pyplot as plt 10 | import numpy as np 11 | import re 12 | import pickle 13 | 14 | sys.path.append("preprocessing") 15 | from generate_chirp import generate_from_yaml_filename 16 | sys.path.append("postprocessing") 17 | from save_data import save_data 18 | 19 | def run_and_fail_on_nonzero(cmd): 20 | retval = os.system(cmd) 21 | if retval != 0: 22 | print(f"Running '{cmd}' produced non-zero return value {retval}. 
Quitting...") 23 | exit(retval) 24 | 25 | # Output logging 26 | def log_output_from_usrp(out, file_out): 27 | global current_state 28 | for line in iter(out.readline, ''): 29 | file_out.write(f"[{time.time():0.3f}] \t{line}") 30 | print(f"UHD output: \t{line}", end="") 31 | out.close() 32 | file_out.close() 33 | 34 | def test_with_pulse_rep_int(yaml_filename, pulse_rep_int, timeout_s=60*2, tmp_yaml_filename='tmp_config.yaml.tmp'): 35 | # Load YAML file 36 | yaml = YAML(typ='safe') 37 | stream = open(yaml_filename) 38 | config = yaml.load(stream) 39 | 40 | # Modify 41 | config['CHIRP']['pulse_rep_int'] = pulse_rep_int 42 | print(f"Starting run with PRI of {pulse_rep_int} seconds.") 43 | 44 | with open(tmp_yaml_filename, 'w') as f: 45 | yaml.dump(config, f) 46 | 47 | uhd_process = subprocess.Popen(["./radar", tmp_yaml_filename], stdout=subprocess.PIPE, bufsize=1, close_fds=True, text=True, cwd="sdr/build") 48 | uhd_output_reader_thread = threading.Thread(target=log_output_from_usrp, args=(uhd_process.stdout, open('uhd_stdout.log', 'w'))) 49 | uhd_output_reader_thread.daemon = True # thread dies with the program 50 | uhd_output_reader_thread.start() 51 | 52 | print(f"Waiting up to {timeout_s} seconds for the process to quit") 53 | killed = False 54 | try: 55 | uhd_process.wait(timeout=timeout_s) 56 | except subprocess.TimeoutExpired as e: 57 | print(f"UHD process did not terminate within time limit. Killing...") 58 | uhd_process.kill() 59 | killed = True 60 | 61 | # Save output 62 | print("Copying data files...") 63 | file_prefix = save_data(yaml_filename, extra_files={"uhd_stdout.log": "uhd_stdout.log"}) 64 | print("Finished copying data.") 65 | 66 | with open("uhd_stdout.log", "r") as f: 67 | n_errors = sum(line.count("ERROR_CODE_LATE_COMMAND") for line in f) 68 | 69 | if killed: 70 | n_pulses_received = np.nan #max(config['CHIRP']['num_pulses'], n_errors) 71 | else: 72 | with open("uhd_stdout.log", "r") as f: 73 | rex = '.Total pulses attempted: (\d+)' 74 | n_pulses_received = re.findall(rex, f.read(), re.DOTALL) 75 | n_pulses_received = int(n_pulses_received[0]) 76 | 77 | print(f"{n_errors}/{n_pulses_received} ERROR_CODE_LATE_COMMAND errors detected / pulses attempted") 78 | 79 | return {'file_prefix': file_prefix, 'pulse_rep_int': pulse_rep_int, 'n_errors': n_errors, 'n_attempts': n_pulses_received, 'process_killed': killed} 80 | 81 | if __name__ == "__main__": 82 | 83 | # Check for correct working directory 84 | expected_cwd = os.popen('git rev-parse --show-toplevel').read().strip() # Root of git repo 85 | if os.getcwd() != expected_cwd: 86 | raise Exception(f"This script should ONLY be run from {expected_cwd}. Detected CWD {os.getcwd()}") 87 | 88 | # Check if a YAML file was provided as a command line argument 89 | parser = argparse.ArgumentParser() 90 | parser.add_argument("yaml_file", nargs='?', default='config/default.yaml', 91 | help='Path to YAML configuration file') 92 | parser.add_argument("--half_duplex", action='store_true', 93 | help='Calculate duty cycle for a half duplex transport layer. 
By default, assumes full duplex.') 94 | args = parser.parse_args() 95 | yaml_filename = args.yaml_file 96 | 97 | yaml = YAML(typ='safe') 98 | with open(yaml_filename) as stream: 99 | config = yaml.load(stream) 100 | 101 | # Chirp generation 102 | generate_from_yaml_filename(yaml_filename) 103 | 104 | # Compile UHD program 105 | 106 | os.chdir("sdr/build") 107 | run_and_fail_on_nonzero("cmake ..") 108 | run_and_fail_on_nonzero("make") 109 | os.chdir("../..") 110 | 111 | # Figure out a reasonable sweep range 112 | if args.half_duplex: 113 | active_time = (config['CHIRP']['tx_duration'] + config['CHIRP']['rx_duration']) / 2 114 | print(f"Half-duplex mode. Using active time of {active_time} seconds, which is the average of tx_duration and rx_duration") 115 | else: 116 | active_time = max(config['CHIRP']['tx_duration'], config['CHIRP']['rx_duration']) 117 | print(f"Full-duplex mode. Using active time of {active_time} seconds, which is the maximum of tx_duration and rx_duration") 118 | 119 | # Converters to show pulse_rep_int in terms of the effective duty cycle 120 | # (averaged across TX and RX) 121 | # pri is in seconds (same units as tx_duration and rx_duration) 122 | def pri_to_duty(pri): 123 | return 100 * active_time / (pri) 124 | 125 | def duty_to_pri(duty): 126 | return 100 * active_time / (duty) 127 | 128 | duty_cycles = np.arange(pri_to_duty(max(config['CHIRP']['tx_duration'], config['CHIRP']['rx_duration'])), 1.0, -5) # in percent 129 | duty_cycles = np.flip(duty_cycles) 130 | pris = duty_to_pri(duty_cycles) 131 | 132 | print(f"pri values: {pris}") 133 | print(f"duty cycles: {duty_cycles}") 134 | results = {} 135 | 136 | # Run sweep 137 | for pri in pris: 138 | expected_time = 120 + ((pri * config['CHIRP']['num_pulses']) * 2) # Time to allow process to run -- two minutes (for setup) + 2x the expected error-free time 139 | results[pri] = test_with_pulse_rep_int(yaml_filename, pulse_rep_int = float(pri), timeout_s=expected_time) 140 | 141 | for i, j in results.items(): 142 | print(f"pulse_rep_int: {i} \tn_errors: {j['n_errors']}\tn_pulses_attempted: {j['n_attempts']}\tprefix: {j['file_prefix']}") 143 | 144 | # Save results 145 | # Save after each run to preserve in case of a crash 146 | pickle_path = f"./tests/{results[pris[0]]['file_prefix']}_error_code_late_command.pickle" 147 | 148 | pris_so_far = list(results.keys()) 149 | n_error_list = np.array([results[val]['n_errors'] for val in pris_so_far]) 150 | n_pulse_attempts = np.array([results[val]['n_attempts'] for val in pris_so_far]) 151 | was_killed = np.array([results[val]['process_killed'] for val in pris_so_far]) 152 | with open(pickle_path, 'wb') as f: 153 | pickle.dump({ 154 | 'n_error_list': n_error_list, 155 | 'n_pulse_attempts': n_pulse_attempts, 156 | 'was_killed': was_killed, 157 | 'pri': pris_so_far, 158 | 'config': config 159 | }, f) 160 | print(f"Pickle file saved to: {pickle_path}") 161 | 162 | duty_cycles = [] 163 | n_errors = [] 164 | n_attempted = [] 165 | 166 | for pri, result in results.items(): 167 | duty_cycles.append(pri_to_duty(pri)) 168 | n_errors.append(result['n_errors']) 169 | n_attempted.append(result['n_attempts']) 170 | 171 | duty_cycles = np.array(duty_cycles) 172 | n_errors = np.array(n_errors) 173 | n_attempted = np.array(n_attempted) 174 | 175 | fig, ax = plt.subplots(figsize=(10,6), facecolor='white') 176 | #ax.scatter(duty_cycles, n_error_list / config['CHIRP']['num_pulses'] * 100) 177 | ax.scatter(duty_cycles, n_errors / n_attempted * 100) 178 | ax.set_xlabel('Duty cycle [%]') 179 | secax = ax.secondary_xaxis('top', 
functions=(duty_to_pri, pri_to_duty)) 180 | secax.set_xlabel('pulse_rep_int [microseconds]') 181 | secax.set_xticks(duty_to_pri(ax.get_xticks())) 182 | secax.set_xticklabels([f"{x*1e6:.2f} us" for x in secax.get_xticks()], rotation="vertical") 183 | ax.set_ylim(0, 100) 184 | ax.set_xlim(duty_cycles[-1], duty_cycles[0]) 185 | 186 | ax.set_ylabel('Percent of ERROR_CODE_LATE_COMMAND [%]') # AB: COULD CALL THIS THE ERROR RATE 187 | ax.set_title(f"tx_duration: {config['CHIRP']['tx_duration']}, rx_duration: {config['CHIRP']['rx_duration']}, num_pulses: {config['CHIRP']['num_pulses']}") 188 | ax.grid() 189 | fig.tight_layout() 190 | 191 | fig_path = f"./tests/{results[pris[0]]['file_prefix']}_error_code_late_command.png" 192 | fig.savefig(fig_path) 193 | print(f"Figure saved to: {fig_path}") -------------------------------------------------------------------------------- /postprocessing/notebooks/Radar Spectrogram.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "from dask.distributed import Client, LocalCluster\n", 20 | "\n", 21 | "client = Client() # Note that `memory_limit` is the limit **per worker**.\n", 22 | "# n_workers=4,\n", 23 | "# threads_per_worker=1,\n", 24 | "# memory_limit='3GB'\n", 25 | "client # If you click the dashboard link in the output, you can monitor real-time progress and get other cool visualizations." 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "import os\n", 35 | "import copy\n", 36 | "import sys\n", 37 | "import xarray as xr\n", 38 | "import numpy as np\n", 39 | "import dask.array as da\n", 40 | "\n", 41 | "import matplotlib.pyplot as plt\n", 42 | "import hvplot.xarray\n", 43 | "import scipy.constants\n", 44 | "\n", 45 | "sys.path.append(\"..\")\n", 46 | "import processing_dask as pr\n", 47 | "import plot_dask\n", 48 | "import processing as old_processing\n", 49 | "\n", 50 | "sys.path.append(\"../../preprocessing/\")\n", 51 | "from generate_chirp import generate_chirp" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "# file path to data and configs\n", 61 | "\n", 62 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20220903-vatnajokull-day4/20220903_033000\" # Vatnajokull flight\n", 63 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230315-slakbreen-day3/20230315_064228\" # Slakbreen flight\n", 64 | "\n", 65 | "\n", 66 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230721-summit-day1-bench/20230721_104552\"\n", 67 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_103641\"\n", 68 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_103845\" # 6 db higher tx\n", 69 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104059\"\n", 70 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104248\" # another 6 db\n", 71 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104456\" # 3db higher rx\n", 72 | 
"#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_104742\" # 20 -> 30 mhz chirp bw\n", 73 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105025\" # bw back to 20, 15 mhz lo offset\n", 74 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105509\" # 100k pulses, 30 us transmit, 90 us rx\n", 75 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_105921\" # 100k pulses, 10 us transmit, 90 us rx\n", 76 | "#prefix = \"/home/radioglaciology/thomas/radar_data/20230723-summit-day3-2start/20230723_110253\" # 100k pulses, 5 us transmit, 60 us rx\n", 77 | "\n", 78 | "#prefix = \"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_151432\" # 20 us transmit\n", 79 | "#prefix = \"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_153254\" # 10 us transmit\n", 80 | "#prefix = \"/media/radioglaciology/Extreme SSD/summit_2023_mapper/20230723_152404\" # 5 us transmit\n", 81 | "\n", 82 | "#prefix = \"/media/radioglaciology/Extreme SSD/summit_2023_mapper/07252023/20230725_161051\" # mapper\n", 83 | "\n", 84 | "#prefix = \"/Volumes/Extreme SSD/Summit2023/07232023_anna/20230723_151432\"\n", 85 | "#prefix = \"/Volumes/Extreme SSD/Summit2023/07252023_anna/20230725_141923\"\n", 86 | "\n", 87 | "prefix = \"/Volumes/Extreme SSD/orca_paper_data_files/dithering/b205/20240226_232654\" # Thomas, yes dithering, no lo offset\n", 88 | "\n", 89 | "# resave data as zarr for dask processing\n", 90 | "zarr_path = pr.save_radar_data_to_zarr(prefix, zarr_base_location=\"/Volumes/Extreme SSD/Summit2023/07232023_anna/zarrs/\")\n", 91 | "\n", 92 | "# open zarr file, adjust chunk size to be 10 MB - 1 GB based on sample rate/bit depth\n", 93 | "raw = xr.open_zarr(zarr_path, chunks={\"pulse_idx\": 1000})" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "n_stack = len(raw.pulse_idx)\n", 103 | "stacked = pr.stack(raw.chunk({'sample_idx': 1000}), n_stack).chunk({'sample_idx': -1})" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [ 112 | "start_time = 20e-6 # 3e5\n", 113 | "input_data = stacked[\"radar_data\"].where(stacked.fast_time.compute() >= start_time, drop=True) #.where(stacked.slow_time <= 115, drop=True).where(stacked.slow_time >= 95, drop=True)\n", 114 | "normalize = False\n", 115 | "#inpt = raw\n", 116 | "#pulse = pr.stack(inpt, n_stack)[{'pulse_idx':0}][\"radar_data\"].to_numpy()\n", 117 | "\n", 118 | "# f, t, S = scipy.signal.spectrogram(\n", 119 | "# input_data.to_numpy().T.flatten(),\n", 120 | "# fs=raw.attrs[\"config\"][\"GENERATE\"][\"sample_rate\"],\n", 121 | "# window='flattop',\n", 122 | "# nperseg=64,\n", 123 | "# noverlap=32,\n", 124 | "# scaling='density', mode='psd',\n", 125 | "# return_onesided=False\n", 126 | "# )\n", 127 | "\n", 128 | "f, t, S = scipy.signal.spectrogram(\n", 129 | " input_data.to_numpy().T.flatten(),\n", 130 | " fs=raw.attrs[\"config\"][\"GENERATE\"][\"sample_rate\"],\n", 131 | " window='flattop',\n", 132 | " nperseg=64,\n", 133 | " noverlap=32,\n", 134 | " scaling='density', mode='psd',\n", 135 | " return_onesided=False\n", 136 | ")\n", 137 | "\n", 138 | "if normalize:\n", 139 | " S /= np.max(S)" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": null, 145 | "metadata": {}, 146 | "outputs": [], 
147 | "source": [ 148 | "S.shape" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": null, 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [ 157 | "fig, ax = plt.subplots(facecolor='white', figsize=(10,6))\n", 158 | "freq_mhz = (np.fft.fftshift(f) + raw.attrs['config']['RF0']['freq']) / 1e6\n", 159 | "pcm = ax.pcolormesh(10*np.log10(np.abs(np.fft.fftshift(S, axes=0))), shading='nearest')#, vmin=-400, vmax=-200)\n", 160 | "clb = fig.colorbar(pcm, ax=ax)\n", 161 | "clb.set_label('Power [dB]')\n", 162 | "#ax.set_xlabel('Slow Time [s]')\n", 163 | "ax.set_xlabel('Fast Time')\n", 164 | "ax.set_ylabel('Frequency [MHz]')\n", 165 | "ax.set_title(f\"{os.path.basename(stacked.prefix)}\\nSpectrogram of received data with n_stack={n_stack},\\ntime gated to >={start_time} s fast time\")\n", 166 | "#ax.set_xlim(0, 1e-5)\n", 167 | "#ax.set_xlim(50000, 100000)" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": null, 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [ 176 | "input_data" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "fig.savefig(f\"/Users/abroome/Desktop/Summit2023/figs/{raw.basename}-spectrogram-st-stack{n_stack}_gating.png\")" 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": null, 191 | "metadata": {}, 192 | "outputs": [], 193 | "source": [ 194 | "test = pr.stack(raw, 10)\n", 195 | "test.pulse_idx[1] - test.pulse_idx[0]" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "metadata": {}, 202 | "outputs": [], 203 | "source": [] 204 | } 205 | ], 206 | "metadata": { 207 | "kernelspec": { 208 | "display_name": "sprinkles", 209 | "language": "python", 210 | "name": "sprinkles" 211 | }, 212 | "language_info": { 213 | "codemirror_mode": { 214 | "name": "ipython", 215 | "version": 3 216 | }, 217 | "file_extension": ".py", 218 | "mimetype": "text/x-python", 219 | "name": "python", 220 | "nbconvert_exporter": "python", 221 | "pygments_lexer": "ipython3", 222 | "version": "3.9.7" 223 | } 224 | }, 225 | "nbformat": 4, 226 | "nbformat_minor": 4 227 | } 228 | -------------------------------------------------------------------------------- /postprocessing/notebooks/orca_paper/Noise Power Dithering Figure.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "from dask.distributed import Client, LocalCluster\n", 20 | "client = Client(n_workers=1,\n", 21 | " threads_per_worker=4,\n", 22 | " memory_limit='16GB')\n", 23 | "client" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "import copy\n", 33 | "import sys\n", 34 | "import xarray as xr\n", 35 | "import numpy as np\n", 36 | "import dask.array as da\n", 37 | "import time\n", 38 | "import os\n", 39 | "\n", 40 | "import dask\n", 41 | "\n", 42 | "import matplotlib.pyplot as plt\n", 43 | "import hvplot.xarray\n", 44 | "import holoviews as hv\n", 45 | "import scipy.constants\n", 46 | "import scipy\n", 47 | "\n", 48 | "sys.path.append(\"../..\")\n", 49 | "import processing_dask 
as pr\n", 50 | "import plot_dask\n", 51 | "\n", 52 | "sys.path.append(\"../../../preprocessing/\")\n", 53 | "from generate_chirp import generate_chirp" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "#prefix = \"/media/thomas/Extreme SSD/orca_paper_data_files/phase_noise/b205/20240222_203345\"\n", 63 | "\n", 64 | "#prefix = \"/Volumes/Extreme SSD/orca_paper/20240226_105437\" # no phase dithering, no LO offset\n", 65 | "#prefix = \"/Volumes/Extreme SSD/orca_paper/20240226_105916\" # yes phase dithering, no LO offset\n", 66 | "#prefix = \"/Volumes/Extreme SSD/orca_paper/20240226_110410\" # no phase dithering, software LO offset of 12.5 MHz\n", 67 | "#prefix = \"/Volumes/Extreme SSD/orca_paper/20240226_110948\" # yes phase dithering, software LO offset of 12.5 MHz\n", 68 | "\n", 69 | "prefix = \"/Volumes/Extreme SSD/orca_paper_data_files/dithering/b205/20240226_225223\" # no phase dithering, software LO offset of 12.5 MHz\n", 70 | "\n", 71 | "#zero_sample_idx = 63 # X310, fs = 50 MHz\n", 72 | "zero_sample_idx = 159\n", 73 | "\n", 74 | "dielectric_constant = 2.2957 # ice (air = 1, 66% velocity coax = 2.2957)\n", 75 | "sig_speed = scipy.constants.c / np.sqrt(dielectric_constant)\n", 76 | "\n", 77 | "zarr_path = pr.save_radar_data_to_zarr(prefix)\n", 78 | "\n", 79 | "zarr_path" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "raw = xr.open_zarr(zarr_path)" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "chirp_ts, chirp = generate_chirp(raw.config)\n", 98 | "\n", 99 | "compressed = pr.pulse_compress(raw, chirp,\n", 100 | " fs=raw.config['GENERATE']['sample_rate'],\n", 101 | " zero_sample_idx=zero_sample_idx,\n", 102 | " signal_speed=sig_speed).persist()" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "# save pulse compressed data to location\n", 112 | "#zarr_base_location = \"/Volumes/Extreme SSD/orca_paper/\"\n", 113 | "zarr_base_location = \"/Volumes/Extreme SSD/orca_paper_data_files/dithering/b205\"\n", 114 | "compressed_zarr_path = os.path.join(zarr_base_location, raw.basename + \"_pulsecompressed.zarr\")\n", 115 | "print(\"Writing pulse compressed data to: \", compressed_zarr_path)\n", 116 | "\n", 117 | "compressed.to_zarr(compressed_zarr_path, mode='w')" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "compressed = xr.open_zarr(\"/Volumes/Extreme SSD/orca_paper_data_files/dithering/b205/20240226_225223_pulsecompressed.zarr\")" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [ 135 | "stacks = [1, 10, 100, 1000, 10000, 100000, 1000000]\n", 136 | "ts = stacks\n", 137 | "\n", 138 | "noise_start_distance_1way = 1000 # m" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": null, 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "compressed" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "## Noise Floor Variance" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": null, 160 | "metadata": {}, 161 | "outputs": 
[], 162 | "source": [ 163 | "actual_stack_t = np.nan * np.zeros_like(ts)\n", 164 | "actual_stack_n = np.zeros_like(ts, dtype=int)\n", 165 | "\n", 166 | "# Statistics to compute\n", 167 | "stack_noise_var = np.nan * np.zeros_like(ts)\n", 168 | "stack_noise_mean = np.nan * np.zeros_like(ts)" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "metadata": { 175 | "scrolled": true, 176 | "tags": [] 177 | }, 178 | "outputs": [], 179 | "source": [ 180 | "for t_idx, t in enumerate(ts):\n", 181 | " if not np.isnan(stack_noise_mean[t_idx]):\n", 182 | " continue # Skip if already computed (in case of interruption and restart)\n", 183 | " \n", 184 | " timestamp = time.time() # Track computation time \n", 185 | "\n", 186 | " actual_stack_n[t_idx] = t\n", 187 | " actual_stack_t[t_idx] = actual_stack_n[t_idx] * raw.attrs['config']['CHIRP']['pulse_rep_int'] # TODO: Account for errors?\n", 188 | " print(f\"[{t_idx+1}/{len(ts)}] \\tt={actual_stack_t[t_idx]} \\tn_stack={actual_stack_n[t_idx]}\")\n", 189 | " \n", 190 | " with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n", 191 | "\n", 192 | " stacked = pr.stack(compressed, actual_stack_n[t_idx])\n", 193 | " compressed_pwr = xr.apply_ufunc(lambda x: np.abs(x)**2, stacked, dask='parallelized').chunk(\"auto\")\n", 194 | " \n", 195 | " noise_pwr = compressed_pwr[\"radar_data\"].where((compressed_pwr.reflection_distance > noise_start_distance_1way)).dropna('travel_time').chunk(\"auto\")\n", 196 | " \n", 197 | " stack_noise_var[t_idx] = noise_pwr.var(dim=\"travel_time\").mean().compute().item()\n", 198 | " stack_noise_mean[t_idx] = noise_pwr.mean().compute().item()\n", 199 | "\n", 200 | " \n", 201 | " print(f\"Completed in {time.time() - timestamp} seconds from {len(noise_pwr)} stacked pulses\")" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": null, 207 | "metadata": {}, 208 | "outputs": [], 209 | "source": [ 210 | "fig, (ax_noise_var, ax_noise_mean) = plt.subplots(2, 1, figsize=(10, 20))\n", 211 | "\n", 212 | "ax_noise_var.scatter(actual_stack_n, stack_noise_var)\n", 213 | "ax_noise_var.set_title(\"Noise Variance\")\n", 214 | "ax_noise_var.loglog()\n", 215 | "ax_noise_var.grid()\n", 216 | "\n", 217 | "ax_noise_mean.scatter(actual_stack_n, stack_noise_mean)\n", 218 | "ax_noise_mean.set_title(\"Noise Mean\")\n", 219 | "ax_noise_mean.loglog()\n", 220 | "ax_noise_mean.grid()" 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": null, 226 | "metadata": {}, 227 | "outputs": [], 228 | "source": [ 229 | "import pickle\n", 230 | "with open(f\"outputs/{raw.basename}-noise-stats.pickle\", \"wb\") as f:\n", 231 | " pickle.dump({'n_stacks': actual_stack_n, 'stack_times': actual_stack_t, 'stack_noise_var': stack_noise_var, 'stack_noise_mean': stack_noise_mean, 'prefix': raw.prefix}, f)" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": null, 237 | "metadata": {}, 238 | "outputs": [], 239 | "source": [] 240 | } 241 | ], 242 | "metadata": { 243 | "kernelspec": { 244 | "display_name": "sprinkles", 245 | "language": "python", 246 | "name": "sprinkles" 247 | }, 248 | "language_info": { 249 | "codemirror_mode": { 250 | "name": "ipython", 251 | "version": 3 252 | }, 253 | "file_extension": ".py", 254 | "mimetype": "text/x-python", 255 | "name": "python", 256 | "nbconvert_exporter": "python", 257 | "pygments_lexer": "ipython3", 258 | "version": "3.9.7" 259 | } 260 | }, 261 | "nbformat": 4, 262 | "nbformat_minor": 4 263 | } 264 | 
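The notebook above finishes by pickling its noise statistics to `outputs/<basename>-noise-stats.pickle` with the keys `n_stacks`, `stack_times`, `stack_noise_var`, `stack_noise_mean`, and `prefix`. A minimal sketch of reloading one of those pickles and re-plotting noise variance against the number of stacked pulses (the filename below is just an example of what the notebook produces):

```
# Minimal sketch: reload the noise statistics saved by the notebook above.
# The pickle filename is an example; use whatever <basename>-noise-stats.pickle
# file the notebook wrote to its outputs/ directory.
import pickle
import matplotlib.pyplot as plt

with open("outputs/20240226_225223-noise-stats.pickle", "rb") as f:
    stats = pickle.load(f)

fig, ax = plt.subplots()
ax.scatter(stats['n_stacks'], stats['stack_noise_var'])
ax.set_xlabel('Number of stacked pulses')
ax.set_ylabel('Noise power variance')
ax.loglog()
ax.grid()
plt.show()
```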
-------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | import time 2 | import argparse 3 | import os 4 | import sys 5 | import shutil 6 | import subprocess 7 | import signal 8 | import threading 9 | import queue 10 | from ruamel.yaml import YAML 11 | 12 | sys.path.append("preprocessing") 13 | from generate_chirp import generate_from_yaml_filename 14 | sys.path.append("postprocessing") 15 | from save_data import save_data 16 | 17 | """ 18 | Provides a simple interface to build, run, and manage data outputs from the SDR code 19 | 20 | Should generally be used like this: 21 | 22 | runner = RadarProcessRunner(yaml_filename) 23 | runner.setup() # Build the binary and get everything ready 24 | runner.run() # Run the radar program 25 | runner.wait() # Wait for it to finish (if `num_pulses` == -1, then you need to call runner.stop() before this will return) 26 | runner.stop() # Wrap everything up and save data -- you need to call this even if the radar program finishes on its own 27 | """ 28 | class RadarProcessRunner(): 29 | """ 30 | yaml_filename -- path to the YAML config file that provides all the required settings 31 | output_log_path -- (temporary) location to store stdout of the C++ code 32 | log_processing_function -- optional function if you need to read and process the stdout data in real time 33 | output_to_stdout -- set True to also print the output, in addition to storing it to a file 34 | """ 35 | def __init__(self, yaml_filename, output_log_path="uhd_stdout.log", log_processing_function=None, output_to_stdout=True): 36 | self.yaml_filename = yaml_filename 37 | self.output_log_path = output_log_path 38 | self.log_processing_function = log_processing_function 39 | self.output_to_stdout = output_to_stdout 40 | 41 | self.setup_complete = False 42 | self.is_running = False 43 | 44 | self.file_queue = queue.Queue() 45 | self.file_queue_size = 1 46 | self.output_file = None 47 | self.output_file_path = None 48 | 49 | """ 50 | Manage the stdout of the radar program, including logging it to a file and optionally sending it for additional processing 51 | """ 52 | def process_usrp_output(self, out, file_out, also_print=True): 53 | for line in iter(out.readline, ''): 54 | # Save output to specified file with timestamp 55 | t = time.time() 56 | file_out.write(f"[{t:0.3f}] \t{line}") 57 | 58 | # If provided, pass output to external function for processing 59 | if self.log_processing_function is not None: 60 | self.log_processing_function(line) 61 | 62 | # If specified, also print to stdout 63 | if also_print: 64 | print(f"[{t:0.3f}] \t{line}", end="") 65 | 66 | # Enqueue for saving somewhere else 67 | if line.startswith("[CLOSE FILE]"): 68 | filename = (line[13:]).strip() 69 | if filename.startswith("../../"): # Automatically added to escape cwd of binary 70 | filename = filename[6:] # Strip it out 71 | self.file_queue.put(filename) 72 | 73 | out.close() 74 | file_out.close() 75 | self.file_queue_size = self.file_queue.qsize() 76 | 77 | """ 78 | Build the radar program, generate the chirp, and get ready to run 79 | """ 80 | def setup(self): 81 | 82 | # Verify CWD 83 | git_root = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode('utf-8') 84 | git_root = "".join(git_root.split()) 85 | cwd = os.getcwd() 86 | if os.path.normpath(git_root) != os.path.normpath(cwd): 87 | print(f"This script should ONLY be run from the root of the git repo ({git_root}). 
Detected CWD {cwd}") 88 | exit(1) 89 | 90 | # Load YAML 91 | yaml = YAML() 92 | with open(self.yaml_filename) as stream: 93 | self.config = yaml.load(stream) 94 | 95 | # Verify file save options 96 | if (self.config['RUN_MANAGER']['final_save_loc'] is None) and (self.config['RUN_MANAGER']['save_partial_files'] is False) and (self.config['FILES']['max_chirps_per_file'] != -1): 97 | print("You must choose to save at least some of your data. In yaml: file_save_loc cannot be empty and save_partial files cannot be false at the same time, unless .") 98 | exit(1) 99 | 100 | # Chirp generation 101 | try: 102 | generate_from_yaml_filename(self.yaml_filename) 103 | except Exception as e: 104 | print(e) 105 | exit(1) 106 | 107 | # Compile UHD program 108 | def run_and_fail_on_nonzero(cmd): 109 | retval = os.system(cmd) 110 | if retval != 0: 111 | print(f"Running '{cmd}' produced non-zero return value {retval}. Quitting...") 112 | exit(1) 113 | 114 | os.chdir("sdr/build") 115 | run_and_fail_on_nonzero("cmake ..") 116 | run_and_fail_on_nonzero("make") 117 | os.chdir("../..") 118 | 119 | self.setup_complete = True 120 | 121 | """ 122 | Start the radar program 123 | """ 124 | def run(self): 125 | if not self.setup_complete: 126 | raise Exception("Must call setup() before calling run(). If setup() does not complete successfully, you cannot call run().") 127 | 128 | self.uhd_process = subprocess.Popen(["./radar", self.yaml_filename], stdout=subprocess.PIPE, bufsize=1, close_fds=True, text=True, cwd="sdr/build") 129 | self.uhd_output_reader_thread = threading.Thread(target=self.process_usrp_output, args=(self.uhd_process.stdout, open('uhd_stdout.log', 'w'), self.output_to_stdout)) 130 | self.uhd_output_reader_thread.daemon = True # thread dies with the program 131 | self.uhd_output_reader_thread.start() 132 | self.is_running = True 133 | 134 | """ 135 | Wait (up to `timeout` seconds, if not None) for the process to complete 136 | """ 137 | def wait(self, timeout = None): 138 | t = time.time() 139 | while self.uhd_process.returncode is None: 140 | self.uhd_process.poll() 141 | time.sleep(1) 142 | if (timeout is not None) and (time.time() - t > timeout): 143 | self.stop() 144 | 145 | """ 146 | Ends the radar program (if not already terminated) and saves data 147 | """ 148 | def stop(self, timeout = 10): 149 | if not self.is_running: 150 | return 0 151 | 152 | was_force_killed = False 153 | 154 | print("Attemping to stop UHD process") 155 | self.uhd_process.send_signal(signal.SIGINT) 156 | print(f"Waiting up to {timeout} seconds for the process to quit") 157 | t = time.time() 158 | try: 159 | self.uhd_process.wait(timeout=timeout) 160 | except subprocess.TimeoutExpired as e: 161 | print(f"UHD process did not terminate within time limit. 
Killing...") 162 | self.uhd_process.kill() 163 | was_force_killed = True 164 | self.is_running = False 165 | 166 | self.uhd_output_reader_thread.join() 167 | 168 | # If necessary, concatenate data files into a single file 169 | alternative_rx_samps_loc = None 170 | if (self.config['RUN_MANAGER']['final_save_loc'] is not None) and (self.config['FILES']['max_chirps_per_file'] != -1): 171 | print("Calling save_from_queue()") 172 | self.save_from_queue() 173 | alternative_rx_samps_loc = self.output_file_path 174 | 175 | # Save output 176 | print("Copying data files...") 177 | file_prefix = save_data(self.yaml_filename, alternative_rx_samps_loc=alternative_rx_samps_loc, num_files=self.file_queue_size, extra_files={"uhd_stdout.log": "uhd_stdout.log"}) 178 | print("Finished copying data.") 179 | 180 | self.output_file = None 181 | 182 | return file_prefix 183 | 184 | """ 185 | Copy data from split files into a single data output file 186 | """ 187 | def save_from_queue(self): 188 | if self.output_file is None: 189 | self.output_file_path = self.config['RUN_MANAGER']['final_save_loc'] 190 | self.output_file = open(self.output_file_path, 'wb') 191 | 192 | while(not self.file_queue.empty()): 193 | with open(self.file_queue.get(), 'rb') as f: 194 | shutil.copyfileobj(f, self.output_file) 195 | 196 | self.output_file.close() 197 | 198 | 199 | if __name__ == "__main__": 200 | 201 | # Check if a YAML file was provided as a command line argument 202 | parser = argparse.ArgumentParser() 203 | parser.add_argument("yaml_file", nargs='?', default='config/default.yaml', 204 | help='Path to YAML configuration file') 205 | args = parser.parse_args() 206 | yaml_filename = args.yaml_file 207 | 208 | # Build and run UHD radar code 209 | 210 | runner = RadarProcessRunner(yaml_filename) 211 | 212 | def sigint_handler(signum, frame): 213 | runner.stop() # On Ctrl-C, attempt to stop radar process 214 | 215 | runner.setup() 216 | runner.run() 217 | signal.signal(signal.SIGINT, sigint_handler) 218 | runner.wait() 219 | runner.stop() 220 | 221 | -------------------------------------------------------------------------------- /config/default.yaml: -------------------------------------------------------------------------------- 1 | # This is an example of a YAML configuration file used to set parameters for the 2 | # ORCA system. 3 | # This configuration file is meant for the Ettus USRP B205mini-i. 4 | 5 | ### CHIRP AND PULSE PARAMETERS 6 | GENERATE: 7 | sample_rate: &s_rate 56e6 # [Hz] Sample rate of the generated 8 | # chirp (used as TX and RX rate too) 9 | chirp_type: 'linear' # Chirp frequency progression type. 
10 | # Supported options: "linear", 11 | # "hyperbolic" 12 | chirp_bandwidth: 25e6 # [Hz] Bandwidth of the chirp 13 | lo_offset_sw: 0e6 # [Hz] Center frequency of the chirp 14 | # (relative to RF center frequency) 15 | window: 'rectangular' # Window function applied to the chirp 16 | # Supported options: "rectangular", 17 | # "hamming", "blackman", "kaiser10", 18 | # "kaiser14", "kaiser18" 19 | chirp_length: &chirp_len 20e-6 # [s] Chirp length without zero padding 20 | pulse_length: &pulse_len 20e-6 # [s] Total pulse length (chirp + 21 | # symmetric zero padding) 22 | out_file: &ch_sent "data/chirp.bin" # The name of the output binary file 23 | # containing the pulse samples 24 | show_plot: False # Display a time-domain plot of the 25 | # generated chirp 26 | 27 | ### DEVICE CONNECTION AND DATA TRANSFER 28 | DEVICE: 29 | # USRP device arguments are used to identify specific SDRs (if multiple are 30 | # connected to the same computer), to configure model-specific parameters, 31 | # and to set transport parameters of the link (USB, ethernet) between the 32 | # SDR and the host computer. For advice on tuning transport parameters, see: 33 | # https://radioglaciology.github.io/orca-documentation/docs/radar/host-connection/ 34 | # For more details, see the relevant Ettus help pages: 35 | # Device identification: https://files.ettus.com/manual/page_identification.html 36 | # Advanced config: https://files.ettus.com/manual/page_configuration.html 37 | # Transport parameters: https://files.ettus.com/manual/page_transport.html 38 | device_args: "num_recv_frames=700,num_send_frames=700,recv_frame_size=11000,send_frame_size=11000" 39 | subdev: "A:A" # Active SDR submodules 40 | # See https://files.ettus.com/manual/page_configuration.html 41 | clk_ref: "internal" # Clock reference source 42 | # See https://files.ettus.com/manual/page_sync.html 43 | clk_rate: 56e6 # [Hz] SDR main clock frequency 44 | tx_channels: "0" # List of TX channels to use (comma 45 | # separated) 46 | rx_channels: "0" # List of RX channels to use (comma 47 | # separated) 48 | # (must be the same length as tx_channels) 49 | cpu_format: "fc32" # CPU-side sample format 50 | # See https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a602a64b4937a85dba84e7f724387e252 51 | # Supported options: "fc32", "sc16", 52 | # "sc8" 53 | otw_format: "sc12" # On the wire format 54 | # See https://files.ettus.com/manual/structuhd_1_1stream__args__t.html#a0ba0e946d2f83f7ac085f4f4e2ce9578 55 | # (Any format supported.) 
56 | ### GPIO PIN CONFIGURATION 57 | GPIO: 58 | gpio_bank: "FP0" # Which GPIO bank to use (FP0 is front 59 | # panel and default) 60 | pwr_amp_pin: "-1" # Which GPIO pin to use for external 61 | # power amplifier control 62 | # (set to -1 if not using) 63 | ref_out: -1 # Turns the 10 MHz reference out signal 64 | # on (1) or off (0) 65 | # set to (-1) if SDR does not support 66 | ### RF FRONTEND 0 CONFIGURATION 67 | RF0: 68 | rx_rate: *s_rate # [Hz] RX Sample Rate 69 | tx_rate: *s_rate # [Hz] TX Sample Rate 70 | freq: 450e6 # [Hz] Center Frequency (mixer frequency) 71 | lo_offset: 15e6 # [Hz] LO offset 72 | rx_gain: 36 # [dB] RX Gain 73 | tx_gain: 65 # [dB] TX Gain - 60.8 is about -10 dBm 74 | # output on the b205mini 75 | bw: 56e6 # [Hz] Configurable filter bandwidth 76 | tx_ant: "TX/RX" # Port to be used for TX 77 | rx_ant: "RX2" # port to be used for RX 78 | transmit: true # "true" (or not set) for normal operation, 79 | # set to "false" to completely disable transmit 80 | tuning_args: "" # Set int_n or fractional tuning args, 81 | # leave as "" to do nothing (only 82 | # supported on some SDRs) 83 | ### RF FRONTEND 1 CONFIGURATION (not supported on b205mini) 84 | RF1: 85 | rx_rate: *s_rate # [Hz] RX Sample Rate 86 | tx_rate: *s_rate # [Hz] TX Sample Rate 87 | freq: 450e6 # [Hz] Center Frequency (mixer frequency) 88 | lo_offset: 0e6 # [Hz] LO offset 89 | rx_gain: 10 # [dB] RX Gain 90 | tx_gain: 10 # [dB] TX Gain - 60.8 is about -10 dBm 91 | # output on the b205mini 92 | bw: 56e6 # [Hz] Configurable filter bandwidth 93 | tx_ant: "TX/RX" # Port to be used for TX 94 | rx_ant: "RX2" # port to be used for RX 95 | transmit: false # "true" (or not set) for normal operation, 96 | # set to "false" to completely disable transmit 97 | tuning_args: "" # Set int_n or fractional tuning args, 98 | # leave as "" to do nothing (only 99 | # supported on some SDRs) 100 | ### PULSE TIMING 101 | CHIRP: 102 | time_offset: 1 # [s] Offset time before the first 103 | # received sample 104 | tx_duration: *pulse_len # [s] Transmission duration 105 | rx_duration: 20e-6 # [s] Receive duration 106 | tr_on_lead: 0e-6 # [s] Time from GPIO output toggle on 107 | # to TX (if using GPIO) 108 | tr_off_trail: 0e-6 # [s] Time from TX off to GPIO output 109 | # off (if using GPIO) 110 | pulse_rep_int: 200e-6 # [s] Pulse period 111 | tx_lead: 0e-6 # [s] Time between start of TX and RX 112 | num_pulses: &num_pulses 10000 # No. of chirps to TX/RX - set to -1 to 113 | # continuously transmit pulses until 114 | # stopped 115 | num_presums: 1 # Number of received pulses to average 116 | # over before writing to file 117 | phase_dithering: true # Enable phase dithering 118 | ### DURING-RECORDING FILE LOCATIONS 119 | FILES: 120 | chirp_loc: *ch_sent # Chirp file to transmit 121 | save_loc: &save_loc "data/rx_samps.bin" # (Temporary) location to write 122 | # received samples to 123 | gps_loc: &gps_save_loc "data/gps_log.txt" # (Temporary) location to save GPS 124 | # data (only works if gpsdo is 125 | # selected as the clock source) 126 | max_chirps_per_file: -1 # Maximum number of RX from a chirp to 127 | # write to a single file set to -1 to 128 | # avoid breaking into multiple files 129 | ### RUN.PY FILE SAVE LOCATIONS 130 | RUN_MANAGER: # These settings are only used by run.py -- not read by main.cpp 131 | # Note: if max_chirps_per_file = -1 (i.e. 
all data will be written directly 132 | # to a single file, then final_save_loc and save_partial_files will be ignored 133 | final_save_loc: null # Save location for the big final file, 134 | # set to null if you don't want to 135 | # save a big file 136 | save_partial_files: False # Set to true if you want individual 137 | # small files to be copied, set to 138 | # false if you just want the big 139 | # merged file to be copied 140 | save_gps: False # Set to true if using gps and wanting 141 | # to save gps location data, set to 142 | # false otherwise 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- /postprocessing/notebooks/Dask Demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "### Preview of dask-backed radar processing interface\n", 18 | "\n", 19 | "Most of the core code is packaged into `processing_dask.py`, so that file is worth a browse.\n", 20 | "\n", 21 | "In the immediate future, the core advantage is dask's ability to automatically chunk data such\n", 22 | "that small chunks of giant files can be processed without running out of memory. To facilitate this,\n", 23 | "you can set a memory limit in the next cell. Dask will process chunks until it runs against this\n", 24 | "limit and then start storing already-completed results to disk.\n", 25 | "\n", 26 | "In the longer term, this infrastructure should also allow us to do some other fun tricks, such as\n", 27 | "storing out data in cloud storage buckets and automatically farming out computations to\n", 28 | "SLURM-managed clusters (i.e. Sherlock) or cloud compute services.\n", 29 | "\n", 30 | "There are some new dependencies:\n", 31 | "* xarray\n", 32 | "* dask\n", 33 | "* zarr\n", 34 | "* hvplot\n", 35 | "* datashader\n", 36 | "\n", 37 | "The last two are technically only needed for plotting (more about that below)." 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": null, 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "# This setup activates the \"distributed\" scheduler. In this case, we're still running it locally, but this gives us access to additional features (i.e. memory limits).\n", 47 | "# See https://docs.dask.org/en/stable/scheduling.html\n", 48 | "from dask.distributed import Client, LocalCluster\n", 49 | "client = Client(n_workers=1,\n", 50 | " threads_per_worker=1,\n", 51 | " memory_limit='20GB') # Note that `memory_limit` is the limit **per worker**.\n", 52 | "client # If you click the dashboard link in the output, you can monitor real-time progress and get other cool visualizations." 
53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "import copy\n", 62 | "import sys\n", 63 | "import xarray as xr\n", 64 | "import numpy as np\n", 65 | "import dask.array as da\n", 66 | "\n", 67 | "import matplotlib.pyplot as plt\n", 68 | "import hvplot.xarray\n", 69 | "import scipy.constants\n", 70 | "\n", 71 | "sys.path.append(\"..\")\n", 72 | "import processing_dask as pr\n", 73 | "import plot_dask\n", 74 | "import processing as old_processing\n", 75 | "\n", 76 | "sys.path.append(\"../../preprocessing/\")\n", 77 | "from generate_chirp import generate_chirp" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "#prefix = \"/home/thomas/Documents/StanfordGrad/RadioGlaciology/drone/radar_data/20230621-bench-prf-testing/20230621_163428\"\n", 87 | "#prefix = \"/home/thomas/Documents/StanfordGrad/RadioGlaciology/drone/radar_data/20230621-bench-prf/20230622_104230\"\n", 88 | "#prefix = \"/home/thomas/Documents/StanfordGrad/RadioGlaciology/drone/radar_data/20230621-bench-prf/20230621_173826\"\n", 89 | "prefix = \"/Users/abroome/Documents/SDR/uhd_radar/data/20230626_143830\"\n", 90 | "prefix = \"/home/thomas/Documents/StanfordGrad/RadioGlaciology/sdr/data/20230713_115750\"\n", 91 | "\n", 92 | "#zero_sample_idx = 159\n", 93 | "zero_sample_idx = 36\n", 94 | "\n", 95 | "#zarr_path = pr.save_radar_data_to_zarr(prefix, zarr_base_location=\"/home/thomas/Documents/StanfordGrad/RadioGlaciology/test_tmp_zarr_cache/\", skip_if_cached=False)\n", 96 | "zarr_path = pr.save_radar_data_to_zarr(prefix, zarr_base_location=\"/Users/abroome/Documents/SDR/uhd_radar/test_tmp_zarr_cache/\", skip_if_cached=False)\n", 97 | "\n", 98 | "zarr_path" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "raw = xr.open_zarr(zarr_path)\n", 108 | "\n", 109 | "#raw # Uncomment this to explore the structure of the resulting dataset" 110 | ] 111 | }, 112 | { 113 | "cell_type": "markdown", 114 | "metadata": {}, 115 | "source": [ 116 | "If you're new to xarray, it's probably worth a read through some of their intro docs: https://docs.xarray.dev/en/stable/getting-started-guide/quick-overview.html\n", 117 | "\n", 118 | "The dataset has a single \"data variable\" called `radar_data` (and I think we should keep it this way).\n", 119 | "\n", 120 | "In addition to the data variable, there are dimensions, coordinates, and attributes.\n", 121 | "\n", 122 | "Dimensions are just names for the indices into the data variables. You can see them above under \"indexes\". For us, they are `pulse_idx` and `sample_idx`.\n", 123 | "\n", 124 | "Coordinates are variables associated to one or more dimensions that are sort of like metadata. In our case, we have `pulse_idx`, `slow_time`, `sample_idx`, and `fast_time`. Coordinates that have the same name as a dimension are considered \"dimension coordinates\" and can be directly used for indexing. You can only have one dimension coordinate per dimension, but you can easily swap which coordinate is the dimension coordinate with `swap_dims()` (see, for example, the second plotting example).\n", 125 | "\n", 126 | "More about this here: https://docs.xarray.dev/en/stable/user-guide/data-structures.html#coordinates\n", 127 | "\n", 128 | "Attributes serves as a dictionary of random other stuff you can package with your dataset. 
I've put the config and log output there. For example, you can access anything from the associated config YAML like this:\n", 129 | "\n", 130 | "`raw.attrs['config']['GENERATE']['sample_rate']`" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "stacked = pr.remove_errors(raw)\n", 140 | "stacked = pr.stack(stacked,10)" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": null, 146 | "metadata": {}, 147 | "outputs": [], 148 | "source": [ 149 | "config_blackman_window = copy.deepcopy(stacked.config)\n", 150 | "config_blackman_window['GENERATE']['window'] = 'rectangular'\n", 151 | "\n", 152 | "chirp_ts, chirp = generate_chirp(config_blackman_window)\n", 153 | "compressed = pr.pulse_compress(stacked, chirp,\n", 154 | " fs=stacked.config['GENERATE']['sample_rate'],\n", 155 | " zero_sample_idx=zero_sample_idx,\n", 156 | " signal_speed=scipy.constants.c * (2/3))" 157 | ] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": null, 162 | "metadata": {}, 163 | "outputs": [], 164 | "source": [ 165 | "print(compressed.reflection_distance[0])\n", 166 | "\n", 167 | "print(compressed.reflection_distance[-1])\n", 168 | "print(compressed.sel(travel_time=12e-6, method='nearest'))" 169 | ] 170 | }, 171 | { 172 | "cell_type": "markdown", 173 | "metadata": {}, 174 | "source": [ 175 | "Everything has run quickly up to now because (apart from the data translation to zarr) no actual data has been processed.\n", 176 | "\n", 177 | "\n", 178 | "When you actually request data (by calling `.compute()` on it), the processing actually begins. Plotting triggers this process." 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": null, 184 | "metadata": {}, 185 | "outputs": [], 186 | "source": [ 187 | "# This roughly matches the old plot_radargram function I was using and uses matplotlib\n", 188 | "plot_dask.plot_radargram(compressed, sig_speed=scipy.constants.c * (2/3), figsize=(20,6), vmin=-90, vmax=-40, ylims=(200, -10))\n", 189 | "plt.show()" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": null, 195 | "metadata": {}, 196 | "outputs": [], 197 | "source": [ 198 | "# Here's an example of a parallelizable implementation of log compression\n", 199 | "return_power = xr.apply_ufunc(\n", 200 | " lambda x: 20*np.log10(np.abs(x)),\n", 201 | " compressed,\n", 202 | " dask=\"parallelized\"\n", 203 | ")" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": null, 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "# If you expect to repeatedly need the same data, you can explicitly tell dask to keep it around in memory.\n", 213 | "#return_power.persist()" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": null, 219 | "metadata": {}, 220 | "outputs": [], 221 | "source": [ 222 | "# Example of plotting using hvplot -- this is supposed to be more efficient at directly plotting giant datasets\n", 223 | "# I haven't really dug into this much yet, but there's documentation here: https://holoviews.org/user_guide/Large_Data.html\n", 224 | "# In the backend, this uses bokeh, so the plots are also interactive\n", 225 | "return_power.swap_dims({'pulse_idx': 'slow_time', 'travel_time': 'reflection_distance'}).hvplot.quadmesh(x='slow_time', ylim=(100,-50), clim=(-90,-40), cmap='inferno')" 226 | ] 227 | }, 228 | { 229 | "cell_type": "markdown", 230 | "metadata": {}, 231 | "source": [ 232 | "## 
Spectrum visualization" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "execution_count": null, 238 | "metadata": {}, 239 | "outputs": [], 240 | "source": [ 241 | "inpt = pr.remove_errors(raw)\n", 242 | "inpt[\"radar_data\"].shape" 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "metadata": {}, 249 | "outputs": [], 250 | "source": [ 251 | "n = 200\n", 252 | "pulse = pr.stack(inpt, n)[{'pulse_idx':0}][\"radar_data\"].to_numpy()\n", 253 | "\n", 254 | "f, t, S = scipy.signal.spectrogram(\n", 255 | " pulse,\n", 256 | " fs=raw.attrs[\"config\"][\"GENERATE\"][\"sample_rate\"],\n", 257 | " window='flattop',\n", 258 | " nperseg=100,\n", 259 | " scaling='density', mode='psd',\n", 260 | " return_onesided=False\n", 261 | ")\n", 262 | "\n", 263 | "fig, ax = plt.subplots(facecolor='white', figsize=(10,6))\n", 264 | "freq_mhz = (np.fft.fftshift(f) + inpt.attrs['config']['RF0']['freq']) / 1e6\n", 265 | "pcm = ax.pcolormesh(t, freq_mhz, 20*np.log10(np.abs(np.fft.fftshift(S, axes=0))), shading='nearest', vmin=-420, vmax=-200)\n", 266 | "clb = fig.colorbar(pcm, ax=ax)\n", 267 | "clb.set_label('Power [dB]')\n", 268 | "ax.set_xlabel('Time [s]')\n", 269 | "ax.set_ylabel('Frequency [MHz]')\n", 270 | "ax.set_title(f\"Spectrogram of received data with n_stack={n}\")" 271 | ] 272 | }, 273 | { 274 | "cell_type": "code", 275 | "execution_count": null, 276 | "metadata": {}, 277 | "outputs": [], 278 | "source": [] 279 | } 280 | ], 281 | "metadata": { 282 | "kernelspec": { 283 | "display_name": "sprinkles", 284 | "language": "python", 285 | "name": "sprinkles" 286 | }, 287 | "language_info": { 288 | "codemirror_mode": { 289 | "name": "ipython", 290 | "version": 3 291 | }, 292 | "file_extension": ".py", 293 | "mimetype": "text/x-python", 294 | "name": "python", 295 | "nbconvert_exporter": "python", 296 | "pygments_lexer": "ipython3", 297 | "version": "3.9.7" 298 | }, 299 | "vscode": { 300 | "interpreter": { 301 | "hash": "bb81f79795c75689c11bb9ecc505fc7b83ca5f9665fb7ad4bb0ce31dc1de5ece" 302 | } 303 | } 304 | }, 305 | "nbformat": 4, 306 | "nbformat_minor": 4 307 | } 308 | -------------------------------------------------------------------------------- /sdr/rf_settings.cpp: -------------------------------------------------------------------------------- 1 | // Created 10/22/2021 2 | 3 | #include 4 | #include 5 | #include "yaml-cpp/yaml.h" 6 | #include "rf_settings.hpp" 7 | 8 | using namespace std; 9 | using namespace uhd; 10 | 11 | /** 12 | * Set USRP RF parameters for a single channel of operation (one 13 | * set of tx/rx ports, single daughterboard). 
14 | * 15 | * Inputs: usrp - sptr to a USRP device 16 | * rf0 - YAML node describing the RF paramters to be set up, 17 | * specific parameters are set in the config file 18 | * Outputs: returns true if all RF parameters were successfully set, 19 | * otherwise returns false 20 | */ 21 | bool set_rf_params_single(usrp::multi_usrp::sptr usrp, YAML::Node rf0, 22 | vector rx_channels, vector tx_channels) 23 | { 24 | // get first block of rf parameters (for channel 0) 25 | double rx_rate = rf0["rx_rate"].as(); 26 | double tx_rate = rf0["tx_rate"].as(); 27 | double fc = rf0["freq"].as(); 28 | double lo_offset = rf0["lo_offset"].as(0.0); 29 | double rx_gain = rf0["rx_gain"].as(); 30 | double tx_gain = rf0["tx_gain"].as(); 31 | double bw = rf0["bw"].as(); 32 | string tx_ant = rf0["tx_ant"].as(); 33 | string rx_ant = rf0["rx_ant"].as(); 34 | bool transmit = rf0["transmit"].as(true); 35 | string tuning_args = rf0["tuning_args"].as(); 36 | 37 | if (!(rx_channels.size() == tx_channels.size())) { 38 | throw std::runtime_error("Different TX and RX channel list lengths are not currently supported."); 39 | } 40 | size_t tx_channel = tx_channels[0]; 41 | size_t rx_channel = rx_channels[0]; 42 | 43 | // set the sample rates 44 | usrp->set_rx_rate(rx_rate, rx_channel); 45 | if (transmit) { 46 | usrp->set_tx_rate(tx_rate, tx_channel); 47 | } 48 | 49 | // Set command time to current time + 0.1 seconds 50 | usrp->clear_command_time(); 51 | usrp->set_command_time(usrp->get_time_now() + time_spec_t(0.1)); 52 | 53 | // Set the center frequency and LO offset. 54 | 55 | tune_request_t tune_request_tx(fc, lo_offset); 56 | tune_request_t tune_request_rx(fc, lo_offset); 57 | tune_request_tx.args = device_addr_t(tuning_args); 58 | tune_request_rx.args = device_addr_t(tuning_args); 59 | /*tune_request_tx.dsp_freq_policy = tune_request_t::policy_t::POLICY_MANUAL; 60 | tune_request_rx.dsp_freq_policy = tune_request_t::policy_t::POLICY_MANUAL; 61 | tune_request_tx.rf_freq_policy = tune_request_t::policy_t::POLICY_MANUAL; 62 | tune_request_rx.rf_freq_policy = tune_request_t::policy_t::POLICY_MANUAL; 63 | tune_request_tx.target_freq = fc; 64 | tune_request_rx.target_freq = fc; 65 | tune_request_tx.rf_freq = 452.5e6; 66 | tune_request_rx.rf_freq = 452.5e6; 67 | tune_request_tx.dsp_freq = -12.5e6; 68 | tune_request_rx.dsp_freq = 12.5e6;*/ 69 | tune_result_t tune_result_rx = usrp->set_rx_freq(tune_request_rx, rx_channel); 70 | cout << "RX:\n" << tune_result_rx.to_pp_string() << endl; 71 | if (transmit) { 72 | tune_result_t tune_result_tx = usrp->set_tx_freq(tune_request_tx, tx_channel); 73 | cout << "TX:\n" << tune_result_tx.to_pp_string() << endl; 74 | } 75 | 76 | // sleep 100ms (~10ms after retune occurs) to allow LO to lock 77 | this_thread::sleep_for(chrono::milliseconds(110)); 78 | usrp->clear_command_time(); 79 | 80 | // set the rf gain 81 | usrp->set_rx_gain(rx_gain, rx_channel); 82 | if (transmit) { 83 | usrp->set_tx_gain(tx_gain, tx_channel); 84 | } 85 | 86 | // set the IF filter bandwidth 87 | if (bw != 0) 88 | { 89 | usrp->set_rx_bandwidth(bw, rx_channel); 90 | } 91 | 92 | // set the antenna 93 | usrp->set_rx_antenna(rx_ant, rx_channel); 94 | if (transmit) { 95 | usrp->set_tx_antenna(tx_ant, tx_channel); 96 | } 97 | 98 | // sanity check actual values against requested values 99 | bool mismatch = rf_error_check(usrp, rf0, tx_channel, rx_channel); 100 | 101 | return !mismatch; 102 | } 103 | 104 | /** 105 | * Set USRP RF parameters for multi channel operation. 
Right now this only works 106 | * for a two channel set up (will not work with multiple USRP devices configured 107 | * in one multi-usrp set up). See the following links for more information: 108 | * https://files.ettus.com/manual/page_configuration.html 109 | * https://files.ettus.com/manual/page_multiple.html 110 | * 111 | * Inputs: usrp - sptr to a USRP device 112 | * rf0 - YAML node describing the RF parameters to be set up for channel 0, 113 | * specific parameters are set in the config file 114 | * rf1 - YAML node describing the RF parameters to be set up for channel 1, 115 | * specific parameters are set in the config file 116 | * Outputs: returns true if all RF parameters were successfully set, 117 | * otherwise returns false 118 | */ 119 | bool set_rf_params_multi(usrp::multi_usrp::sptr usrp, YAML::Node rf0, YAML::Node rf1, 120 | vector rx_channels, vector tx_channels) 121 | { 122 | // get first block of rf parameters (for channel 0) 123 | double rx_rate0 = rf0["rx_rate"].as(); 124 | double tx_rate0 = rf0["tx_rate"].as(); 125 | double fc0 = rf0["freq"].as(); 126 | double rx_gain0 = rf0["rx_gain"].as(); 127 | double tx_gain0 = rf0["tx_gain"].as(); 128 | double bw0 = rf0["bw"].as(); 129 | string tx_ant0 = rf0["tx_ant"].as(); 130 | string rx_ant0 = rf0["rx_ant"].as(); 131 | 132 | // get first block of rf parameters (for channel 1) 133 | double rx_rate1 = rf1["rx_rate"].as(); 134 | double tx_rate1 = rf1["tx_rate"].as(); 135 | double fc1 = rf1["freq"].as(); 136 | double rx_gain1 = rf1["rx_gain"].as(); 137 | double tx_gain1 = rf1["tx_gain"].as(); 138 | double bw1 = rf1["bw"].as(); 139 | string tx_ant1 = rf1["tx_ant"].as(); 140 | string rx_ant1 = rf1["rx_ant"].as(); 141 | 142 | // check if tx and rx channel lists are the same 143 | if (!(rx_channels.size() == tx_channels.size())) { 144 | throw std::runtime_error("Different TX and RX channel list lengths are not currently supported."); 145 | } 146 | 147 | // set the sample rates 148 | cout << "Setting the sample rates in set_rf_params_multi()" << endl; 149 | usrp->set_tx_rate(tx_rate0, tx_channels[0]); 150 | usrp->set_rx_rate(rx_rate0, rx_channels[0]); 151 | usrp->set_tx_rate(tx_rate1, tx_channels[1]); 152 | usrp->set_rx_rate(rx_rate1, rx_channels[1]); 153 | 154 | // set center frequency at same time for both daughterboards 155 | usrp->clear_command_time(); 156 | usrp->set_command_time(usrp->get_time_now() + time_spec_t(0.1)); 157 | 158 | tune_request_t tune_request0(fc0); 159 | tune_request_t tune_request1(fc1); 160 | usrp->set_tx_freq(tune_request0, tx_channels[0]); 161 | usrp->set_rx_freq(tune_request0, rx_channels[0]); 162 | usrp->set_tx_freq(tune_request1, tx_channels[1]); 163 | usrp->set_rx_freq(tune_request1, rx_channels[1]); 164 | 165 | // sleep 100ms (~10ms after retune occurs) to allow LO to lock 166 | this_thread::sleep_for(chrono::milliseconds(110)); 167 | usrp->clear_command_time(); 168 | 169 | // set the rf gain 170 | usrp->set_rx_gain(rx_gain0, rx_channels[0]); 171 | usrp->set_tx_gain(tx_gain0, tx_channels[0]); 172 | usrp->set_rx_gain(rx_gain1, rx_channels[1]); 173 | usrp->set_tx_gain(tx_gain1, tx_channels[1]); 174 | 175 | // set the IF filter bandwidth 176 | if (bw0 != 0) { 177 | usrp->set_rx_bandwidth(bw0, rx_channels[0]); 178 | } 179 | if (bw1 != 0) { 180 | usrp->set_rx_bandwidth(bw1, rx_channels[1]); 181 | } 182 | 183 | // set the antenna 184 | usrp->set_rx_antenna(rx_ant0, rx_channels[0]); 185 | usrp->set_tx_antenna(tx_ant0, tx_channels[0]); 186 | usrp->set_rx_antenna(rx_ant1, rx_channels[1]); 187 | 
usrp->set_tx_antenna(tx_ant1, tx_channels[1]); 188 | 189 | // sanity check actual values against requested values 190 | bool mismatch = rf_error_check(usrp, rf0, tx_channels[0], rx_channels[0]); 191 | mismatch = mismatch || rf_error_check(usrp, rf1, tx_channels[1], rx_channels[1]); 192 | 193 | return !mismatch; 194 | } 195 | 196 | /** 197 | * Check whether requested RF parameters are equal to the reported values. 198 | * 199 | * Inputs: usrp - sptr to a USRP device 200 | * rf - YAML node describing the requested RF parameters, 201 | * specific parameters are set in the config file 202 | * channel - which channel these parameters correspond to 203 | * Outputs: returns true if there is a mismatch between requested and 204 | * actual RF parameters. Returns false if everything was successfully set. 205 | */ 206 | bool rf_error_check(usrp::multi_usrp::sptr usrp, YAML::Node rf, size_t tx_channel, 207 | size_t rx_channel) { 208 | bool mismatch = false; 209 | 210 | // get first block of requested rf parameters (for channel 0) 211 | double rx_rate = rf["rx_rate"].as(); 212 | double tx_rate = rf["tx_rate"].as(); 213 | double fc = rf["freq"].as(); 214 | double rx_gain = rf["rx_gain"].as(); 215 | double tx_gain = rf["tx_gain"].as(); 216 | double bw = rf["bw"].as(); 217 | string tx_ant = rf["tx_ant"].as(); 218 | string rx_ant = rf["rx_ant"].as(); 219 | bool transmit = rf["transmit"].as(true); 220 | 221 | 222 | if (usrp->get_rx_rate(rx_channel) != rx_rate) { 223 | cout << boost::format("[WARNING]: Requested RX rate (CH%d): %f Hz. Actual RX rate: %f Hz.") 224 | % rx_channel % (rx_rate) % (usrp->get_rx_rate(rx_channel)) << endl; 225 | mismatch = true; 226 | } 227 | 228 | if (transmit && (usrp->get_tx_rate(tx_channel) != tx_rate)) { 229 | cout << boost::format("[WARNING]: Requested TX rate (CH%d): %f Hz. Actual TX rate: %f Hz.") 230 | % tx_channel % (tx_rate) % (usrp->get_tx_rate(tx_channel)) << endl; 231 | mismatch = true; 232 | } 233 | 234 | if (usrp->get_rx_freq(rx_channel) != fc) { 235 | cout << boost::format("[WARNING]: Requested RX center freq (CH%d): %f Hz. Actual RX center freq: %f Hz.") 236 | % rx_channel % (fc) % (usrp->get_rx_freq(rx_channel)) << endl; 237 | mismatch = true; 238 | } 239 | 240 | if (transmit && (usrp->get_tx_freq(tx_channel) != fc)) { 241 | cout << boost::format("[WARNING]: Requested TX center freq (CH%d): %f Hz. Actual TX center freq: %f Hz.") 242 | % tx_channel % (fc) % (usrp->get_tx_freq(tx_channel)) << endl; 243 | mismatch = true; 244 | } 245 | 246 | if (transmit) { 247 | double tx_rx_center_freq_diff = usrp->get_tx_freq(tx_channel) - usrp->get_rx_freq(rx_channel); 248 | if (tx_rx_center_freq_diff != 0){ 249 | cout << boost::format("[WARNING] [**** FOR REAL THIS IS IMPORTANT ****] TX and RX center frequencies are not the same. Difference is %f Hz.") 250 | % tx_rx_center_freq_diff << endl; 251 | } 252 | double tx_rx_rate_diff = usrp->get_tx_rate(tx_channel) - usrp->get_rx_rate(rx_channel); 253 | if (tx_rx_rate_diff != 0){ 254 | cout << boost::format("[WARNING] [**** FOR REAL THIS IS IMPORTANT ****] TX and RX sample rates are not the same. Difference is %f Hz.") 255 | % tx_rx_rate_diff << endl; 256 | } 257 | } 258 | 259 | 260 | if (usrp->get_rx_gain(rx_channel) != rx_gain) { 261 | cout << boost::format("[WARNING]: Requested RX gain (CH%d): %f dB. 
Actual RX gain: %f dB.") 262 | % rx_channel % rx_gain % usrp->get_rx_gain(rx_channel) << endl; 263 | mismatch = true; 264 | } 265 | 266 | if (transmit && (usrp->get_tx_gain(tx_channel) != tx_gain)) { 267 | cout << boost::format("[WARNING]: Requested TX gain (CH%d): %f dB. Actual TX gain: %f dB.") 268 | % tx_channel % tx_gain % usrp->get_tx_gain(tx_channel) << endl; 269 | mismatch = true; 270 | } 271 | 272 | if ((usrp->get_rx_bandwidth(rx_channel) != bw) && (bw != 0)) { 273 | cout << boost::format("[WARNING]: Requested analog bandwidth (CH%d): %f MHz. Actual analog bandwidth: %f MHz.") 274 | % rx_channel % bw % (usrp->get_rx_bandwidth(rx_channel) / (1e6)) << endl; 275 | mismatch = true; 276 | } 277 | 278 | if (transmit && (usrp->get_tx_antenna(tx_channel) != tx_ant)) { 279 | cout << boost::format("[WARNING]: Requested TX ant (CH%d): %s. Actual TX ant: %s.") 280 | % tx_channel % tx_ant % usrp->get_tx_antenna(tx_channel) << endl; 281 | mismatch = true; 282 | } 283 | 284 | if (usrp->get_rx_antenna(rx_channel) != rx_ant) { 285 | cout << boost::format("[WARNING]: Requested RX ant (CH%d): %s. Actual RX ant: %s.") 286 | % rx_channel % rx_ant % usrp->get_rx_antenna(rx_channel) << endl; 287 | mismatch = true; 288 | } 289 | 290 | return mismatch; 291 | } -------------------------------------------------------------------------------- /postprocessing/notebooks/archived_experiments/20220329 Filtered impacts on cross correlation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "e696f3a8", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import sys\n", 11 | "import argparse\n", 12 | "import numpy as np\n", 13 | "import scipy.signal as sp\n", 14 | "import scipy.interpolate\n", 15 | "import processing as pr\n", 16 | "import matplotlib.pyplot as plt\n", 17 | "import pandas as pd\n", 18 | "from ruamel.yaml import YAML as ym\n", 19 | "import datetime\n", 20 | "import copy\n", 21 | "\n", 22 | "%matplotlib widget\n", 23 | "import mplcursors\n", 24 | "\n", 25 | "sys.path.append(\"../preprocessing\")\n", 26 | "from generate_chirp import generate_chirp\n", 27 | "\n", 28 | "# Widgets are only needed if you want to use the interactive plot at the end\n", 29 | "# Installation instructions: https://ipywidgets.readthedocs.io/en/latest/user_install.html\n", 30 | "import ipywidgets as widgets\n", 31 | "\n", 32 | "import scipy.fft" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "id": "7d28ca74", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "# Just an example for pulse measurements\n", 43 | "prefix = \"../../drone/radar_data/20220321-tellbreen/20220321_052716\"\n", 44 | "\n", 45 | "zero_sample_idx = 159\n", 46 | "\n", 47 | "yaml_file = prefix + \"_config.yaml\"\n", 48 | "bin_file = prefix + \"_rx_samps.bin\"\n", 49 | "\n", 50 | "# Initialize Constants\n", 51 | "yaml = ym()\n", 52 | "with open(yaml_file) as stream:\n", 53 | " config = yaml.load(stream)\n", 54 | " sample_rate = config[\"PLOT\"][\"sample_rate\"] # Hertz\n", 55 | " sig_speed = config[\"PLOT\"][\"sig_speed\"] / np.sqrt(3.17)\n", 56 | "\n", 57 | " #expected_n_rxs = int(config['CHIRP']['num_pulses'] / config['CHIRP']['num_presums'])\n", 58 | " \n", 59 | " rx_len_samples = int(config['CHIRP']['rx_duration'] * config['GENERATE']['sample_rate'])\n", 60 | " \n", 61 | "rx_samps = bin_file\n", 62 | "\n", 63 | "config_modified = copy.deepcopy(config)\n", 64 | 
"config_modified['GENERATE']['window'] = 'rectangular'\n", 65 | "config_modified['GENERATE']['chirp_length'] = 300e-6" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "id": "e653131b", 72 | "metadata": {}, 73 | "outputs": [], 74 | "source": [ 75 | "config_modified['GENERATE']['chirp_bandwidth']" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "id": "ae6bb4cf", 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "config_modified['RF0']['freq'] + (config_modified['GENERATE']['chirp_bandwidth']/2)" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "54c67602", 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "# Read and plot RX/TX\n", 96 | "# This cell loads all of the data - it can take a while with a large file. You don't need to re-run this cell if you only change n_stack\n", 97 | "_, tx_sig = generate_chirp(config_modified) # TODO\n", 98 | "start_freq = config_modified['RF0']['freq'] - (config_modified['GENERATE']['chirp_bandwidth']/2)\n", 99 | "end_freq = config_modified['RF0']['freq'] + (config_modified['GENERATE']['chirp_bandwidth']/2)\n", 100 | "\n", 101 | "pr.plotChirpVsTime(tx_sig, 'Transmitted Chirp', sample_rate)\n", 102 | "print(f\"len(tx_sig): {len(tx_sig)}\")\n", 103 | "\n", 104 | "t0 = datetime.datetime.now()\n", 105 | "rx_sig = pr.extractSig(rx_samps)\n", 106 | "print(datetime.datetime.now() - t0)" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "id": "9e24690a", 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "\"\"\" Load S11 data from FieldFox CSV files \"\"\"\n", 117 | "def load_fieldfox_s1p_data(filename):\n", 118 | " return pd.read_csv(filename, header=1, names=[\"frequency\", \"magnitude\", \"angle\"], comment=\"!\", sep='\\t')\n", 119 | "\n", 120 | "#s1p = load_fieldfox_s1p_data(\"/home/thomas/Documents/StanfordGrad/RadioGlaciology/antennas/20220303-rev2p5-antenna-checkout/03.Mar.2022.13.07_sn10_lab.s1p\")\n", 121 | "s1p = load_fieldfox_s1p_data(\"/home/thomas/Documents/StanfordGrad/RadioGlaciology/antennas/20220305-aeromao-installation-inside/05.Mar.2022.13.42_SN13_WING.s1p\")\n", 122 | "\n", 123 | "s11_derived_transmit_weights = 1 - 10**(s1p['magnitude'] / 20) # approximate portion of signal transmitted\n", 124 | "transmit_weight_interpolator = scipy.interpolate.interp1d(s1p['frequency'], s11_derived_transmit_weights, kind='linear')\n", 125 | "transmit_phase_interpolator = scipy.interpolate.interp1d(s1p['frequency'], s1p['angle'] * (np.pi/180), kind='linear')\n", 126 | "\n", 127 | "inst_chirp_freq = np.linspace(start_freq, end_freq, len(tx_sig))\n", 128 | "tx_sig_antenna_filtered = tx_sig * (transmit_weight_interpolator(inst_chirp_freq)**2) * np.exp(1j * transmit_phase_interpolator(inst_chirp_freq))\n", 129 | "\n", 130 | "fig, ax = pr.plotChirpVsTime(tx_sig_antenna_filtered, 'Transmitted Chirp w/ Simulated Antenna', sample_rate)\n", 131 | "fig.show()\n", 132 | "\n", 133 | "#pd.read_csv(\"/home/thomas/Documents/StanfordGrad/RadioGlaciology/antennas/20220303-rev2p5-antenna-checkout/03.Mar.2022.13.07_sn10_lab.s1p\", header=1, names=[\"frequency\", \"magnitude\", \"angle\"], comment=\"!\", sep='\\t')" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "id": "49c9efe3", 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "id": "56103321", 148 | "metadata": 
{}, 149 | "outputs": [], 150 | "source": [ 151 | "def create_single_synthetic_rx_signal(tx_sig, rx_len_samples, delays={zero_sample_idx: -4}, phase_noise_std=0.0, noise_power_db=0.0):\n", 152 | " rx_sig = np.zeros(rx_len_samples, dtype=np.complex64)\n", 153 | " \n", 154 | " # Delayed copies\n", 155 | " for delay_samples, magnitude_db in delays.items():\n", 156 | " magnitude = 10 ** (magnitude_db / 20)\n", 157 | " len_delayed_chirp = np.minimum(len(tx_sig), len(rx_sig)-delay_samples)\n", 158 | " rx_sig[delay_samples:delay_samples+len_delayed_chirp] += magnitude * tx_sig[:len_delayed_chirp]\n", 159 | " \n", 160 | " # Phase noise\n", 161 | " rx_sig *= np.exp(1j*phase_noise_std*np.random.randn())\n", 162 | " \n", 163 | " # White noise\n", 164 | " noise_amplitude = 10**(noise_power_db/20)\n", 165 | " noise_mag = noise_amplitude * np.random.randn(len(rx_sig))\n", 166 | " noise_phase = np.random.uniform(size=len(rx_sig), low=0,high=2*np.pi)\n", 167 | " noise = noise_mag * np.exp(1j*noise_phase)\n", 168 | " rx_sig += noise\n", 169 | " \n", 170 | " return rx_sig\n", 171 | "\n", 172 | "rx_sig = create_single_synthetic_rx_signal(tx_sig, int(len(tx_sig)*1.5),\n", 173 | " delays={zero_sample_idx: -3, zero_sample_idx+40: -30},\n", 174 | " phase_noise_std=0.0007527001552626,\n", 175 | " noise_power_db=-3\n", 176 | " )\n", 177 | "\n", 178 | "fig, axs = pr.plotChirpVsTime(rx_sig, 'Recieved Chirp', sample_rate)" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": null, 184 | "id": "9e81a650", 185 | "metadata": {}, 186 | "outputs": [], 187 | "source": [ 188 | "fig, ax = plt.subplots(figsize=(10,5), facecolor='white')\n", 189 | "\n", 190 | "\n", 191 | "for n_stack in [1000]:\n", 192 | "\n", 193 | " rx_sigs = [create_single_synthetic_rx_signal(tx_sig_antenna_filtered, int(len(tx_sig)*1.5),\n", 194 | " delays={zero_sample_idx: -3, zero_sample_idx+40: -30},\n", 195 | " phase_noise_std=0,\n", 196 | " noise_power_db=-10\n", 197 | " ) for n in range(n_stack)]\n", 198 | " rx_sigs = np.array(rx_sigs)\n", 199 | " print(np.shape(rx_sigs))\n", 200 | " rx_sig = np.mean(rx_sigs, axis=0)\n", 201 | "\n", 202 | " xcorr_res_tmp = sp.correlate(rx_sig, tx_sig, mode='valid', method='direct')\n", 203 | "\n", 204 | " distance_to_reflector = np.linspace(0, np.shape(xcorr_res_tmp)[0]/sample_rate, np.shape(xcorr_res_tmp)[0]) * sig_speed / 2\n", 205 | " distance_to_reflector = distance_to_reflector - distance_to_reflector[zero_sample_idx]\n", 206 | "\n", 207 | " line = ax.plot(distance_to_reflector, 20*np.log10(np.abs(xcorr_res_tmp)), label=f\"n_stack = {n_stack}\")\n", 208 | " mplcursors.cursor(line)\n", 209 | "\n", 210 | "#ax.set_ylim(-20,40)\n", 211 | "ax.set_xlim(-100, 200)\n", 212 | "ax.set_xlabel('Distance [m]')\n", 213 | "ax.set_ylabel('Power [dB]')\n", 214 | "ax.grid()\n", 215 | "fig.show()" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": null, 221 | "id": "93e3baf4", 222 | "metadata": {}, 223 | "outputs": [], 224 | "source": [ 225 | "def butter_lowpass(cutoff, fs, order=5):\n", 226 | " return scipy.signal.butter(order, cutoff, fs=fs, btype='low', analog=False)\n", 227 | "\n", 228 | "def butter_lowpass_filter(data, cutoff, fs, order=5):\n", 229 | " b, a = butter_lowpass(cutoff, fs, order=order)\n", 230 | " y = scipy.signal.lfilter(b, a, data)\n", 231 | " return y\n", 232 | "\n", 233 | "# fig, ax = plt.subplots()\n", 234 | "# ax.plot(chirp_scaling)\n", 235 | "# fig.show()" 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": null, 241 | "id": 
"d3c3a79d", 242 | "metadata": {}, 243 | "outputs": [], 244 | "source": [ 245 | "# def butter_lowpass(cutoff, fs, order=5):\n", 246 | "# return scipy.signal.butter(order, cutoff, fs=fs, btype='low', analog=False)\n", 247 | "\n", 248 | "# def butter_lowpass_filter(data, cutoff, fs, order=5):\n", 249 | "# b, a = butter_lowpass(cutoff, fs, order=order)\n", 250 | "# y = scipy.signal.lfilter(b, a, data)\n", 251 | "# return y\n", 252 | "\n", 253 | "# chirp_scaling = np.cumsum(np.random.randn(len(tx_sig)))\n", 254 | "# chirp_scaling = butter_lowpass_filter(chirp_scaling, 0.001, 1)\n", 255 | "# chirp_scaling = chirp_scaling - np.min(chirp_scaling)\n", 256 | "# chirp_scaling = chirp_scaling / np.max(chirp_scaling)\n", 257 | "\n", 258 | "tx_sig_autocorr = sp.correlate(tx_sig, tx_sig, mode='same', method='auto')\n", 259 | "\n", 260 | "distance_to_reflector_autocorr = np.linspace(0, len(tx_sig_autocorr)/sample_rate, len(tx_sig_autocorr)) * sig_speed / 2\n", 261 | "distance_to_reflector_autocorr = distance_to_reflector_autocorr - distance_to_reflector_autocorr[len(tx_sig_autocorr)//2]\n", 262 | "\n", 263 | "fig, ax = plt.subplots(figsize=(10,5))\n", 264 | "tx_sig_autocorr_db = 20*np.log10(np.abs(tx_sig_autocorr))\n", 265 | "ax.plot(distance_to_reflector_autocorr, tx_sig_autocorr_db - np.max(tx_sig_autocorr_db), label=\"TX sig autocorr without filtering\", c='black')\n", 266 | "\n", 267 | "for n in range(10):\n", 268 | " # Generate random scaling\n", 269 | " chirp_scaling = np.cumsum(np.random.randn(len(tx_sig)))\n", 270 | " chirp_scaling = butter_lowpass_filter(chirp_scaling, 0.01, 1)\n", 271 | " chirp_scaling = chirp_scaling - np.min(chirp_scaling)\n", 272 | " chirp_scaling = chirp_scaling / np.max(chirp_scaling)\n", 273 | " \n", 274 | " # Calculate correlation\n", 275 | " tx_sig_modified = sp.correlate(tx_sig, tx_sig * chirp_scaling, mode='same', method='auto')\n", 276 | " #tx_sig_modified = sp.correlate(tx_sig, tx_sig * chirp_scaling + 2*np.random.randn(len(tx_sig)), mode='same', method='auto')\n", 277 | " \n", 278 | " # Plot\n", 279 | " ax.plot(distance_to_reflector_autocorr, 20*np.log10(np.abs(tx_sig_modified)) - np.max(tx_sig_autocorr_db), alpha=0.2)\n", 280 | "\n", 281 | "ax.set_xlim(-150,150)\n", 282 | "#ax.set_xlim(len(tx_sig_autocorr)/2 - 200, len(tx_sig_autocorr)/2 + 200)\n", 283 | "ax.set_ylim(-100,0)\n", 284 | "ax.legend()" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": null, 290 | "id": "1c1bb334", 291 | "metadata": {}, 292 | "outputs": [], 293 | "source": [] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": null, 298 | "id": "745bfe00", 299 | "metadata": {}, 300 | "outputs": [], 301 | "source": [] 302 | }, 303 | { 304 | "cell_type": "code", 305 | "execution_count": null, 306 | "id": "b11887da", 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [] 310 | } 311 | ], 312 | "metadata": { 313 | "kernelspec": { 314 | "display_name": "Python [conda env:rg]", 315 | "language": "python", 316 | "name": "conda-env-rg-filprofile" 317 | }, 318 | "language_info": { 319 | "codemirror_mode": { 320 | "name": "ipython", 321 | "version": 3 322 | }, 323 | "file_extension": ".py", 324 | "mimetype": "text/x-python", 325 | "name": "python", 326 | "nbconvert_exporter": "python", 327 | "pygments_lexer": "ipython3", 328 | "version": "3.8.10" 329 | } 330 | }, 331 | "nbformat": 4, 332 | "nbformat_minor": 5 333 | } 334 | -------------------------------------------------------------------------------- 
/postprocessing/test_scripts/phase_noise_simulation/Simplified Mixer Phase Noise.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "import os\n", 20 | "import copy\n", 21 | "import sys\n", 22 | "import xarray as xr\n", 23 | "import numpy as np\n", 24 | "import dask.array as da\n", 25 | "\n", 26 | "import matplotlib.pyplot as plt\n", 27 | "from matplotlib.ticker import EngFormatter\n", 28 | "import hvplot.xarray\n", 29 | "import scipy.constants\n", 30 | "\n", 31 | "sys.path.append(\"..\")\n", 32 | "import processing_dask as pr\n", 33 | "import plot_dask\n", 34 | "import processing as old_processing\n", 35 | "\n", 36 | "import colorednoise as cn\n", 37 | "\n", 38 | "#sys.path.append(\"../../preprocessing/\")\n", 39 | "#from generate_chirp import generate_chirp\n", 40 | "\n", 41 | "sys.path.append(\"..\")\n", 42 | "from processing import pulse_compress" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "import matplotlib\n", 52 | "matplotlib.rcParams.update({'font.size': 12})" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "def generate_chirp_with_jitter(config, t_jitter=0):\n", 62 | " \"\"\"\n", 63 | " Generate a chirp according to parameters in the config dictionary, typically\n", 64 | " loaded from a config YAML file.\n", 65 | "\n", 66 | " Returns a tuple (ts, chirp_complex), where ts is a numpy array of time\n", 67 | " samples, and chirp_complex is a numpy array of complex floating point values\n", 68 | " representing the chirp.\n", 69 | "\n", 70 | " If you're looking for a floating point valued chirp to use in convolution,\n", 71 | " this is probably the right function.\n", 72 | "\n", 73 | " This function does not convert the complex numpy array to the cpu format\n", 74 | " expected by the radar code. 
If you want to produce samples to feed the radar\n", 75 | "    code, look at `generate_from_yaml_filename` (later in this file) instead.\n", 76 | "    \"\"\"\n", 77 | "    # Load parameters\n", 78 | "    gen_params = config[\"GENERATE\"]\n", 79 | "    chirp_type = gen_params[\"chirp_type\"]\n", 80 | "    sample_rate = gen_params[\"sample_rate\"]\n", 81 | "    chirp_bandwidth = gen_params[\"chirp_bandwidth\"]\n", 82 | "    offset = gen_params.get(\"lo_offset_sw\", 0)\n", 83 | "    window = gen_params[\"window\"]\n", 84 | "    chirp_length = gen_params[\"chirp_length\"]\n", 85 | "    pulse_length = gen_params.get(\"pulse_length\", chirp_length) # default to chirp_length if no pulse_length is specified\n", 86 | "\n", 87 | "    # Build chirp\n", 88 | "\n", 89 | "    end_freq = chirp_bandwidth / 2 # Chirp goes from -BW/2 to BW/2\n", 90 | "    start_freq = -1 * end_freq\n", 91 | "\n", 92 | "    start_freq += offset\n", 93 | "    end_freq += offset\n", 94 | "\n", 95 | "    ts = np.arange(0, chirp_length-(1/(2*sample_rate)), 1/(sample_rate)) + t_jitter\n", 96 | "    ts_zp = np.arange(0, (pulse_length)-(1/(2*sample_rate)), 1/(sample_rate))\n", 97 | "\n", 98 | "    if chirp_type == 'linear':\n", 99 | "        ph = 2*np.pi*((start_freq)*ts + (end_freq - start_freq) * ts**2 / (2*chirp_length))\n", 100 | "    elif chirp_type == 'hyperbolic':\n", 101 | "        ph = 2*np.pi*(-1*start_freq*end_freq*chirp_length/(end_freq-start_freq))*np.log(1- (end_freq-start_freq)*ts/(end_freq*chirp_length))\n", 102 | "    else:\n", 103 | "        ph = 2*np.pi*(start_freq*ts + (end_freq - start_freq) * ts**2 / (2*chirp_length))\n", 104 | "        print(f\"[ERROR] Unrecognized chirp type '{chirp_type}'\")\n", 105 | "        return None, None\n", 106 | "\n", 107 | "    chirp_complex = np.exp(1j*ph)\n", 108 | "\n", 109 | "    if window == \"blackman\":\n", 110 | "        chirp_complex = chirp_complex * np.blackman(chirp_complex.size)\n", 111 | "    elif window == \"hamming\":\n", 112 | "        chirp_complex = chirp_complex * np.hamming(chirp_complex.size)\n", 113 | "    elif window == \"kaiser14\":\n", 114 | "        chirp_complex = chirp_complex * np.kaiser(chirp_complex.size, 14.0)\n", 115 | "    elif window == \"kaiser10\":\n", 116 | "        chirp_complex = chirp_complex * np.kaiser(chirp_complex.size, 10.0)\n", 117 | "    elif window == \"kaiser18\": \n", 118 | "        chirp_complex = chirp_complex * np.kaiser(chirp_complex.size, 18.0)\n", 119 | "    elif window != \"rectangular\":\n", 120 | "        print(f\"[ERROR] Unrecognized window function '{window}'\")\n", 121 | "        return None, None\n", 122 | "\n", 123 | "    chirp_complex = np.pad(chirp_complex, (int(np.floor(ts_zp.size - ts.size)/2),), 'constant')\n", 124 | "\n", 125 | "    chirp_complex = chirp_complex\n", 126 | "\n", 127 | "    return ts_zp, chirp_complex\n" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "def plot_spectrum(ts, sig, sample_rate, one_sided=False, log_scale=False, nperseg=2**16, ax=None, label=\"\", alpha=1.0, time_delay=None):\n", 137 | "    if ax is None:\n", 138 | "        fig, ax = plt.subplots()\n", 139 | "        fig.tight_layout()\n", 140 | "    else:\n", 141 | "        fig = None\n", 142 | "\n", 143 | "    # Calculate power spectral density\n", 144 | "    freq, spectrum = scipy.signal.welch(sig, fs=sample_rate, nperseg=nperseg, return_onesided=one_sided, detrend=False, window='rectangular', scaling='density')\n", 145 | "    \n", 146 | "    if time_delay is not None:\n", 147 | "        ratio = 4 * (np.sin(np.pi * freq * time_delay))**2\n", 148 | "    \n", 149 | "    if not one_sided:\n", 150 | "        freq = np.fft.fftshift(freq)\n", 151 | "        spectrum = 
np.fft.fftshift(spectrum)\n", 152 | " \n", 153 | " ax.plot(freq, 10*np.log10(np.abs(spectrum)), label=label, alpha=alpha)\n", 154 | "\n", 155 | " if time_delay is not None:\n", 156 | " ax.plot(freq, 10*np.log10(np.abs(ratio*spectrum)), label=f\"Theoretical for delay {time_delay}\", alpha=alpha, linestyle='--')\n", 157 | "\n", 158 | " if log_scale:\n", 159 | " ax.set_xscale('log')\n", 160 | " ax.set_xlim([0.01, None])\n", 161 | " # if not one_sided:\n", 162 | " # raise ValueError(\"Can't use log scale with two-sided spectrum\")\n", 163 | " ax.set_xlabel('Frequency [Hz]')\n", 164 | " ax.set_ylabel('Power [dB]')\n", 165 | " ax.set_title('Frequency Domain')\n", 166 | " ax.grid(True)\n", 167 | "\n", 168 | " return fig, ax\n" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "formatter_hz = EngFormatter(unit='Hz')" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": null, 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "fs = 5e6\n", 187 | "ts = np.arange(0, 5.0, 1/fs)\n", 188 | "\n", 189 | "noise_bandpass_center = 100000\n", 190 | "\n", 191 | "f_zero = 10**(-20/20) * cn.powerlaw_psd_gaussian(0, len(ts))\n", 192 | "f_zero_2 = 10**(20/20) * cn.powerlaw_psd_gaussian(0, len(ts))\n", 193 | "# bandpass f_zero_2 to produce a much higher narrow-band noise source\n", 194 | "sos = scipy.signal.butter(1, [0.9*noise_bandpass_center, 1.1*noise_bandpass_center], btype='bandpass', fs=fs, output='sos')\n", 195 | "f_zero_2_filt = scipy.signal.sosfiltfilt(sos, f_zero_2)\n", 196 | "ph_noise = f_zero_2_filt + f_zero\n", 197 | "\n", 198 | "fig, ax = plot_spectrum(ts, ph_noise, fs, log_scale=True, one_sided=True, nperseg=len(ph_noise)/10)\n", 199 | "ax.set_xlim([10, 1e6])\n", 200 | "ax.set_xticks([10, 100, 1e3, 10e3, 100e3, 1e6])\n", 201 | "ax.set_xticklabels([formatter_hz(x) for x in ax.get_xticks()])" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": null, 207 | "metadata": {}, 208 | "outputs": [], 209 | "source": [ 210 | "10*np.log10(np.sum(np.abs(np.exp(1j*ph_noise)**2)))" 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": null, 216 | "metadata": {}, 217 | "outputs": [], 218 | "source": [ 219 | "config = {\n", 220 | " 'GENERATE':\n", 221 | " {\n", 222 | " 'chirp_type': 'linear',\n", 223 | " 'sample_rate': fs,\n", 224 | " 'chirp_bandwidth': 4e6,\n", 225 | " 'window': \"rectangular\",\n", 226 | " 'chirp_length': 20e-6,\n", 227 | " 'pulse_length': 20e-6\n", 228 | " }\n", 229 | "}\n", 230 | "expected_chirp_length = len(np.arange(0, (config['GENERATE']['chirp_length'])-(1/(2*config['GENERATE']['sample_rate'])), 1/(config['GENERATE']['sample_rate'])))\n", 231 | "\n", 232 | "pri = 100e-6\n", 233 | "n_chirps = int((len(ts)) // (pri * fs))\n", 234 | "chirps = np.zeros(shape=(n_chirps, int(fs*pri)), dtype=np.complex64)\n", 235 | "ts_chirp, reference_chirp = generate_chirp_with_jitter(config)\n", 236 | "print(f\"n_chirps: {n_chirps}, prf: {1/pri} Hz\")\n", 237 | "\n", 238 | "delay_samples = 200\n", 239 | "\n", 240 | "for i in range(n_chirps):\n", 241 | " jitter_start_idx = int(i * (pri * fs))\n", 242 | " # Generate chirps\n", 243 | " ts_chirp, chirps[i, delay_samples:delay_samples+expected_chirp_length] = generate_chirp_with_jitter(config) #, t_jitter=t_jitter[jitter_start_idx:jitter_start_idx+expected_chirp_length])\n", 244 | " # Add in phase contributions from TX and RX LOs\n", 245 | " chirps[i, 
delay_samples:delay_samples+expected_chirp_length] *= np.exp(1j * ph_noise[jitter_start_idx:jitter_start_idx+expected_chirp_length])\n" 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": null, 251 | "metadata": {}, 252 | "outputs": [], 253 | "source": [ 254 | "n_stacks = np.append(np.logspace(0, int(np.log10(n_chirps)), 10*(int(np.log10(n_chirps))+1), dtype=int), int(n_chirps))\n", 255 | "peaks_lin_mean = np.zeros_like(n_stacks, dtype=np.float64)\n", 256 | "peaks_lin_std = np.zeros_like(n_stacks, dtype=np.float64)\n", 257 | "\n", 258 | "nstack_1_peak_phases = np.zeros(shape=(n_chirps,))\n", 259 | "\n", 260 | "for i, n_stack in enumerate(n_stacks):\n", 261 | " peaks_lin = []\n", 262 | " for start_idx in np.arange(0, n_chirps-n_stack+1, n_stack):\n", 263 | " stacked = np.mean(chirps[start_idx:start_idx+n_stack, :], axis=0)\n", 264 | " fast_time, compressed = pulse_compress(stacked, reference_chirp, fs)\n", 265 | " peaks_lin.append(np.max(np.abs(compressed)))\n", 266 | " if n_stack == 1:\n", 267 | " nstack_1_peak_phases[start_idx] = np.angle(compressed[np.argmax(np.abs(compressed))])\n", 268 | " \n", 269 | " peaks_lin_mean[i] = np.mean(peaks_lin)\n", 270 | " peaks_lin_std[i] = np.std(peaks_lin)" 271 | ] 272 | }, 273 | { 274 | "cell_type": "code", 275 | "execution_count": null, 276 | "metadata": {}, 277 | "outputs": [], 278 | "source": [ 279 | "#fig_summary, (ax_spec, ax_power) = plt.subplots(1, 2, figsize=(10,5))\n", 280 | "#first_plot = True" 281 | ] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "execution_count": null, 286 | "metadata": {}, 287 | "outputs": [], 288 | "source": [ 289 | "effective_prf = 1/(n_stacks * pri)\n", 290 | "\n", 291 | "# Spectrum of phase noise\n", 292 | "plot_spectrum(ts, ph_noise, fs, log_scale=True, one_sided=True, ax=ax_spec, label=formatter_hz(noise_bandpass_center), nperseg=len(ph_noise)/10)\n", 293 | "ax_spec.set_xlim([10, 1e6])\n", 294 | "ax_spec.set_xticks([10, 100, 1e3, 10e3, 100e3, 1e6])\n", 295 | "ax_spec.set_xticklabels([formatter_hz(x) for x in ax_spec.get_xticks()])\n", 296 | "ax_spec.set_ylabel('Phase Noise Spectral Density\\n[10 log10(rad^2/Hz)]')\n", 297 | "ax_spec.tick_params(axis='x', labelrotation=45)\n", 298 | "ax_spec.set_title('')\n", 299 | "#ax_spec.set_ylim([-10, 1])\n", 300 | "\n", 301 | "# Peak power vs n_stack\n", 302 | "ax_power.scatter(n_stacks, 20*np.log10(peaks_lin_mean))\n", 303 | "ax_power.set_xscale('log')\n", 304 | "\n", 305 | "ax_power.set_xscale('log')\n", 306 | "#ax.set_xlim(ax.get_xlim())\n", 307 | "x_n = np.logspace(0, int(np.log10(n_chirps)), (int(np.log10(n_chirps))+1), dtype=int)\n", 308 | "ax_power.set_xticks(x_n)\n", 309 | "ax_power.set_xlabel(\"Number of stacked chirps\")\n", 310 | "ax_power.set_xlim([1, 1000])\n", 311 | "\n", 312 | "if first_plot:\n", 313 | " ax_prf = ax_power.twiny()\n", 314 | " first_plot = False\n", 315 | "formatter0 = EngFormatter(unit='Hz')\n", 316 | "#ax_prf.xaxis.set_major_formatter(formatter0)\n", 317 | "ax_prf.set_xlabel(\"Effective Pulse Repetition Frequency\")\n", 318 | "ax_prf.set_xlim(1/(np.array(ax_power.get_xlim()) * pri))\n", 319 | "ax_prf.set_xscale('log')\n", 320 | "x_pri = 1/(x_n * pri)\n", 321 | "ax_prf.set_xticks(x_pri)\n", 322 | "ax_prf.set_xticklabels([f\"{formatter0(x)}\" for x in x_pri])\n", 323 | "ax_prf.tick_params(axis='x', labelrotation=45)\n", 324 | "\n", 325 | "ax_power.set_ylabel(\"Pulse Compressed Peak Power [dB]\")\n", 326 | "\n", 327 | "ax_power.grid('both')\n", 328 | "\n", 329 | "ax_spec.legend()\n", 330 | "\n", 331 | 
"fig_summary.tight_layout()\n", 332 | "\n", 333 | "fig_summary" 334 | ] 335 | }, 336 | { 337 | "cell_type": "code", 338 | "execution_count": null, 339 | "metadata": {}, 340 | "outputs": [], 341 | "source": [ 342 | "fig_summary.savefig(\"phase_noise_intuition.png\", dpi=500)" 343 | ] 344 | }, 345 | { 346 | "cell_type": "code", 347 | "execution_count": null, 348 | "metadata": {}, 349 | "outputs": [], 350 | "source": [] 351 | } 352 | ], 353 | "metadata": { 354 | "kernelspec": { 355 | "display_name": "rg2", 356 | "language": "python", 357 | "name": "python3" 358 | }, 359 | "language_info": { 360 | "codemirror_mode": { 361 | "name": "ipython", 362 | "version": 3 363 | }, 364 | "file_extension": ".py", 365 | "mimetype": "text/x-python", 366 | "name": "python", 367 | "nbconvert_exporter": "python", 368 | "pygments_lexer": "ipython3", 369 | "version": "3.11.3" 370 | } 371 | }, 372 | "nbformat": 4, 373 | "nbformat_minor": 2 374 | } 375 | --------------------------------------------------------------------------------