├── src └── ab │ ├── bsw │ ├── templates │ │ └── default.yaml │ ├── campaign_header.yaml │ ├── bpe.pl │ ├── MENU_CMP.INP │ ├── __init__.py │ ├── bpe.py │ └── bpe_terminal_output.py │ ├── station │ ├── __init__.py │ ├── sitelog │ │ ├── models.py │ │ └── __init__.py │ └── 1.03.STA │ ├── qaqc │ ├── __init__.py │ └── check_example.py │ ├── __init__.py │ ├── typing.py │ ├── strings.py │ ├── imports.py │ ├── configuration │ ├── bsw_env_vars │ ├── sources.py │ ├── dispatchers.py │ ├── constructors │ │ ├── __init__.py │ │ ├── strings.py │ │ ├── dates.py │ │ └── paths.py │ ├── tasks.py │ └── core.yaml │ ├── cli │ ├── about.py │ ├── _arguments.py │ ├── _input.py │ ├── config.py │ ├── _actions.py │ ├── _filter.py │ ├── logs.py │ ├── dateinfo.py │ ├── __init__.py │ ├── _output.py │ ├── _options.py │ ├── qc.py │ ├── download.py │ ├── station.py │ └── troposphere.py │ ├── files.py │ ├── data │ ├── __init__.py │ ├── compress.py │ ├── http.py │ ├── stats.py │ ├── file.py │ ├── sftp.py │ ├── ftp.py │ └── source.py │ ├── country_code │ ├── __init__.py │ └── ISO-3166-1-alpha-3.yaml │ ├── pkg.py │ ├── paths.py │ ├── parameters.py │ ├── tasks.py │ └── dates.py ├── tests └── ab │ ├── configuration │ ├── configuration_files │ │ ├── merge_1 │ │ ├── merge_2 │ │ ├── a.yaml │ │ ├── campaign.yaml │ │ ├── b.yaml │ │ ├── c.yaml │ │ └── common.yaml │ └── test_configuration.py │ ├── test_package.py │ ├── test_env.yaml │ ├── test_strings.py │ ├── test_parameters.py │ ├── test_vmf.py │ ├── test_country_code.py │ ├── data │ ├── test_init.py │ └── test_source.py │ ├── conftest.py │ ├── bsw │ ├── test_campaign.py │ └── test_bpe.py │ ├── station │ └── test_sitelog.py │ ├── test_paths.py │ └── test_dates.py ├── docs ├── assets │ └── logo.png ├── dev │ ├── assets │ │ └── diagrams │ │ │ ├── structurizr-1-BSW.png │ │ │ ├── structurizr-1-Data.png │ │ │ ├── structurizr-1-Container-1.png │ │ │ └── structurizr-1-SystemContext-1.png │ ├── system-overview.md │ └── contribute.md ├── manual │ ├── assets │ │ ├── 
create-campaign-configuration.png │ │ ├── autobernese.yaml │ │ ├── quick-start_run.cast │ │ └── campaign.yaml │ ├── install-autobernese.md │ └── quick-start.md ├── stylesheets │ └── extra.css ├── prerequisites.md ├── references.md └── index.md ├── .pre-commit-config.yaml ├── .github └── workflows │ ├── black.yml │ ├── backport.yml │ ├── test.yml │ └── docs.yaml ├── environment.yml ├── environment-dev.yml ├── pyproject.toml ├── setup.cfg ├── LICENSE ├── mkdocs.yml ├── .gitignore ├── README.md └── workspace └── structurizr └── workspace.dsl /src/ab/bsw/templates/default.yaml: -------------------------------------------------------------------------------- 1 | tasks: [] 2 | sources: [] 3 | -------------------------------------------------------------------------------- /tests/ab/configuration/configuration_files/merge_1: -------------------------------------------------------------------------------- 1 | merge_1 2 | -------------------------------------------------------------------------------- /tests/ab/configuration/configuration_files/merge_2: -------------------------------------------------------------------------------- 1 | merge_2 2 | -------------------------------------------------------------------------------- /src/ab/station/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Read and convert station data 3 | 4 | """ 5 | -------------------------------------------------------------------------------- /src/ab/qaqc/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Quality assurance [QA] and control [QC] 3 | 4 | """ 5 | -------------------------------------------------------------------------------- /docs/assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SDFIdk/AutoBernese/HEAD/docs/assets/logo.png 
-------------------------------------------------------------------------------- /tests/ab/configuration/configuration_files/a.yaml: -------------------------------------------------------------------------------- 1 | section_1: 2 | - &letter a 3 | 4 | section_2: 5 | foo: bar 6 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black-pre-commit-mirror 3 | rev: 25.1.0 4 | hooks: 5 | - id: black 6 | -------------------------------------------------------------------------------- /tests/ab/configuration/configuration_files/campaign.yaml: -------------------------------------------------------------------------------- 1 | # Section may override the previous 2 | troposphere: 3 | ipath: !Path [*U, bar] 4 | -------------------------------------------------------------------------------- /docs/dev/assets/diagrams/structurizr-1-BSW.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SDFIdk/AutoBernese/HEAD/docs/dev/assets/diagrams/structurizr-1-BSW.png -------------------------------------------------------------------------------- /docs/dev/assets/diagrams/structurizr-1-Data.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SDFIdk/AutoBernese/HEAD/docs/dev/assets/diagrams/structurizr-1-Data.png -------------------------------------------------------------------------------- /docs/manual/assets/create-campaign-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SDFIdk/AutoBernese/HEAD/docs/manual/assets/create-campaign-configuration.png -------------------------------------------------------------------------------- /docs/dev/assets/diagrams/structurizr-1-Container-1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SDFIdk/AutoBernese/HEAD/docs/dev/assets/diagrams/structurizr-1-Container-1.png -------------------------------------------------------------------------------- /tests/ab/configuration/configuration_files/b.yaml: -------------------------------------------------------------------------------- 1 | section_1: 2 | - *letter 3 | 4 | section_2: 5 | bar: baz 6 | 7 | section_3: §ion_3 8 | hello: you 9 | -------------------------------------------------------------------------------- /docs/dev/assets/diagrams/structurizr-1-SystemContext-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SDFIdk/AutoBernese/HEAD/docs/dev/assets/diagrams/structurizr-1-SystemContext-1.png -------------------------------------------------------------------------------- /tests/ab/configuration/configuration_files/c.yaml: -------------------------------------------------------------------------------- 1 | section_1: 2 | - c 3 | 4 | section_2: 5 | baz: no 6 | 7 | section_3: 8 | <<: *section_3 9 | hi: there 10 | -------------------------------------------------------------------------------- /docs/stylesheets/extra.css: -------------------------------------------------------------------------------- 1 | :root > * { 2 | --md-primary-fg-color: #368ccb; 3 | --md-primary-fg-color--light: #fff; 4 | --md-primary-fg-color--dark: #26367c; 5 | } 6 | -------------------------------------------------------------------------------- /src/ab/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | AutoBernese 3 | 4 | """ 5 | 6 | from importlib import metadata 7 | 8 | import ab 9 | 10 | 11 | __version__ = metadata.version(ab.__name__) 12 | -------------------------------------------------------------------------------- /src/ab/typing.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Common typing annotation for AutoBernese 3 | 4 | """ 5 | 6 | from collections import abc 7 | 8 | 9 | type AnyFunction[T, **P] = abc.Callable[P, T] 10 | -------------------------------------------------------------------------------- /.github/workflows/black.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | lint: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v2 10 | - uses: psf/black@stable 11 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: ab 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - click 6 | - humanize 7 | - pip 8 | - python=3.13 9 | - pyyaml 10 | - requests 11 | - rich 12 | - pip: 13 | - click-aliases 14 | - pyyaml_env_tag 15 | -------------------------------------------------------------------------------- /tests/ab/configuration/configuration_files/common.yaml: -------------------------------------------------------------------------------- 1 | # Not allowed 2 | bsw_env: {} 3 | bsw_files: {} 4 | env: {} 5 | runtime: {} 6 | campaign: {} 7 | 8 | # Section may override the previous 9 | troposphere: 10 | ipath: !Path [*D, foo] 11 | -------------------------------------------------------------------------------- /src/ab/bsw/campaign_header.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | version: &version {version} 3 | username: &username {username} 4 | created: &created {created} 5 | template: &template {template} 6 | campaign: &campaign {campaign} 7 | beg: &beg {beg} 8 | end: &end {end} 9 | -------------------------------------------------------------------------------- /src/ab/strings.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Work with strings 3 | 4 | """ 5 | 6 | from typing import Any 7 | 8 | 9 | class Operator(str): 10 | def operate(self: str, method: str, /, *args: list[Any]) -> Any: 11 | assert hasattr(self, method) 12 | return getattr(self, method)(*args) 13 | -------------------------------------------------------------------------------- /docs/dev/system-overview.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide: 3 | - navigation 4 | - toc 5 | --- 6 | 7 | ## System context 8 | 9 | ![System context](assets/diagrams/structurizr-1-SystemContext-1.png) 10 | 11 | 12 | ## Main parts of the software | Containers 13 | 14 | ![Containers](assets/diagrams/structurizr-1-Container-1.png) 15 | -------------------------------------------------------------------------------- /src/ab/imports.py: -------------------------------------------------------------------------------- 1 | from importlib import import_module 2 | 3 | from ab.typing import AnyFunction 4 | 5 | 6 | def import_function(specification: str) -> AnyFunction: 7 | module_spec, func_name = specification.rsplit(".", 1) 8 | module = import_module(module_spec) 9 | return getattr(module, func_name) 10 | -------------------------------------------------------------------------------- /src/ab/configuration/bsw_env_vars: -------------------------------------------------------------------------------- 1 | VERSION 2 | F_VERS 3 | F_VERS_LIST 4 | C 5 | SRC 6 | LG 7 | FG 8 | XG 9 | XQ 10 | SPT 11 | BPE 12 | EXE 13 | SUP 14 | DOC 15 | HLP 16 | PAN 17 | GLOBAL 18 | MODEL 19 | CONFIG 20 | USR 21 | OPT 22 | PCF 23 | SCR 24 | BPE_SERVER_HOST 25 | U 26 | T 27 | P 28 | D 29 | S 30 | QTBERN 31 | OS 32 | OS_NAME 33 | CGROUP 34 | -------------------------------------------------------------------------------- /src/ab/configuration/sources.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Configuration tools to build Source instances 3 | 4 | """ 5 | 6 | from typing import Any 7 | 8 | from ab.configuration import SectionListItemType 9 | from ab.data.source import Source 10 | 11 | 12 | def load(kwargs: SectionListItemType) -> Source: 13 | return Source(**kwargs) 14 | 15 | 16 | def load_all(raw: list[SectionListItemType]) -> list[Source]: 17 | return [load(kwargs) for kwargs in raw] 18 | -------------------------------------------------------------------------------- /tests/ab/test_package.py: -------------------------------------------------------------------------------- 1 | from ab import __version__ 2 | from ab import pkg 3 | 4 | 5 | def test_package_data_exist(): 6 | assert pkg.core.is_file() 7 | assert pkg.bsw_env_vars.is_file() 8 | assert pkg.country_codes.is_file() 9 | assert pkg.template_sta_file.is_file() 10 | assert pkg.bpe_runner.is_file() 11 | assert pkg.template_campaign_menu_list.is_file() 12 | assert pkg.template_campaign_default.is_file() 13 | assert pkg.campaign_header.is_file() 14 | -------------------------------------------------------------------------------- /environment-dev.yml: -------------------------------------------------------------------------------- 1 | name: ab-dev 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - click 6 | - humanize 7 | - pip 8 | - python=3.13 9 | - pyyaml 10 | - requests 11 | - rich 12 | - pip: 13 | - click-aliases 14 | - mkdocs-glightbox 15 | - mkdocs-kroki-plugin 16 | - pyyaml_env_tag 17 | 18 | - bandit 19 | - coverage 20 | - graphviz 21 | - mkdocs-graphviz 22 | - mkdocs-material 23 | - mypy 24 | - pytest 25 | - pre-commit 26 | # For MyPy 27 | - types-pyyaml 28 | - types-requests 29 | -------------------------------------------------------------------------------- /src/ab/cli/about.py: -------------------------------------------------------------------------------- 1 | """ 2 | Get information about 
Bernese GNSS Software and AutoBernese 3 | 4 | """ 5 | 6 | from rich import print 7 | 8 | from ab import __version__ 9 | from ab.bsw import get_bsw_release 10 | 11 | 12 | def autobernese() -> str: 13 | return f"AutoBernese {__version__}" 14 | 15 | 16 | def bernese() -> str: 17 | return f"Bernese {get_bsw_release()}" 18 | 19 | 20 | def versions() -> str: 21 | return f"{autobernese()}; {bernese()}" 22 | 23 | 24 | def print_versions() -> None: 25 | print(versions()) 26 | -------------------------------------------------------------------------------- /src/ab/bsw/bpe.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | use lib $ENV{BPE}; 3 | use startBPE; 4 | 5 | my $bpe = new startBPE(); 6 | 7 | $$bpe{PCF_FILE} = "$ENV{AB_BPE_PCF_FILE}"; 8 | $$bpe{CPU_FILE} = "$ENV{AB_BPE_CPU_FILE}"; 9 | $$bpe{BPE_CAMPAIGN} = "$ENV{AB_BPE_CAMPAIGN}"; 10 | $$bpe{YEAR} = "$ENV{AB_BPE_YEAR}"; 11 | $$bpe{SESSION} = "$ENV{AB_BPE_SESSION}"; 12 | $$bpe{SYSOUT} = "$ENV{AB_BPE_SYSOUT}"; 13 | $$bpe{STATUS} = "$ENV{AB_BPE_STATUS}"; 14 | $$bpe{TASKID} = "$ENV{AB_BPE_TASKID}"; 15 | 16 | $bpe->resetCPU(); 17 | $bpe->run(); 18 | -------------------------------------------------------------------------------- /src/ab/files.py: -------------------------------------------------------------------------------- 1 | """ 2 | Handle local data 3 | 4 | """ 5 | 6 | from pathlib import Path 7 | import shutil 8 | import logging 9 | 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | def delete_directory_content(path: str) -> None: 15 | """ 16 | Remove all children in a given directory. 
17 | 18 | """ 19 | for child in Path(path).iterdir(): 20 | log.info(f"Deleting {child!r} ...") 21 | if child.is_file() or child.is_symlink(): 22 | child.unlink() 23 | if child.is_dir(): 24 | shutil.rmtree(child) 25 | -------------------------------------------------------------------------------- /tests/ab/test_env.yaml: -------------------------------------------------------------------------------- 1 | # Bernese 2 | - VERSION: "VERSION" 3 | - F_VERS: "F_VERS" 4 | - F_VERS_LIST: "F_VERS_LIST" 5 | - C 6 | - SRC 7 | - LG 8 | - FG 9 | - XG 10 | - XQ 11 | - SPT 12 | - BPE 13 | - EXE 14 | - SUP 15 | - DOC: [RELEASE.TXT] 16 | - HLP 17 | - PAN: [MENU_CMP.INP] 18 | - GLOBAL 19 | - MODEL 20 | - CONFIG 21 | - USR 22 | - OPT 23 | - PCF 24 | - SCR 25 | - BPE_SERVER_HOST: "BPE_SERVER_HOST" 26 | - U 27 | - T 28 | - P 29 | - D 30 | - S 31 | - QTBERN 32 | - OS: "OS" 33 | - OS_NAME: "OS_NAME" 34 | - CGROUP: "CGROUP" 35 | 36 | # AutoBernese 37 | - autobernese: ['templates'] 38 | -------------------------------------------------------------------------------- /tests/ab/test_strings.py: -------------------------------------------------------------------------------- 1 | from ab.strings import Operator 2 | 3 | 4 | def test_Operator(): 5 | s = Operator("a") 6 | 7 | expected = "A" 8 | result = s.operate("upper") 9 | 10 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 11 | 12 | expected = ["a"] 13 | result = s.operate("split", "-") 14 | 15 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 16 | 17 | expected = "-a-" 18 | result = s.operate("join", ["-", "-"]) 19 | 20 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
21 | -------------------------------------------------------------------------------- /.github/workflows/backport.yml: -------------------------------------------------------------------------------- 1 | name: Backport 2 | on: 3 | pull_request_target: 4 | types: 5 | - closed 6 | - labeled 7 | 8 | jobs: 9 | backport: 10 | runs-on: ubuntu-18.04 11 | name: Backport 12 | steps: 13 | - name: Backport Bot 14 | id: backport 15 | if: github.event.pull_request.merged && ( ( github.event.action == 'closed' && contains( join( github.event.pull_request.labels.*.name ), 'backport') ) || contains( github.event.label.name, 'backport' ) ) 16 | uses: m-kuhn/backport@v1.1.1 17 | with: 18 | github_token: ${{ secrets.GITHUB_TOKEN }} 19 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | 'wheel', 4 | 'setuptools>=43.0.0', 5 | ] 6 | build-backend = 'setuptools.build_meta' 7 | 8 | [tool.pytest.ini_options] 9 | addopts = '--strict-markers -r A -q' 10 | testpaths = ['tests'] 11 | 12 | [tool.coverage.paths] 13 | source = ['src'] 14 | 15 | [tool.coverage.run] 16 | branch = true 17 | source = ['ab'] 18 | 19 | [tool.coverage.report] 20 | show_missing = true 21 | fail_under = 100 22 | 23 | [tool.mypy] 24 | python_version = "3.13" 25 | strict = true 26 | pretty = true 27 | show_column_numbers = true 28 | show_error_codes = true 29 | show_error_context = true 30 | mypy_path = "src" 31 | -------------------------------------------------------------------------------- /src/ab/cli/_arguments.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common command-line arguments 3 | 4 | """ 5 | 6 | import click 7 | 8 | from ab.cli import _input 9 | 10 | 11 | # Configuration 12 | section = click.argument("section", default=None, type=str, required=False) 13 | 14 | # Campaign 15 | name = 
click.argument("name", type=str) 16 | names = click.argument("names", nargs=-1, type=str) 17 | template = click.argument("template", default=None, type=str, required=False) 18 | 19 | # Date information 20 | date = click.argument("date", type=_input.date) 21 | week = click.argument("week", type=int) 22 | year = click.argument("year", type=int) 23 | doy = click.argument("doy", type=int) 24 | -------------------------------------------------------------------------------- /src/ab/data/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Transfer and manage local and remote data sources. 3 | 4 | """ 5 | 6 | from dataclasses import ( 7 | dataclass, 8 | field, 9 | ) 10 | 11 | 12 | @dataclass 13 | class TransferStatus: 14 | existing: int = 0 15 | success: int = 0 16 | failed: int = 0 17 | not_found: int = 0 18 | exceptions: list[Exception] = field(repr=False, default_factory=list) 19 | 20 | def __add__(self, other: "TransferStatus") -> "TransferStatus": 21 | self.existing += other.existing 22 | self.success += other.success 23 | self.failed += other.failed 24 | self.not_found += other.not_found 25 | return self 26 | 27 | __radd__ = __add__ 28 | -------------------------------------------------------------------------------- /src/ab/cli/_input.py: -------------------------------------------------------------------------------- 1 | """ 2 | Handle command-line interface input 3 | 4 | """ 5 | 6 | import os 7 | from typing import Final 8 | import datetime as dt 9 | 10 | 11 | class ENV_VARS: 12 | AB_CLI_PROMPT_ANSWER = "AB_CLI_PROMPT_ANSWER" 13 | 14 | 15 | DATE_FORMAT: Final = "%Y-%m-%d" 16 | 17 | 18 | def date(s: str) -> dt.date: 19 | return dt.datetime.strptime(s, DATE_FORMAT).date() 20 | 21 | 22 | def set_prompt_proceed_yes() -> None: 23 | os.environ[ENV_VARS.AB_CLI_PROMPT_ANSWER] = "Y" 24 | 25 | 26 | def prompt_proceed() -> bool: 27 | answer = os.getenv(ENV_VARS.AB_CLI_PROMPT_ANSWER) 28 | if answer is None: 29 | answer = 
input("Proceed? (y/[n]): ") 30 | return answer.lower() == "y" 31 | -------------------------------------------------------------------------------- /docs/prerequisites.md: -------------------------------------------------------------------------------- 1 | 2 | ## Install the MambaForge Python distribution 3 | 4 | [GitHub][MAMBA-INSTALLER] the following command for Linux/Unix installs the 5 | software: 6 | 7 | [MAMBA-INSTALLER]: https://github.com/conda-forge/miniforge#mambaforge 8 | 9 | ```sh 10 | curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh" 11 | bash Mambaforge-$(uname)-$(uname -m).sh 12 | ``` 13 | 14 | 20 | -------------------------------------------------------------------------------- /src/ab/bsw/MENU_CMP.INP: -------------------------------------------------------------------------------- 1 | 2 | ! List of Campaigns 3 | ! ----------------- 4 | CAMPAIGN {count}{separator}{campaigns} 5 | ## widget = uniline; numlines = 30 6 | 7 | MSG_CAMPAIGN 1 "Campaign directory" 8 | 9 | 10 | # BEGIN_PANEL NO_CONDITION ##################################################### 11 | # EDIT LIST OF CAMPAIGNS - MENU_CMP # 12 | # # 13 | # > Campaign_directory < # CAMPAIGN 14 | # # 15 | # END_PANEL #################################################################### 16 | -------------------------------------------------------------------------------- /tests/ab/test_parameters.py: -------------------------------------------------------------------------------- 1 | from ab.parameters import ( 2 | permutations, 3 | resolvable, 4 | ) 5 | 6 | 7 | def test_permutations(): 8 | parameters = dict(a=(0, 1), b=(2, 3)) 9 | expected = [ 10 | dict(a=0, b=2), 11 | dict(a=0, b=3), 12 | dict(a=1, b=2), 13 | dict(a=1, b=3), 14 | ] 15 | result = permutations(parameters) 16 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
17 | 18 | 19 | def test_resolvable(): 20 | parameters = dict(a=1, b=2) 21 | template = "{a}{a.bit_count()}" 22 | expected = dict(a=1) 23 | result = resolvable(parameters, template) 24 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 25 | -------------------------------------------------------------------------------- /src/ab/bsw/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for Bernese GNSS Software system interaction. 3 | 4 | """ 5 | 6 | from dataclasses import dataclass 7 | 8 | from ab import configuration 9 | 10 | 11 | @dataclass 12 | class ReleaseInfo: 13 | version: str 14 | release: str 15 | 16 | def __str__(self) -> str: 17 | return f"{self.version} ({self.release})" 18 | 19 | 20 | def get_bsw_release() -> ReleaseInfo: 21 | config = configuration.load() 22 | fname = config.get("bsw_files", {}).get("release_info") 23 | if fname is None: 24 | return ReleaseInfo("", "") 25 | lines = fname.read_text().strip().splitlines() 26 | return ReleaseInfo( 27 | lines[0].split()[-1], 28 | lines[1].split()[-1], 29 | ) 30 | -------------------------------------------------------------------------------- /src/ab/data/compress.py: -------------------------------------------------------------------------------- 1 | """ 2 | File compression 3 | 4 | """ 5 | 6 | from pathlib import Path 7 | 8 | import shutil 9 | import gzip as _gzip 10 | 11 | from ab.paths import resolve_wildcards 12 | 13 | 14 | def gzip(fname: str | Path) -> None: 15 | ifname = Path(fname) 16 | if not ifname.is_file(): 17 | raise IOError(f"File {fname!r} does not exist ...") 18 | ofname = ifname.with_suffix(ifname.suffix + ".gz") 19 | # From: https://docs.python.org/3.12/library/gzip.html 20 | with open(ifname, "rb") as f_in: 21 | with _gzip.open(ofname, "wb") as f_out: 22 | shutil.copyfileobj(f_in, f_out) 23 | 24 | 25 | def gzip_glob(fname: str | Path) -> None: 26 | for resolved in resolve_wildcards(fname): 27 | 
gzip(resolved) 28 | -------------------------------------------------------------------------------- /tests/ab/test_vmf.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from pathlib import Path 3 | 4 | from ab import configuration 5 | from ab.vmf import _input_filepaths 6 | 7 | 8 | def test_input_filenames(): 9 | path = Path("{date.year}") 10 | date = dt.date(2023, 1, 1) 11 | ifname = configuration.load().get("troposphere").get("ifname") 12 | result_list = _input_filepaths(path, ifname, date) 13 | expected_list = [ 14 | Path("2023/VMF3_20230101.H00"), 15 | Path("2023/VMF3_20230101.H06"), 16 | Path("2023/VMF3_20230101.H12"), 17 | Path("2023/VMF3_20230101.H18"), 18 | Path("2023/VMF3_20230102.H00"), 19 | ] 20 | for result, expected in zip(result_list, expected_list): 21 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 22 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Run tests 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | 7 | test: 8 | 9 | runs-on: ubuntu-latest 10 | timeout-minutes: 10 11 | defaults: 12 | run: 13 | shell: bash -l {0} 14 | 15 | steps: 16 | 17 | - name: Checkout repository code 18 | uses: actions/checkout@v3 19 | 20 | - name: Setup conda 21 | uses: conda-incubator/setup-miniconda@v3 22 | with: 23 | miniforge-variant: Miniforge3 24 | miniforge-version: latest 25 | use-mamba: true 26 | environment-file: environment-dev.yml 27 | auto-activate-base: false 28 | activate-environment: ab-dev 29 | 30 | - name: Install test dependencies 31 | run: python -m pip install -e . 
32 | 33 | - name: Test 34 | run: pytest 35 | -------------------------------------------------------------------------------- /src/ab/country_code/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Three-letter country codes 3 | 4 | """ 5 | 6 | import functools 7 | 8 | import yaml 9 | 10 | from ab import pkg 11 | 12 | 13 | _COUNTRY_CODES: dict[str, str] | None = None 14 | 15 | 16 | @functools.cache 17 | def get(country_name: str) -> str | None: 18 | """ 19 | Get three-letter country code for given country name if it exists in the 20 | package-version of the ISO 3166 standard. 21 | 22 | References: 23 | ----------- 24 | * [ISO 3166 Country Codes](https://www.iso.org/iso-3166-country-codes.html) 25 | 26 | """ 27 | global _COUNTRY_CODES 28 | if _COUNTRY_CODES is None: 29 | _COUNTRY_CODES = yaml.safe_load(pkg.country_codes.read_text()) 30 | found = _COUNTRY_CODES.get(country_name.strip()) 31 | if found is not None: 32 | return str(found) 33 | return found 34 | -------------------------------------------------------------------------------- /src/ab/station/sitelog/models.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from dataclasses import dataclass 3 | 4 | 5 | @dataclass 6 | class SiteIdentificationOfTheGNSSMonument: 7 | site_name: str 8 | four_character_id: str 9 | nine_character_id: str | None 10 | domes: str 11 | date_installed: str 12 | 13 | 14 | @dataclass 15 | class SiteLocationInformation: 16 | city_or_town: str 17 | country: str 18 | 19 | 20 | @dataclass 21 | class GNSSReceiverInformation: 22 | receiver_type: str 23 | receiver_serial_number: str 24 | firmware: str 25 | date_installed: str 26 | date_removed: str 27 | 28 | 29 | @dataclass 30 | class GNSSAntennaInformation: 31 | antenna_type: str 32 | antenna_serial_number: str 33 | marker_up: str 34 | marker_north: str 35 | marker_east: str 36 | date_installed: str 37 | date_removed: str 38 
| -------------------------------------------------------------------------------- /src/ab/cli/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Command-line interface for AutoBernese configuration content. 3 | 4 | """ 5 | 6 | import logging 7 | 8 | import click 9 | from click_aliases import ClickAliasedGroup 10 | from rich import print 11 | 12 | from ab import configuration 13 | from ab.cli import ( 14 | _arguments, 15 | _options, 16 | ) 17 | from ab.bsw import campaign as _campaign 18 | 19 | 20 | log = logging.getLogger(__name__) 21 | 22 | 23 | @click.command 24 | @_arguments.section 25 | @_options.campaign 26 | def config(section: str, name: str | None = None) -> None: 27 | """ 28 | Show all or specified configuration section(s). 29 | 30 | """ 31 | if name is not None: 32 | config = _campaign.load(name) 33 | else: 34 | config = configuration.load() 35 | 36 | if section is None: 37 | print(config) 38 | else: 39 | print(config.get(section, {})) 40 | -------------------------------------------------------------------------------- /.github/workflows/docs.yaml: -------------------------------------------------------------------------------- 1 | name: Publish docs via GitHub Pages 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | pull_request: 8 | paths: 9 | - 'docs/**' 10 | - 'mkdocs.yml' 11 | 12 | 13 | jobs: 14 | build: 15 | name: Deploy docs 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout main 19 | uses: actions/checkout@v2 20 | 21 | - name: Deploy docs 22 | uses: actions/setup-python@v2 23 | with: 24 | python-version: 3.x 25 | - name: Install MkDocs and dependencies 26 | run: pip install mkdocs-material mkdocs-glightbox mkdocs-kroki-plugin mkdocs-graphviz 27 | 28 | - name: Build and deploy documentation to official documentation website 29 | if: github.event_name == 'push' 30 | run: mkdocs gh-deploy --force 31 | 32 | - name: Build documentation to retrieve artifacts 33 | if: github.event_name != 
'push' 34 | run: mkdocs build 35 | -------------------------------------------------------------------------------- /src/ab/cli/_actions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common command actions 3 | 4 | """ 5 | 6 | import asyncio 7 | from collections.abc import ( 8 | Callable, 9 | Iterable, 10 | ) 11 | 12 | from rich import print 13 | 14 | from ab.cli import _output 15 | from ab.tasks import Task 16 | 17 | 18 | def run_tasks(tasks: Iterable[Task]) -> None: 19 | """ 20 | Run tasks, synchronously 21 | 22 | """ 23 | for task in tasks: 24 | task.run() 25 | 26 | 27 | def run_tasks_async(tasks: Iterable[Task]) -> None: 28 | """ 29 | Run tasks, asynchronously 30 | 31 | """ 32 | 33 | async def resolved_tasks() -> None: 34 | async_tasks = [asyncio.to_thread(task.run) for task in tasks] 35 | await asyncio.gather(*async_tasks) 36 | 37 | asyncio.run(resolved_tasks()) 38 | 39 | 40 | def get_task_runner(asynchronous: bool) -> Callable[[list[Task]], None]: 41 | if asynchronous is True: 42 | return run_tasks_async 43 | return run_tasks 44 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = ab 3 | version = 1.0.1 4 | description = AutoBernese 5 | long_description = file: README.md 6 | long_description_content_type = text/markdown; charset=UTF-8 7 | url = https://github.com/SDFIDK/AutoBernese 8 | author = Joachim Mortensen 9 | author_email = joamo@kds.dk 10 | license = MIT 11 | license_file = LICENSE 12 | project_urls = 13 | Documentation = https://SDFIDK.github.io/AutoBernese 14 | Source = https://github.com/SDFIDK/AutoBernese 15 | Tracker = https://github.com/SDFIDK/AutoBernese/issues 16 | 17 | [options] 18 | zip_safe = False 19 | packages = find: 20 | package_dir = 21 | = src 22 | platforms = any 23 | python_requires = >=3.13 24 | install_requires = 25 | coverage 26 | 27 
def gzip_dispatch(arguments: ArgumentsType) -> Iterable[GZipCompressArgumentType]:
    """
    Expand the `fname` entry of the given arguments.

    The wildcard pattern in `fname` is resolved, and one copy of the argument
    mapping is produced per matching file, each with `fname` replaced by the
    concrete path.

    """
    pattern = arguments["fname"]
    expanded = []
    for resolved in resolve_wildcards(pattern):
        item = dict(arguments)
        item["fname"] = resolved
        expanded.append(item)
    return expanded


def vmf_dispatch(arguments: ArgumentsType) -> Iterable[VMFBuildArgumentType]:
    """
    Yield one argument mapping per VMF day-file builder created from the
    given arguments.

    """
    # Builders are created eagerly; the argument mappings are yielded lazily.
    builders = day_file_builders(**arguments)
    return ({"builder": builder} for builder in builders)
def get_raw(
    config: ConfigurationType,
    section: str,
    identifiers: list[str] | None = None,
    exclude: list[str] | None = None,
) -> list[SectionListItemType]:
    """
    Get items in sections `sources` or `tasks` and take all or selected.

    Items are kept when their `identifier` is among `identifiers` (if given)
    and dropped when it is among `exclude` (if given).

    """
    items: list[SectionListItemType] = config.get(section, [])
    if not items:
        # Tell the user on screen as well as in the log.
        msg = f"No {section} found ..."
        print(msg)
        log.info(msg)

    if identifiers:
        items = [item for item in items if item.get("identifier") in identifiers]

    if exclude:
        items = [item for item in items if item.get("identifier") not in exclude]

    return items
30 | 31 | test_data_full = yaml.safe_load(pkg.country_codes.read_text()) 32 | for country_name, expected in test_data_full.items(): 33 | result = country_code.get(country_name) 34 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 The Danish Agency for Climate Data (https://eng.kds.dk/) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
@click.command
def logs() -> None:
    """
    Follow log file (run `tail -f path/to/logfile.log`).

    """
    runtime = configuration.load().get("runtime")
    if runtime is None:
        raise SystemExit("No runtime entry found in configuration.")
    filename = runtime.get("logging", {}).get("filename")
    if filename is None:
        raise SystemExit("No log-file name entry found in logging configuration.")

    process: sub.Popen | None = None
    try:
        log.debug("Show log tail ...")
        # Bug fix: pass the configured log-file name to `tail`; previously a
        # placeholder literal was used and `filename` was never consumed.
        process = sub.Popen(["/usr/bin/tail", "-f", str(filename)])
        process.wait()

    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop following the log.
        log.debug("Log tail finished ...")

    finally:
        # Leave the cursor on a fresh line and make sure `tail` is stopped.
        print()
        if process is not None:
            process.terminate()
            process.kill()
file generated from site-log files. 24 | template_sta_file = base.joinpath("station/1.03.STA") 25 | 26 | # Bernese GNSS Software API 27 | 28 | # The script that runs Bernese Processing Engine with given input arguments 29 | bpe_runner = base.joinpath("bsw/bpe.pl") 30 | 31 | # Internal BSW file with the list of existing campaigns 32 | template_campaign_menu_list = base.joinpath("bsw/MENU_CMP.INP") 33 | 34 | # Template for the pre-processed meta data that are added to each campaign-configuration file. 35 | campaign_header = base.joinpath("bsw/campaign_header.yaml") 36 | 37 | # Campaign templates included in the package 38 | template_campaign_default = base.joinpath("bsw/templates/default.yaml") 39 | -------------------------------------------------------------------------------- /src/ab/paths.py: -------------------------------------------------------------------------------- 1 | """ 2 | Work with path wildcards 3 | 4 | """ 5 | 6 | from typing import Any 7 | from collections.abc import Iterable 8 | from pathlib import Path 9 | 10 | 11 | def resolve_wildcards(path: Path | str) -> Iterable[Path]: 12 | """ 13 | Return all files resolved by the wildcards used, or just the match if the 14 | path exists. 15 | 16 | """ 17 | path = Path(path) 18 | parts = path.parts[path.is_absolute() :] 19 | return Path(path.root).glob(str(Path(*parts))) 20 | 21 | 22 | def _parts(path: str, *, sep: str = "/") -> list[str]: 23 | """ 24 | Split path string into parts, ignoring prepended directory separator and 25 | pre- and post-fixed dot `.`, i.e. `./` and `/.` 26 | 27 | """ 28 | if path.startswith("./"): 29 | path = path[2:] 30 | if path.endswith("/."): 31 | path = path[:-2] 32 | return [part for part in path.split(sep) if part.strip()] 33 | 34 | 35 | def _parents(path: str, *, sep: str = "/") -> list[str]: 36 | """ 37 | Accummulate a list of parent directories. 
_tag_constructor_map = (
    # Read environment variables
    ("!ENV", construct_env_tag),
    # Build paths
    ("!Path", paths.path_constructor),
    ("!PathStr", paths.path_as_str_constructor),
    ("!Parent", paths.parent_constructor),
    # Use parameters
    ("!DateRange", dates.date_range_constructor),
    ("!AsGPSDate", dates.date_to_gps_date_constructor),
    # Manipulate strings in sequences
    ("!StringTransform", strings.string_transform_constructor),
)


def add() -> None:
    """
    Register the custom constructors on PyYAML's SafeLoader.

    Also wraps the standard timestamp constructor so that every parsed
    timestamp comes back as a GPSDate instance.

    """
    for tag, constructor in _tag_constructor_map:
        SafeLoader.add_constructor(tag, constructor)

    # Make all timestamps GPSDate instances
    timestamp_tag = "tag:yaml.org,2002:timestamp"
    original = SafeLoader.yaml_constructors[timestamp_tag]
    SafeLoader.yaml_constructors[timestamp_tag] = dates.timestamp2GPSDate(original)
User manual, Astronomical Institute, University of Bern, Bern 6 | Open Publishing. DOI: [10.7892/boris.72297][BSW-DOI]; ISBN: 978-3-906813-05-9. 7 | 8 | [BSW-DOI]: http://dx.doi.org/10.7892/boris.72297 9 | 10 | 11 | ## YAML 12 | 13 | * [YAML specification](https://yaml.org/) 14 | * [PyYAML documentation](https://pyyaml.org/wiki/PyYAMLDocumentation) 15 | 16 | 17 | ## Standards and Formats 18 | 19 | * [IGS Site Log Manager User Guide](https://igs.org/site-log-manager-user-guide/) 20 | - [Template sitelog](https://files.igs.org/pub/station/general/blank.log) 21 | - [Template instructions](https://files.igs.org/pub/station/general/sitelog_instr.txt) 22 | 23 | * [IGS switch to IGS20/igs20.atx and repro3 standards](https://igs.org/news/igs20/) 24 | * [IGS Site Log Manager User Guide](https://www.igs.org/site-log-manager-user-guide) | [Empty sitelog](https://files.igs.org/pub/station/general/blank.log) 25 | 26 | * [GNSS Format Descriptions](https://gage.upc.edu/en/learning-materials/library/gnss-format-descriptions) 27 | * [Standards and data formats](https://gssc.esa.int/education/library/standards-and-data-formats/) 28 | * [Research group of Astronomy and GEomatics. gAGE](https://gage.upc.edu/en) 29 | -------------------------------------------------------------------------------- /tests/ab/data/test_init.py: -------------------------------------------------------------------------------- 1 | import json 2 | from dataclasses import asdict 3 | 4 | from ab.data import TransferStatus 5 | 6 | 7 | def test_add_download_statusses(): 8 | status1 = TransferStatus(1, 2) 9 | status2 = TransferStatus(2, 1) 10 | result = status1 + status2 11 | expected = TransferStatus(3, 3) 12 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
def test_radd_transfer_statusses():
    # In-place addition exercises the reflected-add path on TransferStatus.
    total = TransferStatus(1, 2)
    total += TransferStatus(2, 1)
    expected = TransferStatus(3, 3)
    assert total == expected, f"Expected {total!r} to be {expected!r} ..."

    # Same check with all four counters populated.
    total_full = TransferStatus(1, 2, 1, 2)
    total_full += TransferStatus(2, 1, 2, 1)
    expected_full = TransferStatus(3, 3, 3, 3)
    assert (
        total_full == expected_full
    ), f"Expected {total_full!r} to be {expected_full!r} ..."


def test_download_status_asdict():
    # asdict must expose every counter, including the defaulted ones.
    result = asdict(TransferStatus(3, 3))
    expected = {
        "existing": 3,
        "success": 3,
        "failed": 0,
        "not_found": 0,
        "exceptions": [],
    }
    assert result == expected, f"Expected {result!r} to be {expected!r} ..."
# Lazily created module-wide session so downloads share one connection pool.
_SESSION: requests.Session | None = None


def get_session() -> requests.Session:
    """
    Return the shared HTTP session, creating it on first use.

    """
    global _SESSION
    session = _SESSION
    if session is None:
        session = requests.Session()
        _SESSION = session
    return session
timeout=30) 47 | 48 | if not response.ok: 49 | # Calling it a failure, without knowing the cause of the error. 50 | status.failed += 1 51 | continue 52 | 53 | ofname.write_bytes(response.content) 54 | status.success += 1 55 | 56 | return status 57 | -------------------------------------------------------------------------------- /src/ab/data/stats.py: -------------------------------------------------------------------------------- 1 | """ 2 | Probe local files. 3 | 4 | """ 5 | 6 | import os 7 | import datetime as dt 8 | from typing import Any 9 | from collections.abc import Iterable 10 | import math 11 | from pathlib import Path 12 | import functools 13 | 14 | import logging 15 | 16 | 17 | log = logging.getLogger(__name__) 18 | 19 | 20 | def date_changed(fname: Path | str) -> dt.date: 21 | """ 22 | Return the last modification date for given file. 23 | 24 | """ 25 | fname = Path(fname) 26 | if not fname.is_file(): 27 | raise ValueError(f"File {fname!r} does not exist ...") 28 | return dt.datetime.fromtimestamp(fname.stat().st_ctime).date() 29 | 30 | 31 | def file_age(fname: Path) -> int: 32 | """ 33 | Returns age of file in days since today. 34 | 35 | """ 36 | return (dt.date.today() - date_changed(fname)).days 37 | 38 | 39 | def already_updated(fname: Path, *, max_age: int | float = math.inf) -> bool: 40 | """ 41 | A file is already updated if it exists, and it is newer than the given 42 | maximum age. 43 | 44 | """ 45 | return fname.is_file() and file_age(fname) < max_age 46 | 47 | 48 | @functools.cache 49 | def dir_size(start_path: str = ".") -> float: 50 | """ 51 | Return size of files in directory, excluding symbolic links. 
def test_create_Source_using_strings():

    # Arrange
    identifier: str = "SOURCE"
    description: str = "DESCRIPTION"
    url: str | Path = "file:///path/to/file.txt"
    destination: str | Path = "file:///path/to/destination"

    # Act
    source = Source(identifier, description, url, destination)

    # Assert: derived attributes all follow from the string inputs.
    checks = (
        (source.url_, "file:///path/to/file.txt"),
        (source.destination_, "file:/path/to/destination"),
        (source.protocol, "file"),
    )
    for result, expected in checks:
        assert result == expected, f"Expected {result!r} to be {expected!r} ..."
59 | -------------------------------------------------------------------------------- /tests/ab/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import pytest 5 | import yaml 6 | 7 | 8 | @pytest.fixture(scope="session", autouse=True) 9 | def environment(tmp_path_factory): 10 | 11 | # Physical environment 12 | __ab__ = tmp_path_factory.mktemp("__ab__") 13 | ifname_env = Path(__file__).parent / "test_env.yaml" 14 | schema = yaml.safe_load(ifname_env.read_text()) 15 | 16 | # Create and return the argument to enable single-line assignment and creation 17 | mkdir = lambda path: path.mkdir(exist_ok=True, parents=True) or path 18 | touch = lambda fname: fname.touch() or fname 19 | 20 | # Record keeping 21 | keys_to_remove = [] 22 | 23 | # Set environment variables and create directories and files 24 | for env_var in schema: 25 | 26 | if isinstance(env_var, str): 27 | path = mkdir(__ab__ / env_var) 28 | os.environ[env_var] = str(path) 29 | keys_to_remove.append(env_var) 30 | continue 31 | 32 | if isinstance(env_var, dict): 33 | key, value = list(env_var.items())[0] 34 | 35 | if isinstance(value, str): 36 | os.environ[key] = value 37 | keys_to_remove.append(key) 38 | 39 | elif isinstance(value, list): 40 | path = mkdir(__ab__ / key) 41 | os.environ[key] = str(path) 42 | keys_to_remove.append(key) 43 | for fname in value: 44 | touch(path / fname) 45 | continue 46 | 47 | raise RuntimeError("Schema not correctly structured ...") 48 | 49 | # Go test ... 
METHODS_SUPPORTED: Final = (
    "upper",
    "lower",
    "title",
    "capitalize",
    "lstrip",
    "rstrip",
    "strip",
    "removeprefix",
    "removesuffix",
    "replace",
    "ljust",
    "rjust",
    "swapcase",
    "zfill",
)
"Limit user input by only allowing these string methods"


@dataclass
class OperatorArguments:
    """
    Bundle of user-supplied arguments for a `!StringTransform` tag.

    """

    # NOTE(review): despite the `str` annotation, the constructor iterates
    # `sequence` and requires each element to be a string — presumably a
    # list of strings; confirm against callers.
    sequence: str
    method: str
    # Positional arguments forwarded to the chosen string method.
    arguments: Iterable[Any] = ()
Got {sequence!r} ...") 67 | if not method in METHODS_SUPPORTED: 68 | raise ValueError(f"Transform method {method!r} not supported ...") 69 | return [Operator(s).operate(method, *arguments) for s in sequence] 70 | -------------------------------------------------------------------------------- /src/ab/cli/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Command-line interface for AutoBernese 3 | 4 | """ 5 | 6 | import logging 7 | 8 | import click 9 | from click_aliases import ClickAliasedGroup 10 | from rich import print 11 | 12 | from ab import configuration 13 | from ab.cli import ( 14 | _input, 15 | about, 16 | config, 17 | logs, 18 | qc, 19 | dateinfo, 20 | campaign, 21 | station, 22 | troposphere, 23 | download, 24 | ) 25 | 26 | 27 | log = logging.getLogger(__name__) 28 | 29 | 30 | @click.group(cls=ClickAliasedGroup, invoke_without_command=True) 31 | @click.option("--version", "show_version", is_flag=True, default=False) 32 | @click.option("--bsw-release", "bsw_release", is_flag=True, default=False) 33 | @click.pass_context 34 | def main(ctx: click.Context, show_version: bool, bsw_release: bool) -> None: 35 | """ 36 | AutoBernese is a tool that can 37 | 38 | 1. Create Bernese campaigns using its built-in template system. 39 | 40 | 2. Download and organise data for common or campaign use. 41 | 42 | 3. Run GNSS-related tasks such as the Bernese Processing Engine and more. 43 | 44 | 4. Perform various stand-alone operations needed for a complete workflow. 
log = logging.getLogger(__name__)

# Robustness fix: `os.get_terminal_size()` raises OSError when stdout is not
# a terminal (pipes, cron, CI); fall back to a conventional width then.
try:
    _columns = os.get_terminal_size().columns
except OSError:
    _columns = 80

TERM_WIDTH: Final = _columns


def _check_fill(fill: str) -> None:
    # Explicit validation instead of `assert`, which is stripped under -O.
    if not isinstance(fill, str) or len(fill) != 1:
        raise ValueError(f"Fill must be a single character. Got {fill!r} ...")


def divide(fill: str = "=", /) -> str:
    """
    Return a full-terminal-width divider line made of `fill`.

    Raises ValueError when `fill` is not a single character.

    """
    _check_fill(fill)
    return fill * TERM_WIDTH


def title_divide(s: str, fill: str = "=", /, *, pad: int = 1) -> str:
    """
    Return a full-width divider with `s` title-cased, padded by `pad`
    spaces on each side and centred in `fill` characters.

    Raises ValueError when `fill` is not a single character.

    """
    _check_fill(fill)
    padding = " " * pad
    return f"{padding}{s.title()}{padding}".center(TERM_WIDTH, fill)
42 | ) 43 | postfix = "[green][ done ][/]" 44 | else: 45 | log.info( 46 | f"{task.identifier} failed with exception ({result.exception}) ..." 47 | ) 48 | postfix = "[red][ error ][/]" 49 | 50 | # A single line for the task ID and overall status 51 | print(title_divide("Task", "-")) 52 | print(f"{task.identifier}: {postfix}") 53 | 54 | # The result if this is returned from the function called by run 55 | if result.return_value: 56 | print(title_divide("Return value", ".")) 57 | print(result.return_value) 58 | # print(divide("-")) 59 | 60 | # Captured exception 61 | if result.exception: 62 | print(title_divide("Exception", ".")) 63 | print(result.exception) 64 | 65 | print(divide()) 66 | print() 67 | -------------------------------------------------------------------------------- /src/ab/data/file.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copy local files 3 | 4 | """ 5 | 6 | from pathlib import Path 7 | import shutil 8 | import logging 9 | from collections.abc import Iterable 10 | import re 11 | 12 | RINEX2_PATTERN = r"\w{4}\d{3}\w\.\d{1,2}[oOdDnNgG]\.[zZ]" 13 | 14 | from ab import configuration 15 | from ab.paths import resolve_wildcards 16 | from ab.data import TransferStatus 17 | from ab.data.source import Source 18 | from ab.data.stats import already_updated 19 | 20 | 21 | log = logging.getLogger(__name__) 22 | 23 | 24 | def download(source: Source) -> TransferStatus: 25 | """ 26 | Download local paths resolved from a Source instance. 27 | 28 | """ 29 | status = TransferStatus() 30 | 31 | for pair in source.resolve(): 32 | destination = Path(pair.path_local) 33 | destination.mkdir(parents=True, exist_ok=True) 34 | 35 | # Loop over each file resolved 36 | for ifname in resolve_wildcards(pair.uri): 37 | # dirty workaround to fix naming of RINEX2 files (Bernese stumbles 38 | # a bit when station names are lower case). 
39 | if re.match(RINEX2_PATTERN, ifname.name): 40 | ofname = destination / ifname.name.upper() 41 | else: 42 | ofname = destination / ifname.name 43 | 44 | if not ifname.is_file(): 45 | log.warning(f"File {ifname!r} not found ...") 46 | status.not_found += 1 47 | continue 48 | 49 | if already_updated(ofname, max_age=source.max_age): 50 | log.debug(f"{ofname.name!r} already downloaded ...") 51 | status.existing += 1 52 | continue 53 | 54 | log.info(f"Copy {ifname} to {ofname} ...") 55 | shutil.copy2(ifname, ofname) 56 | 57 | if not ofname.is_file(): 58 | log.warning(f"File {ofname.name!r} not copied ...") 59 | status.failed += 1 60 | continue 61 | 62 | status.success += 1 63 | 64 | return status 65 | -------------------------------------------------------------------------------- /tests/ab/bsw/test_campaign.py: -------------------------------------------------------------------------------- 1 | from ab.bsw.campaign import ( 2 | build_campaign_menu, 3 | ) 4 | 5 | 6 | def test_build_campaign_menu(): 7 | campaigns = [ 8 | "/path/to/CAMPAIGN54/foo2222", 9 | "/some/other/path/tmp/foo", 10 | ] 11 | expected = """\ 12 | 13 | ! List of Campaigns 14 | ! ----------------- 15 | CAMPAIGN 2 16 | "/path/to/CAMPAIGN54/foo2222" 17 | "/some/other/path/tmp/foo" 18 | ## widget = uniline; numlines = 30 19 | 20 | MSG_CAMPAIGN 1 "Campaign directory" 21 | 22 | 23 | # BEGIN_PANEL NO_CONDITION ##################################################### 24 | # EDIT LIST OF CAMPAIGNS - MENU_CMP # 25 | # # 26 | # > Campaign_directory < # CAMPAIGN 27 | # # 28 | # END_PANEL #################################################################### 29 | """ 30 | result = build_campaign_menu(campaigns) 31 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 32 | 33 | 34 | def test_build_campaign_menu_empty(): 35 | campaigns = [] 36 | expected = """\ 37 | 38 | ! List of Campaigns 39 | ! 
----------------- 40 | CAMPAIGN 0 41 | ## widget = uniline; numlines = 30 42 | 43 | MSG_CAMPAIGN 1 "Campaign directory" 44 | 45 | 46 | # BEGIN_PANEL NO_CONDITION ##################################################### 47 | # EDIT LIST OF CAMPAIGNS - MENU_CMP # 48 | # # 49 | # > Campaign_directory < # CAMPAIGN 50 | # # 51 | # END_PANEL #################################################################### 52 | """ 53 | result = build_campaign_menu(campaigns) 54 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 55 | -------------------------------------------------------------------------------- /tests/ab/station/test_sitelog.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from ab.station.sitelog import Sitelog 4 | 5 | # https://www.epncb.oma.be/ftp/station 6 | 7 | 8 | def test_sitelog_parsing_with_default_four_id_characters(): 9 | directory = Path(__file__).parent / "log" 10 | ifnames = directory.glob("*.log") 11 | for ifname in ifnames: 12 | # Instance opens provided file and parses it 13 | # It will fail, when not working. 14 | expected = ifname.name[: ifname.name.find("_")].upper() 15 | "Station_ID from the filename (ignoring the modification date) in uppercase" 16 | try: 17 | sitelog = Sitelog(ifname) 18 | except Exception as e: 19 | assert False, f"Error reading {ifname.name!r} ..." 20 | 21 | four_character_id = sitelog.section_1.four_character_id 22 | assert ( 23 | four_character_id is not None 24 | ), f"Expected to find `four_character_id` in {ifname} ..." 25 | 26 | result = four_character_id.upper() 27 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 28 | 29 | 30 | def test_sitelog_parsing_with_nine_id_characters(): 31 | directory = Path(__file__).parent / "log_9char" 32 | ifnames = directory.glob("*.log") 33 | for ifname in ifnames: 34 | # Instance opens provided file and parses it 35 | # It will fail, when not working. 
36 | expected = ifname.name[: ifname.name.find("_")].upper() 37 | "Station_ID from the filename (ignoring the modification date) in uppercase" 38 | try: 39 | sitelog = Sitelog(ifname, preferred_station_id_length="nine") 40 | except Exception as e: 41 | assert False, f"Error reading {ifname.name!r}. Got {e.__class__}: {e} ..." 42 | 43 | nine_character_id = sitelog.section_1.nine_character_id 44 | assert ( 45 | nine_character_id is not None 46 | ), f"Expected to find `nine_character_id` in {ifname} ..." 47 | 48 | result = nine_character_id # .upper() 49 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 50 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | dev_addr: localhost:7000 2 | site_name: GNSS-Process Automation with Bernese GNSS Software 3 | site_description: 4 | site_author: Joachim Mortensen 5 | 6 | repo_name: AutoBernese 7 | repo_url: https://github.com/SDFIdk/AutoBernese 8 | 9 | nav: 10 | - Home: index.md 11 | - User manual: 12 | - Install AutoBernese: manual/install-autobernese.md 13 | - Quick start: manual/quick-start.md 14 | - Command-line reference: manual/command-reference.md 15 | - Configuration files: manual/configuration-files.md 16 | - Download sources: manual/download-sources.md 17 | - Development: 18 | - Contribute: dev/contribute.md 19 | - System overview: dev/system-overview.md 20 | - Prerequisites: prerequisites.md 21 | - References: references.md 22 | 23 | theme: 24 | name: material 25 | logo: assets/logo.png 26 | favicon: assets/logo.png 27 | 28 | language: en 29 | features: 30 | - navigation.sections 31 | - navigation.expand 32 | - navigation.top 33 | 34 | extra_css: 35 | - stylesheets/extra.css 36 | - stylesheets/asciinema-player.css 37 | 38 | extra_javascript: 39 | - javascript/asciinema-player.min.js 40 | 41 | plugins: 42 | - search 43 | - glightbox 44 | - kroki: 45 | FileTypes: 46 | 
- png 47 | - svg 48 | # - mkdocs-asciinema 49 | 50 | markdown_extensions: 51 | - admonition 52 | - attr_list 53 | - footnotes 54 | - toc: 55 | permalink: true 56 | 57 | - pymdownx.arithmatex: 58 | generic: true 59 | 60 | - pymdownx.betterem: 61 | smart_enable: all 62 | 63 | # Make admonitions collapsable with ??? instead of !!! 64 | - pymdownx.details 65 | 66 | - pymdownx.emoji: 67 | emoji_index: !!python/name:material.extensions.emoji.twemoji 68 | emoji_generator: !!python/name:material.extensions.emoji.to_svg 69 | 70 | - pymdownx.highlight: 71 | anchor_linenums: true 72 | - pymdownx.inlinehilite 73 | 74 | - pymdownx.keys 75 | 76 | - pymdownx.superfences: 77 | custom_fences: 78 | - name: mermaid 79 | class: mermaid 80 | format: !!python/name:pymdownx.superfences.fence_code_format 81 | - pymdownx.tabbed: 82 | alternate_style: true 83 | - pymdownx.tasklist: 84 | custom_checkbox: true 85 | - pymdownx.tilde 86 | - pymdownx.snippets 87 | 88 | - mkdocs_graphviz 89 | - md_in_html -------------------------------------------------------------------------------- /src/ab/configuration/tasks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Configuration tools to build TaskDefinition instances 3 | 4 | """ 5 | 6 | import typing as t 7 | import sys 8 | 9 | from ab.typing import AnyFunction 10 | from ab.configuration import ( 11 | SectionListItemType, 12 | dispatchers, 13 | ) 14 | from ab.tasks import TaskDefinition 15 | from ab.imports import import_function 16 | from ab.data import compress 17 | from ab.bsw import bpe 18 | from ab.data import sftp 19 | from ab.station import sta 20 | from ab import vmf 21 | 22 | 23 | _MODULE: t.Final = sys.modules[__name__] 24 | "This module" 25 | 26 | 27 | _SHORTCUTS: dict[str, AnyFunction] = { 28 | # Use as value for `run` key 29 | "RunBPE": bpe.run_bpe, 30 | "Compress": compress.gzip, 31 | "CompressGlob": compress.gzip_glob, 32 | "SFTPUpload": sftp.upload, 33 | "Sitelogs2STAFile": 
def load(kwargs: SectionListItemType) -> TaskDefinition:
    """
    Build a `TaskDefinition` from a raw configuration mapping.

    The mandatory `run` entry and the optional `dispatch_with` entry are
    resolved from shortcut names or dotted paths to actual callables before
    the `TaskDefinition` is constructed.

    Raises:
        RuntimeError: if the mandatory `run` key is missing.

    """
    key = "run"
    # `key not in ...` is the idiomatic membership test (was `not key in`).
    if key not in kwargs:
        raise RuntimeError(f"Expected {key!r} to be in task definition ...")
    kwargs = {**kwargs, key: get_func(kwargs[key])}

    key = "dispatch_with"
    if key in kwargs:
        kwargs = {**kwargs, key: get_func(kwargs[key])}

    return TaskDefinition(**kwargs)
----------------------------- 17 | 18 | STATION NAME FLG FROM TO RECEIVER TYPE RECEIVER SERIAL NBR REC # ANTENNA TYPE ANTENNA SERIAL NBR ANT # NORTH EAST UP AZIMUTH LONG NAME DESCRIPTION REMARK 19 | **************** *** YYYY MM DD HH MM SS YYYY MM DD HH MM SS ******************** ******************** ****** ******************** ******************** ****** ***.**** ***.**** ***.**** ****.* ********* ********************** ************************ 20 | {type_2_rows} 21 | 22 | 23 | TYPE 003: HANDLING OF STATION PROBLEMS 24 | -------------------------------------- 25 | 26 | STATION NAME FLG FROM TO REMARK 27 | **************** *** YYYY MM DD HH MM SS YYYY MM DD HH MM SS ************************************************************ 28 | {type_3_rows} 29 | 30 | 31 | TYPE 004: STATION COORDINATES AND VELOCITIES (ADDNEQ) 32 | ----------------------------------------------------- 33 | RELATIVE CONSTR. POSITION RELATIVE CONSTR. VELOCITY 34 | STATION NAME 1 STATION NAME 2 NORTH EAST UP NORTH EAST UP 35 | **************** **************** **.***** **.***** **.***** **.***** **.***** **.***** 36 | {type_4_rows} 37 | 38 | 39 | TYPE 005: HANDLING STATION TYPES 40 | -------------------------------- 41 | 42 | STATION NAME FLG FROM TO MARKER TYPE REMARK 43 | **************** *** YYYY MM DD HH MM SS YYYY MM DD HH MM SS ******************** ************************ 44 | {type_5_rows} 45 | -------------------------------------------------------------------------------- /docs/manual/assets/autobernese.yaml: -------------------------------------------------------------------------------- 1 | campaign: 2 | directories: 3 | - name: ATM 4 | - name: BPE 5 | - name: GEN 6 | files: 7 | - !Path [*CONFIG, OBSERV.SEL] 8 | - !Path [*PAN, SESSIONS.SES] 9 | - name: GRD 10 | - name: OBS 11 | - name: ORB 12 | - name: ORX 13 | - name: OUT 14 | - name: RAW 15 | - name: SOL 16 | - name: STA 17 | 18 | troposphere: 19 | ipath: &TROPOSPHERE_IPATH !Path [*D, VMF3, '1x1_OP_H', '{date.year}'] 20 | opath: 
&TROPOSPHERE_OPATH !Path [*D, VMF3, '1x1_OP_GRD', '{date.year}'] 21 | 22 | sources: 23 | 24 | - identifier: EUREF54_20_STA 25 | description: EUREF STA file from epncb 26 | url: ftp://epncb.oma.be/pub/station/general/EUREF54_20.STA 27 | destination: !Path [*D, REF54] 28 | max_age: 1 29 | 30 | - identifier: BSW_MODEL 31 | description: BSW Model data 32 | url: ftp://ftp.aiub.unibe.ch/BSWUSER54/MODEL/ 33 | destination: *MODEL 34 | filenames: ['*'] 35 | max_age: 1 36 | 37 | - identifier: BSW_CONFIG 38 | description: BSW Configuration data 39 | url: ftp://ftp.aiub.unibe.ch/BSWUSER54/CONFIG/ 40 | destination: *CONFIG 41 | filenames: ['*'] 42 | max_age: 1 43 | 44 | - identifier: BSW_REF 45 | description: Universal and BSW-specific antenna files 46 | url: ftp://ftp.aiub.unibe.ch/BSWUSER54/REF/ 47 | destination: !Path [*D, REF54] 48 | filenames: 49 | - ANTENNA_I20.PCV 50 | - I20.ATX 51 | - FES2014b.BLQ 52 | 53 | - IGB14.CRD 54 | - IGB14.FIX 55 | - IGB14.PSD 56 | - IGB14.VEL 57 | 58 | - IGS14.CRD 59 | - IGS14.FIX 60 | - IGS14.PSD 61 | - IGS14.VEL 62 | 63 | - IGS20.CRD 64 | - IGS20.FIX 65 | - IGS20.PSD 66 | - IGS20.VEL 67 | 68 | - IGB14_R.CRD 69 | - IGB14_R.VEL 70 | 71 | - IGS14_R.CRD 72 | - IGS14_R.VEL 73 | 74 | - IGS20_R.CRD 75 | - IGS20_R.VEL 76 | max_age: 1 77 | 78 | - identifier: VMF3 79 | description: Troposphere mapping function (VMF3/grid/1x1/operational) 80 | url: https://vmf.geo.tuwien.ac.at/trop_products/GRID/1x1/VMF3/VMF3_OP/{date.year}/VMF3_{date.year}{date.month:02d}{date.day:02d}.H{hour} 81 | destination: *TROPOSPHERE_IPATH 82 | parameters: 83 | date: !DateRange { beg: 2024-06-01, end: 2024-06-01, extend_end_by: 1 } 84 | hour: ['00', '06', '12', '18'] 85 | 86 | station: 87 | sitelogs: !Path [*D, sitelogs, '*.log'] 88 | individually_calibrated: [] 89 | output_sta_file: !Path [*D, station, sitelogs.STA] 90 | -------------------------------------------------------------------------------- /src/ab/cli/_options.py: 
def set_yes(ctx: Context, param: Argument | Option, value: bool) -> None:
    """
    Click callback for the `--yes`/`-y` flag: when the flag is set, switch
    the shared input helpers to auto-confirm mode so prompts proceed
    without asking the user.
    """
    if value is not True:
        return
    _input.set_prompt_proceed_yes()
def ensure_string(s: str) -> str:
    """
    Return `s` unchanged after verifying that it actually is a `str`.

    Raises:
        TypeError: if `s` is not a string.

    """
    if not isinstance(s, str):
        # Bug fix: the exception was previously constructed but never
        # raised (and the message lacked the `f` prefix), so non-string
        # input slipped through silently.
        raise TypeError(f"Expected {s!r} to be `str` ...")
    return s
37 | 38 | """ 39 | bpe_env = dict( 40 | AB_BPE_PCF_FILE=ensure_string(pcf_file), 41 | AB_BPE_CAMPAIGN=ensure_string(campaign), 42 | AB_BPE_YEAR=ensure_string(year), 43 | AB_BPE_SESSION=ensure_string(session), 44 | AB_BPE_SYSOUT=ensure_string(sysout), 45 | AB_BPE_STATUS=ensure_string(status), 46 | AB_BPE_TASKID=ensure_string(taskid), 47 | AB_BPE_CPU_FILE=ensure_string(cpu_file), 48 | ) 49 | 50 | log.info(f"Using the following PCF metadata as input:") 51 | sz = max(len(key) for key in bpe_env) 52 | for key, value in bpe_env.items(): 53 | log.info(f"{key: <{sz}s}: {value}") 54 | 55 | process: sub.Popen | None = None # type: ignore 56 | try: 57 | log.debug(f"Run BPE runner ...") 58 | process = sub.Popen( 59 | f"{pkg.bpe_runner}", 60 | env={**os.environ, **bpe_env}, 61 | stdout=sub.PIPE, 62 | stderr=sub.STDOUT, 63 | universal_newlines=True, 64 | ) 65 | output = "" 66 | for line in process.stdout: # type: ignore 67 | output += line 68 | log.info(line.strip()) 69 | 70 | log.debug(f"BPE runner finished ...") 71 | return parse_bpe_terminal_output(output) 72 | 73 | except KeyboardInterrupt: 74 | log.debug(f"BPE runner killed ...") 75 | # Re-raise so that external caller can adapt accordingly 76 | raise 77 | 78 | finally: 79 | if process is not None: 80 | process.terminate() 81 | process.kill() 82 | -------------------------------------------------------------------------------- /src/ab/bsw/bpe_terminal_output.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from dataclasses import dataclass 3 | 4 | from string import Template 5 | from typing import Any 6 | 7 | 8 | @dataclass 9 | class BPETerminalOutput: 10 | """ 11 | Container for result printed in the terminal: 12 | 13 | """ 14 | 15 | beg: dt.datetime 16 | username: str 17 | pcf_file: str 18 | cpu_file: str 19 | campaign: str 20 | year_session: str 21 | output_file: str 22 | status_file: str 23 | end: dt.datetime 24 | server_pid: str = "UNKNOWN" 25 | ok: bool 
def parse_bpe_terminal_output(
    raw: str, substitutes: dict[str, str] | None = None
) -> BPETerminalOutput:
    """
    Extract the fields of a `BPETerminalOutput` from the raw terminal text
    produced by a BPE run, optionally substituting `${...}` placeholders
    first.
    """
    if substitutes is not None:
        raw = Template(raw).safe_substitute(substitutes)

    timestamp_format = "%d-%b-%Y %H:%M:%S"
    # Markers whose value is simply the remainder of the line after them.
    value_markers = {
        "PCFile:": "pcf_file",
        "CPU file:": "cpu_file",
        "Campaign:": "campaign",
        "Year/session:": "year_session",
        "BPE output:": "output_file",
        "BPE status:": "status_file",
        "BPE server runs PID =": "server_pid",
    }

    parsed: dict[str, Any] = {}
    for line in raw.splitlines():
        line = line.strip()
        if not line:
            continue
        if line.startswith("Starting BPE on "):
            # Timestamps occupy the last 20 characters of the line.
            parsed["beg"] = dt.datetime.strptime(line[-20:], timestamp_format)
        elif line.endswith("@"):
            # Username line is of the form "<user>@".
            parsed["username"] = line[:-1]
        elif line.startswith(("BPE finished", "BPE error")):
            parsed["end"] = dt.datetime.strptime(line[-20:], timestamp_format)
        elif line.startswith("User script error"):
            parsed["ok"] = False
        else:
            for marker, field_name in value_markers.items():
                if line.startswith(marker):
                    parsed[field_name] = line.split(marker)[-1].strip()
                    break

    return BPETerminalOutput(**parsed)
7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | ## Custom additions 132 | workspace/structurizr/.structurizr/ 133 | workspace/structurizr/eula.properties 134 | workspace/structurizr/workspace.json 135 | 136 | __ab__ 137 | 138 | *.code-workspace 139 | 140 | scripts -------------------------------------------------------------------------------- /docs/manual/install-autobernese.md: -------------------------------------------------------------------------------- 1 | 2 | ## Prerequisites 3 | 4 | Before installing AutoBernese, you must have the following software installed: 5 | 6 | * [Bernese GNSS Software version 5.4][BSW] 7 | * [Git] for downloading [the AutoBernese source code][GH-AB] 8 | * [The MambaForge Python distribution](../prerequisites.md) 9 | 10 | [BSW]: http://www.bernese.unibe.ch/ 11 | [Git]: https://git-scm.com/download/linux 12 | [GH-AB]: https://github.com/SDFIdk/AutoBernese 13 | 14 | 15 | ## Install AutoBernese 16 | 17 | * Using git on your system, get the code by cloning the repository to your 18 | machine. 
To automatically activate the AutoBernese mamba environment whenever you open a
terminal, add the activation command to your login script [`~/.bashrc` or
similar for your shell]:
def test_merge():
    """
    Exercise `merge` with valid, duplicated, missing, and no input files.
    """
    dir_ = Path(__file__).parent / "configuration_files"
    ifname_1 = dir_ / "merge_1"
    ifname_2 = dir_ / "merge_2"

    expected = """\
merge_1

merge_2
"""

    # Nice input
    result = merge(ifname_1, ifname_2)
    assert result == expected, f"Expected {result!r} to be {expected!r} ..."

    # Duplicate filenames (and `str` paths) are accepted
    result = merge(ifname_1, ifname_2, ifname_1, str(ifname_2))
    assert result == expected, f"Expected {result!r} to be {expected!r} ..."

    # Missing files raise; the unused `as info` binding was removed
    ifname_missing = dir_ / "missing"
    with pytest.raises(RuntimeError):
        merge(ifname_1, ifname_2, ifname_missing)

    # No files
    assert merge() == "", f"Expected no arguments to give an empty string ..."
def test_load():
    """
    Verify how `load` renders configurations from one or more files:
    core sections are stripped by default, kept with `keep_env=True`,
    and later files override sections from earlier ones.
    """
    dir_ = Path(__file__).parent / "configuration_files"
    ifname_common = dir_ / "common.yaml"
    ifname_campaign = dir_ / "campaign.yaml"

    # Verify that core sections are removed, when filename is provided
    env_common = load(ifname_common)
    for key in _POP:
        assert (
            key not in env_common
        ), f"Expected {key!r} to not be in rendered configuration {ifname_common.stem!r}"

    # Any other section is allowed
    assert env_common.get("troposphere") is not None

    # Verify that core sections are available
    # (`keep_env=True` prevents the `_POP` keys from being removed)
    env_campaign = load(ifname_campaign, keep_env=True)
    for key in _POP:
        assert (
            key in env_campaign
        ), f"Expected {key!r} to be in rendered configuration {ifname_campaign.stem}"

    # Verify that section is overridden
    # (the campaign file comes last, so its `troposphere` section wins)
    env_common_campaign = load(ifname_common, ifname_campaign)
    expected = env_campaign.get("troposphere").get("ipath")
    result = env_common_campaign.get("troposphere").get("ipath")
    assert result == expected, f"Expected {result!r} to be {expected!r} ..."
def test_load_arbitrary_configurations():
    """
    Verify that cumulative concatenations of arbitrary configuration files
    parse as valid YAML.

    The previous version bound the parsed documents to unused locals,
    asserted nothing, and kept commented-out IPython debug code; this
    version keeps the same coverage (each concatenation must parse without
    raising) without the dead code.
    """
    import yaml

    dir_ = Path(__file__).parent / "configuration_files"
    contents = [
        (dir_ / filename).read_text() for filename in ("a.yaml", "b.yaml", "c.yaml")
    ]

    # Parse a, a+b and a+b+c; a YAML error in any combination fails the test.
    for count in range(1, len(contents) + 1):
        yaml.safe_load("\n".join(contents[:count]))
50 | 51 | For our purposes, we only need to check the residuals (reference minus 52 | result) of the coordinates for the stations that had their coordinates 53 | calculated. 54 | 55 | Assumptions include: 56 | 57 | * The stations available in the reference result files are in the same 58 | order and include the same stations that are available in the results we 59 | produce ourselves. 60 | 61 | """ 62 | # Make sure we only use a single character 63 | replacement = replacement[0] 64 | 65 | pairs = check_example.get_available_comparables() 66 | for pair in pairs: 67 | reference = check_example.extract_coordinates(pair.ref.read_text()) 68 | result = check_example.extract_coordinates(pair.res.read_text()) 69 | 70 | diff = reference - result 71 | 72 | print(f"Reference ({reference.date}): {pair.ref.name}") 73 | print(f"Result ({result.date}): {pair.res.name}") 74 | sz = 8 75 | header = f"{'ID': <4s} {'Delta x': >{sz}s} {'Delta y': >{sz}s} {'Delta z': >{sz}s} F" 76 | print(f"{'Delta = Reference - Result': ^{len(header)}s}") 77 | print(f"{f'! 
def permutations(parameters: ParametersType) -> list[PermutationType]:
    """
    Parameter expansion for a mapping with at least one key and a sequence of
    at least one value.

    Example:
    --------
    Given

        {
            'year': [2021, 2022], 'hour': ['01']
        }

    this function returns

        [
            {'year': 2021, 'hour': '01'}, {'year': 2022, 'hour': '01'},
        ]

    """
    # Take the Cartesian product over the value sequences directly.  The
    # previous implementation first inverted the mapping as
    # `{tuple(values): key}`, which silently dropped parameters whose value
    # sequences were equal (duplicate dict keys) and needlessly required the
    # values to be convertible to a hashable tuple.
    return [
        dict(zip(parameters, combination))
        for combination in it.product(*parameters.values())
    ]
79 | 80 | """ 81 | if not parameters: 82 | return [arguments] 83 | 84 | return [ 85 | {key: format_strings(value, permutation) for (key, value) in arguments.items()} 86 | for permutation in permutations(parameters) 87 | ] 88 | 89 | 90 | def format_strings(structure: Any, permutation: PermutationType) -> Any: 91 | 92 | if isinstance(structure, dict): 93 | return { 94 | key: format_strings(value, permutation) 95 | for (key, value) in structure.items() 96 | } 97 | 98 | if isinstance(structure, list): 99 | return [format_strings(value, permutation) for value in structure] 100 | 101 | if isinstance(structure, str): 102 | return structure.format_map(permutation) 103 | 104 | return structure 105 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [GNSS-Process Automation with Bernese GNSS Software](https://github.com/SDFIdk/AutoBernese) 2 | 3 | ## The package 4 | 5 | AutoBernese is written i Python and the package is maintained by the [Danish 6 | Agency for Climate Data](https://eng.kds.dk/) 7 | 8 | The package is mainly built for our internal use, but as the tool may have some 9 | general application, it is published with the hope that it may be useful for 10 | other users of Bernese. 11 | 12 | The package is still a work in progress as far as point 5. below is concerned. 13 | Note also, for instance, that all development assumes that AutoBernese runs in a 14 | Linux environment. 15 | 16 | If you want to reach out, feel free to open an issue on GitHub [see link to the 17 | archive in the top-right corner of this website]. 18 | 19 | Note, however, that we do not offer any support other than what is already 20 | provided here and in the code comments and documentation strings. 
With its generalised workflow, AutoBernese is prepared, specifically, for a
multi-user scenario, giving users the ability to easily share resources such as
a common configuration and templates for generalised campaign workflow.
def test_parse_bpe_terminal_output():
    """
    Parse BPE terminal output for a successful run (with and without
    substitution of the Bernese path variables) and for a failed run.

    """
    output_success = """\
CPU File ${U}/PAN/USER.CPU has been reset

Starting BPE on 10-Jan-2024 15:03:50
------------------------------------
@

PCFile: ${U}/PCF/ITRF.PCF
CPU file: ${U}/PAN/USER.CPU
Campaign: ${P}/EXAMPLE
Year/session: 2021/0960
BPE output: ${P}/EXAMPLE/BPE/ITRF_0960.OUT
BPE status: ${P}/EXAMPLE/BPE/ITRF_0960.RUN

BPE server runs PID = 24792

BPE finished at 10-Jan-2024 15:04:14
------------------------------------

"""
    result = parse_bpe_terminal_output(output_success)
    expected = BPETerminalOutput(
        beg=dt.datetime(2024, 1, 10, 15, 3, 50),
        username="",
        pcf_file="${U}/PCF/ITRF.PCF",
        cpu_file="${U}/PAN/USER.CPU",
        campaign="${P}/EXAMPLE",
        year_session="2021/0960",
        output_file="${P}/EXAMPLE/BPE/ITRF_0960.OUT",
        status_file="${P}/EXAMPLE/BPE/ITRF_0960.RUN",
        server_pid="24792",
        end=dt.datetime(2024, 1, 10, 15, 4, 14),
        ok=True,
    )
    assert result == expected, f"Expected {result!r} to be {expected!r} ..."

    # Same output, but with the Bernese path variables substituted
    substitutes = dict(
        P="/home/bsw/dev/data/CAMPAIGN54",
        U="/home/user/bsw/dev/user",
    )
    result_substituted = parse_bpe_terminal_output(output_success, substitutes)
    expected_substituted = BPETerminalOutput(
        beg=dt.datetime(2024, 1, 10, 15, 3, 50),
        username="",
        pcf_file="/home/user/bsw/dev/user/PCF/ITRF.PCF",
        cpu_file="/home/user/bsw/dev/user/PAN/USER.CPU",
        campaign="/home/bsw/dev/data/CAMPAIGN54/EXAMPLE",
        year_session="2021/0960",
        output_file="/home/bsw/dev/data/CAMPAIGN54/EXAMPLE/BPE/ITRF_0960.OUT",
        status_file="/home/bsw/dev/data/CAMPAIGN54/EXAMPLE/BPE/ITRF_0960.RUN",
        server_pid="24792",
        end=dt.datetime(2024, 1, 10, 15, 4, 14),
        ok=True,
    )
    # Fix: the assertion message used to interpolate `result`/`expected`
    # instead of the substituted pair actually being compared.
    assert (
        result_substituted == expected_substituted
    ), f"Expected {result_substituted!r} to be {expected_substituted!r} ..."

    # A user-script error in the output must be reported as a failed run
    output_failed = """\
CPU File ${U}/PAN/USER.CPU has been reset

Starting BPE on 10-Jan-2024 15:03:50
------------------------------------
@

PCFile: ${U}/PCF/ITRF.PCF
CPU file: ${U}/PAN/USER.CPU
Campaign: ${P}/EXAMPLE
Year/session: 2021/0960
BPE output: ${P}/EXAMPLE/BPE/ITRF_0960.OUT
BPE status: ${P}/EXAMPLE/BPE/ITRF_0960.RUN

BPE server runs PID = 24792

User script error in: ${P}/EXAMPLE/BPE/ITRF_0950.OUT

BPE finished at 10-Jan-2024 15:04:14
------------------------------------

"""
    result = parse_bpe_terminal_output(output_failed)
    expected = BPETerminalOutput(
        beg=dt.datetime(2024, 1, 10, 15, 3, 50),
        username="",
        pcf_file="${U}/PCF/ITRF.PCF",
        cpu_file="${U}/PAN/USER.CPU",
        campaign="${P}/EXAMPLE",
        year_session="2021/0960",
        output_file="${P}/EXAMPLE/BPE/ITRF_0960.OUT",
        status_file="${P}/EXAMPLE/BPE/ITRF_0960.RUN",
        server_pid="24792",
        end=dt.datetime(2024, 1, 10, 15, 4, 14),
        ok=False,
    )
    assert result == expected, f"Expected {result!r} to be {expected!r} ..."
| [22.55129, "o", "L"] 55 | [23.032948, "o", "O"] 56 | [23.374253, "o", "ADGPS.setvar "] 57 | [24.089803, "o", "\r\n"] 58 | [24.10586, "o", "(ab) $ "] 59 | [27.475784, "o", "a"] 60 | [28.021212, "o", "b"] 61 | [28.458168, "o", " "] 62 | [29.611836, "o", "c"] 63 | [29.850223, "o", "a"] 64 | [29.920853, "o", "m"] 65 | [30.13339, "o", "p"] 66 | [30.226303, "o", "a"] 67 | [30.381495, "o", "i"] 68 | [30.488736, "o", "g"] 69 | [30.568746, "o", "n"] 70 | [30.729445, "o", " "] 71 | [31.249678, "o", "r"] 72 | [31.316568, "o", "u"] 73 | [31.394076, "o", "n"] 74 | [31.514379, "o", " "] 75 | [31.834541, "o", "E"] 76 | [32.062373, "o", "X"] 77 | [32.26606, "o", "A"] 78 | [32.353703, "o", "M"] 79 | [32.538301, "o", "P"] 80 | [32.611666, "o", "L"] 81 | [32.752876, "o", "E"] 82 | [33.272996, "o", "\r\n"] 83 | [33.701506, "o", "CPU File ${U}/PAN/USER.CPU has been reset\r\n"] 84 | [34.34192, "o", "\r\n"] 85 | [34.342967, "o", "Starting BPE on 09-Jun-2023 12:02:25\r\n------------------------------------\r\nUSERNAME@\r\n\r\n"] 86 | [34.343008, "o", "PCFile: ${U}/PCF/PPP.PCF\r\nCPU file: ${U}/PAN/USER.CPU\r\nCampaign: ${P}/EXAMPLE\r\nYear/session: 2019/0440\r\nBPE output: ${P}/EXAMPLE/BPE/PPP_0440.OUT\r\nBPE status: ${P}/EXAMPLE/BPE/PPP_0440.RUN\r\n\r\n"] 87 | [34.343391, "o", "BPE server runs "] 88 | [34.381523, "o", "PID = 3492412\r\n"] 89 | [421.001119, "o", "\r\n"] 90 | [421.00164, "o", "BPE finished at 09-Jun-2023 12:08:49\r\n------------------------------------\r\n\r\n"] 91 | [421.116724, "o", "CPU File ${U}/PAN/USER.CPU has been reset\r\n"] 92 | [421.683905, "o", "\r\n"] 93 | [421.684346, "o", "Starting BPE on 09-Jun-2023 12:08:49\r\n------------------------------------\r\n"] 94 | [421.684386, "o", "USERNAME@\r\n\r\nPCFile: ${U}/PCF/PPP.PCF\r\nCPU file: ${U}/PAN/USER.CPU\r\nCampaign: ${P}/EXAMPLE\r\nYear/session: 2019/0450\r\nBPE output: ${P}/EXAMPLE/BPE/PPP_0450.OUT\r\nBPE status: ${P}/EXAMPLE/BPE/PPP_0450.RUN\r\n\r\n"] 95 | [421.684571, "o", "BPE server runs "] 96 | 
[421.719498, "o", "PID = 3530190\r\n"] 97 | [791.594345, "o", "\r\n"] 98 | [791.594879, "o", "BPE finished at 09-Jun-2023 12:14:59\r\n------------------------------------\r\n\r\n"] 99 | [793.963586, "o", "(ab) $ \u0007\u0007\u0007\u0007"] 100 | [793.963632, "o", "\u0007\u0007"] 101 | -------------------------------------------------------------------------------- /src/ab/configuration/constructors/dates.py: -------------------------------------------------------------------------------- 1 | """ 2 | Date constructors 3 | 4 | """ 5 | 6 | import datetime as dt 7 | import functools 8 | from typing import Any 9 | from collections.abc import Callable 10 | 11 | import yaml 12 | 13 | from ab.dates import ( 14 | date_range, 15 | dates_to_gps_date, 16 | GPSDate, 17 | ) 18 | 19 | 20 | def date_to_gps_date_constructor( 21 | loader: yaml.Loader, node: yaml.SequenceNode | yaml.ScalarNode 22 | ) -> list[GPSDate] | GPSDate: 23 | """ 24 | Convert a single instance or a list of Python date or datetime instances 25 | (can already be `GPSDate` instances as well) to GPSDate instances. 26 | 27 | The constructor assumes that the value of the node is a single-item sequence 28 | in which the item is the actual object to parse, i.e. 29 | 30 | ```yaml 31 | key: !Tag [] 32 | ``` 33 | 34 | This special syntax is chosen, because it allows the user to specify an 35 | alias to information in another part of the document rather than explicit 36 | data. 37 | 38 | Putting everything into a sequence will force the parser to replace the 39 | alias with the corresponding data, before the constructor for the tag is 40 | invoked. 41 | 42 | The tag constructor then has to pick out the content as the first item of 43 | the sequence that it was given, in order to have access to the actual data 44 | inside. 
def date_range_constructor(
    loader: yaml.Loader, node: yaml.MappingNode
) -> list[GPSDate]:
    """
    Construct GPSDate instances based on start and end dates (both inclusive) as
    well as how much to extend end date with, if any.

    Expects a mapping node with the keys `beg` and `end` (parsed by the YAML
    loader as timestamps) and an optional `extend_end_by` (default 0).

    Raises:
        TypeError: If the node is not a mapping node, or if `beg` is not a
            date/datetime instance.

    """
    if not isinstance(node, yaml.MappingNode):
        raise TypeError(f"Node type {node!r} not supported for tag ...")
    d = loader.construct_mapping(node)
    beg = d.get("beg")
    # Fix: this used to be an `assert`, which is stripped when Python runs
    # with optimisations enabled (-O), so validate explicitly instead.
    if not isinstance(beg, (dt.date, dt.datetime)):
        raise TypeError(f"Expected `beg` to be a date or datetime, got {beg!r} ...")
    end = d.get("end")
    extend_end_by = d.get("extend_end_by", 0)
    return dates_to_gps_date(date_range(beg, end, extend_end_by=extend_end_by))
The architecture and overall functionality are visualised using the C4-model tool
Structurizr.
Running the docker container above for the first time in a directory which does not
already have a `workspace.dsl` file, a new file is created with root ownership.
def untouched(arguments: ArgumentsType) -> Iterable[ArgumentsType]:
    """
    Default `dispatch_with` function applied to each permutation of the input
    arguments.

    The input is wrapped, unchanged, in a single-item list to mimic a dispatch
    function that derives further possible argument sets from the values of
    the given arguments.

    """
    singleton = [arguments]
    return singleton
70 | 71 | If `parameters` are specified, the `arguments` mapping is treated as a 72 | template from which a list of instances of arguments is created with each 73 | instance having any string-typed values formatted using the same parameter 74 | permutation. 75 | 76 | If the `dispatch_with` function is set, each permutation of arguments is 77 | passed to this function which is expected to return an iterable of arguments 78 | that match the signature of the API-level function. This way, arguments may 79 | be further processed or, simply, restructured to make them work with the 80 | API-level function. 81 | 82 | The `tasks` method creates Task instances that that can run the API-level 83 | function with concrete arguments. 84 | 85 | Conclusion: Arguments defined more compactly, may be used, when resolved by 86 | any parameters, as the actual input to the API-level function, but they can 87 | also be used as input for the dispatcher function that builds/makes the the 88 | arguments fit the signature of the API-level function. 89 | 90 | """ 91 | 92 | identifier: str 93 | description: str 94 | run: AnyFunction = field(repr=False) 95 | dispatch_with: AnyFunction = field(repr=False, default_factory=lambda: untouched) 96 | arguments: ArgumentsType = field(default_factory=dict) 97 | parameters: ParametersType = field(default_factory=dict) 98 | asynchronous: bool = False 99 | 100 | _tasks: list[Task] | None = field( 101 | init=False, repr=False, default_factory=lambda: None 102 | ) 103 | 104 | _task_id: it.count = field( 105 | init=False, repr=False, default_factory=partial(it.count, start=1) 106 | ) 107 | 108 | @property 109 | def task_id(self) -> str: 110 | minor = next(self._task_id) 111 | return f"{self.identifier}.{minor:d}" 112 | 113 | @property 114 | def tasks(self) -> list[Task]: 115 | """ 116 | Return Task instances for Task Definition instance. 117 | 118 | These are created once, and can thus be referred to several times. 
119 | 120 | """ 121 | if self._tasks is None: 122 | self._tasks = [ 123 | Task(self.task_id, self.run, arguments) 124 | for permutation in resolve(self.arguments, self.parameters) 125 | for arguments in self.dispatch_with(permutation) 126 | ] 127 | return self._tasks 128 | -------------------------------------------------------------------------------- /docs/manual/assets/campaign.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | version: &version 0.5.0 3 | username: &username USERNAME 4 | created: &created 2024-04-08 5 | template: &template example 6 | campaign: &campaign EXAMPLE 7 | beg: &beg 2019-02-13 8 | end: &end 2019-02-14 9 | 10 | custom: 11 | dates: &dates 12 | - 2019-02-13 13 | - 2019-02-14 14 | - 2020-06-27 15 | - 2020-06-28 16 | - 2021-04-05 17 | - 2021-04-06 18 | 19 | tasks: 20 | 21 | - identifier: PPP 22 | description: Precise-Point Positioning 23 | run: RunBPE 24 | arguments: 25 | pcf_file: PPP 26 | campaign: *campaign 27 | year: '{date.year}' 28 | session: '{date.doy:0>3d}0' 29 | sysout: 'PPP_{date.doy:0>3d}0' 30 | status: 'PPP_{date.doy:0>3d}0.RUN' 31 | taskid: 'PPP_{date.doy:0>3d}0' 32 | parameters: 33 | date: dates 34 | 35 | - identifier: RNX2SNX 36 | description: RINEX to SINEX 37 | run: RunBPE 38 | arguments: 39 | pcf_file: RNX2SNX 40 | campaign: *campaign 41 | year: '{date.year}' 42 | session: '{date.doy:0>3d}0' 43 | sysout: 'RNX2SNX_{date.doy:0>3d}0' 44 | status: 'RNX2SNX_{date.doy:0>3d}0.RUN' 45 | taskid: 'R2S_{date.doy:0>3d}0' 46 | parameters: 47 | date: dates 48 | 49 | - identifier: BASTST 50 | description: |- 51 | Baseline by baseline processing for trouble shooting 52 | run: RunBPE 53 | arguments: 54 | pcf_file: BASTST 55 | campaign: *campaign 56 | year: '{date.year}' 57 | session: '{date.doy:0>3d}0' 58 | sysout: 'BASTST_{date.doy:0>3d}0' 59 | status: 'BASTST_{date.doy:0>3d}0.RUN' 60 | taskid: 'BASTST_{date.doy:0>3d}0' 61 | parameters: 62 | date: dates 63 | 64 | - identifier: CLKDET 
65 | description: |- 66 | Zero-difference network solution providing clock corrections 67 | run: RunBPE 68 | arguments: 69 | pcf_file: CLKDET 70 | campaign: *campaign 71 | year: '{date.year}' 72 | session: '{date.doy:0>3d}0' 73 | sysout: 'CLKDET_{date.doy:0>3d}0' 74 | status: 'CLKDET_{date.doy:0>3d}0.RUN' 75 | taskid: 'CLKDET_{date.doy:0>3d}0' 76 | parameters: 77 | date: dates 78 | 79 | - identifier: IONDET 80 | description: |- 81 | Zero-difference network solution providing station-wise, regional, or global 82 | ionosphere maps and the related biases 83 | run: RunBPE 84 | arguments: 85 | pcf_file: IONDET 86 | campaign: *campaign 87 | year: '{date.year}' 88 | session: '{date.doy:0>3d}0' 89 | sysout: 'IONDET_{date.doy:0>3d}0' 90 | status: 'IONDET_{date.doy:0>3d}0.RUN' 91 | taskid: 'IONDET_{date.doy:0>3d}0' 92 | parameters: 93 | date: dates 94 | 95 | - identifier: LEOPOD 96 | description: |- 97 | Precise Orbit Determination for a Low Earth Orbiting Satellites based on 98 | on-board GPS-measurements with phase ambiguity resolution 99 | run: RunBPE 100 | arguments: 101 | pcf_file: LEOPOD 102 | campaign: *campaign 103 | year: '{date.year}' 104 | session: '{date.doy:0>3d}0' 105 | sysout: 'LEOPOD_{date.doy:0>3d}0' 106 | status: 'LEOPOD_{date.doy:0>3d}0.RUN' 107 | taskid: 'LEOPOD_{date.doy:0>3d}0' 108 | parameters: 109 | date: !DateRange {beg: *beg, end: *end} 110 | 111 | - identifier: SLRVAL 112 | description: |- 113 | Validation of an existing GNSS or LEO orbit using SLR measurements 114 | run: RunBPE 115 | arguments: 116 | pcf_file: SLRVAL 117 | campaign: *campaign 118 | year: '{date.year}' 119 | session: '{date.doy:0>3d}0' 120 | sysout: 'SLRVAL_{date.doy:0>3d}0' 121 | status: 'SLRVAL_{date.doy:0>3d}0.RUN' 122 | taskid: 'SLRVAL_{date.doy:0>3d}0' 123 | parameters: 124 | date: dates 125 | 126 | - identifier: ITRF 127 | description: |- 128 | Derives a coordinate and linear velocity approximation from the ITRF 129 | solution containing non-linear PSD corrections 130 | run: 
@click.command
@_options.identifiers
@_options.exclude
@_options.force
@_options.campaign
@_options.yes
def download(
    identifiers: list[str] | None,
    exclude: list[str] | None = None,
    force: bool = False,
    name: str | None = None,
    yes: bool = False,
) -> None:
    """
    Download sources in common or campaign configuration file.

    Arguments:
        identifiers: Only consider sources with these identifiers.
        exclude: Skip sources with these identifiers.
        force: Download regardless of the age of already-downloaded files.
        name: Campaign name; when given, the campaign configuration is used
            instead of the common one.
        yes: Skip the confirmation prompt.

    """
    # Select which configuration to read the sources from
    if name is not None:
        log.info(f"Using campaign configuration from {name} ...")
        config = _campaign.load(name)
    else:
        log.info("Using default configuration ...")
        config = configuration.load()

    # Load raw configuration items
    raw_sources = _filter.get_raw(config, "sources", identifiers, exclude)

    if not raw_sources:
        msg = "No sources matching selected identifiers ..."
        print(msg)
        log.debug(msg)
        return

    # Build instances
    sources = _sources.load_all(raw_sources)

    # Remove sources with an unsupported protocol
    sources = [source for source in sources if source.protocol in PROTOCOLS]

    if not sources:
        msg = "No sources with supported protocols ..."
        print(msg)
        log.debug(msg)
        return

    # Preamble: show the user what is about to be downloaded
    table = Table(title="Downloading the following sources", box=box.HORIZONTALS)
    table.add_column("Identifier", no_wrap=True)
    table.add_column("Description")
    table.add_column("Local resolution count", justify="right")
    for source in sources:
        table.add_row(
            source.identifier,
            source.description,
            str(len(source.resolve())),
        )
    console = Console()
    console.print(table)

    # Ask to proceed or not; `--yes` skips the prompt.
    # (Fix: the `yes` flag was previously accepted but never used, so the
    # prompt was always shown.)
    if not yes and not _input.prompt_proceed():
        return

    # Resolve sources
    s = "s" if len(sources) > 1 else ""
    msg = f"Resolving {len(sources)} source{s} ..."
    log.info(msg)

    # A forced download is achieved by treating every local file as outdated
    if force:
        for source in sources:
            source.max_age = 0

    # Prepare output layout
    table = Table(title="Transfer Status", box=box.HORIZONTALS)
    table.add_column("Identifier", no_wrap=True)
    table.add_column("Proto")
    for key in asdict(TransferStatus()):
        table.add_column(key, justify="right")

    with Live(table, console=console, screen=False, refresh_per_second=4) as live:

        status_total: TransferStatus = TransferStatus()
        for source in sources:
            msg = f"Download: {source.identifier}: {source.description}"
            log.info(msg)
            # live.console.print(msg)

            # Delegate the transfer to the protocol-specific module
            agent = PROTOCOLS[source.protocol]
            status = agent.download(source)
            status_total += status

            args = [source.identifier, source.protocol] + [
                f"{total}" for total in asdict(status).values()
            ]
            table.add_row(*args)

        # (Previously a `for`/`else`; with no `break` in the loop, the `else`
        # body always ran, so it is inlined here after the loop.)
        log.debug("Finished downloading sources ...")
        # Add a line and print the totals
        table.add_section()
        args = ["", ""] + [f"{total}" for total in asdict(status_total).values()]
        table.add_row(*args)
30 | ifname.name in expected 31 | ), f"Expected {ifname.name} to be in {expected!r} ..." 32 | 33 | 34 | def test_directories_with_trailing_slash(tmp_path): 35 | 36 | # Arrange 37 | directories = "ABC" 38 | paths = [tmp_path / directory for directory in directories] 39 | [path.mkdir() for path in paths] 40 | 41 | expected = directories 42 | 43 | # Act 44 | path_input = Path(tmp_path / "*/") 45 | result = list(resolve_wildcards(path_input)) 46 | 47 | # Assert 48 | for path in result: 49 | assert path.name in expected, f"Expected {path.name} to be in {expected!r} ..." 50 | 51 | 52 | def test_directories_no_trailing_slash(tmp_path): 53 | 54 | # Arrange 55 | directories = "ABC" 56 | fname_in_A = "foo.bar" 57 | paths = [tmp_path / directory for directory in directories] 58 | [path.mkdir() for path in paths] 59 | (paths[0] / fname_in_A).touch() 60 | 61 | expected = directories 62 | 63 | # Act 64 | path_input = Path(tmp_path / "*") 65 | result = list(resolve_wildcards(path_input)) 66 | 67 | # Assert 68 | for path in result: 69 | assert path.name in expected, f"Expected {path.name} to be in {expected!r} ..." 70 | 71 | 72 | def test_file_in_dir(tmp_path): 73 | 74 | # Arrange 75 | directories = "ABC" 76 | paths = [tmp_path / directory for directory in directories] 77 | [path.mkdir() for path in paths] 78 | 79 | fname_in_A = "foo.bar" 80 | ifname_in_A = paths[0] / fname_in_A 81 | ifname_in_A.touch() 82 | 83 | expected = ifname_in_A 84 | 85 | # Act I 86 | path_input = Path(tmp_path / "**" / "*.bar") 87 | result = list(resolve_wildcards(path_input)) 88 | 89 | # Assert 90 | ifname = result[0] 91 | assert ( 92 | ifname.name == expected.name 93 | ), f"Expected {ifname.name} to be {expected.name!r} ..." 94 | 95 | # Act II 96 | path_input = Path(tmp_path / "**" / "foo.bar") 97 | result = list(resolve_wildcards(path_input)) 98 | 99 | # Assert 100 | ifname = result[0] 101 | assert ( 102 | ifname.name == expected.name 103 | ), f"Expected {ifname.name} to be {expected.name!r} ..." 
104 | 105 | # Act III 106 | path_input = Path(tmp_path / "A" / "foo.bar") 107 | result = list(resolve_wildcards(path_input)) 108 | 109 | # Assert 110 | ifname = result[0] 111 | assert ( 112 | ifname.name == expected.name 113 | ), f"Expected {ifname.name} to be {expected.name!r} ..." 114 | 115 | 116 | def test_resolve_wildcards_without_wildcards(tmp_path): 117 | ifname = tmp_path / "foo.bar" 118 | ifname.touch() 119 | 120 | expected = [ifname] 121 | result = list(resolve_wildcards(ifname)) 122 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 123 | 124 | 125 | def test_empty_result_for_non_existing_file(): 126 | ifname = Path("foo.bar") 127 | expected = [] 128 | result = list(resolve_wildcards(ifname)) 129 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 130 | 131 | 132 | def test_parts(): 133 | input_ = "/A/B/C" 134 | expected = ["A", "B", "C"] 135 | result = _parts(input_) 136 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 137 | 138 | input_ = "A/B/C/" 139 | expected = ["A", "B", "C"] 140 | result = _parts(input_) 141 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 142 | 143 | input_ = "./A/B/C/." 144 | expected = ["A", "B", "C"] 145 | result = _parts(input_) 146 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 147 | 148 | 149 | def test_parents(): 150 | input_ = "/A/B/C" 151 | expected = [ 152 | "A", 153 | "A/B", 154 | "A/B/C", 155 | ] 156 | result = _parents(input_) 157 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 158 | 159 | input_ = "A/B/C/" 160 | result = _parents(input_) 161 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
162 | -------------------------------------------------------------------------------- /src/ab/qaqc/check_example.py: -------------------------------------------------------------------------------- 1 | """ 2 | The purpose of this module is to quickly verify that results in the Bernese 3 | installation are reliable. 4 | 5 | Overall observations, when comparing the _REF-files with the generated output: 6 | 7 | * The _REF-file has only the stations it needed for the calculation, whereas 8 | the output files from our own run produces files with many more lines, 9 | since, apparently, the input station data include many more files. 10 | 11 | * It is not clear from the documentation of the update process, what, 12 | specifically, to compare, and how much it may deviate from the baseline 13 | results. 14 | 15 | Quote from README_SAVEDISK.TXT: 16 | 17 | > You should obtain the same results (apart from effects introduced by 18 | > different compilers and/or operating systems which have been used). 19 | 20 | Quote from README_FIRST_STEPS.md: 21 | 22 | > We expect no (significant) differences between your solution and the 23 | > reference files. 24 | 25 | Recommendations from our internal point of view: 26 | 27 | * Most important to compare coordinates. 28 | * For coordinate files, compare *.CRD - Criteria: 29 | - well under a mm; at least 1/10 mm, and probably lower. 30 | * Generally, compare *.PRC files (summary files): - Compare, e.g.: 31 | - Solved ambiguities 32 | - Some residuals from Helmert transformations. 33 | - "Coordinate repeatability". 
34 | 35 | Files of concern: 36 | 37 | * Final coordinates from the RINEX-to-SINEX PCF `RNX2SNX.PCF`: 38 | - $S/RNX2SNX/yyyy/FIN_yyyyssss.CRD 39 | - $S/RNX2SNX/yyyy/FIN_yyyyssss.CRD_REF 40 | 41 | """ 42 | 43 | from dataclasses import dataclass 44 | from pathlib import Path 45 | from typing import ( 46 | Protocol, 47 | TypeVar, 48 | ) 49 | from collections.abc import Callable 50 | 51 | from ab import configuration 52 | 53 | 54 | C = TypeVar("C", bound="Comparable") 55 | 56 | 57 | class Comparable(Protocol): 58 | def __sub__(self, other: C) -> C: ... 59 | 60 | __rsub__: Callable[[C, C], C] = __sub__ 61 | 62 | 63 | @dataclass 64 | class FilePair: 65 | ref: Path 66 | res: Path 67 | 68 | 69 | def get_available_comparables() -> list[FilePair]: 70 | config = configuration.load() 71 | return [ 72 | FilePair(ref, res) 73 | for pair in config.get("bsw_files", {}).get("check_install", []) 74 | if (ref := pair.get("reference")).is_file() 75 | and (res := pair.get("result")).is_file() 76 | ] 77 | 78 | 79 | @dataclass 80 | class LineCRD: 81 | station: str 82 | x: float 83 | y: float 84 | z: float 85 | flag: str 86 | 87 | def __sub__(self, other: "LineCRD") -> "LineCRD": 88 | if not self.station == other.station: 89 | raise RuntimeError( 90 | f"Must subtract from the same station. 
Got {self.station!r} and {other.station!r}" 91 | ) 92 | return LineCRD( 93 | self.station, 94 | self.x - other.x, 95 | self.y - other.y, 96 | self.z - other.z, 97 | self.flag, 98 | ) 99 | 100 | __rsub__ = __sub__ 101 | 102 | 103 | @dataclass 104 | class FileCRD: 105 | title: str 106 | date: str 107 | coordinates: list[LineCRD] 108 | 109 | def __sub__(self, other: "FileCRD") -> "FileCRD": 110 | diffs = [ 111 | self_line - other_line 112 | for (self_line, other_line) in zip(self.coordinates, other.coordinates) 113 | ] 114 | return FileCRD(self.title, self.date, diffs) 115 | 116 | __rsub__ = __sub__ 117 | 118 | 119 | def extract_coordinates(raw: str) -> FileCRD: 120 | FLAG_INDEX: int = 70 121 | 122 | results = [] 123 | for index, line in enumerate(raw.splitlines()): 124 | if index == 0: 125 | # Save metadata 126 | title = line[:-15].strip().split(":")[0] 127 | date = line[-15:] 128 | continue 129 | if index < 7: 130 | # Not a line with coordinate results 131 | continue 132 | if not len(line) > FLAG_INDEX: 133 | # Not calculated 134 | continue 135 | results.append(line) 136 | 137 | coordinates = [] ## TODO: Make it a dict with station as key 138 | for line in results: 139 | coordinates.append( 140 | LineCRD( 141 | station=line[5:9], 142 | x=float(line[20:36]), 143 | y=float(line[37:51]), 144 | z=float(line[52:66]), 145 | flag=line[FLAG_INDEX], 146 | ) 147 | ) 148 | return FileCRD(title, date, coordinates) 149 | 150 | 151 | def flag_if_too_high(delta: float, tolerance: float = 0.0001) -> str: 152 | return "!" 
if abs(delta) > tolerance else " " 153 | -------------------------------------------------------------------------------- /src/ab/station/sitelog/__init__.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from pathlib import Path 3 | import logging 4 | 5 | from ab.station.sitelog.parse import ( 6 | parse_sections, 7 | parse_section_1, 8 | parse_section_2, 9 | parse_subsection_3, 10 | parse_subsection_4, 11 | ) 12 | 13 | 14 | log = logging.getLogger(__name__) 15 | 16 | 17 | class StationIDLength(Enum): 18 | FOUR = "four" 19 | NINE = "nine" 20 | 21 | @classmethod 22 | def values(self) -> list[str]: 23 | return [item.value for item in self] 24 | 25 | 26 | station_id_keys = { 27 | StationIDLength.FOUR: "four_character_id", 28 | StationIDLength.NINE: "nine_character_id", 29 | } 30 | "Mapping between enum and sitelog dict key with the chosen ID format" 31 | 32 | 33 | class Sitelog: 34 | """ 35 | Sitelog reads given filename and stores parsed sitelog data. 36 | 37 | """ 38 | 39 | def __init__( 40 | self, 41 | filename: Path | str, 42 | preferred_station_id_length: str | None = None, 43 | ) -> None: 44 | 45 | self.filename = Path(filename) 46 | 47 | # Validate 48 | 49 | try: 50 | sitelog_content = self.filename.read_text() 51 | 52 | except Exception as e: 53 | log.warning(f"Caught error {e!r}, when reading file {self.filename!r} ...") 54 | sitelog_content = self.filename.read_text(encoding="cp1252") 55 | 56 | # Choose preferred station ID, if choice is available 57 | chosen: StationIDLength | None = None 58 | if preferred_station_id_length is not None: 59 | try: 60 | chosen = StationIDLength(preferred_station_id_length) 61 | except ValueError as e: 62 | expected = StationIDLength.values() 63 | part_1 = f"Expected `station_id_characters` to be in {expected!r}." 64 | part_2 = f"Got {preferred_station_id_length!r} ..." 
65 | log.error(f"{part_1} {part_2}") 66 | log.info("Using largest ID length in each individual sitelog ...") 67 | 68 | # Extract raw section contents 69 | self.sections = parse_sections(sitelog_content) 70 | 71 | # Build model structure 72 | self.section_1 = parse_section_1(self.sections["1"]["content"]) 73 | self.section_2 = parse_section_2(self.sections["2"]["content"]) 74 | self.receivers = [ 75 | parse_subsection_3(subsection) 76 | for subsection in self.sections["3"]["subsections"] 77 | ] 78 | self.antennae = [ 79 | parse_subsection_4(subsection) 80 | for subsection in self.sections["4"]["subsections"] 81 | ] 82 | 83 | # Derived station ID 84 | # ------------------ 85 | 86 | # Here, the parsing has mixed v1 and v2 of the sitelog format that 87 | # changed ~ 2024. Version 1 provides four, and not nine, and vice versa. 88 | four = self.section_1.four_character_id 89 | nine = self.section_1.nine_character_id 90 | 91 | # Make it possible to prefer four-letter ID, when nine (and thus not 92 | # four) was found by setting four to the first characters in the 93 | # nine-letter ID. 94 | if nine is not None: 95 | nine = nine.upper() 96 | four = nine[:4] 97 | 98 | # If no valid preferred length is chosen, use longest ID supported by 99 | # the sitelog 100 | if chosen is None: 101 | if nine is not None: 102 | self.station_id = nine 103 | else: 104 | self.station_id = four 105 | return 106 | 107 | # If preferred length is nine 108 | if chosen is StationIDLength.NINE: 109 | if nine is not None: 110 | self.station_id = nine 111 | return 112 | else: 113 | log.warning( 114 | f"Chosen station-ID length is nine, but {self.filename.name} does not have nine." 115 | ) 116 | 117 | # The only other option so far is StationIDLength.FOUR, but as this is 118 | # also the default, we log it as such. 
119 | # 120 | # Default to the four-letter one which is assumed to always exist 121 | log.info(f"Using four-letter station name {four} as station ID.") 122 | self.station_id = four 123 | 124 | def save_sections_raw(self, ofname: Path | str) -> None: 125 | ofname = Path(ofname) 126 | with open(ofname, "w+") as f: 127 | json.dump(self.sections, f, indent=2) 128 | 129 | @property 130 | def sections_extracted(self) -> dict[str, str]: 131 | return dict( 132 | sec1=asdict(self.section_1), 133 | sec2=asdict(self.section_2), 134 | sec3=[asdict(receiver) for receiver in self.receivers], 135 | sec4=[asdict(antenna) for antenna in self.antennae], 136 | ) 137 | 138 | def save_sections_extracted(self, ofname: Path | str) -> None: 139 | ofname = Path(ofname) 140 | with open(ofname, "w+") as f: 141 | json.dump(self.sections_extracted, f, indent=2) 142 | -------------------------------------------------------------------------------- /src/ab/data/sftp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Transfer files over SFTP 3 | 4 | Built-in command-line too sftp is assumed to exist. 5 | 6 | """ 7 | 8 | from typing import Final 9 | from collections.abc import Iterable 10 | from dataclasses import ( 11 | dataclass, 12 | asdict, 13 | ) 14 | import itertools as it 15 | from pathlib import Path 16 | import subprocess as sub # type: ignore 17 | import logging 18 | 19 | from ab.paths import ( 20 | resolve_wildcards, 21 | _parents, 22 | ) 23 | from ab.data import TransferStatus 24 | 25 | 26 | log = logging.getLogger(__name__) 27 | 28 | type LocalRemoteType = dict[str, Path | str] 29 | "Type for local file and remote directory as dict" 30 | 31 | MKDIR: Final = "-mkdir {remote_dir}" 32 | "SFTP-command structure for making a directory with a prefixed hyphen to suppress errors." 
33 | 34 | PUT: Final = "put {fname} {remote_dir}" 35 | "SFTP-command structure for transfering a file to a remote directory" 36 | 37 | 38 | @dataclass 39 | class LocalRemote: 40 | fname: str | Path | None = None 41 | remote_dir: str | Path | None = None 42 | 43 | @property 44 | def valid(self) -> bool: 45 | if self.fname is None: 46 | return False 47 | 48 | if self.remote_dir is None: 49 | return False 50 | 51 | return True 52 | 53 | def resolve_local(self) -> list["LocalRemote"]: 54 | if not self.valid: 55 | return [] 56 | 57 | # NOTE: type comment avoids MyPy complaining about type of `self.fname` 58 | # which `self.valid` has already made sure is note the case. 59 | return [ 60 | LocalRemote(resolved, self.remote_dir) 61 | for resolved in resolve_wildcards(self.fname) # type: ignore 62 | ] 63 | 64 | 65 | def _mkdir_commands(paths: Iterable[str]) -> list[str]: 66 | """ 67 | Return list of commands to create each full path, part by part. 68 | 69 | """ 70 | return [ 71 | MKDIR.format(remote_dir=part) 72 | for unique in sorted(set(paths)) 73 | for part in _parents(unique) 74 | ] 75 | 76 | 77 | def update_status( 78 | status: TransferStatus, result: sub.CompletedProcess 79 | ) -> TransferStatus: 80 | """ 81 | Assuming that each line is a successful transfer message, when the return 82 | code is zero. 83 | 84 | NOTE: This is not accounting for any failures. 85 | 86 | """ 87 | if result.returncode == 0: 88 | status.success += len(result.stdout.splitlines()) 89 | return status 90 | 91 | 92 | def _batch(host: str, commands: str) -> sub.CompletedProcess: 93 | return sub.run( 94 | ["sftp", "-b", "-", host], 95 | input=commands, 96 | text=True, 97 | check=True, 98 | capture_output=True, 99 | ) 100 | 101 | 102 | def upload(host: str, pairs: list[LocalRemoteType]) -> TransferStatus: 103 | """ 104 | Using settings provided, upload pairs of local file/remote destination 105 | directory. 106 | 107 | Provided host string must be a `Host` in running user's SSH config file. 
108 | 109 | Host settings must provide needed host details and credentials for the 110 | connection. 111 | 112 | If '*' wildcards are used for the local file, these are expanded. 113 | 114 | No wildcards can be used for the destination directory. This would be 115 | ambiguous for files with wildcards, and, presumably, redundant, if concrete 116 | files are to be put into more than one destination directory. 117 | 118 | """ 119 | log.info(f"Uploading files to {host} using SFTP ...") 120 | 121 | status = TransferStatus() 122 | 123 | log.info("Build LocalRemote instances from input ...") 124 | candidates = [LocalRemote(**pair) for pair in pairs] 125 | 126 | log.info("Get usable input (valid dict structure) ...") 127 | extracted = [candidate for candidate in candidates if candidate.valid] 128 | status.failed += len(candidates) - len(extracted) 129 | 130 | log.info("Get all local filenames ...") 131 | resolvable = [ 132 | resolved 133 | for candidate in extracted 134 | if len(resolved := candidate.resolve_local()) 135 | ] 136 | status.failed += len(resolvable) - len(extracted) 137 | resolved = list(it.chain(*resolvable)) 138 | 139 | log.info(f"Prepare commands to build remote directory tree ...") 140 | remote_dirs = [str(lr.remote_dir) for lr in resolved] 141 | cmd_mkdir = "\n".join(_mkdir_commands(remote_dirs)) 142 | 143 | log.info("Prepare commands to transfer local files to remote directories ...") 144 | cmd_put = "\n".join(PUT.format(**asdict(lr)) for lr in resolved) 145 | 146 | try: 147 | log.info(f"Batch-create directories on {host} ...") 148 | result = _batch(host, cmd_mkdir) 149 | status = update_status(status, result) 150 | 151 | log.info(f"Batch-transfer local files to {host} ...") 152 | result = _batch(host, cmd_put) 153 | status = update_status(status, result) 154 | 155 | except sub.CalledProcessError as e: 156 | log.warn(f"Subprocess failed with error {e}") 157 | status.exceptions.append(e) 158 | 159 | log.info(status) 160 | return status 161 | 
-------------------------------------------------------------------------------- /src/ab/configuration/constructors/paths.py: -------------------------------------------------------------------------------- 1 | """ 2 | Path constructors 3 | 4 | """ 5 | 6 | from pathlib import Path 7 | 8 | import yaml # type: ignore 9 | 10 | from ab.paths import resolve_wildcards 11 | 12 | 13 | def path_constructor(loader: yaml.Loader, node: yaml.Node) -> Path | list[Path]: 14 | """ 15 | The path constructor resolves filenames and returns either a single Path 16 | instance or a list, depending on the number of results. 17 | 18 | The constructor can read input either as 1) a string path or 2) a sequence 19 | of parts that combine to a path. The reason for the second input option is 20 | the need for building paths relative to the active installation environment. 21 | 22 | 1) Fully specified path strings like the following: 23 | 24 | * scheme://uniform/ressource/identifier 25 | * /some/path/to/somewhere 26 | * /some/path/to/some.file 27 | 28 | These values will be parsed as YAML ScalarNode instances, and the 29 | constructor simply returns a Python Path instance of the entire string 30 | value. 31 | 32 | Examples of the scalar syntax are: 33 | 34 | ```yaml 35 | 36 | key1: !Path /mnt/server/subdirectory/file.txt 37 | 38 | key2: !Path /mnt/server/subdirectory/*.log 39 | 40 | ``` 41 | 42 | --- 43 | 44 | 2) A sequence of path components, which is useful when one or more 45 | components are YAML aliases referring to a string somewhere else in the 46 | YAML document. 47 | 48 | More complex paths can thus be constructed by supplying the YAML tag with a 49 | sequence of path elements. 50 | 51 | Here, each sequence item can be either a YAML alias for a value elsewhere in 52 | the document, or simply a string. 
53 | 54 | Examples of the sequence syntax are: 55 | 56 | ```yaml 57 | 58 | key1: !Path [*alias_to_a_base_path, subdirectory, file.txt] 59 | 60 | ``` 61 | 62 | Return types 63 | ------------ 64 | 65 | Any element in the path or sequence of path components may use 66 | system-specific wildcards like `*` to obtain files and directories. 67 | 68 | In this case, the constructor will return not one single Path instance, but 69 | a list of all matches found, except when only one result is found. In that 70 | case that single Path instance is returned. 71 | 72 | All paths are resolved using the the `Path.glob` method in Python's 73 | `pathlib`. (Note that this implementation of `glob` differs from that in the 74 | traditional built-in module `glob`.) This means that nothing will be 75 | returned, if the file or directory is not there, when the path is resolved. 76 | In this case, the constructor returns the Path instance created from the 77 | input. 78 | 79 | If the wildcard `*` is used as the first character of the first element, the 80 | entry must be explicitly written as a string to prevent the YAML parser from 81 | interpreting it as an alias to a YAML anchor. 82 | 83 | """ 84 | if not isinstance(node, (yaml.ScalarNode, yaml.SequenceNode)): 85 | raise KeyError( 86 | f"Must be single string or list of strings or `Path` instances. Got {node.value!r} ..." 87 | ) 88 | 89 | if isinstance(node, yaml.ScalarNode): 90 | raw = loader.construct_scalar(node) 91 | if not isinstance(raw, str): 92 | raise RuntimeError(f"Expected {raw!r} to be a string.") 93 | path = Path(raw) 94 | else: 95 | # We use loader.construct_object, since there may be YAML aliases inside. 96 | multiple: list[str | Path] = [loader.construct_object(v) for v in node.value] 97 | if not all(isinstance(part, (str, Path)) for part in multiple): 98 | raise RuntimeError( 99 | f"Expected all parts to be strings or Path instances. Got {multiple!r} ..." 
100 |         )
101 |         path = Path(*multiple)
102 | 
103 |     # Avoid relative paths `dir/../dir2`
104 |     path = path.absolute()
105 | 
106 |     # Match against possible wildcards or just path
107 |     resolved = list(resolve_wildcards(path))
108 | 
109 |     # NOTE: Non-existing files are resolvable, but we need path instances for
110 |     # functionality that might create those paths.
111 |     if not resolved:
112 |         return path
113 | 
114 |     if len(resolved) > 1:
115 |         return resolved
116 | 
117 |     return resolved[0]
118 | 
119 | 
120 | def path_as_str_constructor(loader: yaml.Loader, node: yaml.Node) -> str | list[str]:
121 |     paths = path_constructor(loader, node)  # same resolution as path_constructor, stringified for YAML output
122 |     if isinstance(paths, list):
123 |         return [str(path) for path in paths]
124 |     return str(paths)
125 | 
126 | 
127 | def parent_constructor(loader: yaml.Loader, node: yaml.Node) -> Path | list[Path]:
128 |     """
129 |     Returns the parent directory of the path given.
130 | 
131 |     Expected input is the same as for path_constructor.
132 | 
133 |     """
134 |     paths = path_constructor(loader, node)
135 |     # There is no use case for returning multiple parent directories, since, at
136 |     # worst, this will be ambiguous or have duplicates.
137 |     # if isinstance(paths, list):
138 |     #     return [path.parent for path in paths]
139 |     if not isinstance(paths, Path):
140 |         raise ValueError(
141 |             f"Expected single path to return parent directory for. Got {paths!r} ..."
142 | ) 143 | return paths.parent 144 | -------------------------------------------------------------------------------- /workspace/structurizr/workspace.dsl: -------------------------------------------------------------------------------- 1 | workspace "GNSS processing with AutoBernese" { 2 | 3 | model "AutoBernese" { 4 | 5 | external_data_sources = softwareSystem "External data sources" 6 | 7 | enterprise "Agency for Data Supply and Infrastructure" { 8 | user = person "Geodesist" 9 | # cron = person "System user | CRON job" 10 | 11 | auto_bernese = softwareSystem "AutoBernese" { 12 | cli = container "CLI application" {} 13 | download_api = container "Download API" { 14 | downloader = component "Downloader" "Downloads specified files from external data sources." "FTP/HTTP" 15 | download_controller = component "Download controller" "Specifies source files and local destinations." 16 | } 17 | preprocessor = container "Preprocess and organise input to and output from BSW" { 18 | qaqc = component "Kvalitetssikring [QA] og kvalitetskontrol [QC]" {} 19 | gensta = component "Legacy converter" "Converts sitelogs to station file." "Perl" 20 | gensta_runner = component "gensta runner" "Creates subprocess with a call to the Perl program" 21 | } 22 | bsw_api = container "BSW API" { 23 | bpe_interface = component "BPE interface" "Perl scripts that configure and start the Bernese Processing Engine" "Perl 5" 24 | bpe_runner = component "BPE runner" "Creates subprocess with a call to the BPE interface" 25 | campaign_manager = component "Create, configure and work with BSW campaigns" 26 | } 27 | fire_api = container "FIRE API" { 28 | fire_wrapper = component "FIRE wrapper" "Creates a subprocess that runs FIRE command to read in specified results from the campaign." "MambaForge, FIRE" 29 | } 30 | } 31 | 32 | file_server = softwareSystem "File server" { 33 | datapool = container "BSW DATAPOOL" "The directory BSW uses as common raw-data storage." 
34 | campaign = container "BSW CAMPAIGN" "The directory BSW uses for each campaign area." 35 | savedisk = container "BSW SAVEDISK" "The directory BSW uses as end-product storage." 36 | everything_else = container "Everything else" "The rest of the file system used for GNSS-related data." 37 | } 38 | bsw = softwareSystem "BSW" { 39 | bsw_perl_interface = container "BSW Perl modules" "Perl programs that can be used to start the Bernese Processning Engine." 40 | } 41 | fire = softwareSystem "FIRE" {} 42 | } 43 | 44 | # Relationships between people and software systems 45 | download_api -> external_data_sources "Get data" 46 | 47 | user -> cli "Create campaign, activate campaign" 48 | user -> cli "Get data from external sources" 49 | user -> cli "Create STA file from updated sitelog" 50 | user -> cli "Run PCF file for campaign" 51 | user -> cli "Export end products" 52 | user -> cli "Upload results to FIRE Dataase" 53 | user -> file_server "Get and move end products to own PC" 54 | 55 | cli -> download_api "Get data from sources in campaign.yaml" 56 | 57 | cli -> qaqc "Check data integrity, accessibility, etc." 58 | cli -> qaqc "Perform quality assurance/control on end products" 59 | 60 | cli -> preprocessor "Rebuild general input data" "" 61 | cli -> preprocessor "Transfer relevant input data to the campaign directory" 62 | 63 | cli -> bsw_api "Create new campaign of given type" "NKG, 5D, KDI, RTK service" 64 | cli -> bsw_api "Convert sitelog to station file" 65 | cli -> bsw_api "Start BPE using campaign-specific PCF file" 66 | 67 | cli -> fire_api "Upload results to FIRE Database" ".CRD and .VEL files" 68 | 69 | # Relationships to/from containers 70 | download_api -> file_server "Store external data locally" 71 | bsw_api -> file_server "Store workspace and end products" 72 | qaqc -> file_server "Assure quality of end products." 
73 | bsw_api -> bsw "Organise campaign and run BPE" 74 | fire_api -> fire "Upload end products to FIRE database" 75 | 76 | # Relationships to/from components 77 | # TBW 78 | # Here will come the interactions between the different parts on the component level. 79 | gensta_runner -> gensta "Run converter for a given sitelog." 80 | } 81 | 82 | views { 83 | # systemlandscape "SystemLandscape" { 84 | # include * 85 | # animation { 86 | # file_server 87 | # } 88 | # autoLayout 89 | # } 90 | 91 | systemContext auto_bernese { 92 | include * 93 | autolayout lr 94 | } 95 | 96 | container auto_bernese { 97 | include * 98 | autolayout tb 99 | } 100 | 101 | component download_api "Data" { 102 | include * 103 | autoLayout 104 | } 105 | 106 | component preprocessor "Preprocessor" { 107 | include * 108 | autoLayout 109 | } 110 | 111 | component bsw_api "BSW" { 112 | include * 113 | autoLayout 114 | } 115 | 116 | theme default 117 | } 118 | 119 | } 120 | -------------------------------------------------------------------------------- /src/ab/configuration/core.yaml: -------------------------------------------------------------------------------- 1 | # Bernese GNSS Software [BSW] environment variables available after 'source'ing 2 | # the shell script `LOADGPS.setvar` in the root of the installation directory. 
3 | bsw_env: 4 | 5 | # Installation directory 6 | C: &C !ENV C 7 | 8 | # Bernese documentation files 9 | DOC: &DOC !ENV DOC 10 | 11 | # Built-in Bernese program panels 12 | PAN: &PAN !ENV PAN 13 | 14 | # Global model files used by Bernese programs 15 | MODEL: &MODEL !ENV MODEL 16 | 17 | # Global configuration file used by Bernese programs 18 | CONFIG: &CONFIG !ENV CONFIG 19 | 20 | # DATAPOOL directory 21 | D: &D !ENV D 22 | 23 | # CAMPAIGN directory 24 | P: &P !ENV P 25 | 26 | # SAVEDISK directory 27 | S: &S !ENV S 28 | 29 | # User directory 30 | U: &U !ENV U 31 | 32 | # Temporary directory 33 | T: &T !ENV T 34 | 35 | # Specific files in the Bernese environment that we need to access 36 | bsw_files: 37 | 38 | # Release-information file 39 | release_info: !Path [*DOC, RELEASE.TXT] 40 | 41 | # Input file with the list of existing Bernese campaigns 42 | campaign_menu: !Path [*PAN, MENU_CMP.INP] 43 | 44 | # We define the environment root directory as the one containing the BSW 45 | # installation. It is assumed to be a directory that each user can can write to. 
46 | env: &env !Parent [*C] 47 | 48 | # AutoBernese runtime environment 49 | runtime: 50 | 51 | # Root directory for runtime files 52 | ab: &ab !Path [*env, autobernese] 53 | 54 | # Keyword arguments to the Python logging module 55 | logging: 56 | filename: !Path [*ab, autobernese.log] 57 | # Note the field `{user}` which is replaced at runtime with Python's getpass.getuser() 58 | format: '%(asctime)s | {user} | %(levelname)s | %(name)s | %(message)s' 59 | datefmt: '%Y-%m-%d %H:%M:%S' 60 | style: '%' 61 | level: DEBUG 62 | 63 | # Campaign-configuration template directory 64 | campaign_templates: !Path [*ab, templates] 65 | 66 | # Filename for the common configuration 67 | common_config: !Path [*ab, autobernese.yaml] 68 | 69 | # Sections that can be added to or overridden in the core configuration 70 | sections_to_override: 71 | - metadata 72 | - environment 73 | - sources 74 | - tasks 75 | - station 76 | - clean 77 | - troposphere 78 | - campaign 79 | 80 | # Default content for the above sections_to_override. These sections can be 81 | # overriden by the user in the general configuration file autobernese.yaml or in 82 | # the campaign configuration (including the template). 83 | 84 | campaign: 85 | directories: 86 | - name: ATM 87 | - name: BPE 88 | - name: GEN 89 | files: 90 | - !Path [*CONFIG, OBSERV.SEL] 91 | - !Path [*PAN, SESSIONS.SES] 92 | - name: GRD 93 | - name: OBS 94 | - name: ORB 95 | - name: ORX 96 | - name: OUT 97 | - name: RAW 98 | - name: SOL 99 | - name: STA 100 | 101 | troposphere: 102 | 103 | # NOTE: The `data` section is under development. It main purpose is to provide 104 | # the YAML anchors for our defaults and make them available in the rest of the 105 | # configuration files. 
106 | data: 107 | 108 | common: 109 | hour_extensions: &CORE_HOUR_EXTENSIONS ['00', '06', '12', '18'] 110 | 111 | VMF3_OP_1X1: 112 | hour_url: &CORE_URL_H_VMF3_OP_1X1 https://vmf.geo.tuwien.ac.at/trop_products/GRID/1x1/VMF3/VMF3_OP/{date.year} 113 | date_beg: &CORE_DATE_BEG_VMF3_OP_1X1 2008-01-01 114 | date_end: &CORE_DATE_END_VMF3_OP_1X1 null 115 | hour_directory: &CORE_DIR_H_VMF3_OP_1X1 !Path [*D, VMF3, '1x1_OP_H', '{date.year}'] 116 | grd_directory: &CORE_DIR_GRD_VMF3_OP_1X1 !Path [*D, VMF3, '1x1_OP_GRD', '{date.year}'] 117 | hour_filename: &CORE_FNAME_H_VMF3_OP_1X1 'VMF3_{date.year}{date.month:02d}{date.day:02d}.H{hour}' 118 | grd_filename: &CORE_FNAME_GRD_VMF3_OP_1X1 'VMF3_{date.year}{date.doy:03d}0.GRD' 119 | 120 | VMF3_EI_1X1: 121 | hour_url: &CORE_URL_H_VMF3_EI_1X1 https://vmf.geo.tuwien.ac.at/trop_products/GRID/1x1/VMF3/VMF3_EI/{date.year} 122 | date_beg: &CORE_DATE_BEG_VMF3_EI_1X1 1997-01-01 123 | date_end: &CORE_DATE_END_VMF3_EI_1X1 2007-12-31 124 | hour_directory: &CORE_DIR_H_VMF3_EI_1X1 !Path [*D, VMF3, '1x1_EI_H', '{date.year}'] 125 | grd_directory: &CORE_DIR_GRD_VMF3_EI_1X1 !Path [*D, VMF3, '1x1_EI_GRD', '{date.year}'] 126 | hour_filename: &CORE_FNAME_H_VMF3_EI_1X1 'VMF3_{date.year}{date.month:02d}{date.day:02d}.H{hour}' 127 | grd_filename: &CORE_FNAME_GRD_VMF3_EI_1X1 'VMF3_{date.year}{date.doy:03d}0.GRD' 128 | 129 | VMF1_OP_2.5X2: 130 | hour_url: &CORE_URL_H_VMF1_OP_2COMMA5X2 https://vmf.geo.tuwien.ac.at/trop_products/GRID/2.5x2/VMF1/VMF1_OP/{date.year} 131 | date_beg: &CORE_DATE_BEG_VMF1_OP_2COMMA5X2 1985-01-01 132 | date_end: &CORE_DATE_END_VMF1_OP_2COMMA5X2 null 133 | hour_directory: &CORE_DIR_H_VMF1_OP_2COMMA5X2 !Path [*D, VMF1, '1x1_EI_H', '{date.year}'] 134 | grd_directory: &CORE_DIR_GRD_VMF1_OP_2COMMA5X2 !Path [*D, VMF1, '1x1_EI_GRD', '{date.year}'] 135 | hour_filename: &CORE_FNAME_H_VMF1_OP_2COMMA5X2 'VMFG_{date.year}{date.month:02d}{date.day:02d}.H{hour}' 136 | grd_filename: &CORE_FNAME_GRD_VMF1_OP_2COMMA5X2 
'VMFG_{date.year}{date.doy:03d}0.GRD' 137 | 138 | # Default argumemts for the troposphere CLI 139 | # Define your own in the common or campaign-specific configuration file. 140 | ipath: *CORE_DIR_H_VMF3_OP_1X1 141 | opath: *CORE_DIR_GRD_VMF3_OP_1X1 142 | ifname: *CORE_FNAME_H_VMF3_OP_1X1 143 | ofname: *CORE_FNAME_GRD_VMF3_OP_1X1 144 | beg: *CORE_DATE_BEG_VMF3_OP_1X1 145 | end: *CORE_DATE_END_VMF3_OP_1X1 146 | -------------------------------------------------------------------------------- /docs/manual/quick-start.md: -------------------------------------------------------------------------------- 1 | The purpose of this section is to give a quick demonstration of how to use 2 | AutoBernese to run PCF-files with the Bernese Processing Engine for a campaign. 3 | 4 | To get a feel for how AutoBernese works, when everything is set up, this section 5 | goes through running the PPP-script with the Bernese Processing Engine for the 6 | EXAMPLE campaign that comes bundled with the Bernese GNSS Software. 7 | 8 | 9 | ## Preparation 10 | 11 | Before proceeding, make sure that you have performed the following steps: 12 | 13 | * [Install AutoBernese](install-autobernese.md). 14 | * Install the Bernese EXAMPLE campaign [see Bernese manual]. 15 | * Download [this campaign configuration](assets/campaign.yaml) to the 16 | EXAMPLE-campaign directory [`$P/EXAMPLE`]. 17 | 18 | What you have now is a configuration file that lets you run the default PPP.PCF 19 | file for the first two days available in the EXAMPLE campaign's interval. 20 | 21 | 22 | ### Initialise environments 23 | 24 | First, initialise the environments: 25 | 26 | * Load your Bernese environment as defined in `LOADGPS.setvar`. 27 | * Activate the AutoBernese `conda`/`mamba` environment [`ab`] 28 | 29 | !!! note "Bernese user environment" 30 | 31 | Make sure that you have configured a Bernese user environment 32 | for the active user. Run 33 | 34 | ``` 35 | $EXE/configure.pm 36 | ``` 37 | 38 | and select option 3. 
39 | AutoBernese will fail without a functional Bernese user environment. 40 | 41 | ## Run the campaign 42 | 43 | To run the PCF file for each specified day, type the following in the terminal 44 | with the activated environments: 45 | 46 | ```sh 47 | (ab) $ ab campaign run EXAMPLE 48 | # (output) from BPE 49 | ``` 50 | 51 | 52 | ### Recorded example 53 | 54 | Below is a demonstration of the process above: 55 | 56 |
57 | 58 | 69 | 70 | 71 | ## The configuration for the EXAMPLE campaign 72 | 73 | The file that you downloaded to the EXAMPLE-campaign directory above is an 74 | example of a campaign-configuration file that AutoBernese uses to download 75 | campaign-specific data and configure and run the Bernese Processing Engine. 76 | 77 | ??? info "Expand to see the AutoBernese campaign-configuration file for the EXAMPLE campaign" 78 | 79 | ``` title="`campaign.yaml`" linenums="1" 80 | --8<-- "docs/manual/assets/campaign.yaml" 81 | ``` 82 | 83 | The three standard sections in this file are `metadata`, which contain data 84 | about the campaign, `tasks` which contain a list of tasks to run for the 85 | campaign, and `sources`, which contain a list of data sources. 86 | 87 | !!! warning "IMPORTANT" 88 | 89 | For the EXAMPLE campaign, the provided data cover several two-day intervals 90 | instead of having a single interval, that can be defined by the beginning and 91 | end dates in the `metadata`section. AutoBernese only supports creating campaigns 92 | with a single date interval, but the campaign configuration can easily be 93 | amended to accommodate the case, where several arbitrary dates are used. 94 | 95 | In this case, a custom section, arbitrarily named `custom`, has been added, in 96 | which a YAML anchor `&dates` is defined for the sequence of dates that all 97 | `.PCF` files, except `LEOPOD.PCF`, use in the EXAMPLE campaign. 98 | 99 | This section is not 'seen' by AutoBernese, since it does not use this key, when 100 | loading the configuration. But the configuration settings that use the YAML 101 | alias `*dates` do have its values inserted, before being read by AutoBernese. 

See a short description of each section below or an expanded description in the
section on [campaign-specific configuration files][AB-C-CONFIG]:

[AB-C-CONFIG]: configuration-files.md#campaign-configuration

* The `metadata` section contains data about the context in which the campaign
  was created, if it was created by AutoBernese. With YAML anchors
  [`&`], information such as the campaign directory [here, `EXAMPLE`]
  and the date interval [`beg` and `end`] covered by the campaign can be
  re-used later in the document.

  Here, the beginning and end dates define an interval in which there are data
  for the `LEOPOD.PCF` file in the EXAMPLE campaign.

* The `custom` section, as explained above, is not a part of AutoBernese, but a
  useful construct that works as a container for manually-set-up data.

* The `tasks` section contains short definitions of each BPE process to run,
  and in which the details depend on the input dates that the `.PCF` file
  should run for.

* The `sources` section defines external or otherwise remote data sources
  needed for the campaign. The EXAMPLE campaign does not come with all needed
  data by default, and the missing source is defined as a single entry here.

To learn more about how to configure campaigns, go to [Configuration files]. To
learn how to use AutoBernese to create Bernese campaigns based on templates, go
to the [Command-line reference].
131 | 132 | [Configuration files]: configuration-files.md 133 | [Command-line reference]: command-reference.md 134 | -------------------------------------------------------------------------------- /src/ab/cli/station.py: -------------------------------------------------------------------------------- 1 | """ 2 | Command-line interface for station-related tools 3 | 4 | """ 5 | 6 | import logging 7 | from pathlib import Path 8 | from typing import Any 9 | 10 | import click 11 | from click_aliases import ClickAliasedGroup 12 | from rich import print 13 | 14 | from ab import ( 15 | configuration, 16 | dates, 17 | ) 18 | from ab.cli import _options 19 | from ab.bsw import campaign as _campaign 20 | from ab.station import sta 21 | 22 | 23 | log = logging.getLogger(__name__) 24 | 25 | 26 | @click.group(cls=ClickAliasedGroup) 27 | def station() -> None: 28 | """ 29 | Stand-alone tools for station data. 30 | 31 | """ 32 | 33 | 34 | @station.command 35 | @_options.campaign 36 | @click.option( 37 | "-f", 38 | "--config", 39 | required=False, 40 | default=None, 41 | type=Path, 42 | help="Path to an input YAML file with valid `station`.", 43 | ) 44 | @click.option( 45 | "-i", 46 | "sitelogs", 47 | multiple=True, 48 | type=Path, 49 | help="One or more paths to site-log files to build the STA-file from.", 50 | ) 51 | @click.option( 52 | "-k", 53 | "individually_calibrated", 54 | multiple=True, 55 | type=str, 56 | help="Four-letter ID for each station that is individually calibrated.", 57 | ) 58 | @click.option( 59 | "-o", 60 | "output_filename", 61 | required=False, 62 | default=Path(".").resolve() / "sitelogs.STA", 63 | type=Path, 64 | help="Path to optional output path and filename for the STA file.", 65 | ) 66 | @click.option( 67 | "-l", 68 | "preferred_station_id_length", 69 | required=False, 70 | type=str, 71 | help="Preferred station-ID length. Choose between values `four` and `nine`. 
Default is to use available length in each given file.",
)
def sitelogs2sta(
    name: str,
    config: Path,
    sitelogs: tuple[Path],
    individually_calibrated: tuple[str],
    output_filename: Path,
    preferred_station_id_length: str | None = None,
) -> None:
    """
    Create a STA file from sitelogs and optional station information.

    Choose one of the following ways to run it:

    1. No arguments: Use input provided in common user configuration
       (`autobernese.yaml`).

    2. Use the flag `-c` to supply a name for a campaign whose configuration
       (`campaign.yaml`) contains the settings needed to create a STA file
       from the campaign-specific configuration.

    3. Use the flag `-f` to supply the path to a custom YAML file with
       arguments.

    4. Use flags `-i`, `-k`, `-o` and `-l` to supply needed and optional arguments on
       the command line.

    The following arguments are possible:

    \b
    * Site-log filenames are required.
    * Station IDs for individually-calibrated stations are optional.
    * The path to the output STA file is optional. (Default: `sitelogs.STA`)

    These arguments can be provided in a configuration file which has the
    following structure:

    \b
    ```yaml
    station:
      sitelogs: [sta1.log, sta2.log]
      individually_calibrated: [sta1]
      output_sta_file: /path/to/output.STA
    ```

    Provided configuration files are loaded with the current Bernese
    environment, so advanced paths are possible, e.g.

    \b
    ```yaml
    sitelogs: !Path [*D, sitelogs, '*.log']
    ```

    which will give a sequence of all the files ending with `log` in the given
    directory.

    \b
    ```yaml
    sitelogs:
      - !Path [*D, sitelogs, 'sta1*.log']
      - !Path [*D, sitelogs, 'sta2*.log']
      - !Path [*D, sitelogs, 'sta3*.log']
    ```

    which will let you create the sequence of filenames yourself when you want
    to specify specific stations. The wildcard `*` lets you avoid having to
    look up the date, when the file was last updated.

    For the output STA file, you can also write the following:

    \b
    ```yaml
    output_sta_file: `!Path [*D, station, sitelogs.STA]`
    ```

    """
    arguments: dict[str, Any] | None = None
    if config is not None:
        ifname = config.absolute()
        msg = f"Create STA file with arguments in file {ifname} ..."
        log.info(msg)
        print(msg)
        arguments = configuration.load(ifname).get("station")

    elif sitelogs:
        log.info(f"Create STA file from given arguments ...")
        arguments = dict(
            sitelogs=list(sitelogs),
            individually_calibrated=individually_calibrated,
            output_sta_file=output_filename,
        )

    elif name is not None:
        msg = f"Create STA file from arguments in configuration for campaign {name} ..."
        log.info(msg)
        print(msg)
        arguments = _campaign.load(name).get("station")

    elif configuration.load().get("station") is not None:
        msg = f"Create STA file from arguments in the common user configuration `autobernese.yaml` ..."
        log.info(msg)
        print(msg)
        arguments = configuration.load().get("station")

    if arguments is None:
        msg = f"No arguments found ..."
178 | print(msg) 179 | log.info(msg) 180 | return 181 | 182 | sta.create_sta_file_from_sitelogs(**arguments) 183 | -------------------------------------------------------------------------------- /src/ab/country_code/ISO-3166-1-alpha-3.yaml: -------------------------------------------------------------------------------- 1 | Afghanistan: AFG 2 | Albania: ALB 3 | Algeria: DZA 4 | American Samoa: ASM 5 | Andorra: AND 6 | Angola: AGO 7 | Anguilla: AIA 8 | Antarctica: ATA 9 | Antigua and Barbuda: ATG 10 | Argentina: ARG 11 | Armenia: ARM 12 | Aruba: ABW 13 | Australia: AUS 14 | Austria: AUT 15 | Azerbaijan: AZE 16 | Bahamas (the): BHS 17 | Bahrain: BHR 18 | Bangladesh: BGD 19 | Barbados: BRB 20 | Belarus: BLR 21 | Belgium: BEL 22 | Belize: BLZ 23 | Benin: BEN 24 | Bermuda: BMU 25 | Bhutan: BTN 26 | Bolivia (Plurinational State of): BOL 27 | Bonaire, Sint Eustatius and Saba: BES 28 | Bosnia and Herzegovina: BIH 29 | Botswana: BWA 30 | Bouvet Island: BVT 31 | Brazil: BRA 32 | British Indian Ocean Territory (the): IOT 33 | Brunei Darussalam: BRN 34 | Bulgaria: BGR 35 | Burkina Faso: BFA 36 | Burundi: BDI 37 | Cabo Verde: CPV 38 | Cambodia: KHM 39 | Cameroon: CMR 40 | Canada: CAN 41 | Cayman Islands (the): CYM 42 | Central African Republic (the): CAF 43 | Chad: TCD 44 | Chile: CHL 45 | China: CHN 46 | Christmas Island: CXR 47 | Cocos (Keeling) Islands (the): CCK 48 | Colombia: COL 49 | Comoros (the): COM 50 | Congo (the Democratic Republic of the): COD 51 | Congo (the): COG 52 | Cook Islands (the): COK 53 | Costa Rica: CRI 54 | Croatia: HRV 55 | Cuba: CUB 56 | Curaçao: CUW 57 | Cyprus: CYP 58 | Czechia: CZE 59 | Côte d'Ivoire: CIV 60 | Denmark: DNK 61 | Djibouti: DJI 62 | Dominica: DMA 63 | Dominican Republic (the): DOM 64 | Ecuador: ECU 65 | Egypt: EGY 66 | El Salvador: SLV 67 | Equatorial Guinea: GNQ 68 | Eritrea: ERI 69 | Estonia: EST 70 | Eswatini: SWZ 71 | Ethiopia: ETH 72 | Falkland Islands (the) [Malvinas]: FLK 73 | Faroe Islands (the): FRO 74 | Fiji: FJI 75 | 
Finland: FIN 76 | France: FRA 77 | French Guiana: GUF 78 | French Polynesia: PYF 79 | French Southern Territories (the): ATF 80 | Gabon: GAB 81 | Gambia (the): GMB 82 | Georgia: GEO 83 | Germany: DEU 84 | Ghana: GHA 85 | Gibraltar: GIB 86 | Greece: GRC 87 | Greenland: GRL 88 | Grenada: GRD 89 | Guadeloupe: GLP 90 | Guam: GUM 91 | Guatemala: GTM 92 | Guernsey: GGY 93 | Guinea: GIN 94 | Guinea-Bissau: GNB 95 | Guyana: GUY 96 | Haiti: HTI 97 | Heard Island and McDonald Islands: HMD 98 | Holy See (the): VAT 99 | Honduras: HND 100 | Hong Kong: HKG 101 | Hungary: HUN 102 | Iceland: ISL 103 | India: IND 104 | Indonesia: IDN 105 | Iran (Islamic Republic of): IRN 106 | Iraq: IRQ 107 | Ireland: IRL 108 | Isle of Man: IMN 109 | Israel: ISR 110 | Italy: ITA 111 | Jamaica: JAM 112 | Japan: JPN 113 | Jersey: JEY 114 | Jordan: JOR 115 | Kazakhstan: KAZ 116 | Kenya: KEN 117 | Kiribati: KIR 118 | Korea (the Democratic People's Republic of): PRK 119 | Korea (the Republic of): KOR 120 | Kuwait: KWT 121 | Kyrgyzstan: KGZ 122 | Lao People's Democratic Republic (the): LAO 123 | Latvia: LVA 124 | Lebanon: LBN 125 | Lesotho: LSO 126 | Liberia: LBR 127 | Libya: LBY 128 | Liechtenstein: LIE 129 | Lithuania: LTU 130 | Luxembourg: LUX 131 | Macao: MAC 132 | Madagascar: MDG 133 | Malawi: MWI 134 | Malaysia: MYS 135 | Maldives: MDV 136 | Mali: MLI 137 | Malta: MLT 138 | Marshall Islands (the): MHL 139 | Martinique: MTQ 140 | Mauritania: MRT 141 | Mauritius: MUS 142 | Mayotte: MYT 143 | Mexico: MEX 144 | Micronesia (Federated States of): FSM 145 | Moldova (the Republic of): MDA 146 | Monaco: MCO 147 | Mongolia: MNG 148 | Montenegro: MNE 149 | Montserrat: MSR 150 | Morocco: MAR 151 | Mozambique: MOZ 152 | Myanmar: MMR 153 | Namibia: NAM 154 | Nauru: NRU 155 | Nepal: NPL 156 | Netherlands (Kingdom of the): NLD 157 | New Caledonia: NCL 158 | New Zealand: NZL 159 | Nicaragua: NIC 160 | Niger (the): NER 161 | Nigeria: NGA 162 | Niue: NIU 163 | Norfolk Island: NFK 164 | North Macedonia: MKD 165 | 
Northern Mariana Islands (the): MNP 166 | Norway: NOR 167 | Oman: OMN 168 | Pakistan: PAK 169 | Palau: PLW 170 | Palestine, State of: PSE 171 | Panama: PAN 172 | Papua New Guinea: PNG 173 | Paraguay: PRY 174 | Peru: PER 175 | Philippines (the): PHL 176 | Pitcairn: PCN 177 | Poland: POL 178 | Portugal: PRT 179 | Puerto Rico: PRI 180 | Qatar: QAT 181 | Romania: ROU 182 | Russian Federation (the): RUS 183 | Rwanda: RWA 184 | Réunion: REU 185 | Saint Barthélemy: BLM 186 | Saint Helena, Ascension and Tristan da Cunha: SHN 187 | Saint Kitts and Nevis: KNA 188 | Saint Lucia: LCA 189 | Saint Martin (French part): MAF 190 | Saint Pierre and Miquelon: SPM 191 | Saint Vincent and the Grenadines: VCT 192 | Samoa: WSM 193 | San Marino: SMR 194 | Sao Tome and Principe: STP 195 | Saudi Arabia: SAU 196 | Senegal: SEN 197 | Serbia: SRB 198 | Seychelles: SYC 199 | Sierra Leone: SLE 200 | Singapore: SGP 201 | Sint Maarten (Dutch part): SXM 202 | Slovakia: SVK 203 | Slovenia: SVN 204 | Solomon Islands: SLB 205 | Somalia: SOM 206 | South Africa: ZAF 207 | South Georgia and the South Sandwich Islands: SGS 208 | South Sudan: SSD 209 | Spain: ESP 210 | Sri Lanka: LKA 211 | Sudan (the): SDN 212 | Suriname: SUR 213 | Svalbard and Jan Mayen: SJM 214 | Sweden: SWE 215 | Switzerland: CHE 216 | Syrian Arab Republic (the): SYR 217 | Taiwan (Province of China): TWN 218 | Tajikistan: TJK 219 | Tanzania, the United Republic of: TZA 220 | Thailand: THA 221 | Timor-Leste: TLS 222 | Togo: TGO 223 | Tokelau: TKL 224 | Tonga: TON 225 | Trinidad and Tobago: TTO 226 | Tunisia: TUN 227 | Turkmenistan: TKM 228 | Turks and Caicos Islands (the): TCA 229 | Tuvalu: TUV 230 | Türkiye: TUR 231 | Uganda: UGA 232 | Ukraine: UKR 233 | United Arab Emirates (the): ARE 234 | United Kingdom of Great Britain and Northern Ireland (the): GBR 235 | United States Minor Outlying Islands (the): UMI 236 | United States of America (the): USA 237 | Uruguay: URY 238 | Uzbekistan: UZB 239 | Vanuatu: VUT 240 | Venezuela (Bolivarian 
Republic of): VEN 241 | Viet Nam: VNM 242 | Virgin Islands (British): VGB 243 | Virgin Islands (U.S.): VIR 244 | Wallis and Futuna: WLF 245 | Western Sahara*: ESH 246 | Yemen: YEM 247 | Zambia: ZMB 248 | Zimbabwe: ZWE 249 | Åland Islands: ALA 250 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | 2 | This is the documentation website for AutoBernese, a command-line application 3 | that automates and generalises common workflows with the [Bernese GNSS 4 | Software](http://www.bernese.unibe.ch/) [*Bernese* or *BSW*]. 5 | 6 | AutoBernese is written in Python and the package is maintained by the [Danish 7 | Agency for Climate Data](https://eng.kds.dk/) 8 | 9 | The software was created for our internal use cases, but as it may have some 10 | general application, it is published for a larger audience with the hope that it 11 | may be useful for other users of Bernese. 12 | 13 | Please note that: 14 | 15 | * We do not offer any support other than what is already provided here and in 16 | the code comments and documentation strings. 17 | * All development assumes that AutoBernese runs in a Linux environment. 18 | * The package is still a work in progress as far as point 5. below is concerned. 19 | 20 | However, if you want to reach out, feel free to open an issue on GitHub [see 21 | link to the archive in the top-right corner of this website]. 22 | 23 | 24 | ## Background 25 | 26 | As geodesists that use the Bernese GNSS Software 5.4, we need a software system 27 | that simplifies and eases the following processes: 28 | 29 | 1. Create Bernese campaigns for different campaign types. 30 | 2. Download necessary external data, either for common or campaign-specific 31 | purposes. 32 | 3. Produce or preprocess local data products for use in Bernese campaigns. 33 | 4. 
Simplify setting up recipes and starting the Bernese Processing Engine [BPE]
   for a given campaign.
5. Assure the quality of and handle the end products that come out of the BPE
   process.

Below the introduction is a more detailed overview of what is possible with the
software.

If you would rather try out the software, go to the
[Quick-start](manual/quick-start.md) page or go to the [command-line
interface](manual/command-reference.md) page to read a complete walk-through of
the available commands.


## AutoBernese

AutoBernese does several things behind the scenes for you as it helps you create
campaigns in Bernese GNSS Software and automatically run the Bernese Processing
Engine for all the PCF files you need for a given campaign. It does so by
automating the otherwise manual workflow of selecting a campaign, session and
each individual PCF file to run.

The same AutoBernese installation can be used for different installations, since
it integrates seamlessly with the given loaded Bernese installation. This may
be especially useful if you are a user or developer working in more than one
Bernese installation, e.g. one for either development, testing and production.

With its generalised workflow, AutoBernese is prepared specifically for a
multi-user scenario, giving users the ability to easily share resources such as
a **common configuration** and **templates for generalised campaign workflow**.

Having templates for typical campaign scenarios minimises the time needed to set
up each campaign. And since the templates are stored as plain-text files in a
shared directory, they are usable by everyone and easy to maintain and keep a
history of, e.g. using version control software.



This is a conceptual view of the workflow that AutoBernese supports.
The 73 | following illustrates the order of tasks from the user perspective. 74 | 75 | ```kroki-bpmn 76 | @from_file:assets/workflows.bpmn 77 | ``` 78 | 79 | Given that Bernese has been activated in the user terminal by `source`'ing 80 | `LOADGPS.setvar`, the first thing that a user must consider is setting up the 81 | local configuration of AutoBernese. 82 | 83 | **Built-in and user-defined AutoBernese configuration** 84 | 85 | This general configuration file lets you do the following: 86 | 87 | 1. Specify what open GNSS-data sources your want to download. 88 | 89 | **Use case:** Download common data files, independently, from any campaign. 90 | 91 | 2. Specify directory setup for campaigns that AutoBernese creates. 92 | 93 | **Use case:** Provide an option similar to the one in the Bernese GUI, where 94 | the campaign structure of a new campaign can be changed. 95 | 96 | 3. Specify information used by AutoBernese to create a STA file from locally 97 | available station-sitelog files. 98 | 99 | **Use case:** A user needs to continuously build a STA file from updated 100 | sitelogs, so that it may be combined with STA files from other sources in a 101 | Bernese campaign. 102 | 103 | 104 | 105 | **Template management for generic campaign types** 106 | 107 | A campaign-configuration template lets you do the following: 108 | 109 | 1. Create campaigns based on your own campaign types. 110 | 111 | 2. Just as with general data sources, you may have campaign-specific data 112 | depend on the campaign template used. Specify these in the campaign-specific 113 | configuration file. 114 | 115 | 3. Provided a list of one or more sets of input arguments to the Bernese 116 | Processing Engine [BPE], you may have a recipe for the given campaign type that 117 | can be easily run with a single command. 
118 | 119 | 120 | **The rest of the processes in the overall workflow diagram** 121 | 122 | With the general settings configured in the general AutoBernese configuration 123 | file, and at least one campaign-configuration template added to the templates 124 | directory of the autobernese directory, you are able to either go download and 125 | pre-process your general data or create a Bernese campaign. 126 | 127 | !!! note "Parallel processes" 128 | 129 | While the diagram above shows that downloading general data and creating Bernese 130 | campaigns are processes that can be performed in parallel, the process of 131 | running the BPE for that campaign with AutoBernese does require that the 132 | campaign is created. 133 | 134 | To read how to do all these things and more, go to the relevant section of the 135 | manual to get started. 136 | -------------------------------------------------------------------------------- /tests/ab/test_dates.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | import pytest 4 | 5 | from ab.dates import ( 6 | asdate, 7 | date_range, 8 | doy, 9 | doy2date, 10 | GPS_EPOCH, 11 | gps_week, 12 | gps_weekday, 13 | date_from_gps_week, 14 | GPSDate, 15 | gps_week_limits, 16 | gps_week_range, 17 | ) 18 | 19 | 20 | def test_datetime_is_an_instance_of_date(): 21 | date = dt.datetime.now() 22 | assert isinstance(date, dt.date) 23 | 24 | 25 | def test_asdate_with_date_instance(): 26 | input_date = dt.date(1991, 1, 1) 27 | expected = input_date 28 | result = asdate(input_date) 29 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 30 | 31 | 32 | def test_asdate_with_datetime_instance(): 33 | input_datetime = dt.datetime(1991, 1, 1, 12, 0, 0) 34 | expected = dt.date(1991, 1, 1) 35 | result = asdate(input_datetime) 36 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
37 | 38 | 39 | def test_asdate_with_GPSDate_instance(): 40 | input_date = GPSDate(1991, 1, 1) 41 | expected = GPSDate(1991, 1, 1) 42 | result = asdate(input_date) 43 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 44 | 45 | 46 | def test_asdate_with_incorrect_input_type(): 47 | with pytest.raises(TypeError) as info: 48 | asdate(None) 49 | 50 | 51 | def test_date_range_simple(): 52 | t0 = dt.datetime(1997, 5, 1, 15, 30, 0) 53 | t1 = dt.datetime(1997, 5, 5, 0, 0, 0) 54 | expected_dates = ( 55 | dt.date(1997, 5, 1), 56 | dt.date(1997, 5, 2), 57 | dt.date(1997, 5, 3), 58 | dt.date(1997, 5, 4), 59 | dt.date(1997, 5, 5), 60 | ) 61 | resulting_dates = date_range(t0, t1) 62 | assert len(resulting_dates) == len( 63 | expected_dates 64 | ), f"Length of resulting sequence is different from that of the expected sequence ..." 65 | 66 | for result, expected in zip(resulting_dates, expected_dates): 67 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 68 | 69 | 70 | def test_date_range_with_incorrect_input(): 71 | t0 = None 72 | t1 = 2 73 | with pytest.raises(TypeError) as info: 74 | date_range(t0, t1) 75 | 76 | 77 | def test_date_range_extended_end(): 78 | t0 = dt.datetime(1997, 5, 1) 79 | t1 = dt.datetime(1997, 5, 3) 80 | extend_end_by = 1 81 | expected_dates = ( 82 | dt.date(1997, 5, 1), 83 | dt.date(1997, 5, 2), 84 | dt.date(1997, 5, 3), 85 | dt.date(1997, 5, 4), 86 | ) 87 | resulting_dates = date_range(t0, t1, extend_end_by=extend_end_by) 88 | assert len(resulting_dates) == len( 89 | expected_dates 90 | ), f"Length of resulting sequence is different from that of the expected sequence ..." 91 | 92 | for result, expected in zip(resulting_dates, expected_dates): 93 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
94 | 95 | 96 | def test_doy(): 97 | test_data = ( 98 | (dt.date(1990, 1, 1), 1), 99 | (dt.date(2025, 6, 2), 153), 100 | ) 101 | for date, expected in test_data: 102 | result = doy(date) 103 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 104 | 105 | 106 | def test_doy2date(): 107 | test_data = ( 108 | ((1990, 1), dt.date(1990, 1, 1)), 109 | ((2025, 153), dt.date(2025, 6, 2)), 110 | ) 111 | for (year, doy_), expected in test_data: 112 | result = doy2date(year, doy_) 113 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 114 | 115 | 116 | def test_gps_week_with_incorrect_input(): 117 | with pytest.raises(ValueError) as info: 118 | gps_week(dt.date(1980, 1, 5)) 119 | 120 | 121 | def test_gps_week(): 122 | test_data = ( 123 | # Date, GPS week 124 | (dt.date(1980, 1, 6), 0), 125 | (dt.date(1985, 7, 25), 289), 126 | (dt.date(1994, 2, 8), 735), 127 | (dt.date(2015, 10, 14), 1866), 128 | (dt.date(2022, 8, 11), 2222), 129 | (dt.date(2023, 2, 21), 2250), 130 | ) 131 | for date, expected in test_data: 132 | result = gps_week(date) 133 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 134 | 135 | 136 | def test_gps_weekday(): 137 | test_data = ( 138 | (dt.date(2023, 10, 28), 6), 139 | (dt.date(2023, 10, 29), 0), 140 | (dt.date(2023, 10, 30), 1), 141 | (dt.date(2023, 10, 31), 2), 142 | (dt.date(2023, 11, 1), 3), 143 | (dt.date(2023, 11, 2), 4), 144 | (dt.date(2023, 11, 3), 5), 145 | (dt.date(2023, 11, 4), 6), 146 | (dt.date(2023, 11, 5), 0), 147 | (dt.date(2023, 11, 6), 1), 148 | (dt.date(2023, 11, 7), 2), 149 | ) 150 | for date, expected in test_data: 151 | result = gps_weekday(date) 152 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
153 | 154 | 155 | def test_date_from_gps_week(): 156 | expected = dt.date(1980, 1, 6) 157 | gps_week_for_gps_epoch = 0 158 | result = date_from_gps_week(gps_week_for_gps_epoch) 159 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 160 | 161 | 162 | def test_gps_week_limits(): 163 | gps_week = 2222 164 | to_expect = ( 165 | dt.date(2022, 8, 7), 166 | dt.date(2022, 8, 13), 167 | ) 168 | results = gps_week_limits(gps_week) 169 | for result, expected in zip(results, to_expect): 170 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 171 | 172 | 173 | def test_gps_week_range(): 174 | gps_week = 2222 175 | expected_range = [ 176 | dt.date(2022, 8, 7), 177 | dt.date(2022, 8, 8), 178 | dt.date(2022, 8, 9), 179 | dt.date(2022, 8, 10), 180 | dt.date(2022, 8, 11), 181 | dt.date(2022, 8, 12), 182 | dt.date(2022, 8, 13), 183 | ] 184 | resulting_range = gps_week_range(gps_week) 185 | for result, expected in zip(resulting_range, expected_range): 186 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 187 | 188 | 189 | def test_GPSDate_two_digit_year(): 190 | date = GPSDate(1980, 1, 6) 191 | expected = 80 192 | result = date.y 193 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 194 | 195 | 196 | def test_GPSDate_conversion_to_date(): 197 | date = GPSDate(1980, 1, 6) 198 | expected = dt.date(1980, 1, 6) 199 | result = date.date() 200 | assert result == expected, f"Expected {result!r} to be {expected!r} ..." 
@functools.cache
def is_file(ftp: FTP, candidate: str) -> bool:
    """
    Return True if `candidate` resolves to a single file on the server.

    NLST echoes a plain filename back as a one-element listing, whereas a
    directory yields its contents (or an empty list) instead.

    NOTE(review): `functools.cache` keys on the FTP instance, so cached
    results are only reused for the same connection; they may go stale if
    the remote content changes during a long-running process — confirm this
    is acceptable for the intended usage.

    """
    return ftp.nlst(candidate) == [candidate]


@contextmanager
def specific_path(ftp: FTP, path: str) -> Iterator[FTP]:
    """
    Temporarily change the remote working directory to `path`, restoring the
    previous working directory on exit — also when an error occurs.

    """
    cwd = ftp.pwd()
    log.debug(f"switch to {path} ...")
    ftp.cwd(path)
    try:
        yield ftp
    finally:
        log.debug(f" -> switch back to {cwd} ...")
        ftp.cwd(cwd)


def list_files(ftp: FTP, path: str, *, ix_column: int = 8) -> list[str]:
    """
    List files without looking for a cached version.

    Note: Not all the servers that we access support the newer MLSD command, so
    we rely on the older NLST command.

    Works for non-Windows FTP-servers, where the directory listing is what you
    get with `ls`: the filename is taken from column `ix_column` of each line,
    and lines beginning with `d` (directories) are skipped.

    """
    with specific_path(ftp, path) as tmp:
        tmp.retrlines("LIST", (lines := []).append)  # type: ignore
        return [line.split()[ix_column] for line in lines if not line.startswith("d")]


def download(source: Source) -> TransferStatus:
    """
    Download paths resolved from a Source instance.

    Logic:

    *   If the filename does not contain wildcards to match against the remote
        directory listing, simply, download the file.

    *   Otherwise, the filename, which we then know contains wildcards, is
        matched against the list of files inside the remote parent directory to
        get all possible files to download based on the given pattern.

    Note:

    *   The assumption for a RemoteLocalPair instance is that the remote path in
        `path_remote` is a directory in which to find the file denoted `fname`.

        For now, the algorithm does not look inside any directory.

    """
    status = TransferStatus()

    # Log on to the host first, since we need to probe for files directories
    with FTP(source.host) as ftp:
        ftp.login()

        try:
            for pair in source.resolve():

                # Prepare local destination directory
                destination = Path(pair.path_local)
                destination.mkdir(parents=True, exist_ok=True)

                if "*" not in pair.fname:
                    log.debug(
                        f"Filename {pair.fname!r} has no wildcard and is added to the download list."
                    )
                    # If the filename has no wildcard, just download the file
                    candidates = [pair.fname]

                    # NOTE: At this point, we do not know if the filename in
                    # `candidates` exists on the server.

                else:
                    # Get files that match the current source filename
                    log.debug("Searching for wildcard results")
                    candidates = [
                        candidate
                        for candidate in list_files(ftp, pair.path_remote)
                        # This, effectively, resolves the actual filename of the
                        # remote file to download, since the wildcard notation in
                        # the filename specified in the source instance is expanded
                        # using `fnmatch`.
                        if fnmatch(candidate, pair.fname)
                    ]

                    # NOTE: At this point, we do know that the filenames in
                    # `candidates` actually exist on the server.

                if not candidates:
                    log.info(
                        f"Found no files matching {pair.path_remote}/{pair.fname} ..."
                    )
                    status.not_found += 1
                    continue

                # Filter out files already available
                to_download = []
                for fname in candidates:
                    # Get resolved destination filename
                    ofname = destination / fname
                    if already_updated(ofname, max_age=source.max_age):
                        log.debug(f"{ofname.name} already downloaded ...")
                        status.existing += 1
                        continue
                    to_download.append((fname, ofname))

                # Finally, download each of the filenames resolved
                ftp.cwd(pair.path_remote)
                for fname, ofname in to_download:
                    log.info(f"Downloading {fname} ...")
                    try:
                        # NOTE: `pathlib.Path.write_text` can not be used as a
                        # callback function in `retrbinary`, because it is
                        # called for each chunk of data, which means that the
                        # file is overwritten with each new chunk, thus only
                        # preserving the last chunk in the 'downloaded' file.

                        # Therefore, we use the write on the context manager
                        with open(ofname, "wb") as f:
                            ftp.retrbinary(f"RETR {fname}", f.write)

                    except error_perm as e:
                        # Bug fix: `Logger.warn` is a deprecated alias of
                        # `Logger.warning` and emits a DeprecationWarning.
                        log.warning(f"Filename {fname} could not be downloaded ...")
                        log.debug(f"{e}")
                        log.info(f"Deleting empty or incomplete {ofname} ...")
                        ofname.unlink()
                        status.failed += 1
                        continue

                    status.success += 1

        except KeyboardInterrupt:
            # Plain string: the previous f-string had no placeholders.
            log.info("Interrupted by user. Closing FTP connection ...")
            raise

    return status
41 | 42 | """ 43 | 44 | 45 | @dataclass 46 | class CLITroposphereInput: 47 | ipath: Path | str 48 | opath: Path | str 49 | beg: dt.date 50 | end: dt.date 51 | ifname: Path | str 52 | ofname: Path | str 53 | 54 | 55 | def parse_args( 56 | ipath: str | None, 57 | opath: str | None, 58 | gps_week: int | None, 59 | beg: dt.date | None, 60 | end: dt.date | None, 61 | ifname: str | None, 62 | ofname: str | None | None, 63 | ) -> CLITroposphereInput: 64 | """ 65 | Parse generic input from command-line interface and set missing data. 66 | 67 | """ 68 | section = configuration.load().get("troposphere") 69 | 70 | # Core configuration has this section; common configuration may update it ... 71 | assert section is not None 72 | 73 | ipath = ipath or section.get("ipath") 74 | if ipath is None: 75 | raise SystemExit(f"Missing input-path from command or configuration ...") 76 | 77 | opath = opath or section.get("opath") 78 | if opath is None: 79 | raise SystemExit(f"Missing output-path from command or configuration ...") 80 | 81 | if gps_week is not None: 82 | beg, end = gps_week_limits(gps_week) 83 | 84 | if beg is None: 85 | beg = section.get("beg") or dt.date.today() 86 | 87 | if end is None: 88 | end = section.get("end") or beg + dt.timedelta(days=1) 89 | 90 | ifname = ifname or section.get("ifname") 91 | if ifname is None: 92 | raise SystemExit(f"Missing hour-file format from command or configuration ...") 93 | 94 | ofname = ofname or section.get("ofname") 95 | if ofname is None: 96 | raise SystemExit(f"Missing day-file format from command or configuration ...") 97 | 98 | raw = CLITroposphereInput(ipath, opath, beg, end, ifname, ofname) 99 | log.info(f"Run using the following input: {raw} ...") 100 | return raw 101 | 102 | 103 | def common_options[T, **P](func: abc.Callable[P, T]) -> abc.Callable[P, T]: 104 | """ 105 | A single decorator for common options 106 | 107 | Source: https://stackoverflow.com/a/70852267 108 | 109 | """ 110 | 111 | @_options.ipath 112 | 
def cli_wrapper(builder: vmf.DayFileBuilder, method: str, action: str) -> None:
    """
    Single-process executor which prints status to the terminal

    Calls `method` on the given builder; a truthy return value is taken to
    be an error message describing the failure.

    """
    msg = f"{action}ing {builder.day_file} ..."
    log.info(msg)
    failed_msg = getattr(builder, method)()
    if failed_msg:
        print(f"{msg} [red]FAILED[/red]")
        print(f" Error: {failed_msg}")
        # Bug fix: without this return, the SUCCESS line below was printed
        # even when the operation had just been reported as FAILED.
        return
    print(f"{msg} [green]SUCCESS[/green]")


def dispatch(
    args: CLITroposphereInput, method: str, action: str, *, timeout: float = 1
) -> None:
    """
    Run `method` on a day-file builder for each date in the interval, using
    one worker process per builder, and report progress via `cli_wrapper`.

    `timeout` is the maximum number of seconds to wait for each result.

    """
    log.info(f"{action} VMF files for chosen interval {args.beg} to {args.end} ...")
    builders = vmf.day_file_builders(
        args.ipath, args.opath, args.beg, args.end, args.ifname, args.ofname
    )
    wrappers = [partial(cli_wrapper, builder, method, action) for builder in builders]
    try:
        with mp.Pool(processes=N_CPUS) as pool:
            multiple_results = [pool.apply_async(wrapper) for wrapper in wrappers]
            # Wait for each worker; `get` re-raises worker exceptions here.
            # (Explicit loop: the old side-effect list comprehension built a
            # throwaway list.)
            for res in multiple_results:
                res.get(timeout=timeout)
    except KeyboardInterrupt:
        msg = f"{action} interrupted by user ..."
        log.info(msg)
        print(msg)
    except mp.context.TimeoutError:
        msg = (
            f"{action} took more than {timeout} seconds to complete, stopping process."
        )
        log.error(msg)
        print(msg)
@troposphere.command
@common_options
def build(
    ipath: str | None,
    opath: str | None,
    gps_week: int | None,
    beg: dt.date | None,
    end: dt.date | None,
    ifname: str | None,
    ofname: str | None,
) -> None:
    """
    Concatenate hour files into day files.

    Build day file for each date for which there is data available.

    """
    # Resolve CLI input against the configuration, then fan out the work.
    resolved = parse_args(ipath, opath, gps_week, beg, end, ifname, ofname)
    dispatch(resolved, "build", "Build", timeout=600)


@troposphere.command
@common_options
def check(
    ipath: str | None,
    opath: str | None,
    gps_week: int | None,
    beg: dt.date | None,
    end: dt.date | None,
    ifname: str | None,
    ofname: str | None,
) -> None:
    """
    Check that input hour files went into built day files.

    """
    # Resolve CLI input against the configuration, then fan out the work.
    resolved = parse_args(ipath, opath, gps_week, beg, end, ifname, ofname)
    dispatch(resolved, "check", "Check", timeout=600)
215 | 216 | """ 217 | args = parse_args(ipath, opath, gps_week, beg, end, ifname, ofname) 218 | log.info(f"Show data status for chosen interval {args.beg} to {args.end} ...") 219 | builders = vmf.day_file_builders( 220 | args.ipath, args.opath, args.beg, args.end, args.ifname, args.ofname 221 | ) 222 | try: 223 | with mp.Pool(processes=N_CPUS) as pool: 224 | multiple_results = [ 225 | pool.apply_async(builder.status) for builder in builders 226 | ] 227 | print([res.get(timeout=1) for res in multiple_results]) 228 | except KeyboardInterrupt: 229 | msg = "Status retrieval interrupted by user ..." 230 | log.info(msg) 231 | print(msg) 232 | -------------------------------------------------------------------------------- /src/ab/dates.py: -------------------------------------------------------------------------------- 1 | """ 2 | Date handling and specific tools for conversion between different formats. 3 | 4 | """ 5 | 6 | import datetime as dt 7 | from typing import ( 8 | Any, 9 | Final, 10 | ) 11 | 12 | 13 | END_INCLUDED: Final = 1 14 | "Add this to `range` in `date_range` to include both start and end date in range." 15 | 16 | 17 | def asdate(date: dt.datetime | dt.date) -> dt.date: 18 | """ 19 | Return a date instance from a datetime instance or the date itself, if the 20 | input is actually a date instance. 21 | 22 | Raises a TypeError, if the instance is neither a date or datetime instance. 23 | 24 | """ 25 | # NOTE: The order of these checks is important, since `isinstance()` allows 26 | # for a datetime instance be an instance of date. We want to specifically 27 | # check for a datetime instance first, and then anything that derives from 28 | # the date type such as GPSDate will be interpreted as a date and be 29 | # returned without modification. 30 | if isinstance(date, dt.datetime): 31 | return date.date() 32 | if isinstance(date, dt.date): 33 | return date 34 | raise TypeError(f"Expected input date to be datetime instance. 
Got {date!r} ...") 35 | 36 | 37 | def date_range( 38 | beg: dt.date | dt.datetime, 39 | end: dt.date | dt.datetime | None = None, 40 | /, 41 | *, 42 | extend_end_by: int = 0, 43 | ) -> list[dt.date]: 44 | """ 45 | By default, returns a range of dates between and including the given start 46 | and end dates. 47 | 48 | Note: `datetime` instances are truncated to dates, since only `date` instances 49 | have the method `toordinal`. 50 | 51 | """ 52 | # Range validation 53 | if extend_end_by < 0: 54 | raise ValueError(f"{extend_end_by=}, but must be zero or greater.") 55 | 56 | # Allowing end to be None to obtain today's date 57 | if end is None: 58 | end = beg 59 | 60 | # Casting (+ implicitly type validating), if needed, to date instances which 61 | # have the ordinal-properties. 62 | beg = asdate(beg) 63 | end = asdate(end) 64 | 65 | return [ 66 | dt.date.fromordinal(n) 67 | for n in range( 68 | beg.toordinal(), 69 | end.toordinal() + END_INCLUDED + extend_end_by, 70 | ) 71 | ] 72 | 73 | 74 | def doy(d: dt.date | dt.datetime) -> int: 75 | """ 76 | Day of year for a given date. 77 | 78 | """ 79 | return d.timetuple().tm_yday 80 | 81 | 82 | def doy2date(year: int, doy: int) -> dt.date: 83 | """ 84 | Date from year and day-of-year 85 | 86 | """ 87 | return dt.date(year, 1, 1) + dt.timedelta(days=doy - 1) 88 | 89 | 90 | GPS_EPOCH = dt.date(1980, 1, 6) 91 | "First GPS week" 92 | 93 | 94 | def gps_week(date: dt.date | dt.datetime) -> int: 95 | """ 96 | Calculate GPS-week number for given date. 97 | 98 | """ 99 | date = asdate(date) 100 | if date < GPS_EPOCH: 101 | raise ValueError("Date must be on or after first GPS week. Got {date!r} ...") 102 | 103 | return (date - GPS_EPOCH).days // 7 104 | 105 | 106 | def gps_weekday(date: dt.date | dt.datetime) -> int: 107 | """ 108 | Return given date's weekday number (zero-based) in GPS date. 109 | 110 | Python date and datetime instances count from Monday starint at zero. 111 | 112 | GPS weeks begin on Sundays and start at zero. 
113 | 114 | Thus, the weekday number for GPS is Python date instance + 1 modulus 7. 115 | 116 | """ 117 | return (date.weekday() + 1) % 7 118 | 119 | 120 | def date_from_gps_week(gps_week: int | str) -> dt.date: 121 | return GPS_EPOCH + dt.timedelta(7 * int(gps_week)) 122 | 123 | 124 | def gps_week_limits(gps_week: int | str) -> tuple[dt.date, dt.date]: 125 | beg = date_from_gps_week(gps_week) 126 | end = beg + dt.timedelta(days=6) 127 | return (beg, end) 128 | 129 | 130 | def gps_week_range(gps_week: int | str) -> list[dt.date]: 131 | return date_range(*gps_week_limits(gps_week)) 132 | 133 | 134 | class GPSDate(dt.date): 135 | """ 136 | A GPSDate instance is a Python datetime instance with additional properties 137 | and a serialiser of particular data for that date or datetime. 138 | 139 | Both Python date and datetime instances can be wrapped. 140 | 141 | Note: Timezone data are not preserved. 142 | 143 | """ 144 | 145 | @classmethod 146 | def from_date(cls, date: dt.date | dt.datetime, /) -> "GPSDate": 147 | """ 148 | Create a GPSDate instance from an existing date instance. 149 | 150 | """ 151 | date = asdate(date) 152 | return cls(date.year, date.month, date.day) 153 | 154 | @classmethod 155 | def from_gps_week(cls, gps_week: int | str, /) -> "GPSDate": 156 | """ 157 | Create a GPSDate instance from a valid GPS week. 158 | 159 | """ 160 | return cls.from_date(date_from_gps_week(gps_week)) 161 | 162 | @classmethod 163 | def from_year_doy(cls, year: int | str, doy: int | str, /) -> "GPSDate": 164 | """ 165 | Create a GPSDate instance from a valid day-of-year. 166 | 167 | """ 168 | return cls.from_date(doy2date(int(year), int(doy))) 169 | 170 | def date(self) -> dt.date: 171 | """ 172 | Return date as Python date instance. 173 | 174 | """ 175 | return dt.date(self.year, self.month, self.day) 176 | 177 | @property 178 | def gps_week(self) -> int: 179 | """ 180 | Return GPS week number for date. 
181 | 182 | """ 183 | return gps_week(self) 184 | 185 | @property 186 | def gps_weekday(self) -> int: 187 | """ 188 | Return weekday index for GPS week (Sunday is 0). 189 | 190 | """ 191 | return gps_weekday(self) 192 | 193 | @property 194 | def doy(self) -> int: 195 | """ 196 | Return day-of-year count of the date's year. 197 | 198 | """ 199 | return doy(self) 200 | 201 | @property 202 | def y(self) -> int: 203 | """ 204 | Return two-digit year as integer. 205 | 206 | """ 207 | return int(self.strftime("%y")) 208 | 209 | @property 210 | def info(self) -> dict[str, Any]: 211 | """ 212 | Return instance information in serialisable form. 213 | 214 | """ 215 | gps_week_beg = self.from_gps_week(self.gps_week) 216 | gps_week_mid = gps_week_beg + dt.timedelta(days=3) 217 | gps_week_end = gps_week_beg + dt.timedelta(days=6) 218 | return dict( 219 | weekday=self.strftime("%A"), 220 | timestamp=self.isoformat()[:10], 221 | doy=self.doy, 222 | iso_week=self.isocalendar()[1], 223 | iso_weekday=self.isocalendar()[2], 224 | gps_week=self.gps_week, 225 | gps_weekday=self.gps_weekday, 226 | # GPS week corresponds to a specific date without timestamp. 
227 | gps_week_beg=gps_week_beg.isoformat()[:10], 228 | gps_week_mid=gps_week_mid.isoformat()[:10], 229 | gps_week_end=gps_week_end.isoformat()[:10], 230 | ) 231 | 232 | 233 | def dates_to_gps_date(dates: list[dt.date]) -> list[GPSDate]: 234 | return [GPSDate.from_date(date) for date in dates] 235 | -------------------------------------------------------------------------------- /src/ab/data/source.py: -------------------------------------------------------------------------------- 1 | """ 2 | Handle data sources 3 | 4 | """ 5 | 6 | import os 7 | from typing import Any 8 | from collections.abc import Iterable 9 | from dataclasses import dataclass 10 | import math 11 | from pathlib import Path 12 | from urllib.parse import ( 13 | urlparse, 14 | ParseResult, 15 | ) 16 | import logging 17 | 18 | from ab.parameters import permutations 19 | 20 | 21 | log = logging.getLogger(__name__) 22 | 23 | 24 | @dataclass 25 | class RemoteLocalPair: 26 | """ 27 | This is a class for transferring a remote filepath and local destination 28 | directory path with some additional values being derived from the initial 29 | input to make it easier to handle for a function that makes the actual 30 | transfer of data from the remote souce and the local destination. 31 | 32 | The post-init method splits uo given URI into to a remote directory path and 33 | the filename itself. 34 | 35 | """ 36 | 37 | uri: str 38 | path_local: str 39 | 40 | def __post_init__(self) -> None: 41 | # Assumption: the URI is a path to a file 42 | 43 | # Code annotated with an example: 44 | 45 | # Given: uri = 'https://example.com/filename.txt' 46 | parsed: ParseResult = urlparse(self.uri) 47 | 48 | # Then: `parsed.path` will be the full path (without protocol) to 49 | # `filename.txt` 50 | filepath: Path = Path(parsed.path) 51 | 52 | # From `filepath` we can get the remote directory path and the filename 53 | # of the file to download. 
54 | self.path_remote: str = str(filepath.parent) 55 | self.fname: str = str(filepath.name) 56 | 57 | 58 | @dataclass 59 | class Source: 60 | """ 61 | A source object with one path (pattern) and specific or all files to get. 62 | 63 | Filenames are resolved offline and can be used to match against the source 64 | path, when connected to the remote server. 65 | 66 | Steps: 67 | 68 | * Parse url, to get the path isolated 69 | * Resolve path combinations 70 | * Resolve filenames 71 | 72 | A source is a source and not a downloader. The source can yield a list of 73 | files to download if any filenames are specified. 74 | 75 | A Source instance can tell what method (scheme in URL language) is used so 76 | that the user can use the right tool to get the source. 77 | 78 | Source may not be the best name, since it contains information about the 79 | corresponding file destination(s). 80 | 81 | A Source instance handles the following input: 82 | 83 | * The destination is always assumed to be a directory in which to store 84 | the downloaded files given their resolved paths. 85 | 86 | - Given a source file, the destination directory is also seen as 87 | completely specified in that the file is simply put into the 88 | destination directory. 89 | 90 | * If the source URL points to a directory, all files (no sub directories) 91 | within this directory are downloaded to the destination directory. The 92 | destination directory may have a different name. 93 | 94 | - The sign that the entire FTP directory should be downloaded is that 95 | there are no specific filenames given. 96 | 97 | * Given a source with a dictionary of parameters, a list of dictionaries 98 | with each parameter-combination (one value for each parameter) is 99 | generated, and the source may thereby have many paths for the server. 100 | 101 | - If filenames are given, they are added to each resolved path. 
102 | 103 | * Any filenames specified are resolved in the following manner: 104 | 105 | - If there are more paths given a set of parameters, the specified 106 | source files are downloaded for each of the paths resolved. 107 | 108 | - Filenames may contain UNIX wildcard patterns as can be matched 109 | against using Python's fnmatch module. (See what wildards are 110 | matchable in the documentation for fnmatch.) 111 | 112 | Questions addressed: 113 | 114 | * Where to add the final URL parsing that will give the host/domain, path 115 | to download from? 116 | 117 | * How to best return an interable of the remote and local filepaths to the 118 | user? 119 | 120 | - For HTTP, this is easy, since the paths can be completely specified 121 | from the beginning, as they should be known in advance, since there 122 | is no way of listing a directory for the one HTTP source we have so 123 | far. 124 | 125 | For HTTP, there is no need to split the URL up into smaller parts 126 | (host, path, filename). 127 | 128 | - For FTP, the need for downloading data using wildcard characters, 129 | means that the filenames must come from an active connection to the 130 | FTP-server so that the directory listing can be obtained, and, here, 131 | the filenames are used without their full path, since the protocol 132 | forces one to change directory before getting a file in that 133 | directory. TODO: Rewrite and/or move to RemoteLocalPair __doc__ 134 | 135 | * For a given Source instance, the same connection should be used for each 136 | filename that is to be downloaded from the source paths. 137 | 138 | - This constrains where the final resolution of the filenames should 139 | be performed. 140 | 141 | * What if the parameter is a range? 142 | 143 | - So far, a range of dates can be made from the configuration file 144 | using the `!DateRange` YAML tag and a mapping with `beg` and `end` 145 | being the beginning and end dates (both will be included). 
146 | 147 | To implement (maybe): 148 | 149 | * What if the parameter is given as an open range in the sense that all 150 | subsequent data, if available, should be downloaded? 151 | 152 | """ 153 | 154 | identifier: str 155 | description: str 156 | url: str | Path 157 | destination: str | Path 158 | filenames: list[str | Path] | None = None 159 | parameters: dict[str, Iterable[Any]] | None = None 160 | max_age: int | float = math.inf 161 | 162 | def __post_init__(self) -> None: 163 | # Path version for path joining 164 | self.destination = Path(self.destination) 165 | 166 | # String version for formatting 167 | self.url_ = str(self.url) 168 | self.destination_ = str(self.destination) 169 | 170 | # Have each component 171 | self._parsed = urlparse(self.url_) 172 | self.protocol = self._parsed.scheme 173 | # In order to ensure clear API semantics for `self.protocol`, override 174 | # default (and correct) result (``) in `ParseResult.scheme`, when the 175 | # path is local (begins with `/`). 176 | if self.protocol == "" and self.url_.startswith("/"): 177 | self.protocol = "file" 178 | self.host = self._parsed.netloc 179 | 180 | def resolve(self) -> list[RemoteLocalPair]: 181 | """ 182 | Return all combinations of URL + filename (if any of the latter). 183 | 184 | * URIs are obtained for each filename, if given. 185 | * Each URI is then expanded so that all parameter combinations are used. 186 | 187 | """ 188 | if self.filenames: 189 | urls = [os.path.join(self.url_, filename) for filename in self.filenames] 190 | else: 191 | urls = [self.url_] 192 | 193 | if self.parameters is None: 194 | return [RemoteLocalPair(url, self.destination_) for url in urls] 195 | 196 | return [ 197 | RemoteLocalPair( 198 | url.format(**permutation), self.destination_.format(**permutation) 199 | ) 200 | for url in urls 201 | for permutation in permutations(self.parameters) 202 | ] 203 | --------------------------------------------------------------------------------