├── .dockerignore ├── .env.template ├── .github ├── scripts │ └── make_debian.sh └── workflows │ ├── CI.yml │ ├── DockerCI.yml │ └── DockerPackage.yml ├── .gitignore ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── aws_local ├── Cargo.toml └── src │ ├── README.md │ └── lib.rs ├── boost_manager ├── Cargo.toml ├── README.md ├── migrations │ ├── 1_setup.sql │ ├── 2_meta.sql │ ├── 3_activated_hexes.sql │ └── 4_files_processed.sql ├── pkg │ └── settings-template.toml ├── src │ ├── activator.rs │ ├── db.rs │ ├── lib.rs │ ├── main.rs │ ├── purger.rs │ ├── settings.rs │ ├── telemetry.rs │ ├── updater.rs │ └── watcher.rs └── tests │ └── integrations │ ├── activator_tests.rs │ ├── common │ └── mod.rs │ ├── main.rs │ ├── purger_tests.rs │ ├── updater_tests.rs │ └── watcher_tests.rs ├── build.rs ├── coverage_map ├── Cargo.toml └── src │ ├── indoor.rs │ ├── lib.rs │ └── outdoor.rs ├── coverage_point_calculator ├── Cargo.toml ├── src │ ├── hexes.rs │ ├── lib.rs │ ├── location.rs │ ├── service_provider_boosting.rs │ └── speedtest.rs └── tests │ └── coverage_point_calculator.rs ├── custom_tracing ├── Cargo.toml └── src │ ├── grpc_layer.rs │ ├── http_layer.rs │ ├── lib.rs │ └── settings.rs ├── db_store ├── Cargo.toml └── src │ ├── error.rs │ ├── iam_auth_pool.rs │ ├── lib.rs │ ├── meta.rs │ ├── metric_tracker.rs │ └── settings.rs ├── denylist ├── Cargo.toml └── src │ ├── client.rs │ ├── denylist.rs │ ├── error.rs │ ├── lib.rs │ ├── models │ ├── metadata.rs │ └── mod.rs │ └── settings.rs ├── file_store ├── Cargo.toml └── src │ ├── cli │ ├── bucket.rs │ ├── dump.rs │ ├── dump_mobile_rewards.rs │ ├── info.rs │ └── mod.rs │ ├── coverage.rs │ ├── entropy_report.rs │ ├── error.rs │ ├── file_info.rs │ ├── file_info_poller.rs │ ├── file_sink.rs │ ├── file_source.rs │ ├── file_store.rs │ ├── file_upload.rs │ ├── hex_boost.rs │ ├── iot_beacon_report.rs │ ├── iot_invalid_poc.rs │ ├── iot_packet.rs │ ├── iot_valid_poc.rs │ ├── iot_witness_report.rs │ ├── lib.rs │ ├── main.rs │ ├── mobile_ban.rs │ ├── mobile_radio_invalidated_threshold.rs │ ├── mobile_radio_threshold.rs │ ├── mobile_session.rs │ ├── mobile_subscriber.rs │ ├── mobile_transfer.rs │ ├── reward_manifest.rs │ ├── settings.rs │ ├── speedtest.rs │ ├── subscriber_verified_mapping_event.rs │ ├── subscriber_verified_mapping_event_ingest_report.rs │ ├── traits │ ├── file_sink_write.rs │ ├── mod.rs │ ├── msg_bytes.rs │ ├── msg_decode.rs │ ├── msg_timestamp.rs │ ├── msg_verify.rs │ └── report_id.rs │ ├── unique_connections.rs │ ├── usage_counts.rs │ ├── verified_subscriber_verified_mapping_event_ingest_report.rs │ └── wifi_heartbeat.rs ├── hex_assignments ├── Cargo.toml └── src │ ├── assignment.rs │ ├── footfall.rs │ ├── landtype.rs │ ├── lib.rs │ ├── service_provider_override.rs │ └── urbanization.rs ├── ingest ├── Cargo.toml ├── README.md ├── pkg │ └── settings-template.toml ├── src │ ├── lib.rs │ ├── main.rs │ ├── server_iot.rs │ ├── server_mobile.rs │ └── settings.rs └── tests │ ├── common │ └── mod.rs │ ├── iot_ingest.rs │ └── mobile_ingest.rs ├── iot_config ├── Cargo.toml ├── README.md ├── migrations │ ├── 10_helium_devaddrs.sql │ ├── 11_skf_max_copies.sql │ ├── 12_admin_keys_name.sql │ ├── 1_setup.sql │ ├── 20230626183323_skip_empty_skf_routes.sql │ ├── 20231101175438_track_deletes.sql │ ├── 2_organizations.sql │ ├── 3_routes.sql │ ├── 4_regions.sql │ ├── 5_admin_keys.sql │ ├── 6_session_key_filters.sql │ ├── 7_oracle_key_type.sql │ ├── 8_skfs_by_route.sql │ └── 9_delegate_keys.sql ├── pkg │ └── 
settings-template.toml ├── src │ ├── admin.rs │ ├── admin_service.rs │ ├── client │ │ ├── mod.rs │ │ ├── org_client.rs │ │ ├── settings.rs │ │ └── sub_dao_client.rs │ ├── db_cleaner.rs │ ├── gateway_info.rs │ ├── gateway_service.rs │ ├── helium_netids.rs │ ├── lib.rs │ ├── lora_field.rs │ ├── main.rs │ ├── org.rs │ ├── org_service.rs │ ├── region_map.rs │ ├── route.rs │ ├── route_service.rs │ ├── settings.rs │ ├── sub_dao_epoch_reward_info.rs │ ├── sub_dao_service.rs │ └── telemetry.rs └── tests │ └── route_service.rs ├── iot_packet_verifier ├── Cargo.toml ├── README.md ├── migrations │ ├── 1_setup.sql │ ├── 2_burns.sql │ ├── 3_files_processed.sql │ ├── 4_pending_txns.sql │ ├── 5_add_time_zone.sql │ └── 6_files_processed_process_name.sql ├── pkg │ └── settings-template.toml ├── src │ ├── balances.rs │ ├── burner.rs │ ├── daemon.rs │ ├── lib.rs │ ├── main.rs │ ├── pending.rs │ ├── settings.rs │ └── verifier.rs └── tests │ └── integration_tests.rs ├── iot_verifier ├── Cargo.toml ├── README.md ├── migrations │ ├── 10_gateway_dc_shares.sql │ ├── 11_files_processed.sql │ ├── 12_bootstrap_disable_data_checks.sql │ ├── 13_files_processed_process_name.sql │ ├── 14_last_witness.sql │ ├── 15_last_beacon_reciprocity.sql │ ├── 1_setup.sql │ ├── 2_meta.sql │ ├── 3_poc_report.sql │ ├── 4_last_beacon.sql │ ├── 5_entropy.sql │ ├── 6_gateway_shares.sql │ ├── 7_bootstrap_reward_time.sql │ └── 9_delete_meta_report_entry.sql ├── pkg │ └── settings-template.toml ├── src │ ├── entropy.rs │ ├── entropy_loader.rs │ ├── gateway_cache.rs │ ├── gateway_updater.rs │ ├── hex_density.rs │ ├── last_beacon.rs │ ├── last_beacon_reciprocity.rs │ ├── last_witness.rs │ ├── lib.rs │ ├── loader.rs │ ├── main.rs │ ├── meta.rs │ ├── packet_loader.rs │ ├── poc.rs │ ├── poc_report.rs │ ├── purger.rs │ ├── region_cache.rs │ ├── reward_share.rs │ ├── rewarder.rs │ ├── runner.rs │ ├── settings.rs │ ├── telemetry.rs │ ├── tx_scaler.rs │ └── witness_updater.rs └── tests │ └── integrations │ ├── common │ └── mod.rs │ ├── main.rs │ ├── purger_tests.rs │ ├── rewarder_operations.rs │ ├── rewarder_oracles.rs │ ├── rewarder_poc_dc.rs │ └── runner_tests.rs ├── metrics ├── Cargo.toml └── src │ ├── client_requests.rs │ ├── error.rs │ ├── lib.rs │ └── settings.rs ├── mobile_config ├── Cargo.toml ├── README.md ├── migrations │ ├── 1_setup.sql │ ├── 20230708171204_add_pcs_key_role.sql │ ├── 20250411184550_add_banning_key_role.sql │ ├── 2_registered_keys.sql │ ├── 3_carrier_keys.sql │ ├── 5_carrier_service.sql │ ├── 6_registered_keys_name.sql │ └── 7_mobile_radio_tracker.sql ├── pkg │ └── settings-template.toml ├── src │ ├── admin_service.rs │ ├── authorization_service.rs │ ├── boosted_hex_info.rs │ ├── carrier_service.rs │ ├── client │ │ ├── authorization_client.rs │ │ ├── carrier_service_client.rs │ │ ├── entity_client.rs │ │ ├── gateway_client.rs │ │ ├── hex_boosting_client.rs │ │ ├── mod.rs │ │ ├── settings.rs │ │ └── sub_dao_client.rs │ ├── entity_service.rs │ ├── gateway_info.rs │ ├── gateway_service.rs │ ├── hex_boosting_service.rs │ ├── key_cache.rs │ ├── lib.rs │ ├── main.rs │ ├── mobile_radio_tracker.rs │ ├── settings.rs │ ├── sub_dao_epoch_reward_info.rs │ ├── sub_dao_service.rs │ └── telemetry.rs └── tests │ ├── common │ └── mod.rs │ ├── gateway_service.rs │ └── mobile_radio_tracker.rs ├── mobile_config_cli ├── Cargo.toml └── src │ ├── client.rs │ ├── cmds │ ├── admin.rs │ ├── authorization.rs │ ├── carrier.rs │ ├── entity.rs │ ├── env.rs │ ├── gateway.rs │ └── mod.rs │ ├── lib.rs │ └── main.rs ├── mobile_packet_verifier ├── Cargo.toml 
├── README.md ├── migrations │ ├── 1_setup.sql │ ├── 2_data_transfer_sessions.sql │ ├── 3_processed_files.sql │ ├── 4_event_ids.sql │ ├── 5_rewardable_bytes.sql │ ├── 6_files_processed_process_name.sql │ ├── 7_pending_txns.sql │ ├── 8_pending_data_transfer_sessions.sql │ └── 9_hotspot_bans.sql ├── pkg │ └── settings-template.toml ├── src │ ├── accumulate.rs │ ├── banning │ │ ├── db.rs │ │ ├── ingestor.rs │ │ ├── mod.rs │ │ └── purger.rs │ ├── burner.rs │ ├── daemon.rs │ ├── event_ids.rs │ ├── lib.rs │ ├── main.rs │ ├── pending_burns.rs │ ├── pending_txns.rs │ └── settings.rs └── tests │ └── integrations │ ├── accumulate_sessions.rs │ ├── banning.rs │ ├── burn_metric.rs │ ├── burner.rs │ ├── common │ └── mod.rs │ └── main.rs ├── mobile_verifier ├── Cargo.toml ├── README.md ├── migrations │ ├── 10_timestamptz_speedtests.sql │ ├── 11_files_processed.sql │ ├── 12_disable_complete_data_checks_until.sql │ ├── 13_data_session.sql │ ├── 14_subscriber_location.sql │ ├── 15_speedtests_one_to_one.sql │ ├── 16_wifi_heartbeat.sql │ ├── 17_modeled_coverage.sql │ ├── 18_invalidated_at.sql │ ├── 19_signal_power.sql │ ├── 1_setup.sql │ ├── 20_remove_null_coverage_objects.sql │ ├── 21_index_hex_coverage.sql │ ├── 22_coverage_objects.sql │ ├── 23_files_processed_process_name.sql │ ├── 24_location_trust_multiplier.sql │ ├── 25_make_distance_to_asserted_not_null.sql │ ├── 26_urbanized.sql │ ├── 27_subscriber_radio_threshold.sql │ ├── 28_radio_threshold_cbsd_id_emptry_strings.sql │ ├── 29_footfall.sql │ ├── 2_meta.sql │ ├── 30_save_lat_and_lon.sql │ ├── 31_reset_validation_timestamps.sql │ ├── 32_landtype.sql │ ├── 33_data_sets.sql │ ├── 34_sp_boosted_rewards_bans.sql │ ├── 35_subscriber_verified_mapping_event.sql │ ├── 36_sp_boosted_bans_type.sql │ ├── 37_sp_promotions.sql │ ├── 38_coverage_objects_cascade_delete.sql │ ├── 39_unique_connections-up.sql │ ├── 3_heartbeats.sql │ ├── 40_service_provider_override-up.sql │ ├── 41_data_session_add_rewardable_bytes.sql │ ├── 42_subscriber_mapping_activity.sql │ ├── 43_hotspot_bans.sql │ ├── 44_data_session_add_burn_timestamp.sql │ ├── 45_drop_grandfathered_radio_threshold.sql │ ├── 46_radio_threshold_rework_unique_idx.sql │ ├── 47_drop_cbrs_heartbeats.sql │ ├── 48_drop_old_hex_coverage.sql │ ├── 49_delete_cbrs_from_seniority.sql │ ├── 4_heartbeats.sql │ ├── 50_delete_cbrs_from_coverage_objects.sql │ ├── 5_speedtests.sql │ ├── 6_heartbeats_cbsd_id_index.sql │ ├── 7_multiple_heartbeats.sql │ ├── 8_out_of_order_heartbeats.sql │ └── 9_realtime_heartbeats.sql ├── pkg │ └── settings-template.toml ├── src │ ├── banning │ │ ├── db.rs │ │ ├── ingestor.rs │ │ ├── mod.rs │ │ └── sp_boosted_rewards_bans.rs │ ├── boosting_oracles │ │ ├── data_sets.rs │ │ └── mod.rs │ ├── cell_type.rs │ ├── cli │ │ ├── mod.rs │ │ ├── reward_from_db.rs │ │ ├── server.rs │ │ ├── service_provider_promotions.rs │ │ └── verify_disktree.rs │ ├── coverage.rs │ ├── data_session.rs │ ├── geofence.rs │ ├── heartbeats │ │ ├── last_location.rs │ │ ├── mod.rs │ │ ├── valid_radios.sql │ │ └── wifi.rs │ ├── lib.rs │ ├── main.rs │ ├── radio_threshold.rs │ ├── reward_shares.rs │ ├── reward_shares │ │ └── radio_reward_v2.rs │ ├── rewarder.rs │ ├── rewarder │ │ ├── boosted_hex_eligibility.rs │ │ └── db.rs │ ├── seniority.rs │ ├── service_provider │ │ ├── dc_sessions.rs │ │ ├── mod.rs │ │ ├── promotions.rs │ │ └── reward.rs │ ├── settings.rs │ ├── speedtests.rs │ ├── speedtests_average.rs │ ├── subscriber_mapping_activity.rs │ ├── subscriber_mapping_activity │ │ └── db.rs │ ├── telemetry.rs │ └── unique_connections │ 
│ ├── db.rs │ │ ├── ingestor.rs │ │ └── mod.rs └── tests │ └── integrations │ ├── banning.rs │ ├── boosting_oracles.rs │ ├── common │ └── mod.rs │ ├── coverage.rs │ ├── fixtures │ ├── covered_stream.sql │ ├── footfall.1722895200000.gz │ ├── footfall.1732895200000.gz │ ├── landtype.1722895200000.gz │ ├── service_provider_override.1739404800000.gz │ └── urbanization.1722895200000.gz │ ├── heartbeats.rs │ ├── hex_boosting.rs │ ├── last_location.rs │ ├── main.rs │ ├── modeled_coverage.rs │ ├── rewarder_mappers.rs │ ├── rewarder_oracles.rs │ ├── rewarder_poc_dc.rs │ ├── rewarder_sp_rewards.rs │ ├── seniority.rs │ └── speedtests.rs ├── poc_entropy ├── Cargo.toml ├── README.md ├── pkg │ └── settings-template.toml └── src │ ├── entropy_generator.rs │ ├── lib.rs │ ├── main.rs │ ├── server.rs │ └── settings.rs ├── price ├── Cargo.toml ├── README.md ├── pkg │ └── settings-template.toml └── src │ ├── cli │ ├── check.rs │ └── mod.rs │ ├── lib.rs │ ├── main.rs │ ├── metrics.rs │ ├── price_generator.rs │ ├── price_tracker.rs │ └── settings.rs ├── reward_index ├── Cargo.toml ├── README.md ├── migrations │ ├── 10_add_service_provider_reward_type.sql │ ├── 11_add_mobile_promotion_reward_type.sql │ ├── 1_setup.sql │ ├── 2_meta.sql │ ├── 3_index.sql │ ├── 4_files_processed.sql │ ├── 5_add_type_to_index.sql │ ├── 6_add_subscriber_reward_type.sql │ ├── 7_files_processed_process_name.sql │ ├── 8_add_mobile_unallocated_reward_type.sql │ └── 9_add_iot_unallocated_reward_type.sql ├── pkg │ └── settings-template.toml ├── src │ ├── db.rs │ ├── extract.rs │ ├── indexer.rs │ ├── lib.rs │ ├── main.rs │ ├── settings.rs │ └── telemetry.rs └── tests │ └── integrations │ ├── common │ └── mod.rs │ ├── iot.rs │ ├── main.rs │ └── mobile.rs ├── reward_scheduler ├── Cargo.toml └── src │ └── lib.rs ├── rust-toolchain.toml ├── solana ├── Cargo.toml └── src │ ├── burn.rs │ ├── carrier.rs │ ├── lib.rs │ ├── main.rs │ └── start_boost.rs └── task_manager ├── Cargo.toml └── src ├── lib.rs └── select_all.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | .github 2 | target/ 3 | **/target/ 4 | .dockerignore 5 | .env 6 | .env.template 7 | .gitignore 8 | build.rs 9 | Dockerfile 10 | -------------------------------------------------------------------------------- /.env.template: -------------------------------------------------------------------------------- 1 | export DATABASE_URL=postgresql://postgres:password@localhost:5433/poclora-dev 2 | export GRPC_SOCKET_ADDR=127.0.0.1:3001 3 | export API_TOKEN=dev 4 | export API_RO_TOKEN=dev 5 | export FOLLOWER_URI=http://localhost:8081 6 | export BUCKET_ENDPOINT=http://localhost:9000 7 | export AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID" 8 | export AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY" 9 | export INGEST_STORE="~/" 10 | export VERIFIER_STORE="~/" 11 | export INGESTOR_BUCKET_ENDPOINT="poclora-ingest" 12 | export VERIFIER_BUCKET_ENDPOINT="poclora-verifier" 13 | export GRPC_SERVER_MODE=iot 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | *.key* 4 | perf.data* 5 | flamegraph.* 6 | *.png 7 | *.json 8 | .envrc 9 | .env 10 | *.DS_STORE 11 | 12 | !/minio/bucket-policy.json 13 | *.bak 14 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute to 
this repository 2 | 3 | We value contributions from the community and will do everything we 4 | can to get them reviewed in a timely fashion. If you have code to send 5 | our way or a bug to report: 6 | 7 | - **Contributing Code**: If you have new code or a bug fix, fork this 8 | repo, create a logically-named branch, and [submit a PR against this 9 | repo](https://github.com/helium/oracles). Include a 10 | write-up of the PR with details on what it does. 11 | 12 | - **Reporting Bugs**: Open an issue [against this 13 | repo](https://github.com/helium/oracles/issues) with as much 14 | detail as you can. At the very least you'll include steps to 15 | reproduce the problem. 16 | 17 | This project is intended to be a safe, welcoming space for 18 | collaboration, and contributors are expected to adhere to the 19 | [Contributor Covenant Code of 20 | Conduct](http://contributor-covenant.org/). 21 | 22 | Above all, thank you for taking the time to be a part of the Helium community. 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # BASE 2 | FROM rust:bookworm AS base 3 | 4 | RUN apt-get update && apt-get install -y \ 5 | protobuf-compiler 6 | 7 | # BUILDER 8 | FROM base AS builder 9 | 10 | WORKDIR /app 11 | 12 | COPY . . 13 | RUN cargo fetch 14 | 15 | ARG PACKAGE 16 | RUN cargo build --release -p ${PACKAGE} 17 | 18 | 19 | # RUNNER 20 | FROM debian:bookworm-slim AS runner 21 | 22 | RUN apt-get update && apt-get install -y \ 23 | libssl-dev \ 24 | ca-certificates 25 | 26 | ARG PACKAGE 27 | 28 | COPY --from=builder /app/target/release/${PACKAGE} /opt/${PACKAGE}/bin/${PACKAGE} 29 | 30 | ENV PACKAGE=${PACKAGE} 31 | 32 | CMD ["/opt/${PACKAGE}/bin/${PACKAGE}", "-c", "/opt/${PACKAGE}/etc/settings.toml", "server"] -------------------------------------------------------------------------------- /aws_local/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aws-local" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | license.workspace = true 6 | edition.workspace = true 7 | 8 | [dependencies] 9 | aws-config = {workspace = true} 10 | aws-sdk-s3 = {workspace = true} 11 | aws-types = {workspace = true, features = ["hardcoded-credentials"]} 12 | tokio = {workspace = true} 13 | triggered = {workspace = true} 14 | tonic = {workspace = true} 15 | chrono = {workspace = true} 16 | prost = {workspace = true} 17 | anyhow = {workspace = true} 18 | uuid = {workspace = true} 19 | tempfile = {workspace = true} 20 | file-store = { path = "../file_store", features = ["local"] } 21 | -------------------------------------------------------------------------------- /aws_local/src/README.md: -------------------------------------------------------------------------------- 1 | Helpers for running tests with [localstack](https://www.localstack.cloud/) 2 | -------------------------------------------------------------------------------- /boost_manager/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "boost-manager" 3 | version = "0.1.0" 4 | description = "Hex boosting manager" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | 10 | [dependencies] 11 | axum = { version = "0", features = ["tracing"] } 12 | 13 | anyhow = { workspace = true } 14 | bs58 = { workspace = true } 15 | config = { workspace = true } 16 | clap = { 
workspace = true } 17 | thiserror = { workspace = true } 18 | serde = { workspace = true } 19 | serde_json = { workspace = true } 20 | sqlx = { workspace = true } 21 | base64 = { workspace = true } 22 | sha2 = { workspace = true } 23 | lazy_static = { workspace = true } 24 | triggered = { workspace = true } 25 | futures = { workspace = true } 26 | futures-util = { workspace = true } 27 | prost = { workspace = true } 28 | once_cell = { workspace = true } 29 | tokio = { workspace = true } 30 | tokio-util = { workspace = true } 31 | tracing = { workspace = true } 32 | tracing-subscriber = { workspace = true } 33 | chrono = { workspace = true, features = ["serde"] } 34 | metrics = { workspace = true } 35 | metrics-exporter-prometheus = { workspace = true } 36 | helium-proto = { workspace = true } 37 | rust_decimal = { workspace = true } 38 | rust_decimal_macros = { workspace = true } 39 | tonic = { workspace = true } 40 | rand = { workspace = true } 41 | async-trait = { workspace = true } 42 | http = { workspace = true } 43 | http-serde = { workspace = true } 44 | humantime-serde = { workspace = true } 45 | hextree = { workspace = true } 46 | 47 | custom-tracing = { path = "../custom_tracing" } 48 | db-store = { path = "../db_store" } 49 | file-store = { path = "../file_store" } 50 | mobile-config = { path = "../mobile_config" } 51 | poc-metrics = { path = "../metrics" } 52 | solana = { path = "../solana" } 53 | task-manager = { path = "../task_manager" } 54 | -------------------------------------------------------------------------------- /boost_manager/README.md: -------------------------------------------------------------------------------- 1 | # Boost Manager 2 | 3 | ### S3 Inputs 4 | 5 | | File Type | Pattern | Proto | 6 | | :--- |:-----------------------| :-- | 7 | | RewardManifest | reward_manifest.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/reward_manifest.proto#L5) | 8 | | MobileRewardShare | mobile_reward_share.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_lora.proto#L171) | 9 | 10 | 11 | -------------------------------------------------------------------------------- /boost_manager/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`. 
5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /boost_manager/migrations/2_meta.sql: -------------------------------------------------------------------------------- 1 | create table meta ( 2 | key text primary key not null, 3 | value text 4 | ); 5 | -------------------------------------------------------------------------------- /boost_manager/migrations/3_activated_hexes.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE onchain_status AS ENUM ( 2 | 'queued', 3 | 'pending', 4 | 'success', 5 | 'failed', 6 | 'cancelled' 7 | ); 8 | 9 | create table activated_hexes ( 10 | location bigint primary key not null, 11 | activation_ts timestamptz not null, 12 | boosted_hex_pubkey text not null, 13 | boost_config_pubkey text not null, 14 | status onchain_status not null, 15 | txn_id text, 16 | retries integer not null default 0, 17 | inserted_at timestamptz default now(), 18 | updated_at timestamptz default now() 19 | ); 20 | 21 | -------------------------------------------------------------------------------- /boost_manager/migrations/4_files_processed.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE files_processed ( 2 | file_name VARCHAR PRIMARY KEY, 3 | file_type VARCHAR NOT NULL, 4 | file_timestamp TIMESTAMPTZ NOT NULL, 5 | processed_at TIMESTAMPTZ NOT NULL, 6 | process_name text not null default 'default' 7 | ); 8 | -------------------------------------------------------------------------------- /boost_manager/pkg/settings-template.toml: -------------------------------------------------------------------------------- 1 | log = "boost_manager=info,solana=debug" 2 | 3 | # Cache location for generated boost manager outputs; Required 4 | cache = "/tmp/oracles/boost-manager" 5 | 6 | start_after = "2024-12-15 01:00:00Z" 7 | 8 | enable_solana_integration = true 9 | 10 | activation_check_interval = 30 11 | 12 | [solana] 13 | # Solana RPC. This may contain a secret 14 | rpc_url = "https://api.devnet.solana.com" 15 | # Path to the keypair used to sign data credit burn solana transactions 16 | start_authority_keypair = "" 17 | # Public key of the hex boost authority 18 | hexboost_authority_pubkey = "" 19 | # Solana cluster to use. "devnet" or "mainnet" 20 | cluster = "devnet" 21 | 22 | # 23 | [database] 24 | url = "postgresql://postgres:postgres@localhost:5432/hexboosting" 25 | # Max connections to the database. 26 | max_connections = 10 27 | 28 | [verifier] 29 | bucket = "mobile-verified" 30 | 31 | [output] 32 | bucket = "mobile-verified" 33 | 34 | [mobile_config_client] 35 | url = "http://localhost:6090" 36 | config_pubkey = "" 37 | signing_keypair = "" 38 | 39 | 40 | [metrics] 41 | 42 | # Endpoint for metrics. 
Default below 43 | # 44 | endpoint = "127.0.0.1:19001" 45 | -------------------------------------------------------------------------------- /boost_manager/src/lib.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | pub mod activator; 4 | pub mod db; 5 | pub mod purger; 6 | pub mod settings; 7 | pub mod telemetry; 8 | pub use settings::Settings; 9 | pub mod updater; 10 | pub mod watcher; 11 | 12 | #[derive(Debug, Eq, Hash, PartialEq, Copy, Clone, Deserialize, Serialize, sqlx::Type)] 13 | #[sqlx(type_name = "onchain_status")] 14 | #[sqlx(rename_all = "lowercase")] 15 | pub enum OnChainStatus { 16 | Queued = 0, 17 | Pending = 1, 18 | Success = 2, 19 | Failed = 3, 20 | Cancelled = 4, 21 | } 22 | -------------------------------------------------------------------------------- /boost_manager/src/purger.rs: -------------------------------------------------------------------------------- 1 | use crate::db; 2 | use futures::{future::LocalBoxFuture, TryFutureExt}; 3 | use sqlx::{Pool, Postgres}; 4 | use std::time::Duration; 5 | use task_manager::ManagedTask; 6 | 7 | const PURGE_INTERVAL: Duration = Duration::from_secs(30); 8 | 9 | pub struct Purger { 10 | pool: Pool, 11 | retention_period: Duration, 12 | } 13 | 14 | impl ManagedTask for Purger { 15 | fn start_task( 16 | self: Box, 17 | shutdown: triggered::Listener, 18 | ) -> LocalBoxFuture<'static, anyhow::Result<()>> { 19 | let handle = tokio::spawn(self.run(shutdown)); 20 | Box::pin( 21 | handle 22 | .map_err(anyhow::Error::from) 23 | .and_then(|result| async move { result }), 24 | ) 25 | } 26 | } 27 | 28 | impl Purger { 29 | pub fn new(pool: Pool, retention_period: Duration) -> Self { 30 | Self { 31 | pool, 32 | retention_period, 33 | } 34 | } 35 | 36 | async fn run(self, shutdown: triggered::Listener) -> anyhow::Result<()> { 37 | tracing::info!("starting Purger"); 38 | loop { 39 | tokio::select! 
{ 40 | biased; 41 | _ = shutdown.clone() => break, 42 | _ = tokio::time::sleep(PURGE_INTERVAL) => { 43 | purge(&self.pool, self.retention_period).await?; 44 | } 45 | } 46 | } 47 | tracing::info!("stopping Purger"); 48 | Ok(()) 49 | } 50 | } 51 | 52 | pub async fn purge(pool: &Pool, retention_period: Duration) -> anyhow::Result<()> { 53 | let num_records_purged = db::purge_stale_records(pool, retention_period).await?; 54 | tracing::info!("purged {} stale records", num_records_purged); 55 | Ok(()) 56 | } 57 | -------------------------------------------------------------------------------- /boost_manager/src/telemetry.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, TimeZone, Utc}; 2 | use db_store::meta; 3 | use sqlx::{Pool, Postgres}; 4 | 5 | const LAST_REWARD_PROCESSED_TIME: &str = "last_reward_processed_time"; 6 | 7 | pub async fn initialize(db: &Pool) -> anyhow::Result<()> { 8 | match meta::fetch(db, LAST_REWARD_PROCESSED_TIME).await { 9 | Ok(timestamp) => last_reward_processed_time(db, to_datetime(timestamp)?).await, 10 | Err(db_store::Error::NotFound(_)) => Ok(()), 11 | Err(err) => Err(err.into()), 12 | } 13 | } 14 | 15 | pub async fn last_reward_processed_time( 16 | db: &Pool, 17 | datetime: DateTime, 18 | ) -> anyhow::Result<()> { 19 | metrics::gauge!(LAST_REWARD_PROCESSED_TIME).set(datetime.timestamp() as f64); 20 | meta::store(db, LAST_REWARD_PROCESSED_TIME, datetime.timestamp()).await?; 21 | 22 | Ok(()) 23 | } 24 | 25 | fn to_datetime(timestamp: i64) -> anyhow::Result> { 26 | Utc.timestamp_opt(timestamp, 0) 27 | .single() 28 | .ok_or_else(|| anyhow::anyhow!("Unable to decode timestamp")) 29 | } 30 | -------------------------------------------------------------------------------- /boost_manager/tests/integrations/main.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | 3 | mod activator_tests; 4 | mod purger_tests; 5 | mod updater_tests; 6 | mod watcher_tests; 7 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | // generated by `sqlx migrate build-script` 2 | fn main() { 3 | // trigger recompilation when a new migration is added 4 | println!("cargo:rerun-if-changed=migrations"); 5 | } 6 | -------------------------------------------------------------------------------- /coverage_map/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "coverage-map" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | license.workspace = true 6 | edition.workspace = true 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | chrono = { workspace = true } 12 | hex-assignments = { path = "../hex_assignments" } 13 | hextree = { workspace = true } 14 | -------------------------------------------------------------------------------- /coverage_point_calculator/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "coverage-point-calculator" 3 | version = "0.1.0" 4 | description = "Calculate Coverage Points for hotspots in the Mobile Network" 5 | authors.workspace = true 6 | license.workspace = true 7 | edition.workspace = true 8 | 9 | [dependencies] 10 | chrono = { workspace = true } 11 | hextree = { workspace = true } 12 | rust_decimal = { workspace = true } 
13 | rust_decimal_macros = { workspace = true } 14 | thiserror = { workspace = true } 15 | hex-assignments = { path = "../hex_assignments" } 16 | coverage-map = { path = "../coverage_map" } 17 | 18 | [dev-dependencies] 19 | rstest = { version = "0.21.0", default-features = false } 20 | -------------------------------------------------------------------------------- /coverage_point_calculator/src/service_provider_boosting.rs: -------------------------------------------------------------------------------- 1 | use rust_decimal::Decimal; 2 | use rust_decimal_macros::dec; 3 | 4 | // In order for the Wi-Fi access point to be eligible for boosted hex rewards 5 | // as described in HIP84 the location trust score needs to be 0.75 or higher. 6 | // 7 | // [HIP-93: Add Wifi to Mobile Dao][add-wifi-aps] 8 | // 9 | // [add-wifi-aps]: https://github.com/helium/HIP/blob/main/0093-addition-of-wifi-aps-to-mobile-subdao.md#341-indoor-access-points-rewards 10 | pub(crate) const MIN_WIFI_TRUST_MULTIPLIER: Decimal = dec!(0.75); 11 | 12 | // In order for access points to be eligible for boosted Service Provider 13 | // rewards defined in HIP-84, the asserted distances must be 50 meters or 14 | // less than the reported location from external services for both indoor 15 | // and outdoor Access Points. 16 | // 17 | // [HIP-119: Gaming Loopholes][gaming-loopholes] 18 | // 19 | // [gaming-loopholes]: https://github.com/helium/HIP/blob/main/0119-closing-gaming-loopholes-within-the-mobile-network.md#maximum-asserted-distance-for-boosted-hexes 20 | pub(crate) const MAX_AVERAGE_DISTANCE: Decimal = dec!(50); 21 | 22 | #[derive(Debug, Clone, Copy, PartialEq)] 23 | pub enum SPBoostedRewardEligibility { 24 | Eligible, 25 | /// Radio must pass at least 1mb of data from 3 unique phones. 
26 | /// 27 | /// [HIP-84: Provider Hex Boosting][provider-boosting] 28 | /// 29 | /// [provider-boosting]: https://github.com/helium/HIP/blob/main/0084-service-provider-hex-boosting.md 30 | RadioThresholdNotMet, 31 | /// Radio must have greater than 25 unique connections over 7 days 32 | /// 33 | /// [HIP-140: Adjust Service Provider Boost Qualifiers][adjust-service-provider-boost-qualifiers] 34 | /// 35 | /// [adjust-service-provider-boost-qualifiers]: https://github.com/helium/HIP/blob/main/0140-adjust-service-provider-boost-qualifiers.md 36 | NotEnoughConnections, 37 | } 38 | -------------------------------------------------------------------------------- /custom_tracing/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "custom-tracing" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | license.workspace = true 6 | edition.workspace = true 7 | 8 | [dependencies] 9 | anyhow = "1" 10 | axum = { version = ">=0.7", features = ["tracing"], optional = true } 11 | bs58 = { workspace = true } 12 | helium-crypto = { workspace = true } 13 | helium-proto = { workspace = true, optional = true } 14 | http = { workspace = true, optional = true } 15 | notify = { version = "6", default-features = false } 16 | serde = { version = "1", features = ["derive"] } 17 | tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } 18 | tower-http = { version = "0", features = ["trace"] } 19 | tower-layer = { version = "0" } 20 | tracing = "0" 21 | tracing-subscriber = { version = "0", default-features = true, features = [ 22 | "env-filter", 23 | "registry", 24 | "fmt", 25 | ] } 26 | 27 | 28 | [target.'cfg(target_os = "macos")'.dependencies] 29 | notify = { version = "6", default-features = false, features = [ 30 | "macos_fsevent", 31 | ] } 32 | 33 | 34 | [features] 35 | default = [] 36 | http-1 = ["axum"] 37 | grpc = ["helium-proto", "http"] 38 | -------------------------------------------------------------------------------- /custom_tracing/src/grpc_layer.rs: -------------------------------------------------------------------------------- 1 | use helium_proto::services::Body; 2 | use http::request::Request; 3 | use tower_http::{ 4 | classify::{GrpcErrorsAsFailures, SharedClassifier}, 5 | trace::{DefaultOnFailure, DefaultOnResponse, TraceLayer}, 6 | LatencyUnit, 7 | }; 8 | use tracing::{Level, Span}; 9 | 10 | type GrpcLayer = 11 | TraceLayer<SharedClassifier<GrpcErrorsAsFailures>, for<'a> fn(&'a http::Request<Body>) -> Span>; 12 | 13 | pub fn new_with_span(make_span: fn(&Request<Body>) -> Span) -> GrpcLayer { 14 | TraceLayer::new_for_grpc() 15 | .make_span_with(make_span) 16 | .on_response( 17 | DefaultOnResponse::new() 18 | .level(Level::DEBUG) 19 | .latency_unit(LatencyUnit::Millis), 20 | ) 21 | .on_failure( 22 | DefaultOnFailure::new() 23 | .level(Level::WARN) 24 | .latency_unit(LatencyUnit::Millis), 25 | ) 26 | } 27 | 
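The caller supplies the `make_span` function that `new_with_span` wires into the layer. A minimal sketch of providing one and attaching the result to a tonic server; the span name, recorded field, and feature gating are assumptions for illustration, not code from this repo:

```rust
use tracing::Span;

// Hypothetical span factory passed to new_with_span; the span name and
// the recorded `path` field are illustrative.
fn make_span(request: &http::Request<helium_proto::services::Body>) -> Span {
    tracing::info_span!("grpc_request", path = %request.uri().path())
}

fn attach() {
    // Assumes custom-tracing is compiled with its `grpc` feature enabled.
    let trace_layer = custom_tracing::grpc_layer::new_with_span(make_span);
    // A tonic server would then add this via `.layer(trace_layer)`.
    let _ = trace_layer;
}
```
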
-------------------------------------------------------------------------------- /custom_tracing/src/http_layer.rs: -------------------------------------------------------------------------------- 1 | use axum::{body::Body, http::Request}; 2 | use tower_http::{ 3 | classify::{ServerErrorsAsFailures, SharedClassifier}, 4 | trace::{DefaultOnFailure, DefaultOnResponse, TraceLayer}, 5 | LatencyUnit, 6 | }; 7 | use tracing::{Level, Span}; 8 | 9 | #[allow(clippy::type_complexity)] 10 | pub fn new_with_span( 11 | make_span: fn(&Request<Body>) -> Span, 12 | ) -> TraceLayer< 13 | SharedClassifier<ServerErrorsAsFailures>, 14 | for<'a> fn(&'a axum::http::Request<Body>) -> Span, 15 | > { 16 | TraceLayer::new_for_http() 17 | .make_span_with(make_span) 18 | .on_response( 19 | DefaultOnResponse::new() 20 | .level(Level::DEBUG) 21 | .latency_unit(LatencyUnit::Millis), 22 | ) 23 | .on_failure( 24 | DefaultOnFailure::new() 25 | .level(Level::WARN) 26 | .latency_unit(LatencyUnit::Millis), 27 | ) 28 | } 29 | -------------------------------------------------------------------------------- /custom_tracing/src/settings.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Serialize, Deserialize, Clone)] 4 | pub struct Settings { 5 | /// File name to be watched by custom tracing 6 | #[serde(default = "default_tracing_cfg_file")] 7 | pub tracing_cfg_file: String, 8 | } 9 | 10 | impl Default for Settings { 11 | fn default() -> Self { 12 | Self { 13 | tracing_cfg_file: default_tracing_cfg_file(), 14 | } 15 | } 16 | } 17 | 18 | pub fn default_tracing_cfg_file() -> String { 19 | "tracing.cfg".to_string() 20 | } 21 | -------------------------------------------------------------------------------- /db_store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "db-store" 3 | version = "0.1.0" 4 | description = "Database Store Library" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | metrics = { workspace = true } 11 | poc-metrics = { path = "../metrics" } 12 | thiserror = { workspace = true } 13 | sqlx = { workspace = true } 14 | serde = { workspace = true } 15 | http = { workspace = true } 16 | tokio = { workspace = true } 17 | tracing = { workspace = true } 18 | 19 | aws-config = "0" 20 | aws-sdk-sts = "0" 21 | aws-credential-types = "0" 22 | aws-smithy-http = "0.54.4" 23 | aws-types = "0.54.1" 24 | aws-sig-auth = "0.54.1" 25 | -------------------------------------------------------------------------------- /db_store/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | pub type Result<T = ()> = std::result::Result<T, Error>; 4 | 5 | #[derive(Error, Debug)] 6 | pub enum Error { 7 | #[error("Sql error")] 8 | SqlError(#[from] sqlx::Error), 9 | #[error("Failed to decode value")] 10 | DecodeError, 11 | #[error("meta key not found {0}")] 12 | NotFound(String), 13 | #[error("invalid configuration: {0}")] 14 | InvalidConfiguration(String), 15 | #[error("Aws Assume Role Error")] 16 | AwsStsError(#[from] aws_sdk_sts::types::SdkError<aws_sdk_sts::error::AssumeRoleError>), 17 | #[error("Assumed Credentials were invalid: {0}")] 18 | InvalidAssumedCredentials(String), 19 | #[error("Aws Signing Error")] 20 | SigningError(#[from] aws_sig_auth::signer::SigningError), 21 | #[error("tokio join error")] 22 | JoinError(#[from] tokio::task::JoinError), 23 | #[error("invalid auth token, does not start with http")] 24 | InvalidAuthToken(), 25 | } 26 | 27 | pub fn invalid_configuration(str: impl Into<String>) -> Error { 28 | Error::InvalidConfiguration(str.into()) 29 | } 30 | -------------------------------------------------------------------------------- /db_store/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | mod error; 3 | mod iam_auth_pool; 4 | mod metric_tracker; 5 | mod settings; 6 | 7 | pub use error::{Error, Result}; 8 | pub use settings::Settings; 9 | 10 | pub mod meta; 11 | 12 | /// A key-value pair that is stored in the metadata table. 
13 | pub struct MetaValue<T> { 14 | key: String, 15 | value: T, 16 | } 17 | 18 | impl<T> MetaValue<T> { 19 | pub fn new(key: &str, value: T) -> Self { 20 | Self { 21 | key: key.to_string(), 22 | value, 23 | } 24 | } 25 | 26 | pub fn key(&self) -> &str { 27 | &self.key 28 | } 29 | 30 | pub fn value(&self) -> &T { 31 | &self.value 32 | } 33 | } 34 | 35 | impl<T> MetaValue<T> 36 | where 37 | T: ToString, 38 | { 39 | pub async fn insert<'c, E>(&self, exec: E) -> Result 40 | where 41 | E: sqlx::Executor<'c, Database = sqlx::Postgres>, 42 | { 43 | meta::store(exec, &self.key, &self.value.to_string()).await 44 | } 45 | } 46 | 47 | impl<T> MetaValue<T> 48 | where 49 | T: ToString + FromStr, 50 | { 51 | pub async fn fetch_or_insert_with<'c, E>( 52 | exec: E, 53 | key: &str, 54 | default_fn: impl FnOnce() -> T, 55 | ) -> Result<Self> 56 | where 57 | E: sqlx::Executor<'c, Database = sqlx::Postgres> + Copy, 58 | { 59 | let result: Result<String> = meta::fetch::<String>(exec, key).await; 60 | 61 | match result { 62 | Ok(str_val) => { 63 | let value = str_val.parse().map_err(|_| Error::DecodeError)?; 64 | Ok(Self { 65 | key: key.to_string(), 66 | value, 67 | }) 68 | } 69 | Err(Error::NotFound(_)) => { 70 | let value = default_fn(); 71 | let res = Self::new(key, value); 72 | res.insert(exec).await?; 73 | Ok(res) 74 | } 75 | Err(err) => Err(err), 76 | } 77 | } 78 | 79 | pub async fn update<'c, E>(&mut self, exec: E, new_val: T) -> Result<T> 80 | where 81 | E: sqlx::PgExecutor<'c>, 82 | { 83 | meta::store(exec, &self.key, new_val.to_string()).await?; 84 | Ok(std::mem::replace(&mut self.value, new_val)) 85 | } 86 | } 87 | 
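A short usage sketch of the `MetaValue` API above against a Postgres pool; the key name and values here are illustrative, not taken from the oracles:

```rust
use db_store::MetaValue;

// Hypothetical caller: track the last processed block height in the meta table.
async fn example(pool: &sqlx::Pool<sqlx::Postgres>) -> db_store::Result<()> {
    // Loads "last_height" if present, otherwise inserts the default 0.
    let mut last_height: MetaValue<i64> =
        MetaValue::fetch_or_insert_with(pool, "last_height", || 0).await?;

    // Persists the new value; update() returns the value it replaced.
    let _previous = last_height.update(pool, 42).await?;
    assert_eq!(*last_height.value(), 42);
    Ok(())
}
```
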
-------------------------------------------------------------------------------- /db_store/src/meta.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use crate::{Error, Result}; 4 | 5 | macro_rules! query_exec_timed { 6 | ( $name:literal, $query:expr, $meth:ident, $exec:expr ) => {{ 7 | match poc_metrics::record_duration!(concat!($name, "_duration"), $query.$meth($exec).await) { 8 | Ok(x) => { 9 | metrics::counter!(concat!($name, "_count"), "status" => "ok").increment(1); 10 | Ok(x) 11 | } 12 | Err(e) => { 13 | metrics::counter!(concat!($name, "_count"), "status" => "error").increment(1); 14 | Err(Error::SqlError(e)) 15 | } 16 | } 17 | }}; 18 | } 19 | 20 | pub async fn store<T>(exec: impl sqlx::PgExecutor<'_>, key: &str, value: T) -> Result 21 | where 22 | T: ToString, 23 | { 24 | let query = sqlx::query( 25 | r#" 26 | insert into meta(key, value) 27 | values ($1, $2) 28 | on conflict (key) do update set 29 | value = EXCLUDED.value 30 | "#, 31 | ) 32 | .bind(key) 33 | .bind(value.to_string()); 34 | query_exec_timed!("db_store_meta_store", query, execute, exec).map(|_| ()) 35 | } 36 | 37 | pub async fn fetch<T>(exec: impl sqlx::PgExecutor<'_>, key: &str) -> Result<T> 38 | where 39 | T: FromStr, 40 | { 41 | let query = sqlx::query_scalar::<_, String>( 42 | r#" 43 | select value from meta where key = $1 44 | "#, 45 | ) 46 | .bind(key); 47 | query_exec_timed!("db_store_meta_fetch", query, fetch_optional, exec)? 48 | .ok_or_else(|| Error::NotFound(key.to_string())) 49 | .and_then(|value| value.parse().map_err(|_| Error::DecodeError)) 50 | } 51 | -------------------------------------------------------------------------------- /db_store/src/metric_tracker.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | const DURATION: Duration = Duration::from_secs(300); 4 | 5 | pub async fn start(app_name: &str, pool: sqlx::Pool<sqlx::Postgres>) { 6 | let pool_size_name = format!("{app_name}_db_pool_size"); 7 | let pool_idle_name = format!("{app_name}_db_pool_idle"); 8 | tokio::spawn(async move { run(pool_size_name, pool_idle_name, pool).await }); 9 | } 10 | 11 | async fn run(size_name: String, idle_name: String, pool: sqlx::Pool<sqlx::Postgres>) { 12 | let mut trigger = tokio::time::interval(DURATION); 13 | 14 | loop { 15 | trigger.tick().await; 16 | 17 | metrics::gauge!(size_name.clone()).set(pool.size() as f64); 18 | metrics::gauge!(idle_name.clone()).set(pool.num_idle() as f64); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /denylist/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "denylist" 3 | version = "0.1.0" 4 | edition.workspace = true 5 | description = "Maintains latest denylist" 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | 10 | [dependencies] 11 | thiserror = { workspace = true } 12 | tracing = { workspace = true } 13 | reqwest = { workspace = true } 14 | helium-crypto = { workspace = true } 15 | base64 = { workspace = true } 16 | sha2 = { workspace = true } 17 | bytes = { workspace = true } 18 | bincode = { workspace = true } 19 | xorf = { workspace = true } 20 | serde = { workspace = true } 21 | serde_json = { workspace = true } 22 | config = { workspace = true } 23 | chrono = { workspace = true } 24 | humantime-serde = { workspace = true } 25 | 26 | xorf-generator = { git = "https://github.com/helium/xorf-generator", branch = "main" } 27 | -------------------------------------------------------------------------------- /denylist/src/client.rs: -------------------------------------------------------------------------------- 1 | use crate::{error::Error, models::metadata::DenyListMetaData, Result}; 2 | use std::{str, time::Duration}; 3 | 4 | /// The default client useragent for denylist http requests 5 | static USERAGENT: &str = "oracle/iot_verifier/1.0"; 6 | /// The default timeout for http requests 7 | pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); 8 | 9 | #[derive(Clone, Debug)] 10 | pub struct DenyListClient { 11 | pub client: reqwest::Client, 12 | } 13 | 14 | // TODO: basic non production client. productionise.... 
15 | impl DenyListClient { 16 | pub fn new() -> Result { 17 | let client = reqwest::Client::builder() 18 | .gzip(true) 19 | .user_agent(USERAGENT) 20 | .timeout(DEFAULT_TIMEOUT) 21 | .build() 22 | .map_err(Error::from)?; 23 | Ok(Self { client }) 24 | } 25 | 26 | pub async fn get_metadata(&mut self, url: &String) -> Result { 27 | let response = self.client.get(url).send().await?; 28 | match response.status() { 29 | reqwest::StatusCode::OK => { 30 | let json = response.json::().await?; 31 | Ok(json) 32 | } 33 | other => Err(Error::UnexpectedStatus(other.to_string())), 34 | } 35 | } 36 | 37 | pub async fn get_bin(&mut self, url: &String) -> Result> { 38 | let response = self.client.get(url).send().await?; 39 | match response.status() { 40 | reqwest::StatusCode::OK => { 41 | let bytes = response.bytes().await?; 42 | Ok(bytes.to_vec()) 43 | } 44 | other => Err(Error::UnexpectedStatus(other.to_string())), 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /denylist/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | pub type Result = std::result::Result; 3 | 4 | #[derive(Error, Debug)] 5 | pub enum Error { 6 | #[error("request error")] 7 | Request(#[from] reqwest::Error), 8 | #[error("filter error")] 9 | InvalidFilter(String), 10 | #[error("unexpected value")] 11 | Value(serde_json::Value), 12 | #[error("invalid decimals in {0}, only 8 allowed")] 13 | Decimals(String), 14 | #[error("unexpected or invalid number {0}")] 15 | Number(String), 16 | #[error("crypto error")] 17 | Crypto(#[from] helium_crypto::Error), 18 | #[error("bincode error")] 19 | BinCode(#[from] bincode::Error), 20 | #[error("unexpected status {0}")] 21 | UnexpectedStatus(String), 22 | #[error("unable to parse metadata")] 23 | ParseInt(#[from] std::num::ParseIntError), 24 | #[error("io error")] 25 | Io(#[from] std::io::Error), 26 | #[error("config error")] 27 | Config(#[from] config::ConfigError), 28 | } 29 | 30 | impl Error { 31 | pub fn invalid_filter(msg: E) -> Self { 32 | Self::InvalidFilter(msg.to_string()) 33 | } 34 | 35 | pub fn value(value: serde_json::Value) -> Self { 36 | Self::Value(value) 37 | } 38 | 39 | pub fn decimals(value: &str) -> Self { 40 | Self::Decimals(value.to_string()) 41 | } 42 | 43 | pub fn number(value: &str) -> Self { 44 | Self::Number(value.to_string()) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /denylist/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod error; 2 | pub use error::{Error, Result}; 3 | pub mod client; 4 | pub mod denylist; 5 | pub mod models; 6 | pub mod settings; 7 | 8 | pub use crate::denylist::DenyList; 9 | pub use crate::settings::Settings; 10 | -------------------------------------------------------------------------------- /denylist/src/models/metadata.rs: -------------------------------------------------------------------------------- 1 | // model representing the JSON payload returned from: 2 | // https://api.github.com/repos/helium/denylist/releases/latest 3 | 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Serialize, Deserialize, Debug)] 7 | pub struct DenyListMetaData { 8 | url: String, 9 | assets_url: String, 10 | upload_url: String, 11 | html_url: String, 12 | id: i64, 13 | author: Item, 14 | node_id: String, 15 | pub tag_name: String, 16 | target_commitish: String, 17 | name: String, 18 | draft: bool, 19 | prerelease: 
bool, 20 | created_at: String, 21 | published_at: String, 22 | pub assets: Vec, 23 | tarball_url: String, 24 | zipball_url: String, 25 | body: String, 26 | } 27 | 28 | #[derive(Clone, Serialize, Deserialize, Debug)] 29 | pub struct Asset { 30 | url: String, 31 | id: i64, 32 | node_id: String, 33 | pub name: String, 34 | label: String, 35 | uploader: Item, 36 | content_type: String, 37 | state: String, 38 | size: i64, 39 | download_count: i64, 40 | created_at: String, 41 | updated_at: String, 42 | pub browser_download_url: String, 43 | } 44 | 45 | #[derive(Clone, Serialize, Deserialize, Debug)] 46 | struct Item { 47 | login: String, 48 | id: i64, 49 | node_id: String, 50 | avatar_url: String, 51 | gravatar_id: String, 52 | url: String, 53 | html_url: String, 54 | followers_url: String, 55 | following_url: String, 56 | gists_url: String, 57 | starred_url: String, 58 | subscriptions_url: String, 59 | organizations_url: String, 60 | repos_url: String, 61 | events_url: String, 62 | received_events_url: String, 63 | #[serde(rename = "type")] 64 | r#type: String, 65 | site_admin: bool, 66 | } 67 | -------------------------------------------------------------------------------- /denylist/src/models/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod metadata; 2 | -------------------------------------------------------------------------------- /file_store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "file-store" 3 | version = "0.1.0" 4 | description = "Ingest Store Library" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = {workspace = true} 11 | clap = {workspace = true} 12 | config = {workspace = true} 13 | serde = {workspace = true} 14 | serde_json = {workspace = true} 15 | thiserror = {workspace = true} 16 | tokio = { workspace = true } 17 | tokio-util = { workspace = true } 18 | tokio-stream = {workspace = true} 19 | triggered = {workspace = true} 20 | async-compression = {version = "0", features = ["tokio", "gzip"]} 21 | futures = {workspace = true} 22 | futures-util = {workspace = true} 23 | prost = {workspace = true} 24 | bytes = "*" 25 | regex = "1" 26 | lazy_static = {workspace = true} 27 | tracing = { workspace = true } 28 | chrono = { workspace = true } 29 | helium-proto = {workspace = true} 30 | helium-crypto = {workspace = true} 31 | csv = "*" 32 | http = {workspace = true} 33 | aws-config = {workspace = true} 34 | aws-sdk-s3 = {workspace = true} 35 | aws-types = {workspace = true, optional = true} 36 | strum = {version = "0", features = ["derive"]} 37 | strum_macros = "0" 38 | sha2 = {workspace = true} 39 | metrics = {workspace = true } 40 | blake3 = {workspace = true} 41 | poc-metrics = { path = "../metrics" } 42 | rust_decimal = {workspace = true} 43 | rust_decimal_macros = {workspace = true} 44 | base64 = {workspace = true} 45 | beacon = {workspace = true} 46 | sqlx = {workspace = true, optional = true} 47 | async-trait = {workspace = true} 48 | derive_builder = {workspace = true} 49 | retainer = {workspace = true} 50 | uuid = {workspace = true} 51 | h3o = {workspace = true} 52 | task-manager = { path = "../task_manager" } 53 | 54 | [dev-dependencies] 55 | hex-literal = "0" 56 | tempfile = { workspace = true } 57 | 58 | [features] 59 | default = ["sqlx-postgres"] 60 | local = ["aws-types"] 61 | sqlx-postgres = ["sqlx/postgres"] 62 | 
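The CLI modules that follow share one small helper, `print_json` in `cli/mod.rs`, which pretty-prints any serializable value. A self-contained sketch of the same pattern; the `Info` struct is invented for illustration:

```rust
use serde::Serialize;

// Mirrors the print_json helper defined in cli/mod.rs below.
fn print_json<T: ?Sized + Serialize>(value: &T) -> anyhow::Result<()> {
    println!("{}", serde_json::to_string_pretty(value)?);
    Ok(())
}

#[derive(Serialize)]
struct Info {
    bucket: String,
    files: u32,
}

fn main() -> anyhow::Result<()> {
    print_json(&Info { bucket: "mobile-verified".into(), files: 3 })
}
```
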
-------------------------------------------------------------------------------- /file_store/src/cli/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod bucket; 2 | pub mod dump; 3 | pub mod dump_mobile_rewards; 4 | pub mod info; 5 | 6 | use crate::Result; 7 | 8 | pub(crate) fn print_json(value: &T) -> Result { 9 | println!("{}", serde_json::to_string_pretty(value)?); 10 | Ok(()) 11 | } 12 | -------------------------------------------------------------------------------- /file_store/src/entropy_report.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | traits::{MsgDecode, MsgTimestamp, TimestampDecode, TimestampEncode}, 3 | Error, Result, 4 | }; 5 | use chrono::{DateTime, Utc}; 6 | use helium_proto::EntropyReportV1; 7 | use serde::Serialize; 8 | 9 | #[derive(Serialize, Clone, Debug)] 10 | pub struct EntropyReport { 11 | pub data: Vec, 12 | pub timestamp: DateTime, 13 | pub version: u32, 14 | } 15 | 16 | impl MsgTimestamp for EntropyReport { 17 | fn timestamp(&self) -> u64 { 18 | self.timestamp.encode_timestamp_millis() 19 | } 20 | } 21 | 22 | impl MsgTimestamp>> for EntropyReportV1 { 23 | fn timestamp(&self) -> Result> { 24 | self.timestamp.to_timestamp() 25 | } 26 | } 27 | 28 | impl MsgDecode for EntropyReport { 29 | type Msg = EntropyReportV1; 30 | } 31 | 32 | impl TryFrom for EntropyReport { 33 | type Error = Error; 34 | 35 | fn try_from(v: EntropyReportV1) -> Result { 36 | let timestamp = v.timestamp()?; 37 | Ok(Self { 38 | data: v.data, 39 | version: v.version, 40 | timestamp, 41 | }) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /file_store/src/hex_boost.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | 3 | #[derive(Debug, Clone)] 4 | pub struct BoostedHexActivation { 5 | pub location: u64, 6 | pub activation_ts: DateTime, 7 | pub boosted_hex_pubkey: String, 8 | pub boost_config_pubkey: String, 9 | } 10 | -------------------------------------------------------------------------------- /file_store/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod cli; 2 | pub mod coverage; 3 | pub mod entropy_report; 4 | mod error; 5 | pub mod file_info; 6 | pub mod file_info_poller; 7 | pub mod file_sink; 8 | pub mod file_source; 9 | pub mod file_store; 10 | pub mod file_upload; 11 | pub mod hex_boost; 12 | pub mod iot_beacon_report; 13 | pub mod iot_invalid_poc; 14 | pub mod iot_packet; 15 | pub mod iot_valid_poc; 16 | pub mod iot_witness_report; 17 | pub mod mobile_ban; 18 | pub mod mobile_radio_invalidated_threshold; 19 | pub mod mobile_radio_threshold; 20 | pub mod mobile_session; 21 | pub mod mobile_subscriber; 22 | pub mod mobile_transfer; 23 | pub mod reward_manifest; 24 | mod settings; 25 | pub mod speedtest; 26 | pub mod subscriber_verified_mapping_event; 27 | pub mod subscriber_verified_mapping_event_ingest_report; 28 | pub mod traits; 29 | pub mod unique_connections; 30 | pub mod usage_counts; 31 | pub mod verified_subscriber_verified_mapping_event_ingest_report; 32 | pub mod wifi_heartbeat; 33 | 34 | pub use crate::file_store::FileStore; 35 | pub use cli::bucket::FileFilter; 36 | pub use error::{Error, Result}; 37 | pub use file_info::{FileInfo, FileType}; 38 | pub use file_sink::{FileSink, FileSinkBuilder}; 39 | pub use iot_valid_poc::SCALING_PRECISION; 40 | pub use settings::Settings; 41 | 42 | use 
bytes::BytesMut; 43 | use futures::stream::BoxStream; 44 | 45 | pub type Stream = BoxStream<'static, Result>; 46 | pub type FileInfoStream = Stream; 47 | pub type BytesMutStream = Stream; 48 | -------------------------------------------------------------------------------- /file_store/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use file_store::{ 3 | cli::{bucket, dump, dump_mobile_rewards, info}, 4 | Result, 5 | }; 6 | 7 | #[derive(Debug, clap::Parser)] 8 | #[clap(version = env!("CARGO_PKG_VERSION"))] 9 | #[clap(about = "Helium Bucket Commands")] 10 | pub struct Cli { 11 | #[clap(subcommand)] 12 | cmd: Cmd, 13 | } 14 | 15 | impl Cli { 16 | pub async fn run(self) -> Result { 17 | self.cmd.run().await 18 | } 19 | } 20 | 21 | #[derive(Debug, clap::Subcommand)] 22 | pub enum Cmd { 23 | Info(info::Cmd), 24 | Dump(dump::Cmd), 25 | Bucket(Box), 26 | DumpMobileRewards(dump_mobile_rewards::Cmd), 27 | } 28 | 29 | impl Cmd { 30 | pub async fn run(&self) -> Result { 31 | match self { 32 | Cmd::Info(cmd) => cmd.run().await, 33 | Cmd::Dump(cmd) => cmd.run().await, 34 | Cmd::Bucket(cmd) => cmd.run().await, 35 | Cmd::DumpMobileRewards(cmd) => cmd.run().await, 36 | } 37 | } 38 | } 39 | 40 | #[tokio::main] 41 | async fn main() -> Result { 42 | let cli = Cli::parse(); 43 | cli.run().await 44 | } 45 | -------------------------------------------------------------------------------- /file_store/src/mobile_transfer.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | traits::{MsgDecode, TimestampDecode}, 3 | Error, Result, 4 | }; 5 | use chrono::{DateTime, Utc}; 6 | use helium_crypto::PublicKeyBinary; 7 | use helium_proto::services::packet_verifier as proto; 8 | use serde::Serialize; 9 | 10 | #[derive(Serialize, Clone)] 11 | pub struct ValidDataTransferSession { 12 | pub pub_key: PublicKeyBinary, 13 | pub payer: PublicKeyBinary, 14 | pub upload_bytes: u64, 15 | pub download_bytes: u64, 16 | pub rewardable_bytes: u64, 17 | pub num_dcs: u64, 18 | pub first_timestamp: DateTime, 19 | pub last_timestamp: DateTime, 20 | pub burn_timestamp: DateTime, 21 | } 22 | 23 | impl MsgDecode for ValidDataTransferSession { 24 | type Msg = proto::ValidDataTransferSession; 25 | } 26 | 27 | impl TryFrom for ValidDataTransferSession { 28 | type Error = Error; 29 | fn try_from(v: proto::ValidDataTransferSession) -> Result { 30 | Ok(Self { 31 | payer: v.payer.into(), 32 | pub_key: v.pub_key.into(), 33 | upload_bytes: v.upload_bytes, 34 | download_bytes: v.download_bytes, 35 | rewardable_bytes: v.rewardable_bytes, 36 | num_dcs: v.num_dcs, 37 | first_timestamp: v.first_timestamp.to_timestamp_millis()?, 38 | last_timestamp: v.last_timestamp.to_timestamp_millis()?, 39 | burn_timestamp: v.burn_timestamp.to_timestamp_millis()?, 40 | }) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /file_store/src/settings.rs: -------------------------------------------------------------------------------- 1 | use crate::{Error, Result}; 2 | use config::{Config, File}; 3 | use serde::{Deserialize, Serialize}; 4 | use std::path::Path; 5 | 6 | #[derive(Debug, Serialize, Deserialize, Clone)] 7 | pub struct Settings { 8 | /// Bucket name for the store. Required 9 | pub bucket: String, 10 | /// Optional api endpoint for the bucket. Default none 11 | pub endpoint: Option, 12 | /// Optional region for the endpoint. 
Default: us-west-2 13 | #[serde(default = "default_region")] 14 | pub region: String, 15 | 16 | /// Should only be used for local testing 17 | pub access_key_id: Option<String>, 18 | pub secret_access_key: Option<String>, 19 | } 20 | 21 | pub fn default_region() -> String { 22 | "us-west-2".to_string() 23 | } 24 | 25 | impl Settings { 26 | /// Load Settings from a given path. 27 | /// 28 | /// Environment overrides are not supported for file_store cli commands 29 | pub fn new<P: AsRef<Path>>(path: P) -> Result<Self> { 30 | Config::builder() 31 | .add_source(File::with_name(&path.as_ref().to_string_lossy())) 32 | .build() 33 | .and_then(|config| config.try_deserialize()) 34 | .map_err(Error::from) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /file_store/src/subscriber_verified_mapping_event.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | traits::{MsgDecode, MsgTimestamp, TimestampDecode, TimestampEncode}, 3 | Error, Result, 4 | }; 5 | use chrono::{DateTime, Utc}; 6 | use helium_crypto::PublicKeyBinary; 7 | use helium_proto::services::poc_mobile::SubscriberVerifiedMappingEventReqV1; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | #[derive(Clone, Deserialize, Serialize, Debug, PartialEq)] 11 | pub struct SubscriberVerifiedMappingEvent { 12 | pub subscriber_id: Vec<u8>, 13 | pub total_reward_points: u64, 14 | pub timestamp: DateTime<Utc>, 15 | pub carrier_mapping_key: PublicKeyBinary, 16 | } 17 | 18 | impl MsgDecode for SubscriberVerifiedMappingEvent { 19 | type Msg = SubscriberVerifiedMappingEventReqV1; 20 | } 21 | 22 | impl MsgTimestamp<Result<DateTime<Utc>>> for SubscriberVerifiedMappingEventReqV1 { 23 | fn timestamp(&self) -> Result<DateTime<Utc>> { 24 | self.timestamp.to_timestamp() 25 | } 26 | } 27 | 28 | impl MsgTimestamp<u64> for SubscriberVerifiedMappingEvent { 29 | fn timestamp(&self) -> u64 { 30 | self.timestamp.encode_timestamp() 31 | } 32 | } 33 | 34 | impl From<SubscriberVerifiedMappingEvent> for SubscriberVerifiedMappingEventReqV1 { 35 | fn from(v: SubscriberVerifiedMappingEvent) -> Self { 36 | let timestamp = v.timestamp(); 37 | SubscriberVerifiedMappingEventReqV1 { 38 | subscriber_id: v.subscriber_id, 39 | total_reward_points: v.total_reward_points, 40 | timestamp, 41 | carrier_mapping_key: v.carrier_mapping_key.into(), 42 | signature: vec![], 43 | } 44 | } 45 | } 46 | 47 | impl TryFrom<SubscriberVerifiedMappingEventReqV1> for SubscriberVerifiedMappingEvent { 48 | type Error = Error; 49 | fn try_from(v: SubscriberVerifiedMappingEventReqV1) -> Result<Self> { 50 | let timestamp = v.timestamp()?; 51 | Ok(Self { 52 | subscriber_id: v.subscriber_id, 53 | total_reward_points: v.total_reward_points, 54 | timestamp, 55 | carrier_mapping_key: v.carrier_mapping_key.into(), 56 | }) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /file_store/src/subscriber_verified_mapping_event_ingest_report.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | subscriber_verified_mapping_event::SubscriberVerifiedMappingEvent, 3 | traits::{MsgDecode, MsgTimestamp, TimestampDecode, TimestampEncode}, 4 | Error, Result, 5 | }; 6 | use chrono::{DateTime, Utc}; 7 | use helium_proto::services::poc_mobile::{ 8 | SubscriberVerifiedMappingEventIngestReportV1, SubscriberVerifiedMappingEventReqV1, 9 | }; 10 | use serde::{Deserialize, Serialize}; 11 | 12 | #[derive(Clone, Deserialize, Serialize, Debug, PartialEq)] 13 | pub struct SubscriberVerifiedMappingEventIngestReport { 14 | pub received_timestamp: DateTime<Utc>, 15 | pub report:
SubscriberVerifiedMappingEvent, 16 | } 17 | 18 | impl MsgDecode for SubscriberVerifiedMappingEventIngestReport { 19 | type Msg = SubscriberVerifiedMappingEventIngestReportV1; 20 | } 21 | 22 | impl MsgTimestamp<Result<DateTime<Utc>>> for SubscriberVerifiedMappingEventIngestReportV1 { 23 | fn timestamp(&self) -> Result<DateTime<Utc>> { 24 | self.received_timestamp.to_timestamp_millis() 25 | } 26 | } 27 | 28 | impl MsgTimestamp<u64> for SubscriberVerifiedMappingEventIngestReport { 29 | fn timestamp(&self) -> u64 { 30 | self.received_timestamp.encode_timestamp_millis() 31 | } 32 | } 33 | 34 | impl From<SubscriberVerifiedMappingEventIngestReport> 35 | for SubscriberVerifiedMappingEventIngestReportV1 36 | { 37 | fn from(v: SubscriberVerifiedMappingEventIngestReport) -> Self { 38 | let received_timestamp = v.timestamp(); 39 | let report: SubscriberVerifiedMappingEventReqV1 = v.report.into(); 40 | Self { 41 | received_timestamp, 42 | report: Some(report), 43 | } 44 | } 45 | } 46 | 47 | impl TryFrom<SubscriberVerifiedMappingEventIngestReportV1> 48 | for SubscriberVerifiedMappingEventIngestReport 49 | { 50 | type Error = Error; 51 | fn try_from(v: SubscriberVerifiedMappingEventIngestReportV1) -> Result<Self> { 52 | Ok(Self { 53 | received_timestamp: v.timestamp()?, 54 | report: v 55 | .report 56 | .ok_or_else(|| { 57 | Error::not_found("ingest SubscriberVerifiedMappingEventIngestReport report") 58 | })? 59 | .try_into()?, 60 | }) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /file_store/src/traits/mod.rs: -------------------------------------------------------------------------------- 1 | mod file_sink_write; 2 | mod msg_bytes; 3 | mod msg_decode; 4 | mod msg_timestamp; 5 | mod msg_verify; 6 | mod report_id; 7 | 8 | pub use file_sink_write::{ 9 | FileSinkCommitStrategy, FileSinkRollTime, FileSinkWriteExt, DEFAULT_ROLL_TIME, 10 | }; 11 | pub use msg_bytes::MsgBytes; 12 | pub use msg_decode::MsgDecode; 13 | pub use msg_timestamp::{MsgTimestamp, TimestampDecode, TimestampEncode}; 14 | pub use msg_verify::MsgVerify; 15 | pub use report_id::{IngestId, ReportId}; 16 | -------------------------------------------------------------------------------- /file_store/src/traits/msg_bytes.rs: -------------------------------------------------------------------------------- 1 | pub trait MsgBytes { 2 | fn as_bytes(&self) -> bytes::Bytes; 3 | } 4 | 5 | // As prost::Message is implemented for basically all types, implementing 6 | // MsgBytes for anything that implements prost::Message makes it so you 7 | // cannot use a FileSink for anything that is _not_ a protobuf. So we 8 | // provide utility implementations for Vec<u8> and String, and require all 9 | // protos to be implemented directly, following the pattern of verifying and 10 | // signing messages.
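// As an illustration, a protobuf report type would implement the trait by
// encoding itself (hypothetical type shown, not part of this file;
// prost::Message provides encode_to_vec):
//
// impl MsgBytes for SomeIngestReportV1 {
//     fn as_bytes(&self) -> bytes::Bytes {
//         bytes::Bytes::from(self.encode_to_vec())
//     }
// }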
11 | impl MsgBytes for Vec<u8> { 12 | fn as_bytes(&self) -> bytes::Bytes { 13 | bytes::Bytes::from(self.clone()) 14 | } 15 | } 16 | 17 | impl MsgBytes for String { 18 | fn as_bytes(&self) -> bytes::Bytes { 19 | bytes::Bytes::from(self.clone()) 20 | } 21 | } 22 | 23 | impl MsgBytes for bytes::Bytes { 24 | fn as_bytes(&self) -> bytes::Bytes { 25 | self.clone() 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /file_store/src/traits/msg_decode.rs: -------------------------------------------------------------------------------- 1 | use crate::{Error, Result}; 2 | use bytes::Buf; 3 | use helium_proto::Message; 4 | 5 | pub trait MsgDecode { 6 | type Msg: Message + Default; 7 | 8 | fn decode<B: Buf>(buf: B) -> Result<Self> 9 | where 10 | Self: Sized, 11 | Self: TryFrom<Self::Msg, Error = Error>, 12 | { 13 | let req = Self::Msg::decode(buf)?; 14 | Self::try_from(req) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /file_store/src/traits/msg_timestamp.rs: -------------------------------------------------------------------------------- 1 | use crate::{error::DecodeError, Result}; 2 | use chrono::{DateTime, TimeZone, Utc}; 3 | 4 | pub trait MsgTimestamp<R> { 5 | fn timestamp(&self) -> R; 6 | } 7 | 8 | pub trait TimestampDecode { 9 | fn to_timestamp(self) -> Result<DateTime<Utc>>; 10 | fn to_timestamp_millis(self) -> Result<DateTime<Utc>>; 11 | fn to_timestamp_nanos(self) -> Result<DateTime<Utc>>; 12 | } 13 | 14 | impl TimestampDecode for u64 { 15 | fn to_timestamp(self) -> Result<DateTime<Utc>> { 16 | let decoded = i64::try_from(self).map_err(DecodeError::from)?; 17 | Utc.timestamp_opt(decoded, 0) 18 | .single() 19 | .ok_or_else(|| DecodeError::invalid_timestamp(self)) 20 | } 21 | 22 | fn to_timestamp_millis(self) -> Result<DateTime<Utc>> { 23 | let decoded = i64::try_from(self).map_err(DecodeError::from)?; 24 | Utc.timestamp_millis_opt(decoded) 25 | .single() 26 | .ok_or_else(|| DecodeError::invalid_timestamp(self)) 27 | } 28 | 29 | fn to_timestamp_nanos(self) -> Result<DateTime<Utc>> { 30 | let decoded = i64::try_from(self).map_err(DecodeError::from)?; 31 | Ok(Utc.timestamp_nanos(decoded)) 32 | } 33 | } 34 | 35 | pub trait TimestampEncode { 36 | fn encode_timestamp(&self) -> u64; 37 | fn encode_timestamp_millis(&self) -> u64; 38 | fn encode_timestamp_nanos(&self) -> u64; 39 | } 40 | 41 | impl TimestampEncode for DateTime<Utc> { 42 | fn encode_timestamp(&self) -> u64 { 43 | self.timestamp() as u64 44 | } 45 | 46 | fn encode_timestamp_millis(&self) -> u64 { 47 | self.timestamp_millis() as u64 48 | } 49 | 50 | fn encode_timestamp_nanos(&self) -> u64 { 51 | self.timestamp_nanos_opt() 52 | .expect("value can not be represented in a timestamp with nanosecond precision.") 53 | as u64 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /file_store/src/traits/report_id.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | iot_beacon_report::{IotBeaconIngestReport, IotBeaconReport}, 3 | iot_witness_report::{IotWitnessIngestReport, IotWitnessReport}, 4 | traits::TimestampEncode, 5 | }; 6 | use blake3::Hasher; 7 | use chrono::{DateTime, Utc}; 8 | pub trait IngestId { 9 | fn ingest_id(&self) -> Vec<u8>; 10 | } 11 | 12 | pub trait ReportId { 13 | fn report_id(&self, received_ts: DateTime<Utc>) -> Vec<u8>; 14 | } 15 | 16 | macro_rules!
impl_ingest_id { 17 | ($report_type:ty) => { 18 | impl IngestId for $report_type { 19 | fn ingest_id(&self) -> Vec<u8> { 20 | let mut hasher = Hasher::new(); 21 | hasher.update(&self.report.data); 22 | hasher.update( 23 | &self 24 | .received_timestamp 25 | .encode_timestamp_millis() 26 | .to_be_bytes(), 27 | ); 28 | hasher.update(self.report.pub_key.as_ref()); 29 | hasher.finalize().as_bytes().to_vec() 30 | } 31 | } 32 | }; 33 | } 34 | 35 | macro_rules! impl_report_id { 36 | ($report_type:ty) => { 37 | impl ReportId for $report_type { 38 | fn report_id(&self, received_ts: DateTime<Utc>) -> Vec<u8> { 39 | let mut hasher = Hasher::new(); 40 | hasher.update(&self.data); 41 | hasher.update(&received_ts.encode_timestamp_millis().to_be_bytes()); 42 | hasher.update(self.pub_key.as_ref()); 43 | hasher.finalize().as_bytes().to_vec() 44 | } 45 | } 46 | }; 47 | } 48 | 49 | impl_ingest_id!(IotBeaconIngestReport); 50 | impl_ingest_id!(IotWitnessIngestReport); 51 | impl_report_id!(IotBeaconReport); 52 | impl_report_id!(IotWitnessReport); 53 | -------------------------------------------------------------------------------- /hex_assignments/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hex-assignments" 3 | version = "0.1.0" 4 | description = "Hex Assignments" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | hextree = { workspace = true } 12 | sqlx = { version = "*", features = ["runtime-tokio-rustls"] } 13 | rust_decimal = { workspace = true } 14 | rust_decimal_macros = { workspace = true } 15 | helium-proto = { workspace = true } 16 | async-trait = { workspace = true } 17 | chrono = { workspace = true } 18 | derive_builder = { workspace = true } 19 | -------------------------------------------------------------------------------- /hex_assignments/src/footfall.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use hextree::disktree::DiskTreeMap; 3 | 4 | use super::{Assignment, HexAssignment}; 5 | 6 | #[derive(Default)] 7 | pub struct Footfall { 8 | pub footfall: Option<DiskTreeMap>, 9 | pub timestamp: Option<DateTime<Utc>>, 10 | } 11 | 12 | impl Footfall { 13 | pub fn new(footfall: Option<DiskTreeMap>) -> Self { 14 | Self { 15 | footfall, 16 | timestamp: None, 17 | } 18 | } 19 | } 20 | 21 | impl HexAssignment for Footfall { 22 | fn assignment(&self, cell: hextree::Cell) -> anyhow::Result<Assignment> { 23 | let Some(ref footfall) = self.footfall else { 24 | anyhow::bail!("No footfall data set has been loaded"); 25 | }; 26 | 27 | // The footfall disktree maps hexes to a single byte, a value of one indicating 28 | // assignment A and a value of zero indicating assignment B. If no value is present, 29 | // assignment C is given. 30 | match footfall.get(cell)?
{ 31 | Some((_, &[x])) if x >= 1 => Ok(Assignment::A), 32 | Some((_, &[0])) => Ok(Assignment::B), 33 | None => Ok(Assignment::C), 34 | Some((_, other)) => anyhow::bail!("Unexpected disktree data: {cell:?} {other:?}"), 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /hex_assignments/src/service_provider_override.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use hextree::disktree::DiskTreeMap; 3 | 4 | use super::{Assignment, HexAssignment}; 5 | 6 | #[derive(Default)] 7 | pub struct ServiceProviderOverride { 8 | pub service_provider_override: Option<DiskTreeMap>, 9 | pub timestamp: Option<DateTime<Utc>>, 10 | } 11 | 12 | impl ServiceProviderOverride { 13 | pub fn new(service_provider_override: Option<DiskTreeMap>) -> Self { 14 | Self { 15 | service_provider_override, 16 | timestamp: None, 17 | } 18 | } 19 | } 20 | 21 | impl HexAssignment for ServiceProviderOverride { 22 | fn assignment(&self, cell: hextree::Cell) -> anyhow::Result<Assignment> { 23 | let Some(ref service_provider_override) = self.service_provider_override else { 24 | anyhow::bail!("No service provider override hex data set has been loaded"); 25 | }; 26 | match service_provider_override.contains(cell) { 27 | Ok(true) => Ok(Assignment::A), 28 | _ => Ok(Assignment::C), 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /hex_assignments/src/urbanization.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use hextree::disktree::DiskTreeMap; 3 | 4 | use super::{Assignment, HexAssignment}; 5 | 6 | #[derive(Default)] 7 | pub struct Urbanization { 8 | pub urbanized: Option<DiskTreeMap>, 9 | pub timestamp: Option<DateTime<Utc>>, 10 | } 11 | 12 | impl Urbanization { 13 | pub fn new(urbanized: Option<DiskTreeMap>) -> Self { 14 | Self { 15 | urbanized, 16 | timestamp: None, 17 | } 18 | } 19 | } 20 | 21 | impl HexAssignment for Urbanization { 22 | fn assignment(&self, cell: hextree::Cell) -> anyhow::Result<Assignment> { 23 | let Some(ref urbanized) = self.urbanized else { 24 | anyhow::bail!("No urbanization data set has been loaded"); 25 | }; 26 | match urbanized.get(cell)?
{ 27 | Some((_, &[1])) => Ok(Assignment::A), 28 | Some((_, &[0])) => Ok(Assignment::B), 29 | None => Ok(Assignment::C), 30 | Some((_, other)) => { 31 | anyhow::bail!("unexpected urbanization disktree data: {cell:?} {other:?}") 32 | } 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /ingest/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ingest" 3 | version = "0.1.0" 4 | description = "PoC Ingest Server for the Helium Network" 5 | authors.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | config = { workspace = true } 12 | clap = { workspace = true } 13 | thiserror = { workspace = true } 14 | serde = { workspace = true } 15 | serde_json = { workspace = true } 16 | base64 = { workspace = true } 17 | bs58 = { workspace = true } 18 | sha2 = { workspace = true } 19 | http = { workspace = true } 20 | tonic = { workspace = true } 21 | triggered = { workspace = true } 22 | futures = { workspace = true } 23 | futures-util = { workspace = true } 24 | prost = { workspace = true } 25 | tokio = { workspace = true } 26 | tokio-util = { workspace = true } 27 | tokio-stream = { workspace = true } 28 | tracing = { workspace = true } 29 | tracing-subscriber = { workspace = true } 30 | chrono = { workspace = true } 31 | helium-proto = { workspace = true } 32 | helium-crypto = { workspace = true } 33 | file-store = { path = "../file_store" } 34 | poc-metrics = { path = "../metrics" } 35 | metrics = { workspace = true } 36 | metrics-exporter-prometheus = { workspace = true } 37 | mobile-config = { path = "../mobile_config" } 38 | task-manager = { path = "../task_manager" } 39 | rand = { workspace = true } 40 | custom-tracing = { path = "../custom_tracing", features = ["grpc"] } 41 | humantime-serde = { workspace = true } 42 | 43 | [dev-dependencies] 44 | backon = "0" 45 | -------------------------------------------------------------------------------- /ingest/README.md: -------------------------------------------------------------------------------- 1 | # Ingest 2 | 3 | ## IOT 4 | 5 | ### S3 Inputs 6 | 7 | | File Type | | 8 | | :--- | :-- | 9 | | LoraBeaconReportReqV1 | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_lora.proto#L42) | 10 | | LoraWitnessReportReqV1 | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_lora.proto#L64) | 11 | 12 | ### S3 Outputs 13 | | File Type | Pattern | | 14 | | :--- | :-- | :-- | 15 | | IotBeaconIngestReport | iot_beacon_ingest_report.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_lora.proto#L83) | 16 | | IotWitnessIngestReport | iot_witness_ingest_report.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_lora.proto#L90) | 17 | 18 | ## Mobile 19 | 20 | ### S3 Inputs 21 | 22 | | File Type | | 23 | | :--- | :-- | 24 | | SpeedtestReqV1 | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_mobile.proto#L7) | 25 | | CellHeartbeatReqV1 | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_mobile.proto#L31) | 26 | | DataTransferSessionReqV1 | 
[Proto](https://github.com/helium/proto/blob/9fc57133ed1e760c3f1b65dd22d55c09c84832da/src/service/poc_mobile.proto#L376) | 27 | | CoverageObjectReqV1 | [Proto](https://github.com/helium/proto/blob/9fc57133ed1e760c3f1b65dd22d55c09c84832da/src/service/poc_mobile.proto#L136) | 28 | 29 | ### S3 Outputs 30 | 31 | | File Type | Pattern | | 32 | | :--- | :-- | :-- | 33 | | CellHeartbeatIngestReport | heartbeat_report.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_mobile.proto#L50) | 34 | | CellSpeedtestIngestReport | speedtest_report.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_mobile.proto#L25) | 35 | | DataTransferSessionIngestReport | data_transfer_session_ingest_report.\* | [Proto](https://github.com/helium/proto/blob/9fc57133ed1e760c3f1b65dd22d55c09c84832da/src/service/poc_mobile.proto#L414) | 36 | | CoverageObjectIngestReport | coverage_object_ingest_report.\* | [Proto](https://github.com/helium/proto/blob/9fc57133ed1e760c3f1b65dd22d55c09c84832da/src/service/poc_mobile.proto#L157) | 37 | 38 | -------------------------------------------------------------------------------- /ingest/pkg/settings-template.toml: -------------------------------------------------------------------------------- 1 | 2 | # log settings for the application (RUST_LOG format). Default below 3 | # 4 | # log = "ingest=debug,poc_store=info" 5 | 6 | # Mode to run the ingest in. This adjusts the allowed grpc endpoints. "iot" or 7 | # "mobile". Required 8 | mode = "iot" 9 | 10 | # Token for ingest grpc endpoint bearer authentication. This is required for 11 | # "mobile" mode and ignored for iot. 12 | # 13 | # token = "api-token" 14 | 15 | # Listen address for public grpc. Default below 16 | # 17 | # listen = "0.0.0.0:9081" 18 | 19 | # Cache folder to use. Default below 20 | # 21 | # cache = "/var/data/ingest" 22 | 23 | # Network required by all public keys: mainnet | testnet 24 | # 25 | network = "mainnet" 26 | 27 | [output] 28 | # Output bucket for ingested data 29 | 30 | # Name of bucket to write details to. Required 31 | # 32 | bucket = "ingest-bucket" 33 | 34 | # Region for bucket. Defaults to below 35 | # 36 | # region = "us-west-2" 37 | 38 | # Optional URL for AWS api endpoint. Inferred from aws config settings or aws 39 | # IAM context by default 40 | # 41 | # endpoint = "https://aws-s3-bucket.aws.com" 42 | 43 | [metrics] 44 | 45 | # Endpoint for metrics. Default below 46 | # 47 | # endpoint = "127.0.0.1:19000" 48 | -------------------------------------------------------------------------------- /ingest/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod server_iot; 2 | pub mod server_mobile; 3 | pub mod settings; 4 | 5 | pub use settings::{Mode, Settings}; 6 | -------------------------------------------------------------------------------- /ingest/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use ingest::{server_iot, server_mobile, Mode, Settings}; 4 | use std::path; 5 | 6 | #[derive(Debug, clap::Parser)] 7 | #[clap(version = env!("CARGO_PKG_VERSION"))] 8 | pub struct Cli { 9 | /// Optional configuration file to use. If present the toml file at the 10 | /// given path will be loaded. Environment variables can override the 11 | /// settings in the given file.
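/// For example, a hypothetical invocation loading a local settings file:
/// `ingest -c settings.toml server`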
12 | #[clap(short = 'c')] 13 | config: Option, 14 | 15 | #[clap(subcommand)] 16 | cmd: Cmd, 17 | } 18 | 19 | impl Cli { 20 | pub async fn run(self) -> Result<()> { 21 | self.cmd.run(Settings::new(self.config)?).await 22 | } 23 | } 24 | 25 | #[derive(Debug, clap::Subcommand)] 26 | pub enum Cmd { 27 | Server(Server), 28 | } 29 | 30 | impl Cmd { 31 | pub async fn run(&self, settings: Settings) -> Result<()> { 32 | match self { 33 | Self::Server(cmd) => cmd.run(&settings).await, 34 | } 35 | } 36 | } 37 | 38 | #[derive(Debug, clap::Args)] 39 | pub struct Server {} 40 | 41 | impl Server { 42 | pub async fn run(&self, settings: &Settings) -> Result<()> { 43 | custom_tracing::init(settings.log.clone(), settings.custom_tracing.clone()).await?; 44 | 45 | // Install the prometheus metrics exporter 46 | poc_metrics::start_metrics(&settings.metrics)?; 47 | 48 | // run the grpc server in either iot or mobile 5g mode 49 | match settings.mode { 50 | Mode::Iot => server_iot::grpc_server(settings).await, 51 | Mode::Mobile => server_mobile::grpc_server(settings).await, 52 | } 53 | } 54 | } 55 | 56 | #[tokio::main] 57 | async fn main() -> Result<()> { 58 | let cli = Cli::parse(); 59 | cli.run().await 60 | } 61 | -------------------------------------------------------------------------------- /iot_config/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iot-config" 3 | version = "0.1.0" 4 | description = "Configuration APIs for the IoT subnetwork" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | async-trait = { workspace = true } 12 | base64 = { workspace = true } 13 | bs58 = { workspace = true } 14 | chrono = { workspace = true } 15 | clap = { workspace = true } 16 | config = { workspace = true } 17 | futures = { workspace = true } 18 | futures-util = { workspace = true } 19 | helium-crypto = { workspace = true, features = ["sqlx-postgres"] } 20 | helium-proto = { workspace = true } 21 | hextree = { workspace = true } 22 | http = { workspace = true } 23 | http-serde = { workspace = true } 24 | humantime-serde = { workspace = true } 25 | libflate = "1" 26 | metrics = { workspace = true } 27 | metrics-exporter-prometheus = { workspace = true } 28 | prost = { workspace = true } 29 | retainer = { workspace = true } 30 | rust_decimal = { workspace = true, features = ["maths"] } 31 | rust_decimal_macros = { workspace = true } 32 | serde = { workspace = true } 33 | serde_json = { workspace = true } 34 | sqlx = { workspace = true } 35 | thiserror = { workspace = true } 36 | tokio = { workspace = true } 37 | tokio-stream = { workspace = true } 38 | tokio-util = { workspace = true } 39 | tonic = { workspace = true } 40 | tower-http = { workspace = true } 41 | tracing = { workspace = true } 42 | tracing-subscriber = { workspace = true } 43 | triggered = { workspace = true } 44 | 45 | custom-tracing = { path = "../custom_tracing", features = ["grpc"] } 46 | db-store = { path = "../db_store" } 47 | file-store = { path = "../file_store" } 48 | poc-metrics = { path = "../metrics" } 49 | task-manager = { path = "../task_manager" } 50 | 51 | [dev-dependencies] 52 | rand = { workspace = true } 53 | backon = "0" 54 | -------------------------------------------------------------------------------- /iot_config/README.md: -------------------------------------------------------------------------------- 1 | # IoT Config Service 2 | 3 | The IoT Config Service 
provides configuration settings and values for the 4 | LoRaWAN IoT Helium Subnetwork. Actors on the IoT subnetwork can interact 5 | with the gRPC APIs provided by the Config service to perform various 6 | operations on the network according to role, including but not limited to: 7 | 8 | - Community Management (the Foundation) can issue network configuration variables 9 | (formerly known as Chain Variables) to adjust PoC settings 10 | - Users of the network can manage their organization's routes 11 | - Gateways can request their region and associated region parameters 12 | 13 | The IoT Config service provides five major gRPC services: 14 | 15 | ## `route` 16 | 17 | provides routing information for devices on the LoRaWAN network to correctly 18 | route packets and the management of those routes by their controlling organizations 19 | 20 | ## `org` 21 | 22 | management of organizations using the Helium LoRaWAN network 23 | 24 | ## `session key filter` 25 | 26 | management of session key filters by organizations to decrypt device and other 27 | data associated with usage of the network 28 | 29 | ## `gateway` 30 | 31 | configuration data provided to LoRaWAN gateways serving the network, including 32 | the current region parameters for the region in which the gateway is asserted and 33 | metadata info about gateways primarily stored on-chain but fed through the config service 34 | to other oracles 35 | 36 | ## `admin` 37 | 38 | administrative apis for managing auth keys, region params binaries, and other service-wide 39 | settings 40 | -------------------------------------------------------------------------------- /iot_config/migrations/10_helium_devaddrs.sql: -------------------------------------------------------------------------------- 1 | create table helium_used_devaddrs ( 2 | devaddr int primary key not null, 3 | net_id int not null, 4 | inserted_at timestamptz not null default now(), 5 | updated_at timestamptz not null default now() 6 | ); 7 | 8 | select trigger_updated_at('helium_used_devaddrs'); 9 | -------------------------------------------------------------------------------- /iot_config/migrations/11_skf_max_copies.sql: -------------------------------------------------------------------------------- 1 | alter table route_session_key_filters add column max_copies int; 2 | 3 | update route_session_key_filters set max_copies = 0; 4 | 5 | alter table route_session_key_filters alter column max_copies set not null; 6 | -------------------------------------------------------------------------------- /iot_config/migrations/12_admin_keys_name.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE admin_keys ADD COLUMN IF NOT EXISTS name TEXT; 2 | -------------------------------------------------------------------------------- /iot_config/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`.
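-- The set_updated_at()/trigger_updated_at() helpers defined below are wired up
-- per table by the later migrations in this directory, e.g. 2_organizations.sql
-- runs: select trigger_updated_at('organizations');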
5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /iot_config/migrations/20230626183323_skip_empty_skf_routes.sql: -------------------------------------------------------------------------------- 1 | alter table routes add column ignore_empty_skf bool; 2 | update routes set ignore_empty_skf = 'f'; 3 | alter table routes alter column ignore_empty_skf set default false; 4 | alter table routes alter column ignore_empty_skf set not null; 5 | -------------------------------------------------------------------------------- /iot_config/migrations/20231101175438_track_deletes.sql: -------------------------------------------------------------------------------- 1 | -- Add migration script here 2 | alter table routes add column deleted bool not null default false; 3 | alter table route_eui_pairs add column deleted bool not null default false; 4 | alter table route_devaddr_ranges add column deleted bool not null default false; 5 | alter table route_session_key_filters add column deleted bool not null default false; 6 | -------------------------------------------------------------------------------- /iot_config/migrations/2_organizations.sql: -------------------------------------------------------------------------------- 1 | create table organizations ( 2 | oui bigserial primary key not null, 3 | owner_pubkey text not null, 4 | payer_pubkey text not null, 5 | delegate_keys text[], 6 | locked bool default false, 7 | 8 | inserted_at timestamptz not null default now(), 9 | updated_at timestamptz not null default now() 10 | ); 11 | 12 | select trigger_updated_at('organizations'); 13 | 14 | create table organization_devaddr_constraints ( 15 | oui bigint not null references organizations(oui) on delete cascade, 16 | net_id int not null, 17 | start_addr int not null, 18 | end_addr int not null, 19 | 20 | inserted_at timestamptz not null default now(), 21 | updated_at timestamptz not null default now() 22 | ); 23 | 24 | select trigger_updated_at('organization_devaddr_constraints'); 25 | -------------------------------------------------------------------------------- /iot_config/migrations/3_routes.sql: -------------------------------------------------------------------------------- 1 | create table routes ( 2 | id uuid primary key not null default uuid_generate_v1mc(), 3 | oui bigint not null references organizations(oui) on delete cascade, 4 | net_id int not null, 5 | max_copies int not null, 6 | server_host text not null, 7 | server_port int not null, 8 | server_protocol_opts jsonb not null, 9 | active bool default true, 10 | 11 | inserted_at timestamptz not null default now(), 12 | updated_at timestamptz not null default now() 13 | ); 14 | 15 | select trigger_updated_at('routes'); 16 | 17 | create index route_oui_idx on routes (oui); 18 | 19 | create table route_eui_pairs ( 20 | route_id uuid not null references routes(id) on delete cascade, 21 | app_eui bigint not null, 22 | dev_eui bigint not null, 23 | primary 
key (route_id, app_eui, dev_eui), 24 | 25 | inserted_at timestamptz not null default now(), 26 | updated_at timestamptz not null default now() 27 | ); 28 | 29 | select trigger_updated_at('route_eui_pairs'); 30 | 31 | create index eui_pair_route_idx on route_eui_pairs (route_id); 32 | 33 | create table route_devaddr_ranges ( 34 | route_id uuid not null references routes(id) on delete cascade, 35 | start_addr int not null, 36 | end_addr int not null, 37 | primary key (route_id, start_addr, end_addr), 38 | 39 | inserted_at timestamptz not null default now(), 40 | updated_at timestamptz not null default now() 41 | ); 42 | 43 | select trigger_updated_at('route_devaddr_ranges'); 44 | 45 | create index devaddr_range_route_idx on route_devaddr_ranges (route_id); 46 | -------------------------------------------------------------------------------- /iot_config/migrations/4_regions.sql: -------------------------------------------------------------------------------- 1 | create table regions ( 2 | region text primary key not null, 3 | params bytea not null, 4 | indexes bytea, 5 | 6 | inserted_at timestamptz not null default now(), 7 | updated_at timestamptz not null default now() 8 | ); 9 | 10 | select trigger_updated_at('regions'); 11 | -------------------------------------------------------------------------------- /iot_config/migrations/5_admin_keys.sql: -------------------------------------------------------------------------------- 1 | create type key_type as enum ( 2 | 'administrator', 3 | 'packet_router' 4 | ); 5 | 6 | create table admin_keys ( 7 | pubkey text not null unique, 8 | key_type key_type not null, 9 | 10 | inserted_at timestamptz not null default now(), 11 | updated_at timestamptz not null default now() 12 | ); 13 | 14 | select trigger_updated_at('admin_keys'); 15 | -------------------------------------------------------------------------------- /iot_config/migrations/6_session_key_filters.sql: -------------------------------------------------------------------------------- 1 | create table session_key_filters ( 2 | oui bigint not null references organizations(oui) on delete cascade, 3 | devaddr int not null, 4 | session_key text not null, 5 | 6 | inserted_at timestamptz not null default now(), 7 | updated_at timestamptz not null default now(), 8 | 9 | primary key (oui, devaddr, session_key) 10 | ); 11 | 12 | create index skf_devaddr_idx on session_key_filters (devaddr); 13 | 14 | select trigger_updated_at('session_key_filters'); 15 | -------------------------------------------------------------------------------- /iot_config/migrations/7_oracle_key_type.sql: -------------------------------------------------------------------------------- 1 | alter type key_type add value 'oracle'; -------------------------------------------------------------------------------- /iot_config/migrations/8_skfs_by_route.sql: -------------------------------------------------------------------------------- 1 | drop table session_key_filters; 2 | 3 | create table route_session_key_filters ( 4 | route_id uuid not null references routes(id) on delete cascade, 5 | devaddr int not null, 6 | session_key text not null, 7 | 8 | inserted_at timestamptz not null default now(), 9 | updated_at timestamptz not null default now(), 10 | 11 | primary key (route_id, devaddr, session_key) 12 | ); 13 | 14 | create index skf_devaddr_idx on route_session_key_filters (devaddr); 15 | 16 | select trigger_updated_at('route_session_key_filters'); 17 | -------------------------------------------------------------------------------- 
/iot_config/migrations/9_delegate_keys.sql: -------------------------------------------------------------------------------- 1 | create table organization_delegate_keys ( 2 | delegate_pubkey text primary key not null, 3 | oui bigint not null references organizations(oui) on delete cascade, 4 | 5 | inserted_at timestamptz not null default now(), 6 | updated_at timestamptz not null default now() 7 | ); 8 | 9 | select trigger_updated_at('organization_delegate_keys'); 10 | 11 | insert into organization_delegate_keys 12 | select delegate_pubkey, oui from organizations, 13 | unnest(delegate_keys) as delegate_pubkey; 14 | 15 | alter table organizations drop column delegate_keys; 16 | -------------------------------------------------------------------------------- /iot_config/pkg/settings-template.toml: -------------------------------------------------------------------------------- 1 | # log settings for the application (RUST_LOG format). Default below 2 | # 3 | # log = "iot-config=debug,poc_store=info" 4 | 5 | 6 | # Listen address for public grpc. Default below 7 | # 8 | # listen = "0.0.0.0:8080" 9 | 10 | network = "mainnet" 11 | 12 | [database] 13 | 14 | # Postgres Connection Information 15 | url = "postgres://postgres:postgres@127.0.0.1:5432/config_db" 16 | 17 | max_connections = 20 18 | 19 | [metadata] 20 | 21 | # URL for the solana on-chain data such as hotspot asserted hexes 22 | host = "helius.aws" 23 | port = 5432 24 | username = "helius" 25 | database = "db" 26 | 27 | auth_type = "iam" 28 | # IAM Role to assume to generate db auth token 29 | 30 | iam_role_arn = "arn::iam" 31 | iam_role_session_name = "role-session-name" 32 | iam_duration_seconds = 900 33 | iam_region = "us-west-2" 34 | 35 | # Max connections to database 36 | max_connections = 20 37 | 38 | [metrics] 39 | 40 | # Endpoint for metrics. Default below 41 | # 42 | # endpoint = "127.0.0.1:19000" 43 | -------------------------------------------------------------------------------- /iot_config/src/client/settings.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | use std::{str::FromStr, sync::Arc}; 3 | 4 | #[derive(Clone, Debug, Deserialize)] 5 | pub struct Settings { 6 | /// grpc url to the iot config oracle server 7 | #[serde(with = "http_serde::uri")] 8 | pub url: http::Uri, 9 | /// File from which to load keypair for signing config client requests 10 | pub signing_keypair: String, 11 | /// B58 encoded public key of the iot config server for verifying responses 12 | pub config_pubkey: String, 13 | /// Connect timeout for the iot config client in seconds. Default 5 14 | #[serde(default = "default_connect_timeout")] 15 | pub connect_timeout: u64, 16 | /// RPC timeout for iot config client in seconds. Default 5 17 | #[serde(default = "default_rpc_timeout")] 18 | pub rpc_timeout: u64, 19 | /// Batch size for gateway info stream results.
Default 1000 20 | #[serde(default = "default_batch_size")] 21 | pub batch_size: u32, 22 | } 23 | 24 | fn default_connect_timeout() -> u64 { 25 | 5 26 | } 27 | 28 | fn default_rpc_timeout() -> u64 { 29 | 5 30 | } 31 | 32 | fn default_batch_size() -> u32 { 33 | 1000 34 | } 35 | 36 | impl Settings { 37 | pub fn signing_keypair( 38 | &self, 39 | ) -> Result<Arc<helium_crypto::Keypair>, Box<helium_crypto::Error>> { 40 | let data = std::fs::read(&self.signing_keypair).map_err(helium_crypto::Error::from)?; 41 | Ok(Arc::new(helium_crypto::Keypair::try_from(&data[..])?)) 42 | } 43 | 44 | pub fn config_pubkey(&self) -> Result<helium_crypto::PublicKey, helium_crypto::Error> { 45 | helium_crypto::PublicKey::from_str(&self.config_pubkey) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /iot_packet_verifier/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iot-packet-verifier" 3 | version = "0.1.0" 4 | description = "Packet verification for IOT" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | async-trait = { workspace = true } 12 | chrono = { workspace = true } 13 | clap = { workspace = true } 14 | config = { workspace = true } 15 | futures = { workspace = true } 16 | futures-util = { workspace = true } 17 | helium-crypto = { workspace = true, features = [ 18 | "sqlx-postgres", 19 | "multisig", 20 | "solana", 21 | ] } 22 | helium-proto = { workspace = true } 23 | http = { workspace = true } 24 | http-serde = { workspace = true } 25 | humantime-serde = { workspace = true } 26 | metrics = { workspace = true } 27 | prost = { workspace = true } 28 | serde = { workspace = true } 29 | sqlx = { workspace = true } 30 | thiserror = { workspace = true } 31 | tokio = { workspace = true } 32 | tonic = { workspace = true } 33 | tracing = { workspace = true } 34 | tracing-subscriber = { workspace = true } 35 | triggered = { workspace = true } 36 | 37 | custom-tracing = { path = "../custom_tracing" } 38 | db-store = { path = "../db_store" } 39 | file-store = { path = "../file_store" } 40 | iot-config = { path = "../iot_config" } 41 | poc-metrics = { path = "../metrics" } 42 | solana = { path = "../solana" } 43 | task-manager = { path = "../task_manager" } 44 | -------------------------------------------------------------------------------- /iot_packet_verifier/README.md: -------------------------------------------------------------------------------- 1 | # IoT Packet Verifier 2 | 3 | The IoT Packet Verifier reads published packet reports and verifies that the 4 | payer has a sufficient balance to pay for the packet. If the payer does, the 5 | verifier burns the data credits on the Solana chain, tells the config server to 6 | enable the owner, and writes a valid packet report to S3. If the payer's balance 7 | is insufficient, the verifier tells the config server to disable the owner and 8 | writes an invalid packet report to S3.
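The balance check and debit can be sketched as follows. This is a minimal illustration only, with hypothetical types and names rather than the crate's actual API (the real logic lives in `src/balances.rs` and `src/verifier.rs`):

```rust
use std::collections::HashMap;

type Payer = String; // stands in for a payer public key

enum Outcome {
    Valid,
    Invalid,
}

// Debit the cached balance if the payer can cover the packet. Persisting the
// debit as a pending burn for the burner task is elided here.
fn verify_packet(balances: &mut HashMap<Payer, u64>, payer: &Payer, dc_cost: u64) -> Outcome {
    match balances.get_mut(payer) {
        Some(balance) if *balance >= dc_cost => {
            *balance -= dc_cost; // cheap in-memory debit, no on-chain burn yet
            Outcome::Valid
        }
        _ => Outcome::Invalid, // owner gets disabled via the config server
    }
}
```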
9 | 10 | ## S3 Inputs 11 | 12 | | File Type | Pattern | | 13 | | :-- | :-- | :-- | 14 | | PacketRouterPacketReportV1 | packetreport.* | [Proto](https://github.com/helium/proto/blob/master/src/service/packet_router.proto#L8) | 15 | 16 | ## S3 Outputs 17 | 18 | | File Type | Pattern | | 19 | | :-- | :-- | :-- | 20 | | ValidPacket | valid_packet.* | [Proto](https://github.com/helium/proto/blob/master/src/service/packet_verifier.proto#L5) | 21 | | InvalidPacket | invalid_packet.* | [Proto](https://github.com/helium/proto/blob/master/src/service/packet_verifier.proto#L11) | 22 | 23 | ## Details of operation 24 | 25 | Checking the balance of an owner on the Solana chain is cheap, but burning data 26 | credits is quite expensive, and we are limited to burning the data credits of 27 | one payer per second. Therefore, the verifier is split into three parts: 28 | 29 | - An in-memory cache that contains the previously recorded balance of the payer. 30 | This cache allows us to quickly debit a payer and check their balance without 31 | having to burn their credits on chain for each packet. 32 | - A postgres database that contains the pending burn amounts for each payer. When 33 | a payer is debited, the amount is added to the database, ensuring that if the 34 | packet verifier crashes the state will be recoverable. 35 | - A burner process that polls the database for a random payer whose pending amount 36 | exceeds a certain number of data credits. This process issues a burn transaction to 37 | the Solana chain and removes the burned amount from the in-memory cache. 38 | -------------------------------------------------------------------------------- /iot_packet_verifier/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`.
5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /iot_packet_verifier/migrations/2_burns.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE pending_burns ( 2 | payer TEXT PRIMARY KEY, 3 | amount BIGINT NOT NULL, 4 | last_burn TIMESTAMP NOT NULL 5 | ); 6 | -------------------------------------------------------------------------------- /iot_packet_verifier/migrations/3_files_processed.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE files_processed ( 2 | file_name VARCHAR PRIMARY KEY, 3 | file_type VARCHAR NOT NULL, 4 | file_timestamp TIMESTAMPTZ NOT NULL, 5 | processed_at TIMESTAMPTZ NOT NULL 6 | ); 7 | -------------------------------------------------------------------------------- /iot_packet_verifier/migrations/4_pending_txns.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE pending_txns ( 2 | signature TEXT PRIMARY KEY, 3 | payer TEXT NOT NULL, 4 | amount BIGINT NOT NULL, 5 | time_of_submission TIMESTAMPTZ NOT NULL 6 | ); 7 | -------------------------------------------------------------------------------- /iot_packet_verifier/migrations/5_add_time_zone.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE pending_burns ALTER COLUMN last_burn TYPE TIMESTAMPTZ USING last_burn AT TIME ZONE 'UTC'; 2 | -------------------------------------------------------------------------------- /iot_packet_verifier/migrations/6_files_processed_process_name.sql: -------------------------------------------------------------------------------- 1 | alter table files_processed add column process_name text not null default 'default'; 2 | -------------------------------------------------------------------------------- /iot_packet_verifier/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod balances; 2 | pub mod burner; 3 | pub mod daemon; 4 | pub mod pending; 5 | pub mod settings; 6 | pub mod verifier; 7 | -------------------------------------------------------------------------------- /iot_packet_verifier/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use iot_packet_verifier::{daemon, settings::Settings}; 4 | use std::path::PathBuf; 5 | 6 | #[derive(clap::Parser)] 7 | #[clap(version = env!("CARGO_PKG_VERSION"))] 8 | #[clap(about = "Helium IOT Packet Verifier Server")] 9 | pub struct Cli { 10 | /// Optional configuration file to use. If present the toml file at the 11 | /// given path will be loaded. Environment variables can override the 12 | /// settings in the given file.
13 | #[clap(short = 'c')] 14 | config: Option, 15 | 16 | #[clap(subcommand)] 17 | cmd: Cmd, 18 | } 19 | 20 | impl Cli { 21 | pub async fn run(self) -> Result<()> { 22 | let settings = Settings::new(self.config)?; 23 | custom_tracing::init(settings.log.clone(), settings.custom_tracing.clone()).await?; 24 | self.cmd.run(settings).await 25 | } 26 | } 27 | 28 | #[derive(clap::Subcommand)] 29 | pub enum Cmd { 30 | Server(daemon::Cmd), 31 | } 32 | 33 | impl Cmd { 34 | async fn run(self, settings: Settings) -> Result<()> { 35 | match self { 36 | Self::Server(cmd) => cmd.run(settings).await, 37 | } 38 | } 39 | } 40 | 41 | #[tokio::main] 42 | async fn main() -> Result<()> { 43 | let cli = Cli::parse(); 44 | cli.run().await 45 | } 46 | -------------------------------------------------------------------------------- /iot_verifier/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iot-verifier" 3 | version = "0.1.0" 4 | description = "PoC Verifier Server for the Helium Iot Network" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [build-dependencies] 10 | cmake = "0.1" 11 | 12 | [dependencies] 13 | anyhow = { workspace = true } 14 | async-trait = { workspace = true } 15 | base64 = { workspace = true } 16 | beacon = { workspace = true } 17 | blake3 = { workspace = true } 18 | chrono = { workspace = true } 19 | clap = { workspace = true } 20 | config = { workspace = true } 21 | futures = { workspace = true } 22 | futures-util = { workspace = true } 23 | h3o = { workspace = true, features = ["geo"] } 24 | helium-crypto = { workspace = true, features = ["sqlx-postgres"] } 25 | helium-proto = { workspace = true } 26 | http-serde = { workspace = true } 27 | humantime-serde = { workspace = true } 28 | itertools = { workspace = true } 29 | lazy_static = { workspace = true } 30 | metrics = { workspace = true } 31 | once_cell = { workspace = true } 32 | prost = { workspace = true } 33 | rand = { workspace = true } 34 | retainer = { workspace = true } 35 | rust_decimal = { workspace = true, features = ["maths"] } 36 | rust_decimal_macros = { workspace = true } 37 | serde = { workspace = true } 38 | serde_json = { workspace = true } 39 | sha2 = { workspace = true } 40 | sqlx = { workspace = true } 41 | thiserror = { workspace = true } 42 | tokio = { workspace = true } 43 | tokio-stream = { workspace = true } 44 | tokio-util = { workspace = true } 45 | tonic = { workspace = true } 46 | tracing = { workspace = true } 47 | tracing-subscriber = { workspace = true } 48 | triggered = { workspace = true } 49 | twox-hash = { workspace = true } 50 | xorf = { workspace = true } 51 | 52 | custom-tracing = { path = "../custom_tracing" } 53 | db-store = { path = "../db_store" } 54 | denylist = { path = "../denylist" } 55 | file-store = { path = "../file_store" } 56 | iot-config = { path = "../iot_config" } 57 | poc-metrics = { path = "../metrics" } 58 | price = { path = "../price" } 59 | reward-scheduler = { path = "../reward_scheduler" } 60 | solana = { path = "../solana" } 61 | task-manager = { path = "../task_manager" } 62 | -------------------------------------------------------------------------------- /iot_verifier/migrations/10_gateway_dc_shares.sql: -------------------------------------------------------------------------------- 1 | create table gateway_dc_shares ( 2 | hotspot_key text not null, 3 | reward_timestamp timestamptz not null, 4 | num_dcs bigint default 0, 5 | -- id of the associated valid poc 
report 6 | id bytea primary key not null 7 | ); 8 | 9 | create index idx_gds_hotspot_key on gateway_dc_shares (hotspot_key); -------------------------------------------------------------------------------- /iot_verifier/migrations/11_files_processed.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE files_processed ( 2 | file_name VARCHAR PRIMARY KEY, 3 | file_type VARCHAR NOT NULL, 4 | file_timestamp TIMESTAMPTZ NOT NULL, 5 | processed_at TIMESTAMPTZ NOT NULL 6 | ); 7 | -------------------------------------------------------------------------------- /iot_verifier/migrations/12_bootstrap_disable_data_checks.sql: -------------------------------------------------------------------------------- 1 | insert into meta 2 | (key, value) 3 | values 4 | ('disable_complete_data_checks_until', '0') 5 | on conflict 6 | (key) 7 | do nothing; 8 | -------------------------------------------------------------------------------- /iot_verifier/migrations/13_files_processed_process_name.sql: -------------------------------------------------------------------------------- 1 | alter table files_processed add column process_name text not null default 'default'; 2 | -------------------------------------------------------------------------------- /iot_verifier/migrations/14_last_witness.sql: -------------------------------------------------------------------------------- 1 | create table last_witness ( 2 | id bytea primary key not null, 3 | timestamp timestamptz not null 4 | ); 5 | -- seed last_witness with timestamps from last_beacon 6 | insert into last_witness (id, timestamp) 7 | select id, timestamp from last_beacon 8 | where timestamp > now() - interval '7 day'; 9 | -------------------------------------------------------------------------------- /iot_verifier/migrations/15_last_beacon_reciprocity.sql: -------------------------------------------------------------------------------- 1 | create table last_beacon_recip ( 2 | id bytea primary key not null, 3 | timestamp timestamptz not null 4 | ); 5 | -- seed beacon_recip with timestamps from last_beacon 6 | insert into last_beacon_recip (id, timestamp) 7 | select id, timestamp from last_beacon 8 | where timestamp > now() - interval '7 day'; 9 | -------------------------------------------------------------------------------- /iot_verifier/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`.
5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /iot_verifier/migrations/2_meta.sql: -------------------------------------------------------------------------------- 1 | create table meta ( 2 | key text primary key not null, 3 | value text 4 | ); 5 | -------------------------------------------------------------------------------- /iot_verifier/migrations/3_poc_report.sql: -------------------------------------------------------------------------------- 1 | create type iotstatus AS enum ( 2 | 'pending', 3 | 'ready', 4 | 'valid', 5 | 'invalid' 6 | ); 7 | 8 | create type reporttype AS enum ( 9 | 'witness', 10 | 'beacon' 11 | ); 12 | 13 | create table poc_report ( 14 | id bytea primary key not null, 15 | -- remote_entropy: allow nulls as only beacon reports will populate this 16 | remote_entropy bytea, 17 | packet_data bytea not null, 18 | report_data bytea not null, 19 | report_type reporttype, 20 | status iotstatus default 'pending' not null, 21 | attempts integer default 0, 22 | report_timestamp timestamptz not null, 23 | last_processed timestamptz default now() not null, 24 | created_at timestamptz default now() 25 | ); 26 | 27 | CREATE INDEX idx_poc_report_packet_data 28 | ON poc_report(packet_data); 29 | 30 | CREATE INDEX idx_poc_report_report_type 31 | ON poc_report(report_type); 32 | 33 | CREATE INDEX idx_poc_report_status 34 | ON poc_report(status); 35 | 36 | CREATE INDEX idx_poc_report_created_at 37 | ON poc_report(created_at); 38 | 39 | CREATE INDEX idx_poc_report_attempts 40 | ON poc_report(attempts); 41 | 42 | CREATE INDEX idx_poc_report_report_type_status_created_at 43 | ON poc_report (report_type,status,created_at); 44 | 45 | 46 | -------------------------------------------------------------------------------- /iot_verifier/migrations/4_last_beacon.sql: -------------------------------------------------------------------------------- 1 | create table last_beacon ( 2 | id bytea primary key not null, 3 | timestamp timestamptz not null 4 | ); 5 | -------------------------------------------------------------------------------- /iot_verifier/migrations/5_entropy.sql: -------------------------------------------------------------------------------- 1 | 2 | create table entropy ( 3 | id bytea primary key not null, 4 | data bytea not null, 5 | timestamp timestamptz default now() not null, 6 | version integer not null, 7 | created_at timestamptz default now() 8 | ); 9 | 10 | CREATE INDEX idx_entropy_id 11 | ON entropy(id); 12 | 13 | CREATE INDEX idx_entropy_timestamp 14 | ON entropy(timestamp); 15 | -------------------------------------------------------------------------------- /iot_verifier/migrations/6_gateway_shares.sql: -------------------------------------------------------------------------------- 1 | create table gateway_shares ( 2 | hotspot_key text not null, 3 | reward_type reporttype not null, 4 | reward_timestamp timestamptz not null, 5 | hex_scale decimal not null, 6 | reward_unit 
decimal not null, 7 | -- id of the associated valid poc report 8 | poc_id bytea not null, 9 | primary key(hotspot_key, poc_id) 10 | ); 11 | 12 | create index idx_hotspot_key on gateway_shares (hotspot_key); 13 | 14 | create index idx_reward_type on gateway_shares (reward_type); 15 | -------------------------------------------------------------------------------- /iot_verifier/migrations/7_bootstrap_reward_time.sql: -------------------------------------------------------------------------------- 1 | insert into meta 2 | (key, value) 3 | values 4 | ('last_rewarded_end_time', '1671499800'), 5 | ('next_rewarded_end_time', '1671586200') 6 | on conflict 7 | (key) 8 | do nothing; 9 | -------------------------------------------------------------------------------- /iot_verifier/migrations/9_delete_meta_report_entry.sql: -------------------------------------------------------------------------------- 1 | DELETE from meta where key = 'report'; 2 | -------------------------------------------------------------------------------- /iot_verifier/src/gateway_cache.rs: -------------------------------------------------------------------------------- 1 | // 2 | // this cache is used to resolve gateway info for a given gateway 3 | // the gateway info is required by the poc verifications as part of verifying beacon and witness reports 4 | // the cache is populated / updated by the gateway_updater and is prepopulated at startup 5 | // 6 | 7 | use crate::gateway_updater::MessageReceiver; 8 | use helium_crypto::PublicKeyBinary; 9 | use iot_config::gateway_info::GatewayInfo; 10 | 11 | #[derive(Clone)] 12 | pub struct GatewayCache { 13 | gateway_cache_receiver: MessageReceiver, 14 | } 15 | 16 | #[derive(Debug, thiserror::Error)] 17 | pub enum GatewayCacheError { 18 | #[error("gateway not found: {0}")] 19 | GatewayNotFound(PublicKeyBinary), 20 | } 21 | 22 | impl GatewayCache { 23 | pub fn new(gateway_cache_receiver: MessageReceiver) -> Self { 24 | Self { 25 | gateway_cache_receiver, 26 | } 27 | } 28 | 29 | pub async fn resolve_gateway_info( 30 | &self, 31 | address: &PublicKeyBinary, 32 | ) -> Result { 33 | match self.gateway_cache_receiver.borrow().get(address) { 34 | Some(hit) => { 35 | metrics::counter!("oracles_iot_verifier_gateway_cache_hit").increment(1); 36 | Ok(hit.clone()) 37 | } 38 | None => { 39 | metrics::counter!("oracles_iot_verifier_gateway_cache_miss").increment(1); 40 | Err(GatewayCacheError::GatewayNotFound(address.clone())) 41 | } 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /iot_verifier/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod entropy; 2 | pub mod entropy_loader; 3 | pub mod gateway_cache; 4 | pub mod gateway_updater; 5 | pub mod hex_density; 6 | pub mod last_beacon; 7 | pub mod last_beacon_reciprocity; 8 | pub mod last_witness; 9 | pub mod loader; 10 | pub mod meta; 11 | pub mod packet_loader; 12 | pub mod poc; 13 | pub mod poc_report; 14 | pub mod purger; 15 | pub mod region_cache; 16 | pub mod reward_share; 17 | pub mod rewarder; 18 | pub mod runner; 19 | mod settings; 20 | pub mod telemetry; 21 | pub mod tx_scaler; 22 | pub mod witness_updater; 23 | 24 | use rust_decimal::Decimal; 25 | pub use settings::Settings; 26 | use solana::SolPubkey; 27 | 28 | #[derive(Clone, Debug)] 29 | pub struct PriceInfo { 30 | pub price_in_bones: u64, 31 | pub price_per_token: Decimal, 32 | pub price_per_bone: Decimal, 33 | pub decimals: u8, 34 | } 35 | 36 | impl PriceInfo { 37 | pub fn 
new(price_in_bones: u64, decimals: u8) -> Self { 38 | let price_per_token = 39 | Decimal::from(price_in_bones) / Decimal::from(10_u64.pow(decimals as u32)); 40 | let price_per_bone = price_per_token / Decimal::from(10_u64.pow(decimals as u32)); 41 | Self { 42 | price_in_bones, 43 | price_per_token, 44 | price_per_bone, 45 | decimals, 46 | } 47 | } 48 | } 49 | 50 | pub fn resolve_subdao_pubkey() -> SolPubkey { 51 | solana::SubDao::Iot.key() 52 | } 53 | -------------------------------------------------------------------------------- /iot_verifier/tests/integrations/main.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | 3 | mod purger_tests; 4 | mod rewarder_operations; 5 | mod rewarder_oracles; 6 | mod rewarder_poc_dc; 7 | mod runner_tests; 8 | -------------------------------------------------------------------------------- /iot_verifier/tests/integrations/rewarder_operations.rs: -------------------------------------------------------------------------------- 1 | use crate::common::{self, rewards_info_24_hours, MockFileSinkReceiver}; 2 | use helium_proto::services::poc_lora::{IotRewardShare, OperationalReward}; 3 | use iot_verifier::{reward_share, rewarder}; 4 | use rust_decimal::{prelude::ToPrimitive, Decimal, RoundingStrategy}; 5 | use rust_decimal_macros::dec; 6 | 7 | #[tokio::test] 8 | async fn test_operations() -> anyhow::Result<()> { 9 | let (iot_rewards_client, mut iot_rewards) = common::create_file_sink(); 10 | 11 | let reward_info = rewards_info_24_hours(); 12 | 13 | let (_, rewards) = tokio::join!( 14 | rewarder::reward_operational(&iot_rewards_client, &reward_info), 15 | receive_expected_rewards(&mut iot_rewards) 16 | ); 17 | if let Ok(ops_reward) = rewards { 18 | // confirm the total rewards allocated matches expectations 19 | let expected_total = 20 | reward_share::get_scheduled_ops_fund_tokens(reward_info.epoch_emissions) 21 | .to_u64() 22 | .unwrap(); 23 | assert_eq!(ops_reward.amount, 6_232_876_712_328); 24 | assert_eq!(ops_reward.amount, expected_total); 25 | 26 | // confirm the ops percentage amount matches expectations 27 | let ops_percent = (Decimal::from(ops_reward.amount) / reward_info.epoch_emissions) 28 | .round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven); 29 | assert_eq!(ops_percent, dec!(0.07)); 30 | } else { 31 | panic!("no rewards received"); 32 | }; 33 | Ok(()) 34 | } 35 | 36 | async fn receive_expected_rewards( 37 | iot_rewards: &mut MockFileSinkReceiver<IotRewardShare>, 38 | ) -> anyhow::Result<OperationalReward> { 39 | // expect one operational reward msg 40 | let reward = iot_rewards.receive_operational_reward().await; 41 | 42 | // should be no further msgs 43 | iot_rewards.assert_no_messages(); 44 | 45 | Ok(reward) 46 | } 47 | -------------------------------------------------------------------------------- /iot_verifier/tests/integrations/rewarder_oracles.rs: -------------------------------------------------------------------------------- 1 | use crate::common::{self, rewards_info_24_hours, MockFileSinkReceiver}; 2 | use helium_proto::services::poc_lora::{IotRewardShare, UnallocatedReward}; 3 | use iot_verifier::{reward_share, rewarder}; 4 | use rust_decimal::{prelude::ToPrimitive, Decimal, RoundingStrategy}; 5 | use rust_decimal_macros::dec; 6 | use sqlx::PgPool; 7 | 8 | #[sqlx::test] 9 | async fn test_oracles(_pool: PgPool) -> anyhow::Result<()> { 10 | let (iot_rewards_client, mut iot_rewards) = common::create_file_sink(); 11 | 12 | let reward_info = rewards_info_24_hours(); 13 | 14 | let (_, rewards) = tokio::join!( 15 |
rewarder::reward_oracles(&iot_rewards_client, &reward_info), 16 | receive_expected_rewards(&mut iot_rewards) 17 | ); 18 | if let Ok(unallocated_oracle_reward) = rewards { 19 | // confirm the total rewards matches expectations 20 | let expected_total = reward_share::get_scheduled_oracle_tokens(reward_info.epoch_emissions) 21 | .to_u64() 22 | .unwrap(); 23 | assert_eq!(unallocated_oracle_reward.amount, 6_232_876_712_328); 24 | assert_eq!(unallocated_oracle_reward.amount, expected_total); 25 | 26 | // confirm the ops percentage amount matches expectations 27 | let oracle_percent = (Decimal::from(unallocated_oracle_reward.amount) 28 | / reward_info.epoch_emissions) 29 | .round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven); 30 | assert_eq!(oracle_percent, dec!(0.07)); 31 | } else { 32 | panic!("no rewards received"); 33 | }; 34 | Ok(()) 35 | } 36 | 37 | async fn receive_expected_rewards( 38 | iot_rewards: &mut MockFileSinkReceiver<IotRewardShare>, 39 | ) -> anyhow::Result<UnallocatedReward> { 40 | // expect one unallocated reward 41 | // as oracle rewards are currently 100% unallocated 42 | let reward = iot_rewards.receive_unallocated_reward().await; 43 | 44 | // should be no further msgs 45 | iot_rewards.assert_no_messages(); 46 | 47 | Ok(reward) 48 | } 49 | -------------------------------------------------------------------------------- /metrics/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "poc-metrics" 3 | version = "0.1.0" 4 | description = "Metrics for Helium Mobile Network servers" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | tower = "0.4" 11 | thiserror = { workspace = true } 12 | serde = { workspace = true } 13 | tracing = { workspace = true } 14 | tracing-subscriber = { workspace = true } 15 | metrics = { workspace = true } 16 | metrics-exporter-prometheus = { workspace = true } 17 | futures = { workspace = true } 18 | 19 | [dev-dependencies] 20 | tokio = { workspace = true } 21 | reqwest = { workspace = true } 22 | -------------------------------------------------------------------------------- /metrics/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | pub type Result<T = ()> = std::result::Result<T, Error>; 4 | 5 | #[derive(Error, Debug)] 6 | pub enum Error { 7 | #[error("socket address decode error {0}")] 8 | DecodeError(#[from] std::net::AddrParseError), 9 | #[error("metrics build error")] 10 | Metrics(#[from] metrics_exporter_prometheus::BuildError), 11 | } 12 | -------------------------------------------------------------------------------- /metrics/src/settings.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | use serde::Deserialize; 4 | 5 | #[derive(Debug, Deserialize, Clone)] 6 | pub struct Settings { 7 | /// Scrape endpoint for metrics 8 | #[serde(default = "default_metrics_endpoint")] 9 | pub endpoint: SocketAddr, 10 | } 11 | 12 | fn default_metrics_endpoint() -> SocketAddr { 13 | "127.0.0.1:19000".parse().unwrap() 14 | } 15 | -------------------------------------------------------------------------------- /mobile_config/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mobile-config" 3 | version = "0.1.0" 4 | description = "Configuration APIs for the Mobile subnetwork" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8
| 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | async-trait = { workspace = true } 12 | base64 = { workspace = true } 13 | blake3 = { workspace = true } 14 | bs58 = { workspace = true } 15 | chrono = { workspace = true } 16 | clap = { workspace = true } 17 | config = { workspace = true } 18 | futures = { workspace = true } 19 | futures-util = { workspace = true } 20 | helium-crypto = { workspace = true, features = ["sqlx-postgres"] } 21 | helium-proto = { workspace = true } 22 | hextree = { workspace = true } 23 | http = { workspace = true } 24 | http-serde = { workspace = true } 25 | humantime = { workspace = true } 26 | humantime-serde = { workspace = true } 27 | lazy_static = { workspace = true } 28 | metrics = { workspace = true } 29 | metrics-exporter-prometheus = { workspace = true } 30 | prost = { workspace = true } 31 | retainer = { workspace = true } 32 | rust_decimal = { workspace = true } 33 | rust_decimal_macros = { workspace = true } 34 | serde = { workspace = true } 35 | serde_json = { workspace = true } 36 | sqlx = { workspace = true } 37 | thiserror = { workspace = true } 38 | tokio = { workspace = true } 39 | tokio-stream = { workspace = true } 40 | tokio-util = { workspace = true } 41 | tonic = { workspace = true } 42 | tower-http = { workspace = true } 43 | tracing = { workspace = true } 44 | tracing-subscriber = { workspace = true } 45 | triggered = { workspace = true } 46 | 47 | coverage-map = { path = "../coverage_map" } 48 | custom-tracing = { path = "../custom_tracing", features = ["grpc"] } 49 | db-store = { path = "../db_store" } 50 | file-store = { path = "../file_store" } 51 | poc-metrics = { path = "../metrics" } 52 | solana = { path = "../solana" } 53 | task-manager = { path = "../task_manager" } 54 | 55 | [dev-dependencies] 56 | rand = { workspace = true } 57 | tokio-stream = { workspace = true, features = ["net"] } 58 | -------------------------------------------------------------------------------- /mobile_config/README.md: -------------------------------------------------------------------------------- 1 | # Mobile Config Service 2 | 3 | The Mobile Config Service provides configuration settings and values for the 4 | CBRS Mobile Helium Subnetwork. Actors on the Mobile subnetwork can interact with 5 | the gRPC APIs provided by the Config service to perform various operations on the 6 | network according to their role, including but not limited to: 7 | 8 | - Community Management (the Foundation) can issue network configuration variables 9 | - Oracles can request hotspot information from the Solana chain 10 | 11 | The Mobile Config service provides 3 major gRPC services: 12 | 13 | ## `hotspot` 14 | 15 | Provides metadata about hotspots stored on the Solana chain, used to determine 16 | hotspot interactions in PoC algorithms and reward calculations. 17 | 18 | ## `router` 19 | 20 | Validates the eligibility of a given router public key to burn data credits on 21 | behalf of the network when the router attempts to send mobile traffic across the 22 | network.
Routers whose public keys are not registered with the config service but 23 | attempt to send data traffic are denied burn authority. 24 | 25 | ## `admin` 26 | 27 | Administrative APIs for managing auth keys and other service-wide settings. 28 | -------------------------------------------------------------------------------- /mobile_config/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`. 5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /mobile_config/migrations/20230708171204_add_pcs_key_role.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE key_role ADD VALUE IF NOT EXISTS 'pcs'; 2 | -------------------------------------------------------------------------------- /mobile_config/migrations/20250411184550_add_banning_key_role.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE key_role ADD VALUE IF NOT EXISTS 'banning'; -------------------------------------------------------------------------------- /mobile_config/migrations/2_registered_keys.sql: -------------------------------------------------------------------------------- 1 | create type key_type as enum ( 2 | 'administrator', 3 | 'packet_router', 4 | 'oracle' 5 | ); 6 | 7 | create table registered_keys ( 8 | pubkey text not null unique, 9 | key_type key_type not null, 10 | 11 | created_at timestamptz not null default now(), 12 | updated_at timestamptz not null default now() 13 | ); 14 | 15 | select trigger_updated_at('registered_keys'); 16 | -------------------------------------------------------------------------------- /mobile_config/migrations/3_carrier_keys.sql: -------------------------------------------------------------------------------- 1 | alter type key_type rename value 'packet_router' to 'router'; 2 | 3 | alter type key_type add value if not exists 'carrier'; 4 | 5 | alter type key_type rename to key_role; 6 | 7 | alter table registered_keys rename column key_type to key_role; 8 | 9 | alter table registered_keys drop constraint registered_keys_pubkey_key; 10 | 11 | alter table registered_keys add primary key (pubkey, key_role); 12 | -------------------------------------------------------------------------------- /mobile_config/migrations/5_carrier_service.sql: -------------------------------------------------------------------------------- 1 | create table carrier_keys ( 2 | pubkey text primary key not null, 3 | entity_key text not null, 4 | created_at timestamptz not null default now(), 5 | updated_at
timestamptz not null default now() 6 | ); 7 | -------------------------------------------------------------------------------- /mobile_config/migrations/6_registered_keys_name.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE registered_keys ADD COLUMN IF NOT EXISTS name TEXT; 2 | -------------------------------------------------------------------------------- /mobile_config/migrations/7_mobile_radio_tracker.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS mobile_radio_tracker ( 2 | entity_key BYTEA NOT NULL, 3 | hash TEXT NOT NULL, 4 | last_changed_at TIMESTAMPTZ NOT NULL, 5 | last_checked_at TIMESTAMPTZ NOT NULL, 6 | PRIMARY KEY (entity_key) 7 | ); 8 | -------------------------------------------------------------------------------- /mobile_config/pkg/settings-template.toml: -------------------------------------------------------------------------------- 1 | # log settings for the application (RUST_LOG format). Default below 2 | # 3 | # log = "iot-mobile=debug,poc_store=info" 4 | 5 | 6 | # Listen address for public grpc. Default below 7 | # 8 | # listen = "0.0.0.0:8080" 9 | 10 | network = "mainnet" 11 | 12 | [database] 13 | 14 | # Url for the main service database 15 | url = "postgres://postgres:postgres@127.0.0.1:5432/config_db" 16 | 17 | # Max connections to database 18 | max_connections = 20 19 | 20 | [metadata] 21 | # Connection fields and auth values for connecting to RDS via IAM 22 | host = "helius.aws" 23 | port = 5432 24 | username = "helius" 25 | database = "db" 26 | 27 | auth_type = "iam" 28 | 29 | iam_role_arn = "arn::iam" 30 | iam_role_session_name = "role-session-name" 31 | iam_duration_seconds = 900 32 | iam_region = "us-west-2" 33 | 34 | # Max connections to database 35 | max_connections = 20 36 | 37 | [metrics] 38 | 39 | # Endpoint for metrics.
Default below 40 | # 41 | # endpoint = "127.0.0.1:19000" 42 | -------------------------------------------------------------------------------- /mobile_config/src/telemetry.rs: -------------------------------------------------------------------------------- 1 | const RPC_METRIC: &str = concat!(env!("CARGO_PKG_NAME"), "-", "grpc-request"); 2 | const GATEWAY_CHAIN_LOOKUP_METRIC: &str = 3 | concat!(env!("CARGO_PKG_NAME"), "-", "gateway-chain-lookup"); 4 | 5 | const EPOCH_CHAIN_LOOKUP_METRIC: &str = concat!(env!("CARGO_PKG_NAME"), "-", "epoch-chain-lookup"); 6 | 7 | pub fn count_request(service: &'static str, rpc: &'static str) { 8 | metrics::counter!(RPC_METRIC, "service" => service, "rpc" => rpc).increment(1); 9 | } 10 | 11 | pub fn count_gateway_chain_lookup(result: &'static str) { 12 | metrics::counter!(GATEWAY_CHAIN_LOOKUP_METRIC, "result" => result).increment(1); 13 | } 14 | 15 | pub fn count_epoch_chain_lookup(result: &'static str) { 16 | metrics::counter!(EPOCH_CHAIN_LOOKUP_METRIC, "result" => result).increment(1); 17 | } 18 | -------------------------------------------------------------------------------- /mobile_config/tests/mobile_radio_tracker.rs: -------------------------------------------------------------------------------- 1 | use chrono::Utc; 2 | use helium_crypto::PublicKeyBinary; 3 | use mobile_config::mobile_radio_tracker::{get_tracked_radios, track_changes}; 4 | use sqlx::PgPool; 5 | 6 | pub mod common; 7 | use common::*; 8 | 9 | #[sqlx::test] 10 | async fn mobile_tracker_handle_entity_duplicates(pool: PgPool) { 11 | // In case of duplicates, the mobile tracker must use the newer record (by refreshed_at) 12 | let asset1_pubkey = make_keypair().public_key().clone(); 13 | let asset1_hex_idx = 631711281837647359_i64; 14 | create_db_tables(&pool).await; 15 | let now = Utc::now(); 16 | let now_minus_hour = now - chrono::Duration::hours(1); 17 | let pubkey_binary = PublicKeyBinary::from(asset1_pubkey.clone()); 18 | 19 | add_db_record( 20 | &pool, 21 | "asset1", 22 | asset1_hex_idx, 23 | "\"wifiIndoor\"", 24 | asset1_pubkey.clone().into(), 25 | now_minus_hour, 26 | Some(now_minus_hour), 27 | Some(r#"{"wifiInfoV0": {"antenna": 18, "azimuth": 160, "elevation": 5, "electricalDownTilt": 1, "mechanicalDownTilt": 2}}"#) 28 | ) 29 | .await; 30 | 31 | add_db_record( 32 | &pool, 33 | "asset1", 34 | asset1_hex_idx, 35 | "\"wifiIndoor\"", 36 | asset1_pubkey.clone().into(), 37 | now, 38 | None, 39 | Some(r#"{"wifiInfoV0": {"antenna": 18, "azimuth": 160, "elevation": 5, "electricalDownTilt": 1, "mechanicalDownTilt": 2}}"#) 40 | ) 41 | .await; 42 | 43 | add_db_record( 44 | &pool, 45 | "asset1", 46 | asset1_hex_idx, 47 | "\"wifiIndoor\"", 48 | asset1_pubkey.clone().into(), 49 | now, 50 | Some(now), 51 | Some(r#"{"wifiInfoV0": {"antenna": 18, "azimuth": 160, "elevation": 5, "electricalDownTilt": 1, "mechanicalDownTilt": 2}}"#) 52 | ) 53 | .await; 54 | 55 | let b58 = bs58::decode(pubkey_binary.to_string()).into_vec().unwrap(); 56 | track_changes(&pool, &pool).await.unwrap(); 57 | let tracked_radios = get_tracked_radios(&pool).await.unwrap(); 58 | assert_eq!(tracked_radios.len(), 1); 59 | let tracked_radio = tracked_radios.get::<Vec<u8>>(&b58).unwrap(); 60 | assert_eq!( 61 | tracked_radio.last_changed_at.timestamp_millis(), 62 | now.timestamp_millis() 63 | ); 64 | } 65 | -------------------------------------------------------------------------------- /mobile_config_cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mobile-config-cli" 3 | version =
"0.1.0" 4 | description = "Cli for the Helium Mobile subnetwork Config Service" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | angry-purple-tiger = { version = "1", features = ["helium_crypto"] } 11 | anyhow = {workspace = true} 12 | base64 = {workspace = true} 13 | clap = {workspace = true, features = ["derive", "env"]} 14 | dialoguer = "0.10" 15 | futures = {workspace = true} 16 | helium-crypto = {workspace = true} 17 | helium-proto = {workspace = true} 18 | h3o = {workspace = true} 19 | mobile-config = {path = "../mobile_config"} 20 | prost = {workspace = true} 21 | rand = {workspace = true} 22 | serde = {workspace = true} 23 | serde_json = {workspace = true} 24 | tokio = {workspace = true, features = ["macros", "rt-multi-thread"]} 25 | tokio-stream = {workspace = true} 26 | tonic = {workspace = true, features = ["tls", "tls-roots"]} 27 | tracing = {workspace = true} 28 | custom-tracing = { path = "../custom_tracing" } 29 | 30 | -------------------------------------------------------------------------------- /mobile_config_cli/src/cmds/admin.rs: -------------------------------------------------------------------------------- 1 | use crate::{client, cmds::PathBufKeypair, Msg, Result}; 2 | 3 | use super::AdminKeyArgs; 4 | 5 | pub async fn add_key(args: AdminKeyArgs) -> Result { 6 | let output = format!("Added {} as {} key", args.pubkey, args.key_role); 7 | 8 | if args.commit { 9 | let mut client = client::AdminClient::new(&args.config_host, &args.config_pubkey).await?; 10 | client 11 | .add_key(&args.pubkey, args.key_role, &args.keypair.to_keypair()?) 12 | .await?; 13 | return Msg::ok(output); 14 | } 15 | Msg::dry_run(output) 16 | } 17 | 18 | pub async fn remove_key(args: AdminKeyArgs) -> Result { 19 | let output = format!("Removed {} as {} key", args.pubkey, args.key_role); 20 | 21 | if args.commit { 22 | let mut client = client::AdminClient::new(&args.config_host, &args.config_pubkey).await?; 23 | client 24 | .remove_key(&args.pubkey, args.key_role, &args.keypair.to_keypair()?) 25 | .await?; 26 | return Msg::ok(output); 27 | } 28 | Msg::dry_run(output) 29 | } 30 | -------------------------------------------------------------------------------- /mobile_config_cli/src/cmds/authorization.rs: -------------------------------------------------------------------------------- 1 | use crate::{client, cmds::PathBufKeypair, Msg, PrettyJson, Result}; 2 | 3 | use super::{ListNetKeys, VerifyNetKey}; 4 | use serde_json::json; 5 | 6 | pub async fn verify_key_role(args: VerifyNetKey) -> Result { 7 | let mut client = client::AuthClient::new(&args.config_host, &args.config_pubkey).await?; 8 | let registered = client 9 | .verify(&args.pubkey, args.key_role, &args.keypair.to_keypair()?) 10 | .await?; 11 | let output = json!({ 12 | "pubkey": args.pubkey, 13 | "role": args.key_role, 14 | "registered": registered 15 | }); 16 | Msg::ok(output.pretty_json()?) 17 | } 18 | 19 | pub async fn list_keys_role(args: ListNetKeys) -> Result { 20 | let mut client = client::AuthClient::new(&args.config_host, &args.config_pubkey).await?; 21 | let keys = client 22 | .list(args.key_role, &args.keypair.to_keypair()?) 23 | .await?; 24 | let output = json!({ 25 | "role": args.key_role, 26 | "registered_keys": keys 27 | }); 28 | Msg::ok(output.pretty_json()?) 
29 | } 30 | -------------------------------------------------------------------------------- /mobile_config_cli/src/cmds/carrier.rs: -------------------------------------------------------------------------------- 1 | use crate::{client, Msg, PrettyJson, Result}; 2 | 3 | use super::{ListIncentivePromotions, PathBufKeypair}; 4 | 5 | pub async fn list_incentive_promotions(args: ListIncentivePromotions) -> Result<Msg> { 6 | let mut client = client::CarrierClient::new(&args.config_host, &args.config_pubkey).await?; 7 | let list = client 8 | .list_incentive_promotions(&args.keypair.to_keypair()?) 9 | .await?; 10 | Msg::ok(list.pretty_json()?) 11 | } 12 | -------------------------------------------------------------------------------- /mobile_config_cli/src/cmds/entity.rs: -------------------------------------------------------------------------------- 1 | use crate::{client, cmds::PathBufKeypair, Msg, PrettyJson, Result}; 2 | 3 | use super::VerifyRewardableEntity; 4 | use serde_json::json; 5 | 6 | pub async fn verify_entity(args: VerifyRewardableEntity) -> Result<Msg> { 7 | let mut client = client::EntityClient::new(&args.config_host, &args.config_pubkey).await?; 8 | let verified = client 9 | .verify(&args.entity_id, &args.keypair.to_keypair()?) 10 | .await?; 11 | let output = json!({ 12 | "entity_id": args.entity_id, 13 | "on_chain": verified 14 | }); 15 | Msg::ok(output.pretty_json()?) 16 | } 17 | -------------------------------------------------------------------------------- /mobile_config_cli/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use mobile_config_cli::{ 3 | cmds::{self, admin, authorization, carrier, entity, env, gateway, Cli, Commands}, 4 | Msg, Result, 5 | }; 6 | 7 | #[tokio::main] 8 | async fn main() -> Result { 9 | let cli = Cli::parse(); 10 | 11 | custom_tracing::init(cli.log_filter.clone(), custom_tracing::Settings::default()).await?; 12 | 13 | if cli.print_command { 14 | println!("{cli:#?}"); 15 | } 16 | 17 | let msg = handle_cli(cli).await?; 18 | println!("{msg}"); 19 | 20 | Ok(()) 21 | } 22 | 23 | pub async fn handle_cli(cli: Cli) -> Result<Msg> { 24 | match cli.command { 25 | Commands::Env { command } => match command { 26 | cmds::EnvCommands::Init => env::env_init().await, 27 | cmds::EnvCommands::Info(args) => env::env_info(args), 28 | cmds::EnvCommands::GenerateKeypair(args) => env::generate_keypair(args), 29 | }, 30 | Commands::Admin { command } => match command { 31 | cmds::AdminCommands::AddKey(args) => admin::add_key(args).await, 32 | cmds::AdminCommands::RemoveKey(args) => admin::remove_key(args).await, 33 | }, 34 | Commands::Authorization { command } => match command { 35 | cmds::AuthCommands::VerifyKey(args) => authorization::verify_key_role(args).await, 36 | cmds::AuthCommands::ListKeys(args) => authorization::list_keys_role(args).await, 37 | }, 38 | Commands::Carrier { command } => match command { 39 | cmds::CarrierCommands::ListIncentivePromotions(args) => { 40 | carrier::list_incentive_promotions(args).await 41 | } 42 | }, 43 | Commands::Entity { command } => match command { 44 | cmds::EntityCommands::VerifyEntity(args) => entity::verify_entity(args).await, 45 | }, 46 | Commands::Gateway { command } => match command { 47 | cmds::GatewayCommands::Info(args) => gateway::info(args).await, 48 | cmds::GatewayCommands::InfoBatch(args) => gateway::info_batch(args).await, 49 | }, 50 | } 51 | } 52 | --------------------------------------------------------------------------------
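The `authorization` commands above drive the same role checks that downstream services perform as library calls against the config service. A minimal sketch of the programmatic form, built from the client types `mobile_config` exposes (the helper function itself is illustrative, not part of the crate's API):

```rust
use helium_crypto::PublicKeyBinary;
use helium_proto::services::mobile_config::NetworkKeyRole;
use mobile_config::client::{self, authorization_client::AuthorizationVerifier};

// Illustrative helper: returns true when the config service has `pubkey`
// registered for the mobile_router role, i.e. the router may burn data
// credits on behalf of the network.
async fn router_key_is_registered(
    settings: &client::Settings,
    pubkey: &PublicKeyBinary,
) -> anyhow::Result<bool> {
    let auth_client = client::AuthorizationClient::from_settings(settings)?;
    // Any lookup failure is treated as "not authorized", mirroring how the
    // mobile packet verifier consumes this client.
    Ok(auth_client
        .verify_authorized_key(pubkey, NetworkKeyRole::MobileRouter)
        .await
        .unwrap_or_default())
}
```

This is the gating described in the mobile_config README: unregistered router keys are simply treated as unauthorized rather than producing a hard error.

--------------------------------------------------------------------------------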
/mobile_packet_verifier/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mobile-packet-verifier" 3 | version = "0.1.0" 4 | description = "Packet verification for Mobile" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | async-trait = { workspace = true } 12 | chrono = { workspace = true } 13 | clap = { workspace = true } 14 | config = { workspace = true } 15 | futures = { workspace = true } 16 | futures-util = { workspace = true } 17 | helium-crypto = { workspace = true, features = [ 18 | "sqlx-postgres", 19 | "multisig", 20 | "solana", 21 | ] } 22 | helium-proto = { workspace = true } 23 | http = { workspace = true } 24 | http-serde = { workspace = true } 25 | humantime-serde = { workspace = true } 26 | metrics = { workspace = true } 27 | prost = { workspace = true } 28 | serde = { workspace = true } 29 | sha2 = { workspace = true } 30 | solana = { path = "../solana" } 31 | sqlx = { workspace = true } 32 | thiserror = { workspace = true } 33 | tokio = { workspace = true } 34 | tonic = { workspace = true } 35 | tracing = { workspace = true } 36 | tracing-subscriber = { workspace = true } 37 | triggered = { workspace = true } 38 | 39 | custom-tracing = { path = "../custom_tracing" } 40 | db-store = { path = "../db_store" } 41 | file-store = { path = "../file_store" } 42 | mobile-config = { path = "../mobile_config" } 43 | poc-metrics = { path = "../metrics" } 44 | task-manager = { path = "../task_manager" } 45 | 46 | [dev-dependencies] 47 | reqwest = { workspace = true } 48 | -------------------------------------------------------------------------------- /mobile_packet_verifier/README.md: -------------------------------------------------------------------------------- 1 | # Mobile Packet Verifier 2 | 3 | The mobile packet verifier reads data transfer sessions and accumulates the amount 4 | of bytes downloaded and uploaded. After a specified period, it burns a proportional 5 | amount of data credits from the payer and issues validated data transfer sessions 6 | so that the mobile verifier may reward the hotspots. 7 | 8 | The mobile packet verifier does not check the balance of the payer, nor does it write 9 | out invalid data transfer sessions if it fails to debit the balance. The assumption is 10 | that payers will always have data credits to pay. If they do not, the mobile packet 11 | verifier will error out and fail to write out validated data transfer sessions 12 | for rewards.
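The proportional burn mentioned above is anchored to a fixed conversion rate: the crate's `src/lib.rs` charges a minimum of one data credit and rounds up at 20,000 bytes per DC. A short worked sketch of that conversion, mirroring `bytes_to_dc` from `src/lib.rs`:

```rust
const BYTES_PER_DC: u64 = 20_000;

/// Any transfer is charged at least one DC; beyond that floor, bytes are
/// rounded up to the next whole DC.
fn bytes_to_dc(bytes: u64) -> u64 {
    bytes.max(BYTES_PER_DC).div_ceil(BYTES_PER_DC)
}

fn main() {
    assert_eq!(bytes_to_dc(1), 1); // below the floor still costs one DC
    assert_eq!(bytes_to_dc(20_000), 1); // exactly one DC worth of bytes
    assert_eq!(bytes_to_dc(20_001), 2); // one byte over rounds up to two
}
```

The floor means even a one-byte session is charged a full DC, and `dc_to_bytes` in the same module inverts the rate.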
13 | 14 | ## S3 Inputs 15 | 16 | | File Type | Pattern | | 17 | | :-- | :-- | :-- | 18 | | DataTransferSessionIngestReport | data_transfer_session_ingest_report.* | [Proto](https://github.com/helium/proto/blob/40388d260fd3603f453a965dbc13f79470b5adcb/src/service/poc_mobile.proto#L212) | 19 | 20 | ## S3 Outputs 21 | 22 | | File Type | Pattern | | 23 | | :-- | :-- | :-- | 24 | | ValidDataTransferSession | valid_data_transfer_session.* | [Proto](https://github.com/helium/proto/blob/40388d260fd3603f453a965dbc13f79470b5adcb/src/service/packet_verifier.proto#L24) | 25 | 26 | -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`. 5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/2_data_transfer_sessions.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE data_transfer_sessions ( 2 | pub_key TEXT NOT NULL, 3 | payer TEXT NOT NULL, 4 | uploaded_bytes BIGINT NOT NULL, 5 | downloaded_bytes BIGINT NOT NULL, 6 | first_timestamp TIMESTAMPTZ NOT NULL, 7 | last_timestamp TIMESTAMPTZ NOT NULL, 8 | PRIMARY KEY(pub_key, payer) 9 | ); 10 | -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/3_processed_files.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE files_processed ( 2 | file_name VARCHAR PRIMARY KEY, 3 | file_type VARCHAR NOT NULL, 4 | file_timestamp TIMESTAMPTZ NOT NULL, 5 | processed_at TIMESTAMPTZ NOT NULL 6 | ); 7 | -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/4_event_ids.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE event_ids ( 2 | event_id TEXT NOT NULL PRIMARY KEY, 3 | received_timestamp TIMESTAMPTZ NOT NULL 4 | ); -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/5_rewardable_bytes.sql: -------------------------------------------------------------------------------- 1 | 2 | alter table data_transfer_sessions add rewardable_bytes bigint; 3 | 4 | update data_transfer_sessions 5 | set rewardable_bytes = uploaded_bytes + downloaded_bytes; 6 | 7 | alter table data_transfer_sessions alter column rewardable_bytes set not null; 8 | 
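The `data_transfer_sessions` table defined above is the accumulator the README describes: one row per `(pub_key, payer)` with running byte totals, where `rewardable_bytes` is what ultimately gets converted to data credits. A hedged sketch of summing those totals per payer ahead of a burn; the struct and query here are illustrative only, as the crate's real logic lives in its `pending_burns` module:

```rust
use sqlx::{FromRow, PgPool};

// Illustrative shape for a per-payer summary; not a type from this crate.
#[derive(Debug, FromRow)]
struct PayerTotal {
    payer: String,
    total_rewardable: i64,
}

// Sum rewardable bytes per payer so each payer can be debited a
// proportional amount of data credits in a single burn transaction.
async fn payer_totals(pool: &PgPool) -> anyhow::Result<Vec<PayerTotal>> {
    let totals = sqlx::query_as::<_, PayerTotal>(
        "SELECT payer, SUM(rewardable_bytes)::BIGINT AS total_rewardable \
         FROM data_transfer_sessions \
         GROUP BY payer",
    )
    .fetch_all(pool)
    .await?;
    Ok(totals)
}
```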
-------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/6_files_processed_process_name.sql: -------------------------------------------------------------------------------- 1 | alter table files_processed add column process_name text not null default 'default'; 2 | -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/7_pending_txns.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS pending_txns ( 2 | signature TEXT PRIMARY KEY, 3 | payer TEXT NOT NULL, 4 | amount BIGINT NOT NULL, 5 | time_of_submission TIMESTAMPTZ NOT NULL 6 | ); 7 | -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/8_pending_data_transfer_sessions.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS pending_data_transfer_sessions ( 2 | pub_key TEXT NOT NULL, 3 | payer TEXT NOT NULL, 4 | uploaded_bytes BIGINT NOT NULL, 5 | downloaded_bytes BIGINT NOT NULL, 6 | rewardable_bytes BIGINT NOT NULL, 7 | first_timestamp TIMESTAMPTZ NOT NULL, 8 | last_timestamp TIMESTAMPTZ NOT NULL, 9 | signature TEXT NOT NULL, 10 | PRIMARY KEY(pub_key, payer) 11 | ); 12 | -------------------------------------------------------------------------------- /mobile_packet_verifier/migrations/9_hotspot_bans.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS hotspot_bans ( 2 | hotspot_pubkey TEXT NOT NULL, 3 | received_timestamp TIMESTAMPTZ NOT NULL, 4 | expiration_timestamp TIMESTAMPTZ, 5 | ban_type TEXT NOT NULL, 6 | PRIMARY KEY (hotspot_pubkey, received_timestamp) 7 | ); 8 | -------------------------------------------------------------------------------- /mobile_packet_verifier/src/banning/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, time::Duration}; 2 | 3 | use chrono::{DateTime, Utc}; 4 | use file_store::{mobile_ban, FileStore}; 5 | use helium_crypto::PublicKeyBinary; 6 | use humantime_serde::re::humantime; 7 | use serde::Deserialize; 8 | use sqlx::PgPool; 9 | use task_manager::{ManagedTask, TaskManager}; 10 | 11 | pub mod db; 12 | pub mod ingestor; 13 | pub mod purger; 14 | 15 | pub use db::get_banned_radios; 16 | pub use ingestor::handle_verified_ban_report; 17 | 18 | pub const BAN_CLEANUP_DAYS: i64 = 7; 19 | 20 | #[derive(Debug, Deserialize)] 21 | pub struct BanSettings { 22 | /// Where do we look in s3 for ban files 23 | pub input_bucket: file_store::Settings, 24 | /// How often to purge expired bans 25 | #[serde(with = "humantime_serde", default = "default_purge_interval")] 26 | pub purge_interval: Duration, 27 | /// How far back should we be reading ban files 28 | #[serde(default = "default_ingest_start_after")] 29 | pub start_after: DateTime<Utc>, 30 | } 31 | 32 | fn default_purge_interval() -> Duration { 33 | humantime::parse_duration("24 hours").unwrap() 34 | } 35 | 36 | fn default_ingest_start_after() -> DateTime<Utc> { 37 | DateTime::UNIX_EPOCH 38 | } 39 | 40 | pub async fn create_managed_task( 41 | pool: PgPool, 42 | settings: &BanSettings, 43 | ) -> anyhow::Result<impl ManagedTask> { 44 | let verifier_file_store = FileStore::from_settings(&settings.input_bucket).await?; 45 | 46 | let (ban_report_rx, ban_report_server) = 47 | mobile_ban::verified_report_source(pool.clone(), verifier_file_store,
settings.start_after) 48 | .await?; 49 | 50 | let ingestor = ingestor::BanIngestor::new(pool.clone(), ban_report_rx); 51 | let purger = purger::BanPurger::new(pool, settings.purge_interval); 52 | 53 | Ok(TaskManager::builder() 54 | .add_task(ban_report_server) 55 | .add_task(ingestor) 56 | .add_task(purger) 57 | .build()) 58 | } 59 | 60 | #[derive(Debug, Default)] 61 | pub struct BannedRadios { 62 | banned: HashSet<PublicKeyBinary>, 63 | } 64 | 65 | impl BannedRadios { 66 | pub fn contains(&self, hotspot_pubkey: &PublicKeyBinary) -> bool { 67 | self.banned.contains(hotspot_pubkey) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /mobile_packet_verifier/src/banning/purger.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use chrono::{DateTime, Utc}; 4 | use futures::FutureExt; 5 | use sqlx::PgPool; 6 | use task_manager::ManagedTask; 7 | 8 | pub struct BanPurger { 9 | pool: PgPool, 10 | interval: Duration, 11 | } 12 | 13 | impl ManagedTask for BanPurger { 14 | fn start_task( 15 | self: Box<Self>, 16 | shutdown: triggered::Listener, 17 | ) -> futures::future::LocalBoxFuture<'static, anyhow::Result<()>> { 18 | self.run(shutdown).boxed_local() 19 | } 20 | } 21 | 22 | impl BanPurger { 23 | pub fn new(pool: PgPool, interval: Duration) -> Self { 24 | Self { pool, interval } 25 | } 26 | 27 | async fn run(mut self, mut shutdown: triggered::Listener) -> anyhow::Result<()> { 28 | tracing::info!("ban purger starting"); 29 | let mut timer = tokio::time::interval(self.interval); 30 | 31 | loop { 32 | tokio::select! { 33 | biased; 34 | _ = &mut shutdown => break, 35 | _ = timer.tick() => self.purge(Utc::now()).await? 36 | } 37 | } 38 | 39 | tracing::info!("ban purger stopping"); 40 | 41 | Ok(()) 42 | } 43 | 44 | async fn purge(&mut self, timestamp: DateTime<Utc>) -> anyhow::Result<()> { 45 | let mut conn = self.pool.acquire().await?; 46 | let deleted = crate::banning::db::cleanup_bans(&mut conn, timestamp).await?; 47 | tracing::info!(deleted, "purged expired bans"); 48 | Ok(()) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /mobile_packet_verifier/src/event_ids.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use sqlx::{Pool, Postgres, Transaction}; 3 | use std::time::Duration; 4 | use task_manager::ManagedTask; 5 | 6 | use crate::settings::Settings; 7 | 8 | pub async fn is_duplicate( 9 | txn: &mut Transaction<'_, Postgres>, 10 | event_id: String, 11 | received_timestamp: DateTime<Utc>, 12 | ) -> anyhow::Result<bool> { 13 | sqlx::query("INSERT INTO event_ids(event_id, received_timestamp) VALUES($1, $2) ON CONFLICT (event_id) DO NOTHING") 14 | .bind(event_id) 15 | .bind(received_timestamp) 16 | .execute(&mut **txn) 17 | .await 18 | .map(|result| result.rows_affected() == 0) 19 | .map_err(anyhow::Error::from) 20 | } 21 | 22 | pub struct EventIdPurger { 23 | conn: Pool<Postgres>, 24 | interval: Duration, 25 | max_age: Duration, 26 | } 27 | 28 | impl ManagedTask for EventIdPurger { 29 | fn start_task( 30 | self: Box<Self>, 31 | shutdown: triggered::Listener, 32 | ) -> futures::future::LocalBoxFuture<'static, anyhow::Result<()>> { 33 | Box::pin(self.run(shutdown)) 34 | } 35 | } 36 | 37 | impl EventIdPurger { 38 | pub fn from_settings(conn: Pool<Postgres>, settings: &Settings) -> Self { 39 | Self { 40 | conn, 41 | interval: settings.purger_interval, 42 | max_age: settings.purger_max_age, 43 | } 44 | } 45 | 46 | pub async fn
run(self, mut shutdown: triggered::Listener) -> anyhow::Result<()> { 47 | let mut timer = tokio::time::interval(self.interval); 48 | 49 | loop { 50 | tokio::select! { 51 | _ = &mut shutdown => { 52 | return Ok(()) 53 | } 54 | _ = timer.tick() => { 55 | purge(&self.conn, self.max_age).await?; 56 | } 57 | } 58 | } 59 | } 60 | } 61 | 62 | async fn purge(conn: &Pool<Postgres>, max_age: Duration) -> anyhow::Result<()> { 63 | let timestamp = Utc::now() - max_age; 64 | 65 | sqlx::query("DELETE FROM event_ids where received_timestamp < $1") 66 | .bind(timestamp) 67 | .execute(conn) 68 | .await 69 | .map(|_| ()) 70 | .map_err(anyhow::Error::from) 71 | } 72 | -------------------------------------------------------------------------------- /mobile_packet_verifier/src/lib.rs: -------------------------------------------------------------------------------- 1 | use helium_crypto::PublicKeyBinary; 2 | use helium_proto::services::mobile_config::NetworkKeyRole; 3 | use mobile_config::client::{ 4 | self, authorization_client::AuthorizationVerifier, gateway_client::GatewayInfoResolver, 5 | }; 6 | 7 | pub mod accumulate; 8 | pub mod banning; 9 | pub mod burner; 10 | pub mod daemon; 11 | pub mod event_ids; 12 | pub mod pending_burns; 13 | pub mod pending_txns; 14 | pub mod settings; 15 | 16 | const BYTES_PER_DC: u64 = 20_000; 17 | 18 | pub fn bytes_to_dc(bytes: u64) -> u64 { 19 | let bytes = bytes.max(BYTES_PER_DC); 20 | bytes.div_ceil(BYTES_PER_DC) 21 | } 22 | 23 | pub fn dc_to_bytes(dcs: u64) -> u64 { 24 | dcs * BYTES_PER_DC 25 | } 26 | 27 | pub struct MobileConfigClients { 28 | gateway_client: client::GatewayClient, 29 | auth_client: client::AuthorizationClient, 30 | } 31 | 32 | impl MobileConfigClients { 33 | pub fn new(settings: &client::Settings) -> anyhow::Result<Self> { 34 | Ok(Self { 35 | gateway_client: client::GatewayClient::from_settings(settings)?, 36 | auth_client: client::AuthorizationClient::from_settings(settings)?, 37 | }) 38 | } 39 | } 40 | 41 | #[async_trait::async_trait] 42 | pub trait MobileConfigResolverExt { 43 | async fn is_gateway_known(&self, public_key: &PublicKeyBinary) -> bool; 44 | async fn is_routing_key_known(&self, public_key: &PublicKeyBinary) -> bool; 45 | } 46 | 47 | #[async_trait::async_trait] 48 | impl MobileConfigResolverExt for MobileConfigClients { 49 | async fn is_gateway_known(&self, public_key: &PublicKeyBinary) -> bool { 50 | match self.gateway_client.resolve_gateway_info(public_key).await { 51 | Ok(res) => res.is_some(), 52 | Err(_err) => false, 53 | } 54 | } 55 | 56 | async fn is_routing_key_known(&self, public_key: &PublicKeyBinary) -> bool { 57 | self.auth_client 58 | .verify_authorized_key(public_key, NetworkKeyRole::MobileRouter) 59 | .await 60 | .unwrap_or_default() 61 | } 62 | } 63 | 64 | #[cfg(test)] 65 | mod tests { 66 | use super::*; 67 | 68 | #[test] 69 | fn test_bytes_to_dc() { 70 | assert_eq!(1, bytes_to_dc(1)); 71 | assert_eq!(1, bytes_to_dc(20_000)); 72 | assert_eq!(2, bytes_to_dc(20_001)); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /mobile_packet_verifier/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use mobile_packet_verifier::{daemon, settings::Settings}; 4 | use std::path::PathBuf; 5 | 6 | #[derive(clap::Parser)] 7 | #[clap(version = env!("CARGO_PKG_VERSION"))] 8 | #[clap(about = "Helium Mobile Packet Verifier Server")] 9 | pub struct Cli { 10 | /// Optional configuration file to use.
If present, the toml file at the 11 | given path will be loaded. Environment variables can override the 12 | settings in the given file. 13 | #[clap(short = 'c')] 14 | config: Option<PathBuf>, 15 | 16 | #[clap(subcommand)] 17 | cmd: Cmd, 18 | } 19 | 20 | impl Cli { 21 | pub async fn run(self) -> Result<()> { 22 | let settings = Settings::new(self.config)?; 23 | custom_tracing::init(settings.log.clone(), settings.custom_tracing.clone()).await?; 24 | self.cmd.run(settings).await 25 | } 26 | } 27 | 28 | #[derive(clap::Subcommand)] 29 | pub enum Cmd { 30 | Server(daemon::Cmd), 31 | } 32 | 33 | impl Cmd { 34 | async fn run(self, settings: Settings) -> Result<()> { 35 | match self { 36 | Self::Server(cmd) => cmd.run(&settings).await, 37 | } 38 | } 39 | } 40 | 41 | #[tokio::main] 42 | async fn main() -> Result<()> { 43 | let cli = Cli::parse(); 44 | cli.run().await 45 | } 46 | -------------------------------------------------------------------------------- /mobile_packet_verifier/tests/integrations/main.rs: -------------------------------------------------------------------------------- 1 | pub mod common; 2 | 3 | pub mod accumulate_sessions; 4 | pub mod banning; 5 | pub mod burn_metric; 6 | pub mod burner; 7 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/10_timestamptz_speedtests.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE speedtest_migration AS ( 2 | timestamp TIMESTAMPTZ, 3 | upload_speed BIGINT, 4 | download_speed BIGINT, 5 | latency INTEGER 6 | ); 7 | 8 | CREATE FUNCTION tomigration(speedtest) 9 | RETURNS speedtest_migration 10 | STRICT IMMUTABLE LANGUAGE SQL AS 11 | $$ SELECT CAST ( ROW ( ($1).timestamp at time zone 'UTC', ($1).upload_speed, ($1).download_speed, ($1).latency ) AS speedtest_migration ); $$ ; 12 | 13 | CREATE CAST ( speedtest as speedtest_migration ) WITH FUNCTION tomigration(speedtest) AS IMPLICIT; 14 | 15 | ALTER TABLE speedtests ALTER COLUMN speedtests TYPE speedtest_migration [] ; 16 | ALTER TABLE speedtests ALTER COLUMN latest_timestamp TYPE TIMESTAMPTZ USING latest_timestamp at time zone 'UTC' ; 17 | 18 | DROP CAST ( speedtest as speedtest_migration ); 19 | DROP FUNCTION tomigration(speedtest); 20 | DROP TYPE speedtest; 21 | ALTER TYPE speedtest_migration RENAME TO speedtest; 22 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/11_files_processed.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE files_processed ( 2 | file_name VARCHAR PRIMARY KEY, 3 | file_type VARCHAR NOT NULL, 4 | file_timestamp TIMESTAMPTZ NOT NULL, 5 | processed_at TIMESTAMPTZ NOT NULL 6 | ); 7 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/12_disable_complete_data_checks_until.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO 2 | meta 3 | VALUES 4 | ('disable_complete_data_checks_until', '0') 5 | ON CONFLICT 6 | (key) 7 | DO NOTHING; 8 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/13_data_session.sql: -------------------------------------------------------------------------------- 1 | create table hotspot_data_transfer_sessions ( 2 | pub_key TEXT NOT NULL, 3 | payer TEXT NOT NULL, 4 | upload_bytes BIGINT NOT NULL, 5 | download_bytes BIGINT NOT NULL, 6 | num_dcs BIGINT NOT NULL, 7 |
received_timestamp TIMESTAMPTZ not null, 8 | created_at TIMESTAMPTZ default now() 9 | ); 10 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/14_subscriber_location.sql: -------------------------------------------------------------------------------- 1 | create table subscriber_loc_verified ( 2 | subscriber_id BYTEA not null, 3 | received_timestamp TIMESTAMPTZ not null, 4 | created_at TIMESTAMPTZ default now() 5 | ); 6 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/15_speedtests_one_to_one.sql: -------------------------------------------------------------------------------- 1 | 2 | CREATE TABLE speedtests_migration ( 3 | pubkey text NOT NULL, 4 | upload_speed bigint, 5 | download_speed bigint, 6 | latency integer, 7 | serial_num text, 8 | timestamp timestamptz NOT NULL, 9 | inserted_at timestamptz default now(), 10 | PRIMARY KEY(pubkey, timestamp) 11 | ); 12 | CREATE INDEX idx_speedtests_pubkey on speedtests_migration (pubkey); 13 | 14 | INSERT INTO speedtests_migration (pubkey, upload_speed, download_speed, latency, serial_num, timestamp) 15 | SELECT id, (st).upload_speed, (st).download_speed, (st).latency, '', (st).timestamp 16 | FROM (select id, unnest(speedtests) as st from speedtests) as tmp 17 | ON CONFLICT DO NOTHING; 18 | 19 | ALTER TABLE speedtests RENAME TO speedtests_old; 20 | ALTER TABLE speedtests_migration RENAME TO speedtests; 21 | 22 | 23 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/16_wifi_heartbeat.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE cell_type ADD VALUE 'celltypenone' AFTER 'sercommoutdoor'; 2 | ALTER TYPE cell_type ADD VALUE 'novagenericwifiindoor' AFTER 'celltypenone'; 3 | 4 | CREATE TABLE wifi_heartbeats ( 5 | hotspot_key TEXT NOT NULL, 6 | cell_type cell_type NOT NULL, 7 | truncated_timestamp TIMESTAMPTZ NOT NULL CHECK (truncated_timestamp = date_trunc('hour', truncated_timestamp)), 8 | latest_timestamp TIMESTAMPTZ NOT NULL, 9 | location_validation_timestamp TIMESTAMPTZ, 10 | distance_to_asserted BIGINT, 11 | PRIMARY KEY(hotspot_key, truncated_timestamp) 12 | ); 13 | 14 | ALTER TABLE heartbeats RENAME TO cbrs_heartbeats; 15 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/17_modeled_coverage.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE signal_level AS ENUM ( 2 | 'none', 3 | 'low', 4 | 'medium', 5 | 'high' 6 | ); 7 | 8 | CREATE TYPE radio_type as ENUM ( 9 | 'cbrs', 10 | 'wifi' 11 | ); 12 | 13 | CREATE TABLE hex_coverage ( 14 | uuid UUID NOT NULL, 15 | hex BIGINT NOT NULL, 16 | indoor BOOLEAN NOT NULL, 17 | radio_key TEXT NOT NULL, 18 | signal_level signal_level NOT NULL, 19 | coverage_claim_time TIMESTAMPTZ NOT NULL, 20 | inserted_at TIMESTAMPTZ NOT NULL, 21 | radio_type radio_type NOT NULL, 22 | PRIMARY KEY (uuid, hex) 23 | ); 24 | 25 | CREATE TABLE seniority ( 26 | radio_key TEXT NOT NULL, 27 | seniority_ts TIMESTAMPTZ NOT NULL, 28 | last_heartbeat TIMESTAMPTZ NOT NULL, 29 | uuid UUID NOT NULL, 30 | update_reason INT NOT NULL, 31 | inserted_at TIMESTAMPTZ NOT NULL, 32 | radio_type radio_type NOT NULL, 33 | PRIMARY KEY (radio_key, radio_type, seniority_ts) 34 | ); 35 | 36 | ALTER TABLE wifi_heartbeats ADD COLUMN coverage_object UUID; 37 | ALTER TABLE cbrs_heartbeats ADD COLUMN coverage_object UUID; 
38 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/18_invalidated_at.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE hex_coverage ADD COLUMN invalidated_at TIMESTAMPTZ; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/19_signal_power.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE hex_coverage ADD COLUMN signal_power INTEGER; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`. 5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/20_remove_null_coverage_objects.sql: -------------------------------------------------------------------------------- 1 | -- Remove NULL and invalid coverage objects from WiFi heartbeats: 2 | DELETE FROM wifi_heartbeats WHERE coverage_object IS NULL; 3 | DELETE FROM wifi_heartbeats WHERE coverage_object NOT IN (SELECT uuid FROM hex_coverage); 4 | 5 | -- Remove NULL and invalid coverage objects from CBRS heartbeats: 6 | DELETE FROM cbrs_heartbeats WHERE coverage_object IS NULL; 7 | DELETE FROM cbrs_heartbeats WHERE coverage_object NOT IN (SELECT uuid FROM hex_coverage); 8 | 9 | -- Add the NOT NULL constraint to coverage objects for wifi and cbrs heartbeats: 10 | ALTER TABLE wifi_heartbeats ALTER COLUMN coverage_object SET NOT NULL; 11 | ALTER TABLE cbrs_heartbeats ALTER COLUMN coverage_object SET NOT NULL; 12 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/21_index_hex_coverage.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS hex_coverage_radio_key_idx ON hex_coverage USING btree (radio_key) 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/22_coverage_objects.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE coverage_objects ( 2 | uuid UUID PRIMARY KEY, 3 | radio_type radio_type NOT NULL, 4 | radio_key TEXT NOT NULL, 5 | indoor BOOLEAN NOT NULL, 6 | coverage_claim_time TIMESTAMPTZ NOT NULL, 7 | trust_score INTEGER, 8 | inserted_at TIMESTAMPTZ NOT NULL, 9 | invalidated_at TIMESTAMPTZ 10 | ); 11 | 12 | CREATE TABLE hexes ( 13 | uuid UUID NOT NULL REFERENCES 
coverage_objects(uuid), 14 | hex BIGINT NOT NULL, 15 | signal_level signal_level NOT NULL, 16 | signal_power INTEGER, 17 | PRIMARY KEY (uuid, hex) 18 | ); 19 | 20 | INSERT INTO coverage_objects(uuid, radio_type, radio_key, indoor, coverage_claim_time, inserted_at, invalidated_at) 21 | SELECT DISTINCT uuid, radio_type, radio_key, indoor, coverage_claim_time, inserted_at, invalidated_at 22 | FROM hex_coverage; 23 | 24 | INSERT INTO hexes(uuid, hex, signal_level, signal_power) 25 | SELECT uuid, hex, signal_level, signal_power 26 | FROM hex_coverage; 27 | 28 | ALTER TABLE hex_coverage RENAME TO old_hex_coverage; 29 | 30 | ALTER TYPE cell_type ADD VALUE 'novagenericwifioutdoor' AFTER 'novagenericwifiindoor'; 31 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/23_files_processed_process_name.sql: -------------------------------------------------------------------------------- 1 | alter table files_processed add column process_name text not null default 'default'; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/24_location_trust_multiplier.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE wifi_heartbeats ADD COLUMN location_trust_score_multiplier DECIMAL; 2 | 3 | UPDATE wifi_heartbeats SET location_trust_score_multiplier = 4 | CASE WHEN location_validation_timestamp IS NULL THEN 5 | 0.25 6 | WHEN distance_to_asserted > 100 THEN 7 | 0.25 8 | ELSE 9 | 1.0 10 | END; 11 | 12 | ALTER TABLE wifi_heartbeats ALTER COLUMN location_trust_score_multiplier SET NOT NULL; 13 | 14 | ALTER TABLE cbrs_heartbeats ADD COLUMN location_trust_score_multiplier DECIMAL; 15 | 16 | UPDATE cbrs_heartbeats SET location_trust_score_multiplier = 1.0; 17 | 18 | ALTER TABLE cbrs_heartbeats ALTER COLUMN location_trust_score_multiplier SET NOT NULL; 19 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/25_make_distance_to_asserted_not_null.sql: -------------------------------------------------------------------------------- 1 | UPDATE wifi_heartbeats SET distance_to_asserted = 0 WHERE distance_to_asserted IS NULL; 2 | ALTER TABLE wifi_heartbeats ALTER COLUMN distance_to_asserted SET NOT NULL; 3 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/26_urbanized.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE oracle_assignment AS ENUM ('a', 'b', 'c'); 2 | 3 | ALTER TABLE hexes ADD COLUMN urbanized oracle_assignment; 4 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/27_subscriber_radio_threshold.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS radio_threshold ( 2 | id BIGSERIAL PRIMARY KEY, 3 | hotspot_pubkey TEXT NOT NULL, 4 | cbsd_id TEXT NULL, 5 | bytes_threshold BIGINT NOT NULL, 6 | subscriber_threshold INT NOT NULL, 7 | threshold_timestamp TIMESTAMPTZ NOT NULL, 8 | threshold_met BOOLEAN DEFAULT FALSE, 9 | recv_timestamp TIMESTAMPTZ NOT NULL, 10 | updated_at TIMESTAMPTZ DEFAULT NOW(), 11 | created_at TIMESTAMPTZ DEFAULT NOW() 12 | ); 13 | 14 | CREATE UNIQUE INDEX IF NOT EXISTS radio_threshold_hotspot_pubkey_cbsd_id_idx ON radio_threshold (hotspot_pubkey, COALESCE(cbsd_id,'')); 15 | 16 | -- temp table for grandfathered radio thresholds 17 | CREATE TABLE IF 
NOT EXISTS grandfathered_radio_threshold ( 18 | id SERIAL PRIMARY KEY, 19 | hotspot_pubkey TEXT NOT NULL, 20 | cbsd_id TEXT NULL, 21 | created_at TIMESTAMPTZ DEFAULT NOW() 22 | ); 23 | 24 | CREATE UNIQUE INDEX IF NOT EXISTS grandfathered_radio_threshold_hotspot_pubkey_cbsd_id_idx ON grandfathered_radio_threshold (hotspot_pubkey, COALESCE(cbsd_id,'')); 25 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/28_radio_threshold_cbsd_id_emptry_strings.sql: -------------------------------------------------------------------------------- 1 | UPDATE radio_threshold SET cbsd_id = NULL WHERE cbsd_id = ''; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/29_footfall.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE hexes ADD COLUMN footfall oracle_assignment; -------------------------------------------------------------------------------- /mobile_verifier/migrations/2_meta.sql: -------------------------------------------------------------------------------- 1 | create table meta ( 2 | key text primary key not null, 3 | value text 4 | ); 5 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/30_save_lat_and_lon.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE wifi_heartbeats 2 | ADD COLUMN lat DOUBLE PRECISION NOT NULL DEFAULT 0.0, 3 | ADD COLUMN lon DOUBLE PRECISION NOT NULL DEFAULT 0.0; 4 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/31_reset_validation_timestamps.sql: -------------------------------------------------------------------------------- 1 | UPDATE wifi_heartbeats SET location_validation_timestamp = NULL, lat = 0.0, lon = 0.0 2 | WHERE distance_to_asserted > 200; 3 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/32_landtype.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE hexes ADD COLUMN landtype oracle_assignment; -------------------------------------------------------------------------------- /mobile_verifier/migrations/33_data_sets.sql: -------------------------------------------------------------------------------- 1 | DO $$ BEGIN 2 | CREATE TYPE data_set_status AS enum ( 3 | 'pending', 4 | 'downloaded', 5 | 'processed' 6 | ); 7 | EXCEPTION 8 | WHEN duplicate_object THEN null; 9 | END $$; 10 | 11 | DO $$ BEGIN 12 | CREATE TYPE data_set_type AS enum ( 13 | 'urbanization', 14 | 'footfall', 15 | 'landtype' 16 | ); 17 | EXCEPTION 18 | WHEN duplicate_object THEN null; 19 | END $$; 20 | 21 | CREATE TABLE IF NOT EXISTS hex_assignment_data_set_status ( 22 | filename TEXT PRIMARY KEY, 23 | data_set data_set_type NOT NULL, 24 | time_to_use TIMESTAMPTZ NOT NULL, 25 | status data_set_status NOT NULL 26 | ); 27 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/34_sp_boosted_rewards_bans.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS sp_boosted_rewards_bans ( 2 | radio_type radio_type NOT NULL, 3 | radio_key TEXT NOT NULL, 4 | received_timestamp TIMESTAMPTZ NOT NULL, 5 | until TIMESTAMPTZ NOT NULL, 6 | invalidated_at TIMESTAMPTZ, 7 | PRIMARY KEY (radio_type, radio_key, received_timestamp) 8 | ); 9 | 
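Note that the sp_boosted_rewards_bans table above is effectively append-only: a ban is lifted by setting invalidated_at rather than deleting the row, and expiry is carried by the until column. As a hedged illustration of how a caller might query it with sqlx (the helper name and query below are assumptions for this sketch, not code from mobile-verifier):

use chrono::Utc;
use sqlx::PgPool;

/// Hypothetical helper: does `radio_key` have a ban row that is neither
/// invalidated nor expired? Assumes only the schema from the migration above.
async fn has_active_ban(pool: &PgPool, radio_key: &str) -> Result<bool, sqlx::Error> {
    sqlx::query_scalar(
        r#"
        SELECT EXISTS (
            SELECT 1 FROM sp_boosted_rewards_bans
            WHERE radio_key = $1
              AND invalidated_at IS NULL
              AND until > $2
        )
        "#,
    )
    .bind(radio_key)
    .bind(Utc::now())
    .fetch_one(pool)
    .await
}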
-------------------------------------------------------------------------------- /mobile_verifier/migrations/35_subscriber_verified_mapping_event.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS subscriber_verified_mapping_event ( 2 | subscriber_id BYTEA NOT NULL, 3 | total_reward_points BIGINT NOT NULL, 4 | received_timestamp TIMESTAMPTZ NOT NULL, 5 | inserted_at TIMESTAMPTZ DEFAULT now(), 6 | PRIMARY KEY (subscriber_id, received_timestamp) 7 | ); 8 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/36_sp_boosted_bans_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE sp_boosted_rewards_bans ADD COLUMN ban_type TEXT; 2 | 3 | UPDATE sp_boosted_rewards_bans SET ban_type = 'boosted_hex'; 4 | 5 | ALTER TABLE sp_boosted_rewards_bans ALTER COLUMN ban_type SET NOT NULL; 6 | 7 | ALTER TABLE sp_boosted_rewards_bans DROP CONSTRAINT sp_boosted_rewards_bans_pkey; 8 | 9 | ALTER TABLE sp_boosted_rewards_bans ADD PRIMARY KEY (ban_type, radio_type, radio_key, received_timestamp); 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/37_sp_promotions.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS subscriber_promotion_rewards ( 2 | time_of_reward TIMESTAMPTZ NOT NULL, 3 | subscriber_id BYTEA NOT NULL, 4 | carrier_key TEXT NOT NULL, 5 | shares BIGINT NOT NULL, 6 | PRIMARY KEY (time_of_reward, subscriber_id, carrier_key) 7 | ); 8 | 9 | CREATE TABLE IF NOT EXISTS gateway_promotion_rewards ( 10 | time_of_reward TIMESTAMPTZ NOT NULL, 11 | gateway_key TEXT NOT NULL, 12 | carrier_key TEXT NOT NULL, 13 | shares BIGINT NOT NULL, 14 | PRIMARY KEY (time_of_reward, gateway_key, carrier_key) 15 | ); 16 | 17 | CREATE TABLE IF NOT EXISTS service_provider_promotion_funds ( 18 | service_provider BIGINT NOT NULL PRIMARY KEY, 19 | basis_points BIGINT NOT NULL, 20 | inserted_at TIMESTAMPTZ NOT NULL 21 | ); 22 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/38_coverage_objects_cascade_delete.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE 2 | hexes DROP CONSTRAINT IF EXISTS hexes_uuid_fkey; 3 | 4 | ALTER TABLE 5 | hexes 6 | ADD 7 | CONSTRAINT hexes_uuid_fkey FOREIGN KEY (uuid) REFERENCES coverage_objects(uuid) ON DELETE CASCADE; -------------------------------------------------------------------------------- /mobile_verifier/migrations/39_unique_connections-up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS unique_connections ( 2 | hotspot_pubkey TEXT NOT NULL, 3 | unique_connections BIGINT NOT NULL, 4 | start_timestamp TIMESTAMPTZ NOT NULL, 5 | end_timestamp TIMESTAMPTZ NOT NULL, 6 | received_timestamp TIMESTAMPTZ NOT NULL, 7 | primary key(hotspot_pubkey, received_timestamp) 8 | ); 9 | 10 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/3_heartbeats.sql: -------------------------------------------------------------------------------- 1 | create table heartbeats ( 2 | id text primary key not null, 3 | reward_weight decimal not null, 4 | timestamp timestamp not null 5 | ); 6 | -------------------------------------------------------------------------------- 
/mobile_verifier/migrations/40_service_provider_override-up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE IF EXISTS hexes ADD COLUMN IF NOT EXISTS service_provider_override oracle_assignment; 2 | ALTER TYPE data_set_type ADD VALUE IF NOT EXISTS 'service_provider_override' AFTER 'landtype'; 3 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/41_data_session_add_rewardable_bytes.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE 2 | IF EXISTS hotspot_data_transfer_sessions 3 | ADD 4 | COLUMN IF NOT EXISTS rewardable_bytes BIGINT; -------------------------------------------------------------------------------- /mobile_verifier/migrations/42_subscriber_mapping_activity.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS subscriber_mapping_activity ( 2 | subscriber_id BYTEA NOT NULL, 3 | discovery_reward_shares BIGINT NOT NULL, 4 | verification_reward_shares BIGINT NOT NULL, 5 | received_timestamp TIMESTAMPTZ NOT NULL, 6 | inserted_at TIMESTAMPTZ NOT NULL DEFAULT now(), 7 | PRIMARY KEY (subscriber_id, received_timestamp) 8 | ); 9 | 10 | INSERT INTO subscriber_mapping_activity(subscriber_id, discovery_reward_shares, verification_reward_shares, received_timestamp, inserted_at) 11 | SELECT subscriber_id, 30, 0, received_timestamp, created_at AS inserted_at 12 | FROM subscriber_loc_verified; 13 | 14 | UPDATE subscriber_mapping_activity sma 15 | SET verification_reward_shares = svme.total_reward_points 16 | FROM subscriber_verified_mapping_event svme 17 | WHERE sma.subscriber_id = svme.subscriber_id 18 | AND sma.received_timestamp::date = svme.received_timestamp::date; 19 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/43_hotspot_bans.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS hotspot_bans ( 2 | hotspot_pubkey TEXT NOT NULL, 3 | received_timestamp TIMESTAMPTZ NOT NULL, 4 | expiration_timestamp TIMESTAMPTZ, 5 | ban_type TEXT NOT NULL, 6 | PRIMARY KEY (hotspot_pubkey, received_timestamp) 7 | ); 8 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/44_data_session_add_burn_timestamp.sql: -------------------------------------------------------------------------------- 1 | -- Add burn_timestamp 2 | ALTER TABLE 3 | hotspot_data_transfer_sessions 4 | ADD 5 | COLUMN IF NOT EXISTS burn_timestamp TIMESTAMPTZ; 6 | 7 | -- Update burn_timestamp to be equal to received_timestamp for old records 8 | UPDATE 9 | hotspot_data_transfer_sessions 10 | SET 11 | burn_timestamp = received_timestamp 12 | WHERE 13 | burn_timestamp IS NULL; 14 | 15 | -- Make burn_timestamp a NOT NULL value so it can be a primary key 16 | ALTER TABLE 17 | hotspot_data_transfer_sessions 18 | ALTER COLUMN 19 | burn_timestamp 20 | SET 21 | NOT NULL; 22 | 23 | -- Add primary key on pub_key, payer, burn_timestamp 24 | ALTER TABLE 25 | hotspot_data_transfer_sessions 26 | ADD 27 | CONSTRAINT hotspot_data_transfer_sessions_pk PRIMARY KEY (pub_key, payer, burn_timestamp); -------------------------------------------------------------------------------- /mobile_verifier/migrations/45_drop_grandfathered_radio_threshold.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE 
grandfathered_radio_threshold; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/46_radio_threshold_rework_unique_idx.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM radio_threshold WHERE cbsd_id IS NOT NULL; 2 | 3 | DROP INDEX radio_threshold_hotspot_pubkey_cbsd_id_idx; 4 | 5 | ALTER TABLE radio_threshold DROP COLUMN cbsd_id; 6 | 7 | CREATE UNIQUE INDEX radio_threshold_hotspot_pubkey_idx ON radio_threshold (hotspot_pubkey); 8 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/47_drop_cbrs_heartbeats.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE cbrs_heartbeats; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/48_drop_old_hex_coverage.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE old_hex_coverage; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/49_delete_cbrs_from_seniority.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM seniority WHERE radio_type = 'cbrs'; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/4_heartbeats.sql: -------------------------------------------------------------------------------- 1 | drop table heartbeats; 2 | 3 | create table heartbeats ( 4 | hotspot_key text not null, 5 | cbsd_id text not null, 6 | reward_weight decimal not null, 7 | timestamp timestamp not null, 8 | primary key(hotspot_key, cbsd_id) 9 | ); 10 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/50_delete_cbrs_from_coverage_objects.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM coverage_objects WHERE radio_type = 'cbrs'; 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/5_speedtests.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE speedtest AS ( 2 | timestamp timestamp, 3 | upload_speed bigint, 4 | download_speed bigint, 5 | latency integer 6 | ); 7 | 8 | CREATE TABLE speedtests ( 9 | id text primary key not null, 10 | speedtests speedtest[] not null, 11 | latest_timestamp timestamp not null 12 | ); 13 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/6_heartbeats_cbsd_id_index.sql: -------------------------------------------------------------------------------- 1 | create unique index heartbeat_cbsd_id on heartbeats (cbsd_id); 2 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/7_multiple_heartbeats.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE heartbeats; 2 | 3 | CREATE TABLE heartbeats ( 4 | cbsd_id TEXT NOT NULL PRIMARY KEY, 5 | hotspot_key TEXT NOT NULL, 6 | reward_weight DECIMAL, 7 | timestamps TIMESTAMP[] NOT NULL 8 | ); 9 | 10 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/8_out_of_order_heartbeats.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE 
heartbeats; 2 | 3 | CREATE TABLE heartbeats ( 4 | cbsd_id TEXT NOT NULL PRIMARY KEY, 5 | hotspot_key TEXT NOT NULL, 6 | reward_weight DECIMAL, 7 | latest_timestamp TIMESTAMP NOT NULL, 8 | -- List of hours for which we have seen or have not seen a heartbeat, in order starting 9 | -- at the first hour of the day (00:00 to 00:59). Since SQL arrays are 1-indexed, this 10 | -- means that the index is one greater than the hour, e.g. hours_seen[2] corresponds to 11 | -- seeing a heartbeat sometime at 1 am. 12 | hours_seen BOOLEAN[24] NOT NULL 13 | ); 14 | -------------------------------------------------------------------------------- /mobile_verifier/migrations/9_realtime_heartbeats.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE heartbeats; 2 | 3 | CREATE TYPE cell_type AS ENUM ( 4 | 'nova436h', 5 | 'nova430i', 6 | 'neutrino430', 7 | 'sercommindoor', 8 | 'sercommoutdoor' 9 | ); 10 | 11 | CREATE TABLE heartbeats ( 12 | cbsd_id TEXT NOT NULL, 13 | hotspot_key TEXT NOT NULL, 14 | cell_type cell_type NOT NULL, 15 | latest_timestamp TIMESTAMPTZ NOT NULL, 16 | truncated_timestamp TIMESTAMPTZ NOT NULL CHECK (truncated_timestamp = date_trunc('hour', truncated_timestamp)), 17 | PRIMARY KEY(cbsd_id, truncated_timestamp) 18 | ); 19 | -------------------------------------------------------------------------------- /mobile_verifier/src/boosting_oracles/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod data_sets; 2 | pub use data_sets::*; 3 | -------------------------------------------------------------------------------- /mobile_verifier/src/cli/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod reward_from_db; 2 | pub mod server; 3 | pub mod service_provider_promotions; 4 | pub mod verify_disktree; 5 | -------------------------------------------------------------------------------- /mobile_verifier/src/cli/service_provider_promotions.rs: -------------------------------------------------------------------------------- 1 | use crate::{service_provider, Settings}; 2 | use anyhow::Result; 3 | use chrono::{DateTime, Utc}; 4 | use mobile_config::client::CarrierServiceClient; 5 | 6 | #[derive(Debug, clap::Args)] 7 | pub struct Cmd { 8 | #[clap(long)] 9 | start: Option<DateTime<Utc>>, 10 | } 11 | 12 | impl Cmd { 13 | pub async fn run(self, settings: &Settings) -> Result<()> { 14 | let epoch_start = match self.start { 15 | Some(dt) => dt, 16 | None => Utc::now(), 17 | }; 18 | 19 | let carrier_client = CarrierServiceClient::from_settings(&settings.config_client)?; 20 | let promos = service_provider::get_promotions(&carrier_client, &epoch_start).await?; 21 | 22 | println!("Promotions as of {epoch_start}"); 23 | for sp in promos.into_proto() { 24 | println!("Service Provider: {:?}", sp.service_provider()); 25 | println!(" incentive_escrow_bps: {:?}", sp.incentive_escrow_fund_bps); 26 | println!(" Promotions: ({})", sp.promotions.len()); 27 | for promo in sp.promotions { 28 | let start = DateTime::from_timestamp(promo.start_ts as i64, 0).unwrap(); 29 | let end = DateTime::from_timestamp(promo.end_ts as i64, 0).unwrap(); 30 | let duration = humantime::format_duration((end - start).to_std()?); 31 | println!(" name: {}", promo.entity); 32 | println!(" duration: {duration} ({start:?} -> {end:?})",); 33 | println!(" shares: {}", promo.shares); 34 | } 35 | } 36 | 37 | Ok(()) 38 | } 39 | } 40 | --------------------------------------------------------------------------------
/mobile_verifier/src/cli/verify_disktree.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, path::PathBuf}; 2 | 3 | use hextree::disktree::DiskTreeMap; 4 | 5 | use hex_assignments::{landtype::LandtypeValue, Assignment}; 6 | 7 | use crate::Settings; 8 | 9 | #[derive(Debug, clap::Args)] 10 | pub struct Cmd { 11 | /// Path to the unzipped .h3tree file 12 | #[clap(long)] 13 | path: PathBuf, 14 | 15 | /// Expected type of the .h3tree file 16 | #[clap(long)] 17 | r#type: DisktreeType, 18 | } 19 | 20 | #[derive(Debug, Clone, clap::ValueEnum)] 21 | enum DisktreeType { 22 | Landtype, 23 | } 24 | 25 | impl Cmd { 26 | pub async fn run(self, _settings: &Settings) -> anyhow::Result<()> { 27 | let disktree = DiskTreeMap::open(&self.path)?; 28 | 29 | let mut value_counts = HashMap::<u8, usize>::new(); 30 | let mut idx: u128 = 0; 31 | let start = tokio::time::Instant::now(); 32 | 33 | println!("Checking {}, this may take a while...", self.path.display()); 34 | for x in disktree.iter()? { 35 | idx += 1; 36 | if idx % 100_000_000 == 0 { 37 | println!("Processed {} cells after {:?}", idx, start.elapsed()); 38 | } 39 | let (_cell, vals) = x.unwrap(); 40 | *value_counts.entry(vals[0]).or_insert(0) += 1; 41 | } 42 | 43 | println!("REPORT {}", "=".repeat(50)); 44 | match self.r#type { 45 | DisktreeType::Landtype => { 46 | for (key, count) in value_counts { 47 | let landtype = LandtypeValue::try_from(key); 48 | let assignment = landtype.as_ref().map(|lt| Assignment::from(*lt)); 49 | // cover is formatted twice to allow for padding a result 50 | println!( 51 | "| {key:<4} | {count:<12} | {:<20} | {assignment:?} |", 52 | format!("{landtype:?}") 53 | ); 54 | } 55 | } 56 | } 57 | 58 | Ok(()) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /mobile_verifier/src/heartbeats/valid_radios.sql: -------------------------------------------------------------------------------- 1 | WITH heartbeats AS ( 2 | SELECT 3 | hotspot_key, 4 | cell_type, 5 | CASE WHEN count(*) >= $3 THEN 6 | 1.0 7 | ELSE 8 | 0.0 9 | END AS heartbeat_multiplier, 10 | ARRAY_AGG(distance_to_asserted ORDER BY truncated_timestamp) as distances_to_asserted, 11 | ARRAY_AGG(location_trust_score_multiplier ORDER BY truncated_timestamp) as trust_score_multipliers 12 | FROM 13 | wifi_heartbeats 14 | WHERE 15 | truncated_timestamp >= $1 16 | AND truncated_timestamp < $2 17 | GROUP BY 18 | hotspot_key, 19 | cell_type 20 | ), 21 | latest_uuids AS ( 22 | SELECT DISTINCT ON (hotspot_key) 23 | hotspot_key, 24 | coverage_object 25 | FROM 26 | wifi_heartbeats wh 27 | WHERE 28 | truncated_timestamp >= $1 29 | AND truncated_timestamp < $2 30 | ORDER BY 31 | hotspot_key, 32 | truncated_timestamp DESC 33 | ) 34 | SELECT 35 | hb.hotspot_key, 36 | hb.cell_type, 37 | hb.distances_to_asserted, 38 | hb.trust_score_multipliers, 39 | u.coverage_object 40 | FROM 41 | heartbeats hb 42 | INNER JOIN latest_uuids u ON hb.hotspot_key = u.hotspot_key 43 | WHERE 44 | hb.heartbeat_multiplier = 1.0 45 | -------------------------------------------------------------------------------- /mobile_verifier/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use mobile_verifier::{ 4 | cli::{reward_from_db, server, service_provider_promotions, verify_disktree}, 5 | Settings, 6 | }; 7 | use std::path; 8 | 9 | #[derive(clap::Parser)] 10 | #[clap(version = env!("CARGO_PKG_VERSION"))] 11 | #[clap(about =
"Helium Mobile Share Server")] 12 | pub struct Cli { 13 | /// Optional configuration file to use. If present the toml file at the 14 | /// given path will be loaded. Environemnt variables can override the 15 | /// settins in the given file. 16 | #[clap(short = 'c')] 17 | config: Option, 18 | 19 | #[clap(subcommand)] 20 | cmd: Cmd, 21 | } 22 | 23 | impl Cli { 24 | pub async fn run(self) -> Result<()> { 25 | let settings = Settings::new(self.config)?; 26 | custom_tracing::init(settings.log.clone(), settings.custom_tracing.clone()).await?; 27 | self.cmd.run(settings).await 28 | } 29 | } 30 | 31 | #[derive(clap::Subcommand)] 32 | pub enum Cmd { 33 | Server(server::Cmd), 34 | RewardFromDb(reward_from_db::Cmd), 35 | /// Verify a Disktree file for HexBoosting. 36 | /// 37 | /// Go through every cell and ensure it's value can be turned into an Assignment. 38 | /// NOTE: This can take a very long time. Run with a --release binary. 39 | VerifyDisktree(verify_disktree::Cmd), 40 | /// Print active Service Provider Promotions 41 | ServiceProviderPromotions(service_provider_promotions::Cmd), 42 | } 43 | 44 | impl Cmd { 45 | pub async fn run(self, settings: Settings) -> Result<()> { 46 | match self { 47 | Self::Server(cmd) => cmd.run(&settings).await, 48 | Self::RewardFromDb(cmd) => cmd.run(&settings).await, 49 | Self::VerifyDisktree(cmd) => cmd.run(&settings).await, 50 | Self::ServiceProviderPromotions(cmd) => cmd.run(&settings).await, 51 | } 52 | } 53 | } 54 | 55 | #[tokio::main] 56 | async fn main() -> Result<()> { 57 | let cli = Cli::parse(); 58 | cli.run().await 59 | } 60 | -------------------------------------------------------------------------------- /mobile_verifier/src/service_provider/mod.rs: -------------------------------------------------------------------------------- 1 | pub use dc_sessions::{get_dc_sessions, ServiceProviderDCSessions}; 2 | pub use promotions::{get_promotions, ServiceProviderPromotions}; 3 | pub use reward::ServiceProviderRewardInfos; 4 | use rust_decimal::Decimal; 5 | 6 | mod dc_sessions; 7 | mod promotions; 8 | mod reward; 9 | 10 | // This type is used in lieu of the helium_proto::ServiceProvider enum so we can 11 | // handle more than a single value without adding a hard deploy dependency to 12 | // mobile-verifier when a new carrier is added.. 
13 | pub type ServiceProviderId = i32; 14 | 15 | pub fn get_scheduled_tokens(total_emission_pool: Decimal) -> Decimal { 16 | crate::reward_shares::get_scheduled_tokens_for_service_providers(total_emission_pool) 17 | } 18 | -------------------------------------------------------------------------------- /mobile_verifier/src/service_provider/promotions.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use mobile_config::client::carrier_service_client::CarrierServiceVerifier; 3 | use rust_decimal::Decimal; 4 | use rust_decimal_macros::dec; 5 | 6 | use crate::service_provider::ServiceProviderId; 7 | 8 | mod proto { 9 | pub use helium_proto::{service_provider_promotions::Promotion, ServiceProviderPromotions}; 10 | } 11 | 12 | pub async fn get_promotions( 13 | client: &impl CarrierServiceVerifier, 14 | epoch_start: &DateTime<Utc>, 15 | ) -> anyhow::Result<ServiceProviderPromotions> { 16 | let promos = client.list_incentive_promotions(epoch_start).await?; 17 | Ok(ServiceProviderPromotions(promos)) 18 | } 19 | 20 | #[derive(Debug, Default, Clone)] 21 | pub struct ServiceProviderPromotions(Vec<proto::ServiceProviderPromotions>); 22 | 23 | impl ServiceProviderPromotions { 24 | pub fn into_proto(self) -> Vec<proto::ServiceProviderPromotions> { 25 | self.0 26 | } 27 | 28 | pub(crate) fn get_fund_percent(&self, sp_id: ServiceProviderId) -> Decimal { 29 | for promo in &self.0 { 30 | if promo.service_provider == sp_id { 31 | return Decimal::from(promo.incentive_escrow_fund_bps) / dec!(10_000); 32 | } 33 | } 34 | 35 | dec!(0) 36 | } 37 | 38 | pub(crate) fn get_active_promotions(&self, sp_id: ServiceProviderId) -> Vec<proto::Promotion> { 39 | for promo in &self.0 { 40 | if promo.service_provider == sp_id { 41 | return promo.promotions.clone(); 42 | } 43 | } 44 | 45 | vec![] 46 | } 47 | } 48 | 49 | impl From<Vec<proto::ServiceProviderPromotions>> for ServiceProviderPromotions { 50 | fn from(value: Vec<proto::ServiceProviderPromotions>) -> Self { 51 | Self(value) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /mobile_verifier/src/telemetry.rs: -------------------------------------------------------------------------------- 1 | use crate::rewarder; 2 | use chrono::{DateTime, Utc}; 3 | use mobile_config::EpochInfo; 4 | use sqlx::{Pool, Postgres}; 5 | 6 | const LAST_REWARDED_END_TIME: &str = "last_rewarded_end_time"; 7 | const DATA_TRANSFER_REWARDS_SCALE: &str = "data_transfer_rewards_scale"; 8 | 9 | pub async fn initialize(db: &Pool<Postgres>) -> anyhow::Result<()> { 10 | let next_reward_epoch = rewarder::next_reward_epoch(db).await?; 11 | let epoch_period: EpochInfo = next_reward_epoch.into(); 12 | last_rewarded_end_time(epoch_period.period.start); 13 | Ok(()) 14 | } 15 | 16 | pub fn last_rewarded_end_time(timestamp: DateTime<Utc>) { 17 | metrics::gauge!(LAST_REWARDED_END_TIME).set(timestamp.timestamp() as f64); 18 | } 19 | 20 | pub fn data_transfer_rewards_scale(scale: f64) { 21 | metrics::gauge!(DATA_TRANSFER_REWARDS_SCALE).set(scale); 22 | } 23 | -------------------------------------------------------------------------------- /mobile_verifier/src/unique_connections/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod db; 2 | pub mod ingestor; 3 | 4 | use helium_crypto::PublicKeyBinary; 5 | use std::collections::HashMap; 6 | 7 | pub type UniqueConnectionCounts = HashMap<PublicKeyBinary, u64>; 8 | 9 | // hip-134: 10 | // https://github.com/helium/HIP/blob/main/0134-reward-mobile-carrier-offload-hotspots.md 11 | // A Hotspot serving >25 unique connections, as defined by the Carrier utilizing the hotspots for Carrier Offload, on a seven day rolling average.
12 | pub const MINIMUM_UNIQUE_CONNECTIONS: u64 = 25; 13 | 14 | pub fn is_qualified(unique_connections: &UniqueConnectionCounts, pubkey: &PublicKeyBinary) -> bool { 15 | let uniq_conns = unique_connections.get(pubkey).cloned().unwrap_or_default(); 16 | uniq_conns > MINIMUM_UNIQUE_CONNECTIONS 17 | } 18 | -------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/coverage.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use futures::TryStreamExt; 4 | use helium_crypto::PublicKeyBinary; 5 | use mobile_verifier::{ 6 | coverage::{CoveredHexStream, HexCoverage}, 7 | heartbeats::KeyType, 8 | seniority::Seniority, 9 | }; 10 | use sqlx::PgPool; 11 | use uuid::Uuid; 12 | 13 | #[sqlx::test(fixtures("covered_stream"))] 14 | async fn test_covered_hex_stream(pool: PgPool) { 15 | let mut txn = pool.begin().await.unwrap(); 16 | 17 | let wifi_pub_key: PublicKeyBinary = "1trSuseow771kqR8Muvj8rK3SbM26jN3o8GuDEjuUMEWZp7WzvexMtZwNP1jH7BMvaUgpb2fWQCxBgCm4UCFbHn6x6ApFzXoaUTb6SMSYYc6uwUQiHsa9vFC8LpPEwo6bv7rjKddgSxxtRhNojuck5dAXkAuWaxW9fW1vxwSqAq7WKEMnRMfjMzbpC1yKVA9iBd3m7s6V9KqLLCBaG4BdYszS3cbsQY92d9BkTapkLfbFrVEaLTeF5ETT7eewTGYQwY2h8knk9x9e84idnNVUKTiJs34AvSaAXkbRehzJpAjQ2skHXb1PtS7FU6TVgmQpW1tykJ9qJkDzDf9JWiHSvupkxvmK6MT2Aqkvc1owy2Q7i" 18 | .parse() 19 | .expect("failed gw1 parse"); 20 | 21 | // WIFI should work 22 | let wifi_key = KeyType::Wifi(&wifi_pub_key); 23 | let seniority = Seniority::fetch_latest(wifi_key, &mut txn) 24 | .await 25 | .unwrap() 26 | .unwrap(); 27 | let co_uuid = Uuid::from_str("019516a5-2a8c-7480-b319-8b4f801ebe6c").unwrap(); 28 | let stream = pool 29 | .covered_hex_stream(wifi_key, &co_uuid, &seniority) 30 | .await 31 | .unwrap(); 32 | let hexes: Vec<HexCoverage> = stream.try_collect().await.unwrap(); 33 | assert_eq!(hexes.len(), 1); 34 | } 35 | -------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/fixtures/footfall.1722895200000.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/helium/oracles/f5b7c46833e31098ec86620e50bf35714c9edb78/mobile_verifier/tests/integrations/fixtures/footfall.1722895200000.gz -------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/fixtures/footfall.1732895200000.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/helium/oracles/f5b7c46833e31098ec86620e50bf35714c9edb78/mobile_verifier/tests/integrations/fixtures/footfall.1732895200000.gz -------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/fixtures/landtype.1722895200000.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/helium/oracles/f5b7c46833e31098ec86620e50bf35714c9edb78/mobile_verifier/tests/integrations/fixtures/landtype.1722895200000.gz -------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/fixtures/service_provider_override.1739404800000.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/helium/oracles/f5b7c46833e31098ec86620e50bf35714c9edb78/mobile_verifier/tests/integrations/fixtures/service_provider_override.1739404800000.gz
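A note on the unique_connections module shown above: qualification uses a strict comparison, so a hotspot reporting exactly MINIMUM_UNIQUE_CONNECTIONS (25) does not qualify, and a pubkey absent from the map defaults to zero. A self-contained sketch of the same logic (plain strings stand in for PublicKeyBinary here, purely for illustration):

use std::collections::HashMap;

fn main() {
    // Stand-in for UniqueConnectionCounts, keyed by &str instead of PublicKeyBinary.
    let mut counts: HashMap<&str, u64> = HashMap::new();
    counts.insert("hotspot_a", 26);
    counts.insert("hotspot_b", 25);

    // Same shape as is_qualified: missing keys default to 0 and the
    // threshold comparison is exclusive.
    let qualified = |key: &str| counts.get(key).copied().unwrap_or_default() > 25;

    assert!(qualified("hotspot_a")); // 26 > 25
    assert!(!qualified("hotspot_b")); // exactly 25 does not qualify
    assert!(!qualified("hotspot_unknown")); // absent => 0
}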
-------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/fixtures/urbanization.1722895200000.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/helium/oracles/f5b7c46833e31098ec86620e50bf35714c9edb78/mobile_verifier/tests/integrations/fixtures/urbanization.1722895200000.gz -------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/main.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | 3 | mod banning; 4 | mod boosting_oracles; 5 | mod coverage; 6 | mod heartbeats; 7 | mod hex_boosting; 8 | mod last_location; 9 | mod modeled_coverage; 10 | mod rewarder_mappers; 11 | mod rewarder_oracles; 12 | mod rewarder_poc_dc; 13 | mod rewarder_sp_rewards; 14 | mod seniority; 15 | mod speedtests; 16 | -------------------------------------------------------------------------------- /mobile_verifier/tests/integrations/rewarder_oracles.rs: -------------------------------------------------------------------------------- 1 | use crate::common::{self, reward_info_24_hours}; 2 | use helium_proto::services::poc_mobile::UnallocatedRewardType; 3 | use mobile_verifier::{reward_shares, rewarder}; 4 | use rust_decimal::prelude::*; 5 | use rust_decimal_macros::dec; 6 | use sqlx::PgPool; 7 | 8 | #[sqlx::test] 9 | async fn test_oracle_rewards(_pool: PgPool) -> anyhow::Result<()> { 10 | let (mobile_rewards_client, mobile_rewards) = common::create_file_sink(); 11 | 12 | let reward_info = reward_info_24_hours(); 13 | 14 | // run rewards for oracles 15 | rewarder::reward_oracles(mobile_rewards_client, &reward_info).await?; 16 | 17 | let rewards = mobile_rewards.finish().await?; 18 | let unallocated_reward = rewards.unallocated.first().expect("Unallocated"); 19 | 20 | assert_eq!( 21 | UnallocatedRewardType::Oracle as i32, 22 | unallocated_reward.reward_type 23 | ); 24 | // confirm our unallocated amount 25 | assert_eq!(3_287_671_232_876, unallocated_reward.amount); 26 | 27 | // confirm the total rewards allocated matches expectations 28 | let expected_sum = reward_shares::get_scheduled_tokens_for_oracles(reward_info.epoch_emissions) 29 | .to_u64() 30 | .unwrap(); 31 | assert_eq!(expected_sum, unallocated_reward.amount); 32 | 33 | // confirm the rewarded percentage amount matches expectations 34 | let percent = (Decimal::from(unallocated_reward.amount) / reward_info.epoch_emissions) 35 | .round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven); 36 | assert_eq!(percent, dec!(0.04)); 37 | 38 | Ok(()) 39 | } 40 | -------------------------------------------------------------------------------- /poc_entropy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "poc-entropy" 3 | version = "0.1.0" 4 | description = "PoC Entropy Server for the Helium Network" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | config = { workspace = true } 12 | clap = { workspace = true } 13 | thiserror = { workspace = true } 14 | serde = { workspace = true } 15 | serde_json = { workspace = true } 16 | base64 = { workspace = true } 17 | blake3 = { workspace = true } 18 | http = { workspace = true } 19 | tonic = { workspace = true } 20 | hyper = "0" 21 | jsonrpsee = { version = "0", features = ["async-client", "http-client"] } 22 | tower = { 
version = "0.4" } 23 | triggered = { workspace = true } 24 | futures = { workspace = true } 25 | futures-util = { workspace = true } 26 | prost = { workspace = true } 27 | bs58 = "0" 28 | tracing = { workspace = true } 29 | tracing-subscriber = { workspace = true } 30 | metrics = { workspace = true } 31 | metrics-exporter-prometheus = { workspace = true } 32 | tokio = { workspace = true } 33 | chrono = { workspace = true } 34 | helium-proto = { workspace = true } 35 | helium-crypto = { workspace = true } 36 | file-store = { path = "../file_store" } 37 | poc-metrics = { path = "../metrics" } 38 | custom-tracing = { path = "../custom_tracing", features = ["grpc"] } 39 | -------------------------------------------------------------------------------- /poc_entropy/pkg/settings-template.toml: -------------------------------------------------------------------------------- 1 | 2 | # log settings for the application (RUST_LOG format). Default below 3 | # 4 | # log = "poc_entropy=debug,poc_store=info" 5 | 6 | # Source URL for entropy. Required 7 | source = "https://entropy.source.url" 8 | 9 | # Listen addres for public api. Default below 10 | # 11 | # listen = "0.0.0.0:8080" 12 | 13 | # Cache folder to use. Default blow 14 | # 15 | # cache = "/var/data/entropy" 16 | 17 | [output] 18 | # Output bucket for entropy 19 | 20 | # Name of bucket to write details to. Required 21 | # 22 | bucket = "entropy-bucket" 23 | 24 | # Region for bucket. Defaults to below 25 | # 26 | # region = "us-west-2" 27 | 28 | # Optional URL for AWS api endpoint. Inferred from aws config settings or aws 29 | # IAM context by default 30 | # 31 | # endpoint = "https://aws-s3-bucket.aws.com" 32 | 33 | 34 | [metrics] 35 | 36 | # Endpoint for metrics. Default below 37 | # 38 | # endpoint = "127.0.0.1:19000" 39 | -------------------------------------------------------------------------------- /poc_entropy/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod entropy_generator; 2 | pub mod server; 3 | pub mod settings; 4 | 5 | pub use settings::Settings; 6 | -------------------------------------------------------------------------------- /poc_entropy/src/server.rs: -------------------------------------------------------------------------------- 1 | use crate::entropy_generator::MessageReceiver; 2 | use helium_proto::{ 3 | services::poc_entropy::{EntropyReqV1, PocEntropy, Server as GrpcServer}, 4 | EntropyReportV1, 5 | }; 6 | use std::net::SocketAddr; 7 | use tokio::time::Duration; 8 | use tonic::transport; 9 | 10 | struct EntropyServer { 11 | entropy_watch: MessageReceiver, 12 | } 13 | 14 | #[tonic::async_trait] 15 | impl PocEntropy for EntropyServer { 16 | async fn entropy( 17 | &self, 18 | _request: tonic::Request, 19 | ) -> Result, tonic::Status> { 20 | let entropy = &*self.entropy_watch.borrow(); 21 | metrics::counter!("entropy_server_get_count").increment(1); 22 | Ok(tonic::Response::new(entropy.into())) 23 | } 24 | } 25 | 26 | pub struct ApiServer { 27 | pub socket_addr: SocketAddr, 28 | service: GrpcServer, 29 | } 30 | 31 | impl ApiServer { 32 | pub async fn new( 33 | socket_addr: SocketAddr, 34 | entropy_watch: MessageReceiver, 35 | ) -> anyhow::Result { 36 | let service = GrpcServer::new(EntropyServer { entropy_watch }); 37 | 38 | Ok(Self { 39 | socket_addr, 40 | service, 41 | }) 42 | } 43 | 44 | pub async fn run(self, shutdown: &triggered::Listener) -> anyhow::Result<()> { 45 | tracing::info!(listen = self.socket_addr.to_string(), "starting"); 46 | 
transport::Server::builder() 47 | .layer(custom_tracing::grpc_layer::new_with_span(make_span)) 48 | .http2_keepalive_interval(Some(Duration::from_secs(250))) 49 | .http2_keepalive_timeout(Some(Duration::from_secs(60))) 50 | .add_service(self.service) 51 | .serve_with_shutdown(self.socket_addr, shutdown.clone()) 52 | .await?; 53 | tracing::info!("stopping api server"); 54 | Ok(()) 55 | } 56 | } 57 | 58 | fn make_span(_request: &http::request::Request<helium_proto::services::Body>) -> tracing::Span { 59 | tracing::info_span!(custom_tracing::DEFAULT_SPAN) 60 | } 61 | -------------------------------------------------------------------------------- /poc_entropy/src/settings.rs: -------------------------------------------------------------------------------- 1 | use config::{Config, Environment, File}; 2 | use serde::Deserialize; 3 | use std::path::Path; 4 | 5 | #[derive(Debug, Deserialize)] 6 | pub struct Settings { 7 | /// RUST_LOG compatible settings string. Default to 8 | /// "poc_entropy=debug,poc_store=info" 9 | #[serde(default = "default_log")] 10 | pub log: String, 11 | #[serde(default)] 12 | pub custom_tracing: custom_tracing::Settings, 13 | /// Listen address for http requests for entropy. Default "0.0.0.0:8080" 14 | #[serde(default = "default_listen_addr")] 15 | pub listen: String, 16 | /// Source URL for entropy data. Required 17 | pub source: String, 18 | /// Target output bucket details 19 | pub output: file_store::Settings, 20 | /// Folder for local cache of ingest data 21 | #[serde(default = "default_cache")] 22 | pub cache: String, 23 | /// Metrics settings 24 | pub metrics: poc_metrics::Settings, 25 | } 26 | 27 | fn default_log() -> String { 28 | "poc_entropy=debug,poc_store=info".to_string() 29 | } 30 | 31 | fn default_cache() -> String { 32 | "/var/data/entropy".to_string() 33 | } 34 | 35 | fn default_listen_addr() -> String { 36 | "0.0.0.0:8080".to_string() 37 | } 38 | 39 | impl Settings { 40 | /// Load Settings from a given path. Settings are loaded from a given 41 | /// optional path and can be overridden with environment variables. 42 | /// 43 | /// Environment overrides have the same name as the entries in the settings 44 | /// file in uppercase and prefixed with "ENTROPY_". For example 45 | /// "ENTROPY_LOG" will override the log setting. 46 | pub fn new<P: AsRef<Path>>(path: Option<P>) -> Result<Self, config::ConfigError> {
47 | let mut builder = Config::builder(); 48 | 49 | if let Some(file) = path { 50 | // Add optional settings file 51 | builder = builder 52 | .add_source(File::with_name(&file.as_ref().to_string_lossy()).required(false)); 53 | } 54 | // Add in settings from the environment (with a prefix of ENTROPY) 55 | // Eg.. `MI_DEBUG=1 ./target/app` would set the `debug` key 56 | builder 57 | .add_source(Environment::with_prefix("ENTROPY").separator("_")) 58 | .build() 59 | .and_then(|config| config.try_deserialize()) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /price/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "price" 3 | version = "0.1.0" 4 | description = "Price Oracle for the Helium Network" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | [dependencies] 10 | anyhow = { workspace = true } 11 | config = { workspace = true } 12 | clap = { workspace = true } 13 | thiserror = { workspace = true } 14 | serde = { workspace = true } 15 | serde_json = { workspace = true } 16 | futures = { workspace = true } 17 | futures-util = { workspace = true } 18 | prost = { workspace = true } 19 | tracing = { workspace = true } 20 | tracing-subscriber = { workspace = true } 21 | metrics = { workspace = true } 22 | metrics-exporter-prometheus = { workspace = true } 23 | tokio = { workspace = true } 24 | tokio-util = { workspace = true } 25 | chrono = { workspace = true } 26 | helium-proto = { workspace = true } 27 | rust_decimal = { workspace = true } 28 | rust_decimal_macros = { workspace = true } 29 | triggered = { workspace = true } 30 | humantime-serde = { workspace = true } 31 | 32 | custom-tracing = { path = "../custom_tracing" } 33 | file-store = { path = "../file_store" } 34 | poc-metrics = { path = "../metrics" } 35 | solana = { path = "../solana" } 36 | task-manager = { path = "../task_manager" } 37 | -------------------------------------------------------------------------------- /price/README.md: -------------------------------------------------------------------------------- 1 | # Price Oracle Server 2 | 3 | The price oracle server serves up price data for helium token(s) acquired from 4 | the [pyth.network](https://pyth.network) and stores it in an S3 bucket. 5 | 6 | The supported tokens are: 7 | - HNT 8 | - HST 9 | - MOBILE 10 | - IOT 11 | 12 | Note that currently only HNT-USD prices are available. 13 | 14 | The price oracle server: 15 | 16 | - Requests price for HNT token at a regular interval (60s) from pyth via solana 17 | RpcClient. In case of failure it uses the previously fetched price and stores 18 | the same with an updated timestamp. 19 | - Stores and uploads [price_report](https://github.com/helium/proto/blob/master/src/price_report.proto) to an S3 bucket. 20 | -------------------------------------------------------------------------------- /price/pkg/settings-template.toml: -------------------------------------------------------------------------------- 1 | # log settings for the application (RUST_LOG format). Default below 2 | # 3 | # log = "price=debug" 4 | 5 | # RPC Endpoint for price oracles. Required. 6 | source = "https://api.devnet.solana.com" 7 | 8 | # Price tick interval (secs). Default = 60s. Optional. 9 | interval = "60 seconds" 10 | 11 | # Cache folder to use.
Default below 12 | # 13 | # cache = "/var/data/price" 14 | 15 | [cluster] 16 | name = "devnet" 17 | hnt_price_key = "6Eg8YdfFJQF2HHonzPUBSCCmyUEhrStg9VBLK957sBe6" 18 | # mobile_price_key = 19 | # hst_price_key = 20 | # iot_price_key = 21 | # HNT price has 8 exponent. i.e. $1 = 100000000. Set it to some number for testing. Optional. 22 | # hnt_price = 23 | # MOBILE price has 6 exponent. i.e. $1 = 1000000. Set it to some number for testing. Optional. 24 | # mobile_price = 25 | # IOT price has 6 exponent. i.e. $1 = 1000000. Set it to some number for testing. Optional. 26 | # iot_price = 27 | # HST price has 6 exponent. i.e. $1 = 1000000. Set it to some number for testing. Optional. 28 | # hst_price = 29 | 30 | [output] 31 | # Output bucket for price 32 | 33 | # Name of bucket to write details to. Required 34 | # 35 | bucket = "price" 36 | 37 | # Region for bucket. Defaults to below 38 | # 39 | # region = "us-west-2" 40 | 41 | # Optional URL for AWS api endpoint. Inferred from aws config settings or aws 42 | # IAM context by default 43 | # 44 | # endpoint = "https://aws-s3-bucket.aws.com" 45 | 46 | 47 | [metrics] 48 | 49 | # Endpoint for metrics. Default below 50 | # 51 | # endpoint = "127.0.0.1:19000" 52 | -------------------------------------------------------------------------------- /price/src/cli/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod check; 2 | -------------------------------------------------------------------------------- /price/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod cli; 2 | pub mod metrics; 3 | pub mod price_generator; 4 | pub mod price_tracker; 5 | pub mod settings; 6 | 7 | pub use price_generator::PriceGenerator; 8 | pub use price_tracker::PriceTracker; 9 | pub use settings::Settings; 10 | -------------------------------------------------------------------------------- /price/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use solana::Token; 2 | 3 | const PRICE_GAUGE: &str = concat!(env!("CARGO_PKG_NAME"), "_", "price_gauge"); 4 | 5 | pub struct Metrics; 6 | 7 | impl Metrics { 8 | pub fn update(counter: String, token: Token, price: f64) { 9 | increment_counter(counter, token); 10 | set_gauge(token, price) 11 | } 12 | } 13 | 14 | fn increment_counter(counter: String, token: Token) { 15 | metrics::counter!(counter, "token_type" => token.to_string()).increment(1); 16 | } 17 | 18 | fn set_gauge(token: Token, value: f64) { 19 | metrics::gauge!(PRICE_GAUGE, "token_type" => token.to_string()).set(value); 20 | } 21 | -------------------------------------------------------------------------------- /reward_index/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "reward-index" 3 | version = "0.1.0" 4 | description = "Reward Indexer for the Helium Mobile Network" 5 | edition.workspace = true 6 | authors.workspace = true 7 | license.workspace = true 8 | 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | bs58 = { workspace = true } 13 | config = { workspace = true } 14 | clap = { workspace = true } 15 | thiserror = { workspace = true } 16 | serde = { workspace = true } 17 | serde_json = { workspace = true } 18 | sqlx = { workspace = true } 19 | base64 = { workspace = true } 20 | sha2 = { workspace = true } 21 | lazy_static = { workspace = true } 22 | triggered = { workspace = true } 23 | futures = { workspace = true } 24 | futures-util = {
workspace = true } 25 | prost = { workspace = true } 26 | once_cell = { workspace = true } 27 | tokio = { workspace = true } 28 | tracing = { workspace = true } 29 | tracing-subscriber = { workspace = true } 30 | chrono = { workspace = true, features = ["serde"] } 31 | metrics = { workspace = true } 32 | metrics-exporter-prometheus = { workspace = true } 33 | helium-proto = { workspace = true } 34 | helium-crypto = { workspace = true } 35 | rust_decimal = { workspace = true } 36 | rust_decimal_macros = { workspace = true } 37 | tonic = { workspace = true } 38 | rand = { workspace = true } 39 | async-trait = { workspace = true } 40 | humantime-serde = { workspace = true } 41 | 42 | custom-tracing = { path = "../custom_tracing" } 43 | db-store = { path = "../db_store" } 44 | file-store = { path = "../file_store" } 45 | poc-metrics = { path = "../metrics" } 46 | task-manager = { path = "../task_manager" } 47 | -------------------------------------------------------------------------------- /reward_index/README.md: -------------------------------------------------------------------------------- 1 | # Reward Index 2 | 3 | ## IOT 4 | 5 | ### S3 Inputs 6 | 7 | | File Type | Pattern | | 8 | | :--- | :-- | :-- | 9 | | RewardManifest | reward_manifest.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/reward_manifest.proto#L5) | 10 | | GatewayRewardShare | gateway_reward_share.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_lora.proto#L171) | 11 | 12 | ## Mobile 13 | 14 | ### S3 Inputs 15 | 16 | | File Type | Pattern | | 17 | | :--- | :-- | :-- | 18 | | RewardManifest | reward_manifest.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/reward_manifest.proto#L5) | 19 | | RadioRewardShare | radio_reward_share.\* | [Proto](https://github.com/helium/proto/blob/149997d2a74e08679e56c2c892d7e46f2d0d1c46/src/service/poc_mobile.proto#L118) | 20 | 21 | -------------------------------------------------------------------------------- /reward_index/migrations/10_add_service_provider_reward_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE reward_type ADD VALUE 'mobile_service_provider'; 2 | -------------------------------------------------------------------------------- /reward_index/migrations/11_add_mobile_promotion_reward_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE reward_type ADD VALUE 'mobile_promotion'; 2 | -------------------------------------------------------------------------------- /reward_index/migrations/1_setup.sql: -------------------------------------------------------------------------------- 1 | -- This extension gives us `uuid_generate_v1mc()` which generates UUIDs that cluster better than `gen_random_uuid()` 2 | -- while still being difficult to predict and enumerate. 3 | -- Also, while unlikely, `gen_random_uuid()` can in theory produce collisions which can trigger spurious errors on 4 | -- insertion, whereas it's much less likely with `uuid_generate_v1mc()`. 
5 | create extension if not exists "uuid-ossp"; 6 | 7 | create or replace function set_updated_at() 8 | returns trigger as 9 | $$ 10 | begin 11 | NEW.updated_at = now(); 12 | return NEW; 13 | end; 14 | $$ language plpgsql; 15 | 16 | create or replace function trigger_updated_at(tablename regclass) 17 | returns void as 18 | $$ 19 | begin 20 | execute format('CREATE TRIGGER set_updated_at 21 | BEFORE UPDATE 22 | ON %s 23 | FOR EACH ROW 24 | WHEN (OLD is distinct from NEW) 25 | EXECUTE FUNCTION set_updated_at();', tablename); 26 | end; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /reward_index/migrations/2_meta.sql: -------------------------------------------------------------------------------- 1 | create table meta ( 2 | key text primary key not null, 3 | value text 4 | ); 5 | -------------------------------------------------------------------------------- /reward_index/migrations/3_index.sql: -------------------------------------------------------------------------------- 1 | create table reward_index ( 2 | address text primary key not null, 3 | rewards bigint not null default 0, 4 | last_reward timestamptz 5 | ); 6 | -------------------------------------------------------------------------------- /reward_index/migrations/4_files_processed.sql: -------------------------------------------------------------------------------- 1 | create table files_processed ( 2 | file_name varchar primary key, 3 | file_type varchar not null, 4 | file_timestamp timestamptz not null, 5 | processed_at timestamptz not null 6 | ); 7 | 8 | insert into files_processed (file_name, file_type, file_timestamp, processed_at) 9 | select 'migration', 'reward_manifest', to_timestamp(value::decimal / 1000) + interval '30 minutes', NOW() 10 | from meta 11 | where key = 'last_reward_manifest'; 12 | -------------------------------------------------------------------------------- /reward_index/migrations/5_add_type_to_index.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE reward_type as enum('mobile_gateway', 'iot_gateway', 'iot_operational'); 2 | 3 | ALTER TABLE reward_index ADD reward_type reward_type; 4 | -------------------------------------------------------------------------------- /reward_index/migrations/6_add_subscriber_reward_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE reward_type ADD VALUE 'mobile_subscriber'; 2 | -------------------------------------------------------------------------------- /reward_index/migrations/7_files_processed_process_name.sql: -------------------------------------------------------------------------------- 1 | alter table files_processed add column process_name text not null default 'default'; 2 | -------------------------------------------------------------------------------- /reward_index/migrations/8_add_mobile_unallocated_reward_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE reward_type ADD VALUE 'mobile_unallocated'; 2 | -------------------------------------------------------------------------------- /reward_index/migrations/9_add_iot_unallocated_reward_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TYPE reward_type ADD VALUE 'iot_unallocated'; 2 | -------------------------------------------------------------------------------- /reward_index/pkg/settings-template.toml: 
-------------------------------------------------------------------------------- 1 | 2 | # log settings for the application (RUST_LOG format). Default below 3 | # 4 | # log = "reward_index=debug" 5 | 6 | # Interval for checking verifier bucket (in seconds). Default below (15 minutes) 7 | # 8 | # interval = "15 minutes" 9 | 10 | # Mode to operate the indexer in. "iot" or "mobile" 11 | # 12 | mode = "iot" 13 | 14 | # Operation Fund key is required when mode = "iot" 15 | # 16 | operation_fund_key = "iot-operation-fund-key" 17 | 18 | # Unallocated reward entity key is always required 19 | # 20 | unallocated_reward_entity_key = "unallocated-reward-entity-key" 21 | 22 | # 23 | [database] 24 | 25 | # Postgres Connection Information 26 | host = "127.0.0.1" 27 | port = 5432 28 | username = "postgres" 29 | database = "reward_index" 30 | 31 | auth_type = "iam" 32 | # IAM Role to assume to generate db auth token 33 | 34 | iam_role_arn = "arn::iam" 35 | iam_role_session_name = "role-session-name" 36 | iam_duration_seconds = 900 37 | iam_region = "us-west-2" 38 | 39 | # Max connections to the database. 40 | max_connections = 10 41 | 42 | [verifier] 43 | # Input bucket details for verified reward share data 44 | 45 | # Name of bucket to access verified data. Required 46 | # 47 | bucket = "mainnet-verified-bucket" 48 | 49 | # Region for bucket. Defaults to below 50 | # 51 | # region = "us-west-2" 52 | 53 | # Optional URL for AWS api endpoint. Inferred from aws config settings or aws 54 | # IAM context by default 55 | # 56 | # endpoint = "https://aws-s3-bucket.aws.com" 57 | 58 | 59 | [metrics] 60 | 61 | # Endpoint for metrics. Default below 62 | # 63 | # endpoint = "127.0.0.1:19000" 64 | -------------------------------------------------------------------------------- /reward_index/src/db.rs: -------------------------------------------------------------------------------- 1 | use crate::indexer::RewardType; 2 | use chrono::{DateTime, Utc}; 3 | 4 | pub async fn insert<'c, E>( 5 | executor: E, 6 | address: String, 7 | amount: u64, 8 | reward_type: RewardType, 9 | timestamp: &DateTime, 10 | ) -> Result<(), sqlx::Error> 11 | where 12 | E: sqlx::Executor<'c, Database = sqlx::Postgres>, 13 | { 14 | // Safeguard against 0 amount shares updating the last rewarded timestamp 15 | if amount == 0 { 16 | return Ok(()); 17 | } 18 | 19 | sqlx::query( 20 | r#" 21 | insert into reward_index ( 22 | address, 23 | rewards, 24 | last_reward, 25 | reward_type 26 | ) values ($1, $2, $3, $4) 27 | on conflict(address) do update set 28 | rewards = reward_index.rewards + EXCLUDED.rewards, 29 | last_reward = EXCLUDED.last_reward 30 | "#, 31 | ) 32 | .bind(address) 33 | .bind(amount as i64) 34 | .bind(timestamp) 35 | .bind(reward_type) 36 | .execute(executor) 37 | .await?; 38 | 39 | Ok(()) 40 | } 41 | -------------------------------------------------------------------------------- /reward_index/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod db; 2 | pub mod extract; 3 | pub mod indexer; 4 | pub mod settings; 5 | pub mod telemetry; 6 | 7 | pub use indexer::Indexer; 8 | pub use settings::Settings; 9 | -------------------------------------------------------------------------------- /reward_index/src/telemetry.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, TimeZone, Utc}; 2 | use db_store::meta; 3 | use sqlx::{Pool, Postgres}; 4 | 5 | const LAST_REWARD_PROCESSED_TIME: &str = "last_reward_processed_time"; 6 | 7 | pub 
--------------------------------------------------------------------------------
/reward_index/src/lib.rs:
--------------------------------------------------------------------------------
pub mod db;
pub mod extract;
pub mod indexer;
pub mod settings;
pub mod telemetry;

pub use indexer::Indexer;
pub use settings::Settings;
--------------------------------------------------------------------------------
/reward_index/src/telemetry.rs:
--------------------------------------------------------------------------------
use chrono::{DateTime, TimeZone, Utc};
use db_store::meta;
use sqlx::{Pool, Postgres};

const LAST_REWARD_PROCESSED_TIME: &str = "last_reward_processed_time";

pub async fn initialize(db: &Pool<Postgres>) -> anyhow::Result<()> {
    match meta::fetch(db, LAST_REWARD_PROCESSED_TIME).await {
        Ok(timestamp) => last_reward_processed_time(db, to_datetime(timestamp)?).await,
        Err(db_store::Error::NotFound(_)) => Ok(()),
        Err(err) => Err(err.into()),
    }
}

pub async fn last_reward_processed_time(
    db: &Pool<Postgres>,
    datetime: DateTime<Utc>,
) -> anyhow::Result<()> {
    metrics::gauge!(LAST_REWARD_PROCESSED_TIME).set(datetime.timestamp() as f64);
    meta::store(db, LAST_REWARD_PROCESSED_TIME, datetime.timestamp()).await?;

    Ok(())
}

fn to_datetime(timestamp: i64) -> anyhow::Result<DateTime<Utc>> {
    Utc.timestamp_opt(timestamp, 0)
        .single()
        .ok_or_else(|| anyhow::anyhow!("Unable to decode timestamp"))
}
--------------------------------------------------------------------------------
/reward_index/tests/integrations/common/mod.rs:
--------------------------------------------------------------------------------
use chrono::{DateTime, Duration, DurationRound, Utc};
use file_store::{traits::MsgBytes, BytesMutStream};
use futures::{stream, StreamExt};
use prost::bytes::BytesMut;
use reward_index::indexer::RewardType;
use sqlx::{postgres::PgRow, FromRow, PgPool, Row};

pub fn bytes_mut_stream<T: MsgBytes>(els: Vec<T>) -> BytesMutStream {
    BytesMutStream::from(
        stream::iter(els)
            .map(|el| el.as_bytes())
            .map(|el| BytesMut::from(el.as_ref()))
            .map(Ok)
            .boxed(),
    )
}

// When retrieving a timestamp from the DB, the timestamp may be truncated,
// depending on the version of Postgres. When comparing such datetimes to ones
// generated in a test with `Utc::now()`, truncate the test value first.
pub fn nanos_trunc(ts: DateTime<Utc>) -> DateTime<Utc> {
    ts.duration_trunc(Duration::nanoseconds(1000)).unwrap()
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DbReward {
    pub address: String,
    pub rewards: u64,
    pub last_reward: DateTime<Utc>,
    pub reward_type: RewardType,
}

impl FromRow<'_, PgRow> for DbReward {
    fn from_row(row: &PgRow) -> Result<Self, sqlx::Error> {
        Ok(Self {
            address: row.get("address"),
            rewards: row.get::<i64, _>("rewards") as u64,
            last_reward: row.try_get("last_reward")?,
            reward_type: row.try_get("reward_type")?,
        })
    }
}

pub async fn get_reward(
    pool: &PgPool,
    key: &str,
    reward_type: RewardType,
) -> anyhow::Result<DbReward> {
    let reward: DbReward = sqlx::query_as(
        r#"
        SELECT *
        FROM reward_index
        WHERE address = $1 AND reward_type = $2
        "#,
    )
    .bind(key)
    .bind(reward_type)
    .fetch_one(pool)
    .await?;

    Ok(reward)
}
--------------------------------------------------------------------------------
/reward_index/tests/integrations/main.rs:
--------------------------------------------------------------------------------
mod common;

mod iot;
mod mobile;
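The comment on `nanos_trunc` above is worth spelling out: Postgres `timestamptz` stores microsecond precision while `Utc::now()` carries nanoseconds, so a value does not round-trip bit-for-bit. A sketch of the intended assertion pattern; the round-trip query is illustrative rather than taken from the tests:

// Illustrative round trip: write a nanosecond-precision timestamp, read it
// back at Postgres's microsecond precision, and truncate before comparing.
async fn timestamp_roundtrip(pool: &sqlx::PgPool) -> anyhow::Result<()> {
    use chrono::{DateTime, Utc};

    let written = Utc::now();
    let (read,): (DateTime<Utc>,) = sqlx::query_as("select $1::timestamptz")
        .bind(written)
        .fetch_one(pool)
        .await?;
    // Without nanos_trunc this can fail whenever `written` had
    // sub-microsecond precision to lose.
    assert_eq!(nanos_trunc(written), nanos_trunc(read));
    Ok(())
}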
--------------------------------------------------------------------------------
/reward_scheduler/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "reward-scheduler"
version = "0.1.0"
description = "Reward period scheduler for verifiers"
edition.workspace = true
authors.workspace = true
license.workspace = true

[dependencies]
chrono = { workspace = true }
thiserror = { workspace = true }
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
[toolchain]
channel = "1.85.0"
components = ["rustfmt", "clippy"]
--------------------------------------------------------------------------------
/solana/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "solana"
version = "0.1.0"
description = "Solana integration for Helium Oracles"
edition.workspace = true
authors.workspace = true
license.workspace = true

[dependencies]
anyhow = { workspace = true }
async-trait = { workspace = true }
clap = { workspace = true }
chrono = { workspace = true }
file-store = { path = "../file_store" }
futures = { workspace = true }
helium-crypto = { workspace = true }
itertools = { workspace = true }
metrics = { workspace = true }
serde = { workspace = true }
sha2 = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
spl-token = { version = "8" }
sqlx = { workspace = true }

helium-lib = { git = "https://github.com/helium/helium-wallet-rs", branch = "master" }
--------------------------------------------------------------------------------
/solana/src/carrier.rs:
--------------------------------------------------------------------------------
use crate::SolanaRpcError;
use helium_lib::{
    anchor_lang::AccountDeserialize,
    programs::{
        helium_sub_daos,
        mobile_entity_manager::{self, accounts::CarrierV0},
    },
    solana_client::nonblocking::rpc_client::RpcClient,
    solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey},
};
use serde::Deserialize;

pub struct SolanaRpc {
    provider: RpcClient,
    sub_dao: Pubkey,
}

#[derive(Debug, Deserialize, Clone)]
pub struct Settings {
    rpc_url: String,
    dnt_mint: String,
}

impl SolanaRpc {
    pub fn new(settings: &Settings) -> Result<Self, SolanaRpcError> {
        let dnt_mint: Pubkey = settings.dnt_mint.parse()?;
        let (sub_dao, _) = Pubkey::find_program_address(
            &["sub_dao".as_bytes(), dnt_mint.as_ref()],
            &helium_sub_daos::ID,
        );
        let provider =
            RpcClient::new_with_commitment(settings.rpc_url.clone(), CommitmentConfig::finalized());
        Ok(Self { provider, sub_dao })
    }

    pub async fn fetch_incentive_escrow_fund_bps(
        &self,
        network_name: &str,
    ) -> Result<u16, SolanaRpcError> {
        let (carrier_pda, _) = Pubkey::find_program_address(
            &[
                "carrier".as_bytes(),
                self.sub_dao.as_ref(),
                network_name.as_bytes(),
            ],
            &mobile_entity_manager::ID,
        );
        let carrier_data = self.provider.get_account_data(&carrier_pda).await?;
        let mut carrier_data = carrier_data.as_ref();
        let carrier = CarrierV0::try_deserialize(&mut carrier_data)?;

        Ok(carrier.incentive_escrow_fund_bps)
    }
}
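`SolanaRpc` resolves the sub-DAO PDA once at construction from the configured DNT mint, then derives each carrier PDA from the seeds `["carrier", sub_dao, network_name]`, matching the on-chain program. A usage sketch, assuming the crate exposes this module as `solana::carrier` and that `"helium-mobile"` is a valid carrier name (both are assumptions, not taken from the code shown):

// Sketch only: `settings` would normally come from the service's config
// loader (Settings derives Deserialize); the network name is illustrative.
async fn carrier_bps(settings: &solana::carrier::Settings) -> anyhow::Result<()> {
    let rpc = solana::carrier::SolanaRpc::new(settings)?;
    let bps = rpc.fetch_incentive_escrow_fund_bps("helium-mobile").await?;
    println!("incentive escrow fund: {bps} bps = {}%", bps as f64 / 100.0);
    Ok(())
}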
--------------------------------------------------------------------------------
/solana/src/main.rs:
--------------------------------------------------------------------------------
use clap::{Parser, ValueEnum};
use helium_crypto::{PublicKey, PublicKeyBinary};
use helium_lib::programs::data_credits;
use sha2::{Digest, Sha256};
use solana::SolPubkey;

#[derive(Parser)]
#[clap(about = "Look up the Delegated Data Credit account for a Helium router key")]
struct Cli {
    #[clap(value_enum)]
    mode: Dnt,
    payer: PublicKey,
}

#[derive(ValueEnum, Clone)]
enum Dnt {
    Mobile,
    Iot,
}

fn main() {
    let Cli { mode, payer } = Cli::parse();
    let sub_dao: SolPubkey = match mode {
        Dnt::Mobile => "Gm9xDCJawDEKDrrQW6haw94gABaYzQwCq4ZQU8h8bd22"
            .parse()
            .unwrap(),
        Dnt::Iot => "39Lw1RH6zt8AJvKn3BTxmUDofzduCM2J3kSaGDZ8L7Sk"
            .parse()
            .unwrap(),
    };
    let payer = PublicKeyBinary::from(payer);
    let mut hasher = Sha256::new();
    hasher.update(payer.to_string());
    let sha_digest = hasher.finalize();
    let (ddc_key, _) = SolPubkey::find_program_address(
        &[
            "delegated_data_credits".as_bytes(),
            sub_dao.as_ref(),
            &sha_digest,
        ],
        &data_credits::ID,
    );
    println!("https://explorer.solana.com/address/{ddc_key}");
}
--------------------------------------------------------------------------------
/task_manager/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "task-manager"
version = "0.1.0"
description = "Task Manager"
edition.workspace = true
authors.workspace = true
license.workspace = true

[dependencies]
anyhow = { workspace = true }
tokio = { workspace = true }
futures = { workspace = true }
futures-util = { workspace = true }
triggered = { workspace = true }
--------------------------------------------------------------------------------
/task_manager/src/select_all.rs:
--------------------------------------------------------------------------------
use core::mem;
use core::pin::Pin;
use futures::{
    future::FutureExt,
    task::{Context, Poll},
    Future,
};

// This is a copy of the select_all function from futures::future::select_all. The
// only difference is the change from swap_remove to remove so the order of the
// inner Vec is preserved.

#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct SelectAll<Fut> {
    inner: Vec<Fut>,
}

pub fn select_all<I>(iter: I) -> SelectAll<I::Item>
where
    I: IntoIterator,
    I::Item: Future + Unpin,
{
    SelectAll {
        inner: iter.into_iter().collect(),
    }
}

impl<Fut> SelectAll<Fut> {
    /// Consumes this combinator, returning the underlying futures.
    pub fn into_inner(self) -> Vec<Fut> {
        self.inner
    }
}

impl<Fut: Future + Unpin> Future for SelectAll<Fut> {
    type Output = (Fut::Output, usize, Vec<Fut>);

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let item = self
            .inner
            .iter_mut()
            .enumerate()
            .find_map(|(i, f)| match f.poll_unpin(cx) {
                Poll::Pending => None,
                Poll::Ready(e) => Some((i, e)),
            });
        match item {
            Some((idx, res)) => {
                #[allow(clippy::let_underscore_future)]
                let _ = self.inner.remove(idx);
                let rest = mem::take(&mut self.inner);
                Poll::Ready((res, idx, rest))
            }
            None => Poll::Pending,
        }
    }
}
--------------------------------------------------------------------------------
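The point of swapping `swap_remove` for `remove` in the copy above: when a future completes, the surviving futures keep their original relative order, so the returned index and `rest` vector stay meaningful to callers that track tasks by position. A sketch, assuming the module is exported as `task_manager::select_all` (the crate's lib.rs is not shown here):

// Illustrative only: all three futures are immediately ready, so the
// lowest-indexed one wins and `rest` preserves the original order.
use std::future::Future;
use std::pin::Pin;

use task_manager::select_all::select_all;

#[tokio::main]
async fn main() {
    let futs: Vec<Pin<Box<dyn Future<Output = &'static str>>>> = vec![
        Box::pin(async { "a" }),
        Box::pin(async { "b" }),
        Box::pin(async { "c" }),
    ];
    let (res, idx, rest) = select_all(futs).await;
    assert_eq!((res, idx), ("a", 0));
    // With futures::future::select_all, `rest` could have been reordered by
    // swap_remove; here "b" still precedes "c", matching the original Vec.
    assert_eq!(rest.len(), 2);
}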