├── .dockerignore ├── .github └── workflows │ ├── build_docker.yml │ ├── build_tools.yml │ ├── codeql_analysis.yml │ ├── docker-network-health.yml │ ├── docker-network-tests-nightly.yml │ ├── feature-network-deploy.yml │ ├── gendoc.yml │ ├── golangci-lint.yml │ ├── release.yml │ ├── unit-test-nightly.yml │ └── unit-test.yml ├── .gitignore ├── .golangci.yml ├── Dockerfile ├── Dockerfile.dev ├── LICENSE ├── Makefile ├── README.md ├── build_docker.sh ├── components ├── app │ └── app.go ├── dashboard_metrics │ ├── component.go │ ├── database.go │ ├── gossip.go │ ├── info.go │ ├── params.go │ └── types.go ├── debugapi │ ├── blocks.go │ ├── commitment.go │ ├── component.go │ ├── debug_models.go │ ├── node.go │ ├── params.go │ └── transactions.go ├── inx │ ├── component.go │ ├── params.go │ ├── rangesend.go │ ├── rangesend_template.go │ ├── server.go │ ├── server_accounts.go │ ├── server_api.go │ ├── server_blocks.go │ ├── server_commitments.go │ ├── server_issuance.go │ ├── server_node.go │ ├── server_transactions.go │ └── server_utxo.go ├── metricstracker │ ├── component.go │ ├── metricstracker.go │ └── params.go ├── p2p │ ├── component.go │ └── params.go ├── prometheus │ ├── collector │ │ ├── collection.go │ │ ├── collector.go │ │ └── metric.go │ ├── component.go │ ├── metrics_accounts.go │ ├── metrics_commitments.go │ ├── metrics_conflicts.go │ ├── metrics_db.go │ ├── metrics_info.go │ ├── metrics_scheduler.go │ ├── metrics_slots.go │ ├── metrics_tangle.go │ └── params.go ├── protocol │ ├── component.go │ └── params.go └── restapi │ ├── api.go │ ├── component.go │ ├── core │ ├── accounts.go │ ├── blocks.go │ ├── component.go │ ├── node.go │ ├── transaction.go │ └── utxo.go │ ├── management │ ├── component.go │ ├── peers.go │ ├── pruning.go │ └── snapshots.go │ ├── params.go │ └── routes.go ├── config.json ├── config_defaults.json ├── deploy └── ansible │ ├── deploy.yml │ ├── deploy_cores.yml │ ├── hosts │ └── feature.yml │ ├── roles │ ├── exporter │ │ ├── files │ │ │ 
└── docker-compose.yml │ │ └── tasks │ │ │ └── main.yml │ ├── firewall │ │ ├── files │ │ │ └── after.rules │ │ └── tasks │ │ │ └── main.yml │ ├── iota-core-node │ │ ├── files │ │ │ └── config.json │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── docker-compose-iota-core.yml.j2 │ ├── metrics │ │ ├── files │ │ │ ├── elasticsearch.yml │ │ │ └── grafana │ │ │ │ ├── grafana.ini │ │ │ │ └── provisioning │ │ │ │ ├── dashboards │ │ │ │ ├── global_metrics.json │ │ │ │ ├── go_metrics.json │ │ │ │ ├── iota-core_monitoring.json │ │ │ │ ├── local_dashboard.json │ │ │ │ ├── node-exporter.json │ │ │ │ ├── prometheus.yml │ │ │ │ ├── scheduler-metrics.json │ │ │ │ └── slot_metrics.json │ │ │ │ ├── datasources │ │ │ │ └── prometheus.yml │ │ │ │ └── notifiers │ │ │ │ └── .gitkeep │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── docker-compose.yml.j2 │ │ │ ├── grafana-admin-password.j2 │ │ │ ├── kibana.yml.j2 │ │ │ ├── logstash │ │ │ ├── logstash.yml.j2 │ │ │ └── pipeline │ │ │ │ └── logstash.conf.j2 │ │ │ └── prometheus.yml.j2 │ └── wireguard │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ └── wg0.conf.j2 │ └── run.sh ├── documentation └── configuration.md ├── go.mod ├── go.sum ├── main.go ├── out.log ├── peering.json ├── pkg ├── core │ ├── acceptance │ │ ├── state.go │ │ └── threshold_provider.go │ ├── account │ │ ├── accounts.go │ │ ├── accounts_test.go │ │ ├── pool.go │ │ ├── seated_accounts.go │ │ └── seated_accounts_test.go │ ├── buffer │ │ └── unsolid_commitment_buffer.go │ ├── promise │ │ └── promise.go │ ├── types │ │ └── unique_id.go │ ├── vote │ │ ├── mocked_rank.go │ │ └── vote.go │ └── weight │ │ ├── comparison.go │ │ ├── value.go │ │ └── weight.go ├── daemon │ └── shutdown.go ├── jwt │ └── jwt.go ├── libp2putil │ └── io.go ├── metrics │ ├── database_metrics.go │ └── server_metrics.go ├── model │ ├── account_diff.go │ ├── block.go │ ├── commitment.go │ ├── eviction_index.go │ ├── parents.go │ ├── 
poolstats.go │ ├── pruning_index.go │ ├── signaled_block.go │ ├── validator_performance.go │ └── version_and_hash.go ├── network │ ├── endpoint.go │ ├── errors.go │ ├── filter.go │ ├── manager.go │ ├── neighbor.go │ ├── p2p │ │ ├── autopeering │ │ │ └── autopeering.go │ │ ├── config_manager.go │ │ ├── manager.go │ │ ├── manualpeering │ │ │ └── manualpeering.go │ │ ├── metrics.go │ │ ├── neighbor.go │ │ ├── neighbor_test.go │ │ ├── packetstream.go │ │ ├── peerconfig.go │ │ └── proto │ │ │ ├── negotiation.pb.go │ │ │ └── negotiation.proto │ ├── peer.go │ └── protocols │ │ └── core │ │ ├── events.go │ │ ├── models │ │ ├── message.pb.go │ │ └── message.proto │ │ ├── protocol.go │ │ └── warp_sync.go ├── protocol │ ├── attestations.go │ ├── blocks.go │ ├── chain.go │ ├── chains.go │ ├── commitment.go │ ├── commitment_verifier.go │ ├── commitments.go │ ├── engine │ │ ├── accounts │ │ │ ├── accounts.go │ │ │ ├── accountsledger │ │ │ │ ├── manager.go │ │ │ │ ├── manager_test.go │ │ │ │ ├── snapshot.go │ │ │ │ ├── snapshot_test.go │ │ │ │ └── testsuite_test.go │ │ │ ├── credits.go │ │ │ ├── mana.go │ │ │ └── mana │ │ │ │ ├── manager.go │ │ │ │ └── manager_test.go │ │ ├── attestation │ │ │ ├── attestations.go │ │ │ └── slotattestation │ │ │ │ ├── manager.go │ │ │ │ ├── manager_test.go │ │ │ │ ├── snapshot.go │ │ │ │ ├── storage.go │ │ │ │ └── testframework_test.go │ │ ├── blockdag │ │ │ ├── blockdag.go │ │ │ ├── events.go │ │ │ └── inmemoryblockdag │ │ │ │ └── blockdag.go │ │ ├── blocks │ │ │ ├── block.go │ │ │ └── blocks.go │ │ ├── booker │ │ │ ├── booker.go │ │ │ ├── events.go │ │ │ └── inmemorybooker │ │ │ │ └── booker.go │ │ ├── clock │ │ │ ├── blocktime │ │ │ │ ├── clock.go │ │ │ │ └── relativetime.go │ │ │ ├── clock.go │ │ │ ├── events.go │ │ │ └── relativetime.go │ │ ├── commitment_api.go │ │ ├── congestioncontrol │ │ │ ├── rmc │ │ │ │ └── rmc.go │ │ │ └── scheduler │ │ │ │ ├── drr │ │ │ │ ├── basicbuffer.go │ │ │ │ ├── issuerqueue.go │ │ │ │ ├── scheduler.go │ │ │ │ 
├── validatorbuffer.go │ │ │ │ └── validatorqueue.go │ │ │ │ ├── events.go │ │ │ │ ├── passthrough │ │ │ │ └── scheduler.go │ │ │ │ └── scheduler.go │ │ ├── consensus │ │ │ ├── blockgadget │ │ │ │ ├── events.go │ │ │ │ ├── gadget.go │ │ │ │ ├── gadget_test.go │ │ │ │ ├── testframework_test.go │ │ │ │ └── thresholdblockgadget │ │ │ │ │ ├── acceptance_ratification.go │ │ │ │ │ ├── confirmation_ratification.go │ │ │ │ │ ├── gadget.go │ │ │ │ │ ├── options.go │ │ │ │ │ └── witness_weight.go │ │ │ └── slotgadget │ │ │ │ ├── events.go │ │ │ │ ├── slotgadget.go │ │ │ │ └── totalweightslotgadget │ │ │ │ ├── gadget.go │ │ │ │ └── options.go │ │ ├── engine.go │ │ ├── events.go │ │ ├── eviction │ │ │ ├── state.go │ │ │ ├── state_test.go │ │ │ └── testframework_test.go │ │ ├── filter │ │ │ ├── postsolidfilter │ │ │ │ ├── events.go │ │ │ │ ├── post_solid_filter.go │ │ │ │ └── postsolidblockfilter │ │ │ │ │ ├── post_solid_block_filter.go │ │ │ │ │ └── post_solid_block_filter_test.go │ │ │ └── presolidfilter │ │ │ │ ├── events.go │ │ │ │ ├── pre_solid_filter.go │ │ │ │ └── presolidblockfilter │ │ │ │ ├── pre_solid_block_filter.go │ │ │ │ └── pre_solid_block_filter_test.go │ │ ├── inspection.go │ │ ├── ledger │ │ │ ├── blockvoterank.go │ │ │ ├── events.go │ │ │ ├── ledger.go │ │ │ ├── ledger │ │ │ │ ├── ledger.go │ │ │ │ └── vm.go │ │ │ └── tests │ │ │ │ ├── output.go │ │ │ │ ├── state.go │ │ │ │ └── state_resolver.go │ │ ├── mempool │ │ │ ├── errors.go │ │ │ ├── mempool.go │ │ │ ├── signed_transaction_metadata.go │ │ │ ├── spenddag │ │ │ │ ├── constraints.go │ │ │ │ ├── errors.go │ │ │ │ ├── events.go │ │ │ │ ├── spenddag.go │ │ │ │ ├── spenddagv1 │ │ │ │ │ ├── sorted_spender.go │ │ │ │ │ ├── sorted_spenders.go │ │ │ │ │ ├── sorted_spenders_test.go │ │ │ │ │ ├── spend_set.go │ │ │ │ │ ├── spend_set_test.go │ │ │ │ │ ├── spenddag.go │ │ │ │ │ ├── spenddag_test.go │ │ │ │ │ ├── spender.go │ │ │ │ │ ├── spender_test.go │ │ │ │ │ └── utils.go │ │ │ │ └── tests │ │ │ │ │ ├── 
accounts_framework.go │ │ │ │ │ ├── assertions.go │ │ │ │ │ ├── framework.go │ │ │ │ │ └── tests.go │ │ │ ├── state.go │ │ │ ├── state_diff.go │ │ │ ├── state_id.go │ │ │ ├── state_metadata.go │ │ │ ├── state_reference.go │ │ │ ├── state_resolver.go │ │ │ ├── tests │ │ │ │ ├── testframework.go │ │ │ │ ├── tests.go │ │ │ │ ├── transaction.go │ │ │ │ └── vm.go │ │ │ ├── transaction.go │ │ │ ├── transaction_metadata.go │ │ │ ├── v1 │ │ │ │ ├── inclusion_flags.go │ │ │ │ ├── mempool.go │ │ │ │ ├── mempool_test.go │ │ │ │ ├── signed_transaction_metadata.go │ │ │ │ ├── state_diff.go │ │ │ │ ├── state_metadata.go │ │ │ │ ├── transaction_metadata.go │ │ │ │ └── transaction_metadata_test.go │ │ │ └── vm.go │ │ ├── notarization │ │ │ ├── events.go │ │ │ ├── notarization.go │ │ │ └── slotnotarization │ │ │ │ ├── manager.go │ │ │ │ └── slotmutations.go │ │ ├── syncmanager │ │ │ ├── events.go │ │ │ ├── syncmanager.go │ │ │ └── trivialsyncmanager │ │ │ │ └── syncmanager.go │ │ ├── tipmanager │ │ │ ├── events.go │ │ │ ├── tests │ │ │ │ ├── testframework.go │ │ │ │ └── tipmanager_test.go │ │ │ ├── tip_manager.go │ │ │ ├── tip_metadata.go │ │ │ ├── tip_pool.go │ │ │ └── v1 │ │ │ │ ├── provider.go │ │ │ │ ├── tip_manager.go │ │ │ │ └── tip_metadata.go │ │ ├── tipselection │ │ │ ├── tipselection.go │ │ │ └── v1 │ │ │ │ ├── provider.go │ │ │ │ ├── test_framework_test.go │ │ │ │ ├── tip_selection.go │ │ │ │ └── tip_selection_test.go │ │ ├── upgrade │ │ │ ├── orchestrator.go │ │ │ └── signalingupgradeorchestrator │ │ │ │ ├── options.go │ │ │ │ ├── orchestrator.go │ │ │ │ ├── snapshot.go │ │ │ │ └── storage.go │ │ └── utxoledger │ │ │ ├── database_prefixes.go │ │ │ ├── iteration.go │ │ │ ├── iteration_test.go │ │ │ ├── kvstorable.go │ │ │ ├── manager.go │ │ │ ├── manager_test.go │ │ │ ├── output.go │ │ │ ├── output_test.go │ │ │ ├── slot_diff.go │ │ │ ├── slot_diff_test.go │ │ │ ├── snapshot.go │ │ │ ├── snapshot_test.go │ │ │ ├── spent.go │ │ │ ├── spent_status.go │ │ │ ├── 
state_tree.go │ │ │ └── tpkg │ │ │ ├── equal.go │ │ │ └── random.go │ ├── engines.go │ ├── errors.go │ ├── events.go │ ├── inspection.go │ ├── network.go │ ├── options.go │ ├── protocol.go │ ├── sybilprotection │ │ ├── activitytracker │ │ │ ├── activitytracker.go │ │ │ ├── activitytrackerv1 │ │ │ │ └── activitytracker.go │ │ │ └── events.go │ │ ├── events.go │ │ ├── seatmanager │ │ │ ├── events.go │ │ │ ├── mock │ │ │ │ └── mockseatmanager.go │ │ │ ├── poa │ │ │ │ ├── options.go │ │ │ │ └── poa.go │ │ │ ├── seatmanager.go │ │ │ └── topstakers │ │ │ │ ├── options.go │ │ │ │ ├── topstakers.go │ │ │ │ └── topstakers_test.go │ │ ├── sybilprotection.go │ │ └── sybilprotectionv1 │ │ │ ├── performance │ │ │ ├── performance.go │ │ │ ├── rewards.go │ │ │ ├── snapshot.go │ │ │ ├── snapshot_test.go │ │ │ ├── testsuite_test.go │ │ │ └── tracker_test.go │ │ │ └── sybilprotection.go │ ├── utils.go │ ├── versioning.go │ └── warp_sync.go ├── requesthandler │ ├── accounts.go │ ├── blockissuance.go │ ├── blocks.go │ ├── cache │ │ └── cache.go │ ├── commitments.go │ ├── node.go │ ├── requesthandler.go │ ├── requesthandler_test.go │ ├── transaction.go │ └── utxo.go ├── restapi │ ├── proxy.go │ ├── route_manager.go │ └── utils.go ├── retainer │ ├── blockretainer │ │ ├── block_retainer.go │ │ ├── block_retainer_cache.go │ │ └── tests │ │ │ ├── blockreatainer_test.go │ │ │ └── testframework.go │ ├── events.go │ ├── retainer.go │ └── txretainer │ │ ├── testsuite_test.go │ │ ├── tx_retainer.go │ │ ├── tx_retainer_cache.go │ │ ├── tx_retainer_cache_test.go │ │ ├── tx_retainer_database.go │ │ ├── tx_retainer_database_test.go │ │ └── tx_retainer_test.go ├── storage │ ├── clonablesql │ │ └── clonable_sqlite.go │ ├── database │ │ ├── config.go │ │ ├── database.go │ │ ├── db_instance.go │ │ ├── errors.go │ │ ├── lockedkvstore.go │ │ ├── openablekvstore.go │ │ └── rocksdb.go │ ├── options.go │ ├── permanent │ │ ├── commitments.go │ │ ├── options.go │ │ ├── permanent.go │ │ └── settings.go │ ├── 
prunable │ │ ├── bucket_manager.go │ │ ├── bucketed_kvstore.go │ │ ├── epochstore │ │ │ ├── base_store.go │ │ │ ├── cached_store.go │ │ │ ├── constants.go │ │ │ ├── epoch_kv.go │ │ │ └── store.go │ │ ├── options.go │ │ ├── prunable.go │ │ ├── prunable_epoch.go │ │ ├── prunable_slot.go │ │ ├── slotstore │ │ │ ├── accountdiffs.go │ │ │ ├── block_metadata.go │ │ │ ├── blocks.go │ │ │ └── store.go │ │ └── utils.go │ ├── storage.go │ ├── storage_clonable_sql.go │ ├── storage_permanent.go │ ├── storage_prunable.go │ ├── storage_pruning.go │ ├── storage_test.go │ ├── testframework_test.go │ └── utils │ │ └── directory.go ├── tests │ ├── accounts_test.go │ ├── big_committee_test.go │ ├── blocktime_monotonicity_test.go │ ├── booker_test.go │ ├── combined_account_transition_test.go │ ├── committee_rotation_test.go │ ├── confirmation_state_test.go │ ├── engine_check_ledger_state_commitment_test.go │ ├── loss_of_acceptance_test.go │ ├── mempool_invalid_signatures_test.go │ ├── out.log │ ├── protocol_engine_rollback_test.go │ ├── protocol_engine_switching_test.go │ ├── protocol_eviction_test.go │ ├── protocol_startup_test.go │ ├── reward_test.go │ ├── upgrade_signaling_test.go │ └── validator_test.go ├── testsuite │ ├── accounts.go │ ├── attestations.go │ ├── blocks.go │ ├── blocks_retainer.go │ ├── chains.go │ ├── depositcalculator │ │ ├── depositcalculator.go │ │ └── depositcalculator_test.go │ ├── eviction.go │ ├── fork.go │ ├── mock │ │ ├── block_params.go │ │ ├── blockissuer.go │ │ ├── blockissuer_acceptance_loss.go │ │ ├── client.go │ │ ├── network.go │ │ ├── node.go │ │ ├── utils.go │ │ ├── wallet.go │ │ ├── wallet_blocks.go │ │ └── wallet_transactions.go │ ├── node_state.go │ ├── snapshotcreator │ │ ├── options.go │ │ └── snapshotcreator.go │ ├── spenders.go │ ├── storage_accountdiffs.go │ ├── storage_blocks.go │ ├── storage_commitments.go │ ├── storage_prunable.go │ ├── storage_rootblocks.go │ ├── storage_settings.go │ ├── sybilprotection.go │ ├── testsuite.go │ ├── 
testsuite_issue_blocks.go │ ├── testsuite_options.go │ ├── tips.go │ ├── transactions.go │ ├── upgrades.go │ └── workscore_regression_test.go ├── toolset │ ├── benchmark.go │ ├── benchmark_cpu.go │ ├── benchmark_io.go │ ├── ed25519.go │ ├── jwt.go │ ├── node_info.go │ ├── p2p_identity_extract.go │ ├── p2p_identity_gen.go │ ├── pwd_hash.go │ └── toolset.go └── votes │ ├── constants.go │ ├── slottracker │ └── slottracker.go │ └── utils.go ├── scripts ├── build.sh ├── gendoc.sh ├── get_hive.sh ├── go_mod_tidy.sh └── snapgen.sh └── tools ├── docker-network ├── .env ├── .gitignore ├── config.json ├── docker-compose.yml ├── grafana │ ├── grafana.ini │ └── provisioning │ │ ├── dashboards │ │ ├── commitments-overview.json │ │ ├── go_metrics.json │ │ ├── iota-core_monitoring.json │ │ ├── local_dashboard.json │ │ ├── prometheus.yml │ │ ├── scheduler-metrics.json │ │ └── slot_metrics.json │ │ └── datasources │ │ └── datasources.yaml ├── prometheus.yml ├── restart.sh ├── run.sh ├── run_dev.sh └── tests │ ├── README.md │ ├── accounttransition_test.go │ ├── api_core_test.go │ ├── api_management_test.go │ ├── committeerotation_test.go │ ├── dockertestframework │ ├── accounts.go │ ├── asserts.go │ ├── awaits.go │ ├── blocks.go │ ├── clock.go │ ├── faucet.go │ ├── framework.go │ ├── framework_eventapi.go │ ├── misc.go │ ├── nodes.go │ ├── options.go │ ├── rewards.go │ ├── utils.go │ └── validator.go │ ├── eventapi_test.go │ ├── mempool_invalid_signatures_test.go │ ├── nil_payload_test.go │ ├── rewards_test.go │ ├── run_tests.sh │ └── sync_snapshot_test.go ├── gendoc ├── configuration_header.md ├── go.mod ├── go.sum └── main.go └── genesis-snapshot ├── .gitignore ├── main.go └── presets ├── presets.go └── presets_yaml.go /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .gitignore 3 | .github/ 4 | docs/ 5 | .idea/ 6 | .vscode/ 7 | 8 | LICENSE 9 | README.md 10 | CHANGELOG.md 11 | docker-compose.yml 12 | 13 | images/ 14 | 
tools/ 15 | 16 | # Database directories 17 | mainnetdb/ 18 | db/ 19 | retainer/ 20 | -------------------------------------------------------------------------------- /.github/workflows/build_docker.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - "Dockerfile" 7 | push: 8 | branches: 9 | - develop 10 | 11 | jobs: 12 | build: 13 | name: Build Docker 14 | runs-on: self-hosted 15 | steps: 16 | - name: Check out code into the Go module directory 17 | uses: actions/checkout@v4 18 | 19 | - name: Build Docker image 20 | run: DOCKER_BUILDKIT=1 docker build . --file Dockerfile --tag iota-core:latest 21 | 22 | - name: Test Docker image 23 | run: docker run --rm --name iota-core iota-core:latest --version 2>/dev/null | grep -q "iota-core" 24 | -------------------------------------------------------------------------------- /.github/workflows/build_tools.yml: -------------------------------------------------------------------------------- 1 | name: Build internal tools 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'tools/genesis-snapshot/**' 7 | 8 | jobs: 9 | build: 10 | name: Import Check 11 | runs-on: self-hosted 12 | steps: 13 | 14 | - name: Checkout repository 15 | uses: actions/checkout@v4 16 | 17 | - uses: actions/setup-go@v5 18 | with: 19 | go-version-file: 'go.mod' 20 | cache: false 21 | 22 | - name: Print Go version 23 | run: go version 24 | 25 | - name: Build genesis-snapshot tool 26 | working-directory: tools/genesis-snapshot 27 | run: go build . 
28 | -------------------------------------------------------------------------------- /.github/workflows/codeql_analysis.yml: -------------------------------------------------------------------------------- 1 | 2 | name: "CodeQL" 3 | 4 | on: 5 | # Run this security check every day at 03:00 to find potential new vulnerabilities in the develop branch 6 | schedule: 7 | - cron: "0 3 * * *" 8 | 9 | jobs: 10 | analyze: 11 | name: Analyze 12 | runs-on: self-hosted 13 | permissions: 14 | actions: read 15 | contents: read 16 | security-events: write 17 | 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | language: [ 'go' ] 22 | 23 | steps: 24 | - name: Checkout repository 25 | uses: actions/checkout@v4 26 | with: 27 | ref: develop 28 | 29 | # Initializes the CodeQL tools for scanning. 30 | - name: Initialize CodeQL 31 | uses: github/codeql-action/init@v2 32 | with: 33 | languages: ${{ matrix.language }} 34 | queries: security-and-quality 35 | 36 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
37 | # If this step fails, then you should remove it and run the build manually (see below) 38 | - name: Autobuild 39 | uses: github/codeql-action/autobuild@v2 40 | 41 | - name: Perform CodeQL Analysis 42 | uses: github/codeql-action/analyze@v2 43 | -------------------------------------------------------------------------------- /.github/workflows/docker-network-health.yml: -------------------------------------------------------------------------------- 1 | name: Docker Network Health Check 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | paths: 7 | - '**' 8 | - '!documentation/**' 9 | - '!scripts/**' 10 | - '!tools/**' 11 | - 'tools/genesis-snapshot/**' 12 | push: 13 | branches: 14 | - develop 15 | 16 | concurrency: 17 | group: run-and-check-group 18 | cancel-in-progress: false 19 | 20 | jobs: 21 | run-and-check: 22 | runs-on: self-hosted 23 | 24 | steps: 25 | - name: Checkout code 26 | uses: actions/checkout@v4 27 | 28 | - name: Run network, wait and check health 29 | run: | 30 | set -x 31 | 32 | # Run network 33 | cd ./tools/docker-network 34 | timeout 10m ./run.sh & 35 | RUN_PID=$! 36 | 37 | # Wait for node-4 to be created before querying it 38 | timeout 10m bash -c 'until docker ps | grep docker-network-node-4; do sleep 5; done' & 39 | 40 | # Wait for any of the two processes to exit 41 | wait -n || exit 1 42 | 43 | # Additional 10 seconds wait to allow the API to come up 44 | sleep 10 45 | 46 | # Health check 47 | SUCCESS=false 48 | while true; do 49 | OUTPUT=$(curl -o /dev/null -s -w "%{http_code}\n" http://localhost:8080/health) 50 | if [[ $OUTPUT -eq 200 ]]; then 51 | SUCCESS=true 52 | kill -s SIGINT $RUN_PID 53 | break 54 | # curl will return a connection refused when the network is tear down from the timeout. 55 | elif [[ $OUTPUT -eq 000 ]]; then 56 | echo "Connection refused. Failing the action." 57 | break 58 | fi 59 | sleep 5 60 | done 61 | 62 | if [[ ! $SUCCESS ]]; then 63 | echo "Health check never returned 200. Failing the action." 
64 | exit 1 65 | fi 66 | 67 | - name: Cleanup 68 | run: | 69 | cd ./tools/docker-network 70 | docker compose kill || true 71 | docker compose down -t 1 -v || true 72 | -------------------------------------------------------------------------------- /.github/workflows/docker-network-tests-nightly.yml: -------------------------------------------------------------------------------- 1 | name: Docker Network Tests 2 | 3 | on: 4 | schedule: 5 | - cron: '0 1 * * *' # Runs every day at 1 AM 6 | workflow_dispatch: 7 | inputs: 8 | testCases: 9 | description: 'Custom test cases to run:' 10 | required: false 11 | default: "" 12 | 13 | jobs: 14 | test: 15 | runs-on: self-hosted 16 | 17 | steps: 18 | - name: Check out code 19 | uses: actions/checkout@v4 20 | 21 | - name: Set up Docker 22 | uses: docker/setup-buildx-action@v3 23 | 24 | - name: Set up Go 25 | uses: actions/setup-go@v5 26 | with: 27 | go-version-file: 'go.mod' 28 | cache: false 29 | 30 | - name: Run all tests 31 | run: | 32 | cd tools/docker-network/tests && 33 | ./run_tests.sh ${{ github.event.inputs.testCases }} 34 | 35 | - name: Upload logs as artifacts 36 | uses: actions/upload-artifact@v4 37 | if: success() || failure() 38 | with: 39 | name: logs 40 | path: tools/docker-network/tests/logs 41 | -------------------------------------------------------------------------------- /.github/workflows/gendoc.yml: -------------------------------------------------------------------------------- 1 | name: gendoc 2 | 3 | on: 4 | push: 5 | branches: 6 | - develop 7 | 8 | jobs: 9 | gendoc: 10 | runs-on: self-hosted 11 | steps: 12 | - name: Checkout repository 13 | uses: actions/checkout@v4 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v5 17 | with: 18 | go-version-file: 'tools/gendoc/go.mod' 19 | cache: false 20 | 21 | - name: Print Go version 22 | run: go version 23 | 24 | - name: Run gendoc 25 | working-directory: tools/gendoc 26 | run: go mod tidy && go run main.go 27 | 28 | - name: Create Pull Request 29 | 
uses: peter-evans/create-pull-request@v5 30 | with: 31 | token: ${{ secrets.GITHUB_TOKEN }} 32 | title: "chore(gendoc): update docs" 33 | commit-message: "chore(gendoc): update docs" 34 | body: | 35 | Generated new config documentation. 36 | This PR is auto generated by [gendoc workflow](https://github.com/${{ github.repository }}/actions?query=workflow%3Agendoc). 37 | branch: chore/gendoc 38 | base: develop 39 | add-paths: | 40 | *.json 41 | *.md 42 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: GolangCIlint 2 | 3 | on: 4 | pull_request: 5 | paths-ignore: 6 | - 'documentation/**' 7 | - 'scripts/**' 8 | - 'tools/**' 9 | 10 | jobs: 11 | golangci-lint: 12 | name: GolangCI-Lint 13 | runs-on: self-hosted 14 | steps: 15 | - name: Check out code into the Go module directory 16 | uses: actions/checkout@v4 17 | 18 | #- name: Checkout custom linter 19 | # uses: actions/checkout@v4 20 | # with: 21 | # repository: iotaledger/typegroupingcheck 22 | # path: typegroupingcheck 23 | 24 | #- name: Setup go 25 | # uses: actions/setup-go@v5 26 | # with: 27 | # go-version-file: './typegroupingcheck/go.mod' 28 | 29 | #- name: Build custom linter 30 | # working-directory: ./typegroupingcheck 31 | # run: | 32 | # go build -buildmode=plugin -o typegroupingcheck.so 33 | 34 | - name: Setup go 35 | uses: actions/setup-go@v5 36 | with: 37 | go-version-file: 'go.mod' 38 | 39 | - name: golangci-lint 40 | uses: golangci/golangci-lint-action@v3 41 | with: 42 | skip-cache: true 43 | version: latest 44 | install-mode: goinstall 45 | args: --timeout=10m # --enable typegroupingcheck 46 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | release: 5 | types: 
[published] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | docker: 10 | name: Release Docker 11 | environment: release 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Check out code into the Go module directory 15 | uses: actions/checkout@v4 16 | 17 | - name: Set up QEMU 18 | uses: docker/setup-qemu-action@v3 19 | 20 | - name: Set up Docker Buildx 21 | uses: docker/setup-buildx-action@v3 22 | 23 | - name: Docker meta 24 | id: meta 25 | uses: docker/metadata-action@v5 26 | with: 27 | images: iotaledger/iota-core 28 | tags: | 29 | type=semver,pattern={{version}} 30 | type=semver,pattern={{major}}.{{minor}} 31 | type=semver,pattern={{major}} 32 | type=match,pattern=v(\d+.\d+),suffix=-alpha,group=1,enable=${{ contains(github.ref, '-alpha') }} 33 | type=match,pattern=v(\d+.\d+),suffix=-beta,group=1,enable=${{ contains(github.ref, '-beta') }} 34 | type=match,pattern=v(\d+.\d+),suffix=-rc,group=1,enable=${{ contains(github.ref, '-rc') }} 35 | type=raw,value=${{ github.ref_name }},enable=${{ github.ref_type != 'tag' }} 36 | 37 | - name: Login to DockerHub 38 | uses: docker/login-action@v3 39 | with: 40 | username: ${{ secrets.IOTALEDGER_DOCKER_USERNAME }} 41 | password: ${{ secrets.IOTALEDGER_DOCKER_PASSWORD }} 42 | 43 | - name: Build and push to Dockerhub 44 | uses: docker/build-push-action@v5 45 | with: 46 | file: ./Dockerfile 47 | platforms: linux/amd64,linux/arm64 48 | push: true 49 | build-args: | 50 | BUILD_VERSION=${{ github.ref_name }} 51 | tags: ${{ steps.meta.outputs.tags }} 52 | -------------------------------------------------------------------------------- /.github/workflows/unit-test-nightly.yml: -------------------------------------------------------------------------------- 1 | name: Unit tests nightly 2 | 3 | on: 4 | schedule: 5 | - cron: '0 5 * * *' # Runs every day at 5 AM 6 | workflow_dispatch: 7 | 8 | jobs: 9 | unit-tests-with-log: 10 | name: Unit tests with log nightly 11 | runs-on: self-hosted 12 | steps: 13 | - name: Checkout repository 14 | uses: 
actions/checkout@v4 15 | 16 | - uses: actions/setup-go@v5 17 | with: 18 | go-version-file: 'go.mod' 19 | cache: false 20 | 21 | - name: Print Go version 22 | run: go version 23 | 24 | - name: Run unit tests with logger (level trace) 25 | run: | 26 | CI_UNIT_TESTS_LOG_LEVEL=trace go test ./... -tags rocksdb -count=5 -race -short -timeout 120m 27 | 28 | unit-tests-without-log: 29 | name: Unit tests without log nightly 30 | runs-on: self-hosted 31 | steps: 32 | - name: Checkout repository 33 | uses: actions/checkout@v4 34 | 35 | - uses: actions/setup-go@v5 36 | with: 37 | go-version-file: 'go.mod' 38 | cache: false 39 | 40 | - name: Print Go version 41 | run: go version 42 | 43 | - name: Run unit tests without logger 44 | run: | 45 | CI_UNIT_NO_LOG=1 go test ./... -tags rocksdb -count=5 -race -short -timeout 120m 46 | -------------------------------------------------------------------------------- /.github/workflows/unit-test.yml: -------------------------------------------------------------------------------- 1 | name: Unit tests 2 | 3 | on: 4 | pull_request: 5 | paths-ignore: 6 | - 'documentation/**' 7 | push: 8 | branches: 9 | - develop 10 | 11 | jobs: 12 | unit-tests: 13 | name: Unit tests 14 | runs-on: self-hosted 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v4 18 | 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version-file: 'go.mod' 22 | cache: false 23 | 24 | - name: Print Go version 25 | run: go version 26 | 27 | - name: Run Tests 28 | run: go test ./... -tags rocksdb -count=1 -timeout 20m 29 | 30 | unit-tests-race: 31 | name: Unit tests -race 32 | runs-on: self-hosted 33 | steps: 34 | - name: Checkout repository 35 | uses: actions/checkout@v4 36 | 37 | - uses: actions/setup-go@v5 38 | with: 39 | go-version-file: 'go.mod' 40 | cache: false 41 | 42 | - name: Print Go version 43 | run: go version 44 | 45 | - name: Run Tests with -race 46 | run: go test ./... 
-tags rocksdb -count=1 -race -short -timeout 30m 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | iota-core 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | 9 | # Test binary, build with `go test -c` 10 | *.test 11 | 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | 15 | # IDE related files 16 | .vscode/ 17 | .idea/ 18 | go.work* 19 | 20 | # dist packages 21 | dist/ 22 | 23 | # OSX related files 24 | .DS_Store 25 | 26 | # Data directories 27 | /testnet 28 | 29 | # snapshot and settings file 30 | *.bin 31 | tools/docker-network/docker-network.snapshot 32 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # https://hub.docker.com/_/golang 2 | FROM golang:1.22-bookworm AS build 3 | 4 | ARG BUILD_TAGS=rocksdb 5 | ARG BUILD_VERSION=v1.0.0-develop 6 | 7 | LABEL org.label-schema.description="IOTA core node" 8 | LABEL org.label-schema.name="iotaledger/iota-core" 9 | LABEL org.label-schema.schema-version="1.0" 10 | LABEL org.label-schema.vcs-url="https://github.com/iotaledger/iota-core" 11 | 12 | # Ensure ca-certificates are up to date 13 | RUN update-ca-certificates 14 | 15 | # Set the current Working Directory inside the container 16 | RUN mkdir /scratch 17 | WORKDIR /scratch 18 | 19 | # Prepare the folder where we are putting all the files 20 | RUN mkdir /app 21 | 22 | WORKDIR /scratch 23 | 24 | # Copy everything from the current directory to the PWD(Present Working Directory) inside the container 25 | COPY . . 
26 | 27 | # Download go modules 28 | RUN go mod download 29 | RUN go mod verify 30 | 31 | # Build the binary 32 | RUN go build -o /app/iota-core -tags="$BUILD_TAGS" -ldflags="-w -s -X=github.com/iotaledger/iota-core/components/app.Version=${BUILD_VERSION}" 33 | 34 | # Copy the assets 35 | RUN cp ./config_defaults.json /app/config.json 36 | RUN cp ./peering.json /app/peering.json 37 | 38 | ############################ 39 | # Runtime Image 40 | ############################ 41 | # https://console.cloud.google.com/gcr/images/distroless/global/cc-debian12 42 | # using distroless cc "nonroot" image, which includes everything in the base image (glibc, libssl and openssl) 43 | FROM gcr.io/distroless/cc-debian12:nonroot 44 | 45 | HEALTHCHECK --interval=10s --timeout=5s --retries=30 CMD ["/app/iota-core", "tools", "node-info"] 46 | 47 | # Copy the app dir into distroless image 48 | COPY --chown=nonroot:nonroot --from=build /app /app 49 | 50 | WORKDIR /app 51 | USER nonroot 52 | 53 | ENTRYPOINT ["/app/iota-core"] 54 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | REPO := $(shell pwd) 3 | GOFILES_NOVENDOR := $(shell go list -f "{{.Dir}}" ./...) 4 | PACKAGES_NOVENDOR := $(shell go list ./...) 5 | PROTOC_GEN_GO := $(GOPATH)/bin/protoc-gen-go 6 | 7 | # Protobuf generated go files 8 | PROTO_FILES = $(shell find . -path ./vendor -prune -o -type f -name '*.proto' -print) 9 | PROTO_GO_FILES = $(patsubst %.proto, %.pb.go, $(PROTO_FILES)) 10 | PROTO_GO_FILES_REAL = $(shell find . -path ./vendor -prune -o -type f -name '*.pb.go' -print) 11 | 12 | .PHONY: build 13 | build: proto 14 | go build -o iota-core 15 | 16 | # Protobuffing 17 | .PHONY: proto 18 | proto: $(PROTO_GO_FILES) 19 | 20 | # If $GOPATH/bin/protoc-gen-go does not exist, we'll run this command to install it. 
#!/bin/bash

# Build the iota-core Docker image.
#
# The build context must be the repository root ("."): the Dockerfile's
# "COPY . ." and its later "cp ./config_defaults.json" / "cp ./peering.json"
# steps resolve paths relative to the context root. Passing ".." (the parent
# of the repository) made those files unavailable at the context root and
# broke the build.
docker build . -f ./Dockerfile -t iota-core:develop
New: 0, 13 | Outgoing: 0, 14 | } 15 | lastIncomingBlocksCount uint32 16 | lastIncomingNewBlocksCount uint32 17 | lastOutgoingBlocksCount uint32 18 | ) 19 | 20 | // uint32Diff returns the difference between newCount and oldCount 21 | // and catches overflows. 22 | func uint32Diff(newCount uint32, oldCount uint32) uint32 { 23 | // Catch overflows 24 | if newCount < oldCount { 25 | return (math.MaxUint32 - oldCount) + newCount 26 | } 27 | 28 | return newCount - oldCount 29 | } 30 | 31 | // measureGossipMetrics measures the BPS values. 32 | func measureGossipMetrics() { 33 | newIncomingBlocksCount := deps.P2PMetrics.IncomingBlocks.Load() 34 | newIncomingNewBlocksCount := deps.P2PMetrics.IncomingNewBlocks.Load() 35 | newOutgoingBlocksCount := deps.P2PMetrics.OutgoingBlocks.Load() 36 | 37 | // calculate the new BPS metrics 38 | lastGossipMetricsLock.Lock() 39 | defer lastGossipMetricsLock.Unlock() 40 | 41 | lastGossipMetrics = &GossipMetrics{ 42 | Incoming: uint32Diff(newIncomingBlocksCount, lastIncomingBlocksCount), 43 | New: uint32Diff(newIncomingNewBlocksCount, lastIncomingNewBlocksCount), 44 | Outgoing: uint32Diff(newOutgoingBlocksCount, lastOutgoingBlocksCount), 45 | } 46 | 47 | // store the new counters 48 | lastIncomingBlocksCount = newIncomingBlocksCount 49 | lastIncomingNewBlocksCount = newIncomingNewBlocksCount 50 | lastOutgoingBlocksCount = newOutgoingBlocksCount 51 | } 52 | 53 | func gossipMetrics() *GossipMetrics { 54 | lastGossipMetricsLock.RLock() 55 | defer lastGossipMetricsLock.RUnlock() 56 | 57 | return lastGossipMetrics 58 | } 59 | -------------------------------------------------------------------------------- /components/dashboard_metrics/info.go: -------------------------------------------------------------------------------- 1 | package dashboardmetrics 2 | 3 | import ( 4 | "runtime" 5 | "time" 6 | ) 7 | 8 | var ( 9 | nodeStartupTimestamp = time.Now() 10 | ) 11 | 12 | func nodeInfoExtended() *NodeInfoExtended { 13 | var m runtime.MemStats 14 | 
runtime.ReadMemStats(&m) 15 | 16 | getExternalMultiAddr := func() string { 17 | var fallback string 18 | 19 | for i, addr := range deps.Host.Addrs() { 20 | if i == 0 { 21 | fallback = addr.String() 22 | } 23 | 24 | for _, protocol := range addr.Protocols() { 25 | // search the first dns address 26 | if protocol.Name == "dns" { 27 | return addr.String() 28 | } 29 | } 30 | } 31 | 32 | return fallback 33 | } 34 | 35 | status := &NodeInfoExtended{ 36 | Version: deps.AppInfo.Version, 37 | LatestVersion: deps.AppInfo.LatestGitHubVersion, 38 | Uptime: time.Since(nodeStartupTimestamp).Milliseconds(), 39 | NodeID: deps.Host.ID().String(), 40 | MultiAddress: getExternalMultiAddr(), 41 | Alias: ParamsNode.Alias, 42 | MemoryUsage: int64(m.HeapAlloc + m.StackSys + m.MSpanSys + m.MCacheSys + m.BuckHashSys + m.GCSys + m.OtherSys), 43 | } 44 | 45 | return status 46 | } 47 | -------------------------------------------------------------------------------- /components/dashboard_metrics/params.go: -------------------------------------------------------------------------------- 1 | package dashboardmetrics 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/app" 5 | ) 6 | 7 | // ParametersNode contains the definition of the parameters used by the node. 8 | type ParametersNode struct { 9 | // Alias is used to set an alias to identify a node 10 | Alias string `default:"IOTA-Core node" usage:"set an alias to identify a node"` 11 | } 12 | 13 | var ParamsNode = &ParametersNode{} 14 | 15 | var params = &app.ComponentParams{ 16 | Params: map[string]any{ 17 | "node": ParamsNode, 18 | }, 19 | Masked: nil, 20 | } 21 | -------------------------------------------------------------------------------- /components/dashboard_metrics/types.go: -------------------------------------------------------------------------------- 1 | package dashboardmetrics 2 | 3 | // NodeInfoExtended represents extended information about the node. 
type NodeInfoExtended struct {
	Version       string `serix:",lenPrefix=uint8"` // running node software version (from app info)
	LatestVersion string `serix:",lenPrefix=uint8"` // latest version known from GitHub (AppInfo.LatestGitHubVersion)
	Uptime        int64  `serix:""`                 // uptime in milliseconds since node startup
	NodeID        string `serix:",lenPrefix=uint8"` // peer-to-peer host ID
	MultiAddress  string `serix:",lenPrefix=uint8"` // external multi address (first DNS address if any, else first address)
	Alias         string `serix:",lenPrefix=uint8"` // configured node alias (node.alias parameter)
	MemoryUsage   int64  `serix:""`                 // bytes; sum of runtime.MemStats heap/stack/runtime-internal figures
}

// DatabaseSizesMetric represents database size metrics.
type DatabaseSizesMetric struct {
	Permanent  int64 `serix:""` // size of the permanent storage, as reported by the engine's storage
	Prunable   int64 `serix:""` // size of the prunable storage
	TxRetainer int64 `serix:""` // size of the transaction retainer database
	Total      int64 `serix:""` // Permanent + Prunable + TxRetainer
	Time       int64 `serix:""` // unix timestamp (seconds) when the sizes were sampled
}

// GossipMetrics represents the metrics for blocks per second.
// Values are deltas of the gossip counters since the previous measurement.
type GossipMetrics struct {
	Incoming uint32 `serix:""` // incoming blocks since last measurement
	New      uint32 `serix:""` // incoming blocks seen for the first time since last measurement
	Outgoing uint32 `serix:""` // outgoing blocks since last measurement
}
// validatorsSummary collects, for the slot of the latest commitment, every
// seat of the selected committee (account ID, seat index, pool/validator
// stake and fixed cost) together with the seat indices the node currently
// sees as online, into a ValidatorsSummaryResponse.
//
// It returns an error when no committee was selected for that slot or when
// the committee's accounts cannot be retrieved.
func validatorsSummary() (*ValidatorsSummaryResponse, error) {
	seatManager := deps.Protocol.Engines.Main.Get().SybilProtection.SeatManager()

	// Resolve the committee in charge at the latest commitment's slot.
	latestSlotIndex := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Slot()
	latestCommittee, exists := seatManager.CommitteeInSlot(latestSlotIndex)
	if !exists {
		return nil, ierrors.Errorf("committee for slot %d was not selected", latestSlotIndex)
	}

	var validatorSeats []*Validator
	accounts, err := latestCommittee.Accounts()
	if err != nil {
		return nil, ierrors.Wrapf(err, "failed to get accounts from committee for slot %d", latestSlotIndex)
	}

	// One summary entry per committee member.
	accounts.ForEach(func(id iotago.AccountID, pool *account.Pool) bool {
		validatorSeats = append(validatorSeats, &Validator{
			AccountID:      id,
			SeatIndex:      uint8(lo.Return1(latestCommittee.GetSeat(id))),
			PoolStake:      pool.PoolStake,
			ValidatorStake: pool.ValidatorStake,
			FixedCost:      pool.FixedCost,
		})

		return true // continue iterating over all committee accounts
	})

	return &ValidatorsSummaryResponse{
		ValidatorSeats: validatorSeats,
		// Seats currently seen as online by this node.
		ActiveSeats: lo.Map(seatManager.OnlineCommittee().ToSlice(), func(seatIndex account.SeatIndex) uint32 {
			return uint32(seatIndex)
		}),
	}, nil
}
// storeTransactionsPerSlot records the IDs of all transactions (mutations)
// included in the slot of the given commitment, together with the root of an
// ad-hoc authenticated set built over those IDs, and caches the result in the
// package-level transactionsPerSlot map for later retrieval via
// getSlotTransactionIDs.
//
// It returns an error if a transaction ID cannot be computed or cannot be
// added to the mutations tree.
func storeTransactionsPerSlot(scd *notarization.SlotCommittedDetails) error {
	slot := scd.Commitment.Slot()

	// In-memory (mapdb-backed) authenticated set; only its root ends up in
	// the response.
	mutationsTree := ads.NewSet[iotago.Identifier](
		mapdb.NewMapDB(),
		iotago.Identifier.Bytes,
		iotago.IdentifierFromBytes,
		iotago.TransactionID.Bytes,
		iotago.TransactionIDFromBytes,
	)
	tcs := &TransactionsChangesResponse{
		Index:                slot,
		IncludedTransactions: make([]string, 0),
	}

	for _, transaction := range scd.Mutations {
		txID, err := transaction.ID()
		if err != nil {
			return ierrors.Wrapf(err, "failed to calculate transactionID")
		}

		tcs.IncludedTransactions = append(tcs.IncludedTransactions, txID.String())
		if err = mutationsTree.Add(txID); err != nil {
			return ierrors.Wrapf(err, "failed to add transaction to mutations tree, txID: %s", txID)
		}
	}

	tcs.MutationsRoot = mutationsTree.Root().String()

	transactionsPerSlot[slot] = tcs

	return nil
}
slotDiff, exists := transactionsPerSlot[slot]; exists { 53 | return slotDiff, nil 54 | } 55 | 56 | return nil, ierrors.Errorf("cannot find transaction storage bucket for slot %d", slot) 57 | } 58 | -------------------------------------------------------------------------------- /components/inx/component.go: -------------------------------------------------------------------------------- 1 | package inx 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/labstack/echo/v4" 7 | "go.uber.org/dig" 8 | 9 | "github.com/iotaledger/hive.go/app" 10 | "github.com/iotaledger/iota-core/components/protocol" 11 | "github.com/iotaledger/iota-core/pkg/daemon" 12 | protocolpkg "github.com/iotaledger/iota-core/pkg/protocol" 13 | "github.com/iotaledger/iota-core/pkg/requesthandler" 14 | restapipkg "github.com/iotaledger/iota-core/pkg/restapi" 15 | ) 16 | 17 | func init() { 18 | Component = &app.Component{ 19 | Name: "INX", 20 | DepsFunc: func(cDeps dependencies) { deps = cDeps }, 21 | Params: params, 22 | Provide: provide, 23 | Run: run, 24 | IsEnabled: func(_ *dig.Container) bool { 25 | return ParamsINX.Enabled 26 | }, 27 | } 28 | } 29 | 30 | var ( 31 | Component *app.Component 32 | deps dependencies 33 | ) 34 | 35 | type dependencies struct { 36 | dig.In 37 | Protocol *protocolpkg.Protocol 38 | RequestHandler *requesthandler.RequestHandler 39 | Echo *echo.Echo `optional:"true"` 40 | RestRouteManager *restapipkg.RestRouteManager 41 | INXServer *Server 42 | BaseToken *protocol.BaseToken 43 | } 44 | 45 | func provide(c *dig.Container) error { 46 | //nolint:gocritic // easier to read which type is returned 47 | if err := c.Provide(func() *Server { 48 | return newServer() 49 | }); err != nil { 50 | Component.LogPanic(err.Error()) 51 | } 52 | 53 | return nil 54 | } 55 | 56 | func run() error { 57 | if err := Component.Daemon().BackgroundWorker("INX", func(ctx context.Context) { 58 | Component.LogInfo("Starting INX ... 
// newServer creates the INX gRPC server, wired with prometheus interceptors
// for unary and stream calls, keepalive parameters (20s ping interval, 5s
// timeout) and a cap of 10 concurrent streams, and registers the returned
// Server as the INX service implementation.
func newServer() *Server {
	grpcServer := grpc.NewServer(
		grpc.StreamInterceptor(grpcprometheus.StreamServerInterceptor),
		grpc.UnaryInterceptor(grpcprometheus.UnaryServerInterceptor),
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    20 * time.Second,
			Timeout: 5 * time.Second,
		}),
		grpc.MaxConcurrentStreams(10),
	)

	s := &Server{grpcServer: grpcServer}
	inx.RegisterINXServer(grpcServer, s)

	return s
}
-------------------------------------------------------------------------------- /components/metricstracker/params.go: -------------------------------------------------------------------------------- 1 | package metricstracker 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/app" 5 | ) 6 | 7 | // ParametersMetricsTracker contains the definition of the parameters used by Metrics Tracker. 8 | type ParametersMetricsTracker struct { 9 | // Enabled defines whether the Metrics Tracker plugin is enabled. 10 | Enabled bool `default:"true" usage:"whether the Metrics Tracker plugin is enabled"` 11 | } 12 | 13 | var ParamsMetricsTracker = &ParametersMetricsTracker{} 14 | 15 | var params = &app.ComponentParams{ 16 | Params: map[string]any{ 17 | "metricsTracker": ParamsMetricsTracker, 18 | }, 19 | } 20 | -------------------------------------------------------------------------------- /components/prometheus/collector/collection.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds/shrinkingmap" 5 | "github.com/iotaledger/hive.go/runtime/options" 6 | ) 7 | 8 | type Collection struct { 9 | CollectionName string 10 | metrics *shrinkingmap.ShrinkingMap[string, *Metric] 11 | } 12 | 13 | func NewCollection(name string, opts ...options.Option[Collection]) *Collection { 14 | return options.Apply(&Collection{ 15 | CollectionName: name, 16 | metrics: shrinkingmap.New[string, *Metric](), 17 | }, opts, func(collection *Collection) { 18 | collection.metrics.ForEach(func(_ string, metric *Metric) bool { 19 | metric.Namespace = collection.CollectionName 20 | metric.initPromMetric() 21 | 22 | return true 23 | }) 24 | }) 25 | } 26 | 27 | func (c *Collection) GetMetric(metricName string) *Metric { 28 | metric, exists := c.metrics.Get(metricName) 29 | if !exists { 30 | return nil 31 | } 32 | 33 | return metric 34 | } 35 | 36 | func (c *Collection) addMetric(metric *Metric) { 37 | if 
metric != nil { 38 | c.metrics.Set(metric.Name, metric) 39 | } 40 | } 41 | 42 | func WithMetric(metric *Metric) options.Option[Collection] { 43 | return func(c *Collection) { 44 | c.addMetric(metric) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /components/prometheus/metrics_accounts.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "github.com/iotaledger/iota-core/components/prometheus/collector" 5 | ) 6 | 7 | const ( 8 | accountNamespace = "account" 9 | 10 | activeSeats = "active_seats" 11 | ) 12 | 13 | var AccountMetrics = collector.NewCollection(accountNamespace, 14 | collector.WithMetric(collector.NewMetric(activeSeats, 15 | collector.WithType(collector.Gauge), 16 | collector.WithHelp("Seats seen as active by the node."), 17 | collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { 18 | return float64(deps.Protocol.Engines.Main.Get().SybilProtection.SeatManager().OnlineCommittee().Size()), nil 19 | }), 20 | )), 21 | ) 22 | -------------------------------------------------------------------------------- /components/prometheus/metrics_db.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "github.com/iotaledger/iota-core/components/prometheus/collector" 5 | ) 6 | 7 | const ( 8 | dbNamespace = "db" 9 | 10 | sizeBytesPermanent = "size_bytes_permanent" 11 | sizeBytesPrunable = "size_bytes_prunable" 12 | sizeBytesTxRetainerDatabase = "size_bytes_tx_retainer_database" 13 | ) 14 | 15 | var DBMetrics = collector.NewCollection(dbNamespace, 16 | collector.WithMetric(collector.NewMetric(sizeBytesPermanent, 17 | collector.WithType(collector.Gauge), 18 | collector.WithHelp("DB size in bytes for permanent storage."), 19 | collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { 20 | return 
float64(deps.Protocol.Engines.Main.Get().Storage.PermanentDatabaseSize()), nil 21 | }), 22 | )), 23 | collector.WithMetric(collector.NewMetric(sizeBytesPrunable, 24 | collector.WithType(collector.Gauge), 25 | collector.WithHelp("DB size in bytes for prunable storage."), 26 | collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { 27 | return float64(deps.Protocol.Engines.Main.Get().Storage.PrunableDatabaseSize()), nil 28 | }), 29 | )), 30 | collector.WithMetric(collector.NewMetric(sizeBytesTxRetainerDatabase, 31 | collector.WithType(collector.Gauge), 32 | collector.WithHelp("DB size in bytes for transaction retainer SQL database."), 33 | collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { 34 | return float64(deps.Protocol.Engines.Main.Get().Storage.TransactionRetainerDatabaseSize()), nil 35 | }), 36 | )), 37 | ) 38 | -------------------------------------------------------------------------------- /components/prometheus/metrics_info.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "runtime" 5 | "strconv" 6 | "time" 7 | 8 | "github.com/iotaledger/iota-core/components/prometheus/collector" 9 | ) 10 | 11 | const ( 12 | infoNamespace = "info" 13 | 14 | nodeOS = "node_os" 15 | syncStatus = "sync_status" 16 | memUsage = "memory_usage_bytes" 17 | ) 18 | 19 | var InfoMetrics = collector.NewCollection(infoNamespace, 20 | collector.WithMetric(collector.NewMetric(nodeOS, 21 | collector.WithType(collector.Gauge), 22 | collector.WithHelp("Node OS data."), 23 | collector.WithLabels("nodeID", "OS", "ARCH", "NUM_CPU"), 24 | collector.WithPruningDelay(10*time.Minute), 25 | collector.WithInitValueFunc(func() (metricValue float64, labelValues []string) { 26 | var nodeID string 27 | if deps.Host != nil { 28 | nodeID = deps.Host.ID().String() 29 | } 30 | 31 | return 0, []string{nodeID, runtime.GOOS, runtime.GOARCH, strconv.Itoa(runtime.NumCPU())} 32 | }), 33 
| )), 34 | collector.WithMetric(collector.NewMetric(syncStatus, 35 | collector.WithType(collector.Gauge), 36 | collector.WithHelp("Node sync status based on ATT."), 37 | collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { 38 | if deps.Protocol.Engines.Main.Get().SyncManager.IsNodeSynced() { 39 | return 1, nil 40 | } 41 | 42 | return 0, nil 43 | }), 44 | )), 45 | collector.WithMetric(collector.NewMetric(memUsage, 46 | collector.WithType(collector.Gauge), 47 | collector.WithHelp("The memory usage in bytes of allocated heap objects"), 48 | collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { 49 | var m runtime.MemStats 50 | runtime.ReadMemStats(&m) 51 | 52 | return float64(m.Alloc), nil 53 | }), 54 | )), 55 | ) 56 | -------------------------------------------------------------------------------- /components/prometheus/params.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/app" 5 | ) 6 | 7 | // ParametersMetrics contains the definition of the parameters used by the metrics component. 8 | type ParametersMetrics struct { 9 | // Enabled defines whether the Metrics component is enabled. 10 | Enabled bool `default:"true" usage:"whether the Metrics component is enabled"` 11 | // BindAddress defines the bind address for the Prometheus exporter server. 12 | BindAddress string `default:"0.0.0.0:9311" usage:"bind address on which the Prometheus exporter server"` 13 | // GoMetrics defines whether to include Go metrics. 14 | GoMetrics bool `default:"false" usage:"include go metrics"` 15 | // ProcessMetrics defines whether to include process metrics. 16 | ProcessMetrics bool `default:"false" usage:"include process metrics"` 17 | // PromhttpMetrics defines whether to include promhttp metrics. 
// blockByID returns the full block for the block ID given in the request's
// block ID path parameter.
func blockByID(c echo.Context) (*iotago.Block, error) {
	blockID, err := httpserver.ParseBlockIDParam(c, api.ParameterBlockID)
	if err != nil {
		return nil, ierrors.Wrapf(err, "failed to parse block ID %s", c.Param(api.ParameterBlockID))
	}

	return deps.RequestHandler.BlockFromBlockID(blockID)
}

// blockMetadataByID returns only the metadata for the requested block ID.
func blockMetadataByID(c echo.Context) (*api.BlockMetadataResponse, error) {
	blockID, err := httpserver.ParseBlockIDParam(c, api.ParameterBlockID)
	if err != nil {
		return nil, ierrors.Wrapf(err, "failed to parse block ID %s", c.Param(api.ParameterBlockID))
	}

	return deps.RequestHandler.BlockMetadataFromBlockID(blockID)
}

// blockWithMetadataByID returns the block together with its metadata for the
// requested block ID.
func blockWithMetadataByID(c echo.Context) (*api.BlockWithMetadataResponse, error) {
	blockID, err := httpserver.ParseBlockIDParam(c, api.ParameterBlockID)
	if err != nil {
		return nil, ierrors.Wrapf(err, "failed to parse block ID %s", c.Param(api.ParameterBlockID))
	}

	return deps.RequestHandler.BlockWithMetadataFromBlockID(blockID)
}
// info assembles the node info response from the app metadata, the request
// handler's node status and protocol parameters, and the configured base
// token.
func info() *api.InfoResponse {
	return &api.InfoResponse{
		Name:               deps.AppInfo.Name,
		Version:            deps.AppInfo.Version,
		Status:             deps.RequestHandler.GetNodeStatus(),
		ProtocolParameters: deps.RequestHandler.GetProtocolParameters(),
		BaseToken: &api.InfoResBaseToken{
			Name:         deps.BaseToken.Name,
			TickerSymbol: deps.BaseToken.TickerSymbol,
			Unit:         deps.BaseToken.Unit,
			Subunit:      deps.BaseToken.Subunit,
			Decimals:     deps.BaseToken.Decimals,
		},
	}
}

// metrics maps the metrics tracker's current node metrics onto the REST API
// network metrics response.
func metrics() *api.NetworkMetricsResponse {
	metrics := deps.MetricsTracker.NodeMetrics()

	return &api.NetworkMetricsResponse{
		BlocksPerSecond:          metrics.BlocksPerSecond,
		ConfirmedBlocksPerSecond: metrics.ConfirmedBlocksPerSecond,
		// NOTE(review): response field is ConfirmationRate while the tracker
		// exposes ConfirmedRate — presumably the same metric; confirm.
		ConfirmationRate: metrics.ConfirmedRate,
	}
}
"github.com/iotaledger/iota.go/v4/api" 9 | ) 10 | 11 | func outputFromOutputID(c echo.Context) (*api.OutputResponse, error) { 12 | outputID, err := httpserver.ParseOutputIDParam(c, api.ParameterOutputID) 13 | if err != nil { 14 | return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(api.ParameterOutputID)) 15 | } 16 | 17 | return deps.RequestHandler.OutputFromOutputID(outputID) 18 | } 19 | 20 | func outputMetadataFromOutputID(c echo.Context) (*api.OutputMetadata, error) { 21 | outputID, err := httpserver.ParseOutputIDParam(c, api.ParameterOutputID) 22 | if err != nil { 23 | return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(api.ParameterOutputID)) 24 | } 25 | 26 | return deps.RequestHandler.OutputMetadataFromOutputID(outputID) 27 | } 28 | 29 | func outputWithMetadataFromOutputID(c echo.Context) (*api.OutputWithMetadataResponse, error) { 30 | outputID, err := httpserver.ParseOutputIDParam(c, api.ParameterOutputID) 31 | if err != nil { 32 | return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(api.ParameterOutputID)) 33 | } 34 | 35 | return deps.RequestHandler.OutputWithMetadataFromOutputID(outputID) 36 | } 37 | -------------------------------------------------------------------------------- /components/restapi/management/snapshots.go: -------------------------------------------------------------------------------- 1 | package management 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | 6 | "github.com/iotaledger/hive.go/ierrors" 7 | "github.com/iotaledger/iota.go/v4/api" 8 | ) 9 | 10 | func createSnapshots(_ echo.Context) (*api.CreateSnapshotResponse, error) { 11 | if deps.Protocol.Engines.Main.Get().IsSnapshotting() || deps.Protocol.Engines.Main.Get().Storage.IsPruning() { 12 | return nil, ierrors.WithMessage(echo.ErrServiceUnavailable, "node is already creating a snapshot or pruning is running") 13 | } 14 | 15 | targetSlot, filePath, err := 
deps.Protocol.Engines.Main.Get().ExportSnapshot(deps.SnapshotFilePath, true, true) 16 | if err != nil { 17 | return nil, ierrors.WithMessagef(echo.ErrInternalServerError, "creating snapshot failed: %w", err) 18 | } 19 | 20 | return &api.CreateSnapshotResponse{ 21 | Slot: targetSlot, 22 | FilePath: filePath, 23 | }, nil 24 | } 25 | -------------------------------------------------------------------------------- /components/restapi/routes.go: -------------------------------------------------------------------------------- 1 | package restapi 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/labstack/echo/v4" 7 | 8 | "github.com/iotaledger/inx-app/pkg/httpserver" 9 | "github.com/iotaledger/iota.go/v4/api" 10 | ) 11 | 12 | type RoutesResponse struct { 13 | Routes []string `json:"routes"` 14 | } 15 | 16 | func setupRoutes() { 17 | deps.Echo.GET(api.RouteHealth, func(c echo.Context) error { 18 | if deps.Protocol.Engines.Main.Get().SyncManager.IsNodeSynced() { 19 | return httpserver.JSONResponse(c, http.StatusOK, &api.HealthResponse{ 20 | IsHealthy: true, 21 | }) 22 | } 23 | 24 | return httpserver.JSONResponse(c, http.StatusServiceUnavailable, &api.HealthResponse{ 25 | IsHealthy: false, 26 | }) 27 | }) 28 | 29 | deps.Echo.GET(api.RouteRoutes, func(c echo.Context) error { 30 | resp := &RoutesResponse{ 31 | Routes: deps.RestRouteManager.Routes(), 32 | } 33 | 34 | return httpserver.JSONResponse(c, http.StatusOK, resp) 35 | }) 36 | } 37 | -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /deploy/ansible/deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: no 4 | roles: 5 | - exporter 6 | - firewall 7 | 8 | - hosts: cores:&internal_nodes 9 | gather_facts: yes 10 | roles: 11 | - 
wireguard 12 | - iota-core-node 13 | 14 | - hosts: metrics 15 | gather_facts: no 16 | vars: 17 | removeData: no 18 | roles: 19 | - metrics 20 | -------------------------------------------------------------------------------- /deploy/ansible/deploy_cores.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: cores:&internal_nodes 3 | gather_facts: yes 4 | roles: 5 | - iota-core-node -------------------------------------------------------------------------------- /deploy/ansible/hosts/feature.yml: -------------------------------------------------------------------------------- 1 | metrics: 2 | hosts: 3 | metrics-01.feature.shimmer.iota.cafe: 4 | 5 | cores: 6 | children: 7 | internal_nodes: 8 | hosts: 9 | node-01.feature.shimmer.iota.cafe: 10 | validatorAccountAddress: "{{ NODE_01_VALIDATOR_ACCOUNTADDRESS }}" 11 | validatorPrvKey: "{{ NODE_01_VALIDATOR_PRIVKEY }}" 12 | p2pIdentityPrvKey: "{{ NODE_01_P2PIDENTITY_PRIVKEY }}" 13 | node-02.feature.shimmer.iota.cafe: 14 | validatorAccountAddress: "{{ NODE_02_VALIDATOR_ACCOUNTADDRESS }}" 15 | validatorPrvKey: "{{ NODE_02_VALIDATOR_PRIVKEY }}" 16 | p2pIdentityPrvKey: "{{ NODE_02_P2PIDENTITY_PRIVKEY }}" 17 | node-03.feature.shimmer.iota.cafe: 18 | validatorAccountAddress: "{{ NODE_03_VALIDATOR_ACCOUNTADDRESS }}" 19 | validatorPrvKey: "{{ NODE_03_VALIDATOR_PRIVKEY }}" 20 | p2pIdentityPrvKey: "{{ NODE_03_P2PIDENTITY_PRIVKEY }}" 21 | node-04.feature.shimmer.iota.cafe: 22 | p2pIdentityPrvKey: "{{ NODE_04_P2PIDENTITY_PRIVKEY }}" 23 | node-05.feature.shimmer.iota.cafe: 24 | p2pIdentityPrvKey: "{{ NODE_05_P2PIDENTITY_PRIVKEY }}" 25 | vars: 26 | -------------------------------------------------------------------------------- /deploy/ansible/roles/exporter/files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | node_exporter: 3 | image: quay.io/prometheus/node-exporter:latest 4 | container_name: node_exporter 5 | 
command: 6 | - "--path.rootfs=/host" 7 | network_mode: host 8 | pid: host 9 | restart: unless-stopped 10 | volumes: 11 | - /:/host:ro,rslave 12 | cadvisor: 13 | image: gcr.io/cadvisor/cadvisor:latest 14 | container_name: cadvisor 15 | privileged: true 16 | ports: 17 | - "9111:8080" 18 | command: 19 | - --housekeeping_interval=30s # kubernetes default args 20 | - --max_housekeeping_interval=35s 21 | - --event_storage_event_limit=default=0 22 | - --event_storage_age_limit=default=0 23 | - --store_container_labels=false 24 | - --global_housekeeping_interval=30s 25 | - --event_storage_event_limit=default=0 26 | - --event_storage_age_limit=default=0 27 | - --disable_metrics=accelerator,advtcp,cpu_topology,disk,hugetlb,memory_numa,percpu,referenced_memory,resctrl,sched,tcp,udp 28 | - --enable_load_reader=true 29 | - --docker_only=true # only show stats for docker containers 30 | - --allow_dynamic_housekeeping=true 31 | - --storage_duration=1m0s 32 | volumes: 33 | - /:/rootfs:ro 34 | - /var/run:/var/run:rw 35 | - /sys:/sys:ro 36 | - /var/lib/docker/:/var/lib/docker:ro -------------------------------------------------------------------------------- /deploy/ansible/roles/exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create exporter directory 2 | file: 3 | path: /opt/exporter 4 | state: directory 5 | mode: '0755' 6 | 7 | - name: Copy docker-compose.yml 8 | copy: 9 | src: docker-compose.yml 10 | dest: /opt/exporter/docker-compose.yml 11 | mode: '0755' 12 | 13 | - name: Run exporters 14 | community.docker.docker_compose: 15 | project_src: /opt/exporter/ 16 | timeout: 180 17 | state: present 18 | pull: yes 19 | 20 | -------------------------------------------------------------------------------- /deploy/ansible/roles/firewall/files/after.rules: -------------------------------------------------------------------------------- 1 | # BEGIN UFW AND DOCKER 2 | *filter 3 | :ufw-user-forward - [0:0] 4 | 
:ufw-docker-logging-deny - [0:0] 5 | :DOCKER-USER - [0:0] 6 | -A DOCKER-USER -j ufw-user-forward 7 | 8 | -A DOCKER-USER -j RETURN -s 10.0.0.0/8 9 | -A DOCKER-USER -j RETURN -s 172.16.0.0/12 10 | -A DOCKER-USER -j RETURN -s 192.168.0.0/16 11 | 12 | -A DOCKER-USER -p udp -m udp --sport 53 --dport 1024:65535 -j RETURN 13 | 14 | -A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 192.168.0.0/16 15 | -A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 10.0.0.0/8 16 | -A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 172.16.0.0/12 17 | -A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 192.168.0.0/16 18 | -A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 10.0.0.0/8 19 | -A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 172.16.0.0/12 20 | 21 | -A DOCKER-USER -j RETURN 22 | 23 | -A ufw-docker-logging-deny -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW DOCKER BLOCK] " 24 | -A ufw-docker-logging-deny -j DROP 25 | 26 | COMMIT 27 | # END UFW AND DOCKER -------------------------------------------------------------------------------- /deploy/ansible/roles/firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install UFW 2 | apt: 3 | name: ufw 4 | state: present 5 | 6 | - name: Reset all UFW rules and disable UFW 7 | ufw: 8 | state: reset 9 | 10 | - name: Ensure content from source file is at the end of the config file 11 | blockinfile: 12 | path: /etc/ufw/after.rules 13 | marker: "# {mark} ANSIBLE MANAGED BLOCK FOR CONFIG END" 14 | block: "{{ lookup('file', 'after.rules') }}" 15 | insertafter: EOF 16 | 17 | - name: Allow ssh 18 | ufw: 19 | rule: allow 20 | port: '22' 21 | proto: tcp 22 | 23 | - name: Allow all traffic from 192.168.20.0/24 24 | ufw: 25 | rule: allow 26 | src: 192.168.20.0/24 27 | 28 | - name: 
Allow WireGuard traffic 29 | ufw: 30 | rule: allow 31 | port: '51820' 32 | proto: udp 33 | 34 | - name: Allow UDP port 53 from WireGuard, to resolve against systemd-resolved 35 | ufw: 36 | rule: allow 37 | interface: wg0 38 | direction: in 39 | port: '53' 40 | proto: udp 41 | 42 | - name: Deny all other incoming traffic 43 | ufw: 44 | rule: deny 45 | direction: in 46 | 47 | - name: Enable UFW 48 | ufw: 49 | state: enabled 50 | 51 | - name: Add manual internal hosts with IPv4 mappings 52 | lineinfile: 53 | path: /etc/hosts 54 | line: "{{ item.value }} {{ item.key }} {{ item.key }}.shimmer.iota.cafe" 55 | regexp: "^{{ item.value }}" 56 | state: present 57 | loop: 58 | - { key: 'node-01.feature', value: '192.168.20.6' } 59 | - { key: 'node-02.feature', value: '192.168.20.4' } 60 | - { key: 'node-03.feature', value: '192.168.20.5' } 61 | - { key: 'node-04.feature', value: '192.168.20.2' } 62 | - { key: 'node-05.feature', value: '192.168.20.3' } 63 | - { key: 'metrics-01.feature', value: '192.168.20.7' } 64 | loop_control: 65 | label: "{{ item.key }}" -------------------------------------------------------------------------------- /deploy/ansible/roles/iota-core-node/files/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "protocol": {} 3 | } -------------------------------------------------------------------------------- /deploy/ansible/roles/iota-core-node/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart rsyslog 2 | service: 3 | name: rsyslog 4 | state: restarted -------------------------------------------------------------------------------- /deploy/ansible/roles/iota-core-node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Pull docker image async 2 | shell: docker pull "{{iota_core_docker_image_repo}}:{{iota_core_docker_image_tag}}" 3 | async: 300 4 | poll: 5 5 | 6 | - name: Set log 
rotation for a maximum size of 5GB 7 | blockinfile: 8 | path: /etc/logrotate.d/rsyslog 9 | block: maxsize 5G 10 | marker: "# {mark} ANSIBLE MANAGED BLOCK" 11 | insertafter: "{\n" 12 | state: present 13 | 14 | - name: Create node directory 15 | file: 16 | path: /opt/iota-core 17 | state: directory 18 | mode: '0755' 19 | 20 | - name: Copy genesis snapshot 21 | copy: 22 | src: ../../tools/genesis-snapshot/genesis-snapshot.bin 23 | dest: /opt/iota-core/snapshot.bin 24 | mode: '0644' 25 | 26 | - name: Copy configuration file 27 | copy: 28 | src: config.json 29 | dest: /opt/iota-core/config.json 30 | mode: '0644' 31 | 32 | - name: Template docker-compose.yml 33 | template: 34 | src: docker-compose-iota-core.yml.j2 35 | dest: /opt/iota-core/docker-compose.yml 36 | mode: '0644' 37 | 38 | - name: Kill & teardown existing services 39 | community.docker.docker_compose: 40 | project_src: /opt/iota-core 41 | timeout: 5 42 | state: absent 43 | stopped: true 44 | remove_orphans: true 45 | remove_volumes: true 46 | 47 | - name: Create DB directory 48 | file: 49 | path: /opt/iota-core/data 50 | state: directory 51 | mode: '0777' 52 | 53 | - name: Remove DB 54 | shell: 55 | cmd: rm -rf /opt/iota-core/data/* 56 | 57 | - name: Clean conntrack 58 | shell: 59 | cmd: conntrack -D -p udp 60 | ignore_errors: true # sometimes conntrack might not be installed 61 | 62 | - name: Run node 63 | community.docker.docker_compose: 64 | project_src: /opt/iota-core 65 | timeout: 180 66 | state: present 67 | pull: yes 68 | -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/files/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | ## Default Elasticsearch configuration from Elasticsearch base image. 
2 | ## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml 3 | # 4 | cluster.name: "docker-cluster" 5 | network.host: 0.0.0.0 6 | -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/files/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | [database] 2 | wal = true -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/files/grafana/provisioning/dashboards/prometheus.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Prometheus' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | editable: true 10 | allowUiUpdates: true 11 | options: 12 | path: /etc/grafana/provisioning/dashboards -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/files/grafana/provisioning/datasources/prometheus.yml: -------------------------------------------------------------------------------- 1 | # config file version 2 | apiVersion: 1 3 | 4 | # list of datasources to insert/update depending 5 | # whats available in the database 6 | datasources: 7 | # name of the datasource. Required 8 | - name: Prometheus 9 | # datasource type. Required 10 | type: prometheus 11 | # access mode. direct or proxy. Required 12 | access: proxy 13 | # org id. 
will default to orgId 1 if not specified 14 | orgId: 1 15 | # url 16 | url: http://prometheus:9090 17 | # database password, if used 18 | password: 19 | # database user, if used 20 | user: 21 | # database name, if used 22 | database: 23 | # enable/disable basic auth 24 | basicAuth: false 25 | # basic auth username 26 | basicAuthUser: 27 | # basic auth password 28 | basicAuthPassword: 29 | # enable/disable with credentials headers 30 | withCredentials: 31 | # mark as default datasource. Max one per org 32 | isDefault: 33 | # fields that will be converted to json and stored in json_data 34 | jsonData: 35 | graphiteVersion: "1.1" 36 | tlsAuth: false 37 | tlsAuthWithCACert: false 38 | # json object of data that will be encrypted. 39 | secureJsonData: 40 | tlsCACert: "..." 41 | tlsClientCert: "..." 42 | tlsClientKey: "..." 43 | version: 1 44 | # allow users to edit datasources from the UI. 45 | editable: true 46 | -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/files/grafana/provisioning/notifiers/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iotaledger-archive/iota-core/3673e3988adc63febbbb2353e2c6798554a59ca4/deploy/ansible/roles/metrics/files/grafana/provisioning/notifiers/.gitkeep -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create directory 2 | file: 3 | path: /opt/metrics 4 | state: directory 5 | mode: '0755' 6 | 7 | - name: Copy configs 8 | copy: 9 | src: . 
10 | dest: /opt/metrics 11 | mode: '0644' 12 | 13 | - name: Template docker-compose.yml 14 | template: 15 | src: docker-compose.yml.j2 16 | dest: /opt/metrics/docker-compose.yml 17 | mode: '0644' 18 | 19 | - name: Template grafana-admin-password 20 | template: 21 | src: grafana-admin-password.j2 22 | dest: /opt/metrics/grafana/admin-password 23 | mode: '0644' 24 | 25 | - name: Template prometheus.yml 26 | template: 27 | src: prometheus.yml.j2 28 | dest: /opt/metrics/prometheus.yml 29 | mode: '0644' 30 | 31 | - name: Create logstash directory 32 | file: 33 | path: /opt/metrics/logstash 34 | state: directory 35 | mode: '0755' 36 | 37 | - name: Template logstash.yml 38 | template: 39 | src: logstash/logstash.yml.j2 40 | dest: /opt/metrics/logstash/logstash.yml 41 | mode: '0644' 42 | 43 | - name: Create logstash/pipeline directory 44 | file: 45 | path: /opt/metrics/logstash/pipeline 46 | state: directory 47 | mode: '0755' 48 | 49 | - name: Template pipeline logstash.conf 50 | template: 51 | src: logstash/pipeline/logstash.conf.j2 52 | dest: /opt/metrics/logstash/pipeline/logstash.conf 53 | mode: '0644' 54 | 55 | - name: Template kibana.yml 56 | template: 57 | src: kibana.yml.j2 58 | dest: /opt/metrics/kibana.yml 59 | mode: '0644' 60 | 61 | - name: Stop existing services gracefully 62 | community.docker.docker_compose: 63 | project_src: /opt/metrics 64 | timeout: 10 65 | state: present 66 | stopped: yes 67 | 68 | - name: Tear down existing services 69 | community.docker.docker_compose: 70 | project_src: /opt/metrics 71 | state: absent 72 | remove_volumes: "{{ removeData }}" 73 | 74 | - name: Run services 75 | community.docker.docker_compose: 76 | project_src: /opt/metrics 77 | timeout: 180 78 | state: present 79 | pull: yes 80 | -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/templates/grafana-admin-password.j2: -------------------------------------------------------------------------------- 1 | #jinja2: 
trim_blocks:True, lstrip_blocks:True 2 | {{ grafanaAdminPassword }} 3 | -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/templates/kibana.yml.j2: -------------------------------------------------------------------------------- 1 | #jinja2: trim_blocks:True, lstrip_blocks:True 2 | ## Default Kibana configuration from Kibana base image. 3 | ## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js 4 | # 5 | server.name: kibana 6 | server.host: "0.0.0.0" 7 | elasticsearch.username: "{{ elkElasticUser }}" 8 | elasticsearch.password: "{{ elkElasticPassword }}" 9 | elasticsearch.hosts: [ "http://elasticsearch:9200" ] 10 | -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/templates/logstash/logstash.yml.j2: -------------------------------------------------------------------------------- 1 | #jinja2: trim_blocks:True, lstrip_blocks:True 2 | ## Default Logstash configuration from Logstash base image. 
3 | ## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml 4 | # 5 | http.host: "0.0.0.0" 6 | xpack.monitoring.elasticsearch.hosts: ["http://elasticsearch:9200"] 7 | xpack.monitoring.elasticsearch.username: "{{ elkElasticUser }}" 8 | xpack.monitoring.elasticsearch.password: "{{ elkElasticPassword }}" 9 | -------------------------------------------------------------------------------- /deploy/ansible/roles/metrics/templates/prometheus.yml.j2: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 5s 3 | 4 | scrape_configs: 5 | - job_name: metrics 6 | static_configs: 7 | - targets: 8 | {% for host in groups['cores']%} 9 | - {{ host }}:9311 10 | {% endfor %} 11 | 12 | - job_name: node 13 | static_configs: 14 | - targets: 15 | {% for host in groups['cores'] + groups['metrics']%} 16 | - {{ host }}:9100 17 | {% endfor %} 18 | 19 | - job_name: cadvisor 20 | static_configs: 21 | - targets: 22 | {% for host in groups['cores'] + groups['metrics']%} 23 | - {{ host }}:9111 24 | {% endfor %} -------------------------------------------------------------------------------- /deploy/ansible/roles/wireguard/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install WireGuard 3 | apt: 4 | name: wireguard 5 | state: present 6 | 7 | - name: Ensure WireGuard configuration directory exists 8 | file: 9 | path: /etc/wireguard 10 | state: directory 11 | mode: '0700' 12 | 13 | - name: Deploy WireGuard server configuration 14 | template: 15 | src: wg0.conf.j2 16 | dest: /etc/wireguard/wg0.conf 17 | mode: '0600' 18 | 19 | - name: Enable and start WireGuard 20 | systemd: 21 | name: wg-quick@wg0 22 | enabled: yes 23 | state: restarted 24 | 25 | - name: Enable IPv4 forwarding 26 | sysctl: 27 | name: net.ipv4.ip_forward 28 | value: '1' 29 | state: present 30 | reload: yes 31 | 32 | - name: systemd-resolved to listen on wg0 for remote 
resolution of the local network
// State represents the acceptance state of an entity.
type State uint8

const (
	// Pending is the state of pending spenders.
	Pending State = iota

	// Accepted is the state of accepted spenders.
	Accepted

	// Rejected is the state of rejected spenders.
	Rejected
)

// IsPending returns true if the State is Pending.
func (c State) IsPending() bool {
	return c == Pending
}

// IsAccepted returns true if the State is Accepted.
func (c State) IsAccepted() bool {
	return c == Accepted
}

// IsRejected returns true if the State is Rejected.
func (c State) IsRejected() bool {
	return c == Rejected
}

// String returns a human-readable representation of the State.
func (c State) String() string {
	names := [...]string{"Pending", "Accepted", "Rejected"}
	if int(c) < len(names) {
		return names[c]
	}

	return "Unknown (" + strconv.Itoa(int(c)) + ")"
}
14 | return lo.Max(int64(math.Ceil(float64(totalWeightProvider())*bftThreshold)), 1) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /pkg/core/account/pool.go: -------------------------------------------------------------------------------- 1 | package account 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | "github.com/iotaledger/hive.go/serializer/v2" 6 | "github.com/iotaledger/hive.go/serializer/v2/stream" 7 | iotago "github.com/iotaledger/iota.go/v4" 8 | ) 9 | 10 | const poolBytesLength = 3 * serializer.UInt64ByteSize 11 | 12 | // Pool represents all the data we need for a given validator and epoch to calculate its rewards data. 13 | type Pool struct { 14 | // Total stake of the pool, including delegators 15 | PoolStake iotago.BaseToken 16 | // Validator's stake 17 | ValidatorStake iotago.BaseToken 18 | FixedCost iotago.Mana 19 | } 20 | 21 | func PoolFromBytes(bytes []byte) (*Pool, int, error) { 22 | p := new(Pool) 23 | 24 | var err error 25 | byteReader := stream.NewByteReader(bytes) 26 | 27 | if p.PoolStake, err = stream.Read[iotago.BaseToken](byteReader); err != nil { 28 | return nil, 0, ierrors.Wrap(err, "failed to read PoolStake") 29 | } 30 | if p.ValidatorStake, err = stream.Read[iotago.BaseToken](byteReader); err != nil { 31 | return nil, 0, ierrors.Wrap(err, "failed to read ValidatorStake") 32 | } 33 | if p.FixedCost, err = stream.Read[iotago.Mana](byteReader); err != nil { 34 | return nil, 0, ierrors.Wrap(err, "failed to read FixedCost") 35 | } 36 | 37 | return p, byteReader.BytesRead(), nil 38 | } 39 | 40 | func (p *Pool) Bytes() ([]byte, error) { 41 | byteBuffer := stream.NewByteBuffer(poolBytesLength) 42 | 43 | if err := stream.Write(byteBuffer, p.PoolStake); err != nil { 44 | return nil, ierrors.Wrap(err, "failed to write PoolStake") 45 | } 46 | if err := stream.Write(byteBuffer, p.ValidatorStake); err != nil { 47 | return nil, ierrors.Wrap(err, "failed to write ValidatorStake") 48 
// MockedRank is a mocked rank implementation that is used for testing.
type MockedRank int

// Compare compares the MockedRank to another MockedRank, returning -1, 0,
// or 1 when m is respectively less than, equal to, or greater than other.
func (m MockedRank) Compare(other MockedRank) int {
	if m < other {
		return -1
	}
	if m > other {
		return 1
	}

	return 0
}
21 | func NewVote[Rank constraints.Comparable[Rank]](voter account.SeatIndex, rank Rank) *Vote[Rank] { 22 | return &Vote[Rank]{ 23 | Voter: voter, 24 | Rank: rank, 25 | liked: true, 26 | } 27 | } 28 | 29 | // IsLiked returns true if the vote is "positive" (voting "for something"). 30 | func (v *Vote[Rank]) IsLiked() bool { 31 | return v.liked 32 | } 33 | 34 | // WithLiked returns a copy of the vote with the given liked value. 35 | func (v *Vote[Rank]) WithLiked(liked bool) *Vote[Rank] { 36 | updatedVote := new(Vote[Rank]) 37 | updatedVote.Voter = v.Voter 38 | updatedVote.Rank = v.Rank 39 | updatedVote.liked = liked 40 | 41 | return updatedVote 42 | } 43 | -------------------------------------------------------------------------------- /pkg/core/weight/comparison.go: -------------------------------------------------------------------------------- 1 | package weight 2 | 3 | // Comparison is the result of a comparison between two values. 4 | type Comparison = int 5 | 6 | const ( 7 | // Lighter is the result of a comparison between two values when the first value is lighter than the second value. 8 | Lighter Comparison = -1 9 | 10 | // Equal is the result of a comparison between two values when the first value is equal to the second value. 11 | Equal Comparison = 0 12 | 13 | // Heavier is the result of a comparison between two values when the first value is heavier than the second value. 14 | Heavier Comparison = 1 15 | ) 16 | -------------------------------------------------------------------------------- /pkg/daemon/shutdown.go: -------------------------------------------------------------------------------- 1 | package daemon 2 | 3 | // Please add the dependencies if you add your own priority here. 4 | // Otherwise investigating deadlocks at shutdown is much more complicated. 
5 | 6 | const ( 7 | PriorityCloseDatabase = iota // no dependencies 8 | PriorityP2P 9 | PriorityProtocol 10 | PriorityRestAPI 11 | PriorityINX 12 | PriorityDashboardMetrics 13 | PriorityDashboard 14 | PriorityMetrics 15 | ) 16 | -------------------------------------------------------------------------------- /pkg/libp2putil/io.go: -------------------------------------------------------------------------------- 1 | package libp2putil 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | 7 | "github.com/multiformats/go-varint" 8 | "google.golang.org/protobuf/proto" 9 | ) 10 | 11 | // UvarintWriter writes protobuf blocks. 12 | type UvarintWriter struct { 13 | w io.Writer 14 | } 15 | 16 | // NewDelimitedWriter returns a new UvarintWriter. 17 | func NewDelimitedWriter(w io.Writer) *UvarintWriter { 18 | return &UvarintWriter{w} 19 | } 20 | 21 | // WriteBlk writes protobuf block. 22 | func (uw *UvarintWriter) WriteBlk(blk proto.Message) (err error) { 23 | var data []byte 24 | lenBuf := make([]byte, varint.MaxLenUvarint63) 25 | 26 | data, err = proto.Marshal(blk) 27 | if err != nil { 28 | return err 29 | } 30 | 31 | length := uint64(len(data)) 32 | n := varint.PutUvarint(lenBuf, length) 33 | 34 | _, err = uw.w.Write(lenBuf[:n]) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | _, err = uw.w.Write(data) 40 | 41 | return err 42 | } 43 | 44 | // UvarintReader read protobuf blocks. 45 | type UvarintReader struct { 46 | r *bufio.Reader 47 | } 48 | 49 | // NewDelimitedReader returns a new UvarintReader. 50 | func NewDelimitedReader(r io.Reader) *UvarintReader { 51 | return &UvarintReader{r: bufio.NewReader(r)} 52 | } 53 | 54 | // ReadBlk read protobuf blocks. 
55 | func (ur *UvarintReader) ReadBlk(blk proto.Message) error { 56 | length64, err := varint.ReadUvarint(ur.r) 57 | if err != nil { 58 | return err 59 | } 60 | 61 | buf := make([]byte, length64) 62 | if _, err := io.ReadFull(ur.r, buf); err != nil { 63 | return err 64 | } 65 | 66 | return proto.Unmarshal(buf, blk) 67 | } 68 | -------------------------------------------------------------------------------- /pkg/metrics/database_metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "go.uber.org/atomic" 5 | ) 6 | 7 | // DatabaseMetrics defines database metrics over the entire runtime of the node. 8 | type DatabaseMetrics struct { 9 | // The total number of compactions. 10 | CompactionCount atomic.Uint32 11 | // Whether compaction is running or not. 12 | CompactionRunning atomic.Bool 13 | } 14 | -------------------------------------------------------------------------------- /pkg/metrics/server_metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import "go.uber.org/atomic" 4 | 5 | // ServerMetrics defines metrics over the entire runtime of the node. 6 | type ServerMetrics struct { 7 | // The number of blocks that have passed the filters. 8 | Blocks atomic.Uint64 9 | // The number of confirmed blocks. 
10 | ConfirmedBlocks atomic.Uint64 11 | } 12 | -------------------------------------------------------------------------------- /pkg/model/eviction_index.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | iotago "github.com/iotaledger/iota.go/v4" 5 | ) 6 | 7 | type EvictionIndex[K iotago.SlotIndex | iotago.EpochIndex] struct { 8 | index *K 9 | } 10 | 11 | func NewEvictionIndex[K iotago.SlotIndex | iotago.EpochIndex]() *EvictionIndex[K] { 12 | return &EvictionIndex[K]{} 13 | } 14 | 15 | func (e *EvictionIndex[K]) ShouldEvict(newIndex K) bool { 16 | if e.index == nil { 17 | return true 18 | } 19 | 20 | return newIndex > *e.index 21 | } 22 | 23 | func (e *EvictionIndex[K]) MarkEvicted(index K) (previous K, hadPrevious bool) { 24 | var prev K 25 | var hadPrev bool 26 | 27 | if e.index == nil { 28 | e.index = new(K) 29 | hadPrev = false 30 | } else { 31 | prev = *e.index 32 | hadPrev = true 33 | } 34 | 35 | *e.index = index 36 | 37 | return prev, hadPrev 38 | } 39 | 40 | func (e *EvictionIndex[K]) Index() (current K, valid bool) { 41 | if e.index == nil { 42 | return 0, false 43 | } 44 | 45 | return *e.index, true 46 | } 47 | 48 | func (e *EvictionIndex[K]) NextIndex() K { 49 | if e.index == nil { 50 | return 0 51 | } 52 | 53 | return *e.index + 1 54 | } 55 | 56 | func (e *EvictionIndex[K]) IsEvicted(index K) bool { 57 | if e.index == nil { 58 | return false 59 | } 60 | 61 | return index <= *e.index 62 | } 63 | -------------------------------------------------------------------------------- /pkg/model/parents.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | iotago "github.com/iotaledger/iota.go/v4" 5 | ) 6 | 7 | // ParentReferences is a map between parent type and block IDs. 
8 | type ParentReferences map[iotago.ParentsType]iotago.BlockIDs 9 | -------------------------------------------------------------------------------- /pkg/model/pruning_index.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | "github.com/iotaledger/hive.go/kvstore" 6 | "github.com/iotaledger/hive.go/runtime/syncutils" 7 | iotago "github.com/iotaledger/iota.go/v4" 8 | ) 9 | 10 | type PruningIndex struct { 11 | kv kvstore.KVStore 12 | key kvstore.Realm 13 | 14 | memLastPrunedEpoch *EvictionIndex[iotago.EpochIndex] 15 | lastPrunedMutex syncutils.RWMutex 16 | } 17 | 18 | func NewPruningIndex(kv kvstore.KVStore, key kvstore.Realm) *PruningIndex { 19 | return &PruningIndex{ 20 | kv: kv, 21 | key: key, 22 | memLastPrunedEpoch: NewEvictionIndex[iotago.EpochIndex](), 23 | } 24 | } 25 | 26 | func (e *PruningIndex) MarkEvicted(epoch iotago.EpochIndex) error { 27 | e.lastPrunedMutex.Lock() 28 | defer e.lastPrunedMutex.Unlock() 29 | 30 | e.memLastPrunedEpoch.MarkEvicted(epoch) 31 | 32 | return e.kv.Set(e.key, epoch.MustBytes()) 33 | } 34 | 35 | func (e *PruningIndex) Index() (currentEpoch iotago.EpochIndex, valid bool) { 36 | e.lastPrunedMutex.RLock() 37 | defer e.lastPrunedMutex.RUnlock() 38 | 39 | return e.memLastPrunedEpoch.Index() 40 | } 41 | 42 | func (e *PruningIndex) NextIndex() iotago.EpochIndex { 43 | e.lastPrunedMutex.RLock() 44 | defer e.lastPrunedMutex.RUnlock() 45 | 46 | return e.memLastPrunedEpoch.NextIndex() 47 | } 48 | 49 | func (e *PruningIndex) RestoreFromDisk() error { 50 | e.lastPrunedMutex.Lock() 51 | defer e.lastPrunedMutex.Unlock() 52 | 53 | lastPrunedBytes, err := e.kv.Get(e.key) 54 | if err != nil { 55 | if ierrors.Is(err, kvstore.ErrKeyNotFound) { 56 | return nil 57 | } 58 | 59 | return err 60 | } 61 | 62 | lastPrunedEpoch, _, err := iotago.EpochIndexFromBytes(lastPrunedBytes) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | 
e.memLastPrunedEpoch.MarkEvicted(lastPrunedEpoch) 68 | 69 | return nil 70 | } 71 | -------------------------------------------------------------------------------- /pkg/model/version_and_hash.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | "github.com/iotaledger/hive.go/lo" 6 | "github.com/iotaledger/hive.go/serializer/v2/byteutils" 7 | "github.com/iotaledger/hive.go/stringify" 8 | iotago "github.com/iotaledger/iota.go/v4" 9 | ) 10 | 11 | const VersionAndHashSize = iotago.VersionLength + iotago.IdentifierLength 12 | 13 | type VersionAndHash struct { 14 | Version iotago.Version `serix:""` 15 | Hash iotago.Identifier `serix:""` 16 | } 17 | 18 | func (v VersionAndHash) Bytes() ([]byte, error) { 19 | // iotago.Version and iotago.Identifier can't panic on .Bytes() call. 20 | return byteutils.ConcatBytes(lo.PanicOnErr(v.Version.Bytes()), lo.PanicOnErr(v.Hash.Bytes())), nil 21 | } 22 | 23 | func VersionAndHashFromBytes(bytes []byte) (VersionAndHash, int, error) { 24 | version, versionBytesConsumed, err := iotago.VersionFromBytes(bytes) 25 | if err != nil { 26 | return VersionAndHash{}, 0, ierrors.Wrap(err, "failed to parse version") 27 | } 28 | 29 | hash, hashBytesConsumed, err := iotago.IdentifierFromBytes(bytes[versionBytesConsumed:]) 30 | if err != nil { 31 | return VersionAndHash{}, 0, ierrors.Wrap(err, "failed to parse hash") 32 | } 33 | 34 | return VersionAndHash{version, hash}, versionBytesConsumed + hashBytesConsumed, nil 35 | } 36 | 37 | func (v VersionAndHash) String() string { 38 | return stringify.Struct("VersionAndHash", 39 | stringify.NewStructField("Version", byte(v.Version)), 40 | stringify.NewStructField("Hash", v.Hash), 41 | ) 42 | } 43 | -------------------------------------------------------------------------------- /pkg/network/endpoint.go: -------------------------------------------------------------------------------- 1 | package 
network 2 | 3 | import ( 4 | "github.com/libp2p/go-libp2p/core/peer" 5 | "google.golang.org/protobuf/proto" 6 | ) 7 | 8 | const ( 9 | CoreProtocolID = "iota-core/1.0.0" 10 | ) 11 | 12 | type Endpoint interface { 13 | LocalPeerID() peer.ID 14 | RegisterProtocol(factory func() proto.Message, handler func(peer.ID, proto.Message) error) 15 | UnregisterProtocol() 16 | Send(packet proto.Message, to ...peer.ID) 17 | Shutdown() 18 | } 19 | -------------------------------------------------------------------------------- /pkg/network/errors.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import "github.com/iotaledger/hive.go/ierrors" 4 | 5 | var ( 6 | // ErrNotRunning is returned when a peer is added to a stopped or not yet started network manager. 7 | ErrNotRunning = ierrors.New("manager not running") 8 | // ErrUnknownPeer is returned when the specified peer is not known to the network manager. 9 | ErrUnknownPeer = ierrors.New("unknown peer") 10 | // ErrLoopbackPeer is returned when the own peer is added. 11 | ErrLoopbackPeer = ierrors.New("loopback connection not allowed") 12 | // ErrDuplicatePeer is returned when the same peer is added more than once. 13 | ErrDuplicatePeer = ierrors.New("already connected") 14 | // ErrMaxAutopeeringPeersReached is returned when the maximum number of autopeering peers is reached. 15 | ErrMaxAutopeeringPeersReached = ierrors.New("max autopeering peers reached") 16 | ) 17 | -------------------------------------------------------------------------------- /pkg/network/neighbor.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | // Neighbor is a Peer with an established connection in the gossip layer. 
4 | type Neighbor interface { 5 | Peer() *Peer 6 | PacketsRead() uint64 7 | PacketsWritten() uint64 8 | } 9 | -------------------------------------------------------------------------------- /pkg/network/p2p/metrics.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "sync/atomic" 5 | ) 6 | 7 | // Metrics defines P2P metrics over the entire runtime of the node. 8 | type Metrics struct { 9 | // The number of total received blocks. 10 | IncomingBlocks atomic.Uint32 11 | // The number of received blocks which are new. 12 | IncomingNewBlocks atomic.Uint32 13 | // The number of sent blocks. 14 | OutgoingBlocks atomic.Uint32 15 | } 16 | -------------------------------------------------------------------------------- /pkg/network/p2p/peerconfig.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "github.com/libp2p/go-libp2p/core/peer" 5 | "github.com/multiformats/go-multiaddr" 6 | 7 | "github.com/iotaledger/hive.go/ds/onchangemap" 8 | "github.com/iotaledger/hive.go/lo" 9 | ) 10 | 11 | // ComparablePeerID implements the constraints.ComparableStringer interface for the onChangeMap. 12 | type ComparablePeerID struct { 13 | peerIDBase58 string 14 | } 15 | 16 | func NewComparablePeerID(peerID peer.ID) *ComparablePeerID { 17 | return &ComparablePeerID{ 18 | peerIDBase58: peerID.String(), 19 | } 20 | } 21 | 22 | func (c *ComparablePeerID) Key() string { 23 | return c.peerIDBase58 24 | } 25 | 26 | func (c *ComparablePeerID) String() string { 27 | return c.peerIDBase58 28 | } 29 | 30 | // PeerConfig holds the initial information about peers. 31 | type PeerConfig struct { 32 | MultiAddress string `json:"multiAddress" koanf:"multiAddress"` 33 | Alias string `json:"alias" koanf:"alias"` 34 | } 35 | 36 | // PeerConfigItem implements the Item interface for the onChangeMap. 
37 | type PeerConfigItem struct { 38 | *PeerConfig 39 | comparablePeerID *ComparablePeerID 40 | } 41 | 42 | func NewPeerConfigItem(peerConfig *PeerConfig) (*PeerConfigItem, error) { 43 | multiAddress, err := multiaddr.NewMultiaddr(peerConfig.MultiAddress) 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | newPeerAddrInfo, err := peer.AddrInfoFromP2pAddr(multiAddress) 49 | if err != nil { 50 | return nil, err 51 | } 52 | 53 | return &PeerConfigItem{ 54 | PeerConfig: &PeerConfig{ 55 | MultiAddress: peerConfig.MultiAddress, 56 | Alias: peerConfig.Alias, 57 | }, 58 | comparablePeerID: NewComparablePeerID(newPeerAddrInfo.ID), 59 | }, nil 60 | } 61 | 62 | func (p *PeerConfigItem) ID() *ComparablePeerID { 63 | return p.comparablePeerID 64 | } 65 | 66 | func (p *PeerConfigItem) Clone() onchangemap.Item[string, *ComparablePeerID] { 67 | return lo.PanicOnErr(NewPeerConfigItem(p.PeerConfig)) 68 | } 69 | -------------------------------------------------------------------------------- /pkg/network/p2p/proto/negotiation.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "github.com/iotaledger/iota-core/pkg/network/p2p/proto"; 4 | 5 | package p2p; 6 | 7 | message Negotiation {} -------------------------------------------------------------------------------- /pkg/network/protocols/core/models/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "github.com/iotaledger/iota-core/pkg/network/protocols/core/models"; 4 | 5 | package models; 6 | 7 | message Packet { 8 | oneof body { 9 | Block block = 1; 10 | BlockRequest block_request = 2; 11 | SlotCommitment slot_commitment = 3; 12 | SlotCommitmentRequest slot_commitment_request = 4; 13 | Attestations attestations = 5; 14 | AttestationsRequest attestations_request = 6; 15 | WarpSyncRequest warp_sync_request = 7; 16 | WarpSyncResponse 
warp_sync_response = 8; 17 | } 18 | } 19 | 20 | message Block { 21 | bytes bytes = 1; 22 | } 23 | 24 | message BlockRequest { 25 | bytes block_id = 1; 26 | } 27 | 28 | message SlotCommitment { 29 | bytes bytes = 1; 30 | } 31 | 32 | message SlotCommitmentRequest { 33 | bytes commitment_id = 1; 34 | } 35 | 36 | message Attestations { 37 | bytes commitment = 1; 38 | bytes attestations = 2; 39 | bytes merkle_proof = 3; 40 | } 41 | 42 | message AttestationsRequest { 43 | bytes commitment_id = 1; 44 | } 45 | 46 | message WarpSyncRequest { 47 | bytes commitment_id = 1; 48 | } 49 | 50 | message WarpSyncResponse { 51 | bytes commitment_id = 1; 52 | bytes payload = 2; 53 | } 54 | -------------------------------------------------------------------------------- /pkg/protocol/engine/accounts/mana.go: -------------------------------------------------------------------------------- 1 | package accounts 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/syncutils" 5 | "github.com/iotaledger/hive.go/stringify" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | // Mana is the stored and potential mana value of an account collected on the UTXO layer - used by the Scheduler. 
10 | type Mana struct { 11 | value iotago.Mana `serix:""` 12 | excessBaseTokens iotago.BaseToken `serix:""` 13 | updateTime iotago.SlotIndex `serix:""` 14 | 15 | mutex syncutils.RWMutex 16 | } 17 | 18 | func NewMana(value iotago.Mana, excessBaseTokens iotago.BaseToken, updateTime iotago.SlotIndex) *Mana { 19 | return &Mana{ 20 | value: value, 21 | excessBaseTokens: excessBaseTokens, 22 | updateTime: updateTime, 23 | } 24 | } 25 | 26 | func (m *Mana) Value() iotago.Mana { 27 | m.mutex.RLock() 28 | defer m.mutex.RUnlock() 29 | 30 | return m.value 31 | } 32 | 33 | func (m *Mana) ExcessBaseTokens() iotago.BaseToken { 34 | m.mutex.RLock() 35 | defer m.mutex.RUnlock() 36 | 37 | return m.excessBaseTokens 38 | } 39 | 40 | func (m *Mana) UpdateTime() iotago.SlotIndex { 41 | m.mutex.RLock() 42 | defer m.mutex.RUnlock() 43 | 44 | return m.updateTime 45 | } 46 | 47 | func (m *Mana) String() string { 48 | m.mutex.RLock() 49 | defer m.mutex.RUnlock() 50 | 51 | return stringify.Struct("Mana", 52 | stringify.NewStructField("Value", uint64(m.value)), 53 | stringify.NewStructField("ExcessBaseTokens", uint64(m.excessBaseTokens)), 54 | stringify.NewStructField("UpdateTime", uint32(m.updateTime)), 55 | ) 56 | } 57 | -------------------------------------------------------------------------------- /pkg/protocol/engine/attestation/attestations.go: -------------------------------------------------------------------------------- 1 | package attestation 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/iotaledger/hive.go/ads" 7 | "github.com/iotaledger/hive.go/runtime/module" 8 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 9 | iotago "github.com/iotaledger/iota.go/v4" 10 | ) 11 | 12 | type Attestations interface { 13 | // Get returns the attestations that are included in the commitment of the given slot as list. 14 | // If attestationCommitmentOffset=3 and commitment is 10, then the returned attestations are blocks from 7 to 10 that commit to at least 7. 
15 | Get(index iotago.SlotIndex) (attestations []*iotago.Attestation, err error) 16 | 17 | // GetMap returns the attestations that are included in the commitment of the given slot as ads.Map. 18 | // If attestationCommitmentOffset=3 and commitment is 10, then the returned attestations are blocks from 7 to 10 that commit to at least 7. 19 | GetMap(index iotago.SlotIndex) (attestations ads.Map[iotago.Identifier, iotago.AccountID, *iotago.Attestation], err error) 20 | AddAttestationFromValidationBlock(block *blocks.Block) error 21 | Commit(index iotago.SlotIndex) (newCW uint64, attestationsRoot iotago.Identifier, err error) 22 | 23 | Import(reader io.ReadSeeker) (err error) 24 | Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (err error) 25 | Rollback(index iotago.SlotIndex) (err error) 26 | 27 | // Reset resets the component to a clean state as if it was created at the last commitment. 28 | Reset() 29 | 30 | RestoreFromDisk() (err error) 31 | 32 | module.Module 33 | } 34 | -------------------------------------------------------------------------------- /pkg/protocol/engine/attestation/slotattestation/manager_test.go: -------------------------------------------------------------------------------- 1 | package slotattestation_test 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestManager(t *testing.T) { 8 | tf := NewTestFramework(t) 9 | 10 | tf.AssertCommit(0, 0, map[string]string{}, true) 11 | 12 | // Slot 1 13 | { 14 | tf.AddFutureAttestation("A", "A.1-0", 1, 0) 15 | tf.AddFutureAttestation("B", "B.1-0", 1, 0) 16 | tf.AddFutureAttestation("B", "B.1.2-0", 1, 0) 17 | tf.AddFutureAttestation("C", "C.1-0", 1, 0) 18 | 19 | tf.AssertCommit(1, 0, map[string]string{}, true) 20 | } 21 | 22 | // Slot 2 23 | { 24 | tf.AddFutureAttestation("A", "A.2-0", 2, 0) 25 | // This should not have any effect. 
26 | tf.AddFutureAttestation("C", "C.3-2", 3, 2) 27 | 28 | tf.AssertCommit(2, 3, map[string]string{ 29 | "A": "A.2-0", 30 | "B": "B.1.2-0", 31 | "C": "C.1-0", 32 | }) 33 | } 34 | 35 | // Slot 3 36 | { 37 | tf.AddFutureAttestation("A", "A.3-0", 3, 0) 38 | tf.AddFutureAttestation("B", "B.3-1", 3, 1) 39 | 40 | tf.AssertCommit(3, 5, map[string]string{ 41 | "B": "B.3-1", 42 | "C": "C.3-2", 43 | }) 44 | } 45 | 46 | // Slot 4 47 | { 48 | tf.AssertCommit(4, 6, map[string]string{ 49 | "C": "C.3-2", 50 | }) 51 | } 52 | 53 | // Slot 5 54 | { 55 | tf.AssertCommit(5, 6, map[string]string{}) 56 | } 57 | 58 | // Slot 6 59 | { 60 | tf.AddFutureAttestation("A", "A.6-5", 6, 5) 61 | tf.AddFutureAttestation("A", "A.6-4", 6, 4) 62 | 63 | tf.AddFutureAttestation("B", "B.6-4", 6, 4) 64 | 65 | tf.AddFutureAttestation("C", "C.6-3", 6, 3) 66 | 67 | tf.AssertCommit(6, 8, map[string]string{ 68 | "A": "A.6-5", 69 | "B": "B.6-4", 70 | }) 71 | } 72 | 73 | // Slot 7 74 | { 75 | tf.AssertCommit(7, 9, map[string]string{ 76 | "A": "A.6-5", 77 | }) 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /pkg/protocol/engine/blockdag/blockdag.go: -------------------------------------------------------------------------------- 1 | package blockdag 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | "github.com/iotaledger/iota-core/pkg/model" 6 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 7 | iotago "github.com/iotaledger/iota.go/v4" 8 | ) 9 | 10 | type BlockDAG interface { 11 | // Append is used to append new Blocks to the BlockDAG. It is the main function of the BlockDAG that triggers Events. 12 | Append(modelBlock *model.Block) (block *blocks.Block, wasAppended bool, err error) 13 | 14 | // GetOrRequestBlock returns the Block with the given BlockID from the BlockDAG (and requests it from the network if 15 | // it is missing). 
If the requested Block is below the eviction threshold, then this method will return a nil block 16 | // without requesting it. 17 | GetOrRequestBlock(blockID iotago.BlockID) (block *blocks.Block, requested bool) 18 | 19 | // Reset resets the component to a clean state as if it was created at the last commitment. 20 | Reset() 21 | 22 | module.Module 23 | } 24 | -------------------------------------------------------------------------------- /pkg/protocol/engine/blockdag/events.go: -------------------------------------------------------------------------------- 1 | package blockdag 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | // Events is a collection of Tangle related Events. 10 | type Events struct { 11 | // BlockAppended is triggered when a previously unknown Block is appended to the block DAG. 12 | BlockAppended *event.Event1[*blocks.Block] 13 | 14 | // BlockSolid is triggered when a Block becomes solid (its entire past cone is known and solid). 15 | BlockSolid *event.Event1[*blocks.Block] 16 | 17 | // BlockMissing is triggered when a referenced Block was not appended, yet. 18 | BlockMissing *event.Event1[*blocks.Block] 19 | 20 | // MissingBlockAppended is triggered when a previously missing Block was appended. 21 | MissingBlockAppended *event.Event1[*blocks.Block] 22 | 23 | // BlockNotAppended is triggered when an incoming Block could not be successfully appended. 24 | BlockNotAppended *event.Event1[iotago.BlockID] 25 | 26 | // BlockInvalid is triggered when a Block is found to be invalid. 27 | BlockInvalid *event.Event2[*blocks.Block, error] 28 | 29 | event.Group[Events, *Events] 30 | } 31 | 32 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 
33 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 34 | return &Events{ 35 | BlockAppended: event.New1[*blocks.Block](), 36 | BlockSolid: event.New1[*blocks.Block](), 37 | BlockMissing: event.New1[*blocks.Block](), 38 | MissingBlockAppended: event.New1[*blocks.Block](), 39 | BlockNotAppended: event.New1[iotago.BlockID](), 40 | BlockInvalid: event.New2[*blocks.Block, error](), 41 | } 42 | }) 43 | -------------------------------------------------------------------------------- /pkg/protocol/engine/booker/booker.go: -------------------------------------------------------------------------------- 1 | package booker 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | ) 6 | 7 | type Booker interface { 8 | // Reset resets the component to a clean state as if it was created at the last commitment. 9 | Reset() 10 | 11 | module.Module 12 | } 13 | -------------------------------------------------------------------------------- /pkg/protocol/engine/booker/events.go: -------------------------------------------------------------------------------- 1 | package booker 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" 7 | ) 8 | 9 | type Events struct { 10 | BlockBooked *event.Event1[*blocks.Block] 11 | BlockInvalid *event.Event2[*blocks.Block, error] 12 | TransactionAccepted *event.Event1[mempool.TransactionMetadata] 13 | TransactionInvalid *event.Event2[mempool.TransactionMetadata, error] 14 | 15 | event.Group[Events, *Events] 16 | } 17 | 18 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 
19 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 20 | return &Events{ 21 | BlockBooked: event.New1[*blocks.Block](), 22 | BlockInvalid: event.New2[*blocks.Block, error](), 23 | TransactionAccepted: event.New1[mempool.TransactionMetadata](), 24 | TransactionInvalid: event.New2[mempool.TransactionMetadata, error](), 25 | } 26 | }) 27 | -------------------------------------------------------------------------------- /pkg/protocol/engine/clock/clock.go: -------------------------------------------------------------------------------- 1 | package clock 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/runtime/module" 7 | ) 8 | 9 | // Clock is an engine module that provides different notions of time according to the different levels of finality. 10 | type Clock interface { 11 | // Accepted returns a notion of time that is anchored to the latest accepted block. 12 | Accepted() RelativeTime 13 | 14 | Confirmed() RelativeTime 15 | 16 | // Snapshot returns a snapshot of all time values tracked in the clock read atomically. 17 | Snapshot() *Snapshot 18 | 19 | // Reset resets the time values tracked in the clock to the given time. 20 | Reset(newTime time.Time) 21 | 22 | // Module embeds the required methods of the modular framework. 23 | module.Module 24 | } 25 | 26 | // Snapshot contains the snapshot of all time values tracked in the clock. 27 | type Snapshot struct { 28 | AcceptedTime time.Time 29 | RelativeAcceptedTime time.Time 30 | ConfirmedTime time.Time 31 | RelativeConfirmedTime time.Time 32 | } 33 | -------------------------------------------------------------------------------- /pkg/protocol/engine/clock/events.go: -------------------------------------------------------------------------------- 1 | package clock 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/runtime/event" 7 | ) 8 | 9 | // Events contains a dictionary of events that are triggered by the Clock. 
10 | type Events struct { 11 | // AcceptedTimeUpdated is triggered when the accepted time is updated. 12 | AcceptedTimeUpdated *event.Event1[time.Time] 13 | // ConfirmedTimeUpdated is triggered when the confirmed time is updated. 14 | ConfirmedTimeUpdated *event.Event1[time.Time] 15 | 16 | // Group is trait that makes the dictionary linkable. 17 | event.Group[Events, *Events] 18 | } 19 | 20 | // NewEvents is the constructor of the Events object. 21 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 22 | return &Events{ 23 | AcceptedTimeUpdated: event.New1[time.Time](), 24 | ConfirmedTimeUpdated: event.New1[time.Time](), 25 | } 26 | }) 27 | -------------------------------------------------------------------------------- /pkg/protocol/engine/clock/relativetime.go: -------------------------------------------------------------------------------- 1 | package clock 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // RelativeTime is a time value that monotonically advances with the system clock. 8 | type RelativeTime interface { 9 | // Time returns the original time value. 10 | Time() time.Time 11 | 12 | // RelativeTime returns the time value after it has advanced with the system clock. 13 | RelativeTime() time.Time 14 | } 15 | -------------------------------------------------------------------------------- /pkg/protocol/engine/congestioncontrol/scheduler/events.go: -------------------------------------------------------------------------------- 1 | package scheduler 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | ) 7 | 8 | type Events struct { 9 | // BlockEnqueued is triggered when a block is added to the queue. 10 | BlockEnqueued *event.Event1[*blocks.Block] 11 | // BlockScheduled is triggered when a block is scheduled. 12 | BlockScheduled *event.Event1[*blocks.Block] 13 | // BlockSkipped is triggered when a block in the buffer is accepted. 
14 | // Skipping a block has the same effect as scheduling it, i.e., it is passed to tip manager and gossiped. 15 | BlockSkipped *event.Event1[*blocks.Block] 16 | // BlockDropped is triggered when a block in the buffer is dropped. Dropped blocks are not passed to tip manager and not gossiped. 17 | BlockDropped *event.Event2[*blocks.Block, error] 18 | 19 | event.Group[Events, *Events] 20 | } 21 | 22 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 23 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 24 | return &Events{ 25 | BlockEnqueued: event.New1[*blocks.Block](), 26 | BlockScheduled: event.New1[*blocks.Block](), 27 | BlockSkipped: event.New1[*blocks.Block](), 28 | BlockDropped: event.New2[*blocks.Block, error](), 29 | } 30 | }) 31 | -------------------------------------------------------------------------------- /pkg/protocol/engine/congestioncontrol/scheduler/scheduler.go: -------------------------------------------------------------------------------- 1 | package scheduler 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type Scheduler interface { 10 | // AddBlock adds a block to the scheduling buffer. 11 | AddBlock(block *blocks.Block) 12 | // IsBlockIssuerReady returns true if the block issuer is ready to issuer a block, i.e., if the block issuer were to add a block to the scheduler, would it be scheduled. 13 | IsBlockIssuerReady(accountID iotago.AccountID, workScores ...iotago.WorkScore) bool 14 | // BasicBufferSize returns the current buffer size of the Scheduler as block count. 15 | BasicBufferSize() int 16 | // ValidatorBufferSize returns the current buffer size of the Scheduler as block count. 17 | ValidatorBufferSize() int 18 | // ReadyBlocksCount returns the number of ready blocks. 
19 | ReadyBlocksCount() int 20 | // IssuerQueueBlockCount returns the queue size of the given issuer as block count. 21 | IssuerQueueBlockCount(issuerID iotago.AccountID) int 22 | // IssuerQueueWork returns the queue size of the given issuer in work units. 23 | IssuerQueueWork(issuerID iotago.AccountID) iotago.WorkScore 24 | // ValidatorQueueBlockCount returns the queue size of the given validator as block count. 25 | ValidatorQueueBlockCount(validatorID iotago.AccountID) int 26 | // Reset resets the component to a clean state as if it was created at the last commitment. 27 | Reset() 28 | 29 | module.Module 30 | } 31 | -------------------------------------------------------------------------------- /pkg/protocol/engine/consensus/blockgadget/events.go: -------------------------------------------------------------------------------- 1 | package blockgadget 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | ) 7 | 8 | type Events struct { 9 | BlockPreAccepted *event.Event1[*blocks.Block] 10 | BlockPreConfirmed *event.Event1[*blocks.Block] 11 | BlockAccepted *event.Event1[*blocks.Block] 12 | BlockConfirmed *event.Event1[*blocks.Block] 13 | 14 | event.Group[Events, *Events] 15 | } 16 | 17 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 
18 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 19 | return &Events{ 20 | BlockPreAccepted: event.New1[*blocks.Block](), 21 | BlockPreConfirmed: event.New1[*blocks.Block](), 22 | BlockAccepted: event.New1[*blocks.Block](), 23 | BlockConfirmed: event.New1[*blocks.Block](), 24 | } 25 | }) 26 | -------------------------------------------------------------------------------- /pkg/protocol/engine/consensus/blockgadget/gadget.go: -------------------------------------------------------------------------------- 1 | package blockgadget 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | ) 7 | 8 | type Gadget interface { 9 | module.Module 10 | 11 | TrackWitnessWeight(votingBlock *blocks.Block) 12 | SetAccepted(block *blocks.Block) bool 13 | 14 | // Reset resets the component to a clean state as if it was created at the last commitment. 15 | Reset() 16 | } 17 | -------------------------------------------------------------------------------- /pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/options.go: -------------------------------------------------------------------------------- 1 | package thresholdblockgadget 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/options" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | func WithAcceptanceThreshold(acceptanceThreshold float64) options.Option[Gadget] { 9 | return func(gadget *Gadget) { 10 | gadget.optsAcceptanceThreshold = acceptanceThreshold 11 | } 12 | } 13 | 14 | func WithConfirmationThreshold(confirmationThreshold float64) options.Option[Gadget] { 15 | return func(gadget *Gadget) { 16 | gadget.optsConfirmationThreshold = confirmationThreshold 17 | } 18 | } 19 | 20 | func WithConfirmationRatificationThreshold(confirmationRatificationThreshold iotago.SlotIndex) options.Option[Gadget] { 21 | return func(gadget *Gadget) { 22 | gadget.optsConfirmationRatificationThreshold = 
confirmationRatificationThreshold 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /pkg/protocol/engine/consensus/slotgadget/events.go: -------------------------------------------------------------------------------- 1 | package slotgadget 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | type Events struct { 9 | SlotFinalized *event.Event1[iotago.SlotIndex] 10 | 11 | event.Group[Events, *Events] 12 | } 13 | 14 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 15 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 16 | return &Events{ 17 | SlotFinalized: event.New1[iotago.SlotIndex](), 18 | } 19 | }) 20 | -------------------------------------------------------------------------------- /pkg/protocol/engine/consensus/slotgadget/slotgadget.go: -------------------------------------------------------------------------------- 1 | package slotgadget 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | type Gadget interface { 9 | // Reset resets the component to a clean state as if it was created at the last commitment. 
10 | Reset(targetSlot iotago.SlotIndex) 11 | 12 | module.Module 13 | } 14 | -------------------------------------------------------------------------------- /pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/options.go: -------------------------------------------------------------------------------- 1 | package totalweightslotgadget 2 | 3 | import "github.com/iotaledger/hive.go/runtime/options" 4 | 5 | func WithSlotFinalizationThreshold(threshold float64) options.Option[Gadget] { 6 | return func(gadget *Gadget) { 7 | gadget.optsSlotFinalizationThreshold = threshold 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /pkg/protocol/engine/filter/postsolidfilter/events.go: -------------------------------------------------------------------------------- 1 | package postsolidfilter 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | ) 7 | 8 | type Events struct { 9 | BlockFiltered *event.Event1[*BlockFilteredEvent] 10 | BlockAllowed *event.Event1[*blocks.Block] 11 | 12 | event.Group[Events, *Events] 13 | } 14 | 15 | var NewEvents = event.CreateGroupConstructor(func() *Events { 16 | return &Events{ 17 | BlockFiltered: event.New1[*BlockFilteredEvent](), 18 | BlockAllowed: event.New1[*blocks.Block](), 19 | } 20 | }) 21 | 22 | type BlockFilteredEvent struct { 23 | Block *blocks.Block 24 | Reason error 25 | } 26 | -------------------------------------------------------------------------------- /pkg/protocol/engine/filter/postsolidfilter/post_solid_filter.go: -------------------------------------------------------------------------------- 1 | package postsolidfilter 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | ) 7 | 8 | type PostSolidFilter interface { 9 | // ProcessSolidBlock processes block from the given source. 
10 | ProcessSolidBlock(block *blocks.Block) 11 | 12 | // Reset resets the component to a clean state as if it was created at the last commitment. 13 | Reset() 14 | 15 | module.Module 16 | } 17 | -------------------------------------------------------------------------------- /pkg/protocol/engine/filter/presolidfilter/events.go: -------------------------------------------------------------------------------- 1 | package presolidfilter 2 | 3 | import ( 4 | "github.com/libp2p/go-libp2p/core/peer" 5 | 6 | "github.com/iotaledger/hive.go/runtime/event" 7 | "github.com/iotaledger/iota-core/pkg/model" 8 | ) 9 | 10 | type Events struct { 11 | BlockPreFiltered *event.Event1[*BlockPreFilteredEvent] 12 | BlockPreAllowed *event.Event1[*model.Block] 13 | 14 | event.Group[Events, *Events] 15 | } 16 | 17 | var NewEvents = event.CreateGroupConstructor(func() *Events { 18 | return &Events{ 19 | BlockPreFiltered: event.New1[*BlockPreFilteredEvent](), 20 | BlockPreAllowed: event.New1[*model.Block](), 21 | } 22 | }) 23 | 24 | type BlockPreFilteredEvent struct { 25 | Block *model.Block 26 | Reason error 27 | Source peer.ID 28 | } 29 | -------------------------------------------------------------------------------- /pkg/protocol/engine/filter/presolidfilter/pre_solid_filter.go: -------------------------------------------------------------------------------- 1 | package presolidfilter 2 | 3 | import ( 4 | "github.com/libp2p/go-libp2p/core/peer" 5 | 6 | "github.com/iotaledger/hive.go/runtime/module" 7 | "github.com/iotaledger/iota-core/pkg/model" 8 | ) 9 | 10 | type PreSolidFilter interface { 11 | // ProcessReceivedBlock processes block from the given source. 12 | ProcessReceivedBlock(block *model.Block, source peer.ID) 13 | 14 | // Reset resets the component to a clean state as if it was created at the last commitment. 
15 | Reset() 16 | 17 | module.Module 18 | } 19 | -------------------------------------------------------------------------------- /pkg/protocol/engine/inspection.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import "github.com/iotaledger/hive.go/runtime/inspection" 4 | 5 | // Inspect inspects the Engine and its subcomponents. 6 | func (e *Engine) Inspect(session ...inspection.Session) inspection.InspectedObject { 7 | return inspection.NewInspectedObject(e, func(_ inspection.InspectedObject) { 8 | // TODO: DUMP ENGINE STRUCTURE IN THE FUTURE 9 | }, session...) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/protocol/engine/ledger/blockvoterank.go: -------------------------------------------------------------------------------- 1 | package ledger 2 | 3 | import ( 4 | "bytes" 5 | "time" 6 | 7 | iotago "github.com/iotaledger/iota.go/v4" 8 | ) 9 | 10 | type BlockVoteRank struct { 11 | blockID iotago.BlockID 12 | time time.Time 13 | } 14 | 15 | func NewBlockVoteRank(id iotago.BlockID, time time.Time) BlockVoteRank { 16 | return BlockVoteRank{ 17 | blockID: id, 18 | time: time, 19 | } 20 | } 21 | 22 | func (v BlockVoteRank) Compare(other BlockVoteRank) int { 23 | if v.time.Before(other.time) { 24 | return -1 25 | } else if v.time.After(other.time) { 26 | return 1 27 | } 28 | 29 | return bytes.Compare(v.blockID[:], other.blockID[:]) 30 | } 31 | -------------------------------------------------------------------------------- /pkg/protocol/engine/ledger/events.go: -------------------------------------------------------------------------------- 1 | package ledger 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | type Events struct { 9 | AccountCreated *event.Event1[iotago.AccountID] 10 | AccountDestroyed *event.Event1[iotago.AccountID] 11 | 12 | event.Group[Events, *Events] 13 | } 14 | 
15 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 16 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 17 | return &Events{ 18 | AccountCreated: event.New1[iotago.AccountID](), 19 | AccountDestroyed: event.New1[iotago.AccountID](), 20 | } 21 | }) 22 | -------------------------------------------------------------------------------- /pkg/protocol/engine/ledger/tests/output.go: -------------------------------------------------------------------------------- 1 | package ledgertests 2 | 3 | import iotago "github.com/iotaledger/iota.go/v4" 4 | 5 | type MockedOutput struct{} 6 | 7 | func (m *MockedOutput) Equal(_ iotago.Output) bool { 8 | panic("implement me") 9 | } 10 | 11 | func (m *MockedOutput) Size() int { 12 | panic("implement me") 13 | } 14 | 15 | func (m *MockedOutput) WorkScore(_ *iotago.WorkScoreParameters) (iotago.WorkScore, error) { 16 | panic("implement me") 17 | } 18 | 19 | func (m *MockedOutput) StorageScore(_ *iotago.StorageScoreStructure, _ iotago.StorageScoreFunc) iotago.StorageScore { 20 | panic("implement me") 21 | } 22 | 23 | func (m *MockedOutput) BaseTokenAmount() iotago.BaseToken { 24 | panic("implement me") 25 | } 26 | 27 | func (m *MockedOutput) StoredMana() iotago.Mana { 28 | panic("implement me") 29 | } 30 | 31 | func (m *MockedOutput) UnlockConditionSet() iotago.UnlockConditionSet { 32 | panic("implement me") 33 | } 34 | 35 | func (m *MockedOutput) FeatureSet() iotago.FeatureSet { 36 | panic("implement me") 37 | } 38 | 39 | func (m *MockedOutput) Type() iotago.OutputType { 40 | panic("implement me") 41 | } 42 | 43 | func (m *MockedOutput) Clone() iotago.Output { 44 | panic("implement me") 45 | } 46 | -------------------------------------------------------------------------------- /pkg/protocol/engine/ledger/tests/state.go: -------------------------------------------------------------------------------- 1 | package ledgertests 2 | 3 | import ( 4 | 
"github.com/iotaledger/hive.go/lo" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type MockedState struct { 10 | id iotago.OutputID 11 | output *MockedOutput 12 | creationSlot iotago.SlotIndex 13 | slotBooked iotago.SlotIndex 14 | } 15 | 16 | func NewMockedState(transactionID iotago.TransactionID, index uint16) *MockedState { 17 | return &MockedState{ 18 | id: iotago.OutputIDFromTransactionIDAndIndex(transactionID, index), 19 | output: &MockedOutput{}, 20 | creationSlot: iotago.SlotIndex(0), 21 | slotBooked: iotago.SlotIndex(0), 22 | } 23 | } 24 | 25 | func (m *MockedState) StateID() iotago.Identifier { 26 | return iotago.IdentifierFromData(lo.PanicOnErr(m.id.Bytes())) 27 | } 28 | 29 | func (m *MockedState) Type() mempool.StateType { 30 | return mempool.StateTypeUTXOInput 31 | } 32 | 33 | func (m *MockedState) IsReadOnly() bool { 34 | return false 35 | } 36 | 37 | func (m *MockedState) OutputID() iotago.OutputID { 38 | return m.id 39 | } 40 | 41 | func (m *MockedState) Output() iotago.Output { 42 | return m.output 43 | } 44 | 45 | func (m *MockedState) SlotCreated() iotago.SlotIndex { 46 | return m.creationSlot 47 | } 48 | 49 | func (m *MockedState) SlotBooked() iotago.SlotIndex { 50 | return m.slotBooked 51 | } 52 | 53 | func (m *MockedState) String() string { 54 | return "MockedOutput(" + m.id.ToHex() + ")" 55 | } 56 | -------------------------------------------------------------------------------- /pkg/protocol/engine/ledger/tests/state_resolver.go: -------------------------------------------------------------------------------- 1 | package ledgertests 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds/shrinkingmap" 5 | "github.com/iotaledger/hive.go/ierrors" 6 | "github.com/iotaledger/iota-core/pkg/core/promise" 7 | "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" 8 | ) 9 | 10 | type MockStateResolver struct { 11 | statesByID 
*shrinkingmap.ShrinkingMap[mempool.StateID, mempool.State] 12 | } 13 | 14 | func New(initialStates ...mempool.State) *MockStateResolver { 15 | stateResolver := &MockStateResolver{ 16 | statesByID: shrinkingmap.New[mempool.StateID, mempool.State](), 17 | } 18 | for _, initialState := range initialStates { 19 | stateResolver.statesByID.Set(initialState.StateID(), initialState) 20 | } 21 | 22 | return stateResolver 23 | } 24 | 25 | func (s *MockStateResolver) AddOutputState(state mempool.State) { 26 | s.statesByID.Set(state.StateID(), state) 27 | } 28 | 29 | func (s *MockStateResolver) DestroyOutputState(stateID mempool.StateID) { 30 | s.statesByID.Delete(stateID) 31 | } 32 | 33 | func (s *MockStateResolver) ResolveOutputState(reference mempool.StateReference) *promise.Promise[mempool.State] { 34 | output, exists := s.statesByID.Get(reference.ReferencedStateID()) 35 | if !exists { 36 | return promise.New[mempool.State]().Reject(ierrors.Errorf("output %s not found: %w", reference.ReferencedStateID().ToHex(), mempool.ErrStateNotFound)) 37 | } 38 | 39 | return promise.New[mempool.State]().Resolve(output) 40 | } 41 | 42 | func (s *MockStateResolver) Cleanup() { 43 | s.statesByID.Clear() 44 | } 45 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/errors.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | ) 6 | 7 | var ( 8 | ErrStateNotFound = ierrors.New("state not found") 9 | ErrInputSolidificationRequestFailed = ierrors.New("UTXO input solidification failed") 10 | ) 11 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/mempool.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | 
"github.com/iotaledger/iota-core/pkg/protocol/engine/mempool/spenddag" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type MemPool[VoteRank spenddag.VoteRankType[VoteRank]] interface { 10 | AttachSignedTransaction(signedTransaction SignedTransaction, transaction Transaction, blockID iotago.BlockID) (signedTransactionMetadata SignedTransactionMetadata, err error) 11 | 12 | OnAttachTransactionFailed(callback func(transactionID iotago.TransactionID, blockID iotago.BlockID, err error), opts ...event.Option) *event.Hook[func(transactionID iotago.TransactionID, blockID iotago.BlockID, err error)] 13 | 14 | OnSignedTransactionAttached(callback func(signedTransactionMetadata SignedTransactionMetadata), opts ...event.Option) *event.Hook[func(metadata SignedTransactionMetadata)] 15 | 16 | OnTransactionAttached(callback func(metadata TransactionMetadata), opts ...event.Option) *event.Hook[func(metadata TransactionMetadata)] 17 | 18 | MarkAttachmentIncluded(blockID iotago.BlockID) bool 19 | 20 | StateMetadata(reference StateReference) (state StateMetadata, err error) 21 | 22 | TransactionMetadata(id iotago.TransactionID) (transaction TransactionMetadata, exists bool) 23 | 24 | VM() VM 25 | 26 | InjectRequestedState(state State) 27 | 28 | TransactionMetadataByAttachment(blockID iotago.BlockID) (transaction TransactionMetadata, exists bool) 29 | 30 | CommitStateDiff(slot iotago.SlotIndex) (StateDiff, error) 31 | 32 | Evict(slot iotago.SlotIndex) 33 | 34 | // Reset resets the component to a clean state as if it was created at the last commitment. 35 | Reset() 36 | } 37 | 38 | // StateType denotes the type of state. 
39 | type StateType byte 40 | 41 | const ( 42 | StateTypeUTXOInput StateType = iota 43 | StateTypeCommitment 44 | ) 45 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/signed_transaction_metadata.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import iotago "github.com/iotaledger/iota.go/v4" 4 | 5 | type SignedTransactionMetadata interface { 6 | ID() iotago.SignedTransactionID 7 | 8 | SignedTransaction() SignedTransaction 9 | 10 | OnSignaturesValid(callback func()) (unsubscribe func()) 11 | 12 | OnSignaturesInvalid(callback func(err error)) (unsubscribe func()) 13 | 14 | SignaturesInvalid() error 15 | 16 | TransactionMetadata() TransactionMetadata 17 | 18 | Attachments() []iotago.BlockID 19 | } 20 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/spenddag/constraints.go: -------------------------------------------------------------------------------- 1 | package spenddag 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/constraints" 5 | ) 6 | 7 | // IDType is the constraint for the identifier of a spend or a resource. 8 | type IDType interface { 9 | // comparable is a built-in constraint that ensures that the type can be used as a map key. 10 | comparable 11 | 12 | // Bytes returns a serialized version of the ID. 13 | Bytes() ([]byte, error) 14 | 15 | // String returns a human-readable version of the ID. 16 | String() string 17 | } 18 | 19 | // VoteRankType is the constraint for the vote rank of a voter. 20 | type VoteRankType[T any] interface { 21 | // Comparable imports the constraints.Comparable[T] interface to ensure that the type can be compared. 
22 | constraints.Comparable[T] 23 | } 24 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/spenddag/errors.go: -------------------------------------------------------------------------------- 1 | package spenddag 2 | 3 | import "github.com/iotaledger/hive.go/ierrors" 4 | 5 | var ( 6 | ErrExpected = ierrors.New("expected error") 7 | ErrAlreadyPartOfSpendSet = ierrors.New("spender already part of SpendSet") 8 | ErrEntityEvicted = ierrors.New("tried to operate on evicted entity") 9 | ErrFatal = ierrors.New("fatal error") 10 | ) 11 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/spenddag/spenddagv1/spend_set_test.go: -------------------------------------------------------------------------------- 1 | package spenddagv1 2 | 3 | import ( 4 | "github.com/iotaledger/iota-core/pkg/core/vote" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | type TestSpendSet = *SpendSet[iotago.TransactionID, iotago.OutputID, vote.MockedRank] 9 | 10 | var NewTestSpendSet = NewSpendSet[iotago.TransactionID, iotago.OutputID, vote.MockedRank] 11 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/spenddag/spenddagv1/utils.go: -------------------------------------------------------------------------------- 1 | package spenddagv1 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds" 5 | "github.com/iotaledger/iota-core/pkg/core/weight" 6 | "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool/spenddag" 7 | ) 8 | 9 | // heaviestSpender returns the heaviest Spender from the given set of Spenders. 
10 | func heaviestSpender[SpenderID, ResourceID spenddag.IDType, VoterPower spenddag.VoteRankType[VoterPower]](spenders ds.Set[*Spender[SpenderID, ResourceID, VoterPower]]) *Spender[SpenderID, ResourceID, VoterPower] { 11 | var result *Spender[SpenderID, ResourceID, VoterPower] 12 | spenders.Range(func(spender *Spender[SpenderID, ResourceID, VoterPower]) { 13 | if spender.Compare(result) == weight.Heavier { 14 | result = spender 15 | } 16 | }) 17 | 18 | return result 19 | } 20 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/state.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/lo" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | // A generic interface over a state (like an output or a commitment). 9 | type State interface { 10 | // The identifier of the state. 11 | StateID() StateID 12 | 13 | // The type of state. 14 | Type() StateType 15 | 16 | // Whether the state is read only. 17 | IsReadOnly() bool 18 | 19 | // SlotBooked returns the slot index of the state if it is booked. 20 | SlotBooked() iotago.SlotIndex 21 | } 22 | 23 | // A thin wrapper around a resolved commitment. 
24 | type CommitmentInputState struct { 25 | Commitment *iotago.Commitment 26 | } 27 | 28 | func (s CommitmentInputState) StateID() StateID { 29 | return iotago.IdentifierFromData(lo.PanicOnErr(s.Commitment.MustID().Bytes())) 30 | } 31 | 32 | func (s CommitmentInputState) Type() StateType { 33 | return StateTypeCommitment 34 | } 35 | 36 | func (s CommitmentInputState) IsReadOnly() bool { 37 | return true 38 | } 39 | 40 | func (s CommitmentInputState) SlotBooked() iotago.SlotIndex { 41 | return s.Commitment.Slot 42 | } 43 | 44 | func CommitmentInputStateFromCommitment(commitment *iotago.Commitment) CommitmentInputState { 45 | return CommitmentInputState{ 46 | Commitment: commitment, 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/state_diff.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ads" 5 | "github.com/iotaledger/hive.go/ds/orderedmap" 6 | "github.com/iotaledger/hive.go/ds/shrinkingmap" 7 | iotago "github.com/iotaledger/iota.go/v4" 8 | ) 9 | 10 | // StateDiff is a collection of changes that happened in a certain slot and that can be applied to the ledger state. 11 | type StateDiff interface { 12 | // Slot returns the slot index of the state diff. 13 | Slot() iotago.SlotIndex 14 | 15 | // DestroyedStates returns a compacted list of all the states that were destroyed in the slot. 16 | DestroyedStates() *shrinkingmap.ShrinkingMap[StateID, StateMetadata] 17 | 18 | // CreatedStates returns a compacted list of all the states that were created in the slot. 19 | CreatedStates() *shrinkingmap.ShrinkingMap[StateID, StateMetadata] 20 | 21 | // ExecutedTransactions returns an un-compacted list of all the transactions that were executed in the slot. 
22 | ExecutedTransactions() *orderedmap.OrderedMap[iotago.TransactionID, TransactionMetadata] 23 | 24 | // Mutations returns an authenticated data structure that allows to commit to the applied mutations. 25 | Mutations() ads.Set[iotago.Identifier, iotago.TransactionID] 26 | 27 | // Reset resets the component to a clean state as if it was created at the last commitment. 28 | Reset() error 29 | } 30 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/state_id.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import iotago "github.com/iotaledger/iota.go/v4" 4 | 5 | type StateID = iotago.Identifier 6 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/state_metadata.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds/reactive" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | type StateMetadata interface { 9 | State() State 10 | 11 | SpenderIDs() reactive.Set[iotago.TransactionID] 12 | 13 | PendingSpenderCount() int 14 | 15 | AcceptedSpender() (TransactionMetadata, bool) 16 | 17 | OnAcceptedSpenderUpdated(callback func(spender TransactionMetadata)) 18 | 19 | InclusionSlot() iotago.SlotIndex 20 | 21 | OnInclusionSlotUpdated(callback func(prevSlot iotago.SlotIndex, newSlot iotago.SlotIndex)) 22 | 23 | inclusionFlags 24 | } 25 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/state_reference.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/lo" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | // A reference to a state (like an output or a commitment). 
9 | type StateReference interface { 10 | // The identifier of the state to which it resolves. 11 | ReferencedStateID() iotago.Identifier 12 | 13 | // The type of state. 14 | Type() StateType 15 | } 16 | 17 | // A thin wrapper around a UTXO input. 18 | type UTXOInputStateRef struct { 19 | Input *iotago.UTXOInput 20 | } 21 | 22 | func (r UTXOInputStateRef) ReferencedStateID() iotago.Identifier { 23 | return iotago.IdentifierFromData(lo.PanicOnErr(r.Input.OutputID().Bytes())) 24 | } 25 | 26 | func (r UTXOInputStateRef) Type() StateType { 27 | return StateTypeUTXOInput 28 | } 29 | 30 | func UTXOInputStateRefFromInput(input *iotago.UTXOInput) UTXOInputStateRef { 31 | return UTXOInputStateRef{ 32 | Input: input, 33 | } 34 | } 35 | 36 | // A thin wrapper around a Commitment input. 37 | type CommitmentInputStateRef struct { 38 | Input *iotago.CommitmentInput 39 | } 40 | 41 | func (r CommitmentInputStateRef) ReferencedStateID() iotago.Identifier { 42 | return iotago.IdentifierFromData(lo.PanicOnErr(r.Input.CommitmentID.Bytes())) 43 | } 44 | 45 | func (r CommitmentInputStateRef) Type() StateType { 46 | return StateTypeCommitment 47 | } 48 | 49 | func CommitmentInputStateRefFromInput(input *iotago.CommitmentInput) CommitmentInputStateRef { 50 | return CommitmentInputStateRef{ 51 | Input: input, 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/state_resolver.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/iota-core/pkg/core/promise" 5 | ) 6 | 7 | // StateResolver is a function that resolves a StateReference to a Promise with the State. 
8 | type StateResolver func(reference StateReference) *promise.Promise[State] 9 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/tests/transaction.go: -------------------------------------------------------------------------------- 1 | package mempooltests 2 | 3 | import ( 4 | "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | "github.com/iotaledger/iota.go/v4/tpkg" 7 | ) 8 | 9 | type SignedTransaction struct { 10 | id iotago.SignedTransactionID 11 | transaction mempool.Transaction 12 | } 13 | 14 | func (s *SignedTransaction) ID() (iotago.SignedTransactionID, error) { 15 | return s.id, nil 16 | } 17 | 18 | func (s *SignedTransaction) MustID() iotago.SignedTransactionID { 19 | return s.id 20 | } 21 | 22 | func (s *SignedTransaction) String() string { 23 | return "SignedTransaction(" + s.id.String() + ")" 24 | } 25 | 26 | type Transaction struct { 27 | id iotago.TransactionID 28 | inputs []mempool.StateReference 29 | outputCount uint16 30 | invalidTransaction bool 31 | } 32 | 33 | func NewSignedTransaction(transaction mempool.Transaction) *SignedTransaction { 34 | return &SignedTransaction{ 35 | id: tpkg.RandSignedTransactionID(), 36 | transaction: transaction, 37 | } 38 | } 39 | 40 | func NewTransaction(outputCount uint16, inputs ...mempool.StateReference) *Transaction { 41 | return &Transaction{ 42 | id: tpkg.RandTransactionID(), 43 | inputs: inputs, 44 | outputCount: outputCount, 45 | } 46 | } 47 | 48 | func (t *Transaction) ID() (iotago.TransactionID, error) { 49 | return t.id, nil 50 | } 51 | 52 | func (t *Transaction) MustID() iotago.TransactionID { 53 | return t.id 54 | } 55 | 56 | func (t *Transaction) Inputs() ([]mempool.StateReference, error) { 57 | return t.inputs, nil 58 | } 59 | 60 | func (t *Transaction) String() string { 61 | return "Transaction(" + t.id.String() + ")" 62 | } 63 | 64 | var _ mempool.Transaction = new(Transaction) 
65 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/tests/vm.go: -------------------------------------------------------------------------------- 1 | package mempooltests 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/iotaledger/hive.go/ierrors" 7 | ledgertests "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger/tests" 8 | "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" 9 | ) 10 | 11 | type VM struct{} 12 | 13 | func (v *VM) Inputs(transaction mempool.Transaction) ([]mempool.StateReference, error) { 14 | testTransaction, ok := transaction.(*Transaction) 15 | if !ok { 16 | return nil, ierrors.New("invalid transaction type in MockedVM") 17 | } 18 | 19 | return testTransaction.Inputs() 20 | } 21 | 22 | func (v *VM) ValidateSignatures(_ mempool.SignedTransaction, _ []mempool.State) (executionContext context.Context, err error) { 23 | return context.Background(), nil 24 | } 25 | 26 | func (v *VM) Execute(_ context.Context, transaction mempool.Transaction) (outputs []mempool.State, err error) { 27 | typedTransaction, ok := transaction.(*Transaction) 28 | if !ok { 29 | return nil, ierrors.New("invalid transaction type in MockedVM") 30 | } 31 | 32 | if typedTransaction.invalidTransaction { 33 | return nil, ierrors.New("invalid transaction") 34 | } 35 | 36 | for i := range typedTransaction.outputCount { 37 | id, err := typedTransaction.ID() 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | outputs = append(outputs, ledgertests.NewMockedState(id, i)) 43 | } 44 | 45 | return outputs, nil 46 | } 47 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/transaction.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | iotago "github.com/iotaledger/iota.go/v4" 5 | ) 6 | 7 | type SignedTransaction interface { 8 | // ID returns the identifier of the 
Transaction that contains a signature. 9 | ID() (iotago.SignedTransactionID, error) 10 | // MustID works like ID but panics if the SignedTransactionID can't be computed. 11 | MustID() iotago.SignedTransactionID 12 | } 13 | 14 | type Transaction interface { 15 | // ID returns the identifier of the Transaction. 16 | ID() (iotago.TransactionID, error) 17 | // MustID works like ID but panics if the TransactionID can't be computed. 18 | MustID() iotago.TransactionID 19 | } 20 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/transaction_metadata.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds" 5 | "github.com/iotaledger/hive.go/ds/reactive" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type TransactionMetadata interface { 10 | ID() iotago.TransactionID 11 | 12 | Transaction() Transaction 13 | 14 | Inputs() ds.Set[StateMetadata] 15 | 16 | Outputs() ds.Set[StateMetadata] 17 | 18 | SpenderIDs() reactive.Set[iotago.TransactionID] 19 | 20 | Commit() 21 | 22 | IsSolid() bool 23 | 24 | OnSolid(callback func()) 25 | 26 | IsExecuted() bool 27 | 28 | OnExecuted(callback func()) 29 | 30 | IsInvalid() bool 31 | 32 | OnInvalid(callback func(error)) 33 | 34 | IsBooked() bool 35 | 36 | OnBooked(callback func()) 37 | 38 | ValidAttachments() []iotago.BlockID 39 | 40 | EarliestIncludedAttachment() iotago.BlockID 41 | 42 | OnEarliestIncludedAttachmentUpdated(callback func(prevID, newID iotago.BlockID)) 43 | 44 | OnEvicted(callback func()) 45 | 46 | inclusionFlags 47 | } 48 | 49 | type inclusionFlags interface { 50 | IsPending() bool 51 | 52 | IsAccepted() bool 53 | 54 | OnAccepted(callback func()) 55 | 56 | CommittedSlot() (slot iotago.SlotIndex, isCommitted bool) 57 | 58 | OnCommittedSlotUpdated(callback func(slot iotago.SlotIndex)) 59 | 60 | IsRejected() bool 61 | 62 | OnRejected(callback func()) 63 | 
64 | OrphanedSlot() (slot iotago.SlotIndex, isOrphaned bool) 65 | 66 | OnOrphanedSlotUpdated(callback func(slot iotago.SlotIndex)) 67 | } 68 | -------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/v1/transaction_metadata_test.go: -------------------------------------------------------------------------------- 1 | package mempoolv1 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | 8 | mempooltests "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool/tests" 9 | iotago "github.com/iotaledger/iota.go/v4" 10 | ) 11 | 12 | func TestAttachments(t *testing.T) { 13 | blockIDs := map[string]iotago.BlockID{ 14 | "1": iotago.BlockIDRepresentingData(1, []byte("block1")), 15 | "2": iotago.BlockIDRepresentingData(2, []byte("block2")), 16 | } 17 | 18 | transactionMetadata := NewTransactionMetadata(mempooltests.NewTransaction(2), nil) 19 | signedTransactionMetadata := NewSignedTransactionMetadata(mempooltests.NewSignedTransaction(transactionMetadata.Transaction()), transactionMetadata) 20 | 21 | require.True(t, signedTransactionMetadata.addAttachment(blockIDs["1"])) 22 | require.True(t, signedTransactionMetadata.addAttachment(blockIDs["2"])) 23 | 24 | require.False(t, signedTransactionMetadata.addAttachment(blockIDs["1"])) 25 | 26 | var earliestInclusionIndex iotago.SlotIndex 27 | 28 | signedTransactionMetadata.transactionMetadata.OnEarliestIncludedAttachmentUpdated(func(_, includedBlock iotago.BlockID) { 29 | earliestInclusionIndex = includedBlock.Slot() 30 | }) 31 | require.Equal(t, iotago.SlotIndex(0), earliestInclusionIndex) 32 | 33 | signedTransactionMetadata.transactionMetadata.markAttachmentIncluded(blockIDs["2"]) 34 | require.Equal(t, iotago.SlotIndex(2), earliestInclusionIndex) 35 | signedTransactionMetadata.transactionMetadata.markAttachmentIncluded(blockIDs["1"]) 36 | require.Equal(t, iotago.SlotIndex(1), earliestInclusionIndex) 37 | } 38 | 
-------------------------------------------------------------------------------- /pkg/protocol/engine/mempool/vm.go: -------------------------------------------------------------------------------- 1 | package mempool 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | // VM is the interface that defines the virtual machine that is used to validate and execute transactions. 8 | type VM interface { 9 | // Inputs returns the referenced inputs of the given transaction. 10 | Inputs(transaction Transaction) ([]StateReference, error) 11 | 12 | // ValidateSignatures validates the signatures of the given SignedTransaction and returns the execution context. 13 | ValidateSignatures(signedTransaction SignedTransaction, inputs []State) (executionContext context.Context, err error) 14 | 15 | // Execute executes the transaction in the given execution context and returns the resulting states. 16 | Execute(executionContext context.Context, transaction Transaction) (outputs []State, err error) 17 | } 18 | -------------------------------------------------------------------------------- /pkg/protocol/engine/notarization/events.go: -------------------------------------------------------------------------------- 1 | package notarization 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ads" 5 | "github.com/iotaledger/hive.go/runtime/event" 6 | "github.com/iotaledger/iota-core/pkg/model" 7 | "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" 8 | iotago "github.com/iotaledger/iota.go/v4" 9 | ) 10 | 11 | // Events is a container that acts as a dictionary for the events of the notarization manager. 12 | type Events struct { 13 | SlotCommitted *event.Event1[*SlotCommittedDetails] 14 | LatestCommitmentUpdated *event.Event1[*model.Commitment] 15 | 16 | event.Group[Events, *Events] 17 | } 18 | 19 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 
20 | var NewEvents = event.CreateGroupConstructor(func() (self *Events) { 21 | return &Events{ 22 | SlotCommitted: event.New1[*SlotCommittedDetails](), 23 | LatestCommitmentUpdated: event.New1[*model.Commitment](), 24 | } 25 | }) 26 | 27 | // SlotCommittedDetails contains the details of a committed slot. 28 | type SlotCommittedDetails struct { 29 | Commitment *model.Commitment 30 | AcceptedBlocks ads.Set[iotago.Identifier, iotago.BlockID] 31 | ActiveValidatorsCount int 32 | OutputsCreated utxoledger.Outputs 33 | OutputsConsumed utxoledger.Spents 34 | Mutations []*iotago.Transaction 35 | } 36 | -------------------------------------------------------------------------------- /pkg/protocol/engine/notarization/notarization.go: -------------------------------------------------------------------------------- 1 | package notarization 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | "github.com/iotaledger/iota-core/pkg/model" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type Notarization interface { 10 | // IsBootstrapped returns if notarization finished committing all pending slots up to the current acceptance time. 11 | IsBootstrapped() bool 12 | 13 | ForceCommit(slot iotago.SlotIndex) (*model.Commitment, error) 14 | ForceCommitUntil(commitUntilSlot iotago.SlotIndex) error 15 | 16 | AcceptedBlocksCount(index iotago.SlotIndex) int 17 | 18 | // Reset resets the component to a clean state as if it was created at the last commitment. 
19 | Reset() 20 | 21 | module.Module 22 | } 23 | -------------------------------------------------------------------------------- /pkg/protocol/engine/syncmanager/events.go: -------------------------------------------------------------------------------- 1 | package syncmanager 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | ) 6 | 7 | type Events struct { 8 | UpdatedStatus *event.Event1[*SyncStatus] 9 | 10 | event.Group[Events, *Events] 11 | } 12 | 13 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 14 | return &Events{ 15 | UpdatedStatus: event.New1[*SyncStatus](), 16 | } 17 | }) 18 | -------------------------------------------------------------------------------- /pkg/protocol/engine/syncmanager/syncmanager.go: -------------------------------------------------------------------------------- 1 | package syncmanager 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | "github.com/iotaledger/iota-core/pkg/model" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type SyncManager interface { 10 | // SyncStatus returns the sync status of a node. 11 | SyncStatus() *SyncStatus 12 | 13 | // IsBootstrapped returns bool indicating if a node is bootstrapped. 14 | IsBootstrapped() bool 15 | 16 | // IsNodeSynced returns bool indicating if a node is synced. 17 | IsNodeSynced() bool 18 | 19 | // IsFinalizationDelayed returns bool indicating if the finalization is delayed 20 | // (latest committed slot - latest finalized slot > max committable age). 21 | IsFinalizationDelayed() bool 22 | 23 | // LastAcceptedBlockSlot returns the slot of the latest accepted block. 24 | LastAcceptedBlockSlot() iotago.SlotIndex 25 | 26 | // LastConfirmedBlockSlot returns slot of the latest confirmed block. 27 | LastConfirmedBlockSlot() iotago.SlotIndex 28 | 29 | // LatestCommitment returns the latest commitment. 
30 | LatestCommitment() *model.Commitment 31 | 32 | // LatestFinalizedSlot returns the latest finalized slot index. 33 | LatestFinalizedSlot() iotago.SlotIndex 34 | 35 | // LastPrunedEpoch returns the last pruned epoch index. 36 | LastPrunedEpoch() (iotago.EpochIndex, bool) 37 | 38 | // Reset resets the component to a clean state as if it was created at the last commitment. 39 | Reset() 40 | 41 | module.Module 42 | } 43 | 44 | type SyncStatus struct { 45 | NodeBootstrapped bool 46 | NodeSynced bool 47 | FinalizationDelayed bool 48 | LastAcceptedBlockSlot iotago.SlotIndex 49 | LastConfirmedBlockSlot iotago.SlotIndex 50 | LatestCommitment *model.Commitment 51 | LatestFinalizedSlot iotago.SlotIndex 52 | LastPrunedEpoch iotago.EpochIndex 53 | HasPruned bool 54 | } 55 | -------------------------------------------------------------------------------- /pkg/protocol/engine/tipmanager/events.go: -------------------------------------------------------------------------------- 1 | package tipmanager 2 | 3 | import "github.com/iotaledger/hive.go/runtime/event" 4 | 5 | // Events represents events happening in the TipManager. 6 | type Events struct { 7 | // BlockAdded gets triggered when a new block was added to the TipManager. 8 | BlockAdded *event.Event1[TipMetadata] 9 | // Group makes the Events linkable through the central Events dictionary. 10 | event.Group[Events, *Events] 11 | } 12 | 13 | // NewEvents creates a new Events instance. 
14 | var NewEvents = event.CreateGroupConstructor(func() *Events { 15 | return &Events{ 16 | BlockAdded: event.New1[TipMetadata](), 17 | } 18 | }) 19 | -------------------------------------------------------------------------------- /pkg/protocol/engine/tipmanager/tip_metadata.go: -------------------------------------------------------------------------------- 1 | package tipmanager 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds/reactive" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | // TipMetadata allows to access the tip related metadata and events of a block in the TipManager. 10 | type TipMetadata interface { 11 | // ID returns the identifier of the block the TipMetadata belongs to. 12 | ID() iotago.BlockID 13 | 14 | // Block returns the block that the TipMetadata belongs to. 15 | Block() *blocks.Block 16 | 17 | // TipPool exposes a variable that stores the current TipPool of the block. 18 | TipPool() reactive.Variable[TipPool] 19 | 20 | // IsStrongTip returns a ReadableVariable that indicates if the block is a strong tip. 21 | IsStrongTip() reactive.ReadableVariable[bool] 22 | 23 | // IsWeakTip returns a ReadableVariable that indicates if the block is a weak tip. 24 | IsWeakTip() reactive.ReadableVariable[bool] 25 | 26 | // IsOrphaned returns a ReadableVariable that indicates if the block was orphaned. 27 | IsOrphaned() reactive.ReadableVariable[bool] 28 | 29 | // LivenessThresholdReached exposes an event that is triggered when the liveness threshold is reached. 30 | LivenessThresholdReached() reactive.Event 31 | 32 | // Evicted exposes an event that is triggered when the block is evicted. 
33 | Evicted() reactive.Event 34 | 35 | String() string 36 | } 37 | -------------------------------------------------------------------------------- /pkg/protocol/engine/tipmanager/tip_pool.go: -------------------------------------------------------------------------------- 1 | package tipmanager 2 | 3 | // TipPool represents a pool of blocks that are treated in a certain way by the tip selection strategy. 4 | type TipPool uint8 5 | 6 | const ( 7 | // UndefinedTipPool is the zero value of TipPool. 8 | UndefinedTipPool TipPool = iota 9 | 10 | // StrongTipPool represents a pool of blocks that are supposed to be referenced through strong parents. 11 | StrongTipPool 12 | 13 | // WeakTipPool represents a pool of blocks that are supposed to be referenced through weak parents. 14 | WeakTipPool 15 | 16 | // DroppedTipPool represents a pool of blocks that are supposed to be ignored by the tip selection strategy. 17 | DroppedTipPool 18 | ) 19 | 20 | // Max returns the maximum of the two TipPools. 21 | func (t TipPool) Max(other TipPool) TipPool { 22 | if t > other { 23 | return t 24 | } 25 | 26 | return other 27 | } 28 | 29 | // String returns a human-readable representation of the TipPool. 
30 | func (t TipPool) String() string { 31 | switch t { 32 | case StrongTipPool: 33 | return "StrongTipPool" 34 | case WeakTipPool: 35 | return "WeakTipPool" 36 | case DroppedTipPool: 37 | return "DroppedTipPool" 38 | default: 39 | return "UndefinedTipPool" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /pkg/protocol/engine/tipmanager/v1/provider.go: -------------------------------------------------------------------------------- 1 | package tipmanagerv1 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/lo" 5 | "github.com/iotaledger/hive.go/runtime/event" 6 | "github.com/iotaledger/hive.go/runtime/module" 7 | "github.com/iotaledger/hive.go/runtime/workerpool" 8 | "github.com/iotaledger/iota-core/pkg/core/account" 9 | "github.com/iotaledger/iota-core/pkg/protocol/engine" 10 | "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager" 11 | iotago "github.com/iotaledger/iota.go/v4" 12 | ) 13 | 14 | // NewProvider creates a new TipManager provider, that can be used to inject the component into an engine. 
15 | func NewProvider() module.Provider[*engine.Engine, tipmanager.TipManager] { 16 | return module.Provide(func(e *engine.Engine) tipmanager.TipManager { 17 | t := New(e.NewSubModule("TipManager"), e.BlockCache.Block, e.SybilProtection.SeatManager().CommitteeInSlot) 18 | 19 | e.ConstructedEvent().OnTrigger(func() { 20 | tipWorker := e.Workers.CreatePool("AddTip", workerpool.WithWorkerCount(2)) 21 | 22 | e.Events.Scheduler.BlockScheduled.Hook(lo.Void(t.AddBlock), event.WithWorkerPool(tipWorker)) 23 | 24 | // the tipmanager needs to know about all the blocks that passed the scheduler 25 | e.Events.Scheduler.BlockSkipped.Hook(lo.Void(t.AddBlock), event.WithWorkerPool(tipWorker)) 26 | e.Events.Evict.Hook(t.Evict) 27 | 28 | e.Events.SeatManager.OnlineCommitteeSeatAdded.Hook(func(index account.SeatIndex, _ iotago.AccountID) { 29 | t.AddSeat(index) 30 | }) 31 | e.Events.SeatManager.OnlineCommitteeSeatRemoved.Hook(t.RemoveSeat) 32 | 33 | e.Events.TipManager.BlockAdded.LinkTo(t.blockAdded) 34 | 35 | t.InitializedEvent().Trigger() 36 | }) 37 | 38 | return t 39 | }) 40 | } 41 | -------------------------------------------------------------------------------- /pkg/protocol/engine/tipselection/tipselection.go: -------------------------------------------------------------------------------- 1 | package tipselection 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/runtime/module" 7 | "github.com/iotaledger/iota-core/pkg/model" 8 | ) 9 | 10 | // TipSelection is a component that is used to abstract away the tip selection strategy, used to issuing new blocks. 11 | type TipSelection interface { 12 | // SelectTips selects the tips that should be used as references for a new block. 13 | SelectTips(maxStrongParents int, maxLikedInsteadParents int, maxWeakParents int) (references model.ParentReferences) 14 | 15 | // SetAcceptanceTime updates the acceptance time of the TipSelection. 
16 | SetAcceptanceTime(acceptanceTime time.Time) (previousTime time.Time) 17 | 18 | // Reset resets the component to a clean state as if it was created at the last commitment. 19 | Reset() 20 | 21 | // Interface embeds the required methods of the module.Module. 22 | module.Module 23 | } 24 | -------------------------------------------------------------------------------- /pkg/protocol/engine/upgrade/orchestrator.go: -------------------------------------------------------------------------------- 1 | package upgrade 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/iotaledger/hive.go/runtime/module" 7 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 8 | iotago "github.com/iotaledger/iota.go/v4" 9 | ) 10 | 11 | type Orchestrator interface { 12 | TrackValidationBlock(block *blocks.Block) 13 | Commit(slot iotago.SlotIndex) (protocolParametersAndVersionsHash iotago.Identifier, err error) 14 | 15 | Import(reader io.ReadSeeker) error 16 | Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) error 17 | 18 | RestoreFromDisk(slot iotago.SlotIndex) error 19 | 20 | // Reset resets the component to a clean state as if it was created at the last commitment. 
21 | Reset() 22 | 23 | module.Module 24 | } 25 | -------------------------------------------------------------------------------- /pkg/protocol/engine/upgrade/signalingupgradeorchestrator/options.go: -------------------------------------------------------------------------------- 1 | package signalingupgradeorchestrator 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/options" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | func WithProtocolParameters(protocolParameters ...iotago.ProtocolParameters) options.Option[Orchestrator] { 9 | return func(o *Orchestrator) { 10 | o.optsProtocolParameters = protocolParameters 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /pkg/protocol/engine/upgrade/signalingupgradeorchestrator/storage.go: -------------------------------------------------------------------------------- 1 | package signalingupgradeorchestrator 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | "github.com/iotaledger/iota-core/pkg/core/account" 6 | "github.com/iotaledger/iota-core/pkg/model" 7 | iotago "github.com/iotaledger/iota.go/v4" 8 | ) 9 | 10 | func (o *Orchestrator) RestoreFromDisk(slot iotago.SlotIndex) error { 11 | o.evictionMutex.Lock() 12 | defer o.evictionMutex.Unlock() 13 | 14 | o.lastCommittedSlot = slot 15 | 16 | // Load latest signals into cache into the next slot (necessary so that we have the correct information when we commit that slot). 
17 | latestSignals := o.latestSignals.Get(slot+1, true) 18 | upgradeSignals, err := o.upgradeSignalsPerSlotFunc(slot) 19 | if err != nil { 20 | return ierrors.Wrapf(err, "failed to get upgrade signals for slot %d", slot) 21 | } 22 | if err := upgradeSignals.Stream(func(seat account.SeatIndex, signaledBlock *model.SignaledBlock) error { 23 | latestSignals.Set(seat, signaledBlock) 24 | 25 | return nil 26 | }); err != nil { 27 | return ierrors.Wrap(err, "failed to restore upgrade signals from disk") 28 | } 29 | 30 | return nil 31 | } 32 | -------------------------------------------------------------------------------- /pkg/protocol/engine/utxoledger/kvstorable.go: -------------------------------------------------------------------------------- 1 | package utxoledger 2 | 3 | type kvStorable interface { 4 | KVStorableKey() (key []byte) 5 | KVStorableValue() (value []byte) 6 | kvStorableLoad(manager *Manager, key []byte, value []byte) error 7 | } 8 | -------------------------------------------------------------------------------- /pkg/protocol/engine/utxoledger/state_tree.go: -------------------------------------------------------------------------------- 1 | package utxoledger 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/iotaledger/hive.go/ads" 7 | "github.com/iotaledger/hive.go/ierrors" 8 | "github.com/iotaledger/hive.go/kvstore/mapdb" 9 | iotago "github.com/iotaledger/iota.go/v4" 10 | ) 11 | 12 | type stateTreeMetadata struct { 13 | Slot iotago.SlotIndex 14 | } 15 | 16 | func newStateMetadata(output *Output) *stateTreeMetadata { 17 | return &stateTreeMetadata{ 18 | Slot: output.SlotCreated(), 19 | } 20 | } 21 | 22 | func stateMetadataFromBytes(b []byte) (*stateTreeMetadata, int, error) { 23 | s := new(stateTreeMetadata) 24 | 25 | var err error 26 | var n int 27 | s.Slot, n, err = iotago.SlotIndexFromBytes(b) 28 | if err != nil { 29 | return nil, 0, err 30 | } 31 | 32 | return s, n, nil 33 | } 34 | 35 | func (s *stateTreeMetadata) Bytes() ([]byte, error) { 36 | 
return s.Slot.Bytes() 37 | } 38 | 39 | func (m *Manager) StateTreeRoot() iotago.Identifier { 40 | return m.stateTree.Root() 41 | } 42 | 43 | func (m *Manager) CheckStateTree() bool { 44 | comparisonTree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), 45 | iotago.Identifier.Bytes, 46 | iotago.IdentifierFromBytes, 47 | iotago.OutputID.Bytes, 48 | iotago.OutputIDFromBytes, 49 | (*stateTreeMetadata).Bytes, 50 | stateMetadataFromBytes, 51 | ) 52 | 53 | if err := m.ForEachUnspentOutput(func(output *Output) bool { 54 | if err := comparisonTree.Set(output.OutputID(), newStateMetadata(output)); err != nil { 55 | panic(ierrors.Wrapf(err, "failed to set output in comparison tree, outputID: %s", output.OutputID().ToHex())) 56 | } 57 | 58 | return true 59 | }); err != nil { 60 | return false 61 | } 62 | 63 | comparisonRoot := comparisonTree.Root() 64 | storedRoot := m.StateTreeRoot() 65 | 66 | return bytes.Equal(comparisonRoot[:], storedRoot[:]) 67 | } 68 | -------------------------------------------------------------------------------- /pkg/protocol/engine/utxoledger/tpkg/random.go: -------------------------------------------------------------------------------- 1 | package tpkg 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/lo" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | "github.com/iotaledger/iota.go/v4/tpkg" 8 | ) 9 | 10 | func RandLedgerStateOutput() *utxoledger.Output { 11 | return RandLedgerStateOutputWithType(tpkg.RandOutputType()) 12 | } 13 | 14 | func RandLedgerStateOutputWithOutput(output iotago.Output) *utxoledger.Output { 15 | outputs := iotago.TxEssenceOutputs{output} 16 | txID := tpkg.RandTransactionID() 17 | proof := lo.PanicOnErr(iotago.NewOutputIDProof(tpkg.ZeroCostTestAPI, txID.Identifier(), txID.Slot(), outputs, 0)) 18 | 19 | return utxoledger.CreateOutput(iotago.SingleVersionProvider(tpkg.ZeroCostTestAPI), tpkg.RandOutputID(), tpkg.RandBlockID(), tpkg.RandSlot(), outputs[0], 
proof) 20 | } 21 | 22 | func RandLedgerStateOutputWithType(outputType iotago.OutputType) *utxoledger.Output { 23 | return RandLedgerStateOutputWithOutput(tpkg.RandOutput(outputType)) 24 | } 25 | 26 | func RandLedgerStateOutputOnAddress(outputType iotago.OutputType, address iotago.Address) *utxoledger.Output { 27 | return RandLedgerStateOutputWithOutput(tpkg.RandOutputOnAddress(outputType, address)) 28 | } 29 | 30 | func RandLedgerStateOutputOnAddressWithAmount(outputType iotago.OutputType, address iotago.Address, amount iotago.BaseToken) *utxoledger.Output { 31 | return RandLedgerStateOutputWithOutput(tpkg.RandOutputOnAddressWithAmount(outputType, address, amount)) 32 | } 33 | 34 | func RandLedgerStateSpent(indexSpent iotago.SlotIndex) *utxoledger.Spent { 35 | return utxoledger.NewSpent(RandLedgerStateOutput(), tpkg.RandTransactionID(), indexSpent) 36 | } 37 | 38 | func RandLedgerStateSpentWithOutput(output *utxoledger.Output, indexSpent iotago.SlotIndex) *utxoledger.Spent { 39 | return utxoledger.NewSpent(output, tpkg.RandTransactionID(), indexSpent) 40 | } 41 | -------------------------------------------------------------------------------- /pkg/protocol/errors.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | ) 6 | 7 | var ( 8 | // ErrorCommitmentNotFound is returned for requests for commitments that are not available yet. 9 | ErrorCommitmentNotFound = ierrors.New("commitment not found") 10 | 11 | // ErrorSlotEvicted is returned for requests for commitments that belong to evicted slots. 
12 | ErrorSlotEvicted = ierrors.New("slot evicted") 13 | ) 14 | -------------------------------------------------------------------------------- /pkg/protocol/events.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "github.com/libp2p/go-libp2p/core/peer" 5 | 6 | "github.com/iotaledger/hive.go/runtime/event" 7 | "github.com/iotaledger/iota-core/pkg/model" 8 | "github.com/iotaledger/iota-core/pkg/protocol/engine" 9 | ) 10 | 11 | // Events exposes the Events of the main engine of the protocol at a single endpoint. 12 | // 13 | // TODO: It should be replaced with reactive calls to the corresponding events and be deleted but we can do this in a 14 | // later PR (to minimize the code changes to review). 15 | type Events struct { 16 | Engine *engine.Events 17 | ProtocolFilter *event.Event1[*BlockFilteredEvent] 18 | } 19 | 20 | // NewEvents creates a new Events instance. 21 | func NewEvents() *Events { 22 | return &Events{ 23 | Engine: engine.NewEvents(), 24 | ProtocolFilter: event.New1[*BlockFilteredEvent](), 25 | } 26 | } 27 | 28 | type BlockFilteredEvent struct { 29 | Block *model.Block 30 | Reason error 31 | Source peer.ID 32 | } 33 | -------------------------------------------------------------------------------- /pkg/protocol/network.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "github.com/libp2p/go-libp2p/core/peer" 5 | 6 | "github.com/iotaledger/hive.go/log" 7 | "github.com/iotaledger/iota-core/pkg/model" 8 | "github.com/iotaledger/iota-core/pkg/network" 9 | "github.com/iotaledger/iota-core/pkg/network/protocols/core" 10 | ) 11 | 12 | // Network is a subcomponent of the protocol that is responsible for handling the network communication. 13 | type Network struct { 14 | // Protocol contains the network endpoint of the protocol. 
15 | *core.Protocol 16 | 17 | // protocol contains a reference to the Protocol instance that this component belongs to. 18 | protocol *Protocol 19 | 20 | // Logger contains a reference to the logger that is used by this component. 21 | log.Logger 22 | } 23 | 24 | // newNetwork creates a new network protocol instance for the given protocol and network endpoint. 25 | func newNetwork(protocol *Protocol, networkEndpoint network.Endpoint) *Network { 26 | n := &Network{ 27 | Protocol: core.NewProtocol(networkEndpoint, protocol.Workers.CreatePool("NetworkProtocol"), protocol), 28 | Logger: protocol.NewChildLogger("Network"), 29 | protocol: protocol, 30 | } 31 | 32 | protocol.ShutdownEvent().OnTrigger(n.Logger.Shutdown) 33 | 34 | return n 35 | } 36 | 37 | // OnBlockReceived overwrites the OnBlockReceived method of the core protocol to filter out invalid blocks. 38 | func (n *Network) OnBlockReceived(callback func(block *model.Block, src peer.ID)) (unsubscribe func()) { 39 | return n.Protocol.OnBlockReceived(func(block *model.Block, src peer.ID) { 40 | callback(block, src) 41 | }) 42 | } 43 | -------------------------------------------------------------------------------- /pkg/protocol/sybilprotection/activitytracker/activitytracker.go: -------------------------------------------------------------------------------- 1 | package activitytracker 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/ds" 7 | "github.com/iotaledger/iota-core/pkg/core/account" 8 | iotago "github.com/iotaledger/iota.go/v4" 9 | ) 10 | 11 | type ActivityTracker interface { 12 | OnlineCommittee() ds.Set[account.SeatIndex] 13 | MarkSeatActive(seat account.SeatIndex, id iotago.AccountID, seatActivityTime time.Time) 14 | } 15 | -------------------------------------------------------------------------------- /pkg/protocol/sybilprotection/activitytracker/events.go: -------------------------------------------------------------------------------- 1 | package activitytracker 2 | 3 | import ( 4 | 
"github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/core/account" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type Events struct { 10 | OnlineCommitteeSeatAdded *event.Event2[account.SeatIndex, iotago.AccountID] 11 | OnlineCommitteeSeatRemoved *event.Event1[account.SeatIndex] 12 | 13 | event.Group[Events, *Events] 14 | } 15 | 16 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 17 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 18 | return &Events{ 19 | OnlineCommitteeSeatAdded: event.New2[account.SeatIndex, iotago.AccountID](), 20 | OnlineCommitteeSeatRemoved: event.New1[account.SeatIndex](), 21 | } 22 | }) 23 | -------------------------------------------------------------------------------- /pkg/protocol/sybilprotection/events.go: -------------------------------------------------------------------------------- 1 | package sybilprotection 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/core/account" 6 | iotago "github.com/iotaledger/iota.go/v4" 7 | ) 8 | 9 | type Events struct { 10 | CommitteeSelected *event.Event2[*account.SeatedAccounts, iotago.EpochIndex] 11 | RewardsCommitted *event.Event1[iotago.EpochIndex] 12 | 13 | event.Group[Events, *Events] 14 | } 15 | 16 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 
17 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 18 | return &Events{ 19 | CommitteeSelected: event.New2[*account.SeatedAccounts, iotago.EpochIndex](), 20 | RewardsCommitted: event.New1[iotago.EpochIndex](), 21 | } 22 | }) 23 | -------------------------------------------------------------------------------- /pkg/protocol/sybilprotection/seatmanager/events.go: -------------------------------------------------------------------------------- 1 | package seatmanager 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/event" 5 | "github.com/iotaledger/iota-core/pkg/core/account" 6 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 7 | iotago "github.com/iotaledger/iota.go/v4" 8 | ) 9 | 10 | type Events struct { 11 | BlockProcessed *event.Event1[*blocks.Block] 12 | OnlineCommitteeSeatAdded *event.Event2[account.SeatIndex, iotago.AccountID] 13 | OnlineCommitteeSeatRemoved *event.Event1[account.SeatIndex] 14 | 15 | event.Group[Events, *Events] 16 | } 17 | 18 | // NewEvents contains the constructor of the Events object (it is generated by a generic factory). 
19 | var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { 20 | return &Events{ 21 | BlockProcessed: event.New1[*blocks.Block](), 22 | OnlineCommitteeSeatAdded: event.New2[account.SeatIndex, iotago.AccountID](), 23 | OnlineCommitteeSeatRemoved: event.New1[account.SeatIndex](), 24 | } 25 | }) 26 | -------------------------------------------------------------------------------- /pkg/protocol/sybilprotection/seatmanager/poa/options.go: -------------------------------------------------------------------------------- 1 | package poa 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/options" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | func WithOnlineCommitteeStartup(optsOnlineCommittee ...iotago.AccountID) options.Option[SeatManager] { 9 | return func(p *SeatManager) { 10 | p.optsOnlineCommitteeStartup = optsOnlineCommittee 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /pkg/protocol/sybilprotection/seatmanager/topstakers/options.go: -------------------------------------------------------------------------------- 1 | package topstakers 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/options" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | func WithOnlineCommitteeStartup(optsOnlineCommittee ...iotago.AccountID) options.Option[SeatManager] { 9 | return func(p *SeatManager) { 10 | p.optsOnlineCommitteeStartup = optsOnlineCommittee 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /pkg/protocol/utils.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/log" 5 | "github.com/iotaledger/hive.go/runtime/workerpool" 6 | ) 7 | 8 | // loggedWorkerPoolTask is a generic utility function that submits a request to the given worker pool logging the result. 
9 | func loggedWorkerPoolTask(workerPool *workerpool.WorkerPool, processRequest func() error, logger log.Logger, loggerArgs ...any) { 10 | workerPool.Submit(func() { 11 | if err := processRequest(); err != nil { 12 | logger.LogTrace("failed to answer request", append(loggerArgs, "err", err)...) 13 | } else { 14 | logger.LogTrace("answered request", loggerArgs...) 15 | } 16 | }) 17 | } 18 | -------------------------------------------------------------------------------- /pkg/protocol/versioning.go: -------------------------------------------------------------------------------- 1 | package protocol 2 | 3 | const ( 4 | // DatabaseVersion defines the current version of the database. 5 | DatabaseVersion byte = 1 6 | ) 7 | -------------------------------------------------------------------------------- /pkg/requesthandler/requesthandler.go: -------------------------------------------------------------------------------- 1 | package requesthandler 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/options" 5 | "github.com/iotaledger/hive.go/runtime/workerpool" 6 | "github.com/iotaledger/iota-core/pkg/protocol" 7 | "github.com/iotaledger/iota-core/pkg/requesthandler/cache" 8 | ) 9 | 10 | // RequestHandler contains the logic to handle api requests. 11 | type RequestHandler struct { 12 | workerPool *workerpool.WorkerPool 13 | 14 | cache *cache.Cache 15 | protocol *protocol.Protocol 16 | 17 | optsCacheMaxSize int 18 | } 19 | 20 | func New(p *protocol.Protocol, opts ...options.Option[RequestHandler]) *RequestHandler { 21 | return options.Apply(&RequestHandler{ 22 | workerPool: p.Workers.CreatePool("RequestHandler"), 23 | protocol: p, 24 | optsCacheMaxSize: 50 << 20, // 50MB 25 | }, opts, func(r *RequestHandler) { 26 | r.cache = cache.NewCache(r.optsCacheMaxSize) 27 | }) 28 | } 29 | 30 | // Shutdown shuts down the block issuer. 
31 | func (r *RequestHandler) Shutdown() { 32 | r.workerPool.Shutdown() 33 | r.workerPool.ShutdownComplete.Wait() 34 | } 35 | 36 | func WithCacheMaxSizeOptions(size int) options.Option[RequestHandler] { 37 | return func(r *RequestHandler) { 38 | r.optsCacheMaxSize = size 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /pkg/restapi/utils.go: -------------------------------------------------------------------------------- 1 | package restapi 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | 7 | "github.com/iotaledger/hive.go/ierrors" 8 | ) 9 | 10 | func CompileRouteAsRegex(route string) *regexp.Regexp { 11 | r := route 12 | 13 | // interpret the string as raw regex if it starts with "^" 14 | if !strings.HasPrefix(route, "^") { 15 | r = regexp.QuoteMeta(route) 16 | r = strings.ReplaceAll(r, `\*`, "(.*?)") 17 | r += "$" 18 | } 19 | 20 | reg, err := regexp.Compile(r) 21 | if err != nil { 22 | return nil 23 | } 24 | 25 | return reg 26 | } 27 | 28 | func CompileRoutesAsRegexes(routes []string) ([]*regexp.Regexp, error) { 29 | regexes := make([]*regexp.Regexp, len(routes)) 30 | for i, route := range routes { 31 | reg := CompileRouteAsRegex(route) 32 | if reg == nil { 33 | return nil, ierrors.Errorf("invalid route in config: %s", route) 34 | } 35 | regexes[i] = reg 36 | } 37 | 38 | return regexes, nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/retainer/blockretainer/block_retainer_cache.go: -------------------------------------------------------------------------------- 1 | package blockretainer 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds/shrinkingmap" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | "github.com/iotaledger/iota.go/v4/api" 7 | ) 8 | 9 | type cache struct { 10 | uncommittedBlockMetadata *shrinkingmap.ShrinkingMap[iotago.SlotIndex, map[iotago.BlockID]api.BlockState] 11 | } 12 | 13 | func newCache() *cache { 14 | return &cache{ 15 | 
uncommittedBlockMetadata: shrinkingmap.New[iotago.SlotIndex, map[iotago.BlockID]api.BlockState](), 16 | } 17 | } 18 | 19 | // blockMetadataByID returns the block metadata of a block by its ID. 20 | func (c *cache) blockMetadataByID(blockID iotago.BlockID) (api.BlockState, bool) { 21 | slotMap, exists := c.uncommittedBlockMetadata.Get(blockID.Slot()) 22 | if exists { 23 | blockMetadata, found := slotMap[blockID] 24 | if found { 25 | return blockMetadata, true 26 | } 27 | } 28 | 29 | return api.BlockStateUnknown, false 30 | } 31 | 32 | func (c *cache) setBlockMetadata(blockID iotago.BlockID, state api.BlockState) { 33 | blocks, _ := c.uncommittedBlockMetadata.GetOrCreate(blockID.Slot(), func() map[iotago.BlockID]api.BlockState { 34 | return make(map[iotago.BlockID]api.BlockState) 35 | }) 36 | 37 | prevState, ok := blocks[blockID] 38 | if ok && state == api.BlockStateDropped { 39 | if prevState == api.BlockStateAccepted || prevState == api.BlockStateConfirmed { 40 | // do not overwrite acceptance/confirmation with the local congestion dropped event 41 | return 42 | } 43 | } 44 | 45 | blocks[blockID] = state 46 | } 47 | -------------------------------------------------------------------------------- /pkg/retainer/retainer.go: -------------------------------------------------------------------------------- 1 | package retainer 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/module" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | "github.com/iotaledger/iota.go/v4/api" 7 | ) 8 | 9 | // BlockRetainer keeps and resolves all the block related information needed in the API and INX. 10 | type BlockRetainer interface { 11 | BlockMetadata(blockID iotago.BlockID) (*api.BlockMetadataResponse, error) 12 | 13 | // Reset resets the component to a clean state as if it was created at the last commitment. 14 | Reset() 15 | 16 | // Interface embeds the required methods of the module.Module. 
17 | module.Module 18 | } 19 | 20 | // TransactionRetainer keeps and resolves all the transaction-related metadata needed in the API and INX. 21 | type TransactionRetainer interface { 22 | // TransactionMetadata returns the metadata of a transaction. 23 | TransactionMetadata(txID iotago.TransactionID) (*api.TransactionMetadataResponse, error) 24 | 25 | // Reset resets the component to a clean state as if it was created at the last commitment. 26 | Reset(targetSlot iotago.SlotIndex) 27 | 28 | module.Module 29 | } 30 | -------------------------------------------------------------------------------- /pkg/storage/database/config.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/db" 5 | ) 6 | 7 | type Config struct { 8 | Engine db.Engine 9 | Directory string 10 | Version byte 11 | PrefixHealth []byte 12 | } 13 | 14 | func (c Config) WithDirectory(directory string) Config { 15 | c.Directory = directory 16 | return c 17 | } 18 | -------------------------------------------------------------------------------- /pkg/storage/database/errors.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import "github.com/iotaledger/hive.go/ierrors" 4 | 5 | var ( 6 | ErrEpochPruned = ierrors.New("epoch pruned") 7 | ErrNoPruningNeeded = ierrors.New("no pruning needed") 8 | ErrDatabaseFull = ierrors.New("database full") 9 | ErrDatabaseShutdown = ierrors.New("cannot open DBInstance that is shutdown") 10 | ErrDatabaseNotClosed = ierrors.New("cannot open DBInstance that is not closed") 11 | ) 12 | -------------------------------------------------------------------------------- /pkg/storage/database/rocksdb.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "runtime" 5 | 6 | "github.com/iotaledger/hive.go/kvstore/rocksdb" 7 | ) 8 | 9 | // NewRocksDB 
creates a new RocksDB instance. 10 | func NewRocksDB(path string) (*rocksdb.RocksDB, error) { 11 | opts := []rocksdb.Option{ 12 | rocksdb.IncreaseParallelism(runtime.NumCPU() - 1), 13 | rocksdb.Custom([]string{ 14 | "periodic_compaction_seconds=43200", 15 | "level_compaction_dynamic_level_bytes=true", 16 | "keep_log_file_num=2", 17 | "max_log_file_size=50000000", // 50MB per log file 18 | }), 19 | } 20 | 21 | return rocksdb.CreateDB(path, opts...) 22 | } 23 | -------------------------------------------------------------------------------- /pkg/storage/options.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/db" 7 | "github.com/iotaledger/hive.go/runtime/options" 8 | "github.com/iotaledger/iota-core/pkg/storage/permanent" 9 | "github.com/iotaledger/iota-core/pkg/storage/prunable" 10 | iotago "github.com/iotaledger/iota.go/v4" 11 | ) 12 | 13 | func WithDBEngine(optsDBEngine db.Engine) options.Option[Storage] { 14 | return func(s *Storage) { 15 | s.optsDBEngine = optsDBEngine 16 | } 17 | } 18 | 19 | func WithAllowedDBEngines(optsAllowedDBEngines []db.Engine) options.Option[Storage] { 20 | return func(s *Storage) { 21 | s.optsAllowedDBEngines = optsAllowedDBEngines 22 | } 23 | } 24 | 25 | func WithBucketManagerOptions(opts ...options.Option[prunable.BucketManager]) options.Option[Storage] { 26 | return func(s *Storage) { 27 | s.optsBucketManagerOptions = append(s.optsBucketManagerOptions, opts...) 
28 | } 29 | } 30 | 31 | func WithPruningDelay(optsPruningDelay iotago.EpochIndex) options.Option[Storage] { 32 | return func(s *Storage) { 33 | s.optsPruningDelay = optsPruningDelay 34 | } 35 | } 36 | 37 | func WithPruningSizeEnable(pruningSizeEnabled bool) options.Option[Storage] { 38 | return func(p *Storage) { 39 | p.optPruningSizeEnabled = pruningSizeEnabled 40 | } 41 | } 42 | 43 | func WithPruningSizeMaxTargetSizeBytes(pruningSizeTargetSizeBytes int64) options.Option[Storage] { 44 | return func(p *Storage) { 45 | p.optsPruningSizeMaxTargetSizeBytes = pruningSizeTargetSizeBytes 46 | } 47 | } 48 | 49 | func WithPruningSizeReductionPercentage(pruningSizeReductionPercentage float64) options.Option[Storage] { 50 | return func(p *Storage) { 51 | p.optsPruningSizeReductionPercentage = pruningSizeReductionPercentage 52 | } 53 | } 54 | 55 | func WithPruningSizeCooldownTime(cooldown time.Duration) options.Option[Storage] { 56 | return func(p *Storage) { 57 | p.optsPruningSizeCooldownTime = cooldown 58 | } 59 | } 60 | 61 | func WithPermanentOptions(opts ...options.Option[permanent.Permanent]) options.Option[Storage] { 62 | return func(s *Storage) { 63 | s.optsPermanent = append(s.optsPermanent, opts...) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /pkg/storage/permanent/options.go: -------------------------------------------------------------------------------- 1 | package permanent 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/options" 5 | iotago "github.com/iotaledger/iota.go/v4" 6 | ) 7 | 8 | func WithEpochBasedProviderOptions(opts ...options.Option[iotago.EpochBasedProvider]) options.Option[Permanent] { 9 | return func(p *Permanent) { 10 | p.optsEpochBasedProvider = append(p.optsEpochBasedProvider, opts...) 
11 | } 12 | } 13 | -------------------------------------------------------------------------------- /pkg/storage/prunable/epochstore/constants.go: -------------------------------------------------------------------------------- 1 | package epochstore 2 | 3 | const ( 4 | entriesKey byte = iota 5 | lastAccessedEpochKey 6 | lastPrunedEpochKey 7 | ) 8 | -------------------------------------------------------------------------------- /pkg/storage/prunable/epochstore/store.go: -------------------------------------------------------------------------------- 1 | package epochstore 2 | 3 | import ( 4 | iotago "github.com/iotaledger/iota.go/v4" 5 | ) 6 | 7 | type Store[V any] interface { 8 | RestoreLastPrunedEpoch() error 9 | LastAccessedEpoch() (iotago.EpochIndex, error) 10 | LastPrunedEpoch() (iotago.EpochIndex, bool) 11 | Load(epoch iotago.EpochIndex) (V, error) 12 | Store(epoch iotago.EpochIndex, value V) error 13 | Stream(consumer func(epoch iotago.EpochIndex, value V) error) error 14 | StreamBytes(consumer func([]byte, []byte) error) error 15 | DeleteEpoch(epoch iotago.EpochIndex) error 16 | Prune(epoch iotago.EpochIndex, defaultPruningDelay iotago.EpochIndex) ([]iotago.EpochIndex, error) 17 | RollbackEpochs(epoch iotago.EpochIndex) (iotago.EpochIndex, []iotago.EpochIndex, error) 18 | } 19 | -------------------------------------------------------------------------------- /pkg/storage/prunable/options.go: -------------------------------------------------------------------------------- 1 | package prunable 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/runtime/options" 5 | ) 6 | 7 | // WithMaxOpenDBs sets the maximum concurrently open DBs. 
8 | func WithMaxOpenDBs(optsMaxOpenDBs int) options.Option[BucketManager] { 9 | return func(m *BucketManager) { 10 | m.optsMaxOpenDBs = optsMaxOpenDBs 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /pkg/storage/prunable/prunable_epoch.go: -------------------------------------------------------------------------------- 1 | package prunable 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/kvstore" 5 | "github.com/iotaledger/iota-core/pkg/core/account" 6 | "github.com/iotaledger/iota-core/pkg/model" 7 | "github.com/iotaledger/iota-core/pkg/storage/prunable/epochstore" 8 | iotago "github.com/iotaledger/iota.go/v4" 9 | ) 10 | 11 | const ( 12 | epochPrefixDecidedUpgradeSignals byte = iota 13 | epochPrefixPoolRewards 14 | epochPrefixPoolStats 15 | epochPrefixCommittee 16 | ) 17 | 18 | const ( 19 | pruningDelayDecidedUpgradeSignals = 7 20 | ) 21 | 22 | func (p *Prunable) RewardsForEpoch(epoch iotago.EpochIndex) (kvstore.KVStore, error) { 23 | return p.poolRewards.GetEpoch(epoch) 24 | } 25 | 26 | func (p *Prunable) Rewards() *epochstore.EpochKVStore { 27 | return p.poolRewards 28 | } 29 | 30 | func (p *Prunable) PoolStats() *epochstore.BaseStore[*model.PoolsStats] { 31 | return p.poolStats 32 | } 33 | 34 | func (p *Prunable) DecidedUpgradeSignals() *epochstore.BaseStore[model.VersionAndHash] { 35 | return p.decidedUpgradeSignals 36 | } 37 | 38 | func (p *Prunable) Committee() *epochstore.CachedStore[*account.SeatedAccounts] { 39 | return p.committee 40 | } 41 | -------------------------------------------------------------------------------- /pkg/storage/prunable/utils.go: -------------------------------------------------------------------------------- 1 | package prunable 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "sort" 7 | "strconv" 8 | 9 | "github.com/iotaledger/hive.go/lo" 10 | "github.com/iotaledger/hive.go/runtime/ioutils" 11 | iotago "github.com/iotaledger/iota.go/v4" 12 | ) 13 | 14 | func 
dbPathFromIndex(base string, epoch iotago.EpochIndex) string { 15 | return filepath.Join(base, strconv.FormatInt(int64(epoch), 10)) 16 | } 17 | 18 | type dbInstanceFileInfo struct { 19 | baseEpoch iotago.EpochIndex 20 | path string 21 | } 22 | 23 | // getSortedDBInstancesFromDisk returns an ASC sorted list of db instances from the given base directory. 24 | func getSortedDBInstancesFromDisk(baseDir string) (dbInfos []*dbInstanceFileInfo) { 25 | files, err := os.ReadDir(baseDir) 26 | if err != nil { 27 | panic(err) 28 | } 29 | 30 | files = lo.Filter(files, func(e os.DirEntry) bool { return e.IsDir() }) 31 | dbInfos = lo.Map(files, func(e os.DirEntry) *dbInstanceFileInfo { 32 | atoi, convErr := strconv.Atoi(e.Name()) 33 | if convErr != nil { 34 | return nil 35 | } 36 | 37 | return &dbInstanceFileInfo{ 38 | baseEpoch: iotago.EpochIndex(atoi), 39 | path: filepath.Join(baseDir, e.Name()), 40 | } 41 | }) 42 | dbInfos = lo.Filter(dbInfos, func(info *dbInstanceFileInfo) bool { return info != nil }) 43 | 44 | sort.Slice(dbInfos, func(i int, j int) bool { 45 | return dbInfos[i].baseEpoch < dbInfos[j].baseEpoch 46 | }) 47 | 48 | return dbInfos 49 | } 50 | 51 | func dbPrunableDirectorySize(base string, epoch iotago.EpochIndex) (int64, error) { 52 | return ioutils.FolderSize(dbPathFromIndex(base, epoch)) 53 | } 54 | -------------------------------------------------------------------------------- /pkg/storage/storage_clonable_sql.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "gorm.io/gorm" 5 | ) 6 | 7 | type ( 8 | SQLDatabaseExecFunc func(func(*gorm.DB) error) error 9 | ) 10 | 11 | func (s *Storage) TransactionRetainerDatabaseExecFunc() SQLDatabaseExecFunc { 12 | return s.txRetainerSQL.ExecDBFunc() 13 | } 14 | -------------------------------------------------------------------------------- /pkg/storage/storage_permanent.go: 
-------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/kvstore" 5 | "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" 6 | "github.com/iotaledger/iota-core/pkg/storage/permanent" 7 | ) 8 | 9 | func (s *Storage) Settings() *permanent.Settings { 10 | return s.permanent.Settings() 11 | } 12 | 13 | func (s *Storage) Commitments() *permanent.Commitments { 14 | return s.permanent.Commitments() 15 | } 16 | 17 | // Accounts returns the Accounts storage (or a specialized sub-storage if a realm is provided). 18 | func (s *Storage) Accounts(optRealm ...byte) kvstore.KVStore { 19 | return s.permanent.Accounts(optRealm...) 20 | } 21 | 22 | // Ledger returns the ledger storage (or a specialized sub-storage if a realm is provided). 23 | func (s *Storage) Ledger() *utxoledger.Manager { 24 | return s.permanent.UTXOLedger() 25 | } 26 | -------------------------------------------------------------------------------- /pkg/storage/utils/directory.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | 7 | "github.com/iotaledger/hive.go/runtime/ioutils" 8 | ) 9 | 10 | const ( 11 | defaultPermissions = 0o755 12 | ) 13 | 14 | // Directory represents a directory on the disk. 15 | type Directory struct { 16 | path string 17 | } 18 | 19 | // NewDirectory creates a new directory at the given path. 20 | func NewDirectory(path string, createIfMissing ...bool) (newDirectory *Directory) { 21 | if len(createIfMissing) > 0 && createIfMissing[0] { 22 | if err := ioutils.CreateDirectory(path, defaultPermissions); err != nil { 23 | panic(err) 24 | } 25 | } 26 | 27 | return &Directory{ 28 | path: path, 29 | } 30 | } 31 | 32 | // Path returns the absolute path that corresponds to the relative path. 
33 | func (d *Directory) Path(relativePathElements ...string) (path string) { 34 | return filepath.Join(append([]string{d.path}, relativePathElements...)...) 35 | } 36 | 37 | // PathWithCreate returns the absolute path that corresponds to the relative path and creates the folder if it doesn't exist. 38 | func (d *Directory) PathWithCreate(relativePathElements ...string) string { 39 | path := filepath.Join(append([]string{d.path}, relativePathElements...)...) 40 | 41 | if err := ioutils.CreateDirectory(path, defaultPermissions); err != nil { 42 | panic(err) 43 | } 44 | 45 | return path 46 | } 47 | 48 | func (d *Directory) RemoveSubdir(name string) error { 49 | return os.RemoveAll(d.Path(name)) 50 | } 51 | 52 | func (d *Directory) SubDirs() ([]string, error) { 53 | entries, err := os.ReadDir(d.path) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | var dirs []string 59 | for _, entry := range entries { 60 | if entry.IsDir() { 61 | dirs = append(dirs, entry.Name()) 62 | } 63 | } 64 | 65 | return dirs, nil 66 | } 67 | -------------------------------------------------------------------------------- /pkg/tests/big_committee_test.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/iotaledger/iota-core/pkg/testsuite" 8 | iotago "github.com/iotaledger/iota.go/v4" 9 | ) 10 | 11 | func TestBigCommittee(t *testing.T) { 12 | t.Skip("only for benchmarking performance") 13 | 14 | var ( 15 | genesisSlot iotago.SlotIndex = 0 16 | minCommittableAge iotago.SlotIndex = 2 17 | maxCommittableAge iotago.SlotIndex = 4 18 | ) 19 | 20 | ts := testsuite.NewTestSuite(t, 21 | testsuite.WithProtocolParametersOptions( 22 | iotago.WithTimeProviderOptions( 23 | genesisSlot, 24 | testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), 25 | testsuite.DefaultSlotDurationInSeconds, 26 | 3, 27 | ), 28 | iotago.WithLivenessOptions( 29 | 10, 30 | 10, 31 | 
minCommittableAge, 32 | maxCommittableAge, 33 | 5, 34 | ), 35 | ), 36 | ) 37 | defer ts.Shutdown() 38 | 39 | for i := 0; i < 32; i++ { 40 | nodeName := fmt.Sprintf("node%d", i) 41 | ts.AddValidatorNode(nodeName) 42 | } 43 | 44 | for i := 32; i < 50; i++ { 45 | nodeName := fmt.Sprintf("node%d", i) 46 | ts.AddNode(nodeName) 47 | } 48 | 49 | ts.Run(true) 50 | fmt.Println("TestBigCommittee starting to issue blocks...") 51 | 52 | ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5}, 4, "Genesis", ts.Nodes(), true, false) 53 | 54 | ts.AssertNodeState(ts.Nodes(), 55 | testsuite.WithLatestCommitmentSlotIndex(3), 56 | testsuite.WithEqualStoredCommitmentAtIndex(3), 57 | ) 58 | } 59 | -------------------------------------------------------------------------------- /pkg/testsuite/blocks_retainer.go: -------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/iotaledger/hive.go/ierrors" 7 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 8 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 9 | "github.com/iotaledger/iota.go/v4/api" 10 | ) 11 | 12 | func (t *TestSuite) AssertRetainerBlocksState(expectedBlocks []*blocks.Block, expectedState api.BlockState, nodes ...*mock.Node) { 13 | mustNodes(nodes) 14 | 15 | for _, node := range nodes { 16 | for _, block := range expectedBlocks { 17 | t.Eventually(func() error { 18 | blockFromRetainer, err := node.Client.BlockMetadataByBlockID(context.Background(), block.ID()) 19 | if err != nil { 20 | return ierrors.Errorf("AssertRetainerBlocksState: %s: block %s: error when loading %s", node.Name, block.ID(), err.Error()) 21 | } 22 | 23 | if expectedState != blockFromRetainer.BlockState { 24 | return ierrors.Errorf("AssertRetainerBlocksState: %s: block %s: expected %s, got %s", node.Name, block.ID(), expectedState, blockFromRetainer.BlockState) 25 | } 26 | 27 | return nil 28 | }) 29 | 30 | t.AssertBlock(block, node.Client) 31 
| } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /pkg/testsuite/eviction.go: -------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | 6 | "github.com/iotaledger/hive.go/ierrors" 7 | "github.com/iotaledger/hive.go/lo" 8 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 9 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 10 | iotago "github.com/iotaledger/iota.go/v4" 11 | ) 12 | 13 | func (t *TestSuite) AssertActiveRootBlocks(expectedBlocks []*blocks.Block, nodes ...*mock.Node) { 14 | mustNodes(nodes) 15 | 16 | expectedRootBlocks := make(map[iotago.BlockID]iotago.CommitmentID) 17 | for _, expectedBlock := range expectedBlocks { 18 | expectedRootBlocks[expectedBlock.ID()] = expectedBlock.SlotCommitmentID() 19 | } 20 | 21 | for _, node := range nodes { 22 | t.Eventually(func() error { 23 | activeRootBlocks := node.Protocol.Engines.Main.Get().EvictionState.AllActiveRootBlocks() 24 | 25 | if !assert.Equal(t.fakeTesting, expectedRootBlocks, activeRootBlocks) { 26 | return ierrors.Errorf("AssertActiveRootBlocks: %s: expected %v, got %v", node.Name, expectedRootBlocks, activeRootBlocks) 27 | } 28 | 29 | return nil 30 | }) 31 | } 32 | } 33 | 34 | func (t *TestSuite) AssertEvictedSlot(expectedIndex iotago.SlotIndex, nodes ...*mock.Node) { 35 | mustNodes(nodes) 36 | 37 | for _, node := range nodes { 38 | t.Eventually(func() error { 39 | if expectedIndex != lo.Return1(node.Protocol.Engines.Main.Get().EvictionState.LastEvictedSlot()) { 40 | return ierrors.Errorf("AssertEvictedSlot: %s: expected %d, got %d", node.Name, expectedIndex, lo.Return1(node.Protocol.Engines.Main.Get().EvictionState.LastEvictedSlot())) 41 | } 42 | 43 | return nil 44 | }) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/testsuite/fork.go: 
-------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 6 | ) 7 | 8 | func (t *TestSuite) AssertForkDetectedCount(expectedCount int, nodes ...*mock.Node) { 9 | mustNodes(nodes) 10 | 11 | for _, node := range nodes { 12 | t.Eventually(func() error { 13 | actualCount := node.ForkDetectedCount() 14 | if expectedCount != actualCount { 15 | return ierrors.Errorf("AssertForkDetectedCount: %s: expected %v, got %v", node.Name, expectedCount, actualCount) 16 | } 17 | 18 | return nil 19 | }) 20 | } 21 | } 22 | 23 | func (t *TestSuite) AssertCandidateEngineActivatedCount(expectedCount int, nodes ...*mock.Node) { 24 | mustNodes(nodes) 25 | 26 | for _, node := range nodes { 27 | t.Eventually(func() error { 28 | actualCount := node.CandidateEngineActivatedCount() 29 | if expectedCount != actualCount { 30 | return ierrors.Errorf("AssertCandidateEngineActivatedCount: %s: expected %v, got %v", node.Name, expectedCount, actualCount) 31 | } 32 | 33 | return nil 34 | }) 35 | } 36 | } 37 | 38 | func (t *TestSuite) AssertMainEngineSwitchedCount(expectedCount int, nodes ...*mock.Node) { 39 | mustNodes(nodes) 40 | 41 | for _, node := range nodes { 42 | t.Eventually(func() error { 43 | actualCount := node.MainEngineSwitchedCount() 44 | if expectedCount != actualCount { 45 | return ierrors.Errorf("AssertMainEngineSwitchedCount: %s: expected %v, got %v", node.Name, expectedCount, actualCount) 46 | } 47 | 48 | return nil 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /pkg/testsuite/mock/wallet_blocks.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/iotaledger/hive.go/runtime/options" 7 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 8 | ) 9 | 10 | func 
(w *Wallet) CreateBasicBlock(ctx context.Context, blockName string, opts ...options.Option[BasicBlockParams]) (*blocks.Block, error) { 11 | return w.BlockIssuer.CreateBasicBlock(ctx, blockName, opts...) 12 | } 13 | 14 | func (w *Wallet) CreateAndSubmitBasicBlock(ctx context.Context, blockName string, opts ...options.Option[BasicBlockParams]) (*blocks.Block, error) { 15 | return w.BlockIssuer.CreateAndSubmitBasicBlock(ctx, blockName, opts...) 16 | } 17 | 18 | func (w *Wallet) CreateValidationBlock(ctx context.Context, blockName string, node *Node, opts ...options.Option[ValidationBlockParams]) (*blocks.Block, error) { 19 | return w.BlockIssuer.CreateValidationBlock(ctx, blockName, node, opts...) 20 | } 21 | 22 | func (w *Wallet) CreateAndSubmitValidationBlock(ctx context.Context, blockName string, node *Node, opts ...options.Option[ValidationBlockParams]) (*blocks.Block, error) { 23 | return w.BlockIssuer.CreateAndSubmitValidationBlock(ctx, blockName, node, opts...) 24 | } 25 | -------------------------------------------------------------------------------- /pkg/testsuite/spenders.go: -------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ds" 5 | "github.com/iotaledger/hive.go/ierrors" 6 | "github.com/iotaledger/iota-core/pkg/core/acceptance" 7 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 8 | ) 9 | 10 | func (t *TestSuite) AssertSpendersInCacheAcceptanceState(expectedConflictAliases []string, expectedState acceptance.State, nodes ...*mock.Node) { 11 | mustNodes(nodes) 12 | 13 | for _, node := range nodes { 14 | for _, conflictAlias := range expectedConflictAliases { 15 | t.Eventually(func() error { 16 | acceptanceState := node.Protocol.Engines.Main.Get().Ledger.SpendDAG().AcceptanceState(ds.NewSet(t.DefaultWallet().TransactionID(conflictAlias))) 17 | 18 | if acceptanceState != expectedState { 19 | return ierrors.Errorf("AssertSpendersInCacheAcceptanceState: %s: 
conflict %s is %s, but expected %s", node.Name, conflictAlias, acceptanceState, expectedState) 20 | } 21 | 22 | return nil 23 | }) 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /pkg/testsuite/storage_accountdiffs.go: -------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | 6 | "github.com/iotaledger/hive.go/ierrors" 7 | "github.com/iotaledger/iota-core/pkg/model" 8 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 9 | iotago "github.com/iotaledger/iota.go/v4" 10 | ) 11 | 12 | func (t *TestSuite) AssertStorageAccountDiffs(slot iotago.SlotIndex, accountDiffs map[iotago.AccountID]*model.AccountDiff, nodes ...*mock.Node) { 13 | mustNodes(nodes) 14 | 15 | for _, node := range nodes { 16 | for accountID, diffChange := range accountDiffs { 17 | t.Eventually(func() error { 18 | store, err := node.Protocol.Engines.Main.Get().Storage.AccountDiffs(slot) 19 | if err != nil { 20 | return ierrors.Wrapf(err, "AssertStorageAccountDiffs: %s: failed to load accounts diff for slot %d", node.Name, slot) 21 | } 22 | 23 | storedDiffChange, _, err := store.Load(accountID) 24 | if err != nil { 25 | return ierrors.Wrapf(err, "AssertStorageAccountDiffs: %s: error loading account diff: %s", node.Name, accountID) 26 | } 27 | 28 | if !assert.Equal(t.fakeTesting, diffChange, storedDiffChange) { 29 | return ierrors.Errorf("AssertStorageAccountDiffs: %s: expected %v, got %v", node.Name, diffChange, storedDiffChange) 30 | } 31 | 32 | return nil 33 | }) 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pkg/testsuite/storage_blocks.go: -------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | 6 | "github.com/iotaledger/hive.go/ierrors" 7 | 
"github.com/iotaledger/iota-core/pkg/model" 8 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 9 | ) 10 | 11 | func (t *TestSuite) AssertStorageBlock(block *model.Block, node *mock.Node) { 12 | t.Eventually(func() error { 13 | storage, err := node.Protocol.Engines.Main.Get().Storage.Blocks(block.ID().Slot()) 14 | if err != nil { 15 | return ierrors.Errorf("AssertStorageBlock: %s: storage for %s is nil", node.Name, block.ID().Slot()) 16 | } 17 | 18 | loadedBlock, err := storage.Load(block.ID()) 19 | if err != nil { 20 | return ierrors.Wrapf(err, "AssertStorageBlock: %s: error loading block %s", node.Name, block.ID()) 21 | } 22 | 23 | if block.ID() != loadedBlock.ID() { 24 | return ierrors.Errorf("AssertStorageBlock: %s: expected %s, got %s", node.Name, block.ID(), loadedBlock.ID()) 25 | } 26 | 27 | if !assert.Equal(t.fakeTesting, block.Data(), loadedBlock.Data()) { 28 | return ierrors.Errorf("AssertStorageBlock: %s: expected %s, got %s", node.Name, block.Data(), loadedBlock.Data()) 29 | } 30 | 31 | return nil 32 | }) 33 | } 34 | 35 | func (t *TestSuite) AssertStorageBlockExist(block *model.Block, expectedExist bool, node *mock.Node) { 36 | if expectedExist { 37 | t.AssertStorageBlock(block, node) 38 | } else { 39 | t.Eventually(func() error { 40 | storage, err := node.Protocol.Engines.Main.Get().Storage.Blocks(block.ID().Slot()) 41 | if err != nil { 42 | //nolint:nilerr // expected behavior 43 | return nil 44 | } 45 | 46 | loadedBlock, _ := storage.Load(block.ID()) 47 | if loadedBlock != nil { 48 | return ierrors.Errorf("AssertStorageBlockExist: %s: expected block %s to not exist", node.Name, block) 49 | } 50 | 51 | return nil 52 | }) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /pkg/testsuite/storage_rootblocks.go: -------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/ierrors" 5 | 
"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 6 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 7 | ) 8 | 9 | func (t *TestSuite) AssertStorageRootBlocks(blocks []*blocks.Block, nodes ...*mock.Node) { 10 | mustNodes(nodes) 11 | 12 | for _, node := range nodes { 13 | for _, block := range blocks { 14 | t.Eventually(func() error { 15 | storage, err := node.Protocol.Engines.Main.Get().Storage.RootBlocks(block.ID().Slot()) 16 | if err != nil { 17 | return ierrors.Errorf("AssertStorageRootBlocks: %s: error loading root blocks for %s: %v", node.Name, block.ID().Slot(), err) 18 | } 19 | 20 | loadedCommitmentID, exists, err := storage.Load(block.ID()) 21 | if err != nil { 22 | return ierrors.Wrapf(err, "AssertStorageRootBlocks: %s: failed to load root block %s", node.Name, block.ID()) 23 | } 24 | 25 | if !exists { 26 | return ierrors.Errorf("AssertStorageRootBlocks: %s: root block %s does not exist", node.Name, block.ID()) 27 | } 28 | 29 | if block.SlotCommitmentID() != loadedCommitmentID { 30 | return ierrors.Errorf("AssertStorageRootBlocks: %s: expected slot commitment %s, got %s for block %s", node.Name, block.SlotCommitmentID(), loadedCommitmentID, block.ID()) 31 | } 32 | 33 | return nil 34 | }) 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /pkg/testsuite/tips.go: -------------------------------------------------------------------------------- 1 | package testsuite 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | 6 | "github.com/iotaledger/hive.go/ierrors" 7 | "github.com/iotaledger/hive.go/lo" 8 | "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" 9 | "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager" 10 | "github.com/iotaledger/iota-core/pkg/testsuite/mock" 11 | ) 12 | 13 | func (t *TestSuite) AssertStrongTips(expectedBlocks []*blocks.Block, nodes ...*mock.Node) { 14 | mustNodes(nodes) 15 | 16 | expectedBlockIDs := lo.Map(expectedBlocks, 
(*blocks.Block).ID) 17 | 18 | for _, node := range nodes { 19 | t.Eventually(func() error { 20 | storedTipsBlocks := node.Protocol.Engines.Main.Get().TipManager.StrongTips() 21 | storedTipsBlockIDs := lo.Map(storedTipsBlocks, tipmanager.TipMetadata.ID) 22 | 23 | if !assert.ElementsMatch(t.fakeTesting, expectedBlockIDs, storedTipsBlockIDs) { 24 | return ierrors.Errorf("AssertTips: %s: expected %s, got %s", node.Name, expectedBlockIDs, storedTipsBlockIDs) 25 | } 26 | 27 | return nil 28 | }) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /pkg/toolset/benchmark_cpu.go: -------------------------------------------------------------------------------- 1 | package toolset 2 | 3 | import ( 4 | "context" 5 | "sync/atomic" 6 | 7 | "golang.org/x/crypto/blake2b" 8 | ) 9 | 10 | func cpuBenchmarkWorker(ctx context.Context, powDigest []byte, counter *uint64) { 11 | for { 12 | select { 13 | case <-ctx.Done(): 14 | return 15 | 16 | default: 17 | result := blake2b.Sum256(powDigest) 18 | powDigest = result[:] 19 | atomic.AddUint64(counter, 1) 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /pkg/toolset/benchmark_io.go: -------------------------------------------------------------------------------- 1 | package toolset 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "github.com/iotaledger/hive.go/kvstore" 8 | iotago_tpkg "github.com/iotaledger/iota.go/v4/tpkg" 9 | ) 10 | 11 | type benchmarkObject struct { 12 | store kvstore.KVStore 13 | writeDoneWaitGroup *sync.WaitGroup 14 | key []byte 15 | value []byte 16 | } 17 | 18 | func newBenchmarkObject(store kvstore.KVStore, writeDoneWaitGroup *sync.WaitGroup, key []byte, value []byte) *benchmarkObject { 19 | return &benchmarkObject{ 20 | store: store, 21 | writeDoneWaitGroup: writeDoneWaitGroup, 22 | key: key, 23 | value: value, 24 | } 25 | } 26 | 27 | func (bo *benchmarkObject) BatchWrite(batchedMuts kvstore.BatchedMutations) { 28 | 
if err := batchedMuts.Set(bo.key, bo.value); err != nil { 29 | panic(fmt.Errorf("write operation failed: %w", err)) 30 | } 31 | } 32 | 33 | func (bo *benchmarkObject) BatchWriteDone() { 34 | // do a read operation after the batchwrite is done, 35 | // so the write and read operations are equally distributed over the whole benchmark run. 36 | if _, err := bo.store.Has(iotago_tpkg.RandBytes(32)); err != nil { 37 | panic(fmt.Errorf("read operation failed: %w", err)) 38 | } 39 | 40 | bo.writeDoneWaitGroup.Done() 41 | } 42 | 43 | func (bo *benchmarkObject) BatchWriteScheduled() bool { 44 | return false 45 | } 46 | 47 | func (bo *benchmarkObject) ResetBatchWriteScheduled() { 48 | // do nothing 49 | } 50 | -------------------------------------------------------------------------------- /pkg/toolset/node_info.go: -------------------------------------------------------------------------------- 1 | package toolset 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | flag "github.com/spf13/pflag" 9 | 10 | "github.com/iotaledger/hive.go/app/configuration" 11 | "github.com/iotaledger/iota.go/v4/nodeclient" 12 | ) 13 | 14 | func nodeInfo(args []string) error { 15 | fs := configuration.NewUnsortedFlagSet("", flag.ContinueOnError) 16 | nodeURLFlag := fs.String(FlagToolNodeURL, "http://localhost:14265", "URL of the node (optional)") 17 | outputJSONFlag := fs.Bool(FlagToolOutputJSON, false, FlagToolDescriptionOutputJSON) 18 | 19 | fs.Usage = func() { 20 | _, _ = fmt.Fprintf(os.Stderr, "Usage of %s:\n", ToolNodeInfo) 21 | fs.PrintDefaults() 22 | println(fmt.Sprintf("\nexample: %s --%s %s", 23 | ToolNodeInfo, 24 | FlagToolNodeURL, 25 | "http://192.168.1.221:14265", 26 | )) 27 | } 28 | 29 | if err := parseFlagSet(fs, args); err != nil { 30 | return err 31 | } 32 | 33 | client, err := nodeclient.New(*nodeURLFlag) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | info, err := client.Info(context.Background()) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | if 
*outputJSONFlag { 44 | return printJSON(info) 45 | } 46 | 47 | fmt.Printf("Name: %s\nVersion: %s\nLatestAcceptedBlockSlot: %d\nLatestConfirmedBlockSlot: %d\nIsHealthy: %s\n", info.Name, info.Version, info.Status.LatestAcceptedBlockSlot, info.Status.LatestConfirmedBlockSlot, yesOrNo(info.Status.IsHealthy)) 48 | 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /pkg/toolset/p2p_identity_extract.go: -------------------------------------------------------------------------------- 1 | package toolset 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | flag "github.com/spf13/pflag" 8 | 9 | "github.com/iotaledger/hive.go/app/configuration" 10 | hivep2p "github.com/iotaledger/hive.go/crypto/p2p" 11 | "github.com/iotaledger/hive.go/crypto/pem" 12 | "github.com/iotaledger/hive.go/ierrors" 13 | ) 14 | 15 | func extractP2PIdentity(args []string) error { 16 | fs := configuration.NewUnsortedFlagSet("", flag.ContinueOnError) 17 | privKeyFilePath := fs.String(FlagToolIdentityPrivateKeyFilePath, DefaultValueIdentityPrivateKeyFilePath, "the file path to the identity private key file") 18 | outputJSONFlag := fs.Bool(FlagToolOutputJSON, false, FlagToolDescriptionOutputJSON) 19 | 20 | fs.Usage = func() { 21 | _, _ = fmt.Fprintf(os.Stderr, "Usage of %s:\n", ToolP2PExtractIdentity) 22 | fs.PrintDefaults() 23 | println(fmt.Sprintf("\nexample: %s --%s %s", 24 | ToolP2PExtractIdentity, 25 | FlagToolIdentityPrivateKeyFilePath, 26 | DefaultValueIdentityPrivateKeyFilePath)) 27 | } 28 | 29 | if err := parseFlagSet(fs, args); err != nil { 30 | return err 31 | } 32 | 33 | if len(*privKeyFilePath) == 0 { 34 | return ierrors.Errorf("'%s' not specified", FlagToolIdentityPrivateKeyFilePath) 35 | } 36 | 37 | _, err := os.Stat(*privKeyFilePath) 38 | switch { 39 | case os.IsNotExist(err): 40 | // private key does not exist 41 | return ierrors.Errorf("private key file (%s) does not exist", *privKeyFilePath) 42 | 43 | case err == nil || os.IsExist(err): 
// IsThresholdReached reports whether objectWeight strictly exceeds the given
// fraction (threshold) of totalWeight. The fractional weight is truncated to
// an integer before comparison. A totalWeight of zero never reaches any
// threshold.
func IsThresholdReached(objectWeight int, totalWeight int, threshold float64) bool {
	if totalWeight == 0 {
		return false
	}

	weightRequired := int(float64(totalWeight) * threshold)

	return objectWeight > weightRequired
}
#!/bin/bash
#
# Updates every hive.go module dependency listed in go.mod to the given
# commit, then runs go mod tidy for the main module and the tools.
#
# Usage: ./get_hive.sh <commit-hash>

COMMIT=$1
if [ -z "$COMMIT" ]
then
    echo "ERROR: no commit hash given!"
    exit 1
fi

# Collect all hive.go module paths referenced by go.mod.
HIVE_MODULES=$(grep -E "^\sgithub.com/iotaledger/hive.go" "go.mod" | awk '{print $1}')
for dependency in $HIVE_MODULES
do
    echo "go get $dependency@$COMMIT..."
    go get "$dependency@$COMMIT" >/dev/null
done

# Run go mod tidy
echo "Running go mod tidy..."
# Quote the dirname expansion so the script also works when its path
# contains spaces (the original unquoted form would word-split).
pushd "$(dirname "$0")"
./go_mod_tidy.sh
popd
#!/bin/bash
#
# Generates a genesis snapshot via the genesis-snapshot tool and moves the
# resulting snapshot.bin into the repository root.
pushd ../tools/genesis-snapshot

# determine current iota-core version tag
commit_hash=$(git rev-parse --short HEAD)

# Build with rocksdb support and stamp the binary with the commit hash.
BUILD_TAGS=rocksdb
BUILD_LD_FLAGS="-s -w -X=github.com/iotaledger/iota-core/components/app.Version=${commit_hash}"

# Only move the snapshot if the generation succeeded.
go run -tags ${BUILD_TAGS} -ldflags "${BUILD_LD_FLAGS}" main.go && mv snapshot.bin ../../

popd
*.txt 2 | csv 3 | grafana/csv 4 | grafana/grafana.db 5 | grafana/plugins 6 | grafana/png 7 | 8 | tests/logs -------------------------------------------------------------------------------- /tools/docker-network/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "protocol": {} 3 | } -------------------------------------------------------------------------------- /tools/docker-network/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | [database] 2 | wal = true -------------------------------------------------------------------------------- /tools/docker-network/grafana/provisioning/dashboards/prometheus.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Prometheus' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | editable: true 10 | allowUiUpdates: true 11 | options: 12 | path: /etc/grafana/provisioning/dashboards 13 | -------------------------------------------------------------------------------- /tools/docker-network/grafana/provisioning/datasources/datasources.yaml: -------------------------------------------------------------------------------- 1 | # config file version 2 | apiVersion: 1 3 | 4 | # list of datasources to insert/update depending 5 | # whats available in the database 6 | datasources: 7 | # name of the datasource. Required 8 | - name: Prometheus 9 | # datasource type. Required 10 | type: prometheus 11 | # access mode. direct or proxy. Required 12 | access: proxy 13 | # org id. 
#!/bin/bash
#
# Fully restarts the docker network: force-stops all containers, removes
# them, and then starts the network again via run.sh.

docker compose kill
docker compose down
./run.sh
// DockerWalletClock is a wallet clock for the docker test framework whose
// current slot is derived from the node's latest API time provider (i.e.
// real wall-clock time) rather than a manually controlled value.
type DockerWalletClock struct {
	// client provides access to the node's latest API, from which the
	// current slot is derived.
	client mock.Client
}

// SetCurrentSlot always panics: this clock follows real time and the slot
// cannot be set manually.
func (c *DockerWalletClock) SetCurrentSlot(slot iotago.SlotIndex) {
	panic("Cannot set current slot in DockerWalletClock, the slot is set by time.Now()")
}

// CurrentSlot returns the slot that corresponds to the current time
// according to the latest API's time provider.
func (c *DockerWalletClock) CurrentSlot() iotago.SlotIndex {
	return c.client.LatestAPI().TimeProvider().CurrentSlot()
}
// GetMaxRegistrationSlot returns the maximum slot for registration in the
// given epoch: the epoch's end slot minus the protocol's epoch nearing
// threshold. Slots after this point fall inside the nearing-threshold
// window at the end of the epoch.
func GetMaxRegistrationSlot(committedAPI iotago.API, epoch iotago.EpochIndex) iotago.SlotIndex {
	epochEndSlot := committedAPI.TimeProvider().EpochEnd(epoch)
	return epochEndSlot - committedAPI.ProtocolParameters().EpochNearingThreshold()
}
// Test_Payload_Nil_Test spins up a docker network with four validators and
// one plain node, then issues a block whose payload is explicitly nil and
// verifies the network keeps running (no panic) until the epoch finalizes.
// This is a test to ensure issue #978 is fixed.
func Test_Payload_Nil_Test(t *testing.T) {
	d := dockertestframework.NewDockerTestFramework(t,
		dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...),
	)
	defer d.Stop()

	d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6")
	d.AddValidatorNode("V2", "docker-network-inx-validator-2-1", "http://localhost:8060", "rms1pqm4xk8e9ny5w5rxjkvtp249tfhlwvcshyr3pc0665jvp7g3hc875k538hl")
	d.AddValidatorNode("V3", "docker-network-inx-validator-3-1", "http://localhost:8070", "rms1pp4wuuz0y42caz48vv876qfpmffswsvg40zz8v79sy8cp0jfxm4kunflcgt")
	d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw")
	d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080")

	err := d.Run()
	require.NoError(t, err)

	d.WaitUntilNetworkReady()

	ctx, cancel := context.WithCancel(context.Background())

	// cancel the context when the test is done
	t.Cleanup(cancel)

	// create account-1
	account := d.CreateAccountFromFaucet("account-1")

	// Issue a block with a nil payload.
	blk := lo.PanicOnErr(account.Wallet().CreateBasicBlock(ctx, "something", mock.WithPayload(nil)))
	d.SubmitBlock(ctx, blk.ProtocolBlock())

	// Wait for the epoch end to ensure the test does not exit early.
	d.AwaitEpochFinalized()
}
40 | go test -tags ${BUILD_TAGS} -v -timeout=${TIMEOUT} 41 | else 42 | # Concatenate all test names with a pipe 43 | tests=$(printf "|%s" "$@") 44 | tests=${tests:1} 45 | 46 | echo "Running tests: $tests..." 47 | 48 | # Run the specific tests 49 | go test -run=$tests -tags ${BUILD_TAGS} -v -timeout=${TIMEOUT} 50 | fi -------------------------------------------------------------------------------- /tools/gendoc/configuration_header.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: This section describes the configuration parameters and their types for your IOTA core node. 3 | keywords: 4 | - IOTA Node 5 | - Configuration 6 | - JSON 7 | - Customize 8 | - Config 9 | - reference 10 | --- 11 | 12 | 13 | # Configuration 14 | 15 | IOTA core node uses a JSON standard format as a config file. If you are unsure about JSON syntax, you can find more information in the [official JSON specs](https://www.json.org). 16 | 17 | You can change the path of the config file by using the `-c` or `--config` argument while executing `iota-core` executable. 18 | 19 | For example: 20 | ```bash 21 | iota-core -c config_example.json 22 | ``` 23 | 24 | You can always get the most up-to-date description of the config parameters by running: 25 | 26 | ```bash 27 | iota-core -h --full 28 | ``` 29 | 30 | -------------------------------------------------------------------------------- /tools/genesis-snapshot/.gitignore: -------------------------------------------------------------------------------- 1 | *.bin 2 | genesis-snapshot 3 | --------------------------------------------------------------------------------