├── .gitattributes ├── .gitignore ├── .golangci.yml ├── README.md ├── adversary ├── adversary_node.go ├── no_gossip.go ├── same_opinion.go └── shifting_opinion.go ├── config ├── config.go ├── types.go └── utility.go ├── go.mod ├── go.sum ├── logger └── logger.go ├── main.go ├── multiverse ├── approvalweight_manager.go ├── booker.go ├── icca_scheduler.go ├── manaburn_scheduler.go ├── messagefactory.go ├── models.go ├── no_scheduler.go ├── node.go ├── opinion_manager.go ├── requester.go ├── scheduler.go ├── solidifer.go ├── storage.go ├── tangle.go ├── tipmanager.go └── utils.go ├── network ├── bandwidth.go ├── consensus_weight.go ├── groups.go ├── network.go ├── node.go ├── peer.go ├── singleattacker.go └── utility.go ├── plot_style.txt ├── scripts ├── README.md ├── config.py ├── constant.py ├── main.py ├── parsing.py ├── plot_style.txt ├── plotting.py ├── requirements.txt └── utils.py ├── simulation ├── counter.go ├── metrics_setup.go ├── metrics_writers.go ├── metricsmanager.go ├── parser.go └── simulator.go └── singlenodeattacks └── blowball.go /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization. 2 | 3 | *.go text eol=lf -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | *.dat 8 | *.dat.bkp 9 | 10 | # cli-wallet config 11 | tools/cli-wallet/config.json 12 | 13 | # Test binary, build with `go test -c` 14 | *.test 15 | 16 | # Output of the go coverage tool, specifically when used with LiteIDE 17 | *.out 18 | 19 | # Logs 20 | logs/* 21 | testNodes/* 22 | *.log 23 | 24 | # Project files 25 | .idea 26 | .vscode/ 27 | 28 | # Database directory 29 | mainnetdb/ 30 | objectsdb/ 31 | 32 | # OSX related files 33 | .DS_Store 34 | shimmer 35 | goshimmer 36 | 37 | config.json 38 | .env 39 | /scripts/log.txt 40 | /multiverse_sim 41 | *.pyc 42 | 43 | ## Binary files 44 | multivers-simulation 45 | 46 | # Debugging related files 47 | results/ 48 | result/ 49 | figures/ 50 | debug/ 51 | result_remote/ 52 | paper_figs/ 53 | BYLin.md 54 | debug* 55 | *.sh 56 | 57 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | tests: true 3 | 4 | issues: 5 | exclude-use-default: false 6 | max-issues-per-linter: 0 7 | 8 | linters-settings: 9 | gofmt: 10 | simplify: true 11 | golint: 12 | min-confidence: 0.8 13 | govet: 14 | check-shadowing: true 15 | misspell: 16 | locale: US 17 | 18 | linters: 19 | enable: 20 | - misspell 21 | - gofmt 22 | - goimports 23 | - govet 24 | - golint 25 | disable: 26 | - errcheck 27 | - gochecknoglobals 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TangleSim: An Agent-based, Modular Simulator for DAG-based Distributed Ledger Technologies 2 | 3 | The biggest breakthrough of Bitcoin was the introduction of a new voting scheme on top of a blockchain - a data structure which was invented by Stuart Haber and W. Scott Stornetta in 1991. The blocks that are issued by block producers do not just contain the transactions that are added to the ledger but also contain a reference to the previous block. 
Through this reference, the blocks form a chain where every block implicitly approves all of the previous blocks and represents a vote by the issuer on what they perceive to be the longest chain. The chain that has received the most votes (blocks) wins. 4 | 5 | This form of voting, where the messages that introduce new transactions also contain a vote by the issuer, is called virtual voting. Instead of having to agree upfront on the participants of the network and making every node exchange votes with every other node to reach consensus, it relies on the idea of piling up approval by validators on past decisions by inheriting the votes of future messages through a data structure like the blockchain. 6 | 7 | This did not just solve the huge message complexity of existing consensus algorithms and open up the network to a much larger number of nodes; by combining it with Proof of Work, it also enabled the network to become open and permissionless. 8 | 9 | The real beauty of this voting scheme is not just its efficiency but also its flexibility. Instead of being limited to Proof of Work, it gives complete freedom over how the block producers are chosen. This kicked off a whole field of research trying to choose block producers more efficiently. Today we have PoW chains (Bitcoin), PoS chains (Cardano), VDF chains (Solana), permissioned chains (Hyperledger), semi-permissioned chains (EOS) and all kinds of other variants. 10 | 11 | This level of freedom and flexibility is the reason why 99% of all DLTs still use a blockchain and very few have tried a different approach. Especially in a heavily researched and fast-moving field like DLT, it is crucial to have a technical foundation that can adapt to future developments and incorporate new findings without breaking the protocol. 12 | 13 | There have been efforts by projects like Hashgraph to translate the concepts of virtual voting into the world of DAGs, but they have failed to maintain the same properties as blockchain and are limited to a relatively small network of permissioned nodes. Other DAG-based DLTs are very proprietary, often make undesirable tradeoffs and only work exactly the way they were designed. It is impossible to modify even small parts of their protocol and exchange them for different ones as research progresses. 14 | 15 | IOTA was one of the first projects that tried to translate the longest-chain-wins consensus of blockchains into the world of DAGs, maintaining all of its benefits while trying to solve blockchain's drawbacks (slow confirmations, difficulty sharding, and reliance on a two-class society where users have to pay miners to get their transactions included in the ledger state). It failed to fulfill this promise due to a badly designed and broken first version. 16 | 17 | This repository implements a simulator for a new consensus mechanism that gets rid of all the original drawbacks of IOTA and that, similarly to blockchain, does not rely on nodes querying each other for their opinion. 18 | 19 | 20 | ## What is being simulated? 21 | 22 | A configurable network of *N* nodes connected to each other in a [Watts-Strogatz](https://en.wikipedia.org/wiki/Watts%E2%80%93Strogatz_model) graph, 23 | where nodes are assigned weights according to a [Zipf distribution](https://en.wikipedia.org/wiki/Zipf%27s_law). Each peer in the network can send messages at a rate proportional to its weight. The messages attach to other messages in the tangle according to a configurable tip-selection algorithm.
25 | The simulation tracks the weight of each message, the color and color weight, and the tip pool size. 26 | 27 | To see the full list of configuration options, run the simulation with the `-h` flag. 28 | 29 | ## Message Weight Mechanism 30 | 31 | When a new message is issued by node *A* it automatically receives the weight of *A*. It also propagates that weight down to its parents, 32 | accumulating the weight on each message in the past cone until the genesis is reached. 33 | Each message keeps track of its weight sources in an efficient bitmap to ensure the correctness of the weight propagation calculation. 34 | This mechanism doesn't take into account the different perceptions of the tangle that individual nodes have. 35 | The calculation can be done by every node in the simulation. Once a message's weight surpasses a threshold (above 50%), we consider it *confirmed* or *seen*, 36 | depending on whether we also simulate colored perceptions in the run. 37 | 38 | ## Color Weight Mechanism 39 | 40 | In order to take different conflict perceptions into account, we assign colors to subtangles. 41 | This mechanism works by having a node that colors messages. 42 | Each message propagates its color to its descendants ad infinitum. If a message has parents with different colors, the node deems it invalid and drops it. 43 | It is worth noting that, because of this, coloring the tangle creates distinct subtangles that can't be combined. 44 | This basically means that we are simulating the UTXO DAG instead of the message DAG. 45 | Each node in the simulation keeps track of the weight of the colors. Each time a message is colored, the weight of its issuer is added to that color's weight. 46 | If the weight of the issuer was previously assigned to another color, it is subtracted from that color. 47 | Each node in the simulation considers the color with the most weight to be the winner. 48 | In the implementation each color is actually a number; in case of a tie, the color with the largest number wins. 49 | 50 | ## Tip selection 51 | 52 | Each message in the simulation can choose up to a configurable *k* other messages to reference. 53 | It will usually pick parents that weren't referenced before, known as tips. 54 | Tip selection has a large influence on how weights are distributed, and the simulation is meant to support various 55 | tip-selection strategies, honest and malicious. Currently, only two tip-selection strategies are implemented: 56 | URTS (Uniform Random Tip Selection) and RURTS (Restricted URTS). URTS, as the name implies, selects any tip uniformly at random. 57 | RURTS excludes tips that have aged beyond a configurable delta; all remaining tips are selected uniformly. A minimal code sketch of the RURTS rule is included at the end of this README. 58 | 59 | 60 | ## Running the simulation 61 | 62 | It is best run via a script that will plot the results, following the instructions [here](https://github.com/iotaledger/multiverse-simulation/blob/aw/scripts/README.md). 63 | Alternatively, one can simply run the simulation with a `go run .` command.
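
## Tip selection sketch

The following is a minimal, self-contained sketch of the two tip-selection rules described above. It is illustrative only and is not the simulator's actual `TipManager` code; the `tip` type, the `selectParents` function and the parameter values are made up for the example. The only difference between URTS and RURTS is that RURTS first discards tips older than a configurable delta (`DeltaURTS` in the configuration) before sampling uniformly.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// tip is a simplified stand-in for a tangle tip; the real simulator keeps
// full message metadata for each tip.
type tip struct {
	id       int
	issuedAt time.Time
}

// selectParents picks up to k parents from the current tip pool.
// With URTS every tip is eligible; with RURTS only tips younger than delta are.
func selectParents(tips []tip, k int, delta time.Duration, restricted bool, now time.Time) []tip {
	eligible := make([]tip, 0, len(tips))
	for _, t := range tips {
		if !restricted || now.Sub(t.issuedAt) <= delta {
			eligible = append(eligible, t)
		}
	}
	// sample up to k distinct tips uniformly at random from the eligible set
	rand.Shuffle(len(eligible), func(i, j int) { eligible[i], eligible[j] = eligible[j], eligible[i] })
	if len(eligible) > k {
		eligible = eligible[:k]
	}
	return eligible
}

func main() {
	now := time.Now()
	pool := []tip{
		{id: 1, issuedAt: now.Add(-2 * time.Second)},
		{id: 2, issuedAt: now.Add(-45 * time.Second)}, // too old for RURTS with delta = 30s
		{id: 3, issuedAt: now.Add(-10 * time.Second)},
	}
	fmt.Println("URTS :", selectParents(pool, 2, 0, false, now))
	fmt.Println("RURTS:", selectParents(pool, 2, 30*time.Second, true, now))
}
```

With `restricted` set to true and `delta` set to the configured `DeltaURTS`, the sketch reproduces the RURTS behaviour described above; with it set to false, it degenerates to plain URTS.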
64 | -------------------------------------------------------------------------------- /adversary/adversary_node.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "reflect" 5 | 6 | "github.com/iotaledger/multivers-simulation/multiverse" 7 | "github.com/iotaledger/multivers-simulation/network" 8 | ) 9 | 10 | type NodeInterface interface { 11 | AssignColor(color multiverse.Color) 12 | } 13 | 14 | func CastAdversary(node network.Node) NodeInterface { 15 | s := reflect.ValueOf(node) 16 | switch s.Interface().(type) { 17 | case *ShiftingOpinionNode: 18 | return node.(*ShiftingOpinionNode) 19 | case *SameOpinionNode: 20 | return node.(*SameOpinionNode) 21 | case *NoGossipNode: 22 | return node.(*NoGossipNode) 23 | } 24 | return nil 25 | } 26 | -------------------------------------------------------------------------------- /adversary/no_gossip.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/events" 5 | "github.com/iotaledger/multivers-simulation/multiverse" 6 | ) 7 | 8 | // region NoGossipNode /////////////////////////////////////////////////////////////////////////////////////////////////// 9 | 10 | type NoGossipNode struct { 11 | *multiverse.Node 12 | } 13 | 14 | func NewNoGossipNode() interface{} { 15 | node := multiverse.NewNode().(*multiverse.Node) 16 | noGossipNode := &NoGossipNode{ 17 | node, 18 | } 19 | noGossipNode.UpdateGossipBehavior() 20 | return noGossipNode 21 | } 22 | 23 | func (n *NoGossipNode) UpdateGossipBehavior() { 24 | n.Tangle().Booker.Events.MessageBooked.Attach(events.NewClosure(func(messageID multiverse.MessageID) { 25 | // do nothing - no gossiping 26 | })) 27 | n.Tangle().Requester.Events.Request.Attach(events.NewClosure(func(messageID multiverse.MessageID) { 28 | // do nothing - no answering requests for missing messages 29 | })) 30 | } 31 | 32 | func (n *NoGossipNode) AssignColor(color multiverse.Color) { 33 | // do nothing - leave undefined color 34 | } 35 | 36 | func (n *NoGossipNode) IssuePayload(payload multiverse.Color) { 37 | // do nothing - this node will not issue DS message, to not allow other nodes count his opinion for any of colors 38 | // user needs to define other adv group that will issue DS 39 | } 40 | 41 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////// 42 | -------------------------------------------------------------------------------- /adversary/same_opinion.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/events" 5 | "github.com/iotaledger/multivers-simulation/multiverse" 6 | ) 7 | 8 | // region ShiftingOpinionNode /////////////////////////////////////////////////////////////////////////////////////////////////// 9 | 10 | type SameOpinionNode struct { 11 | *multiverse.Node 12 | } 13 | 14 | func NewSameOpinionNode() interface{} { 15 | node := multiverse.NewNode().(*multiverse.Node) 16 | shiftingNode := &SameOpinionNode{ 17 | node, 18 | } 19 | shiftingNode.setupOpinionManager() 20 | return shiftingNode 21 | 22 | } 23 | 24 | func (s *SameOpinionNode) setupOpinionManager() { 25 | om := s.Tangle().OpinionManager 26 | s.Tangle().OpinionManager = NewShiftingOpinionManager(om) 27 | s.Tangle().OpinionManager.Setup() 28 | } 29 | 30 | func (s *SameOpinionNode) AssignColor(color multiverse.Color) { 31 
| s.Tangle().OpinionManager.SetOpinion(color) 32 | } 33 | 34 | type SameOpinionManager struct { 35 | *multiverse.OpinionManager 36 | } 37 | 38 | func NewSameOpinionManager(om multiverse.OpinionManagerInterface) *SameOpinionManager { 39 | return &SameOpinionManager{ 40 | om.(*multiverse.OpinionManager), 41 | } 42 | } 43 | 44 | func (sm *SameOpinionManager) FormOpinion(messageID multiverse.MessageID) { 45 | defer sm.Events().OpinionFormed.Trigger(messageID) 46 | 47 | if updated := sm.UpdateWeights(messageID); !updated { 48 | return 49 | } 50 | 51 | sm.weightsUpdated() 52 | } 53 | 54 | func (sm *SameOpinionManager) weightsUpdated() { 55 | // do nothing 56 | } 57 | 58 | func (sm *SameOpinionManager) Setup() { 59 | sm.Tangle().Booker.Events.MessageBooked.Detach(events.NewClosure(sm.OpinionManager.FormOpinion)) 60 | sm.Tangle().Booker.Events.MessageBooked.Attach(events.NewClosure(sm.FormOpinion)) 61 | } 62 | 63 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 64 | -------------------------------------------------------------------------------- /adversary/shifting_opinion.go: -------------------------------------------------------------------------------- 1 | package adversary 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/events" 5 | "github.com/iotaledger/multivers-simulation/multiverse" 6 | ) 7 | 8 | // region ShiftingOpinionNode /////////////////////////////////////////////////////////////////////////////////////////////////// 9 | 10 | type ShiftingOpinionNode struct { 11 | *multiverse.Node 12 | } 13 | 14 | func NewShiftingOpinionNode() interface{} { 15 | node := multiverse.NewNode().(*multiverse.Node) 16 | shiftingNode := &ShiftingOpinionNode{ 17 | node, 18 | } 19 | shiftingNode.setupOpinionManager() 20 | return shiftingNode 21 | 22 | } 23 | 24 | func (s *ShiftingOpinionNode) setupOpinionManager() { 25 | om := s.Tangle().OpinionManager 26 | s.Tangle().OpinionManager = NewShiftingOpinionManager(om) 27 | s.Tangle().OpinionManager.Setup() 28 | } 29 | 30 | func (s *ShiftingOpinionNode) AssignColor(color multiverse.Color) { 31 | s.Tangle().OpinionManager.SetOpinion(color) 32 | } 33 | 34 | type ShiftingOpinionManager struct { 35 | *multiverse.OpinionManager 36 | } 37 | 38 | func NewShiftingOpinionManager(om multiverse.OpinionManagerInterface) *ShiftingOpinionManager { 39 | return &ShiftingOpinionManager{ 40 | om.(*multiverse.OpinionManager), 41 | } 42 | } 43 | 44 | func (sm *ShiftingOpinionManager) FormOpinion(messageID multiverse.MessageID) { 45 | defer sm.Events().OpinionFormed.Trigger(messageID) 46 | 47 | if updated := sm.UpdateWeights(messageID); !updated { 48 | return 49 | } 50 | 51 | sm.weightsUpdated() 52 | } 53 | 54 | func (sm *ShiftingOpinionManager) weightsUpdated() { 55 | aw := make(map[multiverse.Color]uint64) 56 | for key, value := range sm.ApprovalWeights() { 57 | aw[key] = value 58 | } 59 | // more than one color present 60 | if len(aw) > 1 { 61 | maxOpinion := sm.getMaxOpinion(aw) 62 | delete(aw, maxOpinion) 63 | } 64 | 65 | newOpinion := sm.getMaxOpinion(aw) 66 | oldOpinion := sm.Opinion() 67 | if newOpinion != oldOpinion { 68 | sm.SetOpinion(newOpinion) 69 | } 70 | sm.UpdateConfirmation(oldOpinion, newOpinion) 71 | } 72 | 73 | func (sm *ShiftingOpinionManager) getMaxOpinion(aw map[multiverse.Color]uint64) multiverse.Color { 74 | maxApprovalWeight := uint64(0) 75 | maxOpinion := multiverse.UndefinedColor 76 | for color, approvalWeight := range aw { 77 | if approvalWeight > maxApprovalWeight || 
approvalWeight == maxApprovalWeight && color < maxOpinion || maxOpinion == multiverse.UndefinedColor { 78 | maxApprovalWeight = approvalWeight 79 | maxOpinion = color 80 | } 81 | } 82 | return maxOpinion 83 | } 84 | 85 | func (sm *ShiftingOpinionManager) Setup() { 86 | sm.Tangle().Booker.Events.MessageBooked.Detach(events.NewClosure(sm.OpinionManager.FormOpinion)) 87 | sm.Tangle().Booker.Events.MessageBooked.Attach(events.NewClosure(sm.FormOpinion)) 88 | } 89 | 90 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 91 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "path" 5 | "time" 6 | ) 7 | 8 | // parameters that will be used in multiple settings. 9 | var ( 10 | ResultDir = "results" 11 | ScriptStartTimeStr = time.Now().Format("20060102_1504") 12 | GeneralOutputDir = path.Join(ResultDir, ScriptStartTimeStr, "general") 13 | SchedulerOutputDir = path.Join(ResultDir, ScriptStartTimeStr, "scheduler") 14 | 15 | NodesCount = 100 16 | 17 | SchedulingRate = 200 18 | 19 | SlotTime = time.Duration(1 * float64(time.Second)) 20 | MinCommittableAge = time.Duration(30 * float64(time.Second)) 21 | RMCmin = 500000.0 // 0.25 22 | ) 23 | 24 | // simulator settings 25 | var Params = &Config{ 26 | SimulatorSettings: &SimulatorSettings{ 27 | ResultDir: ResultDir, 28 | SimulationTarget: "CT", 29 | SimulationStopThreshold: 1.0, 30 | ConsensusMonitorTick: 100, 31 | MonitoredAWPeers: []int{0}, 32 | MonitoredWitnessWeightPeer: 0, 33 | MonitoredWitnessWeightMessageID: 200, 34 | ScriptStartTimeStr: ScriptStartTimeStr, 35 | GeneralOutputDir: GeneralOutputDir, 36 | SchedulerOutputDir: SchedulerOutputDir, 37 | SimulationDuration: time.Duration(1) * time.Minute, 38 | }, 39 | NetworkSettings: &NetworkSettings{ 40 | CommitteeBandwidth: 0.5, 41 | NodesCount: NodesCount, 42 | SchedulingRate: SchedulingRate, 43 | IssuingRate: SchedulingRate, 44 | CongestionPeriods: []float64{1.0, 1.0, 1.0, 1.0}, 45 | ValidatorCount: 20, 46 | ValidatorBPS: 1, 47 | ParentsCount: 8, 48 | ParentCountVB: 2, 49 | ParentCountNVB: 38, 50 | NeighbourCountWS: 4, 51 | RandomnessWS: 1.0, 52 | IMIF: "poisson", 53 | PacketLoss: 0.0, 54 | MinDelay: 100, 55 | MaxDelay: 100, 56 | 57 | SlowdownFactor: 1, 58 | }, 59 | WeightSettings: &WeightSettings{ 60 | NodesTotalWeight: 100_000_000, 61 | ZipfParameter: 0.9, 62 | ConfirmationThreshold: 0.66, 63 | ConfirmationThresholdAbsolute: true, 64 | RelevantValidatorWeight: 0, 65 | }, 66 | TipSelectionAlgorithmSettings: &TipSelectionAlgorithmSettings{ 67 | TSA: "RURTS", 68 | DeltaURTS: 30.0, 69 | WeakTipsRatio: 0.0, 70 | }, 71 | CongestionControlSettings: &CongestionControlSettings{ 72 | SchedulerType: "ICCA+", 73 | BurnPolicies: RandomArrayFromValues(0, []int{0, 1}, NodesCount), 74 | InitialMana: 0.0, 75 | MaxBuffer: 25, 76 | ConfEligible: true, 77 | MaxDeficit: 2.0, 78 | SlotTime: time.Duration(1 * float64(time.Second)), 79 | MinCommittableAge: MinCommittableAge, 80 | RMCTime: MinCommittableAge, 81 | LowerRMCThreshold: 0.5 * float64(SchedulingRate) * SlotTime.Seconds(), 82 | UpperRMCThreshold: 0.75 * float64(SchedulingRate) * SlotTime.Seconds(), 83 | AlphaRMC: 0.8, 84 | BetaRMC: 1.2, 85 | RMCmin: RMCmin, // 0.25 86 | InitialRMC: RMCmin, 87 | RMCmax: 5000000.0, //2.0 88 | RMCincrease: 1000000.0, // 1.0 89 | RMCdecrease: 500000.0, // 0.5 90 | RMCPeriodUpdate: 30, 91 | }, 92 | 
AdversarySettings: &AdversarySettings{ 93 | SimulationMode: "None", 94 | DoubleSpendDelay: 5, 95 | 96 | AccidentalMana: []string{"random", "random"}, 97 | 98 | AdversaryDelays: []int{}, 99 | AdversaryTypes: []int{0, 0}, 100 | AdversaryMana: []float64{}, 101 | AdversaryNodeCounts: []int{}, 102 | AdversaryInitColors: []string{"R", "B"}, 103 | AdversaryPeeringAll: false, 104 | AdversarySpeedup: []float64{1.0, 1.0}, 105 | 106 | BlowballMana: 20, 107 | BlowballSize: 20, 108 | BlowballDelay: 5, 109 | BlowballMaxSent: 2, 110 | BlowballNodeID: 0, 111 | }, 112 | } 113 | -------------------------------------------------------------------------------- /config/types.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type Config struct { 8 | *SimulatorSettings 9 | *NetworkSettings 10 | *WeightSettings 11 | *CongestionControlSettings 12 | *TipSelectionAlgorithmSettings 13 | *AdversarySettings 14 | } 15 | 16 | // ParametersProtocol contains the definition of the configuration parameters used by the Protocol. 17 | type SimulatorSettings struct { 18 | // Path where all the result files will be saved 19 | ResultDir string `default:"results"` 20 | // The simulation target, CT: Confirmation Time, DS: Double Spending 21 | SimulationTarget string `default:"CT"` 22 | // Stop the simulation when > SimulationStopThreshold * NodesCount have reached the same opinion. 23 | SimulationStopThreshold float64 `default:"1.0"` 24 | // Tick to monitor the consensus, in milliseconds. 25 | ConsensusMonitorTick int `default:"100"` 26 | // Nodes for which we monitor the AW growth 27 | MonitoredAWPeers []int 28 | // Peer for which we monitor Witness Weight 29 | MonitoredWitnessWeightPeer int `default:"0"` 30 | // A specified message ID to monitor the witness weights 31 | MonitoredWitnessWeightMessageID int `default:"200"` 32 | // A string indicating the start time of a simulation started by an external script 33 | ScriptStartTimeStr string `default:"20060102_1504"` 34 | GeneralOutputDir string `default:"results/20060102_1504/general"` 35 | SchedulerOutputDir string `default:"results/20060102_1504/scheduler"` 36 | SimulationDuration time.Duration `default:"1m"` 37 | } 38 | 39 | type NetworkSettings struct { 40 | // NodesCount is the total number of nodes simulated in the network. 41 | NodesCount int `default:"100"` 42 | // ValidatorCount is the total number of nodes simulated in the network. 43 | ValidatorCount int `default:"20"` 44 | // CommitteeBandwidth is the total bandwidth of the committee in the network. 45 | CommitteeBandwidth float64 `default:"0.5"` 46 | // ValidatorBPS is the rate of validation blocks simulated in the network per validator node. 47 | ValidatorBPS int `default:"1"` 48 | // Scheduler rate in units of messages per second. 49 | SchedulingRate int `default:"200"` 50 | // Total rate of issuing messages in units of messages per second. 51 | IssuingRate int `default:"100"` 52 | //, 0.5, 1.5, 1.5, 0.5} // congested/uncongested periods 53 | CongestionPeriods []float64 54 | // ParentsCount that a new message is selecting from the tip pool. 55 | ParentsCount int `default:"8"` 56 | // ParentCountVB is the number of validation block parents for validation block tsa. 57 | ParentCountVB int `default:"2"` 58 | // ParentCountNVB is the number of non-validation block parents for validation block tsa. 59 | ParentCountNVB int `default:"38"` 60 | // Number of neighbors node is connected to in WattsStrogatz network topology. 
61 | NeighbourCountWS int `default:"4"` 62 | // WattsStrogatz randomness parameter, gamma parameter described in https://blog.iota.org/the-fast-probabilistic-consensus-simulator-d5963c558b6e/ 63 | RandomnessWS float64 `default:"1.0"` 64 | // IMIF Inter Message Issuing Function for time delay between activity messages: poisson or uniform. 65 | IMIF string `default:"poisson"` 66 | // The packet loss in the network. 67 | PacketLoss float64 `default:"0.0"` 68 | // The minimum network delay in ms. 69 | MinDelay int `default:"100"` 70 | // The maximum network delay in ms. 71 | MaxDelay int `default:"100"` 72 | // The factor to control the speed in the simulation. 73 | SlowdownFactor int `default:"1"` 74 | } 75 | 76 | // Weight setup 77 | 78 | type WeightSettings struct { 79 | // Total number of weight for the whole network. 80 | NodesTotalWeight int `default:"100_000_000"` 81 | // the 's' parameter for the Zipf distribution used to model weight distribution. s=0 all nodes are equal, s=2 network centralized. 82 | ZipfParameter float64 `default:"0.9"` 83 | // Threshold for AW collection above which messages are considered confirmed. 84 | ConfirmationThreshold float64 `default:"0.66"` 85 | // If true the threshold is always counted from zero if false the weight collected is counted from the next peer weight. 86 | ConfirmationThresholdAbsolute bool `default:"true"` 87 | // The node whose weight * RelevantValidatorWeight <= largestWeight will not issue messages (disabled now) 88 | RelevantValidatorWeight int `default:"0"` 89 | } 90 | 91 | // Tip Selection Algorithm setup 92 | 93 | type TipSelectionAlgorithmSettings struct { 94 | // Currently only one supported TSA is RURTS 95 | TSA string `default:"RURTS"` 96 | // in seconds, reference: https://iota.cafe/t/orphanage-with-restricted-urts/1199 97 | DeltaURTS float64 `default:"5.0"` 98 | // The ratio of weak tips 99 | WeakTipsRatio float64 `default:"0.0"` 100 | } 101 | 102 | // Congestion Control 103 | 104 | type CongestionControlSettings struct { 105 | SchedulerType string `default:"ICCA+"` // ManaBurn or ICCA+ 106 | BurnPolicies []int 107 | InitialMana float64 `default:"0.0"` 108 | MaxBuffer int `default:"25"` 109 | ConfEligible bool `default:"true"` // if true, then confirmed is used for eligible check. else just scheduled 110 | MaxDeficit float64 `default:"2.0"` // maximum deficit for any id 111 | SlotTime time.Duration `default:"1s"` 112 | MinCommittableAge time.Duration `default:"4s"` 113 | RMCTime time.Duration `default:"4s"` 114 | // inital value of RMC 115 | InitialRMC float64 `default:"1.0"` 116 | // T1 for RMC 117 | LowerRMCThreshold float64 `default:"50"` 118 | // T2 for RMC 119 | UpperRMCThreshold float64 `default:"75"` 120 | AlphaRMC float64 `default:"0.8"` 121 | BetaRMC float64 `default:"1.2"` 122 | RMCmin float64 `default:"0.25"` 123 | RMCmax float64 `default:"2.0"` 124 | RMCincrease float64 `default:"1.0"` 125 | RMCdecrease float64 `default:"0.5"` 126 | RMCPeriodUpdate int `default:"5"` 127 | } 128 | 129 | // Adversary setup - enabled by setting SimulationTarget="DS" 130 | type AdversarySettings struct { 131 | // SimulationMode defines the type of adversary simulation, one of the following: 132 | // 'None' - no adversary simulation, 133 | // 'Accidental' - accidental double spends sent by max, min or random weight node from Zipf distribution, 134 | // 'Adversary' - need to use adversary groups (parameters starting with 'Adversary...') 135 | // 'Blowball' - enables adversary node that is able to perform a blowball attack. 
136 | SimulationMode string `default:"None"` 137 | // Delay after which double spending transactions will be issued. In seconds. 138 | DoubleSpendDelay int `default:"5"` 139 | // Defines nodes which will be used: 'min', 'max', 'random' or valid nodeID 140 | AccidentalMana []string 141 | // Delays in ms of adversary nodes, eg '50 100 200', SimulationTarget must be 'DS' 142 | AdversaryDelays []int 143 | // Defines group attack strategy, one of the following: 0 - honest node behavior, 1 - shifts opinion, 2 - keeps the same opinion, 3 - nodes not gossiping anything, even DS. SimulationTarget must be 'DS' 144 | AdversaryTypes []int 145 | // Adversary nodes mana in %, e.g. '10 10'. Default value: 1%. SimulationTarget must be 'DS' 146 | AdversaryMana []float64 147 | // Defines number of adversary nodes in the group. Leave empty for default value: 1. 148 | AdversaryNodeCounts []int 149 | // Defines initial color for adversary group, one of following: 'R', 'G', 'B'. Mandatory for each group. 150 | AdversaryInitColors []string 151 | // Defines a flag indicating whether adversarial nodes should be able to send messages to all nodes in the network, instead of following regular peering algorithm. 152 | AdversaryPeeringAll bool `default:"false"` 153 | // Defines how many more messages should adversary nodes issue. 154 | AdversarySpeedup []float64 155 | 156 | // The mana of the blowball node in % of total mana 157 | BlowballMana int `default:"20"` 158 | // The size of the blowball 159 | BlowballSize int `default:"20"` 160 | // The delay in seconds between the consecutive blowballs 161 | BlowballDelay int `default:"5"` 162 | // The maximum number of blowballs sent to the network 163 | BlowballMaxSent int `default:"2"` 164 | // The node ID of the blowball node 165 | BlowballNodeID int `default:"0"` 166 | } 167 | -------------------------------------------------------------------------------- /config/utility.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "math/rand" 5 | ) 6 | 7 | // region ManaBurn //////////////////////////////////////////////////////////////////////////////////////////////////// 8 | func RandomValueArray(seed int64, min, max, length int) []int { 9 | r := rand.New(rand.NewSource(seed)) 10 | a := r.Perm(length) 11 | for i, n := range a { 12 | a[i] = (n % (max - min + 1)) + min 13 | } 14 | return a 15 | } 16 | 17 | func RandomArrayFromValues(seed int64, values []int, length int) []int { 18 | a := make([]int, length) 19 | // r := rand.New(rand.NewSource(seed)) 20 | for i := range a { 21 | // 24-99: 32.47% 22 | // 53-99: 15.07% 23 | // 66-99: 9.98% 24 | // 82-99: 4.82% 25 | // value = 0: spammer (i.e., RateSetter() always return true); 26 | // value = 1: normal; 27 | // a[i] = values[r.Intn(len(values))] 28 | 29 | // begin := 24 30 | // a[i] = 1 31 | // if (i >= begin) { 32 | // a[i] = 0 33 | // } 34 | // end := 9 35 | // if (i > end) { 36 | // a[i] = 0 37 | // } 38 | // a[i] = values[r.Intn(len(values))] 39 | a[i] = 1 // normal 40 | } 41 | return a 42 | } 43 | 44 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 45 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/iotaledger/multivers-simulation 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/iotaledger/hive.go v0.0.0-20210218144347-c1d082b210c3 7 | 
github.com/iotaledger/hive.go/core v1.0.0-rc.3 8 | github.com/stretchr/testify v1.8.1 9 | go.uber.org/atomic v1.10.0 10 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 11 | ) 12 | 13 | require ( 14 | github.com/davecgh/go-spew v1.1.1 // indirect 15 | github.com/fatih/structs v1.1.0 // indirect 16 | github.com/fsnotify/fsnotify v1.6.0 // indirect 17 | github.com/knadh/koanf v1.4.4 // indirect 18 | github.com/mitchellh/copystructure v1.2.0 // indirect 19 | github.com/mitchellh/mapstructure v1.5.0 // indirect 20 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 21 | github.com/petermattis/goid v0.0.0-20221018141743-354ef7f2fd21 // indirect 22 | github.com/pmezard/go-difflib v1.0.0 // indirect 23 | github.com/sasha-s/go-deadlock v0.3.1 // indirect 24 | github.com/spf13/cast v1.5.0 // indirect 25 | github.com/spf13/pflag v1.0.5 // indirect 26 | go.uber.org/multierr v1.8.0 // indirect 27 | go.uber.org/zap v1.23.0 // indirect 28 | golang.org/x/sys v0.2.0 // indirect 29 | gopkg.in/yaml.v2 v2.4.0 // indirect 30 | gopkg.in/yaml.v3 v3.0.1 // indirect 31 | ) 32 | -------------------------------------------------------------------------------- /logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/configuration" 5 | "github.com/iotaledger/hive.go/logger" 6 | ) 7 | 8 | var New = logger.NewLogger 9 | 10 | func init() { 11 | if err := logger.InitGlobalLogger(configuration.New()); err != nil { 12 | panic(err) 13 | } 14 | logger.SetLevel(logger.LevelDebug) 15 | } 16 | -------------------------------------------------------------------------------- /multiverse/approvalweight_manager.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/datastructure/walker" 7 | "github.com/iotaledger/hive.go/events" 8 | "github.com/iotaledger/multivers-simulation/config" 9 | ) 10 | 11 | // region ApprovalManager /////////////////////////////////////////////////////////////////////////////////////////////////// 12 | 13 | type ApprovalManager struct { 14 | tangle *Tangle 15 | Events *ApprovalWeightEvents 16 | } 17 | 18 | func NewApprovalManager(tangle *Tangle) *ApprovalManager { 19 | return &ApprovalManager{ 20 | tangle: tangle, 21 | Events: &ApprovalWeightEvents{ 22 | MessageConfirmed: events.NewEvent(approvalEventCaller), 23 | MessageWeightUpdated: events.NewEvent(approvalEventCaller), 24 | MessageWitnessWeightUpdated: events.NewEvent(witnessWeightEventCaller), 25 | }, 26 | } 27 | } 28 | 29 | func approvalEventCaller(handler interface{}, params ...interface{}) { 30 | handler.(func(*Message, *MessageMetadata, uint64, int64))(params[0].(*Message), params[1].(*MessageMetadata), params[2].(uint64), params[3].(int64)) 31 | } 32 | 33 | func witnessWeightEventCaller(handler interface{}, params ...interface{}) { 34 | handler.(func(*Message, uint64))(params[0].(*Message), params[1].(uint64)) 35 | } 36 | 37 | func (a *ApprovalManager) Setup() { 38 | a.tangle.Solidifier.Events.MessageSolid.Attach(events.NewClosure(a.ApproveMessages)) 39 | } 40 | 41 | func (a *ApprovalManager) ApproveMessages(messageID MessageID) { 42 | 43 | issuingMessage := a.tangle.Storage.Message(messageID) 44 | byteIndex := issuingMessage.Issuer / 8 45 | mod := issuingMessage.Issuer % 8 46 | 47 | if !issuingMessage.Validation { 48 | return 49 | } 50 | 51 | weight := 
a.tangle.WeightDistribution.Weight(issuingMessage.Issuer) 52 | a.tangle.Utils.WalkMessagesAndMetadata(func(message *Message, messageMetadata *MessageMetadata, walker *walker.Walker) { 53 | if int(a.tangle.Peer.ID) == config.Params.MonitoredWitnessWeightPeer && messageMetadata.ID() == MessageID(config.Params.MonitoredWitnessWeightMessageID) { 54 | // log.Infof("Peer %d Message %d Witness Weight %d", a.tangle.Peer.ID, messageMetadata.id, messageMetadata.weight) 55 | a.Events.MessageWitnessWeightUpdated.Trigger(message, messageMetadata.Weight()) 56 | } 57 | weightByte := messageMetadata.WeightByte(int(byteIndex)) 58 | if weightByte&(1<= config.Params.ConfirmationThreshold*float64(a.tangle.WeightDistribution.TotalWeight()) && 64 | !messageMetadata.Confirmed() && !messageMetadata.Orphaned() { 65 | // check if this should be orphaned 66 | now := time.Now() 67 | if a.tangle.Storage.TooOld(message) { 68 | messageMetadata.SetOrphanTime(now) 69 | } else { 70 | messageMetadata.SetConfirmationTime(now) 71 | a.Events.MessageConfirmed.Trigger(message, messageMetadata, messageMetadata.Weight(), messageIDCounter) 72 | } 73 | } 74 | 75 | for strongParentID := range message.StrongParents { 76 | walker.Push(strongParentID) 77 | } 78 | 79 | for weakParentID := range message.WeakParents { 80 | walker.Push(weakParentID) 81 | } 82 | } 83 | 84 | }, NewMessageIDs(messageID), false) 85 | } 86 | 87 | // region ApprovalWeightEvents ///////////////////////////////////////////////////////////////////////////////////////////// 88 | 89 | type ApprovalWeightEvents struct { 90 | MessageConfirmed *events.Event 91 | MessageWeightUpdated *events.Event 92 | MessageWitnessWeightUpdated *events.Event 93 | } 94 | 95 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 96 | -------------------------------------------------------------------------------- /multiverse/booker.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/cerrors" 5 | "github.com/iotaledger/hive.go/events" 6 | "golang.org/x/xerrors" 7 | ) 8 | 9 | // region Booker /////////////////////////////////////////////////////////////////////////////////////////////////////// 10 | 11 | type Booker struct { 12 | Events *BookerEvents 13 | 14 | tangle *Tangle 15 | } 16 | 17 | func NewBooker(tangle *Tangle) (booker *Booker) { 18 | return &Booker{ 19 | Events: &BookerEvents{ 20 | MessageBooked: events.NewEvent(messageIDEventCaller), 21 | MessageInvalid: events.NewEvent(messageIDEventCaller), 22 | }, 23 | 24 | tangle: tangle, 25 | } 26 | } 27 | 28 | func (b *Booker) Setup() { 29 | b.tangle.Solidifier.Events.MessageSolid.Attach(events.NewClosure(b.Book)) 30 | } 31 | 32 | func (b *Booker) Book(messageID MessageID) { 33 | message := b.tangle.Storage.Message(messageID) 34 | messageMetadata := b.tangle.Storage.MessageMetadata(messageID) 35 | 36 | inheritedColor, err := b.inheritColor(message) 37 | if err != nil { 38 | b.Events.MessageInvalid.Trigger(messageID) 39 | return 40 | } 41 | 42 | messageMetadata.SetInheritedColor(inheritedColor) 43 | 44 | b.Events.MessageBooked.Trigger(messageID) 45 | } 46 | 47 | // The booked message will inherit the color from its parent 48 | func (b *Booker) inheritColor(message *Message) (inheritedColor Color, err error) { 49 | inheritedColor = message.Payload 50 | for _, colorToInherit := range append(make([]Color, 0), b.colorsOfStrongParents(message)...) 
{ 51 | if colorToInherit == UndefinedColor { 52 | continue 53 | } 54 | 55 | if inheritedColor != UndefinedColor && inheritedColor != colorToInherit { 56 | err = xerrors.Errorf("message with %s tried to combine conflicting perceptions of the ledger state: %w", message.ID, cerrors.ErrFatal) 57 | return 58 | } 59 | 60 | inheritedColor = colorToInherit 61 | } 62 | 63 | return 64 | } 65 | 66 | func (b *Booker) colorsOfStrongParents(message *Message) (colorsOfStrongParents []Color) { 67 | for strongParent := range message.StrongParents { 68 | if strongParent == Genesis { 69 | continue 70 | } 71 | 72 | colorsOfStrongParents = append(colorsOfStrongParents, b.tangle.Storage.MessageMetadata(strongParent).InheritedColor()) 73 | } 74 | 75 | return 76 | } 77 | 78 | func (b *Booker) colorsOfWeakParents(message *Message) (colorsOfStrongParents []Color) { 79 | for weakParent := range message.WeakParents { 80 | if weakParent == Genesis { 81 | continue 82 | } 83 | 84 | colorsOfStrongParents = append(colorsOfStrongParents, b.tangle.Storage.Message(weakParent).Payload) 85 | } 86 | 87 | return 88 | } 89 | 90 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 91 | 92 | // region BookerEvents ///////////////////////////////////////////////////////////////////////////////////////////////// 93 | 94 | type BookerEvents struct { 95 | MessageInvalid *events.Event 96 | MessageBooked *events.Event 97 | } 98 | 99 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 100 | -------------------------------------------------------------------------------- /multiverse/icca_scheduler.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "container/heap" 5 | "container/ring" 6 | "math" 7 | "sync" 8 | "time" 9 | 10 | "github.com/iotaledger/hive.go/events" 11 | "github.com/iotaledger/multivers-simulation/config" 12 | "github.com/iotaledger/multivers-simulation/network" 13 | ) 14 | 15 | // region ICCA Scheduler //////////////////////////////////////////////////////////////////////////////////////////////////// 16 | 17 | func (s *ICCAScheduler) initQueues() { 18 | for i := 0; i < config.Params.NodesCount; i++ { 19 | issuerQueue := &IssuerQueue{} 20 | heap.Init(issuerQueue) 21 | s.issuerQueues[network.PeerID(i)] = issuerQueue 22 | s.roundRobin.Value = network.PeerID(i) 23 | s.roundRobin = s.roundRobin.Next() 24 | } 25 | if s.roundRobin.Value.(network.PeerID) != 0 { 26 | panic("Incomplete ring") 27 | } 28 | } 29 | 30 | // region ICCA Scheduler //////////////////////////////////////////////////////////////////////////////////////////////////// 31 | 32 | type ICCAScheduler struct { 33 | tangle *Tangle 34 | 35 | // nonReadyMapMutex sync.RWMutex 36 | nonReadyMap map[MessageID]*Message 37 | 38 | accessMana map[network.PeerID]float64 39 | deficits map[network.PeerID]float64 40 | quanta map[network.PeerID]float64 41 | issuerQueues map[network.PeerID]*IssuerQueue 42 | roundRobin *ring.Ring 43 | readyLen int 44 | 45 | mutex sync.Mutex 46 | 47 | events *SchedulerEvents 48 | } 49 | 50 | func (s *ICCAScheduler) Setup() { 51 | // setup the initial AccessMana, deficits and quanta when the peer ID is created 52 | for id := 0; id < config.Params.NodesCount; id++ { 53 | s.accessMana[network.PeerID(id)] = 0.0 54 | s.deficits[network.PeerID(id)] = 0.0 55 | idBandwidth := s.tangle.BandwidthDistribution.Bandwidth(network.PeerID(id)) 56 | 
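// A node's quantum is its share of the scheduler's total throughput:
// quantum_i = bandwidth_i / SchedulingRate, so the deficit round-robin below
// serves each issuer queue in proportion to this value.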
s.quanta[network.PeerID(id)] = float64(idBandwidth) / float64(config.Params.SchedulingRate) 57 | } 58 | // initialise the issuer queues 59 | s.initQueues() 60 | s.events.MessageScheduled.Attach(events.NewClosure(func(messageID MessageID) { 61 | s.tangle.Peer.GossipNetworkMessage(s.tangle.Storage.Message(messageID)) 62 | s.updateChildrenReady(messageID) 63 | // log.Debugf("Peer %d Gossiped message %d", 64 | // s.tangle.Peer.ID, messageID) 65 | })) 66 | s.events.MessageDropped.Attach(events.NewClosure(func(messageID MessageID) { 67 | s.tangle.Storage.MessageMetadata(messageID).SetDropTime(time.Now()) 68 | })) 69 | s.tangle.ApprovalManager.Events.MessageConfirmed.Attach(events.NewClosure(func(message *Message, messageMetadata *MessageMetadata, weight uint64, messageIDCounter int64) { 70 | if config.Params.ConfEligible { 71 | s.updateChildrenReady(message.ID) 72 | } 73 | s.tangle.Storage.AddToAcceptedSlot(message) 74 | })) 75 | } 76 | 77 | func (s *ICCAScheduler) updateChildrenReady(messageID MessageID) { 78 | for strongChildID := range s.tangle.Storage.StrongChildren(messageID) { 79 | if s.tangle.Storage.isReady(strongChildID) { 80 | s.setReady(strongChildID) 81 | } 82 | } 83 | for weakChildID := range s.tangle.Storage.WeakChildren(messageID) { 84 | if s.tangle.Storage.isReady(weakChildID) { 85 | s.setReady(weakChildID) 86 | } 87 | } 88 | } 89 | 90 | func (s *ICCAScheduler) setReady(messageID MessageID) { 91 | s.tangle.Storage.MessageMetadata(messageID).SetReady() 92 | // move from non-ready queue to ready queue if this child is already enqueued 93 | 94 | // s.nonReadyMapMutex.Lock() 95 | // defer s.nonReadyMapMutex.Unlock() 96 | if m, exists := s.nonReadyMap[messageID]; exists { 97 | delete(s.nonReadyMap, messageID) 98 | s.push(m) 99 | } 100 | } 101 | 102 | func (s *ICCAScheduler) IncrementAccessMana(schedulingRate float64) { 103 | bandwidth := s.tangle.BandwidthDistribution.Bandwidths() 104 | totalBandwidth := config.Params.SchedulingRate 105 | // every time something is scheduled, we add this much mana in total\ 106 | mana := float64(10) 107 | for id := range s.accessMana { 108 | s.accessMana[id] += mana * float64(bandwidth[id]) / float64(totalBandwidth) 109 | } 110 | } 111 | 112 | func (s *ICCAScheduler) DecreaseNodeAccessMana(nodeID network.PeerID, manaIncrement float64) (newAccessMana float64) { 113 | s.accessMana[nodeID] -= manaIncrement 114 | newAccessMana = s.accessMana[nodeID] 115 | return newAccessMana 116 | } 117 | 118 | func (s *ICCAScheduler) BurnValue(issuanceTime time.Time) (float64, bool) { 119 | slotIndex := s.tangle.Storage.SlotIndex(issuanceTime) 120 | RMC := s.tangle.Storage.RMC(slotIndex) 121 | return RMC, s.GetNodeAccessMana(s.tangle.Peer.ID) >= RMC 122 | } 123 | 124 | func (s *ICCAScheduler) EnqueueMessage(messageID MessageID) { 125 | s.tangle.Storage.MessageMetadata(messageID).SetEnqueueTime(time.Now()) 126 | m := s.tangle.Storage.Message(messageID) 127 | 128 | // if this message is a validation block, skip the scheduler. 129 | if m.Validation { 130 | s.tangle.Storage.MessageMetadata(m.ID).SetScheduleTime(time.Now()) 131 | s.updateChildrenReady(m.ID) 132 | s.events.MessageScheduled.Trigger(m.ID) 133 | } 134 | 135 | // if this node is a spammer, skip the scheduler. 
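// (BurnPolicies[i] == 0 marks node i as a spammer, see config/utility.go: its own
// blocks are marked as scheduled and gossiped immediately instead of waiting here.)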
136 | if m.Issuer == s.tangle.Peer.ID && config.Params.BurnPolicies[m.Issuer] == 0 { 137 | s.tangle.Storage.MessageMetadata(m.ID).SetScheduleTime(time.Now()) 138 | s.updateChildrenReady(m.ID) 139 | s.events.MessageScheduled.Trigger(m.ID) 140 | } 141 | // Check if the message is ready to decide which queue to append to 142 | if s.tangle.Storage.isReady(messageID) { 143 | //log.Debugf("Ready Message Enqueued") 144 | s.tangle.Storage.MessageMetadata(messageID).SetReady() 145 | s.push(m) 146 | } else { 147 | //log.Debug("Not Ready Message Enqueued") 148 | s.tangle.Storage.MessageMetadata(messageID).SetReady() 149 | // s.nonReadyMapMutex.Lock() 150 | s.nonReadyMap[messageID] = s.tangle.Storage.Message(messageID) 151 | // s.nonReadyMapMutex.Unlock() 152 | } 153 | s.events.MessageEnqueued.Trigger(s.IssuerQueueLen(m.Issuer), len(s.nonReadyMap)) 154 | s.BufferManagement() 155 | } 156 | 157 | func (s *ICCAScheduler) BufferManagement() { 158 | for s.ReadyLen() > config.Params.MaxBuffer { 159 | issuerID := 0 160 | maxScaledLen := 0.0 161 | for id := 0; id < config.Params.NodesCount; id++ { 162 | scaledLen := float64(s.IssuerQueueLen(network.PeerID(id))) / s.quanta[network.PeerID(id)] 163 | if scaledLen >= maxScaledLen { 164 | maxScaledLen = scaledLen 165 | issuerID = id 166 | } 167 | } 168 | s.pop(network.PeerID(issuerID)) // drop head 169 | } 170 | } 171 | 172 | func (s *ICCAScheduler) ScheduleMessage() { 173 | rounds, selectedIssuerID := s.selectIssuer() 174 | if selectedIssuerID == network.PeerID(-1) { 175 | return 176 | } 177 | for id := 0; id < config.Params.NodesCount; id++ { 178 | // increment all deficits by the number of rounds needed. 179 | s.incrementDeficit(network.PeerID(id), rounds*s.quanta[network.PeerID(id)]) 180 | } 181 | for id := s.roundRobin.Value.(network.PeerID); id != selectedIssuerID; id = s.roundRobin.Value.(network.PeerID) { 182 | // increment all the issuers before the selected issuer by one more round. 183 | s.incrementDeficit(id, s.quanta[id]) 184 | s.roundRobin = s.roundRobin.Next() 185 | } 186 | // now the ring is pointing to the selected issuer and deficits are updated. 187 | // pop the message from the chosen issuer's queue 188 | m := s.pop(s.roundRobin.Value.(network.PeerID)) 189 | // decrement its deficit 190 | s.incrementDeficit(s.roundRobin.Value.(network.PeerID), -1) // assumes work==1 191 | // schedule the message 192 | s.tangle.Storage.MessageMetadata(m.ID).SetScheduleTime(time.Now()) 193 | s.updateChildrenReady(m.ID) 194 | s.events.MessageScheduled.Trigger(m.ID) 195 | } 196 | 197 | // Return the issuer with the least rounds to wait. 
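// A non-empty issuer queue needs r = max(1 - deficit, 0) / quantum more rounds of
// deficit increments before one unit of work (one message) can be scheduled from it.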
198 | func (s *ICCAScheduler) selectIssuer() (rounds float64, issuerID network.PeerID) { 199 | rounds = math.MaxFloat64 200 | issuerID = network.PeerID(-1) 201 | for i := 0; i < config.Params.NodesCount; i++ { 202 | if s.IssuerQueueLen(s.roundRobin.Value.(network.PeerID)) == 0 { 203 | s.roundRobin = s.roundRobin.Next() 204 | continue 205 | } 206 | id := s.roundRobin.Value.(network.PeerID) 207 | r := (math.Max(1-s.Deficit(network.PeerID(id)), 0) / s.quanta[network.PeerID(id)]) 208 | if r < rounds { 209 | rounds = r 210 | issuerID = id 211 | } 212 | s.roundRobin = s.roundRobin.Next() 213 | } 214 | return 215 | } 216 | 217 | func (s *ICCAScheduler) Events() *SchedulerEvents { 218 | return s.events 219 | } 220 | 221 | func (s *ICCAScheduler) ReadyLen() int { 222 | s.mutex.Lock() 223 | defer s.mutex.Unlock() 224 | return s.readyLen 225 | } 226 | 227 | func (s *ICCAScheduler) NonReadyLen() int { 228 | // s.nonReadyMapMutex.RLock() 229 | // defer s.nonReadyMapMutex.RUnlock() 230 | return len(s.nonReadyMap) 231 | } 232 | 233 | func (s *ICCAScheduler) GetNodeAccessMana(nodeID network.PeerID) (mana float64) { 234 | mana = s.accessMana[nodeID] 235 | return mana 236 | } 237 | 238 | func (s *ICCAScheduler) GetMaxManaBurn() (maxManaBurn float64) { 239 | s.mutex.Lock() 240 | defer s.mutex.Unlock() 241 | for id := 0; id < config.Params.NodesCount; id++ { 242 | q := s.issuerQueues[network.PeerID(id)] 243 | if q.Len() > 0 { 244 | maxManaBurn = math.Max(maxManaBurn, (*q)[0].ManaBurnValue) 245 | } 246 | } 247 | return 248 | } 249 | 250 | func (s *ICCAScheduler) IssuerQueueLen(issuer network.PeerID) int { 251 | s.mutex.Lock() 252 | defer s.mutex.Unlock() 253 | return s.issuerQueues[issuer].Len() 254 | } 255 | 256 | func (s *ICCAScheduler) push(m *Message) { 257 | s.mutex.Lock() 258 | defer s.mutex.Unlock() 259 | heap.Push(s.issuerQueues[m.Issuer], *m) 260 | s.readyLen += 1 261 | } 262 | 263 | func (s *ICCAScheduler) pop(issuer network.PeerID) Message { 264 | s.mutex.Lock() 265 | defer s.mutex.Unlock() 266 | s.readyLen -= 1 267 | return heap.Pop(s.issuerQueues[issuer]).(Message) 268 | } 269 | 270 | func (s *ICCAScheduler) Deficit(issuer network.PeerID) float64 { 271 | s.mutex.Lock() 272 | defer s.mutex.Unlock() 273 | return s.deficits[issuer] 274 | } 275 | 276 | func (s *ICCAScheduler) incrementDeficit(issuer network.PeerID, delta float64) { 277 | s.mutex.Lock() 278 | defer s.mutex.Unlock() 279 | s.deficits[issuer] = math.Min( 280 | s.deficits[issuer]+delta, 281 | config.Params.MaxDeficit, 282 | ) 283 | } 284 | 285 | func (s *ICCAScheduler) RateSetter() bool { 286 | if s.ReadyLen() == 0 || config.Params.BurnPolicies[s.tangle.Peer.ID] == 0 { 287 | return true 288 | } 289 | qlen := s.IssuerQueueLen(s.tangle.Peer.ID) 290 | return int(s.Deficit(s.tangle.Peer.ID)) >= qlen+1 291 | } 292 | -------------------------------------------------------------------------------- /multiverse/manaburn_scheduler.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "container/heap" 5 | "time" 6 | 7 | "github.com/iotaledger/hive.go/events" 8 | "github.com/iotaledger/multivers-simulation/config" 9 | "github.com/iotaledger/multivers-simulation/network" 10 | ) 11 | 12 | // region ManaBurn Scheduler //////////////////////////////////////////////////////////////////////////////////////////////////// 13 | 14 | type MBScheduler struct { 15 | tangle *Tangle 16 | readyQueue *PriorityQueue 17 | nonReadyMap map[MessageID]*Message 18 | accessMana 
map[network.PeerID]float64 19 | 20 | events *SchedulerEvents 21 | } 22 | 23 | func (s *MBScheduler) Setup() { 24 | // Setup the initial AccessMana when the peer ID is created 25 | for id := 0; id < config.Params.NodesCount; id++ { 26 | s.accessMana[network.PeerID(id)] = config.Params.InitialMana 27 | } 28 | s.events.MessageScheduled.Attach(events.NewClosure(func(messageID MessageID) { 29 | s.tangle.Peer.GossipNetworkMessage(s.tangle.Storage.Message(messageID)) 30 | s.updateChildrenReady(messageID) 31 | // log.Debugf("Peer %d Gossiped message %d", 32 | // s.tangle.Peer.ID, messageID) 33 | })) 34 | s.events.MessageDropped.Attach(events.NewClosure(func(messageID MessageID) { 35 | s.tangle.Storage.MessageMetadata(messageID).SetDropTime(time.Now()) 36 | })) 37 | s.tangle.ApprovalManager.Events.MessageConfirmed.Attach(events.NewClosure(func(message *Message, messageMetadata *MessageMetadata, weight uint64, messageIDCounter int64) { 38 | if config.Params.ConfEligible { 39 | s.updateChildrenReady(message.ID) 40 | } 41 | s.tangle.Storage.AddToAcceptedSlot(message) 42 | })) 43 | } 44 | 45 | func (s *MBScheduler) BurnValue(issuanceTime time.Time) (burn float64, ok bool) { 46 | peerID := s.tangle.Peer.ID 47 | switch policy := config.Params.BurnPolicies[peerID]; BurnPolicyType(policy) { 48 | case NoBurn: 49 | return 0.0, true 50 | case Anxious: 51 | burn = s.GetNodeAccessMana(peerID) 52 | ok = true 53 | return 54 | case Greedy1: 55 | burn = s.GetMaxManaBurn() + 1.0 56 | ok = burn <= s.GetNodeAccessMana(peerID) 57 | return 58 | case Greedy10: 59 | burn = s.GetMaxManaBurn() + 10.0 60 | ok = burn <= s.GetNodeAccessMana(peerID) 61 | return 62 | default: 63 | panic("invalid burn policy") 64 | } 65 | } 66 | 67 | // TODO: schedulingRate is not used 68 | func (s *MBScheduler) IncrementAccessMana(schedulingRate float64) { 69 | weights := s.tangle.WeightDistribution.Weights() 70 | totalWeight := config.Params.NodesTotalWeight 71 | // every time something is scheduled, we add this much mana in total\ 72 | mana := float64(10) 73 | for id := range s.accessMana { 74 | s.accessMana[id] += mana * float64(weights[id]) / float64(totalWeight) 75 | } 76 | } 77 | 78 | func (s *MBScheduler) DecreaseNodeAccessMana(nodeID network.PeerID, manaIncrement float64) (newAccessMana float64) { 79 | s.accessMana[nodeID] -= manaIncrement 80 | newAccessMana = s.accessMana[nodeID] 81 | return newAccessMana 82 | } 83 | 84 | func (s *MBScheduler) ReadyLen() int { 85 | return s.readyQueue.Len() 86 | } 87 | 88 | func (s *MBScheduler) NonReadyLen() int { 89 | return len(s.nonReadyMap) 90 | } 91 | 92 | func (s *MBScheduler) GetNodeAccessMana(nodeID network.PeerID) (mana float64) { 93 | mana = s.accessMana[nodeID] 94 | return mana 95 | } 96 | 97 | func (s *MBScheduler) Events() *SchedulerEvents { 98 | return s.events 99 | } 100 | 101 | func (s *MBScheduler) updateChildrenReady(messageID MessageID) { 102 | for strongChildID := range s.tangle.Storage.StrongChildren(messageID) { 103 | if s.tangle.Storage.isReady(strongChildID) { 104 | s.setReady(strongChildID) 105 | } 106 | } 107 | for weakChildID := range s.tangle.Storage.WeakChildren(messageID) { 108 | if s.tangle.Storage.isReady(weakChildID) { 109 | s.setReady(weakChildID) 110 | } 111 | } 112 | } 113 | 114 | func (s *MBScheduler) setReady(messageID MessageID) { 115 | s.tangle.Storage.MessageMetadata(messageID).SetReady() 116 | // move from non ready queue to ready queue if this child is already enqueued 117 | if m, exists := s.nonReadyMap[messageID]; exists { 118 | delete(s.nonReadyMap, 
messageID) 119 | heap.Push(s.readyQueue, *m) 120 | s.BufferManagement() 121 | } 122 | } 123 | 124 | func (s *MBScheduler) IsEmpty() bool { 125 | return s.readyQueue.Len() == 0 126 | } 127 | 128 | func (s *MBScheduler) GetMaxManaBurn() float64 { 129 | if s.readyQueue.Len() > 0 { 130 | return (*s.readyQueue)[0].ManaBurnValue 131 | } else { 132 | return 0.0 133 | } 134 | } 135 | 136 | func (s *MBScheduler) ScheduleMessage() { 137 | // pop the Message from top of the priority queue and consume the accessMana 138 | if !s.IsEmpty() { 139 | m := heap.Pop(s.readyQueue).(Message) 140 | if m.Issuer != s.tangle.Peer.ID { // already deducted Mana for own blocks 141 | s.DecreaseNodeAccessMana(m.Issuer, m.ManaBurnValue) 142 | } 143 | s.tangle.Storage.MessageMetadata(m.ID).SetScheduleTime(time.Now()) 144 | s.updateChildrenReady(m.ID) 145 | s.events.MessageScheduled.Trigger(m.ID) 146 | } 147 | } 148 | 149 | func (s *MBScheduler) EnqueueMessage(messageID MessageID) { 150 | s.tangle.Storage.MessageMetadata(messageID).SetEnqueueTime(time.Now()) 151 | // Check if the message is ready to decide which queue to append to 152 | if s.tangle.Storage.isReady(messageID) { 153 | //log.Debugf("Ready Message Enqueued") 154 | s.tangle.Storage.MessageMetadata(messageID).SetReady() 155 | m := *s.tangle.Storage.Message(messageID) 156 | heap.Push(s.readyQueue, m) 157 | } else { 158 | //log.Debug("Not Ready Message Enqueued") 159 | s.tangle.Storage.MessageMetadata(messageID).SetReady() 160 | s.nonReadyMap[messageID] = s.tangle.Storage.Message(messageID) 161 | } 162 | s.events.MessageEnqueued.Trigger(s.readyQueue.Len(), len(s.nonReadyMap)) 163 | s.BufferManagement() 164 | } 165 | 166 | func (s *MBScheduler) BufferManagement() { 167 | for s.readyQueue.Len() > config.Params.MaxBuffer { 168 | tail := s.readyQueue.tail() 169 | heap.Remove(s.readyQueue, tail) // remove the lowest burn value/ issuance time 170 | } 171 | } 172 | 173 | func (s *MBScheduler) IssuerQueueLen(issuer network.PeerID) int { 174 | return 0 175 | } 176 | 177 | func (s *MBScheduler) Deficit(issuer network.PeerID) float64 { 178 | return 0.0 179 | } 180 | 181 | func (s *MBScheduler) RateSetter() bool { 182 | return true 183 | } 184 | -------------------------------------------------------------------------------- /multiverse/messagefactory.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "sync/atomic" 5 | "time" 6 | ) 7 | 8 | // region MessageFactory /////////////////////////////////////////////////////////////////////////////////////////////// 9 | 10 | type MessageFactory struct { 11 | tangle *Tangle 12 | sequenceNumber uint64 13 | numberOfNodes uint64 14 | } 15 | 16 | func NewMessageFactory(tangle *Tangle, numberOfNodes uint64) (messageFactory *MessageFactory) { 17 | return &MessageFactory{ 18 | tangle: tangle, 19 | numberOfNodes: numberOfNodes, 20 | } 21 | } 22 | 23 | func (m *MessageFactory) CreateMessage(validation bool, payload Color) (*Message, bool) { 24 | strongParents, weakParents := m.tangle.TipManager.Tips(validation) 25 | issuanceTime := time.Now() 26 | if burn, ok := m.tangle.Scheduler.BurnValue(issuanceTime); ok { 27 | m.tangle.Scheduler.DecreaseNodeAccessMana(m.tangle.Peer.ID, burn) // decrease the nodes own Mana when the message is created 28 | message := &Message{ 29 | ID: NewMessageID(), 30 | Validation: validation, 31 | StrongParents: strongParents, 32 | WeakParents: weakParents, 33 | SequenceNumber: atomic.AddUint64(&m.sequenceNumber, 1), 34 | Issuer: 
m.tangle.Peer.ID, 35 | Payload: payload, 36 | IssuanceTime: issuanceTime, 37 | ManaBurnValue: burn, 38 | } 39 | return message, ok 40 | } else { 41 | return nil, false 42 | } 43 | } 44 | 45 | func (m *MessageFactory) SequenceNumber() uint64 { 46 | return atomic.AddUint64(&m.sequenceNumber, 1) 47 | } 48 | 49 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 50 | -------------------------------------------------------------------------------- /multiverse/models.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "sync/atomic" 5 | "time" 6 | 7 | "github.com/iotaledger/hive.go/types" 8 | "github.com/iotaledger/multivers-simulation/config" 9 | "github.com/iotaledger/multivers-simulation/network" 10 | ) 11 | 12 | // region Slot ////////////////////////////////////////////////////////////////////////////////////////////////////// 13 | 14 | type SlotIndex int 15 | 16 | // endregion Slot /////////////////////////////////////////////////////////////////////////////////////////////////// 17 | 18 | // region Message ////////////////////////////////////////////////////////////////////////////////////////////////////// 19 | 20 | type Message struct { 21 | Validation bool 22 | ID MessageID 23 | StrongParents MessageIDs 24 | WeakParents MessageIDs 25 | SequenceNumber uint64 26 | Issuer network.PeerID 27 | Payload Color 28 | IssuanceTime time.Time 29 | ManaBurnValue float64 30 | } 31 | 32 | // endregion Message /////////////////////////////////////////////////////////////////////////////////////////////////// 33 | 34 | // region MessageMetadata ////////////////////////////////////////////////////////////////////////////////////////////// 35 | 36 | type MessageMetadata struct { 37 | id MessageID 38 | solid bool 39 | ready bool 40 | inheritedColor Color 41 | weightSlice []byte 42 | weight uint64 43 | confirmationTime time.Time 44 | orphanTime time.Time 45 | arrivalTime time.Time 46 | enqueueTime time.Time 47 | scheduleTime time.Time 48 | dropTime time.Time 49 | } 50 | 51 | func (m *MessageMetadata) ArrivalTime() time.Time { 52 | return m.arrivalTime 53 | } 54 | 55 | func (m *MessageMetadata) WeightByte(index int) byte { 56 | return m.weightSlice[index] 57 | } 58 | 59 | func (m *MessageMetadata) SetWeightByte(index int, weight byte) { 60 | m.weightSlice[index] = weight 61 | } 62 | 63 | func (m *MessageMetadata) SetWeightSlice(weightSlice []byte) { 64 | m.weightSlice = weightSlice 65 | } 66 | 67 | func (m *MessageMetadata) Weight() uint64 { 68 | return m.weight 69 | } 70 | 71 | func (m *MessageMetadata) AddWeight(weight uint64) { 72 | m.weight += weight 73 | } 74 | 75 | func (m *MessageMetadata) SetWeight(weight uint64) { 76 | m.weight = weight 77 | } 78 | 79 | func (m *MessageMetadata) ConfirmationTime() time.Time { 80 | return m.confirmationTime 81 | } 82 | 83 | func (m *MessageMetadata) SetConfirmationTime(confirmationTime time.Time) { 84 | m.confirmationTime = confirmationTime 85 | } 86 | 87 | func (m *MessageMetadata) SetOrphanTime(orphanTime time.Time) { 88 | m.orphanTime = orphanTime 89 | } 90 | 91 | func (m *MessageMetadata) SetEnqueueTime(enqueueTime time.Time) { 92 | m.enqueueTime = enqueueTime 93 | } 94 | 95 | func (m *MessageMetadata) SetScheduleTime(scheduleTime time.Time) { 96 | m.scheduleTime = scheduleTime 97 | } 98 | 99 | func (m *MessageMetadata) SetDropTime(dropTime time.Time) { 100 | m.dropTime = dropTime 101 | } 102 | 103 | func (m 
*MessageMetadata) ID() (messageID MessageID) { 104 | return m.id 105 | } 106 | 107 | func (m *MessageMetadata) Ready() bool { 108 | return m.ready 109 | } 110 | 111 | func (m *MessageMetadata) SetReady() { 112 | m.ready = true 113 | } 114 | 115 | func (m *MessageMetadata) Scheduled() bool { 116 | return !m.scheduleTime.IsZero() 117 | } 118 | 119 | func (m *MessageMetadata) Confirmed() bool { 120 | return !m.confirmationTime.IsZero() 121 | } 122 | 123 | func (m *MessageMetadata) Orphaned() bool { 124 | return !m.orphanTime.IsZero() 125 | } 126 | 127 | func (m *MessageMetadata) Eligible() bool { // a message is ready if all parents are eligible = either scheduled or confirmed 128 | return m.Scheduled() || (m.Confirmed() && config.Params.ConfEligible) 129 | } 130 | 131 | func (m *MessageMetadata) SetSolid(solid bool) (modified bool) { 132 | if solid == m.solid { 133 | return 134 | } 135 | 136 | m.solid = solid 137 | modified = true 138 | 139 | return 140 | } 141 | 142 | func (m *MessageMetadata) Solid() (solid bool) { 143 | return m.solid 144 | } 145 | 146 | func (m *MessageMetadata) SetInheritedColor(color Color) (modified bool) { 147 | if color == m.inheritedColor { 148 | return 149 | } 150 | 151 | m.inheritedColor = color 152 | modified = true 153 | 154 | return 155 | } 156 | 157 | func (m *MessageMetadata) InheritedColor() (color Color) { 158 | return m.inheritedColor 159 | } 160 | 161 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 162 | 163 | // region MessageRequest /////////////////////////////////////////////////////////////////////////////////////////////// 164 | 165 | type MessageRequest struct { 166 | MessageID MessageID 167 | Issuer network.PeerID 168 | } 169 | 170 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 171 | 172 | // region MessageID //////////////////////////////////////////////////////////////////////////////////////////////////// 173 | 174 | type MessageID int64 175 | 176 | var ( 177 | Genesis MessageID 178 | 179 | messageIDCounter int64 180 | ) 181 | 182 | func NewMessageID() MessageID { 183 | return MessageID(atomic.AddInt64(&messageIDCounter, 1)) 184 | } 185 | 186 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 187 | 188 | // region MessageIDs /////////////////////////////////////////////////////////////////////////////////////////////////// 189 | 190 | type MessageIDs map[MessageID]types.Empty 191 | 192 | func NewMessageIDs(messageIDs ...MessageID) (newMessageIDs MessageIDs) { 193 | newMessageIDs = make(MessageIDs) 194 | for _, messageID := range messageIDs { 195 | newMessageIDs[messageID] = types.Void 196 | } 197 | 198 | return 199 | } 200 | 201 | func (m MessageIDs) Add(messageID MessageID) { 202 | m[messageID] = types.Void 203 | } 204 | 205 | // Trim the MessageIDs to only retain `length` size 206 | func (m MessageIDs) Trim(length int) { 207 | counter := 0 208 | for messageID := range m { 209 | if counter == length { 210 | delete(m, messageID) 211 | continue 212 | } 213 | counter++ 214 | } 215 | } 216 | 217 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 218 | 219 | // region Color //////////////////////////////////////////////////////////////////////////////////////////////////////// 220 | 221 | // The Color is used to ease of observation of Peer opinions and the 
ownOpinion based on the approvalWeights 222 | // The maxOpinion is the Opinion with the highest Color value and the maxApprovalWeight 223 | // 224 | // The approvalWeights stores the accumulated weights of each Color for messages 225 | // - The message will have an associated Color inherited from its parents 226 | // - The Color of a message is assigned from `IssuePayload` 227 | // - The strongTips/weakTips will be selected from the TipSet[ownOpinion] 228 | // 229 | // The different color values are used as a tie breaker, i.e., when 2 colors have the same weight, the larger color value 230 | // opinion will be regarded as the ownOpinion. Each color simply represents a perception of a certain state of a tangle 231 | // where different conflicts are approved. 232 | type Color int64 233 | 234 | func (c Color) String() string { 235 | switch c { 236 | case 0: 237 | return "Color(Undefined)" 238 | case 1: 239 | return "Color(Blue)" 240 | case 2: 241 | return "Color(Red)" 242 | case 3: 243 | return "Color(Green)" 244 | default: 245 | return "Color(Unknown)" 246 | } 247 | } 248 | 249 | func ColorFromInt(i int) Color { 250 | switch i { 251 | case 0: 252 | return UndefinedColor 253 | case 1: 254 | return Blue 255 | case 2: 256 | return Red 257 | case 3: 258 | return Green 259 | default: 260 | return UndefinedColor 261 | } 262 | } 263 | 264 | func ColorFromStr(s string) Color { 265 | switch s { 266 | case "": 267 | return UndefinedColor 268 | case "B": 269 | return Blue 270 | case "R": 271 | return Red 272 | case "G": 273 | return Green 274 | default: 275 | return UndefinedColor 276 | } 277 | } 278 | 279 | var ( 280 | UndefinedColor Color 281 | Blue = Color(1) 282 | Red = Color(2) 283 | Green = Color(3) 284 | ) 285 | 286 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 287 | -------------------------------------------------------------------------------- /multiverse/no_scheduler.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/events" 7 | "github.com/iotaledger/multivers-simulation/network" 8 | ) 9 | 10 | type NoScheduler struct { 11 | tangle *Tangle 12 | 13 | events *SchedulerEvents 14 | } 15 | 16 | func (s *NoScheduler) Setup() { 17 | s.events.MessageScheduled.Attach(events.NewClosure(func(messageID MessageID) { 18 | s.tangle.Peer.GossipNetworkMessage(s.tangle.Storage.Message(messageID)) 19 | // log.Debugf("Peer %d Gossiped message %d", s.tangle.Peer.ID, messageID) 20 | })) 21 | } 22 | func (s *NoScheduler) IncrementAccessMana(float64) {} 23 | func (s *NoScheduler) DecreaseNodeAccessMana(network.PeerID, float64) float64 { return 0 } 24 | func (s *NoScheduler) BurnValue(time.Time) (float64, bool) { return 0, true } 25 | func (s *NoScheduler) EnqueueMessage(messageID MessageID) { 26 | s.events.MessageScheduled.Trigger(messageID) 27 | } 28 | func (s *NoScheduler) ScheduleMessage() {} 29 | func (s *NoScheduler) Events() *SchedulerEvents { return s.events } 30 | func (s *NoScheduler) ReadyLen() int { return 0 } 31 | func (s *NoScheduler) NonReadyLen() int { return 0 } 32 | func (s *NoScheduler) GetNodeAccessMana(network.PeerID) float64 { return 0 } 33 | func (s *NoScheduler) GetMaxManaBurn() float64 { return 0 } 34 | func (s *NoScheduler) IssuerQueueLen(network.PeerID) int { return 0 } 35 | func (s *NoScheduler) Deficit(network.PeerID) float64 { return 0 } 36 | func (s *NoScheduler) RateSetter() bool { 
return true } 37 | -------------------------------------------------------------------------------- /multiverse/node.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/events" 7 | "github.com/iotaledger/multivers-simulation/logger" 8 | "github.com/iotaledger/multivers-simulation/network" 9 | ) 10 | 11 | var log = logger.New("Multiverse") 12 | 13 | // region Node ///////////////////////////////////////////////////////////////////////////////////////////////////////// 14 | 15 | type NodeInterface interface { 16 | Peer() *network.Peer 17 | Tangle() *Tangle 18 | IssuePayload(payload Color) 19 | } 20 | 21 | type Node struct { 22 | peer *network.Peer 23 | tangle *Tangle 24 | } 25 | 26 | func NewNode() interface{} { 27 | return &Node{ 28 | tangle: NewTangle(), 29 | } 30 | } 31 | 32 | func (n *Node) Peer() *network.Peer { 33 | return n.peer 34 | } 35 | 36 | func (n *Node) Tangle() *Tangle { 37 | return n.tangle 38 | } 39 | 40 | func (n *Node) Setup(peer *network.Peer, weightDistribution *network.ConsensusWeightDistribution, bandwidthDistribution *network.BandwidthDistribution, genesisTime time.Time) { 41 | defer log.Debugf("%s: Setting up Multiverse ... [DONE]", peer) 42 | 43 | n.peer = peer 44 | n.tangle.Setup(peer, weightDistribution, bandwidthDistribution, genesisTime) 45 | n.tangle.Requester.Events.Request.Attach(events.NewClosure(func(messageID MessageID) { 46 | n.peer.GossipNetworkMessage(&MessageRequest{MessageID: messageID, Issuer: n.peer.ID}) 47 | })) 48 | n.tangle.Booker.Events.MessageBooked.Attach(events.NewClosure(func(messageID MessageID) { 49 | // Push the message to the scheduling buffer 50 | n.tangle.Scheduler.EnqueueMessage(messageID) 51 | })) 52 | } 53 | 54 | // IssuePayload sends the Color to the socket for creating a new Message 55 | func (n *Node) IssuePayload(payload Color) { 56 | n.peer.Socket <- payload 57 | } 58 | 59 | func (n *Node) HandleNetworkMessage(networkMessage interface{}) { 60 | switch receivedNetworkMessage := networkMessage.(type) { 61 | case *MessageRequest: 62 | if requestedMessage := n.tangle.Storage.Message(receivedNetworkMessage.MessageID); requestedMessage != nil { 63 | n.peer.Neighbors[receivedNetworkMessage.Issuer].Send(requestedMessage) 64 | } 65 | case *Message: 66 | n.tangle.ProcessMessage(receivedNetworkMessage) 67 | case Color: 68 | // create own message 69 | if message, ok := n.tangle.MessageFactory.CreateMessage(false, receivedNetworkMessage); ok { 70 | n.tangle.ProcessMessage(message) 71 | } 72 | } 73 | } 74 | 75 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 76 | -------------------------------------------------------------------------------- /multiverse/opinion_manager.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/events" 5 | "github.com/iotaledger/multivers-simulation/config" 6 | "github.com/iotaledger/multivers-simulation/network" 7 | ) 8 | 9 | // region OpinionManager /////////////////////////////////////////////////////////////////////////////////////////////// 10 | 11 | type OpinionManagerInterface interface { 12 | Events() *OpinionManagerEvents 13 | ApprovalWeights() map[Color]uint64 14 | Setup() 15 | FormOpinion(messageID MessageID) 16 | Opinion() Color 17 | SetOpinion(opinion Color) 18 | WeightsUpdated() 19 | 
UpdateWeights(messageID MessageID) (updated bool) 20 | UpdateConfirmation(oldOpinion Color, maxOpinion Color) 21 | Tangle() *Tangle 22 | } 23 | 24 | type OpinionManager struct { 25 | events *OpinionManagerEvents 26 | 27 | tangle *Tangle 28 | ownOpinion Color 29 | peerOpinions map[network.PeerID]*Opinion 30 | approvalWeights map[Color]uint64 31 | colorConfirmed bool 32 | } 33 | 34 | func NewOpinionManager(tangle *Tangle) (opinionManager *OpinionManager) { 35 | return &OpinionManager{ 36 | events: &OpinionManagerEvents{ 37 | OpinionFormed: events.NewEvent(messageIDEventCaller), 38 | OpinionChanged: events.NewEvent(opinionChangedEventHandler), 39 | ApprovalWeightUpdated: events.NewEvent(approvalWeightUpdatedHandler), 40 | MinConfirmedWeightUpdated: events.NewEvent(approvalWeightUpdatedHandler), 41 | ColorConfirmed: events.NewEvent(colorEventHandler), 42 | ColorUnconfirmed: events.NewEvent(reorgEventHandler), 43 | }, 44 | 45 | tangle: tangle, 46 | peerOpinions: make(map[network.PeerID]*Opinion), 47 | approvalWeights: make(map[Color]uint64), 48 | colorConfirmed: false, 49 | } 50 | } 51 | 52 | func (o *OpinionManager) ApprovalWeights() map[Color]uint64 { 53 | return o.approvalWeights 54 | } 55 | 56 | func (o *OpinionManager) Events() *OpinionManagerEvents { 57 | return o.events 58 | } 59 | 60 | func (o *OpinionManager) Tangle() *Tangle { 61 | return o.tangle 62 | } 63 | 64 | func (o *OpinionManager) Setup() { 65 | o.tangle.Booker.Events.MessageBooked.Attach(events.NewClosure(o.FormOpinion)) 66 | } 67 | 68 | // FormOpinion of the current tangle. 69 | // The opinion is determined by the color with the most approvalWeight. 70 | func (o *OpinionManager) FormOpinion(messageID MessageID) { 71 | defer o.events.OpinionFormed.Trigger(messageID) 72 | 73 | if updated := o.UpdateWeights(messageID); !updated { 74 | return 75 | } 76 | // Here we accumulate the approval weights in our local tangle. 77 | o.WeightsUpdated() 78 | } 79 | 80 | func (o *OpinionManager) UpdateWeights(messageID MessageID) (updated bool) { 81 | message := o.tangle.Storage.Message(messageID) 82 | messageMetadata := o.tangle.Storage.MessageMetadata(messageID) 83 | 84 | if messageMetadata.InheritedColor() == UndefinedColor { 85 | return 86 | } 87 | 88 | lastOpinion, exist := o.peerOpinions[message.Issuer] 89 | if !exist { 90 | lastOpinion = &Opinion{ 91 | PeerID: message.Issuer, 92 | } 93 | o.peerOpinions[message.Issuer] = lastOpinion 94 | } 95 | 96 | if message.SequenceNumber <= lastOpinion.SequenceNumber { 97 | return 98 | } 99 | lastOpinion.SequenceNumber = message.SequenceNumber 100 | 101 | if lastOpinion.Color == messageMetadata.InheritedColor() { 102 | return 103 | } 104 | 105 | if exist { 106 | // We calculate the approval weight of the branch based on the node who issued the message to the branch (i.e., it already voted for the branch). 107 | o.approvalWeights[lastOpinion.Color] -= o.tangle.WeightDistribution.Weight(message.Issuer) 108 | o.events.ApprovalWeightUpdated.Trigger(lastOpinion.Color, int64(-o.tangle.WeightDistribution.Weight(message.Issuer))) 109 | 110 | // Record the min confirmed weight 111 | // When the weight of the color < confirmation threshold, but the color is still not unconfirmed yet. 
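
An aside on the bookkeeping in UpdateWeights here: when a peer's newest message carries a different color than its previously recorded opinion, that peer's weight is subtracted from the old color (above) and added to the new one (just below). A minimal standalone sketch of this re-accounting, using simplified types and made-up weights rather than the simulator's configuration:

package main

import "fmt"

type Color int

// moveWeight re-books a peer's weight from its old opinion color to its new one,
// mirroring the subtract-then-add pattern in OpinionManager.UpdateWeights.
func moveWeight(approvalWeights map[Color]uint64, weight uint64, oldColor, newColor Color) {
	approvalWeights[oldColor] -= weight
	approvalWeights[newColor] += weight
}

func main() {
	blue, red := Color(1), Color(2)
	aw := map[Color]uint64{blue: 60, red: 40}
	moveWeight(aw, 25, blue, red)  // a peer with weight 25 switches its opinion from Blue to Red
	fmt.Println(aw[blue], aw[red]) // 35 65
}
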
112 | if o.colorConfirmed && o.ownOpinion == lastOpinion.Color && !o.checkColorConfirmed(o.ownOpinion) { 113 | o.events.MinConfirmedWeightUpdated.Trigger(lastOpinion.Color, int64(o.approvalWeights[lastOpinion.Color])) 114 | } 115 | } 116 | 117 | // We calculate the approval weight of the branch based on the node who issued the message to the branch (i.e., it already voted for the branch). 118 | o.approvalWeights[messageMetadata.InheritedColor()] += o.tangle.WeightDistribution.Weight(message.Issuer) 119 | o.events.ApprovalWeightUpdated.Trigger(messageMetadata.InheritedColor(), int64(o.tangle.WeightDistribution.Weight(message.Issuer))) 120 | 121 | lastOpinion.Color = messageMetadata.InheritedColor() 122 | updated = true 123 | return 124 | } 125 | 126 | func (o *OpinionManager) Opinion() Color { 127 | return o.ownOpinion 128 | } 129 | 130 | func (o *OpinionManager) SetOpinion(opinion Color) { 131 | if oldOpinion := o.ownOpinion; oldOpinion != opinion { 132 | o.events.OpinionChanged.Trigger(oldOpinion, opinion, int64(o.Tangle().WeightDistribution.Weight(o.Tangle().Peer.ID)), o.tangle.Peer.ID) 133 | } 134 | o.ownOpinion = opinion 135 | } 136 | 137 | func (o *OpinionManager) UpdateConfirmation(oldOpinion Color, maxOpinion Color) { 138 | if o.colorConfirmed && maxOpinion != oldOpinion { 139 | o.colorConfirmed = false 140 | o.Events().ColorUnconfirmed.Trigger(oldOpinion, int64(o.approvalWeights[o.ownOpinion]), int64(o.tangle.WeightDistribution.Weight(o.tangle.Peer.ID))) 141 | } 142 | 143 | if o.checkColorConfirmed(maxOpinion) && !o.colorConfirmed { 144 | // Here we accumulate the approval weights in our local tangle. 145 | o.Events().ColorConfirmed.Trigger(maxOpinion, int64(o.tangle.WeightDistribution.Weight(o.tangle.Peer.ID))) 146 | o.colorConfirmed = true 147 | } 148 | } 149 | 150 | // Update the opinions counter and ownOpinion based on the highest peer color value and maxApprovalWeight 151 | // Each Color has approvalWeight. 
The Color with maxApprovalWeight determines the ownOpinion 152 | func (o *OpinionManager) WeightsUpdated() { 153 | maxOpinion := getMaxOpinion(o.approvalWeights) 154 | oldOpinion := o.ownOpinion 155 | if maxOpinion != oldOpinion { 156 | o.ownOpinion = maxOpinion 157 | o.Events().OpinionChanged.Trigger(oldOpinion, maxOpinion, int64(o.tangle.WeightDistribution.Weight(o.tangle.Peer.ID))) 158 | } 159 | o.UpdateConfirmation(oldOpinion, maxOpinion) 160 | } 161 | 162 | func (o *OpinionManager) checkColorConfirmed(newOpinion Color) bool { 163 | if config.Params.ConfirmationThresholdAbsolute { 164 | return float64(o.approvalWeights[newOpinion]) > float64(config.Params.NodesTotalWeight)*config.Params.ConfirmationThreshold 165 | } else { 166 | aw := make(map[Color]uint64) 167 | for key, value := range o.approvalWeights { 168 | if key != newOpinion { 169 | aw[key] = value 170 | } 171 | } 172 | alternativeOpinion := getMaxOpinion(aw) 173 | return float64(o.approvalWeights[newOpinion])-float64(o.approvalWeights[alternativeOpinion]) > float64(config.Params.NodesTotalWeight)*config.Params.ConfirmationThreshold 174 | } 175 | } 176 | 177 | func getMaxOpinion(aw map[Color]uint64) Color { 178 | maxApprovalWeight := uint64(0) 179 | maxOpinion := UndefinedColor 180 | for color, approvalWeight := range aw { 181 | if approvalWeight > maxApprovalWeight || approvalWeight == maxApprovalWeight && color < maxOpinion || maxOpinion == UndefinedColor { 182 | maxApprovalWeight = approvalWeight 183 | maxOpinion = color 184 | } 185 | } 186 | return maxOpinion 187 | } 188 | 189 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 190 | 191 | // region Opinion ////////////////////////////////////////////////////////////////////////////////////////////////////// 192 | 193 | type Opinion struct { 194 | PeerID network.PeerID 195 | Color Color 196 | SequenceNumber uint64 197 | } 198 | 199 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 200 | 201 | // region OpinionManagerEvents ///////////////////////////////////////////////////////////////////////////////////////// 202 | 203 | type OpinionManagerEvents struct { 204 | OpinionFormed *events.Event 205 | OpinionChanged *events.Event 206 | ApprovalWeightUpdated *events.Event 207 | MinConfirmedWeightUpdated *events.Event 208 | ColorConfirmed *events.Event 209 | ColorUnconfirmed *events.Event 210 | } 211 | 212 | func opinionChangedEventHandler(handler interface{}, params ...interface{}) { 213 | handler.(func(Color, Color, int64))(params[0].(Color), params[1].(Color), params[2].(int64)) 214 | } 215 | func colorEventHandler(handler interface{}, params ...interface{}) { 216 | handler.(func(Color, int64))(params[0].(Color), params[1].(int64)) 217 | } 218 | func reorgEventHandler(handler interface{}, params ...interface{}) { 219 | handler.(func(Color, int64, int64))(params[0].(Color), params[1].(int64), params[2].(int64)) 220 | } 221 | func approvalWeightUpdatedHandler(handler interface{}, params ...interface{}) { 222 | handler.(func(Color, int64))(params[0].(Color), params[1].(int64)) 223 | } 224 | 225 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 226 | -------------------------------------------------------------------------------- /multiverse/requester.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | 
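
To make the selection rule above concrete, here is a self-contained sketch of getMaxOpinion driven by made-up approval weights. One detail worth noting: on an exact tie the comparison color < maxOpinion keeps the numerically smaller Color, whereas the doc comment in models.go describes the larger value winning the tie.

package main

import "fmt"

type Color int64

const (
	UndefinedColor Color = 0
	Blue           Color = 1
	Red            Color = 2
	Green          Color = 3
)

// getMaxOpinion mirrors the selection loop in opinion_manager.go: the highest
// approval weight wins; equal weights fall back to the smaller Color value.
func getMaxOpinion(aw map[Color]uint64) Color {
	maxApprovalWeight := uint64(0)
	maxOpinion := UndefinedColor
	for color, approvalWeight := range aw {
		if approvalWeight > maxApprovalWeight || approvalWeight == maxApprovalWeight && color < maxOpinion || maxOpinion == UndefinedColor {
			maxApprovalWeight = approvalWeight
			maxOpinion = color
		}
	}
	return maxOpinion
}

func main() {
	fmt.Println(getMaxOpinion(map[Color]uint64{Blue: 40, Red: 55, Green: 5})) // 2 (Red has the most weight)
	fmt.Println(getMaxOpinion(map[Color]uint64{Blue: 50, Red: 50}))           // 1 (Blue wins the tie)
}
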
import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/iotaledger/hive.go/events" 8 | "github.com/iotaledger/hive.go/timedexecutor" 9 | ) 10 | 11 | const retryInterval = 5 * time.Second 12 | 13 | // region Requester //////////////////////////////////////////////////////////////////////////////////////////////////// 14 | 15 | type Requester struct { 16 | Events *RequesterEvents 17 | 18 | tangle *Tangle 19 | timedExecutor *timedexecutor.TimedExecutor 20 | queuedElements map[MessageID]*timedexecutor.ScheduledTask 21 | mutex sync.Mutex 22 | } 23 | 24 | func NewRequester(tangle *Tangle) (requester *Requester) { 25 | requester = &Requester{ 26 | Events: &RequesterEvents{ 27 | Request: events.NewEvent(messageIDEventCaller), 28 | }, 29 | 30 | tangle: tangle, 31 | timedExecutor: timedexecutor.New(1), 32 | queuedElements: make(map[MessageID]*timedexecutor.ScheduledTask), 33 | } 34 | 35 | return 36 | } 37 | 38 | func (r *Requester) Setup() { 39 | r.tangle.Solidifier.Events.MessageMissing.Attach(events.NewClosure(r.StartRequest)) 40 | r.tangle.Storage.Events.MessageStored.Attach(events.NewClosure(func(messageID MessageID, message *Message, messageMetadata *MessageMetadata) { 41 | r.StopRequest(messageID) 42 | })) 43 | } 44 | 45 | func (r *Requester) StartRequest(messageID MessageID) { 46 | // Comment out this funcion to turn off solidifier 47 | 48 | r.mutex.Lock() 49 | defer r.mutex.Unlock() 50 | 51 | if _, requestExists := r.queuedElements[messageID]; requestExists { 52 | return 53 | } 54 | 55 | r.triggerRequestAndScheduleRetry(messageID) 56 | } 57 | 58 | func (r *Requester) StopRequest(messageID MessageID) { 59 | r.mutex.Lock() 60 | defer r.mutex.Unlock() 61 | 62 | request, requestExists := r.queuedElements[messageID] 63 | if !requestExists { 64 | return 65 | } 66 | 67 | request.Cancel() 68 | delete(r.queuedElements, messageID) 69 | } 70 | 71 | func (r *Requester) triggerRequestAndScheduleRetry(messageID MessageID) { 72 | r.Events.Request.Trigger(messageID) 73 | 74 | r.queuedElements[messageID] = r.timedExecutor.ExecuteAfter(func() { 75 | r.retry(messageID) 76 | }, retryInterval) 77 | } 78 | 79 | func (r *Requester) retry(messageID MessageID) { 80 | r.mutex.Lock() 81 | defer r.mutex.Unlock() 82 | 83 | if _, requestExists := r.queuedElements[messageID]; !requestExists { 84 | return 85 | } 86 | 87 | r.triggerRequestAndScheduleRetry(messageID) 88 | } 89 | 90 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 91 | 92 | // region RequesterEvents ////////////////////////////////////////////////////////////////////////////////////////////// 93 | 94 | type RequesterEvents struct { 95 | Request *events.Event 96 | } 97 | 98 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 99 | -------------------------------------------------------------------------------- /multiverse/scheduler.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "container/heap" 5 | "container/ring" 6 | "time" 7 | 8 | "github.com/iotaledger/hive.go/events" 9 | "github.com/iotaledger/multivers-simulation/config" 10 | "github.com/iotaledger/multivers-simulation/network" 11 | ) 12 | 13 | // region Scheduler Interface ////////////////////////////////////////////////////////////////////////////////////////////// 14 | 15 | // Priority Queue for Message 16 | type PriorityQueue []Message 17 | 18 | type IssuerQueue []Message 19 | 20 | 
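
Before the burn-policy and scheduler definitions that follow, here is a standalone sketch of how a ready queue like the PriorityQueue above behaves under container/heap: the message with the highest ManaBurnValue is popped first, with an earlier issuance time breaking ties. The sketch uses a pared-down message type and time.Time.Before for the tie-break; note that the Less methods further below compare only the Nanosecond component of the issuance time, which ignores the seconds part.

package main

import (
	"container/heap"
	"fmt"
	"time"
)

// demoMessage is a pared-down stand-in for multiverse.Message.
type demoMessage struct {
	ID            int
	ManaBurnValue float64
	IssuanceTime  time.Time
}

// demoQueue orders messages by descending burn value, then ascending issuance time.
type demoQueue []demoMessage

func (q demoQueue) Len() int { return len(q) }
func (q demoQueue) Less(i, j int) bool {
	if q[i].ManaBurnValue != q[j].ManaBurnValue {
		return q[i].ManaBurnValue > q[j].ManaBurnValue
	}
	return q[i].IssuanceTime.Before(q[j].IssuanceTime)
}
func (q demoQueue) Swap(i, j int)  { q[i], q[j] = q[j], q[i] }
func (q *demoQueue) Push(x any)    { *q = append(*q, x.(demoMessage)) }
func (q *demoQueue) Pop() any {
	old := *q
	n := len(old)
	x := old[n-1]
	*q = old[:n-1]
	return x
}

func main() {
	now := time.Now()
	q := &demoQueue{}
	heap.Init(q)
	heap.Push(q, demoMessage{ID: 1, ManaBurnValue: 2.0, IssuanceTime: now})
	heap.Push(q, demoMessage{ID: 2, ManaBurnValue: 5.0, IssuanceTime: now.Add(time.Second)})
	heap.Push(q, demoMessage{ID: 3, ManaBurnValue: 5.0, IssuanceTime: now})
	for q.Len() > 0 {
		fmt.Println(heap.Pop(q).(demoMessage).ID) // 3, 2, 1: highest burn first, earlier issuance wins the tie
	}
}
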
type BurnPolicyType int 21 | 22 | const ( 23 | NoBurn BurnPolicyType = 0 24 | Anxious BurnPolicyType = 1 25 | Greedy1 BurnPolicyType = 2 26 | Greedy10 BurnPolicyType = 3 27 | ) 28 | 29 | type Scheduler interface { 30 | Setup() 31 | IncrementAccessMana(float64) 32 | DecreaseNodeAccessMana(network.PeerID, float64) float64 33 | BurnValue(time.Time) (float64, bool) 34 | EnqueueMessage(MessageID) 35 | ScheduleMessage() 36 | Events() *SchedulerEvents 37 | ReadyLen() int 38 | NonReadyLen() int 39 | GetNodeAccessMana(network.PeerID) float64 40 | GetMaxManaBurn() float64 41 | IssuerQueueLen(network.PeerID) int 42 | Deficit(network.PeerID) float64 43 | RateSetter() bool 44 | } 45 | 46 | func NewScheduler(tangle *Tangle) (s Scheduler) { 47 | if config.Params.SchedulerType == "ManaBurn" { 48 | readyHeap := &PriorityQueue{} 49 | heap.Init(readyHeap) 50 | s = &MBScheduler{ 51 | tangle: tangle, 52 | readyQueue: readyHeap, 53 | nonReadyMap: make(map[MessageID]*Message), 54 | accessMana: make(map[network.PeerID]float64, config.Params.NodesCount), 55 | events: &SchedulerEvents{ 56 | MessageScheduled: events.NewEvent(messageIDEventCaller), 57 | MessageDropped: events.NewEvent(messageIDEventCaller), 58 | MessageEnqueued: events.NewEvent(schedulerEventCaller), 59 | }, 60 | } 61 | } else if config.Params.SchedulerType == "ICCA+" { 62 | s = &ICCAScheduler{ 63 | tangle: tangle, 64 | nonReadyMap: make(map[MessageID]*Message), 65 | accessMana: make(map[network.PeerID]float64, config.Params.NodesCount), 66 | deficits: make(map[network.PeerID]float64, config.Params.NodesCount), 67 | quanta: make(map[network.PeerID]float64, config.Params.NodesCount), 68 | issuerQueues: make(map[network.PeerID]*IssuerQueue, config.Params.NodesCount), 69 | roundRobin: ring.New(config.Params.NodesCount), 70 | events: &SchedulerEvents{ 71 | MessageScheduled: events.NewEvent(messageIDEventCaller), 72 | MessageDropped: events.NewEvent(messageIDEventCaller), 73 | MessageEnqueued: events.NewEvent(schedulerEventCaller), 74 | }, 75 | } 76 | } else { 77 | s = &NoScheduler{ 78 | tangle: tangle, 79 | 80 | events: &SchedulerEvents{ 81 | MessageScheduled: events.NewEvent(messageIDEventCaller), 82 | }, 83 | } 84 | } 85 | return 86 | } 87 | 88 | // / region Priority Queue //////////////////////////////////////////////////////////////////////////////// 89 | func (h PriorityQueue) Len() int { return len(h) } 90 | func (h PriorityQueue) Less(i, j int) bool { 91 | if h[i].ManaBurnValue > h[j].ManaBurnValue { 92 | return true 93 | } else if h[i].ManaBurnValue == h[j].ManaBurnValue { 94 | return float64(h[i].IssuanceTime.Nanosecond()) < float64(h[j].IssuanceTime.Nanosecond()) 95 | } else { 96 | return false 97 | } 98 | } 99 | func (h PriorityQueue) Swap(i, j int) { 100 | h[i], h[j] = h[j], h[i] 101 | } 102 | 103 | func (h *PriorityQueue) Push(m any) { 104 | // Push and Pop use pointer receivers because they modify the slice's length, 105 | // not just its contents. 
106 | *h = append(*h, m.(Message)) 107 | } 108 | 109 | func (h *PriorityQueue) Pop() any { 110 | old := *h 111 | n := len(old) 112 | x := old[n-1] 113 | *h = old[0 : n-1] 114 | return x 115 | } 116 | 117 | func (h PriorityQueue) tail() (tail int) { 118 | for i := range h { 119 | if !h.Less(i, tail) { // less means more mana burned/older issue time 120 | tail = i 121 | } 122 | } 123 | return 124 | } 125 | 126 | // region Issuer Queue //////////////////////////////////////////////////////////////////////////////// 127 | func (h IssuerQueue) Len() int { return len(h) } 128 | func (h IssuerQueue) Less(i, j int) bool { 129 | return float64(h[i].IssuanceTime.Nanosecond()) < float64(h[j].IssuanceTime.Nanosecond()) 130 | } 131 | func (h IssuerQueue) Swap(i, j int) { 132 | h[i], h[j] = h[j], h[i] 133 | } 134 | 135 | func (h *IssuerQueue) Push(m any) { 136 | // Push and Pop use pointer receivers because they modify the slice's length, 137 | // not just its contents. 138 | *h = append(*h, m.(Message)) 139 | } 140 | 141 | func (h *IssuerQueue) Pop() any { 142 | old := *h 143 | n := len(old) 144 | x := old[n-1] 145 | *h = old[0 : n-1] 146 | return x 147 | } 148 | 149 | // Scheduler Events /////////////////////////////////////////////////////////////////////////////////////////////////////////// 150 | 151 | type SchedulerEvents struct { 152 | MessageScheduled *events.Event 153 | MessageDropped *events.Event 154 | MessageEnqueued *events.Event 155 | } 156 | 157 | func schedulerEventCaller(handler interface{}, params ...interface{}) { 158 | handler.(func(int, int))(params[0].(int), params[1].(int)) 159 | } 160 | -------------------------------------------------------------------------------- /multiverse/solidifer.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/events" 5 | ) 6 | 7 | // region Solidifier /////////////////////////////////////////////////////////////////////////////////////////////////// 8 | 9 | type Solidifier struct { 10 | tangle *Tangle 11 | Events *SolidifierEvents 12 | } 13 | 14 | func NewSolidifier(tangle *Tangle) *Solidifier { 15 | return &Solidifier{ 16 | tangle: tangle, 17 | Events: &SolidifierEvents{ 18 | MessageSolid: events.NewEvent(messageIDEventCaller), 19 | MessageMissing: events.NewEvent(messageIDEventCaller), 20 | }, 21 | } 22 | } 23 | 24 | func (s *Solidifier) Setup() { 25 | s.tangle.Storage.Events.MessageStored.Attach(events.NewClosure(func(messageID MessageID, message *Message, messageMetadata *MessageMetadata) { 26 | s.Solidify(messageID) 27 | })) 28 | } 29 | 30 | func (s *Solidifier) Solidify(messageID MessageID) { 31 | message := s.tangle.Storage.Message(messageID) 32 | // if message is not solid, nothing more to do 33 | if !s.messageSolid(message) { 34 | return 35 | } 36 | messageMetadata := s.tangle.Storage.MessageMetadata(messageID) 37 | // if message was already solid, nothing more to do 38 | if !messageMetadata.SetSolid(true) { 39 | return 40 | } 41 | // if message was not already solid, make sure future cone is solid too. 
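
The recursion just below (trigger MessageSolid, then revisit strong and weak children) is what lets solidity propagate forward once a previously missing parent arrives. A compact standalone sketch of that idea over a toy parent/child index with made-up message IDs:

package main

import "fmt"

type MessageID int

// toy DAG: parents per message, plus a reverse child index
var parents = map[MessageID][]MessageID{2: {1}, 3: {2}}
var children = map[MessageID][]MessageID{1: {2}, 2: {3}}
var solid = map[MessageID]bool{}

// solidify marks id solid once all of its parents are solid and then revisits its
// children, mirroring how Solidifier.Solidify walks the future cone.
func solidify(id MessageID) {
	if solid[id] {
		return
	}
	for _, p := range parents[id] {
		if !solid[p] {
			return // a parent is still missing or unsolid; stop here
		}
	}
	solid[id] = true
	for _, c := range children[id] {
		solidify(c)
	}
}

func main() {
	// messages 2 and 3 arrive first; neither can become solid without message 1
	solidify(2)
	solidify(3)
	fmt.Println(solid[2], solid[3]) // false false
	// once the missing message 1 arrives, solidity propagates through the future cone
	solid[1] = true
	solidify(2)
	fmt.Println(solid[2], solid[3]) // true true
}
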
42 | s.Events.MessageSolid.Trigger(message.ID) 43 | strongChildrenIDs := s.tangle.Storage.StrongChildren(message.ID) 44 | for strongChildID := range strongChildrenIDs { 45 | s.Solidify(strongChildID) 46 | } 47 | weakChildrenIDs := s.tangle.Storage.WeakChildren(message.ID) 48 | for weakChildID := range weakChildrenIDs { 49 | s.Solidify(weakChildID) 50 | } 51 | 52 | } 53 | 54 | func (s *Solidifier) messageSolid(message *Message) (isSolid bool) { 55 | isSolid = true 56 | if !s.parentsSolid(message.StrongParents) { 57 | isSolid = false 58 | } 59 | if !s.parentsSolid(message.WeakParents) { 60 | isSolid = false 61 | } 62 | 63 | return 64 | } 65 | 66 | func (s *Solidifier) parentsSolid(parentMessageIDs MessageIDs) (parentsSolid bool) { 67 | parentsSolid = true 68 | for parentMessageID := range parentMessageIDs { 69 | if parentMessageID == Genesis { 70 | continue 71 | } 72 | 73 | parentMessageMetadata := s.tangle.Storage.MessageMetadata(parentMessageID) 74 | if parentMessageMetadata == nil { 75 | s.Events.MessageMissing.Trigger(parentMessageID) 76 | log.Debug("Solidification request sent.") 77 | parentsSolid = false 78 | continue 79 | } 80 | 81 | if !parentMessageMetadata.Solid() { 82 | parentsSolid = false 83 | } 84 | } 85 | 86 | return 87 | } 88 | 89 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 90 | 91 | // region SolidifierEvents ///////////////////////////////////////////////////////////////////////////////////////////// 92 | 93 | type SolidifierEvents struct { 94 | MessageSolid *events.Event 95 | MessageMissing *events.Event 96 | } 97 | 98 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 99 | -------------------------------------------------------------------------------- /multiverse/storage.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "math" 5 | "sync" 6 | "time" 7 | 8 | "github.com/iotaledger/hive.go/events" 9 | "github.com/iotaledger/multivers-simulation/config" 10 | ) 11 | 12 | // region Storage ////////////////////////////////////////////////////////////////////////////////////////////////////// 13 | 14 | type Storage struct { 15 | Events *StorageEvents 16 | 17 | messageDB map[MessageID]*Message 18 | messageMetadataDB map[MessageID]*MessageMetadata 19 | strongChildrenDB map[MessageID]MessageIDs 20 | weakChildrenDB map[MessageID]MessageIDs 21 | slotDB map[SlotIndex]MessageIDs 22 | acceptedSlotDB map[SlotIndex]MessageIDs 23 | rmc map[SlotIndex]float64 24 | genesisTime time.Time 25 | ATT time.Time 26 | 27 | slotMutex sync.Mutex 28 | } 29 | 30 | func NewStorage() (storage *Storage) { 31 | return &Storage{ 32 | Events: &StorageEvents{ 33 | MessageStored: events.NewEvent(messageEventCaller), 34 | }, 35 | 36 | messageDB: make(map[MessageID]*Message), 37 | messageMetadataDB: make(map[MessageID]*MessageMetadata), 38 | strongChildrenDB: make(map[MessageID]MessageIDs), 39 | weakChildrenDB: make(map[MessageID]MessageIDs), 40 | slotDB: make(map[SlotIndex]MessageIDs), 41 | acceptedSlotDB: make(map[SlotIndex]MessageIDs), 42 | rmc: make(map[SlotIndex]float64), 43 | } 44 | } 45 | 46 | func (s *Storage) Setup(genesisTime time.Time) { 47 | s.genesisTime = genesisTime 48 | s.ATT = genesisTime 49 | } 50 | 51 | func (s *Storage) Store(message *Message) (*MessageMetadata, bool) { 52 | if _, exists := s.messageDB[message.ID]; exists { 53 | return &MessageMetadata{}, false 54 | } 
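
Store continues below by bucketing the message into a slot and applying the RMC admission check. As a quick illustration of the slot arithmetic used there, where the elapsed time since genesis is divided by the slot duration scaled by the SlowdownFactor and truncated to an integer index, here is a standalone sketch with assumed example parameters (10-second slots, slowdown factor 1) rather than the simulator's actual config:

package main

import (
	"fmt"
	"time"
)

type SlotIndex int

// slotIndex mirrors Storage.SlotIndex: elapsed time since genesis divided by the
// slowdown-scaled slot duration, truncated to an integer slot number.
func slotIndex(genesis, issuance time.Time, slotTime time.Duration, slowdown int) SlotIndex {
	elapsed := issuance.Sub(genesis)
	return SlotIndex(float64(elapsed) / (float64(slotTime) * float64(slowdown)))
}

func main() {
	genesis := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
	slotTime := 10 * time.Second // assumed example value
	fmt.Println(slotIndex(genesis, genesis.Add(25*time.Second), slotTime, 1)) // 2
	fmt.Println(slotIndex(genesis, genesis.Add(95*time.Second), slotTime, 1)) // 9
}
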
55 | slotIndex := s.SlotIndex(message.IssuanceTime) 56 | s.slotMutex.Lock() 57 | defer s.slotMutex.Unlock() 58 | if _, exists := s.slotDB[slotIndex]; !exists { 59 | s.slotDB[slotIndex] = NewMessageIDs() 60 | } 61 | if _, exists := s.rmc[slotIndex]; !exists { 62 | s.NewRMC(slotIndex) 63 | } 64 | if message.ManaBurnValue < s.rmc[slotIndex] { // RMC will always be zero if not in ICCA+ 65 | log.Debug("Message dropped due to Mana burn < RMC", message.Issuer, message.ManaBurnValue, s.rmc[slotIndex]) 66 | return &MessageMetadata{}, false // don't store this message if it burns less than RMC 67 | } 68 | // store to slot storage 69 | s.slotDB[slotIndex].Add(message.ID) 70 | // store message and metadata 71 | s.messageDB[message.ID] = message 72 | messageMetadata := &MessageMetadata{ 73 | id: message.ID, 74 | weightSlice: make([]byte, int(math.Ceil(float64(config.Params.NodesCount)/8.0))), 75 | arrivalTime: time.Now(), 76 | ready: false, 77 | } 78 | // check if this should be orphaned 79 | if s.TooOld(message) { 80 | messageMetadata.SetOrphanTime(time.Now()) 81 | } 82 | s.messageMetadataDB[message.ID] = messageMetadata 83 | // store child references 84 | s.storeChildReferences(message.ID, s.strongChildrenDB, message.StrongParents) 85 | s.storeChildReferences(message.ID, s.weakChildrenDB, message.WeakParents) 86 | return messageMetadata, true 87 | } 88 | 89 | func (s *Storage) Message(messageID MessageID) (message *Message) { 90 | return s.messageDB[messageID] 91 | } 92 | 93 | func (s *Storage) MessageMetadata(messageID MessageID) (messageMetadata *MessageMetadata) { 94 | return s.messageMetadataDB[messageID] 95 | } 96 | 97 | func (s *Storage) StrongChildren(messageID MessageID) (strongChildren MessageIDs) { 98 | return s.strongChildrenDB[messageID] 99 | } 100 | 101 | func (s *Storage) WeakChildren(messageID MessageID) (weakChildren MessageIDs) { 102 | return s.weakChildrenDB[messageID] 103 | } 104 | 105 | func (s *Storage) storeChildReferences(messageID MessageID, childReferenceDB map[MessageID]MessageIDs, parents MessageIDs) { 106 | for parent := range parents { 107 | if _, exists := childReferenceDB[parent]; !exists { 108 | childReferenceDB[parent] = NewMessageIDs() 109 | } 110 | 111 | childReferenceDB[parent].Add(messageID) 112 | } 113 | } 114 | 115 | func (s *Storage) isReady(messageID MessageID) bool { 116 | if !s.MessageMetadata(messageID).Solid() { 117 | return false 118 | } 119 | message := s.Message(messageID) 120 | for strongParentID := range message.StrongParents { 121 | if strongParentID == Genesis { 122 | continue 123 | } 124 | strongParentMetadata := s.MessageMetadata(strongParentID) 125 | if strongParentMetadata == nil { 126 | panic("Strong Parent Metadata is empty") 127 | } 128 | if !strongParentMetadata.Eligible() { 129 | return false 130 | } 131 | } 132 | for weakParentID := range message.WeakParents { 133 | weakParentMetadata := s.MessageMetadata(weakParentID) 134 | if weakParentID == Genesis { 135 | continue 136 | } 137 | if !weakParentMetadata.Eligible() { 138 | return false 139 | } 140 | } 141 | return true 142 | } 143 | 144 | func (s *Storage) SlotIndex(messageTime time.Time) SlotIndex { 145 | timeSinceGenesis := messageTime.Sub(s.genesisTime) 146 | return SlotIndex(float64(timeSinceGenesis) / (float64(config.Params.SlotTime) * float64(config.Params.SlowdownFactor))) 147 | } 148 | 149 | func (s *Storage) Slot(index SlotIndex) MessageIDs { 150 | return s.slotDB[index] 151 | } 152 | 153 | func (s *Storage) AcceptedSlot(index SlotIndex) MessageIDs { 154 | return 
s.acceptedSlotDB[index] 155 | } 156 | 157 | // Get the messages count per slot 158 | func (s *Storage) MessagesCountPerSlot() map[SlotIndex]int { 159 | s.slotMutex.Lock() 160 | defer s.slotMutex.Unlock() 161 | counts := make(map[SlotIndex]int) 162 | for slotIndex, messages := range s.slotDB { 163 | counts[slotIndex] = len(messages) 164 | } 165 | return counts 166 | } 167 | 168 | // Get the total messages counts in range of slots 169 | func (s *Storage) MessagesCountInRange(startSlotIndex SlotIndex, endSlotIndex SlotIndex) int { 170 | count := 0 171 | for slotIndex := startSlotIndex; slotIndex < endSlotIndex; slotIndex++ { 172 | if _, exists := s.slotDB[slotIndex]; !exists { 173 | continue 174 | } 175 | count += len(s.slotDB[slotIndex]) 176 | } 177 | return count 178 | } 179 | 180 | func (s *Storage) RMC(slotIndex SlotIndex) float64 { 181 | s.slotMutex.Lock() 182 | defer s.slotMutex.Unlock() 183 | if _, exists := s.slotDB[slotIndex]; !exists { 184 | s.NewRMC(slotIndex) 185 | s.slotDB[slotIndex] = NewMessageIDs() 186 | } 187 | return s.rmc[slotIndex] 188 | } 189 | 190 | // func (s *Storage) NewRMC(currentSlotIndex SlotIndex) { 191 | // currentSlotStartTime := s.genesisTime.Add(time.Duration(float64(currentSlotIndex)*float64(config.Params.SlowdownFactor)) * config.Params.SlotTime) 192 | // if config.Params.SchedulerType != "ICCA+" { 193 | // s.rmc[currentSlotIndex] = 0.0 194 | // return 195 | // } 196 | // if currentSlotIndex == SlotIndex(0) { 197 | // s.rmc[currentSlotIndex] = config.Params.InitialRMC 198 | // return 199 | // } 200 | // s.rmc[currentSlotIndex] = s.rmc[currentSlotIndex-SlotIndex(1)] // keep RMC the same by default 201 | // if currentSlotStartTime.After(s.genesisTime.Add(config.Params.RMCTime * time.Duration(config.Params.SlowdownFactor))) { 202 | // n := len(s.AcceptedSlot(s.SlotIndex(currentSlotStartTime.Add(-config.Params.RMCTime)))) // number of messages k slots in the past 203 | // if n < int(config.Params.LowerRMCThreshold) { 204 | // s.rmc[currentSlotIndex] = math.Max(config.Params.RMCmin, s.rmc[currentSlotIndex]*config.Params.AlphaRMC) 205 | // } else if n > int(config.Params.UpperRMCThreshold) { 206 | // s.rmc[currentSlotIndex] = math.Min(config.Params.RMCmax, s.rmc[currentSlotIndex]*config.Params.BetaRMC) 207 | // } 208 | // } 209 | // } 210 | 211 | func (s *Storage) NewRMC(currentSlotIndex SlotIndex) { 212 | currentSlotStartTime := s.genesisTime.Add(time.Duration(float64(currentSlotIndex)*float64(config.Params.SlowdownFactor)) * config.Params.SlotTime) 213 | if config.Params.SchedulerType != "ICCA+" { 214 | s.rmc[currentSlotIndex] = 0.0 215 | return 216 | } 217 | if currentSlotIndex == SlotIndex(0) { 218 | s.rmc[currentSlotIndex] = config.Params.InitialRMC 219 | return 220 | } 221 | s.rmc[currentSlotIndex] = s.rmc[currentSlotIndex-SlotIndex(1)] // keep RMC the same by default 222 | 223 | // Update the RMC every RMCPeriodUpdate 224 | if currentSlotStartTime.After(s.genesisTime.Add(config.Params.RMCTime * time.Duration(config.Params.SlowdownFactor))) { 225 | // log.Debugf("CurrentSlotIndex %d", currentSlotIndex) 226 | if int(currentSlotIndex)%config.Params.RMCPeriodUpdate == 0 { 227 | traffic := s.MessagesCountInRange( 228 | currentSlotIndex-SlotIndex(config.Params.MinCommittableAge/config.Params.SlotTime)-SlotIndex(config.Params.RMCPeriodUpdate), 229 | currentSlotIndex-SlotIndex(config.Params.MinCommittableAge/config.Params.SlotTime)) / config.Params.RMCPeriodUpdate 230 | 231 | // 
currentSlotIndex-SlotIndex(config.Params.RMCTime/config.Params.SlotTime)-SlotIndex(config.Params.RMCPeriodUpdate), 232 | // currentSlotIndex-SlotIndex(config.Params.RMCTime/config.Params.SlotTime)) 233 | 234 | // a := currentSlotIndex-SlotIndex(config.Params.RMCTime/config.Params.SlotTime)-SlotIndex(config.Params.RMCPeriodUpdate) 235 | // b := currentSlotIndex-SlotIndex(config.Params.RMCTime/config.Params.SlotTime) 236 | 237 | // traffic := 0 238 | // for i := 0; i < config.Params.RMCPeriodUpdate; i++ { 239 | // // traffic += len(s.AcceptedSlot(s.SlotIndex(currentSlotStartTime.Add(-config.Params.MinCommittableAge-time.Duration(i) * config.Params.SlotTime)))) 240 | // traffic += len(s.AcceptedSlot(s.SlotIndex(currentSlotStartTime.Add(-config.Params.RMCTime -time.Duration(i) * config.Params.SlotTime)))) // number of messages k slots in the past 241 | // } 242 | // MessagesCountInRange 243 | // log.Debugf("Traffic: %d, Slot: %d, Slot a: %d, Slot b: %d", traffic, currentSlotIndex, a, b) 244 | // traffic = traffic 245 | // log.Debugf("Enter Branch, traffic after division: %d", traffic) 246 | 247 | // Modified 248 | // if traffic < config.Params.RMCPeriodUpdate*int(config.Params.LowerRMCThreshold) { 249 | // s.rmc[currentSlotIndex] = math.Max(config.Params.RMCmin, s.rmc[currentSlotIndex]*config.Params.AlphaRMC) 250 | // } else if traffic > config.Params.RMCPeriodUpdate*int(config.Params.UpperRMCThreshold) { 251 | // s.rmc[currentSlotIndex] = math.Min(config.Params.RMCmax, s.rmc[currentSlotIndex]*config.Params.BetaRMC) 252 | // } 253 | 254 | // log.Debugf("Traffic: %d", traffic) 255 | if traffic < int(config.Params.LowerRMCThreshold) { 256 | for i := 0; i < config.Params.RMCPeriodUpdate; i++ { 257 | s.rmc[currentSlotIndex+SlotIndex(i)] = math.Max( 258 | s.rmc[currentSlotIndex-SlotIndex(1)]-config.Params.RMCdecrease, config.Params.RMCmin) 259 | } 260 | // log.Debugf("LOW!!!!, rmc = %f", s.rmc[currentSlotIndex]) 261 | } else if traffic > int(config.Params.UpperRMCThreshold) { 262 | for i := 0; i < config.Params.RMCPeriodUpdate; i++ { 263 | s.rmc[currentSlotIndex+SlotIndex(i)] = math.Min( 264 | s.rmc[currentSlotIndex-SlotIndex(1)]+config.Params.RMCincrease, config.Params.RMCmax) 265 | } 266 | // log.Debugf("HIGH!!!!, rmc = %f", s.rmc[currentSlotIndex]) 267 | } else { 268 | for i := 0; i < config.Params.RMCPeriodUpdate; i++ { 269 | s.rmc[currentSlotIndex+SlotIndex(i)] = s.rmc[currentSlotIndex-SlotIndex(1)] 270 | } 271 | } 272 | } 273 | } 274 | } 275 | 276 | func (s *Storage) TooOld(message *Message) bool { 277 | return message.IssuanceTime.Before(s.ATT.Add(-config.Params.MinCommittableAge * time.Duration(config.Params.SlowdownFactor))) 278 | } 279 | 280 | func (s *Storage) AddToAcceptedSlot(message *Message) { 281 | s.slotMutex.Lock() 282 | defer s.slotMutex.Unlock() 283 | slotIndex := s.SlotIndex(message.IssuanceTime) 284 | if _, exists := s.acceptedSlotDB[slotIndex]; !exists { 285 | s.acceptedSlotDB[slotIndex] = NewMessageIDs() 286 | } 287 | // store to accepted slot storage 288 | s.acceptedSlotDB[slotIndex].Add(message.ID) 289 | // update accepted tange time 290 | if message.IssuanceTime.After(s.ATT) { 291 | s.ATT = message.IssuanceTime 292 | } 293 | } 294 | 295 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 296 | 297 | // region StorageEvents //////////////////////////////////////////////////////////////////////////////////////////////// 298 | 299 | type StorageEvents struct { 300 | MessageStored *events.Event 301 | } 302 
| 303 | func messageEventCaller(handler interface{}, params ...interface{}) { 304 | handler.(func(MessageID, *Message, *MessageMetadata))(params[0].(MessageID), params[1].(*Message), params[2].(*MessageMetadata)) 305 | } 306 | 307 | func messageIDEventCaller(handler interface{}, params ...interface{}) { 308 | handler.(func(MessageID))(params[0].(MessageID)) 309 | } 310 | 311 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 312 | -------------------------------------------------------------------------------- /multiverse/tangle.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/multivers-simulation/config" 7 | "github.com/iotaledger/multivers-simulation/network" 8 | ) 9 | 10 | type Tangle struct { 11 | Peer *network.Peer 12 | WeightDistribution *network.ConsensusWeightDistribution 13 | BandwidthDistribution *network.BandwidthDistribution 14 | GenesisTime time.Time 15 | Storage *Storage 16 | Solidifier *Solidifier 17 | ApprovalManager *ApprovalManager 18 | Requester *Requester 19 | Booker *Booker 20 | OpinionManager OpinionManagerInterface 21 | TipManager *TipManager 22 | MessageFactory *MessageFactory 23 | Utils *Utils 24 | Scheduler Scheduler 25 | } 26 | 27 | func NewTangle() (tangle *Tangle) { 28 | tangle = &Tangle{} 29 | 30 | tangle.Storage = NewStorage() 31 | tangle.Solidifier = NewSolidifier(tangle) 32 | tangle.Requester = NewRequester(tangle) 33 | tangle.Booker = NewBooker(tangle) 34 | tangle.OpinionManager = NewOpinionManager(tangle) 35 | tangle.TipManager = NewTipManager(tangle, config.Params.TSA) 36 | tangle.MessageFactory = NewMessageFactory(tangle, uint64(config.Params.NodesCount)) 37 | tangle.ApprovalManager = NewApprovalManager(tangle) 38 | tangle.Utils = NewUtils(tangle) 39 | tangle.Scheduler = NewScheduler(tangle) 40 | return 41 | } 42 | 43 | func (t *Tangle) Setup( 44 | peer *network.Peer, 45 | weightDistribution *network.ConsensusWeightDistribution, 46 | bandwidthDistribution *network.BandwidthDistribution, 47 | genesisTime time.Time, 48 | ) { 49 | t.Peer = peer 50 | t.WeightDistribution = weightDistribution 51 | t.BandwidthDistribution = bandwidthDistribution 52 | 53 | t.Storage.Setup(genesisTime) 54 | t.Solidifier.Setup() 55 | t.Requester.Setup() 56 | t.Booker.Setup() 57 | t.OpinionManager.Setup() 58 | t.TipManager.Setup() 59 | t.ApprovalManager.Setup() 60 | t.Scheduler.Setup() 61 | } 62 | 63 | func (t *Tangle) ProcessMessage(message *Message) { 64 | if messageMetadata, stored := t.Storage.Store(message); stored { 65 | t.Storage.Events.MessageStored.Trigger(message.ID, message, messageMetadata) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /multiverse/utils.go: -------------------------------------------------------------------------------- 1 | package multiverse 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/datastructure/walker" 5 | ) 6 | 7 | // region ////////////////////////////////////////////////////////////////////////////////////////////////////////////// 8 | 9 | type Utils struct { 10 | tangle *Tangle 11 | } 12 | 13 | func NewUtils(tangle *Tangle) *Utils { 14 | return &Utils{ 15 | tangle: tangle, 16 | } 17 | } 18 | 19 | func (u *Utils) WalkMessageIDs(callback func(messageID MessageID, walker *walker.Walker), entryPoints MessageIDs, revisitElements ...bool) { 20 | if len(entryPoints) == 0 { 21 | panic("you need to 
provide at least one entry point") 22 | } 23 | 24 | messageWalker := walker.New(revisitElements...) 25 | for messageID := range entryPoints { 26 | messageWalker.Push(messageID) 27 | } 28 | 29 | for messageWalker.HasNext() { 30 | messageID := messageWalker.Next().(MessageID) 31 | if messageID != Genesis { 32 | callback(messageID, messageWalker) 33 | } 34 | } 35 | } 36 | 37 | func (u *Utils) WalkMessages(callback func(message *Message, walker *walker.Walker), entryPoints MessageIDs, revisitElements ...bool) { 38 | u.WalkMessageIDs(func(messageID MessageID, walker *walker.Walker) { 39 | callback(u.tangle.Storage.Message(messageID), walker) 40 | }, entryPoints, revisitElements...) 41 | } 42 | 43 | func (u *Utils) WalkMessagesAndMetadata(callback func(message *Message, messageMetadata *MessageMetadata, walker *walker.Walker), entryPoints MessageIDs, revisitElements ...bool) { 44 | u.WalkMessageIDs(func(messageID MessageID, walker *walker.Walker) { 45 | callback(u.tangle.Storage.Message(messageID), u.tangle.Storage.MessageMetadata(messageID), walker) 46 | }, entryPoints, revisitElements...) 47 | } 48 | 49 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 50 | -------------------------------------------------------------------------------- /network/bandwidth.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | type BandwidthGenerator func( 4 | validatorNodeCount int, 5 | nonValidatorNodeCount int, 6 | validatorCommitteeBandwidth float64, 7 | nonValidatorCommitteeBandwidth float64, 8 | ) []float64 9 | 10 | // region BandwidthDistribution ////////////////////////////////////////////////////////////////////////////////// 11 | 12 | type BandwidthDistribution struct { 13 | bandwidth map[PeerID]float64 14 | totalBandwidth float64 15 | largestBandwidth float64 16 | } 17 | 18 | func NewBandwidthDistribution() *BandwidthDistribution { 19 | return &BandwidthDistribution{ 20 | bandwidth: make(map[PeerID]float64), 21 | } 22 | } 23 | 24 | func (c *BandwidthDistribution) SetBandwidth(peerID PeerID, bandwidth float64) { 25 | if existingBandwidth, exists := c.bandwidth[peerID]; exists { 26 | c.totalBandwidth -= existingBandwidth 27 | 28 | if c.largestBandwidth == existingBandwidth { 29 | c.rescanForLargestBandwidth() 30 | } 31 | } 32 | 33 | c.bandwidth[peerID] = bandwidth 34 | c.totalBandwidth += bandwidth 35 | 36 | if bandwidth > c.largestBandwidth { 37 | c.largestBandwidth = bandwidth 38 | } 39 | } 40 | 41 | func (c *BandwidthDistribution) Bandwidth(peerID PeerID) float64 { 42 | return c.bandwidth[peerID] 43 | } 44 | 45 | func (c *BandwidthDistribution) Bandwidths() map[PeerID]float64 { 46 | return c.bandwidth 47 | } 48 | 49 | func (c *BandwidthDistribution) TotalBandwidth() float64 { 50 | return c.totalBandwidth 51 | } 52 | 53 | func (c *BandwidthDistribution) LargestBandwidth() float64 { 54 | return c.largestBandwidth 55 | } 56 | 57 | func (c *BandwidthDistribution) rescanForLargestBandwidth() { 58 | c.largestBandwidth = 0 59 | for _, bandwidth := range c.bandwidth { 60 | if bandwidth > c.largestBandwidth { 61 | c.largestBandwidth = bandwidth 62 | } 63 | } 64 | } 65 | 66 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 67 | -------------------------------------------------------------------------------- /network/consensus_weight.go: 
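
The SetBandwidth bookkeeping above keeps totalBandwidth and largestBandwidth consistent when a peer's entry is overwritten, rescanning only if the previous maximum was replaced; the ConsensusWeightDistribution that follows uses the same pattern. A small standalone sketch of that update rule with made-up peers and values; note the sketch writes the new value before rescanning, so the recorded maximum cannot remain stuck at a value that was just lowered:

package main

import "fmt"

type PeerID int

type distribution struct {
	values  map[PeerID]float64
	total   float64
	largest float64
}

// set overwrites a peer's value while keeping total and largest consistent.
// The maximum is recomputed from scratch, which is cheap for simulator-sized node counts.
func (d *distribution) set(id PeerID, v float64) {
	if old, ok := d.values[id]; ok {
		d.total -= old
	}
	d.values[id] = v
	d.total += v
	d.largest = 0
	for _, w := range d.values {
		if w > d.largest {
			d.largest = w
		}
	}
}

func main() {
	d := &distribution{values: make(map[PeerID]float64)}
	d.set(1, 10) // made-up peer IDs and bandwidths
	d.set(2, 40)
	d.set(2, 5)                     // lowering the former maximum must also lower largest
	fmt.Println(d.total, d.largest) // 15 10
}
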
-------------------------------------------------------------------------------- 1 | package network 2 | 3 | type WeightGenerator func(nodeCount int, nodeTotalWeight float64) []uint64 4 | type EqualWeightGenerator func(validatorNodeCount int, nonValidatorNodeCount int, totalWeight int) []uint64 5 | 6 | // region ConsensusWeightDistribution ////////////////////////////////////////////////////////////////////////////////// 7 | 8 | type ConsensusWeightDistribution struct { 9 | weights map[PeerID]uint64 10 | totalWeight uint64 11 | largestWeight uint64 12 | } 13 | 14 | func NewConsensusWeightDistribution() *ConsensusWeightDistribution { 15 | return &ConsensusWeightDistribution{ 16 | weights: make(map[PeerID]uint64), 17 | } 18 | } 19 | 20 | func (c *ConsensusWeightDistribution) SetWeight(peerID PeerID, weight uint64) { 21 | if existingWeight, exists := c.weights[peerID]; exists { 22 | c.totalWeight -= existingWeight 23 | 24 | if c.largestWeight == existingWeight { 25 | c.rescanForLargestWeight() 26 | } 27 | } 28 | 29 | c.weights[peerID] = weight 30 | c.totalWeight += weight 31 | 32 | if weight > c.largestWeight { 33 | c.largestWeight = weight 34 | } 35 | } 36 | 37 | func (c *ConsensusWeightDistribution) Weight(peerID PeerID) uint64 { 38 | return c.weights[peerID] 39 | } 40 | 41 | func (c *ConsensusWeightDistribution) Weights() map[PeerID]uint64 { 42 | return c.weights 43 | } 44 | 45 | func (c *ConsensusWeightDistribution) TotalWeight() uint64 { 46 | return c.totalWeight 47 | } 48 | 49 | func (c *ConsensusWeightDistribution) LargestWeight() uint64 { 50 | return c.largestWeight 51 | } 52 | 53 | func (c *ConsensusWeightDistribution) rescanForLargestWeight() { 54 | c.largestWeight = 0 55 | for _, weight := range c.weights { 56 | if weight > c.largestWeight { 57 | c.largestWeight = weight 58 | } 59 | } 60 | } 61 | 62 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 63 | -------------------------------------------------------------------------------- /network/groups.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "strconv" 5 | "time" 6 | 7 | "github.com/iotaledger/hive.go/crypto" 8 | "github.com/iotaledger/hive.go/datastructure/set" 9 | "github.com/iotaledger/multivers-simulation/config" 10 | ) 11 | 12 | // region AdversaryType //////////////////////////////////////////////////////////////////////////////////////////////// 13 | 14 | type AdversaryType int 15 | 16 | const ( 17 | HonestNode AdversaryType = iota 18 | ShiftOpinion 19 | TheSameOpinion 20 | NoGossip 21 | Blowball 22 | ) 23 | 24 | func ToAdversaryType(adv int) AdversaryType { 25 | switch adv { 26 | case int(ShiftOpinion): 27 | return ShiftOpinion 28 | case int(TheSameOpinion): 29 | return TheSameOpinion 30 | case int(NoGossip): 31 | return NoGossip 32 | case int(Blowball): 33 | return Blowball 34 | } 35 | return HonestNode 36 | } 37 | 38 | func AdversaryTypeToString(adv AdversaryType) string { 39 | switch adv { 40 | case HonestNode: 41 | return "Honest" 42 | case ShiftOpinion: 43 | return "ShiftingOpinion" 44 | case TheSameOpinion: 45 | return "TheSameOpinion" 46 | case NoGossip: 47 | return "NoGossip" 48 | case Blowball: 49 | return "Blowball" 50 | } 51 | return "" 52 | } 53 | 54 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 55 | 56 | // region AdversaryGroup 
//////////////////////////////////////////////////////////////////////////////////////////////// 57 | 58 | var AdversaryNodeIDToGroupIDMap = make(map[int]int) 59 | 60 | func IsAdversary(nodeID int) bool { 61 | _, ok := AdversaryNodeIDToGroupIDMap[nodeID] 62 | return ok 63 | } 64 | 65 | type AdversaryGroup struct { 66 | NodeIDs []int 67 | GroupMana float64 68 | TargetManaPercentage float64 69 | Delay time.Duration 70 | AdversaryType AdversaryType 71 | InitColor string 72 | NodeCount int 73 | } 74 | 75 | func (g *AdversaryGroup) AddNodeID(id, groupId int) { 76 | g.NodeIDs = append(g.NodeIDs, id) 77 | AdversaryNodeIDToGroupIDMap[id] = groupId 78 | } 79 | 80 | type AdversaryGroups []*AdversaryGroup 81 | 82 | func NewAdversaryGroups() (groups AdversaryGroups) { 83 | groups = make(AdversaryGroups, 0, len(config.Params.AdversaryTypes)) 84 | for i, configAdvType := range config.Params.AdversaryTypes { 85 | targetMana := float64(1) 86 | delay := config.Params.MinDelay 87 | color := "" 88 | nCount := 1 89 | 90 | if len(config.Params.AdversaryMana) > 0 { 91 | targetMana = config.Params.AdversaryMana[i] 92 | } 93 | 94 | if len(config.Params.AdversaryDelays) > 0 { 95 | delay = config.Params.AdversaryDelays[i] 96 | } 97 | 98 | if len(config.Params.AdversaryNodeCounts) > 0 { 99 | nCount = config.Params.AdversaryNodeCounts[i] 100 | } 101 | 102 | color = config.Params.AdversaryInitColors[i] 103 | group := &AdversaryGroup{ 104 | NodeIDs: make([]int, 0, nCount), 105 | TargetManaPercentage: targetMana, 106 | Delay: time.Millisecond * time.Duration(delay), 107 | AdversaryType: ToAdversaryType(configAdvType), 108 | InitColor: color, 109 | NodeCount: nCount, 110 | } 111 | groups = append(groups, group) 112 | } 113 | 114 | return 115 | } 116 | 117 | // CalculateWeightTotalConfig returns how many nodes will be used for weight distribution and their total weight 118 | // after excluding all adversary nodes that will not be selected randomly 119 | func (g *AdversaryGroups) CalculateWeightTotalConfig() (int, float64) { 120 | totalAdv := 0 121 | totalAdvManaPercentage := float64(0) 122 | 123 | for _, group := range *g { 124 | totalAdv += group.NodeCount 125 | totalAdvManaPercentage += group.TargetManaPercentage 126 | } 127 | totalCount := config.Params.NodesCount - totalAdv 128 | totalWeight := float64(config.Params.NodesTotalWeight) * (1 - totalAdvManaPercentage/100) 129 | return totalCount, totalWeight 130 | } 131 | 132 | // UpdateAdversaryNodes assigns adversary nodes in AdversaryGroups to correct nodeIDs and updates their mana 133 | func (g *AdversaryGroups) UpdateAdversaryNodes(weightDistribution []uint64) []uint64 { 134 | g.updateGroupMana() 135 | 136 | // Adversary nodes are taking indexes from the end, excluded randomly chosen nodes 137 | advIndex := len(weightDistribution) 138 | // weight distribution with adversary weights appended at the ned 139 | newWeights := g.updateAdvIDAndWeights(advIndex, weightDistribution) 140 | 141 | return newWeights 142 | } 143 | 144 | func (g *AdversaryGroups) updateAdvIDAndWeights(advIndex int, newWeights []uint64) []uint64 { 145 | for groupIndex, group := range *g { 146 | for i := 0; i < group.NodeCount; i++ { 147 | group.AddNodeID(advIndex, groupIndex) 148 | advIndex++ 149 | // append adversary weight at the end of weight distribution 150 | nodeWeight := uint64(group.GroupMana / float64(group.NodeCount)) 151 | newWeights = append(newWeights, nodeWeight) 152 | } 153 | } 154 | return newWeights 155 | } 156 | 157 | func (g *AdversaryGroups) updateGroupMana() { 158 | for _, group 
:= range *g { 159 | group.GroupMana = group.TargetManaPercentage * float64(config.Params.NodesTotalWeight) / 100.0 160 | } 161 | } 162 | 163 | func (g *AdversaryGroups) ApplyNetworkDelayForAdversaryNodes(network *Network) { 164 | for _, adversaryGroup := range *g { 165 | for _, nodeID := range adversaryGroup.NodeIDs { 166 | peer := network.Peer(nodeID) 167 | for _, neighbor := range peer.Neighbors { 168 | neighbor.SetDelay(adversaryGroup.Delay) 169 | } 170 | } 171 | } 172 | } 173 | 174 | func (g *AdversaryGroups) ApplyNeighborsAdversaryNodes(network *Network, configuration *Configuration) { 175 | for _, adversaryGroup := range *g { 176 | for _, nodeID := range adversaryGroup.NodeIDs { 177 | adversary := network.Peer(nodeID) 178 | for _, peer := range network.Peers { 179 | adversary.Neighbors[peer.ID] = NewConnection( 180 | network.Peers[peer.ID].Socket, 181 | adversaryGroup.Delay, 182 | 0, 183 | configuration, 184 | ) 185 | } 186 | } 187 | } 188 | } 189 | 190 | func randomWeightIndex(weights []uint64, count int) (randomWeights []int) { 191 | selectedPeers := set.New() 192 | for len(randomWeights) < count { 193 | if randomIndex := crypto.Randomness.Intn(len(weights)); selectedPeers.Add(randomIndex) { 194 | randomWeights = append(randomWeights, randomIndex) 195 | } 196 | } 197 | return 198 | } 199 | 200 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 201 | 202 | // region Accidental /////////////////////////////////////////////////////////////////////////////////////////////////// 203 | 204 | func GetAccidentalIssuers(network *Network) []*Peer { 205 | peers := make([]*Peer, 0) 206 | randomCount := 0 207 | for i := 0; i < len(config.Params.AccidentalMana); i++ { 208 | switch config.Params.AccidentalMana[i] { 209 | case "max": 210 | peers = append(peers, network.Peer(0)) 211 | case "min": 212 | peers = append(peers, network.Peer(len(network.WeightDistribution.weights)-1)) 213 | case "random": 214 | randomCount++ 215 | default: 216 | customId, err := strconv.Atoi(config.Params.AccidentalMana[i]) 217 | if err != nil || config.Params.NodesCount-1 < customId || customId < 0 { 218 | log.Warnf("AccidentalMana parameter: %s is incorrect, so not processed", config.Params.AccidentalMana[i]) 219 | } else { 220 | peers = append(peers, network.Peer(customId)) 221 | } 222 | } 223 | } 224 | if randomCount > 0 { 225 | for _, selectedNode := range network.RandomPeers(randomCount) { 226 | peers = append(peers, selectedNode) 227 | } 228 | } 229 | return peers 230 | } 231 | 232 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 233 | -------------------------------------------------------------------------------- /network/network.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "math/rand" 5 | "time" 6 | 7 | "github.com/iotaledger/multivers-simulation/config" 8 | "github.com/iotaledger/multivers-simulation/logger" 9 | 10 | "github.com/iotaledger/hive.go/crypto" 11 | "github.com/iotaledger/hive.go/datastructure/set" 12 | ) 13 | 14 | var log = logger.New("Network") 15 | 16 | // region Network ////////////////////////////////////////////////////////////////////////////////////////////////////// 17 | 18 | type Network struct { 19 | Peers []*Peer 20 | WeightDistribution *ConsensusWeightDistribution 21 | BandwidthDistribution *BandwidthDistribution 22 | AdversaryGroups AdversaryGroups 23 | Attacker 
*SingleAttacker 24 | } 25 | 26 | func New(option ...Option) (network *Network) { 27 | log.Debug("Creating Network ...") 28 | defer log.Info("Creating Network ... [DONE]") 29 | 30 | network = &Network{ 31 | Peers: make([]*Peer, 0), 32 | AdversaryGroups: NewAdversaryGroups(), 33 | Attacker: NewSingleAttacker(), 34 | } 35 | 36 | configuration := NewConfiguration(option...) 37 | configuration.CreatePeers(network) 38 | configuration.ConnectPeers(network) 39 | 40 | return 41 | } 42 | 43 | func (n *Network) RandomPeers(count int) (randomPeers []*Peer) { 44 | selectedPeers := set.New() 45 | for len(randomPeers) < count { 46 | if randomIndex := crypto.Randomness.Intn(len(n.Peers)); selectedPeers.Add(randomIndex) { 47 | randomPeers = append(randomPeers, n.Peers[randomIndex]) 48 | } 49 | } 50 | 51 | return 52 | } 53 | 54 | func (n *Network) Shutdown() { 55 | for _, peer := range n.Peers { 56 | peer.Shutdown() 57 | } 58 | } 59 | 60 | func (n *Network) Peer(index int) *Peer { 61 | return n.Peers[index] 62 | } 63 | 64 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 65 | 66 | // region Configuration //////////////////////////////////////////////////////////////////////////////////////////////// 67 | 68 | type Configuration struct { 69 | nodes []*NodesSpecification 70 | minDelay time.Duration 71 | maxDelay time.Duration 72 | minPacketLoss float64 73 | maxPacketLoss float64 74 | peeringStrategy PeeringStrategy 75 | adversaryPeeringAll bool 76 | adversarySpeedup []float64 77 | genesisTime time.Time 78 | } 79 | 80 | func NewConfiguration(options ...Option) (configuration *Configuration) { 81 | configuration = &Configuration{} 82 | for _, currentOption := range options { 83 | currentOption(configuration) 84 | } 85 | 86 | return 87 | } 88 | 89 | func (c *Configuration) RandomNetworkDelay() time.Duration { 90 | return c.minDelay + time.Duration(crypto.Randomness.Float64()*float64(c.maxDelay-c.minDelay)) 91 | } 92 | 93 | func (c *Configuration) ExpRandomNetworkDelay() time.Duration { 94 | return time.Duration(rand.ExpFloat64() * (float64(c.maxDelay+c.minDelay) / 2)) 95 | } 96 | 97 | func (c *Configuration) RandomPacketLoss() float64 { 98 | return c.minPacketLoss + crypto.Randomness.Float64()*(c.maxPacketLoss-c.minPacketLoss) 99 | } 100 | 101 | func (c *Configuration) CreatePeers(network *Network) { 102 | log.Debugf("Creating peers ...") 103 | defer log.Info("Creating peers ... [DONE]") 104 | 105 | network.WeightDistribution = NewConsensusWeightDistribution() 106 | network.BandwidthDistribution = NewBandwidthDistribution() 107 | 108 | for _, nodesSpecification := range c.nodes { 109 | nodeWeights := nodesSpecification.ConfigureWeights(network) 110 | nodeBandwidth := nodesSpecification.ConfigureBandwidth(network) 111 | 112 | for i := 0; i < nodesSpecification.nodeCount; i++ { 113 | nodeType := HonestNode 114 | speedupFactor := 1.0 115 | // this is adversary node 116 | if groupIndex, ok := AdversaryNodeIDToGroupIDMap[i]; ok { 117 | nodeType = network.AdversaryGroups[groupIndex].AdversaryType 118 | speedupFactor = c.adversarySpeedup[groupIndex] 119 | } 120 | if IsAttacker(i) { 121 | nodeType = Blowball 122 | } 123 | nodeFactory := nodesSpecification.nodeFactories[nodeType] 124 | 125 | peer := NewPeer(nodeFactory()) 126 | peer.AdversarySpeedup = speedupFactor 127 | network.Peers = append(network.Peers, peer) 128 | log.Debugf("Created %s ... 
[DONE]", peer) 129 | 130 | network.WeightDistribution.SetWeight(peer.ID, nodeWeights[i]) 131 | network.BandwidthDistribution.SetBandwidth(peer.ID, nodeBandwidth[i]) 132 | } 133 | for _, peer := range network.Peers { 134 | peer.SetupNode(network.WeightDistribution, network.BandwidthDistribution, c.genesisTime) 135 | log.Debugf("Setup %s ... [DONE]", peer) 136 | log.Debugf("%s weight %d bandwidth %f", 137 | peer, 138 | network.WeightDistribution.Weight(peer.ID), 139 | network.BandwidthDistribution.Bandwidth(peer.ID)) 140 | } 141 | } 142 | } 143 | 144 | func (c *Configuration) ConnectPeers(network *Network) { 145 | log.Debugf("Connecting peers ...") 146 | defer log.Info("Connecting peers ... [DONE]") 147 | 148 | c.peeringStrategy(network, c) 149 | if c.adversaryPeeringAll { 150 | network.AdversaryGroups.ApplyNeighborsAdversaryNodes(network, c) 151 | } 152 | network.AdversaryGroups.ApplyNetworkDelayForAdversaryNodes(network) 153 | 154 | } 155 | 156 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 157 | 158 | // region Option /////////////////////////////////////////////////////////////////////////////////////////////////////// 159 | 160 | type Option func(*Configuration) 161 | 162 | func Nodes(nodeCount int, 163 | nodeFactories map[AdversaryType]NodeFactory, 164 | weightGenerator WeightGenerator, 165 | bandwidthGenerator BandwidthGenerator, 166 | ) Option { 167 | nodeSpecs := &NodesSpecification{ 168 | nodeCount: nodeCount, 169 | nodeFactories: nodeFactories, 170 | weightGenerator: weightGenerator, 171 | bandwidthGenerator: bandwidthGenerator, 172 | } 173 | 174 | return func(config *Configuration) { 175 | config.nodes = append(config.nodes, nodeSpecs) 176 | } 177 | } 178 | 179 | type NodesSpecification struct { 180 | nodeCount int 181 | nodeFactories map[AdversaryType]NodeFactory 182 | weightGenerator WeightGenerator 183 | bandwidthGenerator BandwidthGenerator 184 | } 185 | 186 | func (n *NodesSpecification) ConfigureWeights(network *Network) []uint64 { 187 | var nodesCount int 188 | var totalWeight float64 189 | var nodeWeights []uint64 190 | 191 | switch config.Params.SimulationMode { 192 | case "Adversary": 193 | nodesCount, totalWeight = network.AdversaryGroups.CalculateWeightTotalConfig() 194 | nodeWeights = n.weightGenerator(nodesCount, totalWeight) 195 | // update adversary groups and get new mana distribution with adversary nodes included 196 | nodeWeights = network.AdversaryGroups.UpdateAdversaryNodes(nodeWeights) 197 | case "Accidental": 198 | nodeWeights = n.weightGenerator(config.Params.NodesCount, float64(config.Params.NodesTotalWeight)) 199 | case "Blowball": 200 | nodesCount, totalWeight = network.Attacker.CalculateWeightTotalConfig() 201 | nodeWeights = n.weightGenerator(nodesCount, totalWeight) 202 | nodeWeights = network.Attacker.UpdateAttackerWeight(nodeWeights) 203 | default: 204 | // nodeWeights = n.weightGenerator(config.Params.NodesCount, float64(config.Params.NodesTotalWeight)) 205 | nodeWeights = EqualDistribution( 206 | config.Params.ValidatorCount, 207 | config.Params.NodesCount-config.Params.ValidatorCount, 208 | config.Params.NodesTotalWeight, 209 | ) 210 | } 211 | 212 | return nodeWeights 213 | } 214 | 215 | func (n *NodesSpecification) ConfigureBandwidth(network *Network) []float64 { 216 | var nodeBandwidth []float64 217 | 218 | switch config.Params.SimulationMode { 219 | default: 220 | nodeBandwidth = n.bandwidthGenerator( 221 | config.Params.ValidatorCount, 222 | 
config.Params.NodesCount-config.Params.ValidatorCount, 223 | float64(float64(config.Params.SchedulingRate)*(config.Params.CommitteeBandwidth)), 224 | float64(float64(config.Params.SchedulingRate)*(1-config.Params.CommitteeBandwidth))) 225 | } 226 | return nodeBandwidth 227 | } 228 | 229 | func Delay(minDelay time.Duration, maxDelay time.Duration) Option { 230 | return func(config *Configuration) { 231 | config.minDelay = minDelay 232 | config.maxDelay = maxDelay 233 | } 234 | } 235 | 236 | func PacketLoss(minPacketLoss float64, maxPacketLoss float64) Option { 237 | return func(config *Configuration) { 238 | config.minPacketLoss = minPacketLoss 239 | config.maxPacketLoss = maxPacketLoss 240 | } 241 | } 242 | 243 | func Topology(peeringStrategy PeeringStrategy) Option { 244 | return func(config *Configuration) { 245 | config.peeringStrategy = peeringStrategy 246 | } 247 | } 248 | 249 | func AdversaryPeeringAll(adversaryPeeringAll bool) Option { 250 | return func(config *Configuration) { 251 | config.adversaryPeeringAll = adversaryPeeringAll 252 | } 253 | } 254 | 255 | func AdversarySpeedup(adversarySpeedupFactors []float64) Option { 256 | return func(config *Configuration) { 257 | config.adversarySpeedup = adversarySpeedupFactors 258 | } 259 | } 260 | 261 | func GenesisTime(genesisTime time.Time) Option { 262 | return func(config *Configuration) { 263 | config.genesisTime = genesisTime 264 | } 265 | } 266 | 267 | type PeeringStrategy func(network *Network, options *Configuration) 268 | 269 | func WattsStrogatz(meanDegree int, randomness float64) PeeringStrategy { 270 | if meanDegree%2 != 0 { 271 | panic("Invalid argument: meanDegree needs to be even") 272 | } 273 | 274 | return func(network *Network, configuration *Configuration) { 275 | nodeCount := len(network.Peers) 276 | graph := make(map[int]map[int]bool) 277 | 278 | for nodeID := 0; nodeID < nodeCount; nodeID++ { 279 | graph[nodeID] = make(map[int]bool) 280 | 281 | for j := nodeID + 1; j <= nodeID+meanDegree/2; j++ { 282 | graph[nodeID][j%nodeCount] = true 283 | } 284 | } 285 | 286 | for tail, edges := range graph { 287 | for head := range edges { 288 | if crypto.Randomness.Float64() < randomness { 289 | newHead := crypto.Randomness.Intn(nodeCount) 290 | for newHead == tail || graph[newHead][tail] || edges[newHead] { 291 | newHead = crypto.Randomness.Intn(nodeCount) 292 | } 293 | 294 | delete(edges, head) 295 | edges[newHead] = true 296 | } 297 | } 298 | } 299 | for sourceNodeID, targetNodeIDs := range graph { 300 | for targetNodeID := range targetNodeIDs { 301 | randomNetworkDelay := configuration.RandomNetworkDelay() 302 | randomPacketLoss := configuration.RandomPacketLoss() 303 | 304 | network.Peers[sourceNodeID].Neighbors[PeerID(targetNodeID)] = NewConnection( 305 | network.Peers[targetNodeID].Socket, 306 | randomNetworkDelay, 307 | randomPacketLoss, 308 | configuration, 309 | ) 310 | 311 | network.Peers[targetNodeID].Neighbors[PeerID(sourceNodeID)] = NewConnection( 312 | network.Peers[sourceNodeID].Socket, 313 | randomNetworkDelay, 314 | randomPacketLoss, 315 | configuration, 316 | ) 317 | 318 | log.Debugf("Connecting %s <-> %s [network delay (%s), packet loss (%0.4f%%)] ... 
[DONE]", network.Peers[sourceNodeID], network.Peers[targetNodeID], randomNetworkDelay, randomPacketLoss*100) 319 | } 320 | } 321 | totalNeighborCount := 0 322 | for _, peer := range network.Peers { 323 | log.Debugf("%d %d", peer.ID, len(peer.Neighbors)) 324 | totalNeighborCount += len(peer.Neighbors) 325 | } 326 | log.Infof("Average number of neighbors: %.1f", float64(totalNeighborCount)/float64(nodeCount)) 327 | } 328 | } 329 | 330 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 331 | -------------------------------------------------------------------------------- /network/node.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import "time" 4 | 5 | // region Node ///////////////////////////////////////////////////////////////////////////////////////////////////////// 6 | type Node interface { 7 | Setup(peer *Peer, weightDistribution *ConsensusWeightDistribution, bandwidthDistribution *BandwidthDistribution, genesisTime time.Time) 8 | HandleNetworkMessage(networkMessage interface{}) 9 | } 10 | 11 | type NodeFactory func() Node 12 | 13 | func NodeClosure(closure func() interface{}) NodeFactory { 14 | return func() Node { 15 | return closure().(Node) 16 | } 17 | } 18 | 19 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 20 | -------------------------------------------------------------------------------- /network/peer.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "sync/atomic" 7 | "time" 8 | 9 | "github.com/iotaledger/hive.go/crypto" 10 | "github.com/iotaledger/hive.go/timedexecutor" 11 | ) 12 | 13 | // region Peer ///////////////////////////////////////////////////////////////////////////////////////////////////////// 14 | 15 | type Peer struct { 16 | ID PeerID 17 | Neighbors map[PeerID]*Connection 18 | Socket chan interface{} 19 | Node Node 20 | AdversarySpeedup float64 21 | 22 | shutdownOnce sync.Once 23 | ShutdownProcessing chan struct{} 24 | ShutdownIssuing chan struct{} 25 | } 26 | 27 | func NewPeer(node Node) (peer *Peer) { 28 | peer = &Peer{ 29 | ID: NewPeerID(), 30 | Neighbors: make(map[PeerID]*Connection), 31 | Socket: make(chan interface{}, 1024), 32 | Node: node, 33 | 34 | ShutdownProcessing: make(chan struct{}, 1), 35 | ShutdownIssuing: make(chan struct{}, 1), 36 | } 37 | 38 | return 39 | } 40 | 41 | func (p *Peer) SetupNode(consensusWeightDistribution *ConsensusWeightDistribution, bandwidthDistribution *BandwidthDistribution, genesisTime time.Time) { 42 | p.Node.Setup(p, consensusWeightDistribution, bandwidthDistribution, genesisTime) 43 | } 44 | 45 | func (p *Peer) Shutdown() { 46 | p.shutdownOnce.Do(func() { 47 | close(p.ShutdownProcessing) 48 | close(p.ShutdownIssuing) 49 | }) 50 | } 51 | 52 | func (p *Peer) ReceiveNetworkMessage(message interface{}) { 53 | p.Socket <- message 54 | } 55 | 56 | func (p *Peer) GossipNetworkMessage(message interface{}) { 57 | for _, neighborConnection := range p.Neighbors { 58 | neighborConnection.Send(message) 59 | } 60 | } 61 | 62 | func (p *Peer) String() string { 63 | return fmt.Sprintf("Peer%d", p.ID) 64 | } 65 | 66 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 67 | 68 | // region PeerID 
/////////////////////////////////////////////////////////////////////////////////////////////////////// 69 | 70 | type PeerID int64 71 | 72 | var peerIDCounter int64 73 | 74 | func NewPeerID() PeerID { 75 | return PeerID(atomic.AddInt64(&peerIDCounter, 1) - 1) 76 | } 77 | 78 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 79 | 80 | // region Connection /////////////////////////////////////////////////////////////////////////////////////////////////// 81 | 82 | type Connection struct { 83 | socket chan<- interface{} 84 | networkDelay time.Duration 85 | packetLoss float64 86 | timedExecutor *timedexecutor.TimedExecutor 87 | shutdownOnce sync.Once 88 | configuration *Configuration 89 | } 90 | 91 | func NewConnection(socket chan<- interface{}, networkDelay time.Duration, packetLoss float64, configuration *Configuration) (connection *Connection) { 92 | connection = &Connection{ 93 | socket: socket, 94 | networkDelay: networkDelay, 95 | packetLoss: packetLoss, 96 | timedExecutor: timedexecutor.New(1), 97 | configuration: configuration, 98 | } 99 | 100 | return 101 | } 102 | 103 | func (c *Connection) NetworkDelay() time.Duration { 104 | return c.networkDelay 105 | } 106 | 107 | func (c *Connection) PacketLoss() float64 { 108 | return c.packetLoss 109 | } 110 | 111 | func (c *Connection) Send(message interface{}) { 112 | if crypto.Randomness.Float64() <= c.packetLoss { 113 | return 114 | } 115 | c.timedExecutor.ExecuteAfter(func() { 116 | c.socket <- message 117 | }, c.configuration.RandomNetworkDelay()) 118 | } 119 | 120 | func (c *Connection) SetDelay(delay time.Duration) { 121 | c.networkDelay = delay 122 | } 123 | 124 | func (c *Connection) Shutdown() { 125 | c.shutdownOnce.Do(func() { 126 | c.timedExecutor.Shutdown(timedexecutor.CancelPendingTasks) 127 | }) 128 | } 129 | 130 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 131 | -------------------------------------------------------------------------------- /network/singleattacker.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "github.com/iotaledger/hive.go/core/generics/constraints" 5 | "github.com/iotaledger/multivers-simulation/config" 6 | ) 7 | 8 | type SingleAttacker struct { 9 | nodeID int 10 | TargetManaPercentage int 11 | AttackerType AdversaryType 12 | weight float64 13 | } 14 | 15 | func (a SingleAttacker) CalculateWeightTotalConfig() (newNodesCount int, newTotalWeight float64) { 16 | newTotalWeight = float64(config.Params.NodesTotalWeight) - a.weight 17 | newNodesCount = config.Params.NodesCount - 1 18 | return 19 | } 20 | func insert[V constraints.Numeric](array []V, element V, i int) []V { 21 | array = append(array[:i+1], array[i:]...) 
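// Implementation note (illustrative, not from the original source): the append above shifts the overlapping tail array[i:] one slot to the right — Go's copy/append handle overlapping source and destination — so indexes i and i+1 temporarily both hold the old array[i], and the assignment below overwrites index i with the new element. Note that array[:i+1] requires i < len(array) (or spare capacity when inserting at the very end).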
22 | array[i] = element 23 | return array 24 | } 25 | 26 | func (a SingleAttacker) UpdateAttackerWeight(weights []uint64) []uint64 { 27 | return insert(weights, uint64(a.weight), a.nodeID) 28 | } 29 | 30 | func NewSingleAttacker() *SingleAttacker { 31 | return &SingleAttacker{ 32 | weight: float64(config.Params.BlowballMana) * float64(config.Params.NodesTotalWeight) / 100, 33 | nodeID: config.Params.BlowballNodeID, 34 | TargetManaPercentage: config.Params.BlowballMana, 35 | AttackerType: Blowball, 36 | } 37 | } 38 | 39 | // todo use generics 40 | //type ( 41 | // Vanilla uint 42 | // Blowball uint 43 | //) 44 | //type AttackerType interface { 45 | // Blowball | Vanilla 46 | //} 47 | // 48 | //func getNode[T AttackerType](node singlenodeattacks.AttackerNode) T { 49 | // return node.(T) 50 | //} 51 | 52 | func IsAttacker(nodeID int) bool { 53 | // return nodeID == config.Params.BlowballNodeID 54 | return false 55 | } 56 | -------------------------------------------------------------------------------- /network/utility.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import "math" 4 | 5 | func ZIPFDistribution(s float64) WeightGenerator { 6 | return func(nodeCount int, totalWeight float64) (result []uint64) { 7 | rawTotalWeight := uint64(0) 8 | rawWeights := make([]uint64, nodeCount) 9 | for i := 0; i < nodeCount; i++ { 10 | weight := uint64(math.Pow(float64(i+1), -s) * totalWeight) 11 | rawWeights[i] = weight 12 | rawTotalWeight += weight 13 | } 14 | 15 | normalizedTotalWeight := uint64(0) 16 | result = make([]uint64, nodeCount) 17 | for i := 0; i < nodeCount; i++ { 18 | normalizedWeight := uint64((float64(rawWeights[i]) / float64(rawTotalWeight)) * totalWeight) 19 | 20 | result[i] = normalizedWeight 21 | normalizedTotalWeight += normalizedWeight 22 | } 23 | 24 | result[0] += uint64(totalWeight) - normalizedTotalWeight 25 | 26 | return 27 | } 28 | } 29 | 30 | func EqualDistribution(validatorNodeCount int, nonValidatorNodeCount int, totalWeight int) (result []uint64) { 31 | 32 | normalizedTotalWeight := uint64(0) 33 | result = make([]uint64, validatorNodeCount+nonValidatorNodeCount) 34 | for i := 0; i < validatorNodeCount; i++ { 35 | normalizedWeight := uint64((float64(totalWeight) / float64(validatorNodeCount))) 36 | 37 | result[i] = normalizedWeight 38 | normalizedTotalWeight += normalizedWeight 39 | } 40 | result[0] += uint64(totalWeight) - normalizedTotalWeight 41 | return result 42 | } 43 | 44 | // The mixed Zipf distribution: the first validatorNodeCount nodes receive 45 | // equal bandwidth, while the remaining nonValidatorNodeCount nodes' bandwidths 46 | // follow a Zipf distribution. 47 | // Note that the bandwidth generated here only covers non-validation blocks, 48 | // even if those blocks are issued by validator nodes: 49 | // an additional ticker in the main function is responsible for 50 | // issuing the validation blocks.
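// Illustrative example (hypothetical numbers, assuming SchedulingRate is expressed in blocks per second): with SchedulingRate = 100 and CommitteeBandwidth = 0.5, ConfigureBandwidth passes validatorCommitteeBandwidth = 50 and nonValidatorCommitteeBandwidth = 50 to this generator; with validatorNodeCount = 20, each validator is assigned 50/20 = 2.5 blocks/s, while non-validator j (1-indexed) receives a share of the remaining 50 blocks/s approximately proportional to j^-s, and any normalization remainder is added to the first non-validator.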
51 | func MixedZIPFDistribution(s float64) BandwidthGenerator { 52 | return func( 53 | validatorNodeCount int, 54 | nonValidatorNodeCount int, 55 | validatorCommitteeBandwidth float64, 56 | nonValidatorCommitteeBandwidth float64, 57 | 58 | ) (result []float64) { 59 | 60 | result = make([]float64, validatorNodeCount+nonValidatorNodeCount) 61 | totalBandwidthPerValidator := validatorCommitteeBandwidth / float64(validatorNodeCount) 62 | for i := 0; i < validatorNodeCount; i++ { 63 | result[i] = totalBandwidthPerValidator 64 | } 65 | 66 | rawTotalBandwidth := uint64(0) 67 | rawBandwidth := make([]uint64, nonValidatorNodeCount) 68 | for i := 0; i < nonValidatorNodeCount; i++ { 69 | bandwidth := uint64(math.Pow(float64(i+1), -s) * nonValidatorCommitteeBandwidth) 70 | rawBandwidth[i] = bandwidth 71 | rawTotalBandwidth += bandwidth 72 | } 73 | 74 | normalizedTotalBandwidth := float64(0) 75 | for i := 0; i < nonValidatorNodeCount; i++ { 76 | normalizedBandwidth := (float64(rawBandwidth[i]) / float64(rawTotalBandwidth)) * nonValidatorCommitteeBandwidth 77 | 78 | result[i+validatorNodeCount] = normalizedBandwidth 79 | normalizedTotalBandwidth += normalizedBandwidth 80 | } 81 | 82 | result[validatorNodeCount] += nonValidatorCommitteeBandwidth - normalizedTotalBandwidth 83 | 84 | return 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /plot_style.txt: -------------------------------------------------------------------------------- 1 | xtick.color: 323034 2 | ytick.color: 323034 3 | text.color: 323034 4 | lines.markeredgecolor: black 5 | patch.facecolor : bc80bd 6 | patch.force_edgecolor : True 7 | patch.linewidth: 0.8 8 | scatter.edgecolors: black 9 | grid.color: b1afb5 10 | axes.titlesize: 20 11 | figure.autolayout: True 12 | legend.title_fontsize: 20 13 | xtick.labelsize: 20 14 | ytick.labelsize: 20 15 | axes.labelsize: 20 16 | font.size: 10 17 | axes.prop_cycle : (cycler('color', ['bc80bd','fb8072', 'b3de69','fdb462','fccde5','8dd3c7','ffed6f','bebada','80b1d3', 'ccebc5', 'd9d9d9'])) 18 | mathtext.fontset: stix 19 | font.family: STIXGeneral 20 | lines.linewidth: 2 21 | legend.frameon: True 22 | legend.framealpha: 0.8 23 | legend.fontsize: 13 24 | legend.edgecolor: 0.9 25 | legend.borderpad: 0.2 26 | legend.columnspacing: 1.5 27 | legend.labelspacing: 0.4 28 | text.usetex: False 29 | axes.titlelocation: left 30 | axes.formatter.use_mathtext: True 31 | axes.autolimit_mode: round_numbers 32 | axes.labelpad: 3 33 | axes.formatter.limits: -4, 4 34 | axes.labelcolor: black 35 | axes.edgecolor: black 36 | axes.linewidth: 0.6 37 | axes.spines.right : False 38 | axes.spines.top : False 39 | axes.grid: False 40 | figure.titlesize: 23 41 | figure.dpi: 300 42 | figure.figsize: 4.8, 3 -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | # Multiverse simulation scripts 2 | 3 | ## About 4 | 5 | Batch running the multiverse-simulation in different parameter sets, and generate the figures automatically. 6 | 7 | ## Requirements 8 | 9 | - Install Python3.6+ from the [official website](https://www.python.org/downloads/) 10 | - Install the required packages 11 | ```s 12 | pip install -r requirements.txt 13 | ``` 14 | 15 | ## Supported Arguments 16 | - Note that the default MULTIVERSE_PATH will be the parent's folder of the `scripts` folder. 17 | - Please use `python3 main.py -h` to see the default paths in your environments. 
18 | ```s 19 | optional arguments: 20 | -h, --help show this help message and exit 21 | -msp MULTIVERSE_PATH, --MULTIVERSE_PATH MULTIVERSE_PATH 22 | The path of multiverse-simulation 23 | Default: [MULTIVERSE_PATH] 24 | -rp RESULTS_PATH, --RESULTS_PATH RESULTS_PATH 25 | The path to save the simulation results 26 | Default: [MULTIVERSE_PATH]/results 27 | -fop FIGURE_OUTPUT_PATH, --FIGURE_OUTPUT_PATH FIGURE_OUTPUT_PATH 28 | The path to output the figures 29 | Default: [MULTIVERSE_PATH]/scripts/figures 30 | -st SIMULATION_TARGET, --SIMULATION_TARGET SIMULATION_TARGET 31 | The simulation target, CT (confirmation time) or DS (double spending) 32 | Default: CT 33 | -rt REPETITION_TIME, --REPETITION_TIME REPETITION_TIME 34 | The number of runs for a single configuration 35 | Default: 1 36 | -v VARIATIONS, --VARIATIONS VARIATIONS 37 | N, K, S, D (Number of nodes, parents, Zipfs, delays) 38 | Default: N 39 | -vv VARIATION_VALUES [VARIATION_VALUES ...], --VARIATION_VALUES VARIATION_VALUES [VARIATION_VALUES ...] 40 | The variation values, e.g., '100 200 300' for different N 41 | Default: [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000] 42 | -df DECELERATION_FACTORS [DECELERATION_FACTORS ...], --DECELERATION_FACTORS DECELERATION_FACTORS [DECELERATION_FACTORS ...] 43 | The slowdown factors for each variation. If only one element, then it will be used for all runs 44 | Default: [1, 2, 2, 3, 5, 10, 15, 20, 25, 30] 45 | -exec EXECUTE, --EXECUTE EXECUTE 46 | Execution way, e.g., 'go run .' or './multiverse_sim' 47 | Default: go run . 48 | -t TRANSPARENT, --TRANSPARENT TRANSPARENT 49 | The generated figures should be transparent 50 | Default: False 51 | -xb X_AXIS_BEGIN, --X_AXIS_BEGIN X_AXIS_BEGIN 52 | The begining x axis in ns 53 | Default: 20000000000 54 | -ct COLORED_MSG_ISSUANCE_TIME, --COLORED_MSG_ISSUANCE_TIME COLORED_MSG_ISSUANCE_TIME 55 | The issuance time of colored message (in ns) 56 | Default: 2000000000 57 | -rs, --RUN_SIM Run the simulation 58 | Default: False 59 | -pf, --PLOT_FIGURES Plot the figures 60 | Default: False 61 | ``` 62 | 63 | ## Running examples 64 | - Different Ns for confirmation time (CT) analysis 65 | - The output results will be put in `RESULTS_PATH`/var_N_CT 66 | - Usage 67 | ```s 68 | python3 main.py -rs -pf 69 | ``` 70 | 71 | - Different Ks for double spending (DS) analysis, 100 times 72 | - The output results will be put in `RESULTS_PATH`/var_K_DS 73 | - Usage 74 | ```s 75 | python3 main.py -rs -pf -v K -vv 2 4 8 16 32 64 -df 1 -rt 100 -st DS 76 | ``` 77 | 78 | - Different Ss for double spending (DS) analysis, 100 times 79 | - The output results will be put in `RESULTS_PATH`/var_S_DS 80 | - NOTE: Need to use -rp, -fop to specify different RESULTS_PATH and FIGURE_OUTPUT_PATH 81 | if customized EXECUTE is set, or the output folder for the multiverse results will be 82 | put in the same folder, and the output figures will be overwritten. 83 | - Usage example of generating different Ss (0~2.2) and different Ks (2, 4, 8, 16, 32, 64) 84 | ```s 85 | python3 main.py -rs -pf -v S -vv 0 0.2 0.4 0.6 0.8 1.0 1.2 1.4 1.6 1.8 2.0 2.2 -df 1 -rp 'k_2' -fop 'k_2/figures' -exec 'go run . --parentsCount=2' -rt 100 -st DS 86 | 87 | python3 main.py -rs -pf -v S -vv 0 0.2 0.4 0.6 0.8 1.0 1.2 1.4 1.6 1.8 2.0 2.2 -df 1 -rp 'k_4' -fop 'k_4/figures' -exec 'go run . --parentsCount=4' -rt 100 -st DS 88 | 89 | python3 main.py -rs -pf -v S -vv 0 0.2 0.4 0.6 0.8 1.0 1.2 1.4 1.6 1.8 2.0 2.2 -df 1 -rp 'k_8' -fop 'k_8/figures' -exec 'go run . 
--parentsCount=8' -rt 100 -st DS 90 | 91 | python3 main.py -rs -pf -v S -vv 0 0.2 0.4 0.6 0.8 1.0 1.2 1.4 1.6 1.8 2.0 2.2 -df 1 -rp 'k_16' -fop 'k_16/figures' -exec 'go run . --parentsCount=16' -rt 100 -st DS 92 | 93 | python3 main.py -rs -pf -v S -vv 0 0.2 0.4 0.6 0.8 1.0 1.2 1.4 1.6 1.8 2.0 2.2 -df 1 -rp 'k_32' -fop 'k_32/figures' -exec 'go run . --parentsCount=32' -rt 100 -st DS 94 | 95 | python3 main.py -rs -pf -v S -vv 0 0.2 0.4 0.6 0.8 1.0 1.2 1.4 1.6 1.8 2.0 2.2 -df 1 -rp 'k_64' -fop 'k_64/figures' -exec 'go run . --parentsCount=64' -rt 100 -st DS 96 | ``` -------------------------------------------------------------------------------- /scripts/config.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pathlib import Path 3 | from datetime import datetime 4 | 5 | """The configuration for the simulation script. 6 | """ 7 | 8 | 9 | class Configuration: 10 | """The configuration of simulation 11 | """ 12 | 13 | def __init__(self): 14 | """Initialize the default configuration values 15 | 16 | """ 17 | # The configuration dictionary 18 | self.cd = {} 19 | 20 | # The data paths 21 | self.cd['MULTIVERSE_PATH'] = str(Path().absolute().parent) 22 | self.cd['RESULTS_PATH'] = self.cd['MULTIVERSE_PATH'] + "/results" 23 | self.cd['SCRIPT_START_TIME'] = datetime.strftime( 24 | datetime.now(), "%Y%m%d_%H%M") 25 | # self.cd['SCRIPT_START_TIME'] = '20230715_0555' 26 | self.cd['CONFIGURATION_PATH'] = f"{self.cd['MULTIVERSE_PATH']}/results/{self.cd['SCRIPT_START_TIME']}/mb.config" 27 | self.cd['GENERAL_OUTPUT_PATH'] = f"{self.cd['MULTIVERSE_PATH']}/results/{self.cd['SCRIPT_START_TIME']}/general" 28 | self.cd['SCHEDULER_OUTPUT_PATH'] = f"{self.cd['MULTIVERSE_PATH']}/results/{self.cd['SCRIPT_START_TIME']}/scheduler" 29 | self.cd['GENERAL_FIGURE_OUTPUT_PATH'] = f"{self.cd['MULTIVERSE_PATH']}/results/{self.cd['SCRIPT_START_TIME']}/general/figures" 30 | self.cd['SCHEDULER_FIGURE_OUTPUT_PATH'] = f"{self.cd['MULTIVERSE_PATH']}/results/{self.cd['SCRIPT_START_TIME']}/scheduler/figures" 31 | 32 | # (self.cd['MULTIVERSE_PATH'] + '/scripts/figures') 33 | 34 | self.cd['NODES_COUNT'] = 100 35 | # monitoring interval in milliseconds 36 | self.cd['MONITOR_INTERVAL'] = 100 37 | 38 | # The output folder suffix (e.g., ct for confirmation time and ds for double spending) 39 | self.cd['SIMULATION_TARGET'] = 'CT' 40 | 41 | # The variations to run 42 | # N, K, S, D, 'MB' (Number of nodes/parents, Zipfs, delays, manaburn policies) 43 | self.cd['VARIATIONS'] = 'N' 44 | 45 | # The variations value list 46 | # list of policies, separated by spaces 47 | self.cd['VARIATION_VALUES'] = [ 48 | "2 0 2 2 2 2 2 2 2 2 2 0 2 2 2 2 2 2 2 2"] 49 | 50 | # The deceleration factor list 51 | self.cd['DECELERATION_FACTORS'] = [1] 52 | 53 | # The repetition of each variation 54 | self.cd['REPETITION_TIME'] = 1 55 | 56 | # Execution way (e.g., 'go run .' or './multiverse_sim') 57 | # EXECUTE = './multiverse_sim' 58 | self.cd['EXECUTE'] = 'go run .' 
59 | 60 | # Transparent figure 61 | self.cd['TRANSPARENT'] = False 62 | 63 | # The begining x_axis in ns 64 | self.cd['X_AXIS_BEGIN'] = 20000_000_000 65 | 66 | # The issuance time of colored message (in ns) 67 | self.cd['COLORED_MSG_ISSUANCE_TIME'] = 2000_000_000 68 | 69 | # Flags of operations 70 | self.cd['RUN_SIM'] = True 71 | self.cd['PLOT_FIGURES'] = True 72 | 73 | # Adversary strategies 74 | self.cd["ADVERSARY_STRATEGY"] = "1 1" 75 | 76 | # Plotting variation setting 77 | self.cd['PLOT_VARIED_FIGURES'] = False 78 | self.cd['VARIED_PATHS'] = [] 79 | self.cd['VARIED_LABELS'] = [ 80 | # '15.07low', 81 | # '32.47low', 82 | # # '15.56high', 83 | # # '29.68high', 84 | # # '32.47low_opt' 85 | ] 86 | 87 | def update(self, k, v): 88 | """Update the key/value pair of the configuration. 89 | 90 | Args: 91 | k: The configuration key. 92 | v: The configuration value. 93 | """ 94 | self.cd[k] = v 95 | -------------------------------------------------------------------------------- /scripts/constant.py: -------------------------------------------------------------------------------- 1 | """The constant for the simulation script. 2 | """ 3 | 4 | # The timescale of the 'ns after start' is ns. Use sec as the unit time. 5 | ONE_SEC = 1000_000_000 6 | 7 | # Define the list of styles 8 | CLR_LIST = ['k', 'b', 'g', 'r', 'y', 'purple', 'gray'] # list of basic colors 9 | STY_LIST = ['-', '--', '-.', ':'] # list of basic linestyles 10 | 11 | # Define the target to parse 12 | TARGET = "Confirmation Time (ns)" 13 | ISSUED_MESSAGE = "# of Issued Messages" 14 | 15 | # Rename the parameters 16 | VAR_DICT = {'ParentsCount': 'k', 'ZipfParameter': 's', 17 | 'NodesCount': 'N', 'MinDelay': 'D', 'ConfirmationThreshold': 'AW', 'AccidentalMana': 'IM', 'AdversaryMana': 'AD', 18 | 'AdversaryNodeCounts': 'AC', 'AdversarySpeedup': 'SU', 'PacketLoss': 'P'} 19 | 20 | # Items for double spending figures 21 | COLORED_CONFIRMED_LIKE_ITEMS = [ 22 | 'Blue (Confirmed Accumulated Weight)', 'Red (Confirmed Accumulated Weight)', 'Blue (Like Accumulated Weight)', 23 | 'Red (Like Accumulated Weight)'] 24 | 25 | # The color list for the double spending figures 26 | DS_CLR_LIST = ['b', 'r', 'b', 'r', 'b', 'r'] 27 | DS_STY_LIST = ['-', '-', '--', '--', "-.", "-."] 28 | 29 | # The simulation mapping 30 | SIMULATION_VAR_DICT = {'N': 'nodesCount', 31 | 'S': 'zipfParameter', 32 | 'K': 'parentsCount', 33 | 'P': 'packetLoss'} 34 | 35 | # The figure naming mapping 36 | FIGURE_NAMING_DICT = {'N': ("NodesCount", "Confirmation Time v.s. Different Node Counts", 37 | "Convergence Time v.s. Different Node Counts", "Flips v.s. Different Node Counts", 38 | "Unconfirming Counts v.s. Different Node Counts", 39 | "Confirmation Weight Depth v.s. Different Node Counts"), 40 | 'S': ("ZipfParameter", "Confirmation Time v.s. Different Zipf's Parameters", 41 | "Convergence Time v.s. Different Zipf's Parameters", 42 | "Flips v.s. Different Zipf's Parameters", 43 | "Unconfirming Counts v.s. Different Zipf's Parameters", 44 | "Confirmation Weight Depth v.s. Different Zipf's Parameters"), 45 | 'K': ("ParentsCount", "Confirmation Time v.s. Different Parents Counts", 46 | "Convergence Time v.s. Different Parents Counts", "Flips v.s. Different Parents Counts", 47 | "Unconfirming Counts v.s. Different Parents Counts", 48 | "Confirmation Weight Depth v.s. Different Parents Counts"), 49 | 'D': ("MinDelay", "Confirmation Time v.s. Different Delays", 50 | "Convergence Time v.s. Different Delays", "Flips v.s. Different Delays", 51 | "Unconfirming Counts v.s. 
Different Delays", 52 | "Confirmation Weight Depth v.s. Different Delays"), 53 | 'P': ("PacketLoss", "Confirmation Time v.s. Different Packet Losses", 54 | "Convergence Time v.s. Different Packet Losses", "Flips v.s. Different Packet Losses", 55 | "Unconfirming Counts v.s. Different Packet Losses", 56 | "Confirmation Weight Depth v.s. Different Packet Losses"), 57 | 'AW': ("ConfirmationThreshold", "Confirmation Time v.s. Different Thresholds", 58 | "Convergence Time v.s. Different Threshold", "Flips v.s. Different Thresholds", 59 | "Unconfirming Counts v.s. Different Thresholds", 60 | "Confirmation Weight Depth v.s. Different Thresholds"), 61 | 'IM': ("AccidentalMana", "Confirmation Time v.s. Different Issuers", 62 | "Convergence Time v.s. Different Issuers", "Flips v.s. Different Issuers", 63 | "Unconfirming Counts v.s. Different Issuers", 64 | "Confirmation Weight Depth v.s. Different Issuers"), 65 | 'AD': ("AdversaryMana", "Confirmation Time v.s. Different Adversary Weights", 66 | "Convergence Time v.s. Different Adversary Weights", 67 | "Flips v.s. Different Adversary Weights", 68 | "Unconfirming Counts v.s. Different Adversary Weights", 69 | "Confirmation Weight Depth v.s. Different Adversary Weights"), 70 | 'AC': ("AdversaryNodeCounts", "Confirmation Time v.s. Different Adversary Node Counts", 71 | "Convergence Time v.s. Different Adversary Node Counts", 72 | "Flips v.s. Different Adversary Node Counts", 73 | "Unconfirming Counts v.s. Different Adversary Node Counts", 74 | "Confirmation Weight Depth v.s. Different Adversary Node Counts"), 75 | 'BS': ("AdversaryMana", "Confirmation Time v.s. Different Adversary Weights", 76 | "Convergence Time v.s. Different Adversary Weights", 77 | "Flips v.s. Different Adversary Weights", 78 | "Unconfirming Counts v.s. Different Adversary Weights", 79 | "Confirmation Weight Depth v.s. Different Adversary Weights"), 80 | 'SU': ("AdversarySpeedup", "Confirmation Time v.s. Different Adversary Speed", 81 | "Convergence Time v.s. Different Adversary Speed", 82 | "Flips v.s. Different Adversary Speed", 83 | "Unconfirming Counts v.s. Different Adversary Speed", 84 | "Confirmation Weight Depth v.s. Different Adversary Speed"), 85 | 'MB': ("BurnPolicyNames", "Confirmation Time v.s. Different Burn Policies", 86 | "Convergence Time v.s. Different Burn Policies", 87 | "Flips v.s. Different Burn Policies", 88 | "Unconfirming Counts v.s. Different Burn Policies", 89 | "Confirmation Weight Depth v.s. Different Burn Policies"), 90 | } 91 | -------------------------------------------------------------------------------- /scripts/parsing.py: -------------------------------------------------------------------------------- 1 | """The parsing module to parse the dumpped files. 2 | """ 3 | 4 | import json 5 | import logging 6 | import re 7 | 8 | import pandas as pd 9 | 10 | import constant as c 11 | 12 | 13 | class FileParser: 14 | """ 15 | The file parser for the files generated by multiverse-simulation. 16 | """ 17 | 18 | def __init__(self, cd): 19 | """Initialize the parameters. 20 | 21 | Args: 22 | cd: The configuration dictionary. 23 | """ 24 | self.x_axis_begin = cd['X_AXIS_BEGIN'] 25 | self.colored_confirmed_like_items = c.COLORED_CONFIRMED_LIKE_ITEMS 26 | self.one_second = c.ONE_SEC 27 | self.target = c.TARGET 28 | self.config_path = cd['CONFIGURATION_PATH'] 29 | 30 | def parse_aw_file(self, fn, variation): 31 | """Parse the accumulated weight files. 32 | 33 | Args: 34 | fc: The figure count. 35 | 36 | Returns: 37 | 38 | Returns: 39 | v: The variation value. 
40 | data: The target data to analyze. 41 | x_axis: The scaled/adjusted x axis. 42 | """ 43 | logging.info(f'Parsing {fn}...') 44 | # Opening JSON file 45 | with open(self.config_path) as f: 46 | c = json.load(f) 47 | 48 | v = str(c[variation]) 49 | 50 | data = pd.read_csv(fn) 51 | # # Chop data before the begining time 52 | # data = data[data['ns since start'] >= 53 | # self.x_axis_begin * float(c["SlowdownFactor"])] 54 | 55 | # Reset the index to only consider the confirmed msgs from X_AXIS_BEGIN 56 | data = data.reset_index() 57 | 58 | # ns is the time scale of the aw outputs 59 | x_axis = float(self.one_second) 60 | data[self.target] = data[self.target] / float(c["SlowdownFactor"]) 61 | return v, data[self.target], x_axis 62 | 63 | def parse_block_information_file(self, fn, variation): 64 | """Parse the block information files. 65 | 66 | Args: 67 | fc: The figure count. 68 | 69 | Returns: 70 | 71 | Returns: 72 | v: The variation value. 73 | data: The target data to analyze. 74 | x_axis: The scaled/adjusted x axis. 75 | """ 76 | logging.info(f'Parsing {fn}...') 77 | # Opening JSON file 78 | with open(self.config_path) as f: 79 | c = json.load(f) 80 | print("target:", self.target) 81 | v = '' 82 | if variation != '' : 83 | v = str(c[variation]) 84 | print("variation", v) 85 | 86 | data = pd.read_csv(fn) 87 | data = data.reset_index() 88 | # ns is the time scale of the block information 89 | spammer_accepted_time = data[data['Issuer Burn Policy'] 90 | == 0][data[self.target] != 0] 91 | 92 | non_spammer_accepted_time = data[data['Issuer Burn Policy'] 93 | == 1][data[self.target] != 0] 94 | 95 | spammer_not_accepted_time = data[data['Issuer Burn Policy'] 96 | == 0][data[self.target] == 0] 97 | 98 | non_spammer_not_accepted_time = data[data['Issuer Burn Policy'] 99 | == 1][data[self.target] == 0] 100 | 101 | spammer_accepted_time = ((spammer_accepted_time[self.target] / 102 | float(c["SlowdownFactor"]))) 103 | non_spammer_accepted_time = ((non_spammer_accepted_time[self.target] / 104 | float(c["SlowdownFactor"]))) 105 | 106 | spammer_not_accepted_time = (float(c["SimulationDuration"]) - ((spammer_not_accepted_time['Issuance Time Since Start (ns)'] / 107 | float(c["SlowdownFactor"])))) 108 | non_spammer_not_accepted_time = (float(c["SimulationDuration"]) - ((non_spammer_not_accepted_time['Issuance Time Since Start (ns)'] / 109 | float(c["SlowdownFactor"])))) 110 | 111 | return (v, 112 | spammer_accepted_time, 113 | non_spammer_accepted_time, 114 | spammer_not_accepted_time, 115 | non_spammer_not_accepted_time) 116 | 117 | def parse_acceptance_delay_file(self, fn, variation): 118 | """Parse the acceptance time latency among nodes. 119 | 120 | Returns: 121 | v: The variation value. 122 | data: The target data to analyze. 123 | """ 124 | logging.info(f'Parsing {fn}...') 125 | data = pd.read_csv(fn) 126 | 127 | # Opening JSON file 128 | with open(self.config_path) as f: 129 | c = json.load(f) 130 | 131 | v = '' 132 | if variation != '' : 133 | v = str(c[variation]) 134 | print("variation", v) 135 | 136 | # ns is the time scale of the block information 137 | accepted_delay_time = (data['Accepted Time Diff']/float(c["SlowdownFactor"])) 138 | 139 | return v, accepted_delay_time 140 | 141 | def parse_confirmation_threshold_file(self, fn, variation): 142 | """Parse the acceptance time latency among nodes. 143 | 144 | Returns: 145 | v: The variation value. 146 | data: The target data to analyze. 
147 | """ 148 | logging.info(f'Parsing {fn}...') 149 | data = pd.read_csv(fn) 150 | 151 | # Opening JSON file 152 | with open(self.config_path) as f: 153 | c = json.load(f) 154 | 155 | v = str(c[variation]) 156 | print("variation", v) 157 | 158 | target = 'Time (s)' 159 | # ns is the time scale of the block information 160 | unconfirmation_age = data[data['Title'] 161 | == 'UnconfirmationAge'][target] 162 | 163 | unconfirmation_age_since_tip = data[data['Title'] 164 | == 'UnconfirmationAgeSinceTip'][target] 165 | 166 | confirmation_age = data[data['Title'] 167 | == 'ConfirmationAge'][target] 168 | 169 | confirmation_age_since_tip = data[data['Title'] 170 | == 'ConfirmationAgeSinceTip'][target] 171 | 172 | unconfirmation_age = ((unconfirmation_age / 173 | float(c["SlowdownFactor"]))) 174 | 175 | unconfirmation_age_since_tip = ((unconfirmation_age_since_tip / 176 | float(c["SlowdownFactor"]))) 177 | 178 | confirmation_age = ((confirmation_age / 179 | float(c["SlowdownFactor"]))) 180 | 181 | confirmation_age_since_tip = ((confirmation_age_since_tip / 182 | float(c["SlowdownFactor"]))) 183 | 184 | return (v, 185 | unconfirmation_age, 186 | unconfirmation_age_since_tip, 187 | confirmation_age, 188 | confirmation_age_since_tip) 189 | 190 | def parse_mm_file(self, fn, variation): 191 | """Parse the witness weight files. 192 | 193 | Args: 194 | fc: The figure count. 195 | 196 | Returns: 197 | 198 | Returns: 199 | v: The variation value. 200 | data: The target data to analyze. 201 | x_axis: The scaled/adjusted x axis. 202 | """ 203 | logging.info(f'Parsing {fn}...') 204 | # Get the configuration setup of this simulation 205 | # Note currently we only consider the first node 206 | config_fn = re.sub('mm', 'aw', fn) 207 | config_fn = config_fn.replace('.csv', '.config') 208 | 209 | # Opening JSON file 210 | with open(config_fn) as f: 211 | c = json.load(f) 212 | 213 | v = str(c[variation]) 214 | 215 | data = pd.read_csv(fn) 216 | requested_messages = data['Number of Requested Messages'].tolist()[-1] 217 | return v, requested_messages 218 | 219 | def parse_ww_file(self, fn, variation): 220 | """Parse the witness weight files. 221 | 222 | Args: 223 | fc: The figure count. 224 | 225 | Returns: 226 | 227 | Returns: 228 | v: The variation value. 229 | data: The target data to analyze. 230 | x_axis: The scaled/adjusted x axis. 231 | """ 232 | logging.info(f'Parsing {fn}...') 233 | # Get the configuration setup of this simulation 234 | # Note currently we only consider the first node 235 | config_fn = re.sub('ww', 'aw', fn) 236 | config_fn = config_fn.replace('.csv', '.config') 237 | 238 | # Opening JSON file 239 | with open(config_fn) as f: 240 | c = json.load(f) 241 | 242 | v = str(c[variation]) 243 | 244 | data = pd.read_csv(fn) 245 | data['Witness Weight'] = (data['Witness Weight'] / 246 | float(c["NodesTotalWeight"])) 247 | 248 | # ns is the time scale of the aw outputs 249 | x_axis = (data['Time (ns)'] / 250 | float(c["SlowdownFactor"]) / float(self.one_second)) 251 | return v, data['Witness Weight'], x_axis 252 | 253 | def parse_throughput_file(self, fn, var): 254 | """Parse the throughput files. 255 | Args: 256 | fn: The input file name. 257 | var: The variated parameter. 258 | 259 | Returns: 260 | v: The variation value. 261 | tip_pool_size: The pool size list. 262 | processed_messages: The # of processed messages list. 263 | issued_messages: The # of issued messages list. 264 | x_axis: The scaled x axis. 
265 | """ 266 | logging.info(f'Parsing {fn}...') 267 | # Get the configuration setup of this simulation 268 | config_fn = re.sub('tp', 'aw', fn) 269 | config_fn = config_fn.replace('.csv', '.config') 270 | 271 | # Opening JSON file 272 | with open(config_fn) as f: 273 | c = json.load(f) 274 | 275 | v = str(c[var]) 276 | 277 | data = pd.read_csv(fn) 278 | 279 | # Chop data before the begining time 280 | data = data[data['ns since start'] >= 281 | self.x_axis_begin * float(c["SlowdownFactor"])] 282 | 283 | # Get the throughput details 284 | tip_pool_size = data['UndefinedColor (Tip Pool Size)'] 285 | processed_messages = data['UndefinedColor (Processed)'] 286 | issued_messages = data['# of Issued Messages'] 287 | 288 | # Return the scaled x axis 289 | x_axis = (data['ns since start'] / float(self.one_second) / 290 | float(c["SlowdownFactor"])) 291 | return v, (tip_pool_size, processed_messages, issued_messages, x_axis) 292 | 293 | def parse_all_throughput_file(self, fn, var): 294 | """Parse the all-tp files. 295 | Args: 296 | fn: The input file name. 297 | var: The variated parameter. 298 | 299 | Returns: 300 | v: The variation value. 301 | tip_pool_size: The pool size list. 302 | x_axis: The scaled x axis. 303 | """ 304 | logging.info(f'Parsing {fn}...') 305 | # Get the configuration setup of this simulation 306 | config_fn = re.sub('all-tp', 'aw', fn) 307 | config_fn = config_fn.replace('.csv', '.config') 308 | 309 | # Opening JSON file 310 | with open(config_fn) as f: 311 | c = json.load(f) 312 | 313 | v = str(c[var]) 314 | 315 | data = pd.read_csv(fn) 316 | 317 | # # Chop data before the begining time 318 | # data = data[data['ns since start'] >= 319 | # self.x_axis_begin * float(c["SlowdownFactor"])] 320 | 321 | # Get the throughput details 322 | tip_pool_sizes = data.loc[:, data.columns != 'ns since start'] 323 | 324 | # Return the scaled x axis 325 | x_axis = (data['ns since start'] / float(self.one_second) / 326 | float(c["SlowdownFactor"])) 327 | return v, (tip_pool_sizes, x_axis) 328 | 329 | def parse_confirmed_color_file(self, fn, var): 330 | """Parse the confirmed color files. 331 | 332 | Args: 333 | fn: The input file name. 334 | var: The variated parameter. 335 | 336 | Returns: 337 | v: The variation value. 338 | colored_node_counts: The colored node counts list. 339 | convergence_time: The convergence time. 340 | flips: The flips count. 341 | unconfirming_blue: The unconfirming count of blue branch. 342 | unconfirming_red: The unconfirming count of red branch. 343 | total_weight: Total weight of all nodes in the network. 344 | x_axis: The scaled x axis. 
345 | """ 346 | logging.info(f'Parsing {fn}...') 347 | # Get the configuration setup of this simulation 348 | config_fn = re.sub('cc', 'aw', fn) 349 | config_fn = config_fn.replace('.csv', '.config') 350 | 351 | # Opening JSON file 352 | with open(config_fn) as f: 353 | c = json.load(f) 354 | 355 | data = pd.read_csv(fn) 356 | 357 | # Chop data before the begining time 358 | data = data[data['ns since start'] >= 359 | self.x_axis_begin * float(c["SlowdownFactor"])] 360 | 361 | # Get the throughput details 362 | colored_node_aw = data[self.colored_confirmed_like_items] 363 | flips = data['Flips (Winning color changed)'].iloc[-1] 364 | 365 | # Unconfirmed Blue,Unconfirmed Red 366 | unconfirming_blue = data['Unconfirmed Blue'].iloc[-1] 367 | unconfirming_red = data['Unconfirmed Red'].iloc[-1] 368 | 369 | adversary_liked_aw_blue = data['Blue (Adversary Like Accumulated Weight)'] 370 | adversary_liked_aw_red = data['Red (Adversary Like Accumulated Weight)'] 371 | adversary_confirmed_aw_blue = data['Blue (Confirmed Adversary Weight)'] 372 | adversary_confirmed_aw_red = data['Red (Confirmed Adversary Weight)'] 373 | 374 | convergence_time = data['ns since issuance'].iloc[-1] 375 | convergence_time /= self.one_second 376 | convergence_time /= float(c["SlowdownFactor"]) 377 | 378 | colored_node_aw["Blue (Like Accumulated Weight)"] -= adversary_liked_aw_blue 379 | colored_node_aw["Red (Like Accumulated Weight)"] -= adversary_liked_aw_red 380 | colored_node_aw["Blue (Confirmed Accumulated Weight)"] -= adversary_confirmed_aw_blue 381 | colored_node_aw["Red (Confirmed Accumulated Weight)"] -= adversary_confirmed_aw_red 382 | 383 | v = str(c[var]) 384 | 385 | honest_total_weight = (c["NodesTotalWeight"] - 386 | adversary_liked_aw_blue.iloc[-1] - adversary_liked_aw_red.iloc[-1]) 387 | 388 | # Return the scaled x axis 389 | x_axis = ((data['ns since start']) / 390 | float(self.one_second * float(c["SlowdownFactor"]))) 391 | 392 | return v, (colored_node_aw, convergence_time, flips, unconfirming_blue, unconfirming_red, 393 | honest_total_weight, x_axis) 394 | 395 | def parse_node_file(self, fn, var): 396 | """Parse the node files. 397 | Args: 398 | fn: The input file name. 399 | var: The variated parameter. 400 | 401 | Returns: 402 | v: The variation value. 403 | confirmation_rate_depth: The confirmation rate depth. 
404 | """ 405 | logging.info(f'Parsing {fn}...') 406 | # Get the configuration setup of this simulation 407 | config_fn = re.sub('nd', 'aw', fn) 408 | config_fn = config_fn.replace('.csv', '.config') 409 | 410 | # Opening JSON file 411 | with open(config_fn) as f: 412 | c = json.load(f) 413 | 414 | v = str(c[var]) 415 | 416 | # Get the confirmation threshold 417 | weight_threshold = float(c['ConfirmationThreshold']) 418 | 419 | data = pd.read_csv(fn) 420 | 421 | # Get the minimum weight percentage 422 | mw = float(data['Min Confirmed Accumulated Weight'].min() 423 | ) / float(c['NodesTotalWeight']) 424 | 425 | confirmation_rate_depth = max(weight_threshold - mw, 0) * 100.0 426 | 427 | return v, confirmation_rate_depth 428 | -------------------------------------------------------------------------------- /scripts/plot_style.txt: -------------------------------------------------------------------------------- 1 | xtick.color: 323034 2 | ytick.color: 323034 3 | text.color: 323034 4 | lines.markeredgecolor: black 5 | patch.facecolor : bc80bd 6 | patch.force_edgecolor : True 7 | patch.linewidth: 0.8 8 | scatter.edgecolors: black 9 | grid.color: b1afb5 10 | axes.titlesize: 20 11 | figure.autolayout: True 12 | legend.title_fontsize: 20 13 | xtick.labelsize: 20 14 | ytick.labelsize: 20 15 | axes.labelsize: 20 16 | font.size: 10 17 | axes.prop_cycle : (cycler('color', ['8dd3c7','ffed6f','bebada','80b1d3', 'ccebc5', 'd9d9d9'])) 18 | mathtext.fontset: stix 19 | font.family: STIXGeneral 20 | lines.linewidth: 2 21 | legend.frameon: True 22 | legend.framealpha: 0.8 23 | legend.fontsize: 13 24 | legend.edgecolor: 0.9 25 | legend.borderpad: 0.2 26 | legend.columnspacing: 1.5 27 | legend.labelspacing: 0.4 28 | text.usetex: False 29 | axes.titlelocation: left 30 | axes.formatter.use_mathtext: True 31 | axes.autolimit_mode: round_numbers 32 | axes.labelpad: 3 33 | axes.formatter.limits: -4, 4 34 | axes.labelcolor: black 35 | axes.edgecolor: black 36 | axes.linewidth: 0.6 37 | axes.spines.right : False 38 | axes.spines.top : False 39 | axes.grid: False 40 | figure.titlesize: 23 41 | figure.dpi: 300 42 | figure.figsize: 6.4,3.2 -------------------------------------------------------------------------------- /scripts/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.18.1 2 | matplotlib==3.1.0 3 | pandas==0.25.0 4 | -------------------------------------------------------------------------------- /scripts/utils.py: -------------------------------------------------------------------------------- 1 | """The helper functions. 2 | """ 3 | 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | from matplotlib.lines import Line2D 7 | from networkx.drawing.nx_agraph import graphviz_layout 8 | import matplotlib 9 | import networkx as nx 10 | import pandas as pd 11 | import argparse 12 | import logging 13 | import json 14 | import os 15 | import csv 16 | import matplotlib.colors as mcolors 17 | 18 | colors = list(mcolors.TABLEAU_COLORS.values())[:10] 19 | color_dict = {} 20 | for i, color in enumerate(colors): 21 | color_dict[f'color_{i+1}'] = color + 'CC' 22 | colors = color_dict 23 | colornames = list(colors) 24 | burnPolicyNames = {"ManaBurn": [ 25 | "No Burn", "Anxious", "Greedy (+1)", "Greedy (+10)"], "ICCA+": ["Spammer", " ", "Best Effort"]} 26 | 27 | 28 | class ArgumentParserWithDefaults(argparse.ArgumentParser): 29 | """The argument parser to support RawTextHelpFormatter and show default values. 
30 | """ 31 | 32 | def add_argument(self, *args, help=None, default=None, **kwargs): 33 | if help is not None: 34 | kwargs['help'] = help 35 | if default is not None and args[0] != '-h': 36 | kwargs['default'] = default 37 | if help is not None: 38 | kwargs['help'] += '\nDefault: {}'.format(default) 39 | super().add_argument(*args, **kwargs) 40 | 41 | 42 | def move_results(src, dst): 43 | """Move the files from the source folder to the destination folder. 44 | 45 | Args: 46 | src: The source folder. 47 | dst: The destination folder. 48 | """ 49 | 50 | if not os.path.isdir(dst): 51 | os.mkdir(dst) 52 | logging.info(f'Moving folder {src} to {dst}...') 53 | os.system(f'mv {src}/*.config {dst}') 54 | os.system(f'mv {src}/*.csv {dst}') 55 | 56 | 57 | def get_row_col_counts(fc): 58 | """Return the row/columns counts of the figure. 59 | 60 | Args: 61 | fc: The figure count. 62 | 63 | Returns: 64 | rc: The row count. 65 | cc: The column count. 66 | """ 67 | rc = int(np.sqrt(fc)) 68 | while fc % rc != 0: 69 | rc -= 1 70 | cc = int(fc/rc) 71 | return (rc, cc) 72 | 73 | 74 | def get_diameter(fn, ofn, plot=False, transparent=False): 75 | """Construct the network graph and return the diameter 76 | 77 | Args: 78 | fn: The nw- file path. 79 | ofn: The figure output path. 80 | plot: Plot the network or not. 81 | transparent: The generated figure is transparent or not. 82 | 83 | Returns: 84 | diameter: The network diameter. 85 | """ 86 | 87 | # Init the matplotlib config 88 | font = {'family': 'Times New Roman', 89 | 'weight': 'bold', 90 | 'size': 14} 91 | matplotlib.rc('font', **font) 92 | 93 | data = pd.read_csv(fn) 94 | 95 | # Get the network information 96 | weighted_edges = [tuple(x) 97 | for x in data[['Peer ID', 'Neighbor ID', 'Network Delay (ns)']].to_numpy()] 98 | weighted_edges_pruned = set() 99 | # Remove the repetitive edges 100 | for u, v, w in weighted_edges: 101 | u_v, w = sorted([u, v]), w 102 | weighted_edges_pruned.add((u_v[0], u_v[1], w)) 103 | weighted_edges = list(weighted_edges_pruned) 104 | 105 | nodes = data.drop_duplicates('Peer ID')['Peer ID'].to_numpy() 106 | weights = data.drop_duplicates('Peer ID')['Weight'].to_numpy() 107 | 108 | # Construct the graph 109 | g = nx.Graph() 110 | g.add_weighted_edges_from(weighted_edges) 111 | 112 | diameter = nx.algorithms.distance_measures.diameter(g) 113 | if plot == False: 114 | return diameter 115 | 116 | lengths = {} 117 | for edge in weighted_edges: 118 | lengths[(edge[0], edge[1])] = dict(len=edge[2]) 119 | 120 | pos = graphviz_layout(g, prog='neato') 121 | ec = nx.draw_networkx_edges(g, pos, alpha=0.2) 122 | nc = nx.draw_networkx_nodes(g, pos, nodelist=nodes, node_color=weights, 123 | with_labels=False, node_size=10, cmap=plt.cm.jet) 124 | 125 | plt.colorbar(nc).ax.set_ylabel( 126 | 'Weights', rotation=270, fontsize=14, labelpad=14) 127 | plt.axis('off') 128 | 129 | plt.title(f'{len(nodes)} Nodes, Diameter = {diameter}') 130 | plt.savefig(ofn, transparent=transparent) 131 | plt.close() 132 | return diameter 133 | 134 | 135 | def parse_per_node_metrics(file): 136 | with open(file, newline='') as csvfile: 137 | reader = csv.reader(csvfile, delimiter=',', quotechar='|') 138 | header = next(reader) 139 | n_nodes = len(header)-1 140 | n_data = sum(1 for _ in reader) 141 | data = np.zeros((n_nodes, n_data)) 142 | times = np.zeros(n_data) 143 | csvfile.seek(0) 144 | i = -1 145 | for row in reader: 146 | if i < 0: 147 | i += 1 148 | continue 149 | times[i] = int(row[-1])*10**-9 150 | data[:, i] = row[:-1] 151 | i += 1 152 | return data, times 
153 | 154 | 155 | def parse_metric_names(file): 156 | with open(file, newline='') as csvfile: 157 | reader = csv.reader(csvfile, delimiter=',', quotechar='|') 158 | names = [] 159 | for row in reader: 160 | names.append(row[0]) 161 | return names 162 | 163 | 164 | def parse_latencies(file, cd): 165 | with open(file, newline='') as csvfile: 166 | reader = csv.reader(csvfile, delimiter=',', quotechar='|') 167 | next(reader) 168 | latencies = [[] for _ in range(cd['NODES_COUNT'])] 169 | times = [[] for _ in range(cd['NODES_COUNT'])] 170 | for row in reader: 171 | latencies[int(row[0])].append(int(row[2])*10**-9) 172 | times[int(row[0])].append(int(row[1])*10**-9) 173 | return latencies 174 | 175 | 176 | def parse_int_node_attributes(file, cd): 177 | with open(file, newline='') as csvfile: 178 | reader = csv.reader(csvfile, delimiter=',', quotechar='|') 179 | next(reader) 180 | attributes = np.zeros(cd['NODES_COUNT'], dtype=int) 181 | for row in reader: 182 | attributes[int(row[0])] = int(row[1]) 183 | return attributes 184 | 185 | 186 | def plot_per_node_metric(data, times, cd, title, ylab): 187 | fig, ax = plt.subplots(figsize=(8, 4)) 188 | ax.grid(linestyle='--') 189 | ax.set_xlabel("Time (s)") 190 | ax.set_ylabel(ylab) 191 | ax.title.set_text(title) 192 | burnPolicies = cd['BURN_POLICIES'] 193 | weights = cd['WEIGHTS'] 194 | for NodeID in range(cd['NODES_COUNT']): 195 | ax.plot(times, data[NodeID, :], color=colors[colornames[burnPolicies[NodeID]]], 196 | linewidth=4*weights[NodeID]/weights[0]) 197 | ax.set_xlim(0, times[-1]) 198 | ax.set_ylim(0) 199 | bps = list(set(burnPolicies)) 200 | ModeLines = [Line2D([0], [0], color=colors[colornames[bp]], lw=4) 201 | for bp in bps] 202 | fig.legend(ModeLines, [burnPolicyNames[cd["SchedulerType"]][i] 203 | for i in bps], loc="lower right") 204 | plt.savefig( 205 | f'{cd["SCHEDULER_FIGURE_OUTPUT_PATH"]}/{title}.png', bbox_inches='tight') 206 | plt.close() 207 | 208 | 209 | def plot_per_node_wo_spammer_metric(data, times, cd, title, ylab): 210 | fig, ax = plt.subplots(figsize=(8, 4)) 211 | ax.grid(linestyle='--') 212 | ax.set_xlabel("Time (s)") 213 | ax.set_ylabel(ylab) 214 | ax.title.set_text(title + ' wo Spammer') 215 | burnPolicies = cd['BURN_POLICIES'] 216 | weights = cd['WEIGHTS'] 217 | for NodeID in range(cd['NODES_COUNT']): 218 | # Skip spammer 219 | if burnPolicies[NodeID] == 0: 220 | continue 221 | ax.plot(times, data[NodeID, :], color=colors[colornames[burnPolicies[NodeID]]], 222 | linewidth=4*weights[NodeID]/weights[0]) 223 | ax.set_xlim(0, times[-1]) 224 | ax.set_ylim(0) 225 | bps = list(set(burnPolicies)) 226 | # ModeLines = [Line2D([0], [0], color=colors[colornames[bp]], lw=4) 227 | # for bp in bps] 228 | # fig.legend(ModeLines, [burnPolicyNames[cd["SchedulerType"]][i] 229 | # for i in bps], loc="lower right") 230 | plt.savefig( 231 | f'{cd["SCHEDULER_FIGURE_OUTPUT_PATH"]}/{title}_wo_spammer.png', bbox_inches='tight') 232 | plt.close() 233 | 234 | 235 | def plot_per_node_rates(messages, times, cd, title): 236 | fig, ax = plt.subplots(2, 1, sharex=True, figsize=(8, 8)) 237 | ax[0].grid(linestyle='--') 238 | ax[0].set_xlabel("Time (s)") 239 | ax[0].set_ylabel("Rate (Blocks/s)") 240 | ax[1].grid(linestyle='--') 241 | ax[1].set_xlabel("Time (s)") 242 | ax[1].set_ylabel("Scaled Rate") 243 | ax[0].title.set_text(title) 244 | avg_window = 50 245 | burnPolicies = cd['BURN_POLICIES'] 246 | weights = cd['WEIGHTS'] 247 | for NodeID in range(cd['NODES_COUNT']): 248 | rate = (messages[NodeID, 1:]-messages[NodeID, :-1]) * \ 249 | 
1000/(cd['MONITOR_INTERVAL']) 250 | ax[0].plot(times[avg_window:], np.convolve(np.ones(avg_window)/avg_window, rate, 'valid'), 251 | color=colors[colornames[burnPolicies[NodeID]]], linewidth=4*weights[NodeID]/weights[0]) 252 | ax[1].plot(times[avg_window:], np.convolve(np.ones(avg_window)/avg_window, rate, 'valid')*sum(weights) / 253 | weights[NodeID], color=colors[colornames[burnPolicies[NodeID]]], linewidth=4*weights[NodeID]/weights[0]) 254 | ax[0].set_xlim(0, times[-1]) 255 | ax[1].set_xlim(0, times[-1]) 256 | ax[0].set_ylim(0) 257 | ax[1].set_ylim(0, 500) 258 | bps = list(set(burnPolicies)) 259 | ModeLines = [Line2D([0], [0], color=colors[colornames[bp]], lw=4) 260 | for bp in bps] 261 | fig.legend(ModeLines, [burnPolicyNames[cd["SchedulerType"]][i] 262 | for i in bps], loc="lower right") 263 | plt.savefig(cd['RESULTS_PATH']+'/'+cd['SCRIPT_START_TIME'] + 264 | '/figures/'+title+'.png', bbox_inches='tight') 265 | plt.close() 266 | 267 | 268 | def plot_latency_cdf(latencies, cd, title): 269 | fig, ax = plt.subplots(figsize=(8, 4)) 270 | ax.set_xlabel("Latency (s)") 271 | ax.grid(linestyle='--') 272 | ax.title.set_text(title) 273 | maxlats = [max(latencies[NodeID]) 274 | for NodeID in range(len(latencies)) if latencies[NodeID]] 275 | if not maxlats: 276 | return 277 | maxval = max(maxlats) 278 | nbins = 1000 279 | bins = np.arange(0, maxval+maxval/nbins, maxval/nbins) 280 | pdf = np.zeros(len(bins)) 281 | burnPolicies = cd['BURN_POLICIES'] 282 | weights = cd['WEIGHTS'] 283 | for NodeID in range(len(latencies)): 284 | if not latencies[NodeID]: 285 | continue 286 | i = 0 287 | if latencies[NodeID]: 288 | lats = sorted(latencies[NodeID]) 289 | for lat in lats: 290 | while i < len(bins): 291 | while lat > bins[i]: 292 | i += 1 293 | break 294 | pdf[i-1] += 1 295 | pdf = pdf/sum(pdf) 296 | cdf = np.cumsum(pdf) 297 | ax.plot(bins, cdf, color=colors[colornames[burnPolicies[NodeID]]], 298 | linewidth=4*weights[NodeID]/weights[0]) 299 | 300 | ax.set_xlim(0, bins[-1]) 301 | ax.set_ylim(0, 1.1) 302 | bps = list(set(burnPolicies)) 303 | ModeLines = [Line2D([0], [0], color=colors[colornames[bp]], lw=4) 304 | for bp in bps] 305 | fig.legend(ModeLines, [burnPolicyNames[cd["SchedulerType"]][i] 306 | for i in bps], loc="lower right") 307 | plt.savefig(cd['RESULTS_PATH']+'/'+cd['SCRIPT_START_TIME'] + 308 | '/figures/'+title+'.png', bbox_inches='tight') 309 | plt.close() 310 | 311 | 312 | def plot_total_traffic(data, times, cd, title, ylim=None): 313 | _, ax = plt.subplots(figsize=(8, 4)) 314 | ax.grid(linestyle='--') 315 | ax.set_xlabel("Time (s)") 316 | ax.set_ylabel("Rate (Blocks/s)") 317 | ax.title.set_text(title) 318 | totals = np.sum(data, axis=0) 319 | 320 | # read config from mb.config 321 | with open(cd['CONFIGURATION_PATH']) as f: 322 | c = json.load(f) 323 | 324 | totals = np.sum(data, axis=0) 325 | scalar = 1 # (c['SimulationDuration']*1e-9) / 60 326 | print(scalar) 327 | avg_window = int(10 * scalar) 328 | rate = (totals[avg_window:]-totals[:-avg_window]) * \ 329 | 1000/(avg_window*cd['MONITOR_INTERVAL']) 330 | print(rate) 331 | 332 | plt.bar(times[avg_window:][::avg_window], 333 | rate[::avg_window], 1, label='Disseminated Blocks') 334 | 335 | partition = int(15 * (c['SimulationDuration']*1e-9) / 60) 336 | # remainder = len(times[avg_window:][::10]) % 4 337 | # print(remainder) 338 | schedulingRate = c['SchedulingRate'] 339 | congestions = ([int(c['CongestionPeriods'][0]*schedulingRate)] * (partition + 1) + 340 | [int(c['CongestionPeriods'][1]*schedulingRate)] * (partition) + 341 | 
[int(c['CongestionPeriods'][2]*schedulingRate)] * (partition) + 342 | [int(c['CongestionPeriods'][3]*schedulingRate)] * (partition)) 343 | plt.plot(congestions, 'r', label='Congestion') 344 | print(cd["SCHEDULER_FIGURE_OUTPUT_PATH"]) 345 | 346 | ax.set_xlim(0, times[-1]) 347 | if ylim is not None: 348 | ax.set_ylim(0, ylim) 349 | plt.savefig( 350 | f'{cd["SCHEDULER_FIGURE_OUTPUT_PATH"]}/{title}{str(ylim)}.png', bbox_inches='tight') 351 | else: 352 | plt.savefig( 353 | f'{cd["SCHEDULER_FIGURE_OUTPUT_PATH"]}/{title}.png', bbox_inches='tight') 354 | plt.close() 355 | 356 | 357 | def plot_total_rate(data, times, cd, title, ylim=None): 358 | _, ax = plt.subplots(figsize=(8, 4)) 359 | ax.grid(linestyle='--') 360 | ax.set_xlabel("Time (s)") 361 | ax.set_ylabel("Rate (Blocks/s)") 362 | ax.title.set_text(title) 363 | totals = np.sum(data, axis=0) 364 | avg_window = 10 365 | rate = (totals[avg_window:]-totals[:-avg_window]) * \ 366 | 1000/(avg_window*cd['MONITOR_INTERVAL']) 367 | ax.plot(times[avg_window:], rate, color='k') 368 | ax.set_xlim(0, times[-1]) 369 | if ylim is not None: 370 | ax.set_ylim(0, ylim) 371 | plt.savefig(cd['RESULTS_PATH']+'/'+cd['SCRIPT_START_TIME'] + 372 | '/figures/'+title+str(ylim)+'.png', bbox_inches='tight') 373 | else: 374 | plt.savefig(cd['RESULTS_PATH']+'/'+cd['SCRIPT_START_TIME'] + 375 | '/figures/'+title+'.png', bbox_inches='tight') 376 | plt.close() 377 | 378 | 379 | def plot_traffic(data, title, cd): 380 | plt.clf() 381 | # Extract data columns 382 | slot_ids = data['Slot ID'] 383 | blocks_counts = data['Blocks Count'] 384 | 385 | # Create a bar plot using Matplotlib 386 | plt.bar(slot_ids, blocks_counts, label='Stored Blocks') 387 | 388 | # Add axis labels and title 389 | plt.xlabel('Slot') 390 | plt.xlim(0, max(slot_ids)+1) 391 | plt.ylabel('Blocks per Slot') 392 | plt.title('Traffic') 393 | 394 | # Add a red line representing Congestions 395 | quarter = len(slot_ids) // 4 396 | congestions = [50] * quarter + [150] * \ 397 | quarter + [150] * quarter + [50] * quarter 398 | plt.plot(congestions, 'r', label='Congestion') 399 | 400 | # Add legend 401 | plt.legend() 402 | 403 | plt.savefig(cd['RESULTS_PATH']+'/'+cd['SCRIPT_START_TIME'] + 404 | '/figures/'+title+'.png', bbox_inches='tight') 405 | plt.close() 406 | 407 | 408 | def plot_latency(latencies, times, cd, title): 409 | fig, ax = plt.subplots(figsize=(8, 4)) 410 | ax.set_xlabel("Time (s)") 411 | ax.set_ylabel("Latency (ms)") 412 | ax.grid(linestyle='--') 413 | ax.title.set_text(title) 414 | endtime = max([times[NodeID][-1] for NodeID in range(len(times))]) 415 | nbins = 100 416 | bins = np.arange(0, endtime, endtime/nbins) 417 | for NodeID in range(len(latencies)): 418 | lats = np.zeros(nbins) 419 | i = 0 420 | for bin in bins: 421 | if times[NodeID][i] < bin: 422 | continue 423 | # incomplete 424 | -------------------------------------------------------------------------------- /simulation/counter.go: -------------------------------------------------------------------------------- 1 | package simulation 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "github.com/iotaledger/hive.go/core/generics/constraints" 8 | "github.com/iotaledger/multivers-simulation/network" 9 | 10 | "github.com/iotaledger/multivers-simulation/multiverse" 11 | ) 12 | 13 | // Generic counter //////////////////////////////////////////////////////////////////////////////////////////////////// 14 | 15 | // CounterElements defines types that can be used as elements by the generic Counters. 
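// The generic counters below are instantiated in metricsmanager.go, e.g. as
// MapCounters[multiverse.Color, int64] for per-color metrics, MapCounters[network.PeerID, int64]
// for per-peer metrics, and AtomicCounters[string, int64] for named global counters.
// A minimal usage sketch:
//
//	counters := NewCounters[multiverse.Color, int64]()
//	counters.CreateCounter("opinions", []multiverse.Color{multiverse.Red, multiverse.Blue})
//	counters.Add("opinions", 1, multiverse.Red)
//	_ = counters.Get("opinions", multiverse.Red) // 1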
16 | type CounterElements interface {
17 | 	multiverse.Color | network.PeerID | string
18 | }
19 | 
20 | // MapCounters is a generic counter that can be used to count any type of elements.
21 | type MapCounters[T CounterElements, V constraints.Integer] struct {
22 | 	counters     map[string]map[T]V
23 | 	counterMutex sync.RWMutex
24 | }
25 | 
26 | // NewCounters creates a new MapCounters.
27 | func NewCounters[T CounterElements, V constraints.Integer]() *MapCounters[T, V] {
28 | 	return &MapCounters[T, V]{
29 | 		counters: make(map[string]map[T]V),
30 | 	}
31 | }
32 | 
33 | // CreateCounter creates a new counting map under provided counterKey, with provided elements and optional initial values.
34 | func (c *MapCounters[T, V]) CreateCounter(counterKey string, elements []T, values ...V) {
35 | 	c.counterMutex.Lock()
36 | 	defer c.counterMutex.Unlock()
37 | 	if len(elements) == 0 {
38 | 		return
39 | 	}
40 | 	if _, ok := c.counters[counterKey]; !ok {
41 | 		c.counters[counterKey] = make(map[T]V)
42 | 	}
43 | 	for i, element := range elements {
44 | 		if len(values) == len(elements) {
45 | 			c.counters[counterKey][element] = values[i] // one initial value provided per element
46 | 		} else if len(elements) > 1 && len(values) == 1 {
47 | 			c.counters[counterKey][element] = values[0] // a single initial value shared by all elements
48 | 		} else {
49 | 			c.counters[counterKey][element] = 0
50 | 		}
51 | 	}
52 | }
53 | 
54 | func (c *MapCounters[T, V]) Add(counterKey string, value V, element T) {
55 | 	c.counterMutex.Lock()
56 | 	defer c.counterMutex.Unlock()
57 | 	counter, ok := c.counters[counterKey]
58 | 	if !ok {
59 | 		panic(fmt.Sprintf("Trying add to not initiated counter, key: %s, element: %s", counterKey, element))
60 | 	}
61 | 	counter[element] += value
62 | }
63 | 
64 | func (c *MapCounters[T, V]) Set(counterKey string, value V, element T) {
65 | 	c.counterMutex.Lock()
66 | 	defer c.counterMutex.Unlock()
67 | 	counter, ok := c.counters[counterKey]
68 | 	if !ok {
69 | 		panic(fmt.Sprintf("Trying set for not initiated counter, key: %s, element: %s", counterKey, element))
70 | 	}
71 | 	counter[element] = value
72 | }
73 | 
74 | func (c *MapCounters[T, V]) Get(counterKey string, element T) V {
75 | 	c.counterMutex.RLock()
76 | 	defer c.counterMutex.RUnlock()
77 | 	counter, ok := c.counters[counterKey]
78 | 	if !ok {
79 | 		panic(fmt.Sprintf("Trying get from not initiated counter, key: %s, element: %s", counterKey, element))
80 | 	}
81 | 	return counter[element]
82 | }
83 | 
84 | // endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
85 | 
86 | // region AtomicCounters ////////////////////////////////////////////////////////////////////////////
87 | 
88 | type AtomicCounters[T CounterElements, V constraints.Integer] struct {
89 | 	counters      map[T]V
90 | 	countersMutex sync.RWMutex
91 | }
92 | 
93 | func NewAtomicCounters[T CounterElements, V constraints.Integer]() *AtomicCounters[T, V] {
94 | 	return &AtomicCounters[T, V]{
95 | 		counters: make(map[T]V),
96 | 	}
97 | }
98 | 
99 | func (ac *AtomicCounters[T, V]) CreateCounter(counterKey T, initValue V) {
100 | 	ac.countersMutex.Lock()
101 | 	defer ac.countersMutex.Unlock()
102 | 	// if the key does not exist yet, create a new counter
103 | 	if _, ok := ac.counters[counterKey]; !ok {
104 | 		ac.counters[counterKey] = initValue
105 | 	}
106 | }
107 | 
108 | func (ac *AtomicCounters[T, V]) Get(counterKey T) V {
109 | 	ac.countersMutex.RLock()
110 | 	defer ac.countersMutex.RUnlock()
111 | 	counter, ok := ac.counters[counterKey]
112 | 	if !ok {
113 | 		panic(fmt.Sprintf("Trying get from not initiated counter, key: %s", counterKey))
114 | 	}
115 | 	return counter
116 | }
117 | 
118 | func (ac *AtomicCounters[T, V]) Add(counterKey T, value V) {
119 | 	ac.countersMutex.Lock()
120 | 	defer ac.countersMutex.Unlock()
121 | 	_, ok := ac.counters[counterKey]
122 | 	if !ok {
123 | 		panic(fmt.Sprintf("Trying add to not initiated counter, key: %s", counterKey))
124 | 	}
125 | 	ac.counters[counterKey] += value // write back into the map so the increment is not lost
126 | }
127 | 
128 | func (ac *AtomicCounters[T, V]) Set(counterKey T, value V) {
129 | 	ac.countersMutex.Lock()
130 | 	defer ac.countersMutex.Unlock()
131 | 	_, ok := ac.counters[counterKey]
132 | 	if !ok {
133 | 		panic(fmt.Sprintf("Trying set for not initiated counter, key: %s", counterKey))
134 | 	}
135 | 	ac.counters[counterKey] = value
136 | }
137 | 
138 | // endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
139 | 
140 | // region ColorCounters ////////////////////////////////////////////////////////////////////////////
141 | 
142 | type ColorCounters struct {
143 | 	counts map[string]map[multiverse.Color]int64
144 | 	mu     sync.RWMutex
145 | }
146 | 
147 | func NewColorCounters() *ColorCounters {
148 | 	return &ColorCounters{
149 | 		counts: make(map[string]map[multiverse.Color]int64),
150 | 	}
151 | }
152 | 
153 | // CreateCounter adds a new counter under the given key with the provided initial values.
154 | func (c *ColorCounters) CreateCounter(counterKey string, colors []multiverse.Color, initValues []int64) {
155 | 	c.mu.Lock()
156 | 	defer c.mu.Unlock()
157 | 	if len(initValues) == 0 {
158 | 		return
159 | 	}
160 | 	// if the key does not exist yet, create a new map
161 | 	if innerMap, ok := c.counts[counterKey]; !ok {
162 | 		innerMap = make(map[multiverse.Color]int64)
163 | 		for i, color := range colors {
164 | 			innerMap[color] = initValues[i]
165 | 		}
166 | 		c.counts[counterKey] = innerMap
167 | 	}
168 | }
169 | 
170 | func (c *ColorCounters) Add(counterKey string, value int64, color multiverse.Color) {
171 | 	c.mu.Lock()
172 | 	defer c.mu.Unlock()
173 | 	innerMap, ok := c.counts[counterKey]
174 | 	if !ok {
175 | 		panic(fmt.Sprintf("Trying add to not initiated counter, key: %s, color: %s", counterKey, color))
176 | 	}
177 | 	innerMap[color] += value
178 | }
179 | 
180 | func (c *ColorCounters) Set(counterKey string, value int64, color multiverse.Color) {
181 | 	c.mu.Lock()
182 | 	defer c.mu.Unlock()
183 | 	innerMap, ok := c.counts[counterKey]
184 | 	if !ok {
185 | 		panic(fmt.Sprintf("Trying set the not initiated counter value, key: %s, color: %s", counterKey, color))
186 | 	}
187 | 	innerMap[color] = value
188 | }
189 | 
190 | // Get gets the counter value for provided key and color.
191 | func (c *ColorCounters) Get(counterKey string, color multiverse.Color) int64 { 192 | c.mu.RLock() 193 | defer c.mu.RUnlock() 194 | innerMap, ok := c.counts[counterKey] 195 | if !ok { 196 | panic(fmt.Sprintf("Trying get value for not initiated counter, key: %s, color: %s", counterKey, color)) 197 | } 198 | return innerMap[color] 199 | } 200 | 201 | func (c *ColorCounters) GetInt(counterKey string, color multiverse.Color) int { 202 | v := c.Get(counterKey, color) 203 | return int(v) 204 | } 205 | 206 | // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// 207 | -------------------------------------------------------------------------------- /simulation/metrics_setup.go: -------------------------------------------------------------------------------- 1 | package simulation 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/iotaledger/hive.go/events" 7 | "github.com/iotaledger/multivers-simulation/config" 8 | "github.com/iotaledger/multivers-simulation/multiverse" 9 | "github.com/iotaledger/multivers-simulation/network" 10 | ) 11 | 12 | // SetupMetrics registers all metrics that are used in the simulation, add any new metric registration here. 13 | func (s *MetricsManager) SetupMetrics() { 14 | // counters for double spending 15 | s.ColorCounters.CreateCounter("opinions", s.uRGBColors, int64(config.Params.NodesCount), 0, 0, 0) 16 | s.ColorCounters.CreateCounter("confirmedNodes", s.uRGBColors) 17 | s.ColorCounters.CreateCounter("opinionsWeights", s.uRGBColors) 18 | s.ColorCounters.CreateCounter("likeAccumulatedWeight", s.uRGBColors) 19 | s.ColorCounters.CreateCounter("processedMessages", s.uRGBColors) 20 | s.ColorCounters.CreateCounter("requestedMissingMessages", s.uRGBColors) 21 | s.ColorCounters.CreateCounter("tipPoolSizes", s.uRGBColors) 22 | 23 | s.ColorCounters.CreateCounter("colorUnconfirmed", s.RGBColors) 24 | s.ColorCounters.CreateCounter("confirmedAccumulatedWeight", s.RGBColors) 25 | s.ColorCounters.CreateCounter("confirmedAccumulatedWeight", s.RGBColors) 26 | s.ColorCounters.CreateCounter("unconfirmedAccumulatedWeight", s.RGBColors) 27 | 28 | s.AdversaryCounters.CreateCounter("likeAccumulatedWeight", s.RGBColors) 29 | s.AdversaryCounters.CreateCounter("opinions", s.RGBColors, int64(s.adversaryNodesCount), 0, 0, 0) 30 | s.AdversaryCounters.CreateCounter("confirmedNodes", s.RGBColors) 31 | s.AdversaryCounters.CreateCounter("confirmedAccumulatedWeight", s.RGBColors) 32 | 33 | // all peers and tip pool sizes and processed messages per color 34 | for _, peerID := range s.allPeerIDs { 35 | tipCounterName := fmt.Sprint("tipPoolSizes-", peerID) 36 | processedCounterName := fmt.Sprint("processedMessages-", peerID) 37 | s.ColorCounters.CreateCounter(tipCounterName, s.uRGBColors) 38 | s.ColorCounters.CreateCounter(processedCounterName, s.uRGBColors) 39 | } 40 | // Initialize the minConfirmedWeight to be the max value (i.e., the total weight) 41 | s.PeerCounters.CreateCounter("minConfirmedAccumulatedWeight", s.allPeerIDs, int64(config.Params.NodesTotalWeight)) 42 | s.PeerCounters.CreateCounter("unconfirmationCount", s.allPeerIDs, 0) 43 | s.PeerCounters.CreateCounter("issuedMessages", s.allPeerIDs, 0) 44 | s.PeerCounters.CreateCounter("confirmedMessageCount", s.watchedPeerIDs) 45 | 46 | s.GlobalCounters.CreateCounter("flips", 0) 47 | s.GlobalCounters.CreateCounter("honestFlips", 0) 48 | s.GlobalCounters.CreateCounter("tps", 0) 49 | s.GlobalCounters.CreateCounter("relevantValidators", 0) 50 | 
s.GlobalCounters.CreateCounter("issuedMessages", 0) 51 | 52 | } 53 | 54 | func (s *MetricsManager) SetupMetricsCollection() { 55 | for _, p := range s.network.Peers { 56 | peerID := p.ID 57 | 58 | p.Node.(multiverse.NodeInterface).Tangle().OpinionManager.Events().OpinionChanged.Attach(events.NewClosure(func(oldOpinion multiverse.Color, newOpinion multiverse.Color, weight int64) { 59 | s.opinionChangedCollectorFunc(oldOpinion, newOpinion, weight, peerID) 60 | })) 61 | p.Node.(multiverse.NodeInterface).Tangle().OpinionManager.Events().ColorConfirmed.Attach(events.NewClosure(func(confirmedColor multiverse.Color, weight int64) { 62 | s.colorConfirmedCollectorFunc(confirmedColor, weight, peerID) 63 | })) 64 | p.Node.(multiverse.NodeInterface).Tangle().OpinionManager.Events().ColorUnconfirmed.Attach(events.NewClosure(func(unconfirmedColor multiverse.Color, unconfirmedSupport int64, weight int64) { 65 | s.colorUnconfirmedCollectorFunc(unconfirmedColor, unconfirmedSupport, weight, peerID) 66 | })) 67 | p.Node.(multiverse.NodeInterface).Tangle().OpinionManager.Events().MinConfirmedWeightUpdated.Attach(events.NewClosure(func(minConfirmedWeight int64) { 68 | s.minConfirmedWeightUpdatedCollectorFunc(minConfirmedWeight, peerID) 69 | })) 70 | tipCounterName := fmt.Sprint("tipPoolSizes-", peerID) 71 | processedCounterName := fmt.Sprint("processedMessages-", peerID) 72 | p.Node.(multiverse.NodeInterface).Tangle().TipManager.Events.MessageProcessed.Attach(events.NewClosure( 73 | func(opinion multiverse.Color, tipPoolSize int, processedMessages uint64, issuedMessages int64) { 74 | s.ColorCounters.Set(tipCounterName, int64(tipPoolSize), opinion) 75 | s.ColorCounters.Set(processedCounterName, int64(processedMessages), opinion) 76 | s.PeerCounters.Set("issuedMessages", issuedMessages, peerID) 77 | })) 78 | } 79 | 80 | // Here we only monitor the opinion weight of node w/ the highest weight 81 | highestWeightPeer := s.network.Peers[s.highestWeightPeerID] 82 | highestWeightPeer.Node.(multiverse.NodeInterface).Tangle().OpinionManager.Events().ApprovalWeightUpdated.Attach(events.NewClosure( 83 | s.approvalWeightUpdatedCollectorFunc, 84 | )) 85 | 86 | // Here we only monitor the tip pool size of node w/ the highest weight 87 | highestWeightPeer.Node.(multiverse.NodeInterface).Tangle().TipManager.Events.MessageProcessed.Attach(events.NewClosure( 88 | s.messageProcessedCollectFunc, 89 | )) 90 | highestWeightPeer.Node.(multiverse.NodeInterface).Tangle().Requester.Events.Request.Attach(events.NewClosure( 91 | s.requestMissingMessageCollectFunc, 92 | )) 93 | } 94 | 95 | func (s *MetricsManager) opinionChangedCollectorFunc(oldOpinion multiverse.Color, newOpinion multiverse.Color, weight int64, peerID network.PeerID) { 96 | s.ColorCounters.Add("opinions", -1, oldOpinion) 97 | s.ColorCounters.Add("opinions", 1, newOpinion) 98 | 99 | s.ColorCounters.Add("likeAccumulatedWeight", -weight, oldOpinion) 100 | s.ColorCounters.Add("likeAccumulatedWeight", weight, newOpinion) 101 | 102 | // todo implement in simulator 103 | //r, g, b := getLikesPerRGB(colorCounters, "opinions") 104 | //if mostLikedColorChanged(r, g, b, &mostLikedColor) { 105 | // atomicCounters.Add("flips", 1) 106 | //} 107 | 108 | if network.IsAdversary(int(peerID)) { 109 | s.AdversaryCounters.Add("likeAccumulatedWeight", -weight, oldOpinion) 110 | s.AdversaryCounters.Add("likeAccumulatedWeight", weight, newOpinion) 111 | s.AdversaryCounters.Add("opinions", -1, oldOpinion) 112 | s.AdversaryCounters.Add("opinions", 1, newOpinion) 113 | } 114 | 115 | //ar, ag, ab := 
getLikesPerRGB(adversaryCounters, "opinions") 116 | //// honest nodes likes status only, flips 117 | //if mostLikedColorChanged(r-ar, g-ag, b-ab, &honestOnlyMostLikedColor) { 118 | // atomicCounters.Add("honestFlips", 1) 119 | //} 120 | } 121 | 122 | func (s *MetricsManager) colorConfirmedCollectorFunc(confirmedColor multiverse.Color, weight int64, peerID network.PeerID) { 123 | s.ColorCounters.Add("confirmedNodes", 1, confirmedColor) 124 | s.ColorCounters.Add("confirmedAccumulatedWeight", weight, confirmedColor) 125 | if network.IsAdversary(int(peerID)) { 126 | s.AdversaryCounters.Add("confirmedNodes", 1, confirmedColor) 127 | s.AdversaryCounters.Add("confirmedAccumulatedWeight", weight, confirmedColor) 128 | } 129 | } 130 | 131 | func (s *MetricsManager) colorUnconfirmedCollectorFunc(unconfirmedColor multiverse.Color, unconfirmedSupport int64, weight int64, peerID network.PeerID) { 132 | s.ColorCounters.Add("colorUnconfirmed", 1, unconfirmedColor) 133 | s.ColorCounters.Add("confirmedNodes", -1, unconfirmedColor) 134 | 135 | s.ColorCounters.Add("unconfirmedAccumulatedWeight", weight, unconfirmedColor) 136 | s.ColorCounters.Add("confirmedAccumulatedWeight", -weight, unconfirmedColor) 137 | 138 | // When the color is unconfirmed, the min confirmed accumulated weight should be reset 139 | s.PeerCounters.Set("minConfirmedAccumulatedWeight", int64(config.Params.NodesTotalWeight), peerID) 140 | 141 | // Accumulate the unconfirmed count for each node 142 | s.PeerCounters.Add("unconfirmationCount", 1, peerID) 143 | } 144 | 145 | func (s *MetricsManager) minConfirmedWeightUpdatedCollectorFunc(minConfirmedWeight int64, peerID network.PeerID) { 146 | if s.PeerCounters.Get("minConfirmedAccumulatedWeight", peerID) > minConfirmedWeight { 147 | s.PeerCounters.Set("minConfirmedAccumulatedWeight", minConfirmedWeight, peerID) 148 | } 149 | } 150 | 151 | func (s *MetricsManager) approvalWeightUpdatedCollectorFunc(opinion multiverse.Color, deltaWeight int64) { 152 | s.ColorCounters.Add("opinionsWeights", deltaWeight, opinion) 153 | } 154 | 155 | func (s *MetricsManager) messageProcessedCollectFunc(opinion multiverse.Color, tipPoolSize int, processedMessages uint64, issuedMessages int64) { 156 | s.ColorCounters.Set("tipPoolSizes", int64(tipPoolSize), opinion) 157 | s.ColorCounters.Set("processedMessages", int64(processedMessages), opinion) 158 | s.GlobalCounters.Set("issuedMessages", issuedMessages) 159 | } 160 | 161 | func (s *MetricsManager) requestMissingMessageCollectFunc(messageID multiverse.MessageID) { 162 | s.ColorCounters.Add("requestedMissingMessages", 1, multiverse.UndefinedColor) 163 | } 164 | -------------------------------------------------------------------------------- /simulation/metrics_writers.go: -------------------------------------------------------------------------------- 1 | package simulation 2 | 3 | import ( 4 | "encoding/csv" 5 | "fmt" 6 | "os" 7 | "path" 8 | "strconv" 9 | "sync" 10 | "time" 11 | 12 | "github.com/iotaledger/hive.go/events" 13 | "github.com/iotaledger/hive.go/typeutils" 14 | "github.com/iotaledger/multivers-simulation/config" 15 | "github.com/iotaledger/multivers-simulation/multiverse" 16 | "github.com/iotaledger/multivers-simulation/network" 17 | ) 18 | 19 | type csvRows [][]string 20 | 21 | func singleRowFunc(row []string) func() csvRows { 22 | return func() csvRows { 23 | return csvRows{row} 24 | } 25 | } 26 | 27 | // SetupWriters sets up the csv writers for the simulation. 
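// Writers are registered with one of the strategies defined further below in this file:
// DumpOnce ("ad") and DumpOnceStreaming ("nw") write immediately, DumpOnTick ("ds", "tp",
// "mm", "cc", "all-tp") writes on every metrics tick, DumpOnEvent ("ww", "aw") writes when
// the corresponding tangle event fires, and DumpOnShutdown ("nd") writes once at the end.
// The key becomes the result file name prefix, e.g. "cc-<start time>.csv" (see createWriter).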
28 | func (s *MetricsManager) SetupWriters() { 29 | s.DumpOnce("ad", 30 | []string{"AdversaryGroupID", "Strategy", "AdversaryCount", "q", "ns since issuance"}, 31 | func() csvRows { 32 | rows := make(csvRows, 0) 33 | for groupID, group := range s.network.AdversaryGroups { 34 | row := []string{ 35 | strconv.FormatInt(int64(groupID), 10), 36 | network.AdversaryTypeToString(group.AdversaryType), 37 | strconv.FormatInt(int64(len(group.NodeIDs)), 10), 38 | strconv.FormatFloat(group.GroupMana/float64(config.Params.NodesTotalWeight), 'f', 6, 64), 39 | strconv.FormatInt(time.Since(s.simulationStartTime).Nanoseconds(), 10), 40 | } 41 | rows = append(rows, row) 42 | } 43 | return rows 44 | }, 45 | ) 46 | s.DumpOnceStreaming("nw", 47 | []string{"Peer ID", "Neighbor ID", "Network Delay (ns)", "Packet Loss (%)", "Weight"}, 48 | func() <-chan []string { 49 | c := make(chan []string) 50 | go func() { 51 | for _, peer := range s.network.Peers { 52 | for neighbor, connection := range peer.Neighbors { 53 | record := []string{ 54 | strconv.FormatInt(int64(peer.ID), 10), 55 | strconv.FormatInt(int64(neighbor), 10), 56 | strconv.FormatInt(connection.NetworkDelay().Nanoseconds(), 10), 57 | strconv.FormatInt(int64(connection.PacketLoss()*100), 10), 58 | strconv.FormatInt(int64(s.network.WeightDistribution.Weight(peer.ID)), 10), 59 | } 60 | // At this moment csv writer will be flushed 61 | c <- record 62 | } 63 | } 64 | // remember to close the channel 65 | close(c) 66 | }() 67 | return c 68 | }, 69 | ) 70 | 71 | s.DumpOnTick("ds", 72 | []string{"UndefinedColor", "Blue", "Red", "Green", "ns since start", "ns since issuance"}, 73 | singleRowFunc([]string{ 74 | strconv.FormatInt(s.ColorCounters.Get("opinionsWeights", multiverse.UndefinedColor), 10), 75 | strconv.FormatInt(s.ColorCounters.Get("opinionsWeights", multiverse.Blue), 10), 76 | strconv.FormatInt(s.ColorCounters.Get("opinionsWeights", multiverse.Red), 10), 77 | strconv.FormatInt(s.ColorCounters.Get("opinionsWeights", multiverse.Green), 10), 78 | strconv.FormatInt(time.Since(s.simulationStartTime).Nanoseconds(), 10), 79 | s.sinceDSIssuanceTimeStr(), 80 | }), 81 | ) 82 | s.DumpOnTick("tp", 83 | []string{"UndefinedColor (Tip Pool Size)", "Blue (Tip Pool Size)", "Red (Tip Pool Size)", "Green (Tip Pool Size)", 84 | "UndefinedColor (Processed)", "Blue (Processed)", "Red (Processed)", "Green (Processed)", "# of Issued Messages", "ns since start"}, 85 | singleRowFunc([]string{ 86 | strconv.FormatInt(s.ColorCounters.Get("tipPoolSizes", multiverse.UndefinedColor), 10), 87 | strconv.FormatInt(s.ColorCounters.Get("tipPoolSizes", multiverse.Blue), 10), 88 | strconv.FormatInt(s.ColorCounters.Get("tipPoolSizes", multiverse.Red), 10), 89 | strconv.FormatInt(s.ColorCounters.Get("tipPoolSizes", multiverse.Green), 10), 90 | strconv.FormatInt(s.ColorCounters.Get("processedMessages", multiverse.UndefinedColor), 10), 91 | strconv.FormatInt(s.ColorCounters.Get("processedMessages", multiverse.Blue), 10), 92 | strconv.FormatInt(s.ColorCounters.Get("processedMessages", multiverse.Red), 10), 93 | strconv.FormatInt(s.ColorCounters.Get("processedMessages", multiverse.Green), 10), 94 | strconv.FormatInt(s.GlobalCounters.Get("issuedMessages"), 10), 95 | strconv.FormatInt(time.Since(s.simulationStartTime).Nanoseconds(), 10), 96 | }), 97 | ) 98 | s.DumpOnTick("mm", 99 | []string{"Number of Requested Messages", "ns since start"}, 100 | singleRowFunc([]string{ 101 | strconv.FormatInt(s.ColorCounters.Get("requestedMissingMessages", multiverse.UndefinedColor), 10), 102 | 
strconv.FormatInt(time.Since(s.simulationStartTime).Nanoseconds(), 10), 103 | }), 104 | ) 105 | s.DumpOnTick("cc", []string{"Blue (Confirmed)", "Red (Confirmed)", "Green (Confirmed)", 106 | "Blue (Adversary Confirmed)", "Red (Adversary Confirmed)", "Green (Adversary Confirmed)", 107 | "Blue (Confirmed Accumulated Weight)", "Red (Confirmed Accumulated Weight)", "Green (Confirmed Accumulated Weight)", 108 | "Blue (Confirmed Adversary Weight)", "Red (Confirmed Adversary Weight)", "Green (Confirmed Adversary Weight)", 109 | "Blue (Like)", "Red (Like)", "Green (Like)", 110 | "Blue (Like Accumulated Weight)", "Red (Like Accumulated Weight)", "Green (Like Accumulated Weight)", 111 | "Blue (Adversary Like Accumulated Weight)", "Red (Adversary Like Accumulated Weight)", "Green (Adversary Like Accumulated Weight)", 112 | "Unconfirmed Blue", "Unconfirmed Red", "Unconfirmed Green", 113 | "Unconfirmed Blue Accumulated Weight", "Unconfirmed Red Accumulated Weight", "Unconfirmed Green Accumulated Weight", 114 | "Flips (Winning color changed)", "Honest nodes Flips", "ns since start", "ns since issuance"}, 115 | singleRowFunc([]string{ 116 | strconv.FormatInt(s.ColorCounters.Get("confirmedNodes", multiverse.Blue), 10), 117 | strconv.FormatInt(s.ColorCounters.Get("confirmedNodes", multiverse.Red), 10), 118 | strconv.FormatInt(s.ColorCounters.Get("confirmedNodes", multiverse.Green), 10), 119 | strconv.FormatInt(s.AdversaryCounters.Get("confirmedNodes", multiverse.Blue), 10), 120 | strconv.FormatInt(s.AdversaryCounters.Get("confirmedNodes", multiverse.Red), 10), 121 | strconv.FormatInt(s.AdversaryCounters.Get("confirmedNodes", multiverse.Green), 10), 122 | strconv.FormatInt(s.ColorCounters.Get("confirmedAccumulatedWeight", multiverse.Blue), 10), 123 | strconv.FormatInt(s.ColorCounters.Get("confirmedAccumulatedWeight", multiverse.Red), 10), 124 | strconv.FormatInt(s.ColorCounters.Get("confirmedAccumulatedWeight", multiverse.Green), 10), 125 | strconv.FormatInt(s.AdversaryCounters.Get("confirmedAccumulatedWeight", multiverse.Blue), 10), 126 | strconv.FormatInt(s.AdversaryCounters.Get("confirmedAccumulatedWeight", multiverse.Red), 10), 127 | strconv.FormatInt(s.AdversaryCounters.Get("confirmedAccumulatedWeight", multiverse.Green), 10), 128 | strconv.FormatInt(s.ColorCounters.Get("opinions", multiverse.Blue), 10), 129 | strconv.FormatInt(s.ColorCounters.Get("opinions", multiverse.Red), 10), 130 | strconv.FormatInt(s.ColorCounters.Get("opinions", multiverse.Green), 10), 131 | strconv.FormatInt(s.ColorCounters.Get("likeAccumulatedWeight", multiverse.Blue), 10), 132 | strconv.FormatInt(s.ColorCounters.Get("likeAccumulatedWeight", multiverse.Red), 10), 133 | strconv.FormatInt(s.ColorCounters.Get("likeAccumulatedWeight", multiverse.Green), 10), 134 | strconv.FormatInt(s.AdversaryCounters.Get("likeAccumulatedWeight", multiverse.Blue), 10), 135 | strconv.FormatInt(s.AdversaryCounters.Get("likeAccumulatedWeight", multiverse.Red), 10), 136 | strconv.FormatInt(s.AdversaryCounters.Get("likeAccumulatedWeight", multiverse.Green), 10), 137 | strconv.FormatInt(s.ColorCounters.Get("colorUnconfirmed", multiverse.Blue), 10), 138 | strconv.FormatInt(s.ColorCounters.Get("colorUnconfirmed", multiverse.Red), 10), 139 | strconv.FormatInt(s.ColorCounters.Get("colorUnconfirmed", multiverse.Green), 10), 140 | strconv.FormatInt(s.ColorCounters.Get("unconfirmedAccumulatedWeight", multiverse.Blue), 10), 141 | strconv.FormatInt(s.ColorCounters.Get("unconfirmedAccumulatedWeight", multiverse.Red), 10), 142 | 
strconv.FormatInt(s.ColorCounters.Get("unconfirmedAccumulatedWeight", multiverse.Green), 10), 143 | strconv.FormatInt(s.GlobalCounters.Get("flips"), 10), 144 | strconv.FormatInt(s.GlobalCounters.Get("honestFlips"), 10), 145 | strconv.FormatInt(time.Since(s.simulationStartTime).Nanoseconds(), 10), 146 | s.sinceDSIssuanceTimeStr(), 147 | }), 148 | ) 149 | s.DumpOnEvent("ww", 150 | []string{"Witness Weight", "Time (ns)"}, 151 | func() <-chan []string { 152 | c := make(chan []string) 153 | 154 | wwPeer := s.network.Peers[config.Params.MonitoredWitnessWeightPeer] 155 | previousWitnessWeight := uint64(config.Params.NodesTotalWeight) 156 | wwPeer.Node.(multiverse.NodeInterface).Tangle().ApprovalManager.Events.MessageWitnessWeightUpdated.Attach( 157 | events.NewClosure(func(message *multiverse.Message, weight uint64) { 158 | if previousWitnessWeight == weight { 159 | return 160 | } 161 | previousWitnessWeight = weight 162 | record := []string{ 163 | strconv.FormatUint(weight, 10), 164 | strconv.FormatInt(time.Since(message.IssuanceTime).Nanoseconds(), 10), 165 | } 166 | // writing to the file 167 | c <- record 168 | })) 169 | return c 170 | }, 171 | ) 172 | s.DumpOnEvent("aw", 173 | []string{"Message ID", "Issuance Time (unix)", "Confirmation Time (ns)", "Weight", "# of Confirmed Messages", 174 | "# of Issued Messages", "ns since start"}, 175 | func() <-chan []string { 176 | c := make(chan []string) 177 | 178 | for _, id := range s.watchedPeerIDs { 179 | awPeer := s.network.Peers[id] 180 | if typeutils.IsInterfaceNil(awPeer) { 181 | panic(fmt.Sprintf("unknowm peer with id %d", id)) 182 | } 183 | awPeer.Node.(multiverse.NodeInterface).Tangle().ApprovalManager.Events.MessageConfirmed.Attach( 184 | events.NewClosure(func(message *multiverse.Message, messageMetadata *multiverse.MessageMetadata, weight uint64, messageIDCounter int64) { 185 | // increased here and not in metrics setup to rice between counter attached there and read here 186 | s.PeerCounters.Add("confirmedMessageCount", 1, awPeer.ID) 187 | record := []string{ 188 | strconv.FormatInt(int64(message.ID), 10), 189 | strconv.FormatInt(message.IssuanceTime.Unix(), 10), 190 | strconv.FormatInt(int64(messageMetadata.ConfirmationTime().Sub(message.IssuanceTime)), 10), 191 | strconv.FormatUint(weight, 10), 192 | strconv.FormatInt(s.PeerCounters.Get("confirmedMessageCount", awPeer.ID), 10), 193 | strconv.FormatInt(messageIDCounter, 10), 194 | strconv.FormatInt(time.Since(s.simulationStartTime).Nanoseconds(), 10), 195 | } 196 | // writing to the file 197 | c <- record 198 | })) 199 | } 200 | return c 201 | }, 202 | ) 203 | s.DumpOnTick("all-tp", allNodesHeader(), 204 | func() csvRows { 205 | record := make([]string, config.Params.NodesCount+1) 206 | i := 0 207 | for peerID := 0; peerID < config.Params.NodesCount; peerID++ { 208 | tipCounterName := fmt.Sprint("tipPoolSizes-", peerID) 209 | record[i+0] = strconv.FormatInt(s.ColorCounters.Get(tipCounterName, multiverse.UndefinedColor), 10) 210 | i = i + 1 211 | } 212 | record[i] = strconv.FormatInt(time.Since(s.simulationStartTime).Nanoseconds(), 10) 213 | return csvRows{record} 214 | }, 215 | ) 216 | s.DumpOnShutdown("nd", 217 | []string{"Node ID", "Adversary", "Min Confirmed Accumulated Weight", "Unconfirmation Count"}, 218 | func() csvRows { 219 | records := make(csvRows, config.Params.NodesCount) 220 | for i := 0; i < config.Params.NodesCount; i++ { 221 | record := []string{ 222 | strconv.FormatInt(int64(i), 10), 223 | strconv.FormatBool(network.IsAdversary(int(i))), 224 | 
strconv.FormatInt(s.PeerCounters.Get("minConfirmedAccumulatedWeight", network.PeerID(i)), 10), 225 | strconv.FormatInt(s.PeerCounters.Get("unconfirmationCount", network.PeerID(i)), 10), 226 | } 227 | records[i] = record 228 | } 229 | return records 230 | }, 231 | ) 232 | } 233 | 234 | // DumpOnTick registers a new writer to the metrics manager. 235 | func (s *MetricsManager) DumpOnTick(key string, header []string, collectFunc func() csvRows) { 236 | resultsWriter := s.createWriter(key, header) 237 | s.writers[key] = resultsWriter 238 | s.collectFuncs[key] = collectFunc 239 | } 240 | 241 | // DumpOnce dumps the results once. 242 | func (s *MetricsManager) DumpOnce(key string, header []string, collectFunc func() csvRows) { 243 | resultsWriter := s.createWriter(key, header) 244 | 245 | rows := collectFunc() 246 | 247 | for _, row := range rows { 248 | if err := resultsWriter.Write(row); err != nil { 249 | log.Fatal("error writing record to csv:", err) 250 | } 251 | } 252 | 253 | if err := resultsWriter.Error(); err != nil { 254 | log.Fatal(err) 255 | } 256 | resultsWriter.Flush() 257 | } 258 | 259 | // DumpOnceStreaming dumps the results row by row, use for large files. 260 | func (s *MetricsManager) DumpOnceStreaming(key string, header []string, collectFunc func() <-chan []string) { 261 | resultsWriter := s.createWriter(key, header) 262 | 263 | for row := range collectFunc() { 264 | if err := resultsWriter.Write(row); err != nil { 265 | log.Fatal("error writing record to csv:", err) 266 | } 267 | resultsWriter.Flush() 268 | } 269 | 270 | if err := resultsWriter.Error(); err != nil { 271 | log.Fatal(err) 272 | } 273 | } 274 | 275 | func (s *MetricsManager) DumpOnEvent(key string, header []string, setupEventFunc func() <-chan []string) { 276 | resultsWriter := s.createWriter(key, header) 277 | mu := &sync.Mutex{} 278 | 279 | go func() { 280 | for { 281 | select { 282 | case row := <-setupEventFunc(): 283 | mu.Lock() 284 | if err := resultsWriter.Write(row); err != nil { 285 | log.Fatal("error writing record to csv:", err) 286 | } 287 | resultsWriter.Flush() 288 | mu.Unlock() 289 | if err := resultsWriter.Write(header); err != nil { 290 | panic(err) 291 | } 292 | case <-s.dumpOnEventShutdown: 293 | return 294 | } 295 | } 296 | }() 297 | } 298 | 299 | func (s *MetricsManager) DumpOnShutdown(key string, header []string, collectFunc func() csvRows) { 300 | s.onShutdownDumpers = append(s.onShutdownDumpers, func() { 301 | s.DumpOnce(key, header, collectFunc) 302 | }) 303 | } 304 | 305 | func (s *MetricsManager) createWriter(key string, header []string) *csv.Writer { 306 | filename := fmt.Sprintf("%s-%s.csv", key, formatTime(s.simulationStartTime)) 307 | file, err := os.Create(path.Join(config.Params.ResultDir, filename)) 308 | if err != nil { 309 | panic(err) 310 | } 311 | 312 | resultsWriter := csv.NewWriter(file) 313 | if err = resultsWriter.Write(header); err != nil { 314 | panic(err) 315 | } 316 | return resultsWriter 317 | } 318 | 319 | func (s *MetricsManager) sinceDSIssuanceTimeStr() string { 320 | if s.dsIssuanceTime.IsZero() { 321 | return "0" 322 | } 323 | return strconv.FormatInt(time.Since(s.dsIssuanceTime).Nanoseconds(), 10) 324 | } 325 | -------------------------------------------------------------------------------- /simulation/metricsmanager.go: -------------------------------------------------------------------------------- 1 | package simulation 2 | 3 | import ( 4 | "encoding/csv" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/iotaledger/hive.go/core/generics/constraints" 9 | 
"github.com/iotaledger/hive.go/types" 10 | "github.com/iotaledger/multivers-simulation/config" 11 | "github.com/iotaledger/multivers-simulation/multiverse" 12 | "github.com/iotaledger/multivers-simulation/network" 13 | ) 14 | 15 | type Csv struct { 16 | filename string 17 | header []string 18 | } 19 | 20 | type MetricsManager struct { 21 | network *network.Network 22 | 23 | // metrics 24 | GlobalCounters *AtomicCounters[string, int64] 25 | PeerCounters *MapCounters[network.PeerID, int64] 26 | ColorCounters *MapCounters[multiverse.Color, int64] 27 | AdversaryCounters *MapCounters[multiverse.Color, int64] 28 | 29 | // internal variables for the metrics 30 | RGBColors []multiverse.Color 31 | uRGBColors []multiverse.Color 32 | adversaryNodesCount int 33 | honestNodesCount int 34 | highestWeightPeerID int 35 | allPeerIDs []network.PeerID // all peers in the network 36 | watchedPeerIDs []network.PeerID // peers with collected more specific metrics 37 | simulationStartTime time.Time 38 | dsIssuanceTime time.Time 39 | 40 | // csv writers 41 | writers map[string]*csv.Writer 42 | collectFuncs map[string]func() csvRows 43 | dumpingTicker *time.Ticker 44 | onShutdownDumpers []func() 45 | 46 | shutdown chan types.Empty 47 | 48 | dumpOnEventUsed bool 49 | dumpOnEventShutdown chan types.Empty 50 | } 51 | 52 | func NewMetricsManager() *MetricsManager { 53 | return &MetricsManager{ 54 | GlobalCounters: NewAtomicCounters[string, int64](), 55 | PeerCounters: NewCounters[network.PeerID, int64](), 56 | ColorCounters: NewCounters[multiverse.Color, int64](), 57 | AdversaryCounters: NewCounters[multiverse.Color, int64](), 58 | 59 | allPeerIDs: make([]network.PeerID, 0), 60 | watchedPeerIDs: make([]network.PeerID, 0), 61 | 62 | writers: make(map[string]*csv.Writer), 63 | collectFuncs: make(map[string]func() csvRows), 64 | 65 | shutdown: make(chan types.Empty), 66 | dumpOnEventShutdown: make(chan types.Empty), 67 | } 68 | } 69 | 70 | func (s *MetricsManager) Setup(network *network.Network) { 71 | s.network = network 72 | s.SetupInternalVariables() 73 | DumpConfig(fmt.Sprint("aw-", formatTime(s.simulationStartTime), ".config")) 74 | s.SetupMetrics() 75 | s.SetupMetricsCollection() 76 | s.SetupWriters() 77 | } 78 | 79 | func (s *MetricsManager) SetupInternalVariables() { 80 | s.RGBColors = []multiverse.Color{multiverse.Red, multiverse.Green, multiverse.Blue} 81 | s.uRGBColors = []multiverse.Color{multiverse.UndefinedColor, multiverse.Red, multiverse.Green, multiverse.Blue} 82 | s.adversaryNodesCount = len(network.AdversaryNodeIDToGroupIDMap) // todo can we define it with config info only? 
83 | s.honestNodesCount = config.Params.NodesCount - s.adversaryNodesCount 84 | s.highestWeightPeerID = 0 // todo make sure all simulation modes has 0 index as the highest weight peer 85 | for _, peer := range s.network.Peers { 86 | s.allPeerIDs = append(s.allPeerIDs, peer.ID) 87 | } 88 | // peers with collected more specific metrics, can be set in config 89 | for _, monitoredID := range config.Params.MonitoredAWPeers { 90 | s.watchedPeerIDs = append(s.watchedPeerIDs, network.PeerID(monitoredID)) 91 | } 92 | s.simulationStartTime = time.Now() 93 | } 94 | 95 | func (s *MetricsManager) StartMetricsCollection() { 96 | s.dumpingTicker = time.NewTicker(time.Duration(config.Params.SlowdownFactor*config.Params.ConsensusMonitorTick) * time.Millisecond) 97 | go func() { 98 | for { 99 | select { 100 | case <-s.dumpingTicker.C: 101 | s.collectMetrics() 102 | case <-s.shutdown: 103 | for _, w := range s.writers { 104 | w.Flush() 105 | } 106 | 107 | // todo move final condition reaching detection to some more accurate place 108 | // determines whether consensus has been reached and simulation is over 109 | r, g, b := getLikesPerRGB(s.ColorCounters, "confirmedNodes") 110 | aR, aG, aB := getLikesPerRGB(s.AdversaryCounters, "confirmedNodes") 111 | hR, hG, hB := r-aR, g-aG, b-aB 112 | if max(max(hB, hR), hG) >= int64(config.Params.SimulationStopThreshold*float64(s.honestNodesCount)) { 113 | //shutdownSignal <- types.Void 114 | } 115 | s.GlobalCounters.Set("tps", 0) 116 | return 117 | } 118 | } 119 | 120 | }() 121 | } 122 | 123 | func (s *MetricsManager) Shutdown() { 124 | s.shutdown <- types.Void 125 | 126 | s.dumpOnShutdown() 127 | if s.dumpingTicker != nil { 128 | s.dumpingTicker.Stop() 129 | } 130 | s.dumpOnEventShutdown <- types.Void 131 | } 132 | 133 | func (s *MetricsManager) dumpOnShutdown() { 134 | for _, collector := range s.onShutdownDumpers { 135 | collector() 136 | } 137 | } 138 | 139 | func (s *MetricsManager) collectMetrics() { 140 | for key := range s.writers { 141 | s.collect(key) 142 | } 143 | } 144 | 145 | func (s *MetricsManager) collect(writerKey string) { 146 | writer := s.writers[writerKey] 147 | record := s.collectFuncs[writerKey]() 148 | for _, row := range record { 149 | if err := writer.Write(row); err != nil { 150 | log.Fatal("error writing record to csv:", err) 151 | } 152 | } 153 | 154 | if err := writer.Error(); err != nil { 155 | log.Fatal(err) 156 | } 157 | writer.Flush() 158 | } 159 | 160 | func (s *MetricsManager) SetDSIssuanceTime() { 161 | s.dsIssuanceTime = time.Now() 162 | } 163 | 164 | func allNodesHeader() []string { 165 | header := make([]string, 0, config.Params.NodesCount+1) 166 | for i := 0; i < config.Params.NodesCount; i++ { 167 | header = append(header, fmt.Sprintf("Node %d", i)) 168 | } 169 | header = append(header, "ns since start") 170 | return header 171 | } 172 | 173 | func formatTime(t time.Time) string { 174 | return t.UTC().Format(time.RFC3339) 175 | } 176 | 177 | func getLikesPerRGB(counter *MapCounters[multiverse.Color, int64], flag string) (int64, int64, int64) { 178 | return counter.Get(flag, multiverse.Red), counter.Get(flag, multiverse.Green), counter.Get(flag, multiverse.Blue) 179 | } 180 | 181 | // max returns the largest of x or y. 
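// It is used in StartMetricsCollection above to check whether the most widely confirmed
// color has reached the SimulationStopThreshold.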
182 | func max[T constraints.Numeric](x, y T) T { 183 | if x < y { 184 | return y 185 | } 186 | return x 187 | } 188 | -------------------------------------------------------------------------------- /simulation/simulator.go: -------------------------------------------------------------------------------- 1 | package simulation 2 | 3 | import ( 4 | "github.com/iotaledger/multivers-simulation/network" 5 | ) 6 | 7 | type Simulator struct { 8 | network *network.Network 9 | metrics *MetricsManager 10 | } 11 | -------------------------------------------------------------------------------- /singlenodeattacks/blowball.go: -------------------------------------------------------------------------------- 1 | package singlenodeattacks 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/iotaledger/hive.go/types" 7 | "github.com/iotaledger/multivers-simulation/config" 8 | "github.com/iotaledger/multivers-simulation/multiverse" 9 | ) 10 | 11 | type BlowballNode struct { 12 | *multiverse.Node 13 | 14 | nearTSCSet *multiverse.TipSet 15 | } 16 | 17 | func NewBlowballNode() interface{} { 18 | node := multiverse.NewNode().(*multiverse.Node) 19 | blowBallNode := &BlowballNode{ 20 | Node: node, 21 | nearTSCSet: multiverse.NewTipSet(nil), 22 | } 23 | return blowBallNode 24 | } 25 | 26 | func (n *BlowballNode) IssuePayload(payload multiverse.Color) { 27 | // create a blow ball 28 | tm := n.Tangle().TipManager 29 | tipSet := tm.TipSet(multiverse.UndefinedColor) 30 | oldestMessageID := tm.WalkForOldestUnconfirmed(tipSet) 31 | oldestMessage := n.CreateMessage(oldestMessageID, payload) 32 | // gossip and process oldest message 33 | n.Tangle().ProcessMessage(oldestMessage) 34 | n.Peer().GossipNetworkMessage(oldestMessage) 35 | 36 | // create and issue blowball 37 | blowBall := n.CreateBlowBall(oldestMessage, payload) 38 | for _, message := range blowBall { 39 | n.Tangle().ProcessMessage(message) 40 | n.Peer().GossipNetworkMessage(message) 41 | } 42 | } 43 | 44 | func (n *BlowballNode) CreateBlowBall(centerMessage *multiverse.Message, payload multiverse.Color) map[multiverse.MessageID]*multiverse.Message { 45 | blowBallMessages := make(map[multiverse.MessageID]*multiverse.Message) 46 | for i := 0; i < config.Params.BlowballSize; i++ { 47 | m := n.CreateMessage(centerMessage.ID, payload) 48 | blowBallMessages[m.ID] = m 49 | } 50 | return blowBallMessages 51 | } 52 | 53 | func (n *BlowballNode) CreateMessage(parent multiverse.MessageID, payload multiverse.Color) *multiverse.Message { 54 | // create a new message 55 | strongParents := multiverse.MessageIDs{parent: types.Void} 56 | weakParents := multiverse.MessageIDs{} 57 | m := &multiverse.Message{ 58 | ID: multiverse.NewMessageID(), 59 | StrongParents: strongParents, 60 | WeakParents: weakParents, 61 | SequenceNumber: n.Tangle().MessageFactory.SequenceNumber(), 62 | Issuer: n.Tangle().Peer.ID, 63 | Payload: payload, 64 | IssuanceTime: time.Now(), 65 | } 66 | return m 67 | } 68 | 69 | func (n *BlowballNode) AssignColor(color multiverse.Color) { 70 | // no need to assign the color to the node for now 71 | } 72 | --------------------------------------------------------------------------------