├── .editorconfig
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── feature_request.md
│ └── question.md
├── actions
│ └── setup-env
│ │ └── action.yml
└── workflows
│ ├── documentation.yml
│ ├── main.yml
│ ├── on-release.yml
│ └── url-health.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── analysis
├── README.md
├── data-analysis
│ └── __init__.py
└── network-analysis
│ └── __init__.py
├── app
├── __init__.py
├── certs
│ └── __init__.py
├── config
│ └── __init__.py
├── deployer.py
├── logs
│ └── __init__.py
└── main.py
├── docs
├── _prebuilt
│ ├── changelog.md
│ ├── contributing.md
│ ├── css
│ │ └── toc.css
│ ├── developerguide.md
│ ├── index.md
│ ├── installation.md
│ ├── js
│ │ └── toc.js
│ ├── static
│ │ ├── armasuisse.jpg
│ │ ├── custom.css
│ │ ├── developerguide
│ │ │ └── structure.png
│ │ ├── docker-required-options.png
│ │ ├── nebula-logo.jpg
│ │ ├── nebula-logo.pdf
│ │ ├── nebula-mockup.png
│ │ ├── umu.jpg
│ │ ├── userguide
│ │ │ ├── deployment.png
│ │ │ ├── frontend.png
│ │ │ ├── monitor.png
│ │ │ ├── nebula-dashboard.png
│ │ │ ├── realtime-images.png
│ │ │ ├── realtime-scalars.png
│ │ │ ├── realtime-time-series.png
│ │ │ └── running_nebula.png
│ │ └── uzh.jpg
│ └── userguide.md
├── mkdocs.yml
└── scripts
│ └── gen_ref_pages.py
├── nebula
├── __init__.py
├── addons
│ ├── __init__.py
│ ├── attacks
│ │ ├── __init__.py
│ │ ├── attacks.py
│ │ ├── communications
│ │ │ ├── __init__.py
│ │ │ ├── communicationattack.py
│ │ │ ├── delayerattack.py
│ │ │ └── floodingattack.py
│ │ ├── dataset
│ │ │ ├── __init__.py
│ │ │ ├── datapoison.py
│ │ │ ├── datasetattack.py
│ │ │ └── labelflipping.py
│ │ └── model
│ │ │ ├── __init__.py
│ │ │ ├── gllneuroninversion.py
│ │ │ ├── modelattack.py
│ │ │ ├── modelpoison.py
│ │ │ └── swappingweights.py
│ ├── env.py
│ ├── functions.py
│ ├── gps
│ │ ├── __init__.py
│ │ ├── gpsmodule.py
│ │ └── nebulagps.py
│ ├── mobility.py
│ ├── networksimulation
│ │ ├── __init__.py
│ │ ├── nebulanetworksimulator.py
│ │ └── networksimulator.py
│ ├── reporter.py
│ ├── reputation
│ │ ├── __init__.py
│ │ └── reputation.py
│ ├── topologymanager.py
│ ├── trustworthiness
│ │ ├── __init__.py
│ │ ├── benchmarks
│ │ │ ├── CPU_benchmarks_v4.csv
│ │ │ └── GPU_benchmarks_v7.csv
│ │ ├── calculation.py
│ │ ├── configs
│ │ │ ├── eval_metrics.json
│ │ │ └── factsheet_template.json
│ │ ├── factsheet.py
│ │ ├── graphics.py
│ │ ├── metric.py
│ │ ├── pillar.py
│ │ ├── trustworthiness.py
│ │ └── utils.py
│ └── waf
│ │ ├── Dockerfile-grafana
│ │ ├── Dockerfile-loki
│ │ ├── Dockerfile-promtail
│ │ ├── Dockerfile-waf
│ │ ├── __init__.py
│ │ ├── crs-setup.conf
│ │ ├── default.conf
│ │ ├── geoip
│ │ └── GeoIP.conf
│ │ ├── grafana
│ │ ├── automatic.yml
│ │ ├── dashboard.json
│ │ └── dashboard_config.yml
│ │ ├── loki-config.yml
│ │ ├── nginx.conf
│ │ └── promtail-config.yml
├── config
│ ├── __init__.py
│ ├── config.py
│ └── mender.py
├── controller
│ ├── Dockerfile
│ ├── __init__.py
│ ├── controller.py
│ ├── database.py
│ ├── scenarios.py
│ └── start_services.sh
├── core
│ ├── __init__.py
│ ├── addonmanager.py
│ ├── aggregation
│ │ ├── __init__.py
│ │ ├── aggregator.py
│ │ ├── fedavg.py
│ │ ├── krum.py
│ │ ├── median.py
│ │ ├── trimmedmean.py
│ │ └── updatehandlers
│ │ │ ├── __init__.py
│ │ │ ├── cflupdatehandler.py
│ │ │ ├── dflupdatehandler.py
│ │ │ ├── sdflupdatehandler.py
│ │ │ └── updatehandler.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── changeablesubset.py
│ │ ├── cifar10
│ │ │ ├── __init__.py
│ │ │ └── cifar10.py
│ │ ├── cifar100
│ │ │ ├── __init__.py
│ │ │ └── cifar100.py
│ │ ├── datamodule.py
│ │ ├── emnist
│ │ │ ├── __init__.py
│ │ │ └── emnist.py
│ │ ├── fashionmnist
│ │ │ ├── __init__.py
│ │ │ └── fashionmnist.py
│ │ ├── mnist
│ │ │ ├── __init__.py
│ │ │ └── mnist.py
│ │ └── nebuladataset.py
│ ├── engine.py
│ ├── eventmanager.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── cifar10
│ │ │ ├── __init__.py
│ │ │ ├── cnn.py
│ │ │ ├── cnnV2.py
│ │ │ ├── cnnV3.py
│ │ │ ├── fastermobilenet.py
│ │ │ ├── resnet.py
│ │ │ └── simplemobilenet.py
│ │ ├── cifar100
│ │ │ ├── __init__.py
│ │ │ └── cnn.py
│ │ ├── emnist
│ │ │ ├── __init__.py
│ │ │ ├── cnn.py
│ │ │ └── mlp.py
│ │ ├── fashionmnist
│ │ │ ├── __init__.py
│ │ │ ├── cnn.py
│ │ │ └── mlp.py
│ │ ├── mnist
│ │ │ ├── __init__.py
│ │ │ ├── cnn.py
│ │ │ └── mlp.py
│ │ ├── nebulamodel.py
│ │ └── sentiment140
│ │ │ ├── __init__.py
│ │ │ ├── cnn.py
│ │ │ └── rnn.py
│ ├── nebulaevents.py
│ ├── network
│ │ ├── __init__.py
│ │ ├── actions.py
│ │ ├── blacklist.py
│ │ ├── communications.py
│ │ ├── connection.py
│ │ ├── discoverer.py
│ │ ├── externalconnection
│ │ │ ├── __init__.py
│ │ │ ├── externalconnectionservice.py
│ │ │ └── nebuladiscoveryservice.py
│ │ ├── forwarder.py
│ │ ├── health.py
│ │ ├── messages.py
│ │ └── propagator.py
│ ├── node.py
│ ├── noderole.py
│ ├── pb
│ │ ├── __init__.py
│ │ ├── nebula.proto
│ │ └── nebula_pb2.py
│ ├── role.py
│ ├── situationalawareness
│ │ ├── __init__.py
│ │ ├── awareness
│ │ │ ├── __init__.py
│ │ │ ├── arbitrationpolicies
│ │ │ │ ├── __init__.py
│ │ │ │ ├── arbitrationpolicy.py
│ │ │ │ └── staticarbitrationpolicy.py
│ │ │ ├── sanetwork
│ │ │ │ ├── __init__.py
│ │ │ │ ├── neighborpolicies
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── distanceneighborpolicy.py
│ │ │ │ │ ├── fcneighborpolicy.py
│ │ │ │ │ ├── idleneighborpolicy.py
│ │ │ │ │ ├── neighborpolicy.py
│ │ │ │ │ ├── ringneighborpolicy.py
│ │ │ │ │ └── starneighborpolicy.py
│ │ │ │ └── sanetwork.py
│ │ │ ├── sareasoner.py
│ │ │ ├── satraining
│ │ │ │ ├── satraining.py
│ │ │ │ └── trainingpolicy
│ │ │ │ │ ├── bpstrainingpolicy.py
│ │ │ │ │ ├── fastreboot.py
│ │ │ │ │ ├── htstrainingpolicy.py
│ │ │ │ │ ├── qdstrainingpolicy.py
│ │ │ │ │ └── trainingpolicy.py
│ │ │ ├── sautils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── sacommand.py
│ │ │ │ ├── samoduleagent.py
│ │ │ │ └── sasystemmonitor.py
│ │ │ └── suggestionbuffer.py
│ │ ├── discovery
│ │ │ ├── __init__.py
│ │ │ ├── candidateselection
│ │ │ │ ├── __init__.py
│ │ │ │ ├── candidateselector.py
│ │ │ │ ├── distcandidateselector.py
│ │ │ │ ├── fccandidateselector.py
│ │ │ │ ├── ringcandidateselector.py
│ │ │ │ └── stdcandidateselector.py
│ │ │ ├── federationconnector.py
│ │ │ └── modelhandlers
│ │ │ │ ├── __init__.py
│ │ │ │ ├── aggmodelhandler.py
│ │ │ │ ├── defaultmodelhandler.py
│ │ │ │ ├── modelhandler.py
│ │ │ │ └── stdmodelhandler.py
│ │ └── situationalawareness.py
│ ├── training
│ │ ├── __init__.py
│ │ ├── lightning.py
│ │ ├── scikit.py
│ │ └── siamese.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── certificate.py
│ │ ├── deterministic.py
│ │ ├── helper.py
│ │ ├── locker.py
│ │ ├── nebulalogger_tensorboard.py
│ │ └── tasks.py
├── frontend
│ ├── Dockerfile
│ ├── __init__.py
│ ├── app.py
│ ├── config
│ │ ├── __init__.py
│ │ ├── nebula
│ │ └── participant.json.example
│ ├── start_services.sh
│ ├── static
│ │ ├── css
│ │ │ ├── bootstrap.min.css
│ │ │ ├── bootstrap.min.united.css
│ │ │ ├── dashboard.css
│ │ │ ├── deployment.css
│ │ │ ├── images
│ │ │ │ ├── layers-2x.png
│ │ │ │ ├── layers.png
│ │ │ │ ├── marker-icon-2x.png
│ │ │ │ ├── marker-icon.png
│ │ │ │ └── marker-shadow.png
│ │ │ ├── leaflet.css
│ │ │ ├── particles.css
│ │ │ └── style.css
│ │ ├── images
│ │ │ ├── README
│ │ │ ├── android-chrome-192x192.png
│ │ │ ├── android-chrome-512x512.png
│ │ │ ├── apple-touch-icon.png
│ │ │ ├── balancediid.png
│ │ │ ├── browserconfig.xml
│ │ │ ├── contributors
│ │ │ │ ├── alberto-huertas.jpg
│ │ │ │ ├── angel-luis.jpg
│ │ │ │ ├── chao-feng.jpeg
│ │ │ │ ├── enrique-tomas-martinez-beltran.jpg
│ │ │ │ ├── gerome-bovet.jpeg
│ │ │ │ ├── gregorio-martinez-perez.jpg
│ │ │ │ ├── manuel-gil.jpg
│ │ │ │ ├── pedro-miguel.jpg
│ │ │ │ └── sergio-lopez-bernal.png
│ │ │ ├── device.png
│ │ │ ├── dirichlet_noniid.png
│ │ │ ├── drone.svg
│ │ │ ├── drone_offline.svg
│ │ │ ├── favicon-16x16.png
│ │ │ ├── favicon-32x32.png
│ │ │ ├── favicon.ico
│ │ │ ├── mstile-144x144.png
│ │ │ ├── mstile-150x150.png
│ │ │ ├── mstile-310x150.png
│ │ │ ├── mstile-310x310.png
│ │ │ ├── mstile-70x70.png
│ │ │ ├── nebula-icon-white.svg
│ │ │ ├── nebula-icon.svg
│ │ │ ├── nebula-logo.jpg
│ │ │ ├── percentage.png
│ │ │ ├── physical-device.png
│ │ │ ├── publications
│ │ │ │ ├── arxiv.png
│ │ │ │ ├── eswa.gif
│ │ │ │ ├── ieee.png
│ │ │ │ └── ijcai-23.png
│ │ │ ├── safari-pinned-tab.svg
│ │ │ ├── site.webmanifest
│ │ │ └── unbalanceiid.png
│ │ ├── js
│ │ │ ├── bootstrap.min.js
│ │ │ ├── custom.js
│ │ │ ├── dashboard
│ │ │ │ ├── config-manager.js
│ │ │ │ ├── dashboard.js
│ │ │ │ ├── notes-manager.js
│ │ │ │ └── scenario-actions.js
│ │ │ ├── dat.gui.js
│ │ │ ├── dat.gui.js.map
│ │ │ ├── deployment
│ │ │ │ ├── attack.js
│ │ │ │ ├── graph-settings.js
│ │ │ │ ├── help-content.js
│ │ │ │ ├── main.js
│ │ │ │ ├── mobility.js
│ │ │ │ ├── reputation.js
│ │ │ │ ├── scenario.js
│ │ │ │ ├── situational-awareness.js
│ │ │ │ ├── topology.js
│ │ │ │ ├── trustworthiness.js
│ │ │ │ ├── ui-controls.js
│ │ │ │ └── utils.js
│ │ │ ├── graph.js
│ │ │ ├── jquery.min.js
│ │ │ ├── leaflet.js
│ │ │ ├── leaflet.js.map
│ │ │ ├── leaflet.moving.js
│ │ │ ├── monitor
│ │ │ │ └── monitor.js
│ │ │ ├── particles.json
│ │ │ └── particles.min.js
│ │ ├── maintenance.html
│ │ └── vendor
│ │ │ ├── aos
│ │ │ ├── aos.cjs.js
│ │ │ ├── aos.css
│ │ │ ├── aos.esm.js
│ │ │ ├── aos.js
│ │ │ └── aos.js.map
│ │ │ ├── bootstrap-icons
│ │ │ ├── bootstrap-icons.css
│ │ │ ├── bootstrap-icons.json
│ │ │ ├── bootstrap-icons.min.css
│ │ │ ├── bootstrap-icons.scss
│ │ │ └── fonts
│ │ │ │ ├── bootstrap-icons.woff
│ │ │ │ └── bootstrap-icons.woff2
│ │ │ ├── bootstrap
│ │ │ ├── css
│ │ │ │ ├── bootstrap-grid.css
│ │ │ │ ├── bootstrap-grid.css.map
│ │ │ │ ├── bootstrap-grid.min.css
│ │ │ │ ├── bootstrap-grid.min.css.map
│ │ │ │ ├── bootstrap-grid.rtl.css
│ │ │ │ ├── bootstrap-grid.rtl.css.map
│ │ │ │ ├── bootstrap-grid.rtl.min.css
│ │ │ │ ├── bootstrap-grid.rtl.min.css.map
│ │ │ │ ├── bootstrap-reboot.css
│ │ │ │ ├── bootstrap-reboot.css.map
│ │ │ │ ├── bootstrap-reboot.min.css
│ │ │ │ ├── bootstrap-reboot.min.css.map
│ │ │ │ ├── bootstrap-reboot.rtl.css
│ │ │ │ ├── bootstrap-reboot.rtl.css.map
│ │ │ │ ├── bootstrap-reboot.rtl.min.css
│ │ │ │ ├── bootstrap-reboot.rtl.min.css.map
│ │ │ │ ├── bootstrap-utilities.css
│ │ │ │ ├── bootstrap-utilities.css.map
│ │ │ │ ├── bootstrap-utilities.min.css
│ │ │ │ ├── bootstrap-utilities.min.css.map
│ │ │ │ ├── bootstrap-utilities.rtl.css
│ │ │ │ ├── bootstrap-utilities.rtl.css.map
│ │ │ │ ├── bootstrap-utilities.rtl.min.css
│ │ │ │ ├── bootstrap-utilities.rtl.min.css.map
│ │ │ │ ├── bootstrap.css
│ │ │ │ ├── bootstrap.css.map
│ │ │ │ ├── bootstrap.min.css
│ │ │ │ ├── bootstrap.min.css.map
│ │ │ │ ├── bootstrap.rtl.css
│ │ │ │ ├── bootstrap.rtl.css.map
│ │ │ │ ├── bootstrap.rtl.min.css
│ │ │ │ └── bootstrap.rtl.min.css.map
│ │ │ └── js
│ │ │ │ ├── bootstrap.bundle.js
│ │ │ │ ├── bootstrap.bundle.js.map
│ │ │ │ ├── bootstrap.bundle.min.js
│ │ │ │ ├── bootstrap.bundle.min.js.map
│ │ │ │ ├── bootstrap.esm.js
│ │ │ │ ├── bootstrap.esm.js.map
│ │ │ │ ├── bootstrap.esm.min.js
│ │ │ │ ├── bootstrap.esm.min.js.map
│ │ │ │ ├── bootstrap.js
│ │ │ │ ├── bootstrap.js.map
│ │ │ │ ├── bootstrap.min.js
│ │ │ │ └── bootstrap.min.js.map
│ │ │ ├── boxicons
│ │ │ ├── css
│ │ │ │ ├── animations.css
│ │ │ │ ├── boxicons.css
│ │ │ │ ├── boxicons.min.css
│ │ │ │ └── transformations.css
│ │ │ └── fonts
│ │ │ │ ├── boxicons.eot
│ │ │ │ ├── boxicons.svg
│ │ │ │ ├── boxicons.ttf
│ │ │ │ ├── boxicons.woff
│ │ │ │ └── boxicons.woff2
│ │ │ ├── glightbox
│ │ │ ├── css
│ │ │ │ ├── glightbox.css
│ │ │ │ ├── glightbox.min.css
│ │ │ │ ├── plyr.css
│ │ │ │ └── plyr.min.css
│ │ │ └── js
│ │ │ │ ├── glightbox.js
│ │ │ │ └── glightbox.min.js
│ │ │ ├── isotope-layout
│ │ │ ├── isotope.pkgd.js
│ │ │ └── isotope.pkgd.min.js
│ │ │ ├── php-email-form
│ │ │ └── validate.js
│ │ │ ├── purecounter
│ │ │ ├── purecounter_vanilla.js
│ │ │ └── purecounter_vanilla.js.map
│ │ │ ├── remixicon
│ │ │ ├── remixicon.css
│ │ │ ├── remixicon.eot
│ │ │ ├── remixicon.glyph.json
│ │ │ ├── remixicon.less
│ │ │ ├── remixicon.svg
│ │ │ ├── remixicon.symbol.svg
│ │ │ ├── remixicon.ttf
│ │ │ ├── remixicon.woff
│ │ │ └── remixicon.woff2
│ │ │ └── swiper
│ │ │ ├── swiper-bundle.min.css
│ │ │ ├── swiper-bundle.min.js
│ │ │ └── swiper-bundle.min.js.map
│ └── templates
│ │ ├── 401.html
│ │ ├── 403.html
│ │ ├── 404.html
│ │ ├── 405.html
│ │ ├── 413.html
│ │ ├── admin.html
│ │ ├── dashboard.html
│ │ ├── deployment.html
│ │ ├── index.html
│ │ ├── layout.html
│ │ ├── monitor.html
│ │ ├── private.html
│ │ └── statistics.html
└── utils.py
└── pyproject.toml
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*]
4 | charset = utf-8
5 | end_of_line = lf
6 | insert_final_newline = true
7 | trim_trailing_whitespace = true
8 |
9 | [*.{py,toml}]
10 | indent_style = space
11 | indent_size = 4
12 |
13 | [*.{yml,yaml,json}]
14 | indent_style = space
15 | indent_size = 2
16 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: "[BUG]"
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Deploy the following scenario '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. macOS, Windows 11]
28 | - Python version: [e.g. 3.10, 3.11]
29 |
30 | **Additional context**
31 | Add any other context about the problem here.
32 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: "[FEATURE]"
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
3 | about: Ask a question about a specific topic
4 | title: "[QUESTION]"
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 | **What is your question related to?**
11 | A specific topic or area of the project.
12 | - [ ] Core
13 | - [ ] Frontend
14 | - [ ] Other (please specify)
15 |
16 | **What is the question you would like to ask?**
17 | Please provide a detailed description of your question.
18 |
--------------------------------------------------------------------------------
/.github/actions/setup-env/action.yml:
--------------------------------------------------------------------------------
1 | name: "setup-env"
2 | description: "Set up a Python environment"
3 |
4 | inputs:
5 | python-version:
6 | required: false
7 | description: "The version of Python"
8 | default: "3.11.7"
9 |
10 | runs:
11 | using: "composite"
12 | steps:
13 | - name: Set up python
14 | run: |
15 | curl -fsSL https://astral.sh/uv/install.sh | bash
16 | uv python install ${{ inputs.python-version }}
17 | uv python pin ${{ inputs.python-version }}
18 | shell: bash
19 |
20 | - name: Add uv to Path
21 | run: echo "$HOME/.local/bin" >> $GITHUB_PATH
22 | shell: bash
23 |
24 | - name: Cache virtual environment
25 | id: cache-venv
26 | uses: actions/cache@v4
27 | with:
28 | path: .venv
29 | key: venv-${{ runner.os }}-${{ inputs.python-version }}-${{ hashFiles('pyproject.toml', 'uv.lock') }}
30 |
31 | - name: Install dependencies if cache is not hit
32 | if: steps.cache-venv.outputs.cache-hit != 'true'
33 | run: uv sync
34 | shell: bash
35 |
--------------------------------------------------------------------------------
/.github/workflows/documentation.yml:
--------------------------------------------------------------------------------
1 | name: Update documentation
2 | on:
3 | workflow_dispatch:
4 | inputs:
5 | logLevel:
6 | description: 'Log level'
7 | required: true
8 | default: 'manual'
9 | type: choice
10 | options:
11 | - manual
12 | - info
13 | - warning
14 | - debug
15 |
16 | jobs:
17 | build:
18 | runs-on: ubuntu-latest
19 | steps:
20 | - uses: actions/setup-python@v5
21 | with:
22 | python-version: '3.11.7'
23 | - uses: actions/checkout@v4
24 | with:
25 | fetch-depth: 0 # otherwise, pushing refs to the destination repo will fail
26 | - name: Set up the environment
27 | uses: ./.github/actions/setup-env
28 | - name: Install doc dependencies
29 | run: |
30 | uv sync --group docs
31 | - name: List directories
32 | run: |
33 | ls -la
34 | - name: Build Documentation
35 | run: |
36 | uv run mkdocs build -f docs/mkdocs.yml -d _build
37 | - name: Add files
38 | run: |
39 | echo "docs.nebula-dfl.com" > docs/_build/CNAME
40 | - name: List directories
41 | run: |
42 | ls -la docs/_build/
43 | - uses: actions/checkout@v4
44 | with:
45 | ref: gh-pages
46 | path: gh-pages
47 | - name: List directories
48 | run: |
49 | ls -la gh-pages/
50 | - name: Copy documentation to the gh-pages branch
51 | run: |
52 | cp -r docs/_build/* gh-pages/
53 | touch gh-pages/.nojekyll
54 | - name: List directories
55 | run: |
56 | ls -la gh-pages/
57 | - name: Commit and Push changes
58 | run: |
59 | cd gh-pages
60 | git config user.name github-actions
61 | git config user.email github-actions@github.com
62 | git add .
63 | git commit -m "Update documentation" -a || echo "No changes to commit"
64 | git push origin gh-pages
65 | env:
66 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
67 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: Main
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | types: [opened, synchronize, reopened, ready_for_review]
9 |
10 | jobs:
11 | quality:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Check out
15 | uses: actions/checkout@v4
16 |
17 | - uses: actions/cache@v4
18 | with:
19 | path: ~/.cache/pre-commit
20 | key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
21 |
22 | - name: Set up the environment
23 | uses: ./.github/actions/setup-env
24 |
25 | - name: Run checks
26 | run: make check
27 |
28 | check-docs:
29 | runs-on: ubuntu-latest
30 | steps:
31 | - name: Check out
32 | uses: actions/checkout@v4
33 |
34 | - name: Set up the environment
35 | uses: ./.github/actions/setup-env
36 |
37 | - name: Install doc dependencies
38 | run: |
39 | uv sync --group docs
40 |
41 | - name: Check if documentation can be built
42 | run: |
43 | uv run mkdocs build -f docs/mkdocs.yml -d _build
44 |
--------------------------------------------------------------------------------
/.github/workflows/on-release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | on:
4 | release:
5 | types: [published]
6 | branches: [main]
7 |
8 | jobs:
9 | publish:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Check out
13 | uses: actions/checkout@v4
14 |
15 | - name: Set up the environment
16 | uses: ./.github/actions/setup-env
17 |
18 | - name: Export tag
19 | id: vars
20 | run: echo tag=${GITHUB_REF#refs/*/} >> $GITHUB_OUTPUT
21 |
22 | - name: Build and publish
23 | run: |
24 | source .venv/bin/activate
25 | make build-and-publish
26 | env:
27 | PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
28 | RELEASE_VERSION: ${{ steps.vars.outputs.tag }}
29 |
--------------------------------------------------------------------------------
/.github/workflows/url-health.yml:
--------------------------------------------------------------------------------
1 | name: URL Health Check
2 |
3 | on:
4 | schedule:
5 | - cron: '0 * * * *' # Execute the action every hour
6 | workflow_dispatch: # Allows you to run the workflow manually
7 |
8 | jobs:
9 | health-check:
10 | runs-on: ubuntu-latest
11 |
12 | container:
13 | image: alpine:latest
14 |
15 | steps:
16 | - name: Install curl
17 | run: apk add --no-cache curl
18 |
19 | - name: URL Health Check
20 | uses: Jtalk/url-health-check-action@v4
21 | with:
22 | url: 'https://federatedlearning.inf.um.es'
23 | max-attempts: 3
24 | retry-delay: 3s
25 | follow-redirect: true
26 | retry-all: false
27 | cookie: 'token=url-health'
28 | basic-auth: # optional
29 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Data from reputation scenarios
10 | nebula/core/reputation/nebula_DFL_*
11 |
12 | # File uv.lock
13 | uv.lock
14 |
15 | # Distribution / packaging
16 | .Python
17 | docs/build/
18 | develop-eggs/
19 | dist/
20 | downloads/
21 | analysis/addons
22 | eggs/
23 | .eggs/
24 | lib/
25 | lib64/
26 | parts/
27 | sdist/
28 | var/
29 | wheels/
30 | pip-wheel-metadata/
31 | share/python-wheels/
32 | *.egg-info/
33 | .installed.cfg
34 | *.egg
35 | MANIFEST
36 |
37 | # PyInstaller
38 | # Usually these files are written by a python script from a template
39 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
40 | *.manifest
41 | *.spec
42 |
43 | # Installer logs
44 | pip-log.txt
45 | pip-delete-this-directory.txt
46 |
47 | # Unit test / coverage reports
48 | htmlcov/
49 | .tox/
50 | .nox/
51 | .coverage
52 | .coverage.*
53 | .cache
54 | nosetests.xml
55 | coverage.xml
56 | *.cover
57 | *.py,cover
58 | .hypothesis/
59 | .pytest_cache/
60 |
61 | # Translations
62 | *.mo
63 | *.pot
64 |
65 | # Django stuff:
66 | *.log
67 | local_settings.py
68 | db.sqlite3
69 | db.sqlite3-journal
70 |
71 | # Flask stuff:
72 | instance/
73 | .webassets-cache
74 |
75 | # Scrapy stuff:
76 | .scrapy
77 |
78 | # Sphinx documentation
79 | docs/_build/
80 |
81 | # PyBuilder
82 | target/
83 |
84 | # Jupyter Notebook
85 | .ipynb_checkpoints
86 |
87 | # IPython
88 | profile_default/
89 | ipython_config.py
90 |
91 | # pyenv
92 | .python-version
93 |
94 | # pipenv
95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
98 | # install all needed dependencies.
99 | #Pipfile.lock
100 |
101 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
102 | __pypackages__/
103 |
104 | # Celery stuff
105 | celerybeat-schedule
106 | celerybeat.pid
107 |
108 | # SageMath parsed files
109 | *.sage.py
110 |
111 | # Environments
112 | .env
113 | .venv
114 | env/
115 | venv/
116 | ENV/
117 | env.bak/
118 | venv.bak/
119 |
120 | # Spyder project settings
121 | .spyderproject
122 | .spyproject
123 |
124 | # Rope project settings
125 | .ropeproject
126 |
127 | # uv
128 | .venv/
129 | uv.lock
130 |
131 | # mkdocs documentation
132 | /site
133 | _build/
134 |
135 | # mypy
136 | .mypy_cache/
137 | .dmypy.json
138 | dmypy.json
139 |
140 | # ruff
141 | .ruff/
142 | .ruff_cache/
143 |
144 | # Pyre type checker
145 | .pyre/
146 |
147 | .idea/
148 | _build/
149 | *.DS_Store
150 | app/
151 | *.yaml
152 | data/
153 | *.db*
154 | *.out
155 | *.pid
156 |
157 | .requirements.txt
158 | data-analysis/
159 | network-analysis/
160 | docker-compose.yml
161 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: "v5.0.0"
4 | hooks:
5 | - id: check-case-conflict
6 | - id: check-merge-conflict
7 | - id: check-toml
8 | - id: check-yaml
9 | - id: end-of-file-fixer
10 | - id: trailing-whitespace
11 |
12 | - repo: https://github.com/astral-sh/ruff-pre-commit
13 | rev: v0.7.2
14 | hooks:
15 | - id: ruff
16 | args: [--exit-non-zero-on-fix]
17 | - id: ruff-format
18 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | All notable changes to this project will be documented in this file.
3 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 | We welcome contributions to this project. Please read the following guidelines.
3 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:12.4.1-cudnn-runtime-ubuntu22.04
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive
4 |
5 | RUN apt-get update && apt-get install -y tzdata \
6 | && ln -fs /usr/share/zoneinfo/Europe/Madrid /etc/localtime \
7 | && dpkg-reconfigure -f noninteractive tzdata
8 |
9 | ENV TZ=Europe/Madrid
10 |
11 | # Install Python 3.11
12 | RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get update && apt-get install -y software-properties-common
13 | RUN add-apt-repository ppa:deadsnakes/ppa
14 | RUN apt-get update && apt-get install -y python3.11 python3.11-dev python3.11-distutils python3.11-venv
15 |
16 | # Install curl and network tools
17 | RUN apt-get install -y curl net-tools iproute2 iputils-ping
18 |
19 | # Update alternatives to make Python 3.11 the default
20 | RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 2
21 | RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1
22 |
23 | # Install gcc and git
24 | RUN apt-get update && apt-get install -y build-essential gcc g++ clang git make cmake
25 |
26 | # Install docker
27 | RUN apt-get update && apt-get install -y ca-certificates curl gnupg
28 | RUN install -m 0755 -d /etc/apt/keyrings
29 | RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
30 | RUN chmod a+r /etc/apt/keyrings/docker.gpg
31 | RUN echo \
32 | "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
33 | "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
34 | tee /etc/apt/sources.list.d/docker.list > /dev/null
35 | RUN apt-get update
36 |
37 | RUN apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
38 |
39 | ADD https://astral.sh/uv/install.sh /uv-installer.sh
40 |
41 | RUN sh /uv-installer.sh && rm /uv-installer.sh
42 |
43 | ENV PATH="/root/.local/bin/:$PATH"
44 |
45 | COPY pyproject.toml .
46 |
47 | RUN uv python install 3.11.7
48 |
49 | RUN uv python pin 3.11.7
50 |
51 | RUN uv sync --group core
52 |
53 | ENV PATH=".venv/bin:$PATH"
54 |
--------------------------------------------------------------------------------
/analysis/README.md:
--------------------------------------------------------------------------------
1 | # NEBULA: A Platform for Decentralized Federated Learning
2 |
3 | nebula-dfl.com | nebula-dfl.eu | federatedlearning.inf.um.es
4 |
16 | ## NEBULA Analysis
17 |
18 | This folder contains different analyses of the NEBULA platform. The analysis is divided into two parts:
19 |
20 | - **Data analysis**: This folder contains the analysis of the data included in the platform. The analysis is done using Jupyter Notebooks and Python.
21 |
22 | - **Network analysis**: This folder contains the analysis of the platform's network traffic. The analysis is done using Wireshark and Python.
23 |
24 | _*NOTE:* Some data is not included in this repository due to privacy reasons. If you want to access the data, please contact us._
25 |
--------------------------------------------------------------------------------
/analysis/data-analysis/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/analysis/data-analysis/__init__.py
--------------------------------------------------------------------------------
/analysis/network-analysis/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/analysis/network-analysis/__init__.py
--------------------------------------------------------------------------------
/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/app/__init__.py
--------------------------------------------------------------------------------
/app/certs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/app/certs/__init__.py
--------------------------------------------------------------------------------
/app/config/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/app/config/__init__.py
--------------------------------------------------------------------------------
/app/logs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/app/logs/__init__.py
--------------------------------------------------------------------------------
/docs/_prebuilt/changelog.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide:
3 | - toc
4 | ---
5 | --8<-- "CHANGELOG.md"
6 |
--------------------------------------------------------------------------------
/docs/_prebuilt/contributing.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide:
3 | - toc
4 | ---
5 | --8<-- "CONTRIBUTING.md"
6 |
--------------------------------------------------------------------------------
/docs/_prebuilt/css/toc.css:
--------------------------------------------------------------------------------
1 | .md-sidebar--secondary {
2 | order: 0;
3 | }
4 |
5 | .md-sidebar--primary {
6 | display: none;
7 | }
8 |
--------------------------------------------------------------------------------
/docs/_prebuilt/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide:
3 | - toc
4 | ---
5 | --8<-- "README.md"
6 |
--------------------------------------------------------------------------------
/docs/_prebuilt/js/toc.js:
--------------------------------------------------------------------------------
1 | document.addEventListener('DOMContentLoaded', function() {
2 | if (window.location.pathname.includes("api")) {
3 | document.querySelector('.md-sidebar--primary').style.display = 'block';
4 | }
5 | });
6 |
--------------------------------------------------------------------------------
/docs/_prebuilt/static/armasuisse.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/armasuisse.jpg
--------------------------------------------------------------------------------
/docs/_prebuilt/static/custom.css:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/custom.css
--------------------------------------------------------------------------------
/docs/_prebuilt/static/developerguide/structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/developerguide/structure.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/docker-required-options.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/docker-required-options.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/nebula-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/nebula-logo.jpg
--------------------------------------------------------------------------------
/docs/_prebuilt/static/nebula-logo.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/nebula-logo.pdf
--------------------------------------------------------------------------------
/docs/_prebuilt/static/nebula-mockup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/nebula-mockup.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/umu.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/umu.jpg
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/deployment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/deployment.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/frontend.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/frontend.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/monitor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/monitor.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/nebula-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/nebula-dashboard.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/realtime-images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/realtime-images.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/realtime-scalars.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/realtime-scalars.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/realtime-time-series.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/realtime-time-series.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/userguide/running_nebula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/userguide/running_nebula.png
--------------------------------------------------------------------------------
/docs/_prebuilt/static/uzh.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/docs/_prebuilt/static/uzh.jpg
--------------------------------------------------------------------------------
/docs/scripts/gen_ref_pages.py:
--------------------------------------------------------------------------------
1 | """Generate the code reference pages and navigation."""
2 |
3 | from pathlib import Path
4 |
5 | import mkdocs_gen_files
6 |
7 | nav = mkdocs_gen_files.Nav()
8 |
9 | root = Path(__file__).parent.parent.parent
10 | src = root / "nebula"
11 |
12 | print(f"Generating API pages from {src}")
13 |
14 | excluded_dirs = ["tests", "utils", "config"]
15 |
16 | for path in sorted(src.rglob("*.py")):
17 | if any(excluded in path.parts for excluded in excluded_dirs):
18 | print(f"Skipping excluded directory: {path}")
19 | continue
20 | print(f"Generating API page for {path}")
21 | module_path = path.relative_to(src).with_suffix("")
22 | print(f"Module path: {module_path}")
23 | doc_path = path.relative_to(src).with_suffix(".md")
24 | print(f"Doc path: {doc_path}")
25 | full_doc_path = Path("api", doc_path)
26 | print(f"Full doc path: {full_doc_path}")
27 |
28 | parts = tuple(module_path.parts)
29 | if not parts:
30 | continue
31 |
32 | # Prepend 'nebula' to the parts to include the root module
33 | parts = ("nebula",) + parts
34 |
35 | if parts[-1] == "__init__":
36 | parts = parts[:-1]
37 | print(f"Parts: {parts}")
38 | doc_path = doc_path.with_name("index.md")
39 | full_doc_path = full_doc_path.with_name("index.md")
40 | elif parts[-1] == "__main__":
41 | continue
42 |
43 | nav[parts] = doc_path.as_posix()
44 |
45 | with mkdocs_gen_files.open(full_doc_path, "w") as fd:
46 | ident = ".".join(parts)
47 |
48 | custom_title = f"Documentation for {parts[-1].capitalize()} Module"
49 | fd.write("---\n")
50 | fd.write("hide:\n - toc\n")
51 | fd.write("---\n")
52 | fd.write(f"# {custom_title}\n\n")
53 | if parts[-1].capitalize() == "Nebula":
54 | fd.write(
55 | "This API Reference is designed to help developers understand every part of the code, providing detailed information about functions, parameters, data structures, and interactions within the platform.\n\n On the left, you'll find the directory tree of the platform, including folders, functions, code, and documentation.\n\n"
56 | )
57 | fd.write(f"::: {ident}")
58 |
59 | mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root))
60 |
61 | with mkdocs_gen_files.open("api/SUMMARY.md", "w") as nav_file:
62 | nav_file.writelines(nav.build_literate_nav())
63 |
--------------------------------------------------------------------------------
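For orientation, the mapping that `gen_ref_pages.py` applies can be traced on a single input. A small sketch, using `nebula/core/engine.py` as an assumed example file:

```python
# Sketch of the path mapping performed by docs/scripts/gen_ref_pages.py,
# traced on one assumed source file (nebula/core/engine.py).
from pathlib import Path

src = Path("nebula")
path = src / "core" / "engine.py"  # hypothetical input file

module_path = path.relative_to(src).with_suffix("")  # core/engine
doc_path = path.relative_to(src).with_suffix(".md")  # core/engine.md
full_doc_path = Path("api", doc_path)                # api/core/engine.md
parts = ("nebula",) + tuple(module_path.parts)       # ("nebula", "core", "engine")

print(".".join(parts))  # nebula.core.engine -> identifier passed to mkdocstrings (the ::: line)
print(full_doc_path)    # api/core/engine.md -> where the generated stub page lives
```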
/nebula/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "1.0.0"
2 | __description__ = "NEBULA: A Platform for Decentralized Federated Learning"
3 | __long_description__ = "NEBULA: A Platform for Decentralized Federated Learning"
4 | __author__ = "Enrique Tomás Martínez Beltrán, Alberto Huertas Celdrán, Alejandro Avilés Serrano, Fernando Torres Vega"
5 | __long_description_content_type__ = "text/markdown"
6 | __keywords__ = "federated learning, decentralized federated learning, machine learning, deep learning, neural networks, collaborative learning"
7 |
--------------------------------------------------------------------------------
/nebula/addons/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This package consists of several modules that handle different aspects of the network simulation:
3 |
4 | 1. `env.py`:
5 | - Manages the environment configuration and settings.
6 | - It initializes the system environment, loads configuration parameters, and ensures correct operation of other components based on the simulation's settings.
7 |
8 | 2. `functions.py`:
9 | - Contains utility functions that are used across different parts of the simulation.
10 | - It provides helper methods for common operations like data processing, mathematical calculations, and other reusable functionalities.
11 |
12 | 3. `mobility.py`:
13 | - Models and simulates the mobility of nodes within the network.
14 | - It handles dynamic aspects of the simulation, such as node movement and position updates, based on mobility models and the simulation's configuration.
15 |
16 | 4. `reporter.py`:
17 | - Responsible for collecting and reporting data during the simulation.
18 | - It tracks various system metrics, including node status and network performance, and periodically sends updates to a controller or dashboard for analysis and monitoring.
19 |
20 | 5. `topologymanager.py`:
21 | - Manages the topology of the network.
22 | - It handles the creation and maintenance of the network's structure (e.g., nodes and their connections), including generating different types of topologies like ring, random, or fully connected based on simulation parameters.
23 |
24 | Each of these modules plays a critical role in simulating a network environment, enabling real-time tracking, topology management, mobility simulation, and efficient reporting of results.
25 | """
26 |
--------------------------------------------------------------------------------
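To make the role of `topologymanager.py` concrete, here is a minimal, self-contained sketch (not NEBULA's actual API) that builds adjacency matrices for two of the topology types named in the docstring above:

```python
# Minimal sketch, independent of NEBULA's real topologymanager:
# adjacency matrices for a ring and a fully connected topology.

def ring_topology(n: int) -> list[list[int]]:
    """Each node i connects to its two ring neighbours, (i - 1) and (i + 1) mod n."""
    adj = [[0] * n for _ in range(n)]
    for i in range(n):
        adj[i][(i - 1) % n] = 1
        adj[i][(i + 1) % n] = 1
    return adj

def fully_connected_topology(n: int) -> list[list[int]]:
    """Every pair of distinct nodes is connected."""
    return [[int(i != j) for j in range(n)] for i in range(n)]

if __name__ == "__main__":
    print(ring_topology(4))             # each row has exactly two 1s
    print(fully_connected_topology(3))  # complete graph on 3 nodes
```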
/nebula/addons/attacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/attacks/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/attacks/communications/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/attacks/communications/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/attacks/communications/delayerattack.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 | from functools import wraps
4 |
5 | from nebula.addons.attacks.communications.communicationattack import CommunicationAttack
6 | from nebula.core.network.communications import CommunicationsManager
7 |
8 |
9 | class DelayerAttack(CommunicationAttack):
10 | """
11 | Implements an attack that delays the execution of a target method by a specified amount of time.
12 | """
13 |
14 | def __init__(self, engine, attack_params: dict):
15 | """
16 | Initializes the DelayerAttack with the engine and attack parameters.
17 |
18 | Args:
19 | engine: The engine managing the attack context.
20 | attack_params (dict): Parameters for the attack, including the delay duration.
21 | """
22 | try:
23 | self.delay = int(attack_params["delay"])
24 | round_start = int(attack_params["round_start_attack"])
25 | round_stop = int(attack_params["round_stop_attack"])
26 | attack_interval = int(attack_params["attack_interval"])
27 | self.target_percentage = int(attack_params["target_percentage"])
28 | self.selection_interval = int(attack_params["selection_interval"])
29 | except KeyError as e:
30 | raise ValueError(f"Missing required attack parameter: {e}")
31 | except ValueError:
32 | raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
33 |
34 | super().__init__(
35 | engine,
36 | CommunicationsManager.get_instance(),
37 | "send_message",
38 | round_start,
39 | round_stop,
40 | attack_interval,
41 | self.delay,
42 | self.target_percentage,
43 | self.selection_interval,
44 | )
45 |
46 | def decorator(self, delay: int):
47 | """
48 | Decorator that adds a delay to the execution of the original method.
49 |
50 | Args:
51 | delay (int): The time in seconds to delay the method execution.
52 |
53 | Returns:
54 | function: A decorator function that wraps the target method with the delay logic.
55 | """
56 |
57 | def decorator(func):
58 | @wraps(func)
59 | async def wrapper(*args, **kwargs):
60 | if len(args) == 4 and args[3] == "model":
61 | dest_addr = args[1]
62 | if dest_addr in self.targets:
63 | logging.info(f"[DelayerAttack] Delaying model propagation to {dest_addr} by {delay} seconds")
64 | await asyncio.sleep(delay)
65 | _, *new_args = args # Exclude self argument
66 | return await func(*new_args)
67 |
68 | return wrapper
69 |
70 | return decorator
71 |
--------------------------------------------------------------------------------
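The essential pattern in `DelayerAttack` is an async decorator that postpones the wrapped call. A stripped-down sketch of that pattern, with a dummy `send` coroutine standing in for `CommunicationsManager.send_message` and none of the round/target bookkeeping:

```python
# Standalone sketch of the delaying-decorator pattern (dummy coroutine,
# not NEBULA's CommunicationsManager).
import asyncio
from functools import wraps

def delayed(delay: float):
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            await asyncio.sleep(delay)          # hold the call back
            return await func(*args, **kwargs)  # then execute it unchanged
        return wrapper
    return decorator

@delayed(2.0)
async def send(dest: str, payload: str) -> None:
    print(f"sent {payload!r} to {dest}")

asyncio.run(send("192.168.0.2", "model-update"))  # printed ~2 s after the call
```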
/nebula/addons/attacks/communications/floodingattack.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from functools import wraps
3 |
4 | from nebula.addons.attacks.communications.communicationattack import CommunicationAttack
5 | from nebula.core.network.communications import CommunicationsManager
6 |
7 |
8 | class FloodingAttack(CommunicationAttack):
9 | """
10 | Implements an attack that floods target nodes by sending repeated copies of each model message.
11 | """
12 |
13 | def __init__(self, engine, attack_params: dict):
14 | """
15 | Initializes the FloodingAttack with the engine and attack parameters.
16 |
17 | Args:
18 | engine: The engine managing the attack context.
19 | attack_params (dict): Parameters for the attack, including the flooding factor.
20 | """
21 | try:
22 | round_start = int(attack_params["round_start_attack"])
23 | round_stop = int(attack_params["round_stop_attack"])
24 | attack_interval = int(attack_params["attack_interval"])
25 | self.flooding_factor = int(attack_params["flooding_factor"])
26 | self.target_percentage = int(attack_params["target_percentage"])
27 | self.selection_interval = int(attack_params["selection_interval"])
28 | except KeyError as e:
29 | raise ValueError(f"Missing required attack parameter: {e}")
30 | except ValueError:
31 | raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
32 |
33 | self.verbose = False
34 |
35 | super().__init__(
36 | engine,
37 | CommunicationsManager.get_instance(),
38 | "send_message",
39 | round_start,
40 | round_stop,
41 | attack_interval,
42 | self.flooding_factor,
43 | self.target_percentage,
44 | self.selection_interval,
45 | )
46 |
47 | def decorator(self, flooding_factor: int):
48 | """
49 | Decorator that repeats the execution of the original method a specified number of times.
50 |
51 | Args:
52 | flooding_factor (int): The number of times to repeat the function execution.
53 |
54 | Returns:
55 | function: A decorator function that wraps the target method with the flooding logic.
56 | """
57 |
58 | def decorator(func):
59 | @wraps(func)
60 | async def wrapper(*args, **kwargs):
61 | if len(args) == 4 and args[3] == "model":
62 | dest_addr = args[1]
63 | if dest_addr in self.targets:
64 | logging.info(f"[FloodingAttack] Flooding message to {dest_addr} by {flooding_factor} times")
65 | for i in range(flooding_factor):
66 | if self.verbose:
67 | logging.info(
68 | f"[FloodingAttack] Sending duplicate {i + 1}/{flooding_factor} to {dest_addr}"
69 | )
70 | _, *new_args = args # Exclude self argument
71 | await func(*new_args, **kwargs)
72 | _, *new_args = args
73 | return await func(*new_args)
74 |
75 | return wrapper
76 |
77 | return decorator
78 |
--------------------------------------------------------------------------------
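As the wrapper above is written, a targeted model message goes out `flooding_factor` times inside the loop and once more via the trailing call, i.e. `flooding_factor + 1` sends in total. A minimal sketch of that repetition pattern with a dummy coroutine:

```python
# Standalone sketch mirroring FloodingAttack's repetition: `factor` duplicate
# calls in a loop, then one final call (factor + 1 executions in total).
import asyncio
from functools import wraps

def flooded(factor: int):
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            for _ in range(factor):             # duplicates
                await func(*args, **kwargs)
            return await func(*args, **kwargs)  # final (original) send
        return wrapper
    return decorator

@flooded(3)
async def send(dest: str, payload: str) -> None:
    print(f"sent {payload!r} to {dest}")

asyncio.run(send("192.168.0.2", "model-update"))  # prints 4 times
```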
/nebula/addons/attacks/dataset/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/attacks/dataset/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/attacks/dataset/datasetattack.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from abc import abstractmethod
3 |
4 | from nebula.addons.attacks.attacks import Attack
5 |
6 |
7 | class DatasetAttack(Attack):
8 | """
9 | Implements an attack that replaces the training dataset with a malicious version
10 | during specific rounds of the engine's execution.
11 |
12 | This attack modifies the dataset used by the engine's trainer to introduce malicious
13 | data, potentially impacting the model's training process.
14 | """
15 |
16 | def __init__(self, engine, round_start_attack, round_stop_attack, attack_interval):
17 | """
18 | Initializes the DatasetAttack with the given engine.
19 |
20 | Args:
21 | engine: The engine managing the attack context.
22 | """
23 | self.engine = engine
24 | self.round_start_attack = round_start_attack
25 | self.round_stop_attack = round_stop_attack
26 | self.attack_interval = attack_interval
27 |
28 | async def attack(self):
29 | """
30 | Performs the attack by replacing the training dataset with a malicious version.
31 |
32 | During the specified rounds of the attack, the engine's trainer is provided
33 | with a malicious dataset. The attack is stopped when the engine reaches the
34 | designated stop round.
35 | """
36 | if self.engine.round not in range(self.round_start_attack, self.round_stop_attack + 1):
37 | pass
38 | elif self.engine.round == self.round_stop_attack:
39 | logging.info(f"[{self.__class__.__name__}] Stopping attack")
40 | elif self.engine.round >= self.round_start_attack and (
41 | (self.engine.round - self.round_start_attack) % self.attack_interval == 0
42 | ):
43 | logging.info(f"[{self.__class__.__name__}] Performing attack")
44 | self.engine.trainer.datamodule.train_set = self.get_malicious_dataset()
45 |
46 | async def _inject_malicious_behaviour(self, target_function, *args, **kwargs):
47 | """
48 | Abstract method for injecting malicious behavior into a target function.
49 |
50 | This method is not implemented in this class and must be overridden by subclasses
51 | if additional malicious behavior is required.
52 |
53 | Args:
54 | target_function (callable): The function to inject the malicious behavior into.
55 | *args: Positional arguments for the malicious behavior.
56 | **kwargs: Keyword arguments for the malicious behavior.
57 |
58 | Note:
59 | This base implementation is a no-op; subclasses may override it to add behavior.
60 | """
61 | pass
62 |
63 | @abstractmethod
64 | def get_malicious_dataset(self):
65 | """
66 | Abstract method to retrieve the malicious dataset.
67 |
68 | Subclasses must implement this method to define how the malicious dataset
69 | is created or retrieved.
70 |
71 | Raises:
72 | NotImplementedError: If the method is not implemented in a subclass.
73 | """
74 | raise NotImplementedError
75 |
--------------------------------------------------------------------------------
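The round gating in `attack()` is the subtle part: nothing happens outside `[round_start, round_stop]`, the stop round itself only logs, and inside the window the attack fires every `attack_interval` rounds starting at `round_start`. Tracing the same branch order with assumed parameters (round_start=2, round_stop=8, attack_interval=3):

```python
# Worked trace of DatasetAttack's scheduling, same branch order as attack().
# With these assumed values the attack fires at rounds 2 and 5; round 8 is
# the stop round and only logs.
round_start, round_stop, interval = 2, 8, 3

for rnd in range(10):
    if rnd not in range(round_start, round_stop + 1):
        action = "idle"
    elif rnd == round_stop:
        action = "stop"
    elif (rnd - round_start) % interval == 0:
        action = "ATTACK"
    else:
        action = "idle"
    print(rnd, action)
```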
/nebula/addons/attacks/model/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/attacks/model/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/attacks/model/gllneuroninversion.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import torch
4 |
5 | from nebula.addons.attacks.model.modelattack import ModelAttack
6 |
7 |
8 | class GLLNeuronInversionAttack(ModelAttack):
9 | """
10 | Implements a neuron inversion attack on the received model weights.
11 |
12 | This attack aims to invert the values of neurons in specific layers
13 | by replacing their values with random noise, potentially disrupting the model's
14 | functionality during aggregation.
15 |
16 | Args:
17 | engine (object): The training engine object that manages the aggregator.
18 | attack_params (dict): Parameters for the attack, including the round schedule.
19 | """
20 |
21 | def __init__(self, engine, attack_params):
22 | """
23 | Initializes the GLLNeuronInversionAttack with the specified engine.
24 |
25 | Args:
26 | engine (object): The training engine object.
27 | attack_params (dict): Parameters for the attack, including the round schedule.
28 | """
29 | try:
30 | round_start = int(attack_params["round_start_attack"])
31 | round_stop = int(attack_params["round_stop_attack"])
32 | attack_interval = int(attack_params["attack_interval"])
33 | except KeyError as e:
34 | raise ValueError(f"Missing required attack parameter: {e}")
35 | except ValueError:
36 | raise ValueError("Invalid value in attack_params. Ensure all values are integers.")
37 |
38 | super().__init__(engine, round_start, round_stop, attack_interval)
39 |
40 | def model_attack(self, received_weights):
41 | """
42 | Performs the neuron inversion attack by modifying the weights of a specific
43 | layer with random noise.
44 |
45 | This attack replaces the weights of a chosen layer with random values,
46 | which may disrupt the functionality of the model.
47 |
48 | Args:
49 | received_weights (dict): The aggregated model weights to be modified.
50 |
51 | Returns:
52 | dict: The modified model weights after applying the neuron inversion attack.
53 | """
54 | logging.info("[GLLNeuronInversionAttack] Performing neuron inversion attack")
55 | lkeys = list(received_weights.keys())
56 | logging.info(f"Layer inverted: {lkeys[-2]}")
57 | received_weights[lkeys[-2]].data = torch.rand(received_weights[lkeys[-2]].shape) * 10000
58 | return received_weights
59 |
--------------------------------------------------------------------------------
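What `model_attack` does to a state dict can be seen on a hand-made dummy dict (not a real NEBULA model): the second-to-last entry is overwritten with large random noise.

```python
# Sketch of the inversion step on a dummy state dict: the second-to-last
# entry (here fc2.weight) is replaced with noise scaled by 10000, exactly
# the operation applied to received_weights above.
import torch

weights = {
    "fc1.weight": torch.ones(4, 8),
    "fc1.bias": torch.ones(4),
    "fc2.weight": torch.ones(2, 4),  # second-to-last key -> gets inverted
    "fc2.bias": torch.ones(2),
}

lkeys = list(weights.keys())
weights[lkeys[-2]].data = torch.rand(weights[lkeys[-2]].shape) * 10000

print(lkeys[-2])                           # fc2.weight
print(weights["fc2.weight"].abs().mean())  # ~5000 instead of 1.0
```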
/nebula/addons/functions.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
4 | def print_msg_box(msg, indent=1, width=None, title=None, logger_name=None):
5 | """
6 | Prints a formatted message box to the logger with an optional title.
7 |
8 | This function creates a visually appealing message box format for logging messages.
9 | It allows for indentation, custom width, and inclusion of a title. If the message is
10 | multiline, each line will be included in the box.
11 |
12 | Args:
13 | msg (str): The message to be displayed inside the box. Must be a string.
14 | indent (int, optional): The number of spaces to indent the message box. Default is 1.
15 | width (int, optional): The width of the message box. If not provided, it will be calculated
16 | based on the longest line of the message and the title (if provided).
17 | title (str, optional): An optional title for the message box. Must be a string if provided.
18 | logger_name (str, optional): The name of the logger to use. If not provided, the root logger
19 | will be used.
20 |
21 | Raises:
22 | TypeError: If `msg` or `title` is not a string.
23 |
24 | Returns:
25 | None
26 |
27 | Notes:
28 | - The message box is bordered with decorative characters to enhance visibility in the logs.
29 | - If the `width` is not provided, it will automatically adjust to fit the content.
30 | """
31 | logger = logging.getLogger(logger_name) if logger_name else logging.getLogger()
32 |
33 | if not isinstance(msg, str):
34 | raise TypeError("msg parameter must be a string") # noqa: TRY003
35 |
36 | lines = msg.split("\n")
37 | space = " " * indent
38 | if not width:
39 | width = max(map(len, lines))
40 | if title:
41 | width = max(width, len(title))
42 | box = f"\n╔{'═' * (width + indent * 2)}╗\n" # upper_border
43 | if title:
44 | if not isinstance(title, str):
45 | raise TypeError("title parameter must be a string") # noqa: TRY003
46 | box += f"║{space}{title:<{width}}{space}║\n" # title
47 | box += f"║{space}{'-' * len(title):<{width}}{space}║\n" # underscore
48 | box += "".join([f"║{space}{line:<{width}}{space}║\n" for line in lines])
49 | box += f"╚{'═' * (width + indent * 2)}╝" # lower_border
50 | logger.info(box)
51 |
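Example usage (the box below is reproduced from the formatting logic above: width adapts to the longest line, the title is underlined with dashes):

    import logging

    from nebula.addons.functions import print_msg_box

    logging.basicConfig(level=logging.INFO)

    print_msg_box("Round 3 finished\nAccuracy: 0.87", indent=2, title="Status")
    # ╔════════════════════╗
    # ║  Status            ║
    # ║  ------            ║
    # ║  Round 3 finished  ║
    # ║  Accuracy: 0.87    ║
    # ╚════════════════════╝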
--------------------------------------------------------------------------------
/nebula/addons/gps/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/gps/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/gps/gpsmodule.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
4 | class GPSModule(ABC):
5 | """
6 | Abstract base class representing a GPS module interface.
7 |
8 | This class defines the required asynchronous methods that any concrete GPS module implementation must provide.
9 | These methods allow for lifecycle control (start/stop), status checking, and distance calculation between coordinates.
10 |
11 | Any subclass must implement all the following asynchronous methods:
12 | - `start()`: Begins GPS tracking or data acquisition.
13 | - `stop()`: Halts the GPS module's operation.
14 | - `is_running()`: Checks whether the GPS module is currently active.
15 | - `calculate_distance()`: Computes the distance between two geographic coordinates (latitude and longitude).
16 |
17 | All implementations should ensure that methods are non-blocking and integrate smoothly with async event loops.
18 | """
19 |
20 | @abstractmethod
21 | async def start(self):
22 | """
23 | Starts the GPS module operation.
24 |
25 | This may involve initiating hardware tracking, establishing connections, or beginning periodic updates.
26 | """
27 | pass
28 |
29 | @abstractmethod
30 | async def stop(self):
31 | """
32 | Stops the GPS module operation.
33 |
34 | Ensures that any background tasks or hardware interactions are properly terminated.
35 | """
36 | pass
37 |
38 | @abstractmethod
39 | async def is_running(self):
40 | """
41 | Checks whether the GPS module is currently active.
42 |
43 | Returns:
44 | bool: True if the module is running, False otherwise.
45 | """
46 | pass
47 |
48 | @abstractmethod
49 | async def calculate_distance(self, self_lat, self_long, other_lat, other_long):
50 | """
51 | Calculates the distance between two geographic points.
52 |
53 | Args:
54 | self_lat (float): Latitude of the source point.
55 | self_long (float): Longitude of the source point.
56 | other_lat (float): Latitude of the target point.
57 | other_long (float): Longitude of the target point.
58 |
59 | Returns:
60 | float: Distance in meters (or implementation-defined units) between the two coordinates.
61 | """
62 | pass
63 |
64 |
65 | class GPSModuleException(Exception):
66 | pass
67 |
68 |
69 | def factory_gpsmodule(gps_module, config, addr, update_interval: float = 5.0, verbose=False) -> GPSModule:
70 | from nebula.addons.gps.nebulagps import NebulaGPS
71 |
72 | GPS_SERVICES = {
73 | "nebula": NebulaGPS,
74 | }
75 |
76 |     gps_service = GPS_SERVICES.get(gps_module, NebulaGPS)
77 | 
78 |     if gps_service:
79 |         return gps_service(config, addr, update_interval, verbose)
80 |     else:
81 |         raise GPSModuleException(f"GPS Module {gps_module} not found")
82 |
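To make the contract concrete, here is a minimal illustrative implementation (not part of the codebase) that satisfies the interface, using the haversine formula to return meters:

    import asyncio
    import math

    from nebula.addons.gps.gpsmodule import GPSModule

    class InMemoryGPS(GPSModule):
        """Illustrative GPSModule: no hardware, just a running flag and haversine distance."""

        def __init__(self):
            self._running = False

        async def start(self):
            self._running = True

        async def stop(self):
            self._running = False

        async def is_running(self):
            return self._running

        async def calculate_distance(self, self_lat, self_long, other_lat, other_long):
            r = 6371000  # mean Earth radius in meters
            phi1, phi2 = math.radians(self_lat), math.radians(other_lat)
            dphi = math.radians(other_lat - self_lat)
            dlmb = math.radians(other_long - self_long)
            a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
            return 2 * r * math.asin(math.sqrt(a))

    async def demo():
        gps = InMemoryGPS()
        await gps.start()
        # Zurich -> Murcia, great-circle distance in meters
        print(await gps.calculate_distance(47.3769, 8.5417, 38.0236, -1.1743))

    asyncio.run(demo())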
--------------------------------------------------------------------------------
/nebula/addons/networksimulation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/networksimulation/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/reputation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/reputation/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/trustworthiness/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/trustworthiness/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/trustworthiness/configs/factsheet_template.json:
--------------------------------------------------------------------------------
1 | {
2 | "project": {
3 | "overview": "",
4 | "purpose": "",
5 | "background": ""
6 | },
7 | "data": {
8 | "provenance": "",
9 | "preprocessing": "",
10 | "avg_entropy": ""
11 | },
12 | "participants": {
13 | "client_num": "",
14 | "sample_client_rate": "",
15 | "client_selector": "",
16 | "avg_dataset_size": ""
17 | },
18 | "configuration": {
19 | "aggregation_algorithm": "",
20 | "training_model": "",
21 | "personalization": "",
22 | "visualization": "",
23 | "differential_privacy": "",
24 | "dp_epsilon": "",
25 | "trainable_param_num": "",
26 | "total_round_num": "",
27 | "learning_rate": "",
28 | "local_update_steps": ""
29 | },
30 | "performance": {
31 | "test_loss_avg": "",
32 | "test_acc_avg": "",
33 | "test_feature_importance_cv": "",
34 | "test_clever": ""
35 | },
36 | "fairness": {
37 | "test_acc_cv": "",
38 | "selection_cv": "",
39 | "class_imbalance": ""
40 | },
41 | "system": {
42 | "avg_time_minutes": "",
43 | "avg_model_size": "",
44 | "total_upload_bytes": "",
45 |         "total_download_bytes": "",
46 | "avg_upload_bytes": "",
47 | "avg_download_bytes": ""
48 | },
49 | "sustainability": {
50 | "avg_carbon_intensity_server": "",
51 | "avg_carbon_intensity_clients": "",
52 | "avg_power_performance_clients": "",
53 | "avg_power_performance_server": "",
54 | "emissions_training": "",
55 | "emissions_aggregation": "",
56 | "emissions_communication_uplink": "",
57 | "emissions_communication_downlink": ""
58 | }
59 | }
60 |
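A hedged sketch of how this template can be instantiated; the paths and values below are illustrative, not a real scenario:

    import json
    import os

    # Load the blank template shipped with the trustworthiness addon.
    template_path = os.path.join("nebula", "addons", "trustworthiness", "configs", "factsheet_template.json")
    with open(template_path) as f:
        factsheet = json.load(f)

    # Fill in a few fields (illustrative values).
    factsheet["participants"]["client_num"] = "10"
    factsheet["configuration"]["aggregation_algorithm"] = "FedAvg"
    factsheet["performance"]["test_acc_avg"] = "0.91"

    with open("factsheet.json", "w") as f:
        json.dump(factsheet, f, indent=4)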
--------------------------------------------------------------------------------
/nebula/addons/trustworthiness/metric.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import os
4 |
5 | from nebula.addons.trustworthiness.graphics import Graphics
6 | from nebula.addons.trustworthiness.pillar import TrustPillar
7 | from nebula.addons.trustworthiness.utils import write_results_json
8 |
9 | dirname = os.path.dirname(__file__)
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 |
14 | class TrustMetricManager:
15 | """
16 | Manager class to help store the output directory and handle calls from the FL framework.
17 | """
18 |
19 | def __init__(self, scenario_start_time):
20 | self.factsheet_file_nm = "factsheet.json"
21 | self.eval_metrics_file_nm = "eval_metrics.json"
22 | self.nebula_trust_results_nm = "nebula_trust_results.json"
23 | self.scenario_start_time = scenario_start_time
24 |
25 | def evaluate(self, experiment_name, weights, use_weights=False):
26 | """
27 | Evaluates the trustworthiness score.
28 |
29 | Args:
30 |             experiment_name (str): The name of the experiment (scenario) for which trustworthiness will be calculated.
31 |             weights (dict): The desired weights of the pillars.
32 |             use_weights (bool): True to apply the weights from the metric config file; defaults to False.
33 | """
34 | # Get scenario name
35 | scenario_name = experiment_name
36 | factsheet_file = os.path.join(os.environ.get('NEBULA_LOGS_DIR'), scenario_name, "trustworthiness", self.factsheet_file_nm)
37 | metrics_cfg_file = os.path.join(dirname, "configs", self.eval_metrics_file_nm)
38 | results_file = os.path.join(os.environ.get('NEBULA_LOGS_DIR'), scenario_name, "trustworthiness", self.nebula_trust_results_nm)
39 |
40 | if not os.path.exists(factsheet_file):
41 | logger.error(f"{factsheet_file} is missing! Please check documentation.")
42 | return
43 |
44 | if not os.path.exists(metrics_cfg_file):
45 | logger.error(f"{metrics_cfg_file} is missing! Please check documentation.")
46 | return
47 |
48 | with open(factsheet_file, "r") as f, open(metrics_cfg_file, "r") as m:
49 | factsheet = json.load(f)
50 | metrics_cfg = json.load(m)
51 | metrics = metrics_cfg.items()
52 | input_docs = {"factsheet": factsheet}
53 |
54 | result_json = {"trust_score": 0, "pillars": []}
55 | final_score = 0
56 | result_print = []
57 | for key, value in metrics:
58 | pillar = TrustPillar(key, value, input_docs, use_weights)
59 | score, result = pillar.evaluate()
60 | weight = weights.get(key) / 100
61 | final_score += weight * score
62 | result_print.append([key, score])
63 | result_json["pillars"].append(result)
64 | final_score = round(final_score, 2)
65 | result_json["trust_score"] = final_score
66 | write_results_json(results_file, result_json)
67 |
68 | graphics = Graphics(self.scenario_start_time, scenario_name)
69 | graphics.graphics()
70 |
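A hypothetical invocation; the pillar names below are illustrative and must match the top-level keys of configs/eval_metrics.json, and NEBULA_LOGS_DIR must point at the scenario logs. Since each weight is divided by 100, the weights should sum to 100:

    import os

    from nebula.addons.trustworthiness.metric import TrustMetricManager

    os.environ["NEBULA_LOGS_DIR"] = "/nebula/app/logs"  # assumed log location

    weights = {"robustness": 25, "privacy": 25, "fairness": 25, "sustainability": 25}
    manager = TrustMetricManager(scenario_start_time="01/01/2024 12:00:00")
    manager.evaluate("my_scenario", weights, use_weights=True)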
--------------------------------------------------------------------------------
/nebula/addons/waf/Dockerfile-grafana:
--------------------------------------------------------------------------------
1 | FROM grafana/grafana:latest
2 |
3 | ARG USER
4 | ENV USER=${USER}
5 |
6 | COPY ./grafana/dashboard_config.yml /etc/grafana/provisioning/dashboards/local.yml
7 | COPY ./grafana/automatic.yml /etc/grafana/provisioning/datasources/automatic.yml
8 | COPY ./grafana/dashboard.json /var/lib/grafana/dashboards/dashboard.json
9 |
10 | RUN sed -i "s|http://nebula|http://$USER|g" /etc/grafana/provisioning/datasources/automatic.yml
11 |
--------------------------------------------------------------------------------
/nebula/addons/waf/Dockerfile-loki:
--------------------------------------------------------------------------------
1 | FROM grafana/loki:latest
2 |
3 | COPY loki-config.yml /mnt/config/loki-config.yml
4 |
--------------------------------------------------------------------------------
/nebula/addons/waf/Dockerfile-promtail:
--------------------------------------------------------------------------------
1 | FROM grafana/promtail:latest
2 |
3 | ARG USER
4 | ENV USER=${USER}
5 |
6 | COPY promtail-config.yml /etc/promtail/config.yml
7 |
8 | RUN sed -i "s|http://nebula|http://$USER|g" /etc/promtail/config.yml
9 |
--------------------------------------------------------------------------------
/nebula/addons/waf/Dockerfile-waf:
--------------------------------------------------------------------------------
1 | # First Stage: Build NGINX and modules
2 | FROM owasp/modsecurity-crs:3.3.5-nginx-202310170110
3 |
4 | ARG NGINX_VERSION=1.24.0
5 |
6 | ARG USER
7 | ENV USER=${USER}
8 |
9 | # Install necessary packages
10 | RUN apt-get update && apt-get install -y libmaxminddb0 libmaxminddb-dev mmdb-bin git wget
11 | RUN apt install -y build-essential libpcre3 libpcre3-dev zlib1g zlib1g-dev libssl-dev
12 | RUN wget http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -P nginx-modules
13 | RUN git clone https://github.com/leev/ngx_http_geoip2_module.git nginx-modules/ngx_http_geoip2_module
14 | RUN tar zxvf nginx-modules/nginx-$NGINX_VERSION.tar.gz -C nginx-modules
15 |
16 | # Compile nginx with the geoip2 dynamic module
17 | RUN cd nginx-modules/nginx-$NGINX_VERSION && ./configure --add-dynamic-module=../ngx_http_geoip2_module --prefix=/etc/nginx --sbin-path=/usr/sbin/nginx \
18 | --modules-path=/usr/lib/nginx/modules --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log \
19 | --http-log-path=/var/log/nginx/access.log --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock \
20 | --http-client-body-temp-path=/var/cache/nginx/client_temp --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
21 | --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
22 | --http-scgi-temp-path=/var/cache/nginx/scgi_temp --user=nginx --group=nginx --with-compat --with-file-aio --with-threads \
23 | --with-http_addition_module --with-http_auth_request_module --with-http_dav_module --with-http_flv_module --with-http_gunzip_module \
24 | --with-http_gzip_static_module --with-http_mp4_module --with-http_random_index_module --with-http_realip_module --with-http_secure_link_module \
25 | --with-http_slice_module --with-http_ssl_module --with-http_stub_status_module --with-http_sub_module --with-http_v2_module --with-mail \
26 | --with-mail_ssl_module --with-stream --with-stream_realip_module --with-stream_ssl_module --with-stream_ssl_preread_module \
27 | --with-cc-opt='-g -O2 -ffile-prefix-map=/data/builder/debuild/nginx-1.24.0/debian/debuild-base/nginx-1.24.0=. -fstack-protector-strong \
28 | -Wformat -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -fPIC' --with-ld-opt='-Wl,-z,relro -Wl,-z,now -Wl,--as-needed -pie'
29 |
30 | # Build and install nginx with the geoip2 module
31 | RUN cd nginx-modules/nginx-$NGINX_VERSION && make
32 | RUN cd nginx-modules/nginx-$NGINX_VERSION && make install
33 |
34 | # copy modules to default modules path
35 | RUN cp nginx-modules/nginx-$NGINX_VERSION/objs/ngx_http_geoip2_module.so /usr/lib/nginx/modules
36 | RUN cp nginx-modules/nginx-$NGINX_VERSION/objs/ngx_stream_geoip2_module.so /usr/lib/nginx/modules
37 |
38 | # Download the GeoLite2 country database
39 | RUN wget https://github.com/P3TERX/GeoLite.mmdb/raw/download/GeoLite2-Country.mmdb -P /usr/share/GeoIP/
40 |
41 | # nginx configuration files
42 | COPY default.conf /etc/nginx/templates/conf.d/default.conf.template
43 |
44 | RUN sed -i "s|http://nebula|http://${USER}|g" /etc/nginx/templates/conf.d/default.conf.template
45 |
46 | COPY nginx.conf /etc/nginx/templates/nginx.conf.template
47 |
48 | # owasp crs
49 | COPY crs-setup.conf /etc/modsecurity.d/owasp-crs/crs-setup.conf
50 |
--------------------------------------------------------------------------------
/nebula/addons/waf/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/addons/waf/__init__.py
--------------------------------------------------------------------------------
/nebula/addons/waf/default.conf:
--------------------------------------------------------------------------------
1 | # Nginx configuration for both HTTP and SSL
2 | map $http_upgrade $connection_upgrade {
3 | default upgrade;
4 | '' close;
5 | }
6 |
7 | set_real_ip_from 192.0.0.0/8;
8 | real_ip_header X-Forwarded-For;
9 | real_ip_recursive on;
10 |
11 | server {
12 | listen 80 default_server;
13 |
14 | server_name localhost;
15 | set $upstream http://nebula_nebula-frontend; # Change this
16 | set $always_redirect off;
17 | modsecurity on;
18 | location /platform {
19 | client_max_body_size 0;
20 |
21 | if ($always_redirect = on) {
22 | return 301 https://$host$request_uri;
23 | }
24 |
25 | modsecurity_rules '
26 | SecRule REQUEST_URI "@beginsWith /platform/socket.io/" "id:4200000,phase:1,allow,nolog"
27 | ';
28 |
29 | modsecurity_rules '
30 | SecRule REQUEST_URI "@beginsWith /platform/ws/" "id:4200001,phase:1,allow,nolog"
31 | ';
32 |
33 | modsecurity_rules '
34 | SecRule REQUEST_URI "@beginsWith /platform/dashboard/deployment/run" "id:4200005,phase:1,nolog,pass,ctl:ruleRemoveById=200007"
35 | ';
36 |
37 | modsecurity_rules '
38 | SecRule REQUEST_URI "@beginsWith /platform/dashboard/deployment/run" "id:4200006,phase:2,nolog,pass,ctl:ruleRemoveById=200002"
39 | ';
40 |
41 | # modsecurity_rules "
42 | # SecRule REMOTE_ADDR \"@geoLookup\" \\
43 | # \"id:100, phase:1, t:none, pass, \\
44 | # setvar:tx.geoip_country_code=%{geoip.country_code3}, \\
45 | # log, auditlog, msg:'GeoIP Country Code: %{tx.geoip_country_code}'\"
46 | # ";
47 |
48 | include includes/proxy_backend.conf;
49 |
50 | index index.html index.htm;
51 | root /usr/share/nginx/html;
52 | }
53 |
54 | access_log /var/log/nginx/analytics.log json_analytics;
55 |
56 | include includes/location_common.conf;
57 | #include includes/custom_locations.conf;
58 |
59 | }
60 |
61 | server {
62 | listen 443 ssl;
63 |
64 | server_name localhost;
65 | set $upstream http://localhost:80;
66 |
67 | ssl_certificate /etc/nginx/conf/server.crt;
68 | ssl_certificate_key /etc/nginx/conf/server.key;
69 | ssl_session_timeout 1d;
70 | ssl_session_cache shared:MozSSL:10m;
71 | ssl_session_tickets off;
72 |
73 | ssl_dhparam /etc/ssl/certs/dhparam-2048.pem;
74 |
75 | ssl_protocols TLSv1.2 TLSv1.3;
76 | ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
77 | ssl_prefer_server_ciphers off;
78 |
79 | ssl_stapling off;
80 | ssl_stapling_verify off;
81 |
82 | ssl_verify_client off;
83 |
84 | location / {
85 | client_max_body_size 0;
86 |
87 | include includes/proxy_backend.conf;
88 |
89 | index index.html index.htm;
90 | root /usr/share/nginx/html;
91 | }
92 | include includes/location_common.conf;
93 | #include includes/custom_locations.conf;
94 | }
95 |
--------------------------------------------------------------------------------
/nebula/addons/waf/geoip/GeoIP.conf:
--------------------------------------------------------------------------------
1 | # GeoIP.conf file for `geoipupdate` program, for versions >= 3.1.1.
2 | # Used to update GeoIP databases from https://www.maxmind.com.
3 | # For more information about this config file, visit the docs at
4 | # https://dev.maxmind.com/geoip/updating-databases.
5 |
6 | # `AccountID` is from your MaxMind account.
7 | AccountID your_AccountID
8 |
9 | # `LicenseKey` is from your MaxMind account
10 | LicenseKey your_LicenseKey
11 |
12 | # `EditionIDs` is from your MaxMind account.
13 | EditionIDs GeoLite2-ASN GeoLite2-City GeoLite2-Country
14 |
15 | # The remaining settings are OPTIONAL.
16 |
17 | # The directory to store the database files. Defaults to /usr/share/GeoIP
18 | DatabaseDirectory /usr/share/GeoIP/
19 |
20 | # The server to use. Defaults to "updates.maxmind.com".
21 | # Host updates.maxmind.com
22 |
23 | # The proxy host name or IP address. You may optionally specify a
24 | # port number, e.g., 127.0.0.1:8888. If no port number is specified, 1080
25 | # will be used.
26 | # Proxy 127.0.0.1:8888
27 |
28 | # The user name and password to use with your proxy server.
29 | # ProxyUserPassword username:password
30 |
31 | # Whether to preserve modification times of files downloaded from the server.
32 | # Defaults to "0".
33 | # PreserveFileTimes 0
34 |
35 | # The lock file to use. This ensures only one geoipupdate process can run at a
36 | # time.
37 | # Note: Once created, this lockfile is not removed from the filesystem.
38 | # Defaults to ".geoipupdate.lock" under the DatabaseDirectory.
39 | # LockFile /usr/share/GeoIP/.geoipupdate.lock
40 |
41 | # The amount of time to retry for when errors during HTTP transactions are
42 | # encountered. It can be specified as a (possibly fractional) decimal number
43 | # followed by a unit suffix. Valid time units are "ns", "us" (or "µs"), "ms",
44 | # "s", "m", "h".
45 | # Defaults to "5m" (5 minutes).
46 | # RetryFor 5m
47 |
48 | # The number of parallel database downloads.
49 | # Defaults to "1".
50 | # Parallelism 1
51 |
--------------------------------------------------------------------------------
/nebula/addons/waf/grafana/automatic.yml:
--------------------------------------------------------------------------------
1 | datasources:
2 | - name: Loki
3 | type: loki
4 | url: http://nebula_nebula-waf-loki:3100
5 | isDefault: true
6 | editable: true
7 |
--------------------------------------------------------------------------------
/nebula/addons/waf/grafana/dashboard_config.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 |   # a unique provider name. Required
5 | - name: 'a unique provider name'
6 | # Org id. Default to 1
7 | orgId: 1
8 | # name of the dashboard folder.
9 | folder: ''
10 | # folder UID. will be automatically generated if not specified
11 | folderUid: ''
12 | # provider type. Default to 'file'
13 | type: file
14 | # disable dashboard deletion
15 | disableDeletion: false
16 | # how often Grafana will scan for changed dashboards
17 | updateIntervalSeconds: 10
18 | # allow updating provisioned dashboards from the UI
19 | allowUiUpdates: true
20 | options:
21 | # path to dashboard files on disk. Required when using the 'file' type
22 | path: /var/lib/grafana/dashboards
23 | # use folder names from filesystem to create folders in Grafana
24 | foldersFromFilesStructure: true
25 |
--------------------------------------------------------------------------------
/nebula/addons/waf/loki-config.yml:
--------------------------------------------------------------------------------
1 | auth_enabled: false
2 |
3 | common:
4 | path_prefix: /loki
5 |
6 | server:
7 | http_listen_port: 3100
8 |
9 | ingester:
10 | lifecycler:
11 | address: 127.0.0.1
12 | ring:
13 | kvstore:
14 | store: inmemory
15 | replication_factor: 1
16 | final_sleep: 0s
17 | chunk_idle_period: 1h # Any chunk not receiving new logs in this time will be flushed
18 | max_chunk_age: 1h # All chunks will be flushed when they hit this age, default is 1h
19 |   chunk_target_size: 1048576 # Loki will attempt to build chunks up to 1MB, flushing first if chunk_idle_period or max_chunk_age is reached first
20 | chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
21 |
22 | query_scheduler:
23 | max_outstanding_requests_per_tenant: 4096
24 |
25 | schema_config:
26 | configs:
27 | - from: 2020-10-24
28 | store: tsdb
29 | object_store: filesystem
30 | schema: v13
31 | index:
32 | prefix: index_
33 | period: 24h
34 |
35 | storage_config:
36 | filesystem:
37 | directory: /loki/chunks
38 | tsdb_shipper:
39 | active_index_directory: /loki/tsdb-index
40 | cache_location: /loki/tsdb-cache
41 |
42 | compactor:
43 | working_directory: /loki/boltdb-shipper-compactor
44 |
45 | limits_config:
46 | reject_old_samples: true
47 | max_query_length: 0h
48 | max_query_series: 100000
49 | reject_old_samples_max_age: 168h
50 | allow_structured_metadata: false
51 |
52 | table_manager:
53 | retention_deletes_enabled: false
54 | retention_period: 0s
55 |
56 | # limits_config:
57 | # ingestion_rate_strategy: local # Default: global
58 | # max_global_streams_per_user: 5000
59 | # max_query_length: 0h # Default: 721h
60 | # max_query_parallelism: 32 # Old Default: 14
61 | # max_streams_per_user: 0 # Old Default: 10000
62 |
63 | ruler:
64 | storage:
65 | type: local
66 | local:
67 | directory: /loki/rules
68 | rule_path: /loki/rules-temp
69 | alertmanager_url: http://localhost:9093
70 | ring:
71 | kvstore:
72 | store: inmemory
73 | enable_api: true
74 |
--------------------------------------------------------------------------------
/nebula/addons/waf/nginx.conf:
--------------------------------------------------------------------------------
1 | load_module modules/ngx_http_modsecurity_module.so;
2 | load_module modules/ngx_http_geoip2_module.so;
3 | load_module modules/ngx_stream_geoip2_module.so;
4 |
5 | worker_processes auto;
6 | pid /var/run/nginx.pid;
7 |
8 | events {
9 | worker_connections 1024;
10 | }
11 |
12 | http {
13 |
14 | geoip2 /usr/share/GeoIP/GeoLite2-Country.mmdb {
15 | $geoip2_country_iso_code country iso_code;
16 | }
17 |
18 | map $http_referer $httpReferer {
19 | default "$http_referer";
20 | "" "(direct)";
21 | }
22 |
23 | map $http_user_agent $httpAgent {
24 | default "$http_user_agent";
25 | "" "Unknown";
26 | }
27 |
28 | map $geoip2_country_iso_code $geoIP {
29 | default "$geoip2_country_iso_code";
30 | "" "Unknown";
31 | }
32 |
33 | log_format json_analytics escape=json '{'
34 | '"time_local": "$time_local", '
35 | '"remote_addr": "$remote_addr", '
36 | '"request_uri": "$request_uri", '
37 | '"status": "$status", '
38 | '"http_referer": "$httpReferer", '
39 | '"http_user_agent": "$httpAgent", '
40 | '"server_name": "$server_name", '
41 | '"request_time": "$request_time", '
42 | '"geoip_country_code": "$geoIP"'
43 | '}';
44 |
45 | include /etc/nginx/mime.types;
46 | default_type application/octet-stream;
47 | keepalive_timeout 60s;
48 | sendfile on;
49 |
50 | resolver 127.0.0.11 valid=5s;
51 | include /etc/nginx/conf.d/*.conf;
52 | }
53 |
--------------------------------------------------------------------------------
/nebula/addons/waf/promtail-config.yml:
--------------------------------------------------------------------------------
1 | server:
2 | http_listen_port: 9080
3 | grpc_listen_port: 0
4 |
5 | positions:
6 | filename: /tmp/positions.yaml
7 |
8 | clients:
9 | - url: http://nebula_nebula-waf-loki:3100/loki/api/v1/push
10 |
11 | scrape_configs:
12 | - job_name: nginx
13 | static_configs:
14 | - targets:
15 | - localhost
16 | labels:
17 | job: nginx
18 | host: localhost
19 | agent: promtail
20 | __path__: /var/log/nginx/*
21 | pipeline_stages:
22 | - json:
23 | expressions:
24 | http_user_agent:
25 | request_uri:
26 | - drop:
27 | source: http_user_agent
28 | expression: "(bot|Bot|RSS|Producer|Expanse|spider|crawler|Crawler|Inspect|test)"
29 | - drop:
30 | source: request_uri
31 | expression: "/(assets|img)/"
32 | - drop:
33 | source: request_uri
34 |           expression: "/(robots\\.txt|favicon\\.ico|index\\.php)"
35 | - drop:
36 | source: request_uri
37 |           expression: "(\\.php|\\.xml|\\.png)$"
38 |
--------------------------------------------------------------------------------
/nebula/config/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/config/__init__.py
--------------------------------------------------------------------------------
/nebula/controller/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive
4 |
5 | RUN apt-get update && apt-get install -y tzdata \
6 | && ln -fs /usr/share/zoneinfo/Europe/Madrid /etc/localtime \
7 | && dpkg-reconfigure -f noninteractive tzdata
8 |
9 | ENV TZ=Europe/Madrid
10 |
11 | # Install Python 3.11
12 | RUN apt-get update && apt-get install -y software-properties-common
13 | RUN add-apt-repository ppa:deadsnakes/ppa
14 | RUN apt-get update && apt-get install -y python3.11 python3.11-dev python3.11-distutils python3.11-venv
15 |
16 | # Install curl and network tools
17 | RUN apt-get install -y curl net-tools iproute2 iputils-ping
18 |
19 | # Update alternatives to make Python 3.11 the default
20 | RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 2
21 | RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1
22 |
23 | # Install gcc and git
24 | RUN apt-get update && apt-get install -y build-essential gcc g++ clang git make cmake g++-aarch64-linux-gnu dos2unix
25 |
26 | # Install docker
27 | RUN apt-get install -y ca-certificates curl gnupg
28 | RUN install -m 0755 -d /etc/apt/keyrings
29 | RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
30 | RUN chmod a+r /etc/apt/keyrings/docker.gpg
31 | RUN echo \
32 | "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
33 | "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
34 | tee /etc/apt/sources.list.d/docker.list > /dev/null
35 | RUN apt-get update
36 |
37 | RUN apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
38 |
39 | ADD https://astral.sh/uv/install.sh /uv-installer.sh
40 |
41 | RUN sh /uv-installer.sh && rm /uv-installer.sh
42 |
43 | ENV PATH="/root/.local/bin/:$PATH"
44 |
45 | COPY pyproject.toml .
46 |
47 | RUN uv python install 3.11.7
48 |
49 | RUN uv python pin 3.11.7
50 |
51 | RUN uv sync --group controller --group core
52 |
53 | ENV PATH="/.venv/bin:$PATH"
54 |
55 | COPY /nebula/controller/start_services.sh .
56 |
57 | RUN dos2unix start_services.sh
58 |
59 | RUN chmod +x start_services.sh
60 |
61 | ENTRYPOINT ["/bin/bash", "/start_services.sh"]
62 |
--------------------------------------------------------------------------------
/nebula/controller/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/controller/__init__.py
--------------------------------------------------------------------------------
/nebula/controller/start_services.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Print commands and their arguments as they are executed (debugging)
4 | set -x
5 |
6 | # Print debug messages to the console
7 | echo "Starting services..."
8 |
9 | cd nebula
10 | echo "path $(pwd)"
11 | # Start Uvicorn
12 | NEBULA_SOCK=nebula.sock
13 |
14 | echo "NEBULA_PRODUCTION: $NEBULA_PRODUCTION"
15 | if [ "$NEBULA_PRODUCTION" = "False" ]; then
16 |     echo "Starting Uvicorn in dev mode..."
17 | uvicorn nebula.controller.controller:app --host 0.0.0.0 --port $NEBULA_CONTROLLER_PORT --log-level debug --proxy-headers --forwarded-allow-ips "*" &
18 | else
19 |     echo "Starting Uvicorn in production mode..."
20 | uvicorn nebula.controller.controller:app --host 0.0.0.0 --port $NEBULA_CONTROLLER_PORT --log-level info --proxy-headers --forwarded-allow-ips "*" &
21 | fi
22 |
23 | tail -f /dev/null
24 |
--------------------------------------------------------------------------------
/nebula/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/__init__.py
--------------------------------------------------------------------------------
/nebula/core/aggregation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/aggregation/__init__.py
--------------------------------------------------------------------------------
/nebula/core/aggregation/fedavg.py:
--------------------------------------------------------------------------------
1 | import gc
2 |
3 | import torch
4 |
5 | from nebula.core.aggregation.aggregator import Aggregator
6 |
7 |
8 | class FedAvg(Aggregator):
9 | """
10 | Aggregator: Federated Averaging (FedAvg)
11 | Authors: McMahan et al.
12 | Year: 2016
13 | """
14 |
15 | def __init__(self, config=None, **kwargs):
16 | super().__init__(config, **kwargs)
17 |
18 | def run_aggregation(self, models):
19 | super().run_aggregation(models)
20 |
21 | models = list(models.values())
22 |
23 | total_samples = float(sum(weight for _, weight in models))
24 |
25 | if total_samples == 0:
26 | raise ValueError("Total number of samples must be greater than zero.")
27 |
28 | last_model_params = models[-1][0]
29 | accum = {layer: torch.zeros_like(param, dtype=torch.float32) for layer, param in last_model_params.items()}
30 |
31 | with torch.no_grad():
32 | for model_parameters, weight in models:
33 | normalized_weight = weight / total_samples
34 | for layer in accum:
35 | accum[layer].add_(
36 | model_parameters[layer].to(accum[layer].dtype),
37 | alpha=normalized_weight,
38 | )
39 |
40 | del models
41 | gc.collect()
42 |
43 | # self.print_model_size(accum)
44 | return accum
45 |
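A toy check of the weighted average, assuming the base Aggregator tolerates a None config and its run_aggregation hook only validates its input:

    import torch

    from nebula.core.aggregation.fedavg import FedAvg

    # Two single-parameter "models" holding 1 and 3 samples respectively;
    # the result should be (1 * 0.0 + 3 * 4.0) / 4 = 3.0.
    models = {
        "node_a": ({"w": torch.tensor([0.0])}, 1),
        "node_b": ({"w": torch.tensor([4.0])}, 3),
    }
    print(FedAvg().run_aggregation(models))  # {'w': tensor([3.])}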
--------------------------------------------------------------------------------
/nebula/core/aggregation/krum.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | import torch
3 |
4 | from nebula.core.aggregation.aggregator import Aggregator
5 |
6 |
7 | class Krum(Aggregator):
8 | """
9 | Aggregator: Krum
10 | Authors: Peva Blanchard et al.
11 | Year: 2017
12 | Note: https://papers.nips.cc/paper/2017/hash/f4b9ec30ad9f68f89b29639786cb62ef-Abstract.html
13 | """
14 |
15 | def __init__(self, config=None, **kwargs):
16 | super().__init__(config, **kwargs)
17 |
18 | def run_aggregation(self, models):
19 | super().run_aggregation(models)
20 |
21 | models = list(models.values())
22 |
23 | accum = {layer: torch.zeros_like(param).float() for layer, param in models[-1][0].items()}
24 | total_models = len(models)
25 | distance_list = [0 for i in range(0, total_models)]
26 | min_index = 0
27 | min_distance_sum = float("inf")
28 |
29 | for i in range(0, total_models):
30 | m1, _ = models[i]
31 | for j in range(0, total_models):
32 | m2, _ = models[j]
33 |                 # Skip the self-distance
34 |                 if i == j:
35 |                     continue
36 |                 # Sum the layer-wise L2 distances between models i and j
37 |                 distance = 0
38 |                 for layer in m1:
39 |                     l1 = m1[layer]
40 |                     l2 = m2[layer]
41 |                     distance += numpy.linalg.norm(l1 - l2)
42 |                 distance_list[i] += distance
43 | 
44 | if min_distance_sum > distance_list[i]:
45 | min_distance_sum = distance_list[i]
46 | min_index = i
47 | m, _ = models[min_index]
48 | for layer in m:
49 | accum[layer] = accum[layer] + m[layer]
50 |
51 | return accum
52 |
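The selection rule above, replayed standalone on toy models: the update whose summed distance to all other updates is smallest wins, so an outlier is never chosen:

    import numpy
    import torch

    models = [
        {"w": torch.tensor([0.0, 0.0])},
        {"w": torch.tensor([0.1, 0.0])},
        {"w": torch.tensor([5.0, 5.0])},  # outlier / poisoned update
    ]
    scores = []
    for i, m1 in enumerate(models):
        total = 0.0
        for j, m2 in enumerate(models):
            if i != j:
                for layer in m1:
                    total += numpy.linalg.norm((m1[layer] - m2[layer]).numpy())
        scores.append(total)
    print(scores.index(min(scores)))  # prints 1: an honest model, never the outlier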
--------------------------------------------------------------------------------
/nebula/core/aggregation/median.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from nebula.core.aggregation.aggregator import Aggregator
5 |
6 |
7 | class Median(Aggregator):
8 | """
9 | Aggregator: Median
10 |     Authors: Dong Yin et al.
11 |     Year: 2018
12 | Note: https://arxiv.org/pdf/1803.01498.pdf
13 | """
14 |
15 | def __init__(self, config=None, **kwargs):
16 | super().__init__(config, **kwargs)
17 |
18 | def get_median(self, weights):
19 | # check if the weight tensor has enough space
20 | weight_len = len(weights)
21 |
22 | median = 0
23 | if weight_len % 2 == 1:
24 | # odd number, return the median
25 | median, _ = torch.median(weights, 0)
26 | else:
27 | # even number, return the mean of median two numbers
28 | # sort the tensor
29 | arr_weights = np.asarray(weights)
30 | nobs = arr_weights.shape[0]
31 | start = int(nobs / 2) - 1
32 | end = int(nobs / 2) + 1
33 | atmp = np.partition(arr_weights, (start, end - 1), 0)
34 | sl = [slice(None)] * atmp.ndim
35 | sl[0] = slice(start, end)
36 | arr_median = np.mean(atmp[tuple(sl)], axis=0)
37 | median = torch.tensor(arr_median)
38 | return median
39 |
40 | def run_aggregation(self, models):
41 | super().run_aggregation(models)
42 |
43 | models = list(models.values())
44 | models_params = [m for m, _ in models]
45 |
46 | total_models = len(models)
47 |
48 | accum = {layer: torch.zeros_like(param).float() for layer, param in models[-1][0].items()}
49 |
50 |         # Calculate the median for each parameter
51 | for layer in accum:
52 | weight_layer = accum[layer]
53 | # get the shape of layer tensor
54 | l_shape = list(weight_layer.shape)
55 |
56 | # get the number of elements of layer tensor
57 | number_layer_weights = torch.numel(weight_layer)
58 | # if its 0-d tensor
59 | if l_shape == []:
60 | weights = torch.tensor([models_params[j][layer] for j in range(0, total_models)])
61 | weights = weights.double()
62 | w = self.get_median(weights)
63 | accum[layer] = w
64 |
65 | else:
66 | # flatten the tensor
67 | weight_layer_flatten = weight_layer.view(number_layer_weights)
68 |
69 | # flatten the tensor of each model
70 | models_layer_weight_flatten = torch.stack(
71 | [models_params[j][layer].view(number_layer_weights) for j in range(0, total_models)],
72 | 0,
73 | )
74 |
75 | # get the weight list [w1j,w2j,··· ,wmj], where wij is the jth parameter of the ith local model
76 | median = self.get_median(models_layer_weight_flatten)
77 | accum[layer] = median.view(l_shape)
78 | return accum
79 |
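Coordinate-wise median on toy updates; note that for an even number of models torch.median returns the lower of the two middle values, which is why get_median above averages them instead:

    import torch

    stacked = torch.stack([
        torch.tensor([1.0, 2.0, 3.0]),
        torch.tensor([1.1, 2.1, 3.1]),
        torch.tensor([9.0, -9.0, 9.0]),  # poisoned update
    ])
    print(torch.median(stacked, 0).values)  # tensor([1.1000, 2.0000, 3.1000])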
--------------------------------------------------------------------------------
/nebula/core/aggregation/trimmedmean.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from nebula.core.aggregation.aggregator import Aggregator
5 |
6 |
7 | class TrimmedMean(Aggregator):
8 | """
9 | Aggregator: TrimmedMean
10 |     Authors: Dong Yin et al.
11 |     Year: 2018
12 | Note: https://arxiv.org/pdf/1803.01498.pdf
13 | """
14 |
15 | def __init__(self, config=None, beta=0, **kwargs):
16 | super().__init__(config, **kwargs)
17 | self.beta = beta
18 |
19 | def get_trimmedmean(self, weights):
20 | # check if the weight tensor has enough space
21 | weight_len = len(weights)
22 |
23 |         if weight_len <= 2 * self.beta:
24 |             remaining_weights = weights
25 |             res = torch.mean(remaining_weights, 0)
26 | 
27 |         else:
28 |             # remove the largest and smallest β items
29 |             arr_weights = np.asarray(weights)
30 |             nobs = arr_weights.shape[0]
31 |             start = self.beta
32 |             end = nobs - self.beta
33 |             atmp = np.partition(arr_weights, (start, end - 1), 0)
34 |             sl = [slice(None)] * atmp.ndim
35 |             sl[0] = slice(start, end)
36 |             # mean of the remaining (trimmed) weights
37 |             arr_median = np.mean(atmp[tuple(sl)], axis=0)
38 |             res = torch.tensor(arr_median)
39 |
40 |         # note: beta = 0 reduces this to a plain mean
41 |
42 | return res
43 |
44 | def run_aggregation(self, models):
45 | super().run_aggregation(models)
46 |
47 | models = list(models.values())
48 | models_params = [m for m, _ in models]
49 |
50 | total_models = len(models)
51 |
52 | accum = {layer: torch.zeros_like(param).float() for layer, param in models[-1][0].items()}
53 |
54 | for layer in accum:
55 | weight_layer = accum[layer]
56 | # get the shape of layer tensor
57 | l_shape = list(weight_layer.shape)
58 |
59 | # get the number of elements of layer tensor
60 | number_layer_weights = torch.numel(weight_layer)
61 | # if its 0-d tensor
62 | if l_shape == []:
63 | weights = torch.tensor([models_params[j][layer] for j in range(0, total_models)])
64 | weights = weights.double()
65 | w = self.get_trimmedmean(weights)
66 | accum[layer] = w
67 |
68 | else:
69 | # flatten the tensor
70 | weight_layer_flatten = weight_layer.view(number_layer_weights)
71 |
72 | # flatten the tensor of each model
73 | models_layer_weight_flatten = torch.stack(
74 | [models_params[j][layer].view(number_layer_weights) for j in range(0, total_models)],
75 | 0,
76 | )
77 |
78 | # get the weight list [w1j,w2j,··· ,wmj], where wij is the jth parameter of the ith local model
79 | trimmedmean = self.get_trimmedmean(models_layer_weight_flatten)
80 | accum[layer] = trimmedmean.view(l_shape)
81 |
82 | return accum
83 |
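The trimming step in isolation with β = 1: sort per coordinate, drop the β smallest and β largest values, and average the rest (np.partition above achieves the same selection without a full sort):

    import numpy as np

    beta = 1
    arr = np.array([[1.0], [1.2], [100.0], [0.9], [1.1]])  # one poisoned value

    trimmed = np.sort(arr, axis=0)[beta:len(arr) - beta]
    print(trimmed.mean(axis=0))  # [1.1]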
--------------------------------------------------------------------------------
/nebula/core/aggregation/updatehandlers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/aggregation/updatehandlers/__init__.py
--------------------------------------------------------------------------------
/nebula/core/aggregation/updatehandlers/sdflupdatehandler.py:
--------------------------------------------------------------------------------
1 | from nebula.core.aggregation.updatehandlers.updatehandler import UpdateHandler
2 | from nebula.core.nebulaevents import UpdateNeighborEvent, UpdateReceivedEvent
3 |
4 |
5 | class SFDLUpdateHandler(UpdateHandler):
6 | def __init__(
7 | self,
8 | aggregator,
9 | addr,
10 | ):
11 | pass
12 |
13 |     async def init(self):
14 | raise NotImplementedError
15 |
16 | async def round_expected_updates(self, federation_nodes: set):
17 | raise NotImplementedError
18 |
19 | async def storage_update(self, updt_received_event: UpdateReceivedEvent):
20 | raise NotImplementedError
21 |
22 | async def get_round_updates(self) -> dict[str, tuple[object, float]]:
23 | raise NotImplementedError
24 |
25 | async def notify_federation_update(self, updt_nei_event: UpdateNeighborEvent):
26 | raise NotImplementedError
27 |
28 | async def get_round_missing_nodes(self) -> set[str]:
29 | raise NotImplementedError
30 |
31 | async def notify_if_all_updates_received(self):
32 | raise NotImplementedError
33 |
34 | async def stop_notifying_updates(self):
35 | raise NotImplementedError
36 |
--------------------------------------------------------------------------------
/nebula/core/datasets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/datasets/__init__.py
--------------------------------------------------------------------------------
/nebula/core/datasets/changeablesubset.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import Subset
2 |
3 |
4 | class ChangeableSubset(Subset):
5 | def __init__(
6 | self,
7 | dataset,
8 | indices,
9 | ):
10 | super().__init__(dataset, indices)
11 | self.dataset = dataset
12 | self.indices = indices
13 |
14 | def __getitem__(self, idx):
15 | if isinstance(idx, list):
16 | return self.dataset[[self.indices[i] for i in idx]]
17 | return self.dataset[self.indices[idx]]
18 |
19 | def __len__(self):
20 | return len(self.indices)
21 |
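Usage sketch; list indexing is forwarded through the stored indices, assuming the wrapped dataset accepts a list index (TensorDataset does):

    import torch
    from torch.utils.data import TensorDataset

    from nebula.core.datasets.changeablesubset import ChangeableSubset

    data = TensorDataset(torch.arange(10).float().unsqueeze(1), torch.arange(10))
    subset = ChangeableSubset(data, indices=[0, 2, 4, 6, 8])

    print(len(subset))     # 5
    print(subset[1])       # item at original index 2
    print(subset[[0, 1]])  # batch holding original indices 0 and 2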
--------------------------------------------------------------------------------
/nebula/core/datasets/cifar10/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/datasets/cifar10/__init__.py
--------------------------------------------------------------------------------
/nebula/core/datasets/cifar100/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/datasets/cifar100/__init__.py
--------------------------------------------------------------------------------
/nebula/core/datasets/emnist/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/datasets/emnist/__init__.py
--------------------------------------------------------------------------------
/nebula/core/datasets/fashionmnist/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/datasets/fashionmnist/__init__.py
--------------------------------------------------------------------------------
/nebula/core/datasets/mnist/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/datasets/mnist/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/models/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/cifar10/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/models/cifar10/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/cifar10/cnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class CIFAR10ModelCNN(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=3,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.rand(1, 3, 32, 32)
21 | self.learning_rate = learning_rate
22 | self.criterion = torch.nn.CrossEntropyLoss()
23 | self.conv1 = torch.nn.Conv2d(input_channels, 16, 3, padding=1)
24 | self.conv2 = torch.nn.Conv2d(16, 32, 3, padding=1)
25 | self.conv3 = torch.nn.Conv2d(32, 64, 3, padding=1)
26 | self.pool = torch.nn.MaxPool2d(2, 2)
27 | self.fc1 = torch.nn.Linear(64 * 4 * 4, 512)
28 | self.fc2 = torch.nn.Linear(512, num_classes)
29 |
30 | def forward(self, x):
31 | x = self.pool(torch.relu(self.conv1(x)))
32 | x = self.pool(torch.relu(self.conv2(x)))
33 | x = self.pool(torch.relu(self.conv3(x)))
34 | x = x.view(-1, 64 * 4 * 4)
35 | x = torch.relu(self.fc1(x))
36 | x = self.fc2(x)
37 | return x
38 |
39 | def configure_optimizers(self):
40 | optimizer = torch.optim.Adam(
41 | self.parameters(),
42 | lr=self.learning_rate,
43 | betas=(self.config["beta1"], self.config["beta2"]),
44 | amsgrad=self.config["amsgrad"],
45 | )
46 | self._optimizer = optimizer
47 | return optimizer
48 |
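A quick shape walk-through of the forward pass (pure torch, no NebulaModel dependency): three stride-2 pools shrink 32x32 to 4x4, which is why fc1 expects 64 * 4 * 4 inputs:

    import torch

    pool = torch.nn.MaxPool2d(2, 2)
    x = torch.rand(1, 3, 32, 32)
    x = pool(torch.relu(torch.nn.Conv2d(3, 16, 3, padding=1)(x)))   # (1, 16, 16, 16)
    x = pool(torch.relu(torch.nn.Conv2d(16, 32, 3, padding=1)(x)))  # (1, 32, 8, 8)
    x = pool(torch.relu(torch.nn.Conv2d(32, 64, 3, padding=1)(x)))  # (1, 64, 4, 4)
    print(x.view(-1, 64 * 4 * 4).shape)  # torch.Size([1, 1024])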
--------------------------------------------------------------------------------
/nebula/core/models/cifar10/cnnV2.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class CIFAR10ModelCNN_V2(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=3,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.rand(1, 3, 32, 32)
21 | self.learning_rate = learning_rate
22 | self.criterion = torch.nn.CrossEntropyLoss()
23 | self.conv1 = torch.nn.Conv2d(input_channels, 32, 5, padding=2)
24 | self.bn1 = torch.nn.BatchNorm2d(32)
25 | self.conv2 = torch.nn.Conv2d(32, 64, 3, padding=1)
26 | self.bn2 = torch.nn.BatchNorm2d(64)
27 | self.conv3 = torch.nn.Conv2d(64, 128, 3, padding=1)
28 | self.bn3 = torch.nn.BatchNorm2d(128)
29 | self.pool = torch.nn.MaxPool2d(2, 2)
30 | self.fc1 = torch.nn.Linear(128 * 4 * 4, 512)
31 | self.fc2 = torch.nn.Linear(512, num_classes)
32 | self.dropout = torch.nn.Dropout(0.5)
33 |
34 | def forward(self, x):
35 | x = self.pool(torch.relu(self.bn1(self.conv1(x))))
36 | x = self.pool(torch.relu(self.bn2(self.conv2(x))))
37 | x = self.pool(torch.relu(self.bn3(self.conv3(x))))
38 | x = x.view(-1, 128 * 4 * 4)
39 | x = torch.relu(self.fc1(x))
40 | x = self.dropout(x)
41 | x = self.fc2(x)
42 | return x
43 |
44 | def configure_optimizers(self):
45 | optimizer = torch.optim.Adam(
46 | self.parameters(),
47 | lr=self.learning_rate,
48 | betas=(self.config["beta1"], self.config["beta2"]),
49 | amsgrad=self.config["amsgrad"],
50 | )
51 | return optimizer
52 |
--------------------------------------------------------------------------------
/nebula/core/models/cifar10/cnnV3.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class CIFAR10ModelCNN_V3(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=3,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.rand(1, 3, 32, 32)
21 | self.learning_rate = learning_rate
22 |         self.criterion = torch.nn.CrossEntropyLoss()
23 | self.layer1 = torch.nn.Sequential(
24 | torch.nn.Conv2d(input_channels, 32, kernel_size=3, padding=1),
25 | torch.nn.BatchNorm2d(32),
26 | torch.nn.ReLU(),
27 | torch.nn.Conv2d(32, 32, kernel_size=3, padding=1),
28 | torch.nn.BatchNorm2d(32),
29 | torch.nn.ReLU(),
30 | torch.nn.MaxPool2d(kernel_size=2, stride=2),
31 | torch.nn.Dropout(0.25),
32 | )
33 |
34 | self.layer2 = torch.nn.Sequential(
35 | torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
36 | torch.nn.BatchNorm2d(64),
37 | torch.nn.ReLU(),
38 | torch.nn.Conv2d(64, 64, kernel_size=3, padding=1),
39 | torch.nn.BatchNorm2d(64),
40 | torch.nn.ReLU(),
41 | torch.nn.MaxPool2d(kernel_size=2, stride=2),
42 | torch.nn.Dropout(0.25),
43 | )
44 |
45 | self.layer3 = torch.nn.Sequential(
46 | torch.nn.Conv2d(64, 128, kernel_size=3, padding=1),
47 | torch.nn.BatchNorm2d(128),
48 | torch.nn.ReLU(),
49 | torch.nn.Conv2d(128, 128, kernel_size=3, padding=1),
50 | torch.nn.BatchNorm2d(128),
51 | torch.nn.ReLU(),
52 | torch.nn.MaxPool2d(kernel_size=2, stride=2),
53 | torch.nn.Dropout(0.25),
54 | )
55 |
56 | self.fc_layer = torch.nn.Sequential(
57 | torch.nn.Linear(128 * 4 * 4, 512),
58 | torch.nn.ReLU(),
59 | torch.nn.Dropout(0.5),
60 | torch.nn.Linear(512, num_classes),
61 | )
62 |
63 | def forward(self, x):
64 | x = self.layer1(x)
65 | x = self.layer2(x)
66 | x = self.layer3(x)
67 | x = x.view(x.size(0), -1) # Flatten the layer
68 | x = self.fc_layer(x)
69 | return x
70 |
71 | def configure_optimizers(self):
72 | optimizer = torch.optim.Adam(
73 | self.parameters(),
74 | lr=self.learning_rate,
75 | betas=(self.config["beta1"], self.config["beta2"]),
76 | amsgrad=self.config["amsgrad"],
77 | )
78 | return optimizer
79 |
--------------------------------------------------------------------------------
/nebula/core/models/cifar10/fastermobilenet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | from nebula.core.models.nebulamodel import NebulaModel
5 |
6 |
7 | class FasterMobileNet(NebulaModel):
8 | def __init__(
9 | self,
10 | input_channels=3,
11 | num_classes=10,
12 | learning_rate=1e-3,
13 | metrics=None,
14 | confusion_matrix=None,
15 | seed=None,
16 | ):
17 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
18 |
19 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
20 |
21 | self.example_input_array = torch.rand(1, 3, 32, 32)
22 | self.learning_rate = learning_rate
23 |         self.criterion = torch.nn.CrossEntropyLoss()
24 |
25 | def conv_dw(input_channels, num_classes, stride):
26 | return nn.Sequential(
27 | nn.Conv2d(
28 | input_channels,
29 | input_channels,
30 | 3,
31 | stride,
32 | 1,
33 | groups=input_channels,
34 | bias=False,
35 | ),
36 | nn.BatchNorm2d(input_channels),
37 | nn.ReLU(inplace=True),
38 | nn.Conv2d(input_channels, num_classes, 1, 1, 0, bias=False),
39 | nn.BatchNorm2d(num_classes),
40 | nn.ReLU(inplace=True),
41 | )
42 |
43 | self.model = nn.Sequential(
44 | nn.Conv2d(3, 16, 3, 1, 1, bias=False),
45 | nn.BatchNorm2d(16),
46 | nn.ReLU(inplace=True),
47 | conv_dw(16, 32, 1),
48 | conv_dw(32, 64, 2),
49 | conv_dw(64, 64, 1),
50 | nn.AdaptiveAvgPool2d(1),
51 | )
52 | self.fc = nn.Linear(64, num_classes)
53 |
54 | def forward(self, x):
55 | x = self.model(x)
56 | x = x.view(-1, 64)
57 | x = self.fc(x)
58 | return x
59 |
60 | def configure_optimizers(self):
61 | optimizer = torch.optim.Adam(
62 | self.parameters(),
63 | lr=self.learning_rate,
64 | betas=(self.config["beta1"], self.config["beta2"]),
65 | amsgrad=self.config["amsgrad"],
66 | )
67 | return optimizer
68 |
--------------------------------------------------------------------------------
/nebula/core/models/cifar10/simplemobilenet.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | import matplotlib.pyplot as plt
3 |
4 | matplotlib.use("Agg")
5 | plt.switch_backend("Agg")
6 | import torch
7 | from torch import nn
8 |
9 | from nebula.core.models.nebulamodel import NebulaModel
10 |
11 |
12 | class SimpleMobileNetV1(NebulaModel):
13 | def __init__(
14 | self,
15 | input_channels=3,
16 | num_classes=10,
17 | learning_rate=1e-3,
18 | metrics=None,
19 | confusion_matrix=None,
20 | seed=None,
21 | ):
22 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
23 |
24 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
25 |
26 | self.example_input_array = torch.rand(1, 3, 32, 32)
27 | self.learning_rate = learning_rate
28 |         self.criterion = torch.nn.CrossEntropyLoss()
29 |
30 | def conv_dw(input_channels, num_classes, stride):
31 | return nn.Sequential(
32 | nn.Conv2d(
33 | input_channels,
34 | input_channels,
35 | 3,
36 | stride,
37 | 1,
38 | groups=input_channels,
39 | bias=False,
40 | ),
41 | nn.BatchNorm2d(input_channels),
42 | nn.ReLU(inplace=True),
43 | nn.Conv2d(input_channels, num_classes, 1, 1, 0, bias=False),
44 | nn.BatchNorm2d(num_classes),
45 | nn.ReLU(inplace=True),
46 | )
47 |
48 | self.model = nn.Sequential(
49 | nn.Conv2d(3, 32, 3, 1, 1, bias=False),
50 | nn.BatchNorm2d(32),
51 | nn.ReLU(inplace=True),
52 | conv_dw(32, 64, 1),
53 | conv_dw(64, 128, 2),
54 | conv_dw(128, 128, 1),
55 | conv_dw(128, 256, 2),
56 | conv_dw(256, 256, 1),
57 | nn.AdaptiveAvgPool2d(1),
58 | )
59 | self.fc = nn.Linear(256, num_classes)
60 |
61 | def forward(self, x):
62 | x = self.model(x)
63 | x = x.view(-1, 256)
64 | x = self.fc(x)
65 | return x
66 |
67 | def configure_optimizers(self):
68 | optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
69 | return optimizer
70 |
--------------------------------------------------------------------------------
/nebula/core/models/cifar100/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/models/cifar100/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/emnist/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/models/emnist/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/emnist/cnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class EMNISTModelCNN(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=1,
10 | num_classes=47,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.rand(1, 1, 28, 28)
21 | self.learning_rate = learning_rate
22 | self.criterion = torch.nn.CrossEntropyLoss()
23 |
24 | self.conv1 = torch.nn.Conv2d(
25 | in_channels=input_channels,
26 | out_channels=32,
27 | kernel_size=(5, 5),
28 | padding="same",
29 | )
30 | self.relu = torch.nn.ReLU()
31 | self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
32 | self.conv2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5, 5), padding="same")
33 | self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
34 | self.l1 = torch.nn.Linear(7 * 7 * 64, 2048)
35 | self.l2 = torch.nn.Linear(2048, num_classes)
36 |
37 | self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
38 |
39 | def forward(self, x):
40 | input_layer = x.view(-1, 1, 28, 28)
41 | conv1 = self.relu(self.conv1(input_layer))
42 | pool1 = self.pool1(conv1)
43 | conv2 = self.relu(self.conv2(pool1))
44 | pool2 = self.pool2(conv2)
45 | pool2_flat = pool2.reshape(-1, 7 * 7 * 64)
46 |
47 | dense = self.relu(self.l1(pool2_flat))
48 | logits = self.l2(dense)
49 | return logits
50 |
51 | def configure_optimizers(self):
52 | optimizer = torch.optim.Adam(
53 | self.parameters(),
54 | lr=self.learning_rate,
55 | betas=(self.config["beta1"], self.config["beta2"]),
56 | amsgrad=self.config["amsgrad"],
57 | )
58 | return optimizer
59 |
--------------------------------------------------------------------------------
/nebula/core/models/emnist/mlp.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class EMNISTModelMLP(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=1,
10 | num_classes=47,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.zeros(1, 1, 28, 28)
21 | self.learning_rate = learning_rate
22 | self.criterion = torch.nn.CrossEntropyLoss()
23 |
24 | self.l1 = torch.nn.Linear(28 * 28, 256)
25 | self.l2 = torch.nn.Linear(256, 128)
26 | self.l3 = torch.nn.Linear(128, num_classes)
27 |
28 | def forward(self, x):
29 | batch_size, channels, width, height = x.size()
30 | x = x.view(batch_size, -1)
31 | x = self.l1(x)
32 | x = torch.relu(x)
33 | x = self.l2(x)
34 | x = torch.relu(x)
35 | x = self.l3(x)
36 | return x
37 |
38 | def configure_optimizers(self):
39 | optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
40 | return optimizer
41 |
--------------------------------------------------------------------------------
/nebula/core/models/fashionmnist/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/models/fashionmnist/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/fashionmnist/cnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class FashionMNISTModelCNN(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=1,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.rand(1, 1, 28, 28)
21 | self.learning_rate = learning_rate
22 | self.criterion = torch.nn.CrossEntropyLoss()
23 |
24 | self.conv1 = torch.nn.Conv2d(
25 | in_channels=input_channels,
26 | out_channels=32,
27 | kernel_size=(5, 5),
28 | padding="same",
29 | )
30 | self.relu = torch.nn.ReLU()
31 | self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
32 | self.conv2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5, 5), padding="same")
33 | self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
34 | self.l1 = torch.nn.Linear(7 * 7 * 64, 2048)
35 | self.l2 = torch.nn.Linear(2048, num_classes)
36 |
37 | self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
38 |
39 | def forward(self, x):
40 | input_layer = x.view(-1, 1, 28, 28)
41 | conv1 = self.relu(self.conv1(input_layer))
42 | pool1 = self.pool1(conv1)
43 | conv2 = self.relu(self.conv2(pool1))
44 | pool2 = self.pool2(conv2)
45 | pool2_flat = pool2.reshape(-1, 7 * 7 * 64)
46 |
47 | dense = self.relu(self.l1(pool2_flat))
48 | logits = self.l2(dense)
49 | return logits
50 |
51 | def configure_optimizers(self):
52 | optimizer = torch.optim.Adam(
53 | self.parameters(),
54 | lr=self.learning_rate,
55 | betas=(self.config["beta1"], self.config["beta2"]),
56 | amsgrad=self.config["amsgrad"],
57 | )
58 | return optimizer
59 |
--------------------------------------------------------------------------------
/nebula/core/models/fashionmnist/mlp.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class FashionMNISTModelMLP(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=1,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.rand(1, 1, 28, 28)
21 | self.learning_rate = learning_rate
22 | self.criterion = torch.nn.CrossEntropyLoss()
23 |
24 | self.l1 = torch.nn.Linear(28 * 28, 256)
25 | self.l2 = torch.nn.Linear(256, 128)
26 | self.l3 = torch.nn.Linear(128, num_classes)
27 |
28 | def forward(self, x):
29 | batch_size, channels, width, height = x.size()
30 | x = x.view(batch_size, -1)
31 | x = self.l1(x)
32 | x = torch.relu(x)
33 | x = self.l2(x)
34 | x = torch.relu(x)
35 | x = self.l3(x)
36 | return x
37 |
38 | def configure_optimizers(self):
39 | optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
40 | return optimizer
41 |
--------------------------------------------------------------------------------
/nebula/core/models/mnist/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/models/mnist/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/mnist/cnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class MNISTModelCNN(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=1,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.zeros(1, 1, 28, 28)
21 | self.learning_rate = learning_rate
22 | self.criterion = torch.nn.CrossEntropyLoss()
23 | self.conv1 = torch.nn.Conv2d(
24 | in_channels=input_channels,
25 | out_channels=32,
26 | kernel_size=(5, 5),
27 | padding="same",
28 | )
29 | self.relu = torch.nn.ReLU()
30 | self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
31 | self.conv2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5, 5), padding="same")
32 | self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
33 | self.l1 = torch.nn.Linear(7 * 7 * 64, 2048)
34 | self.l2 = torch.nn.Linear(2048, num_classes)
35 |
36 | def forward(self, x):
37 | input_layer = x.view(-1, 1, 28, 28)
38 | conv1 = self.relu(self.conv1(input_layer))
39 | pool1 = self.pool1(conv1)
40 | conv2 = self.relu(self.conv2(pool1))
41 | pool2 = self.pool2(conv2)
42 | pool2_flat = pool2.reshape(-1, 7 * 7 * 64)
43 |
44 | dense = self.relu(self.l1(pool2_flat))
45 | logits = self.l2(dense)
46 | return logits
47 |
48 | def configure_optimizers(self):
49 | optimizer = torch.optim.Adam(
50 | self.parameters(),
51 | lr=self.learning_rate,
52 | betas=(self.config["beta1"], self.config["beta2"]),
53 | amsgrad=self.config["amsgrad"],
54 | )
55 | self._optimizer = optimizer
56 | return optimizer
57 |
--------------------------------------------------------------------------------
/nebula/core/models/mnist/mlp.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class MNISTModelMLP(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=1,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.example_input_array = torch.zeros(1, 1, 28, 28)
19 | self.learning_rate = learning_rate
20 | self.criterion = torch.nn.CrossEntropyLoss()
21 | self.l1 = torch.nn.Linear(28 * 28, 256)
22 | self.l2 = torch.nn.Linear(256, 128)
23 | self.l3 = torch.nn.Linear(128, num_classes)
24 |
25 | def forward(self, x):
26 | batch_size, channels, width, height = x.size()
27 | x = x.view(batch_size, -1)
28 | x = self.l1(x)
29 | x = torch.relu(x)
30 | x = self.l2(x)
31 | x = torch.relu(x)
32 | x = self.l3(x)
33 | return x
34 |
35 | def configure_optimizers(self):
36 | optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
37 | self._optimizer = optimizer
38 | return optimizer
39 |
40 | def get_learning_rate(self):
41 | return self.learning_rate
42 |
43 | def count_parameters(self):
44 | return sum(p.numel() for p in self.parameters() if p.requires_grad)
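count_parameters() sums only trainable tensors. For the three Linear layers above the arithmetic is 784*256 + 256 + 256*128 + 128 + 128*10 + 10 = 235,146; a quick check, assuming the NebulaModel base registers no trainable parameters of its own:

    from nebula.core.models.mnist.mlp import MNISTModelMLP

    model = MNISTModelMLP()
    print(model.count_parameters())  # expected: 235146 from the three Linear layers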
--------------------------------------------------------------------------------
/nebula/core/models/sentiment140/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/models/sentiment140/__init__.py
--------------------------------------------------------------------------------
/nebula/core/models/sentiment140/cnn.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import torch
4 |
5 | from nebula.core.models.nebulamodel import NebulaModel
6 |
7 |
8 | class Sentiment140ModelCNN(NebulaModel):
9 | def __init__(
10 | self,
11 | input_channels=3,
12 | num_classes=10,
13 | learning_rate=1e-3,
14 | metrics=None,
15 | confusion_matrix=None,
16 | seed=None,
17 | ):
18 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
19 |
20 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
21 | self.example_input_array = torch.zeros(1, 1, 28, 28)
22 | self.learning_rate = learning_rate
23 |
24 | self.criterion = torch.nn.CrossEntropyLoss()
25 |
26 | self.filter_sizes = [2, 3, 4]
27 | self.n_filters = math.ceil(300 * len(self.filter_sizes) / 3)
28 | self.convs = torch.nn.ModuleList([
29 | torch.nn.Conv2d(in_channels=1, out_channels=self.n_filters, kernel_size=(fs, 300))
30 | for fs in self.filter_sizes
31 | ])
32 | self.fc = torch.nn.Linear(len(self.filter_sizes) * self.n_filters, self.num_classes)
33 | self.dropout = torch.nn.Dropout(0.5)
34 |
35 | self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
36 |
37 | def forward(self, x):
38 | x = x.unsqueeze(1)
39 | conved = [
40 | torch.nn.functional.relu(conv(x)).squeeze(3) for conv in self.convs
41 | ] # [(batch_size, n_filters, sent_len), ...] * len(filter_sizes)
42 | pooled = [
43 | torch.nn.functional.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved
44 | ] # [(batch_size, n_filters), ...] * len(filter_sizes)
45 | cat = self.dropout(torch.cat(pooled, dim=1))
46 | out = self.fc(cat)
47 | return out
48 |
49 | def configure_optimizers(self):
50 | optimizer = torch.optim.Adam(
51 | self.parameters(),
52 | lr=self.learning_rate,
53 | betas=(self.config["beta1"], self.config["beta2"]),
54 | amsgrad=self.config["amsgrad"],
55 | )
56 | return optimizer
57 |
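The forward pass is the standard TextCNN shape flow: each Conv2d with kernel (fs, 300) consumes the full embedding width, so squeeze(3) yields (batch, n_filters, sent_len - fs + 1), and max-pooling over the remaining time axis leaves (batch, n_filters) per filter size. A standalone trace of one branch; the (batch, sent_len, 300) input shape is an assumption (the image-shaped example_input_array above appears to be a leftover):

    import torch
    import torch.nn.functional as F

    batch, sent_len, emb = 4, 32, 300
    x = torch.rand(batch, sent_len, emb).unsqueeze(1)          # (batch, 1, sent_len, 300)
    conv = torch.nn.Conv2d(1, 300, kernel_size=(3, 300))       # one branch, fs = 3
    conved = F.relu(conv(x)).squeeze(3)                        # (batch, 300, sent_len - 2)
    pooled = F.max_pool1d(conved, conved.shape[2]).squeeze(2)  # (batch, 300)
    assert pooled.shape == (batch, 300)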
--------------------------------------------------------------------------------
/nebula/core/models/sentiment140/rnn.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nebula.core.models.nebulamodel import NebulaModel
4 |
5 |
6 | class Sentiment140ModelRNN(NebulaModel):
7 | def __init__(
8 | self,
9 | input_channels=3,
10 | num_classes=10,
11 | learning_rate=1e-3,
12 | metrics=None,
13 | confusion_matrix=None,
14 | seed=None,
15 | ):
16 | super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
17 |
18 | self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
19 |
20 | self.example_input_array = torch.zeros(1, 1, 28, 28)
21 | self.learning_rate = learning_rate
22 |
23 | self.embedding_dim = 300
24 | self.hidden_dim = 256
25 | self.n_layers = 1
26 | self.bidirectional = True
27 | self.output_dim = num_classes
28 |
29 | self.encoder = torch.nn.LSTM(
30 | self.embedding_dim,
31 | self.hidden_dim,
32 | num_layers=self.n_layers,
33 | bidirectional=self.bidirectional,
34 |             dropout=0.5,  # no effect with num_layers=1; PyTorch emits a warning
35 | batch_first=True,
36 | )
37 | self.fc = torch.nn.Linear(self.hidden_dim * 2, self.output_dim)
38 | self.dropout = torch.nn.Dropout(0.5)
39 |
40 | self.criterion = torch.nn.CrossEntropyLoss()
41 |
42 |         self.l1 = torch.nn.Linear(28 * 28, 256)  # NOTE: l1/l2/l3 are defined but never used in forward()
43 | self.l2 = torch.nn.Linear(256, 128)
44 | self.l3 = torch.nn.Linear(128, num_classes)
45 |
46 | self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
47 |
48 | def forward(self, x):
49 | packed_output, (hidden, cell) = self.encoder(x)
50 | hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
51 | out = self.fc(hidden)
52 | out = torch.log_softmax(out, dim=1)
53 | return out
54 |
55 | def configure_optimizers(self):
56 | optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
57 | return optimizer
58 |
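forward() concatenates the final forward and backward hidden states of the bidirectional LSTM, which is why the head is Linear(hidden_dim * 2, num_classes). A shape trace of that concatenation, as a self-contained sketch:

    import torch

    # hidden has shape (num_layers * num_directions, batch, hidden_dim);
    # hidden[-2] is the last forward direction, hidden[-1] the last backward one.
    lstm = torch.nn.LSTM(300, 256, num_layers=1, bidirectional=True, batch_first=True)
    _, (hidden, _) = lstm(torch.rand(4, 20, 300))    # (batch, seq_len, embedding_dim)
    cat = torch.cat((hidden[-2], hidden[-1]), dim=1)
    assert cat.shape == (4, 512)                     # feeds Linear(hidden_dim * 2, num_classes)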
--------------------------------------------------------------------------------
/nebula/core/network/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/network/__init__.py
--------------------------------------------------------------------------------
/nebula/core/network/discoverer.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 |
4 | from nebula.addons.functions import print_msg_box
5 |
6 |
7 | class Discoverer:
8 | def __init__(self, addr, config):
9 | print_msg_box(msg="Starting discoverer module...", indent=2, title="Discoverer module")
10 | self.addr = addr
11 | self.config = config
12 | self._cm = None
13 | self.grace_time = self.config.participant["discoverer_args"]["grace_time_discovery"]
14 | self.period = self.config.participant["discoverer_args"]["discovery_frequency"]
15 | self.interval = self.config.participant["discoverer_args"]["discovery_interval"]
16 |
17 | @property
18 | def cm(self):
19 |         if not self._cm:
20 |             # Imported lazily to avoid a circular import with the communications module.
21 |             from nebula.core.network.communications import CommunicationsManager
22 |
23 |             self._cm = CommunicationsManager.get_instance()
24 |
25 |         return self._cm
26 |
27 | async def start(self):
28 | asyncio.create_task(self.run_discover())
29 |
30 | async def run_discover(self):
31 | if self.config.participant["scenario_args"]["federation"] == "CFL":
32 | logging.info("🔍 Federation is CFL. Discoverer is disabled...")
33 | return
34 | await asyncio.sleep(self.grace_time)
35 | while True:
36 | if len(self.cm.connections) > 0:
37 | latitude = self.config.participant["mobility_args"]["latitude"]
38 | longitude = self.config.participant["mobility_args"]["longitude"]
39 | message = self.cm.create_message("discovery", "discover", latitude=latitude, longitude=longitude)
40 | try:
41 | logging.debug("🔍 Sending discovery message to neighbors...")
42 | current_connections = await self.cm.get_addrs_current_connections(only_direct=True)
43 | await self.cm.send_message_to_neighbors(message, current_connections, self.interval)
44 | except Exception as e:
45 | logging.exception(f"🔍 Cannot send discovery message to neighbors. Error: {e!s}")
46 | await asyncio.sleep(self.period)
47 |
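run_discover() reads three timing knobs from participant["discoverer_args"] and disables itself entirely under CFL. A sketch of the expected config shape; the key names come from the code above, while the values are illustrative assumptions:

    participant_config = {
        "scenario_args": {"federation": "DFL"},  # "CFL" disables the discoverer
        "discoverer_args": {
            "grace_time_discovery": 10,  # seconds to wait before the first probe
            "discovery_frequency": 30,   # seconds between discovery rounds
            "discovery_interval": 5,     # pacing passed to send_message_to_neighbors
        },
        "mobility_args": {"latitude": 38.0, "longitude": -1.1},
    }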
--------------------------------------------------------------------------------
/nebula/core/network/externalconnection/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/network/externalconnection/__init__.py
--------------------------------------------------------------------------------
/nebula/core/pb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/pb/__init__.py
--------------------------------------------------------------------------------
/nebula/core/role.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | class Role(Enum):
4 | """
5 | This class defines the participant roles of the platform.
6 | """
7 |
8 | TRAINER = "trainer"
9 | AGGREGATOR = "aggregator"
10 | PROXY = "proxy"
11 | IDLE = "idle"
12 | SERVER = "server"
13 |
14 | def factory_node_role(role: str) -> Role:
15 | if role == "trainer":
16 | return Role.TRAINER
17 | elif role == "aggregator":
18 | return Role.AGGREGATOR
19 | elif role == "proxy":
20 | return Role.PROXY
21 | elif role == "idle":
22 | return Role.IDLE
23 | elif role == "server":
24 | return Role.SERVER
25 |     else:
26 |         raise ValueError(f"Unknown role: {role}")
27 |
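With the factory raising ValueError on unknown input, its behavior matches plain Enum value lookup, which performs the same mapping in one step:

    from nebula.core.role import Role, factory_node_role

    assert factory_node_role("aggregator") is Role.AGGREGATOR
    assert Role("proxy") is Role.PROXY  # same mapping; also raises ValueError on unknown values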
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/awareness/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/arbitrationpolicies/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/awareness/arbitrationpolicies/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/arbitrationpolicies/arbitrationpolicy.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 | from nebula.core.situationalawareness.awareness.sautils.sacommand import SACommand
4 |
5 |
6 | class ArbitrationPolicy(ABC):
7 | """
8 | Abstract base class defining the arbitration policy for resolving conflicts between SA commands.
9 |
10 | This class establishes the interface for implementing arbitration logic used in the
11 | Situational Awareness module. It includes initialization and a tie-breaking mechanism
12 | when two commands have the same priority or conflict.
13 |
14 | Methods:
15 | - init(config): Initialize the arbitration policy with a configuration object.
16 | - tie_break(sac1, sac2): Decide which command to keep when two conflict and have equal priority.
17 | """
18 |
19 | @abstractmethod
20 | async def init(self, config):
21 | """
22 | Initialize the arbitration policy with the provided configuration.
23 |
24 | Parameters:
25 | config (Any): A configuration object or dictionary to set up internal parameters.
26 | """
27 | raise NotImplementedError
28 |
29 | @abstractmethod
30 | async def tie_break(self, sac1: SACommand, sac2: SACommand) -> bool:
31 | """
32 | Resolve a conflict between two commands with equal priority.
33 |
34 | Parameters:
35 | sac1 (SACommand): First command in conflict.
36 | sac2 (SACommand): Second command in conflict.
37 |
38 | Returns:
39 | bool: True if sac1 should be kept over sac2, False if sac2 is preferred.
40 | """
41 | raise NotImplementedError
42 |
43 |
44 | def factory_arbitration_policy(arbitration_policy, verbose) -> ArbitrationPolicy:
45 | from nebula.core.situationalawareness.awareness.arbitrationpolicies.staticarbitrationpolicy import SAP
46 |
47 | options = {
48 |         "sap": SAP,  # "Static Arbitration Policy" (SAP) -- default value
49 | }
50 |
51 |     cs = options.get(arbitration_policy, SAP)
52 | return cs(verbose)
53 |
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/arbitrationpolicies/staticarbitrationpolicy.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from nebula.core.situationalawareness.awareness.arbitrationpolicies.arbitrationpolicy import ArbitrationPolicy
4 | from nebula.core.situationalawareness.awareness.sautils.sacommand import SACommand
5 |
6 |
7 | class SAP(ArbitrationPolicy):  # Static Arbitration Policy
8 | """
9 | Static Arbitration Policy for the Reasoner module.
10 |
11 | This class implements a fixed priority arbitration mechanism for
12 | SA (Situational Awareness) components. Each SA component category
13 | is assigned a static weight representing its priority level.
14 |
15 | In case of conflicting SA commands, the policy selects the command
16 | whose originating component has the highest priority weight.
17 |
18 | Attributes:
19 | _verbose (bool): Enables verbose logging for debugging and tracing.
20 | agent_weights (dict): Mapping of SA component categories to static weights.
21 |
22 | Methods:
23 | init(config): Placeholder for initialization with external configuration.
24 | tie_break(sac1, sac2): Resolves conflicts between two SA commands by
25 | comparing their category weights, returning True if sac1 wins.
26 | """
27 | def __init__(self, verbose):
28 | self._verbose = verbose
29 | # Define static weights for SA Agents from SA Components
30 | self.agent_weights = {"SATraining": 1, "SANetwork": 2, "SAReputation": 3}
31 |
32 | async def init(self, config):
33 | pass
34 |
35 | async def _get_agent_category(self, sa_command: SACommand) -> str:
36 | """
37 | Extract agent category name.
38 | Example: "SATraining_Agent1" → "SATraining"
39 | """
40 | full_name = await sa_command.get_owner()
41 | return full_name.split("_")[0] if "_" in full_name else full_name
42 |
43 | async def tie_break(self, sac1: SACommand, sac2: SACommand) -> bool:
44 | """
45 |         Resolve a tie between two conflicting SA commands.
46 | """
47 | if self._verbose:
48 | logging.info(
49 | f"Tie break between ({await sac1.get_owner()}, {sac1.get_action().value}) & ({await sac2.get_owner()}, {sac2.get_action().value})"
50 | )
51 |
52 | async def get_weight(cmd):
53 | category = await self._get_agent_category(cmd)
54 | return self.agent_weights.get(category, 0)
55 |
56 | if await get_weight(sac1) > await get_weight(sac2):
57 | if self._verbose:
58 | logging.info(
59 |                     f"Tie break resolved, SA Command chosen ({await sac1.get_owner()}, {sac1.get_action().value})"
60 | )
61 | return True
62 | else:
63 | if self._verbose:
64 | logging.info(
65 |                     f"Tie break resolved, SA Command chosen ({await sac2.get_owner()}, {sac2.get_action().value})"
66 | )
67 | return False
68 |
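tie_break() only touches two methods of each command, so it can be exercised with small stand-ins; the _Cmd/_Action stubs below are hypothetical, not the real SACommand API:

    import asyncio

    from nebula.core.situationalawareness.awareness.arbitrationpolicies.staticarbitrationpolicy import SAP

    class _Action:
        value = "demo"

    class _Cmd:
        def __init__(self, owner):
            self._owner = owner

        async def get_owner(self):
            return self._owner

        def get_action(self):
            return _Action()

    sap = SAP(verbose=False)
    # SANetwork (weight 2) outranks SATraining (weight 1), so the first command loses.
    assert asyncio.run(sap.tie_break(_Cmd("SATraining_A"), _Cmd("SANetwork_B"))) is False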
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/sanetwork/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/awareness/sanetwork/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/sanetwork/neighborpolicies/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/awareness/sanetwork/neighborpolicies/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/satraining/trainingpolicy/bpstrainingpolicy.py:
--------------------------------------------------------------------------------
1 | from nebula.core.situationalawareness.awareness.satraining.trainingpolicy.trainingpolicy import TrainingPolicy
2 | from nebula.core.situationalawareness.awareness.suggestionbuffer import SuggestionBuffer
3 | from nebula.core.situationalawareness.awareness.sautils.sacommand import SACommand, factory_sa_command, SACommandAction, SACommandPRIO
4 | from nebula.core.nebulaevents import RoundEndEvent
5 |
6 | class BPSTrainingPolicy(TrainingPolicy):
7 |
8 | def __init__(self, config=None):
9 | pass
10 |
11 | async def init(self, config):
12 | await self.register_sa_agent()
13 |
14 | async def get_evaluation_results(self):
15 | sac = factory_sa_command(
16 | "connectivity",
17 | SACommandAction.MAINTAIN_CONNECTIONS,
18 | self,
19 | "",
20 | SACommandPRIO.LOW,
21 | False,
22 | None,
23 | None
24 | )
25 | await self.suggest_action(sac)
26 | await self.notify_all_suggestions_done(RoundEndEvent)
27 |
28 | async def get_agent(self) -> str:
29 | return "SATraining_BPSTP"
30 |
31 | async def register_sa_agent(self):
32 | await SuggestionBuffer.get_instance().register_event_agents(RoundEndEvent, self)
33 |
34 |     async def suggest_action(self, sac: SACommand):
35 | await SuggestionBuffer.get_instance().register_suggestion(RoundEndEvent, self, sac)
36 |
37 | async def notify_all_suggestions_done(self, event_type):
38 | await SuggestionBuffer.get_instance().notify_all_suggestions_done_for_agent(self, event_type)
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/satraining/trainingpolicy/htstrainingpolicy.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from nebula.core.situationalawareness.awareness.satraining.trainingpolicy.trainingpolicy import TrainingPolicy, factory_training_policy
4 |
5 |
6 | # "Hybrid Training Strategy" (HTS)
7 | class HTSTrainingPolicy(TrainingPolicy):
8 | """
9 | Implements a Hybrid Training Strategy (HTS) that combines multiple training policies
10 | (e.g., QDS, FRTS) to collaboratively decide on the evaluation and potential pruning
11 | of neighbors in a decentralized federated learning scenario.
12 |
13 | Attributes:
14 | TRAINING_POLICY (set): Names of training policy classes to instantiate and manage.
15 | """
16 |
17 | TRAINING_POLICY = {
18 | "Quality-Driven Selection",
19 | "Fast Reboot Training Strategy",
20 | }
21 |
22 | def __init__(self, config):
23 | """
24 | Initializes the HTS policy with the node's address and verbosity level.
25 | It creates instances of each sub-policy listed in TRAINING_POLICY.
26 |
27 | Args:
28 | config (dict): Configuration dictionary with keys:
29 | - 'addr': Node's address
30 | - 'verbose': Enable verbose logging
31 | """
32 | self._addr = config["addr"]
33 | self._verbose = config["verbose"]
34 |         self._training_policies: set[TrainingPolicy] = set()
35 |         self._training_policies.update(factory_training_policy(x, config) for x in self.TRAINING_POLICY)
36 |
37 | def __str__(self):
38 | return "HTS"
39 |
40 | @property
41 | def tps(self):
42 | return self._training_policies
43 |
44 | async def init(self, config):
45 | for tp in self.tps:
46 | await tp.init(config)
47 |
48 | async def update_neighbors(self, node, remove=False):
49 | pass
50 |
51 | async def get_evaluation_results(self):
52 | """
53 | Asynchronously calls the `get_evaluation_results` of each policy,
54 | and logs the nodes each policy would remove.
55 |
56 | Returns:
57 | None (future version may merge all evaluations).
58 | """
59 | nodes_to_remove = dict()
60 | for tp in self.tps:
61 | nodes_to_remove[tp] = await tp.get_evaluation_results()
62 |
63 | for tp, nodes in nodes_to_remove.items():
64 | logging.info(f"Training Policy: {tp}, nodes to remove: {nodes}")
65 |
66 | return None
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/satraining/trainingpolicy/trainingpolicy.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | from nebula.core.situationalawareness.awareness.sautils.samoduleagent import SAModuleAgent
3 |
4 | class TrainingPolicy(SAModuleAgent):
5 |
6 | @abstractmethod
7 | async def init(self, config):
8 | pass
9 |
10 | @abstractmethod
11 | async def get_evaluation_results(self):
12 | pass
13 |
14 |
15 | def factory_training_policy(training_policy, config) -> TrainingPolicy:
16 | from nebula.core.situationalawareness.awareness.satraining.trainingpolicy.bpstrainingpolicy import BPSTrainingPolicy
17 | from nebula.core.situationalawareness.awareness.satraining.trainingpolicy.qdstrainingpolicy import QDSTrainingPolicy
18 | from nebula.core.situationalawareness.awareness.satraining.trainingpolicy.htstrainingpolicy import HTSTrainingPolicy
19 | from nebula.core.situationalawareness.awareness.satraining.trainingpolicy.fastreboot import FastReboot
20 |
21 | options = {
22 | "Broad-Propagation Strategy": BPSTrainingPolicy, # "Broad-Propagation Strategy" (BPS) -- default value
23 | "Quality-Driven Selection": QDSTrainingPolicy, # "Quality-Driven Selection" (QDS)
24 | "Hybrid Training Strategy": HTSTrainingPolicy, # "Hybrid Training Strategy" (HTS)
25 | "Fast Reboot Training Strategy": FastReboot, # "Fast Reboot Training Strategy" (FRTS)
26 | }
27 |
28 | cs = options.get(training_policy, BPSTrainingPolicy)
29 | return cs(config)
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/sautils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/awareness/sautils/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/awareness/sautils/samoduleagent.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 | from nebula.core.situationalawareness.awareness.sautils.sacommand import SACommand
4 |
5 |
6 | class SAModuleAgent(ABC):
7 | """
8 | Abstract base class representing a Situational Awareness (SA) module agent.
9 |
10 | This interface defines the essential methods that any SA agent must implement
11 | to participate in the suggestion and arbitration pipeline. Agents are responsible
12 | for registering themselves, suggesting actions in the form of commands, and
13 | notifying when all suggestions related to an event are complete.
14 |
15 | Methods:
16 | - get_agent(): Return a unique identifier or name of the agent.
17 | - register_sa_agent(): Perform initialization or registration steps for the agent.
18 | - suggest_action(sac): Submit a suggested command (SACommand) for arbitration.
19 | - notify_all_suggestions_done(event_type): Indicate that all suggestions for a given event are complete.
20 | """
21 |
22 | @abstractmethod
23 | async def get_agent(self) -> str:
24 | """
25 | Return the unique identifier or name of the agent.
26 |
27 | Returns:
28 | str: The identifier or label representing this SA agent.
29 | """
30 | raise NotImplementedError
31 |
32 | @abstractmethod
33 | async def register_sa_agent(self):
34 | """
35 | Perform initialization logic required to register this SA agent
36 | within the system (e.g., announcing its presence or preparing state).
37 | """
38 | raise NotImplementedError
39 |
40 | @abstractmethod
41 | async def suggest_action(self, sac: SACommand):
42 | """
43 | Submit a suggested action in the form of a SACommand for a given context.
44 |
45 | Parameters:
46 | sac (SACommand): The command proposed by the agent for execution.
47 | """
48 | raise NotImplementedError
49 |
50 | @abstractmethod
51 | async def notify_all_suggestions_done(self, event_type):
52 | """
53 | Notify that this agent has completed all its suggestions for a particular event.
54 |
55 | Parameters:
56 | event_type (Type[NodeEvent]): The type of the event for which suggestions are now complete.
57 | """
58 | raise NotImplementedError
59 |
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/discovery/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/candidateselection/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/discovery/candidateselection/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/candidateselection/candidateselector.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
4 | class CandidateSelector(ABC):
5 | @abstractmethod
6 | async def set_config(self, config):
7 | """
8 | Configure internal parameters for the candidate selection strategy.
9 |
10 | Parameters:
11 | config: A configuration object or dictionary with necessary parameters.
12 | """
13 | pass
14 |
15 | @abstractmethod
16 | async def add_candidate(self, candidate):
17 | """
18 | Add a new candidate to the internal pool of potential selections.
19 |
20 | Parameters:
21 | candidate: The candidate node or object to be considered for selection.
22 | """
23 | pass
24 |
25 | @abstractmethod
26 | async def select_candidates(self):
27 | """
28 | Apply the selection logic to choose the best candidates from the internal pool.
29 |
30 | Returns:
31 | list: A list of selected candidates based on the implemented strategy.
32 | """
33 | pass
34 |
35 | @abstractmethod
36 | async def remove_candidates(self):
37 | """
38 | Remove one or more candidates from the pool based on internal rules or external decisions.
39 | """
40 | pass
41 |
42 | @abstractmethod
43 | async def any_candidate(self):
44 | """
45 | Check whether there are any candidates currently available in the internal pool.
46 |
47 | Returns:
48 | bool: True if at least one candidate is available, False otherwise.
49 | """
50 | pass
51 |
52 |
53 | def factory_CandidateSelector(selector) -> CandidateSelector:
54 | from nebula.core.situationalawareness.discovery.candidateselection.distcandidateselector import (
55 | DistanceCandidateSelector,
56 | )
57 | from nebula.core.situationalawareness.discovery.candidateselection.fccandidateselector import FCCandidateSelector
58 | from nebula.core.situationalawareness.discovery.candidateselection.ringcandidateselector import (
59 | RINGCandidateSelector,
60 | )
61 |     from nebula.core.situationalawareness.discovery.candidateselection.stdcandidateselector import STDCandidateSelector
62 |
63 | options = {
64 | "ring": RINGCandidateSelector,
65 | "fully": FCCandidateSelector,
66 |         "random": STDCandidateSelector,
67 | "distance": DistanceCandidateSelector,
68 | }
69 |
70 | cs = options.get(selector, FCCandidateSelector)
71 | return cs()
72 |
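A usage sketch of the factory, assuming the selector modules import cleanly; unknown keys fall back to the fully-connected selector:

    from nebula.core.situationalawareness.discovery.candidateselection.candidateselector import (
        factory_CandidateSelector,
    )

    ring = factory_CandidateSelector("ring")      # one of: ring, fully, random, distance
    fallback = factory_CandidateSelector("nope")  # unknown keys yield FCCandidateSelector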
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/candidateselection/fccandidateselector.py:
--------------------------------------------------------------------------------
1 | from nebula.core.situationalawareness.discovery.candidateselection.candidateselector import CandidateSelector
2 | from nebula.core.utils.locker import Locker
3 |
4 |
5 | class FCCandidateSelector(CandidateSelector):
6 | """
7 | Candidate selector for fully-connected (FC) topologies.
8 |
9 | In a fully-connected network, all available candidates are accepted
10 | without applying any filtering criteria. This selector simply returns
11 | all collected candidates.
12 |
13 | Attributes:
14 | candidates (list): List of all discovered candidate nodes.
15 | candidates_lock (Locker): Lock to ensure thread-safe access to the candidate list.
16 |
17 | Methods:
18 | set_config(config): No-op for fully-connected mode.
19 | add_candidate(candidate): Adds a new candidate to the list.
20 | select_candidates(): Returns all currently stored candidates.
21 | remove_candidates(): Clears the candidate list.
22 | any_candidate(): Returns True if there is at least one candidate.
23 |
24 | Inherits from:
25 | CandidateSelector: Base class interface for candidate selection logic.
26 | """
27 |
28 | def __init__(self):
29 | self.candidates = []
30 | self.candidates_lock = Locker(name="candidates_lock")
31 |
32 | async def set_config(self, config):
33 | pass
34 |
35 | async def add_candidate(self, candidate):
36 | self.candidates_lock.acquire()
37 | self.candidates.append(candidate)
38 | self.candidates_lock.release()
39 |
40 | async def select_candidates(self):
41 | """
42 |         In a fully-connected topology every candidate is selected.
43 | """
44 | self.candidates_lock.acquire()
45 | cdts = self.candidates.copy()
46 | self.candidates_lock.release()
47 | return (cdts, [])
48 |
49 | async def remove_candidates(self):
50 | self.candidates_lock.acquire()
51 | self.candidates = []
52 | self.candidates_lock.release()
53 |
54 | async def any_candidate(self):
55 | self.candidates_lock.acquire()
56 |         has_candidates = len(self.candidates) > 0
57 |         self.candidates_lock.release()
58 |         return has_candidates
59 |
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/candidateselection/ringcandidateselector.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from nebula.core.situationalawareness.discovery.candidateselection.candidateselector import CandidateSelector
4 | from nebula.core.utils.locker import Locker
5 |
6 |
7 | class RINGCandidateSelector(CandidateSelector):
8 | """
9 | Candidate selector for ring topology.
10 |
11 | In a ring topology, each node connects to a limited set of neighbors forming a closed loop.
12 |     This selector chooses exactly one candidate from the pool, the one with the
13 |     fewest neighbors, aiming to keep the ring balanced by favoring nodes with
14 |     fewer existing connections and avoiding overloading any single node.
15 |
16 | Attributes:
17 |         _candidates (list): List of candidate nodes available for selection.
18 | candidates_lock (Locker): Async lock to ensure thread-safe access to candidates.
19 |
20 | Methods:
21 | set_config(config): Optional configuration, currently unused.
22 | add_candidate(candidate): Adds a candidate node to the candidate list.
23 | select_candidates(): Selects and returns a single candidate with the minimum number of neighbors.
24 | remove_candidates(): Clears the candidates list.
25 | any_candidate(): Returns True if there is at least one candidate available.
26 |
27 | Inherits from:
28 | CandidateSelector: Base interface for candidate selection strategies.
29 | """
30 |
31 | def __init__(self):
32 | self._candidates = []
33 | self._rejected_candidates = []
34 | self.candidates_lock = Locker(name="candidates_lock")
35 |
36 | async def set_config(self, config):
37 | pass
38 |
39 | async def add_candidate(self, candidate):
40 | """
41 |         Collect every discovered candidate; the ring constraint is applied later in select_candidates().
42 | """
43 | self.candidates_lock.acquire()
44 | self._candidates.append(candidate)
45 | self.candidates_lock.release()
46 |
47 | async def select_candidates(self):
48 | self.candidates_lock.acquire()
49 | cdts = []
50 |
51 | if self._candidates:
52 | min_neighbors = min(self._candidates, key=lambda x: x[1])[1]
53 | tied_candidates = [c for c in self._candidates if c[1] == min_neighbors]
54 |
55 | selected = random.choice(tied_candidates)
56 | cdts.append(selected)
57 |
58 | for cdt in self._candidates:
59 | if cdt not in cdts:
60 | self._rejected_candidates.append(cdt)
61 |
62 | not_cdts = self._rejected_candidates.copy()
63 | self.candidates_lock.release()
64 | return (cdts, not_cdts)
65 |
66 | async def remove_candidates(self):
67 | self.candidates_lock.acquire()
68 | self._candidates = []
69 | self.candidates_lock.release()
70 |
71 | async def any_candidate(self):
72 | self.candidates_lock.acquire()
73 |         has_candidates = len(self._candidates) > 0
74 |         self.candidates_lock.release()
75 |         return has_candidates
76 |
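select_candidates() keys on index 1 of each candidate, so candidates are assumed to be (addr, n_neighbors, ...) tuples as in the STD selector below. A small demo of the minimum-neighbors rule:

    import asyncio

    from nebula.core.situationalawareness.discovery.candidateselection.ringcandidateselector import (
        RINGCandidateSelector,
    )

    async def demo():
        sel = RINGCandidateSelector()
        await sel.add_candidate(("node_a", 3, None))  # assumed (addr, n_neighbors, ...) shape
        await sel.add_candidate(("node_b", 1, None))
        selected, rejected = await sel.select_candidates()
        assert selected == [("node_b", 1, None)]      # fewest neighbors wins
        assert rejected == [("node_a", 3, None)]

    asyncio.run(demo())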
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/candidateselection/stdcandidateselector.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from nebula.core.situationalawareness.discovery.candidateselection.candidateselector import CandidateSelector
4 | from nebula.core.utils.locker import Locker
5 |
6 |
7 | class STDCandidateSelector(CandidateSelector):
8 | """
9 | Candidate selector for scenarios without a predefined structural topology.
10 |
11 | In cases where the federation topology is not explicitly structured,
12 | this selector chooses candidates based on the average number of neighbors
13 | indicated in their offers. It selects approximately as many candidates as the
14 | average neighbor count, aiming to balance connectivity dynamically.
15 |
16 | Attributes:
17 | candidates (list): List of candidate nodes available for selection.
18 | candidates_lock (Locker): Async lock to ensure thread-safe access to candidates.
19 |
20 | Methods:
21 | set_config(config): Optional configuration method.
22 | add_candidate(candidate): Adds a candidate node to the candidate list.
23 | select_candidates(): Selects candidates based on the average neighbor count from offers.
24 | remove_candidates(): Clears the candidates list.
25 | any_candidate(): Returns True if there is at least one candidate available.
26 |
27 | Inherits from:
28 | CandidateSelector: Base interface for candidate selection strategies.
29 | """
30 |
31 | def __init__(self):
32 | self.candidates = []
33 | self.candidates_lock = Locker(name="candidates_lock")
34 |
35 | async def set_config(self, config):
36 | pass
37 |
38 | async def add_candidate(self, candidate):
39 | self.candidates_lock.acquire()
40 | self.candidates.append(candidate)
41 | self.candidates_lock.release()
42 |
43 | async def select_candidates(self):
44 | """
45 |         Select as many candidates as the mean neighbor count reported in their offers.
46 | """
47 | self.candidates_lock.acquire()
48 | mean_neighbors = round(sum(n for _, n, _ in self.candidates) / len(self.candidates) if self.candidates else 0)
49 | logging.info(f"mean number of neighbors: {mean_neighbors}")
50 | cdts = self.candidates[:mean_neighbors]
51 | not_selected = set(self.candidates) - set(cdts)
52 | self.candidates_lock.release()
53 | return (cdts, not_selected)
54 |
55 | async def remove_candidates(self):
56 | self.candidates_lock.acquire()
57 | self.candidates = []
58 | self.candidates_lock.release()
59 |
60 | async def any_candidate(self):
61 | self.candidates_lock.acquire()
62 |         has_candidates = len(self.candidates) > 0
63 |         self.candidates_lock.release()
64 |         return has_candidates
65 |
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/modelhandlers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/situationalawareness/discovery/modelhandlers/__init__.py
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/modelhandlers/aggmodelhandler.py:
--------------------------------------------------------------------------------
1 | from nebula.core.situationalawareness.discovery.modelhandlers.modelhandler import ModelHandler
2 | from nebula.core.utils.locker import Locker
3 |
4 |
5 | class AGGModelHandler(ModelHandler):
6 | def __init__(self):
7 | self.model = None
8 | self.rounds = 0
9 | self.round = 0
10 | self.epochs = 1
11 | self.model_list = []
12 | self.models_lock = Locker(name="model_lock")
13 | self.params_lock = Locker(name="param_lock")
14 |
15 | def set_config(self, config):
16 | """
17 | Args:
18 | config[0] -> total rounds
19 | config[1] -> current round
20 | config[2] -> epochs
21 | """
22 | self.params_lock.acquire()
23 | self.rounds = config[0]
24 | if config[1] > self.round:
25 |             self.round = config[1]
26 | self.epochs = config[2]
27 | self.params_lock.release()
28 |
29 | def accept_model(self, model):
30 | """
31 | Save first model receive and collect the rest for pre-processing
32 | """
33 | self.models_lock.acquire()
34 | if self.model is None:
35 | self.model = model
36 | else:
37 | self.model_list.append(model)
38 | self.models_lock.release()
39 |
40 |     async def get_model(self, model):
41 | """
42 | Returns:
43 |             the data necessary to create the trainer after pre-processing
44 | """
45 | self.models_lock.acquire()
46 | self.pre_process_model()
47 | self.models_lock.release()
48 | return (self.model, self.rounds, self.round, self.epochs)
49 |
50 | def pre_process_model(self):
51 | # define pre-processing strategy
52 | pass
53 |
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/modelhandlers/defaultmodelhandler.py:
--------------------------------------------------------------------------------
1 | from nebula.core.situationalawareness.discovery.federationconnector import FederationConnector
2 | from nebula.core.situationalawareness.discovery.modelhandlers.modelhandler import ModelHandler
3 | from nebula.core.utils.locker import Locker
4 |
5 |
6 | class DefaultModelHandler(ModelHandler):
7 | """
8 | Provides the initial default model.
9 |
10 | This handler returns the baseline model with default weights,
11 | typically used at the start of the federation or when no suitable
12 | model offers have been received from peers.
13 |
14 | Inherits from:
15 | ModelHandler: Provides the base interface for model operations.
16 | """
17 |
18 | def __init__(self):
19 | self.model = None
20 | self.rounds = 0
21 | self.round = 0
22 | self.epochs = 0
23 | self.model_lock = Locker(name="model_lock")
24 | self.params_lock = Locker(name="param_lock")
25 |         self._nm: FederationConnector | None = None
26 |
27 | def set_config(self, config):
28 | """
29 | Args:
30 | config[0] -> total rounds
31 | config[1] -> current round
32 | config[2] -> epochs
33 | config[3] -> FederationConnector
34 | """
35 | self.params_lock.acquire()
36 | self.rounds = config[0]
37 | if config[1] > self.round:
38 | self.round = config[1]
39 | self.epochs = config[2]
40 | if not self._nm:
41 | self._nm = config[3]
42 | self.params_lock.release()
43 |
44 | def accept_model(self, model):
45 | return True
46 |
47 | async def get_model(self, model):
48 | """
49 | Returns:
50 | model with default weights
51 | """
52 | (sm, _, _) = await self._nm.engine.cm.propagator.get_model_information(None, "initialization", init=True)
53 | return (sm, self.rounds, self.round, self.epochs)
54 |
55 | def pre_process_model(self):
56 | """
57 | no pre-processing defined
58 | """
59 | pass
60 |
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/modelhandlers/modelhandler.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
4 | class ModelHandler(ABC):
5 | @abstractmethod
6 | def set_config(self, config):
7 | """
8 | Configure internal settings for the model handler using the provided configuration.
9 |
10 | Parameters:
11 | config: A configuration object or dictionary with parameters relevant to model handling.
12 | """
13 | pass
14 |
15 | @abstractmethod
16 | def accept_model(self, model):
17 | """
18 | Evaluate and store a received model if it satisfies the required criteria.
19 |
20 | Parameters:
21 | model: The model object to be processed or stored.
22 |
23 | Returns:
24 | bool: True if the model is accepted, False otherwise.
25 | """
26 | pass
27 |
28 | @abstractmethod
29 | async def get_model(self, model):
30 | """
31 | Asynchronously retrieve or generate the model to be used.
32 |
33 | Parameters:
34 | model: A reference to the kind of model to be used.
35 |
36 | Returns:
37 | object: The model instance requested.
38 | """
39 | pass
40 |
41 | @abstractmethod
42 | def pre_process_model(self):
43 | """
44 | Perform any necessary preprocessing steps on the model before it is used.
45 |
46 | Returns:
47 | object: The preprocessed model, ready for further operations.
48 | """
49 | pass
50 |
51 |
52 | def factory_ModelHandler(model_handler) -> ModelHandler:
53 | from nebula.core.situationalawareness.discovery.modelhandlers.aggmodelhandler import AGGModelHandler
54 | from nebula.core.situationalawareness.discovery.modelhandlers.defaultmodelhandler import DefaultModelHandler
55 | from nebula.core.situationalawareness.discovery.modelhandlers.stdmodelhandler import STDModelHandler
56 |
57 | options = {
58 | "std": STDModelHandler,
59 | "default": DefaultModelHandler,
60 | "aggregator": AGGModelHandler,
61 | }
62 |
63 | cs = options.get(model_handler, STDModelHandler)
64 | return cs()
65 |
--------------------------------------------------------------------------------
/nebula/core/situationalawareness/discovery/modelhandlers/stdmodelhandler.py:
--------------------------------------------------------------------------------
1 | from nebula.core.situationalawareness.discovery.modelhandlers.modelhandler import ModelHandler
2 | from nebula.core.utils.locker import Locker
3 |
4 |
5 | class STDModelHandler(ModelHandler):
6 | """
7 | Handles the selection and acquisition of the most up-to-date model
8 | during the discovery phase of the federation process.
9 |
10 |     This handler chooses the first model received.
11 |
12 | Inherits from:
13 | ModelHandler: Provides the base interface for model operations.
14 |
15 | Intended Use:
16 |         Used during the initial discovery phase, when a node discovers others and must
17 | align itself with the most recent global model state.
18 | """
19 |
20 | def __init__(self):
21 | self.model = None
22 | self.rounds = 0
23 | self.round = 0
24 | self.epochs = 0
25 | self.model_lock = Locker(name="model_lock")
26 | self.params_lock = Locker(name="param_lock")
27 |
28 | def set_config(self, config):
29 | """
30 | Args:
31 | config[0] -> total rounds
32 | config[1] -> current round
33 | config[2] -> epochs
34 | """
35 | self.params_lock.acquire()
36 | self.rounds = config[0]
37 | if config[1] > self.round:
38 | self.round = config[1]
39 | self.epochs = config[2]
40 | self.params_lock.release()
41 |
42 | def accept_model(self, model):
43 | """
44 |         Save only the first model received; it is used later to set up the node's own model.
45 | """
46 | if not self.model_lock.locked():
47 | self.model_lock.acquire()
48 | self.model = model
49 | return True
50 |
51 | async def get_model(self, model):
52 | """
53 | Returns:
54 |             the data necessary to create the trainer
55 | """
56 | if self.model is not None:
57 | return (self.model, self.rounds, self.round, self.epochs)
58 | else:
59 | return (None, 0, 0, 0)
60 |
61 | def pre_process_model(self):
62 | """
63 | no pre-processing defined
64 | """
65 | pass
66 |
--------------------------------------------------------------------------------
/nebula/core/training/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/training/__init__.py
--------------------------------------------------------------------------------
/nebula/core/training/scikit.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import pickle
3 | import traceback
4 |
5 | from sklearn.metrics import accuracy_score
6 |
7 |
8 | class Scikit:
9 | def __init__(self, model, data, config=None, logger=None):
10 | self.model = model
11 | self.data = data
12 | self.config = config
13 | self.logger = logger
14 | self.round = 0
15 | self.epochs = 1
16 |         if self.logger:  # logger is optional; guard as finalize_round() does
17 |             self.logger.log_data({"Round": self.round}, step=self.logger.global_step)
18 | def set_model(self, model):
19 | self.model = model
20 |
21 | def get_round(self):
22 | return self.round
23 |
24 | def set_data(self, data):
25 | self.data = data
26 |
27 | def serialize_model(self, params=None):
28 | if params is None:
29 | params = self.model.get_params()
30 | return pickle.dumps(params)
31 |
32 | def deserialize_model(self, data):
33 | try:
34 | params = pickle.loads(data)
35 | return params
36 |         except Exception as e:
37 |             raise Exception("Error decoding parameters") from e
38 |
39 | def set_model_parameters(self, params):
40 | self.model.set_params(**params)
41 |
42 | def get_model_parameters(self):
43 | return self.model.get_params()
44 |
45 | def set_epochs(self, epochs):
46 | self.epochs = epochs
47 |
48 | def fit(self):
49 | try:
50 | X_train, y_train = self.data.train_dataloader()
51 | self.model.fit(X_train, y_train)
52 | except Exception as e:
53 | logging.exception(f"Error with scikit-learn fit. {e}")
54 | logging.exception(traceback.format_exc())
55 |
56 | def interrupt_fit(self):
57 | pass
58 |
59 | def evaluate(self):
60 | try:
61 | X_test, y_test = self.data.test_dataloader()
62 | y_pred = self.model.predict(X_test)
63 | accuracy = accuracy_score(y_test, y_pred)
64 | logging.info(f"Accuracy: {accuracy}")
65 | except Exception as e:
66 | logging.exception(f"Error with scikit-learn evaluate. {e}")
67 | logging.exception(traceback.format_exc())
68 | return None
69 |
70 | def get_train_size(self):
71 | return (
72 | len(self.data.train_dataloader()),
73 | len(self.data.test_dataloader()),
74 | )
75 |
76 | def finalize_round(self):
77 | self.round += 1
78 | if self.logger:
79 | self.logger.log_data({"Round": self.round})
80 |
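Scikit only needs a data object exposing train_dataloader()/test_dataloader() that return (X, y) pairs. A minimal sketch with scikit-learn's iris set; IrisData is a hypothetical stand-in for NEBULA's dataset wrapper:

    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split

    from nebula.core.training.scikit import Scikit

    class IrisData:
        def __init__(self):
            X, y = load_iris(return_X_y=True)
            self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
                X, y, test_size=0.2, random_state=0
            )

        def train_dataloader(self):
            return self.X_train, self.y_train

        def test_dataloader(self):
            return self.X_test, self.y_test

    trainer = Scikit(LogisticRegression(max_iter=200), IrisData())
    trainer.fit()
    trainer.evaluate()        # logs the test accuracy
    trainer.finalize_round()  # round counter advances; logging is skipped without a logger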
--------------------------------------------------------------------------------
/nebula/core/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/core/utils/__init__.py
--------------------------------------------------------------------------------
/nebula/core/utils/deterministic.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import random
4 |
5 | import numpy as np
6 | import torch
7 |
8 |
9 | def enable_deterministic(seed):
10 | logging.info(f"Fixing randomness with seed {seed}")
11 | np.random.seed(seed)
12 | os.environ["PYTHONHASHSEED"] = str(seed)
13 | random.seed(seed)
14 | torch.manual_seed(seed)
15 | torch.cuda.manual_seed(seed)
16 | torch.cuda.manual_seed_all(seed)
17 | torch.backends.cudnn.deterministic = True
18 | torch.backends.cudnn.benchmark = False
19 |
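Re-seeding restores the RNG state, so identical seeds yield identical draws; a quick reproducibility check:

    import torch

    from nebula.core.utils.deterministic import enable_deterministic

    enable_deterministic(42)
    a = torch.rand(3)
    enable_deterministic(42)
    b = torch.rand(3)
    assert torch.equal(a, b)  # same seed, same draws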
--------------------------------------------------------------------------------
/nebula/core/utils/nebulalogger_tensorboard.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from datetime import datetime
3 |
4 | from lightning.pytorch.loggers import TensorBoardLogger
5 |
6 |
7 | class NebulaTensorBoardLogger(TensorBoardLogger):
8 | def __init__(self, scenario_start_time, *args, **kwargs):
9 | self.scenario_start_time = scenario_start_time
10 | self.local_step = 0
11 | self.global_step = 0
12 | super().__init__(*args, **kwargs)
13 |
14 | def get_step(self):
15 | return int((datetime.now() - datetime.strptime(self.scenario_start_time, "%d/%m/%Y %H:%M:%S")).total_seconds())
16 |
17 | def log_data(self, data, step=None):
18 | if step is None:
19 | step = self.get_step()
20 | # logging.debug(f"Logging data for global step {step} | local step {self.local_step} | global step {self.global_step}")
21 | try:
22 | super().log_metrics(data, step)
23 | except ValueError:
24 | pass
25 | except Exception as e:
26 | logging.exception(f"Error logging statistics data [{data}] for step [{step}]: {e}")
27 |
28 | def log_metrics(self, metrics, step=None):
29 | if step is None:
30 | self.local_step += 1
31 | step = self.global_step + self.local_step
32 | # logging.debug(f"Logging metrics for global step {step} | local step {self.local_step} | global step {self.global_step}")
33 | if "epoch" in metrics:
34 | metrics.pop("epoch")
35 | try:
36 | super().log_metrics(metrics, step)
37 | except Exception as e:
38 | logging.exception(f"Error logging metrics [{metrics}] for step [{step}]: {e}")
39 |
40 | def log_figure(self, figure, step=None, name=None):
41 | if step is None:
42 | step = self.get_step()
43 | try:
44 | self.experiment.add_figure(name, figure, step)
45 | except Exception as e:
46 | logging.exception(f"Error logging figure [{name}] for step [{step}]: {e}")
47 |
48 | def get_logger_config(self):
49 | return {
50 | "scenario_start_time": self.scenario_start_time,
51 | "local_step": self.local_step,
52 | "global_step": self.global_step,
53 | }
54 |
55 | def set_logger_config(self, logger_config):
56 | if logger_config is None:
57 | return
58 | try:
59 | self.scenario_start_time = logger_config["scenario_start_time"]
60 | self.local_step = logger_config["local_step"]
61 | self.global_step = logger_config["global_step"]
62 | except Exception as e:
63 | logging.exception(f"Error setting logger config: {e}")
64 |
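A hedged usage sketch of the logger; save_dir and name are made-up values passed through to Lightning's TensorBoardLogger, and scenario_start_time must match the "%d/%m/%Y %H:%M:%S" format that get_step parses:

from nebula.core.utils.nebulalogger_tensorboard import NebulaTensorBoardLogger

logger = NebulaTensorBoardLogger(
    scenario_start_time="01/01/2025 12:00:00",  # illustrative timestamp
    save_dir="/tmp/nebula-logs",                # illustrative directory
    name="participant_0",                       # illustrative run name
)
logger.log_data({"Round": 1})  # step defaults to seconds since scenario start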
--------------------------------------------------------------------------------
/nebula/core/utils/tasks.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 |
4 |
5 | async def debug_tasks():
6 | while True:
7 | tasks = asyncio.all_tasks()
8 | logging.info(f"Active tasks: {len(tasks)}")
9 | for task in tasks:
10 | logging.info(f"Task: {task}")
11 | await asyncio.sleep(5)
12 |
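Because debug_tasks loops forever, it is meant to be scheduled as a background task rather than awaited directly; a minimal sketch, in which the surrounding coroutine is illustrative:

import asyncio

from nebula.core.utils.tasks import debug_tasks

async def main():
    monitor = asyncio.create_task(debug_tasks())  # runs alongside real work
    await asyncio.sleep(20)                       # stand-in for the workload
    monitor.cancel()                              # stop the infinite loop on shutdown

asyncio.run(main())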
--------------------------------------------------------------------------------
/nebula/frontend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive
4 |
5 | RUN apt-get update && apt-get install -y tzdata \
6 | && ln -fs /usr/share/zoneinfo/Europe/Madrid /etc/localtime \
7 | && dpkg-reconfigure -f noninteractive tzdata
8 |
9 | ENV TZ=Europe/Madrid
10 |
11 | # Install Python 3.11 from the deadsnakes PPA (uv pins 3.11.7 below)
12 | RUN apt-get update && apt-get install -y software-properties-common
13 | RUN add-apt-repository ppa:deadsnakes/ppa
14 | RUN apt-get update && apt-get install -y python3.11 python3.11-dev python3.11-distutils python3.11-venv
15 |
16 | # Install curl and network tools
17 | RUN apt-get install -y curl net-tools iproute2 iputils-ping
18 |
19 | # Update alternatives to make Python 3.11 the default
20 | RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 2
21 | RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1
22 |
23 | RUN apt-get install -y nginx
24 |
25 | # Install gcc and git
26 | RUN apt-get update && apt-get install -y build-essential gcc g++ clang git make cmake g++-aarch64-linux-gnu dos2unix
27 |
28 | # Install docker
29 | RUN apt-get install -y ca-certificates curl gnupg
30 | RUN install -m 0755 -d /etc/apt/keyrings
31 | RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
32 | RUN chmod a+r /etc/apt/keyrings/docker.gpg
33 | RUN echo \
34 | "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
35 | "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
36 | tee /etc/apt/sources.list.d/docker.list > /dev/null
37 | RUN apt-get update
38 |
39 | RUN apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
40 |
41 | ADD https://astral.sh/uv/install.sh /uv-installer.sh
42 |
43 | RUN sh /uv-installer.sh && rm /uv-installer.sh
44 |
45 | ENV PATH="/root/.local/bin/:$PATH"
46 |
47 | COPY pyproject.toml .
48 |
49 | RUN uv python install 3.11.7
50 |
51 | RUN uv python pin 3.11.7
52 |
53 | RUN uv sync --group frontend
54 |
55 | ENV PATH="/.venv/bin:$PATH"
56 |
57 | COPY /nebula/frontend/start_services.sh .
58 |
59 | RUN dos2unix start_services.sh
60 |
61 | RUN chmod +x start_services.sh
62 |
63 | ENTRYPOINT ["/bin/bash", "/start_services.sh"]
64 |
--------------------------------------------------------------------------------
/nebula/frontend/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/__init__.py
--------------------------------------------------------------------------------
/nebula/frontend/config/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/config/__init__.py
--------------------------------------------------------------------------------
/nebula/frontend/config/nebula:
--------------------------------------------------------------------------------
1 | ##
2 | # You should look at the following URLs to gain a solid understanding
3 | # of Nginx configuration files and to fully unleash the power of Nginx.
4 | # https://www.nginx.com/resources/wiki/start/
5 | # https://www.nginx.com/resources/wiki/start/topics/tutorials/config_pitfalls/
6 | # https://wiki.debian.org/Nginx/DirectoryStructure
7 | #
8 | # In most cases, administrators will remove this file from sites-enabled/ and
9 | # leave it as reference inside of sites-available where it will continue to be
10 | # updated by the nginx packaging team.
11 | #
12 | # This file will automatically load configuration files provided by other
13 | # applications, such as Drupal or Wordpress. These applications will be made
14 | # available underneath a path with that package name, such as /drupal8.
15 | #
16 | # Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples.
17 | ##
18 |
19 | server {
20 | listen 80;
21 | server_name localhost;
22 |
23 | location /static {
24 | alias /nebula/nebula/frontend/static;
25 | }
26 | location /images {
27 | alias /nebula/nebula/frontend/images;
28 | }
29 | location / {
30 | include proxy_params;
31 | proxy_intercept_errors on;
32 | error_page 500 502 503 504 /maintenance.html;
33 | proxy_pass http://unix:/tmp/nebula.sock;
34 | proxy_http_version 1.1;
35 | proxy_set_header Upgrade $http_upgrade;
36 | proxy_set_header Connection "upgrade";
37 | proxy_set_header Host $host;
38 | proxy_set_header X-Real-IP $remote_addr;
39 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
40 | proxy_set_header X-Forwarded-Proto $scheme;
41 | proxy_set_header X-Forwarded-Host $host;
42 | proxy_set_header X-Forwarded-Port $server_port;
43 | proxy_cache_bypass $http_upgrade;
44 | add_header 'Access-Control-Allow-Origin' '*';
45 | }
46 | location = /maintenance.html {
47 | internal;
48 | root /nebula/nebula/frontend/static;
49 | add_header Content-Type "text/html";
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/nebula/frontend/start_services.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Print commands and their arguments as they are executed (debugging)
4 | set -x
5 |
6 | # Print debug messages to the console
7 | echo "Starting services..."
8 |
9 | # Start Nginx in the background so the script can continue
10 | nginx &
11 |
12 | # Change directory to where app.py is located
13 | NEBULA_FRONTEND_DIR=/nebula/nebula/frontend
14 | cd $NEBULA_FRONTEND_DIR
15 |
16 | # Start Uvicorn (ASGI server)
17 | NEBULA_SOCK=nebula.sock
18 |
19 | NEBULA_FRONTEND_STATIC_DIR=/nebula/nebula/frontend/static
20 | NEBULA_FRONTEND_TEMPLATES_DIR=/nebula/nebula/frontend/templates
21 | echo "NEBULA_PRODUCTION: $NEBULA_PRODUCTION"
22 | if [ "$NEBULA_PRODUCTION" = "False" ]; then
23 | echo "Starting Gunicorn in dev mode..."
24 | uvicorn app:app --uds /tmp/$NEBULA_SOCK --log-level debug --proxy-headers --forwarded-allow-ips "*" &
25 | else
26 | echo "Starting Gunicorn in production mode..."
27 | uvicorn app:app --uds /tmp/$NEBULA_SOCK --log-level info --proxy-headers --forwarded-allow-ips "*" &
28 | fi
29 |
30 | if [ "$NEBULA_ADVANCED_ANALYTICS" = "False" ]; then
31 | echo "Starting Tensorboard analytics"
32 | tensorboard --host 0.0.0.0 --port 8080 --logdir $NEBULA_LOGS_DIR --window_title "NEBULA Statistics" --reload_interval 30 --max_reload_threads 10 --reload_multifile true &
33 | else
34 | echo "Advanced analytics are enabled"
35 | fi
36 |
37 | tail -f /dev/null
38 |
--------------------------------------------------------------------------------
/nebula/frontend/static/css/images/layers-2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/css/images/layers-2x.png
--------------------------------------------------------------------------------
/nebula/frontend/static/css/images/layers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/css/images/layers.png
--------------------------------------------------------------------------------
/nebula/frontend/static/css/images/marker-icon-2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/css/images/marker-icon-2x.png
--------------------------------------------------------------------------------
/nebula/frontend/static/css/images/marker-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/css/images/marker-icon.png
--------------------------------------------------------------------------------
/nebula/frontend/static/css/images/marker-shadow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/css/images/marker-shadow.png
--------------------------------------------------------------------------------
/nebula/frontend/static/css/particles.css:
--------------------------------------------------------------------------------
1 | canvas {
2 | display: block;
3 | }
4 |
5 | #particles-js {
6 | position: fixed;
7 | width: 100%;
8 | height: 100%;
9 | top: 0;
10 | left: 0;
11 | background-color: #ffffff;
12 | background-image: url("");
13 | background-repeat: no-repeat;
14 | background-size: cover;
15 | background-position: 50% 50%;
16 | z-index: -1;
17 | pointer-events: auto;
18 | }
19 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/README:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/android-chrome-192x192.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/android-chrome-512x512.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/apple-touch-icon.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/balancediid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/balancediid.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/browserconfig.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | #da532c
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/alberto-huertas.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/alberto-huertas.jpg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/angel-luis.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/angel-luis.jpg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/chao-feng.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/chao-feng.jpeg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/enrique-tomas-martinez-beltran.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/enrique-tomas-martinez-beltran.jpg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/gerome-bovet.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/gerome-bovet.jpeg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/gregorio-martinez-perez.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/gregorio-martinez-perez.jpg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/manuel-gil.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/manuel-gil.jpg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/pedro-miguel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/pedro-miguel.jpg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/contributors/sergio-lopez-bernal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/contributors/sergio-lopez-bernal.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/device.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/device.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/dirichlet_noniid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/dirichlet_noniid.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/drone.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/drone_offline.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/favicon-16x16.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/favicon-32x32.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/favicon.ico
--------------------------------------------------------------------------------
/nebula/frontend/static/images/mstile-144x144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/mstile-144x144.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/mstile-150x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/mstile-150x150.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/mstile-310x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/mstile-310x150.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/mstile-310x310.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/mstile-310x310.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/mstile-70x70.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/mstile-70x70.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/nebula-icon-white.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/nebula-icon.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/nebula-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/nebula-logo.jpg
--------------------------------------------------------------------------------
/nebula/frontend/static/images/percentage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/percentage.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/physical-device.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/physical-device.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/publications/arxiv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/publications/arxiv.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/publications/eswa.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/publications/eswa.gif
--------------------------------------------------------------------------------
/nebula/frontend/static/images/publications/ieee.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/publications/ieee.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/publications/ijcai-23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/publications/ijcai-23.png
--------------------------------------------------------------------------------
/nebula/frontend/static/images/safari-pinned-tab.svg:
--------------------------------------------------------------------------------
1 |
2 |
4 |
7 |
8 | Created by potrace 1.14, written by Peter Selinger 2001-2017
9 |
10 |
12 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/site.webmanifest:
--------------------------------------------------------------------------------
1 | {
2 | "name": "NEBULA: A Platform for Decentralized Federated Learning",
3 | "short_name": "NEBULA",
4 | "icons": [
5 | {
6 | "src": "/platform/static/images/android-chrome-192x192.png",
7 | "sizes": "192x192",
8 | "type": "image/png"
9 | },
10 | {
11 | "src": "/platform/static/images/android-chrome-512x512.png",
12 | "sizes": "512x512",
13 | "type": "image/png"
14 | }
15 | ],
16 | "theme_color": "#ffffff",
17 | "background_color": "#ffffff",
18 | "display": "standalone"
19 | }
20 |
--------------------------------------------------------------------------------
/nebula/frontend/static/images/unbalanceiid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/images/unbalanceiid.png
--------------------------------------------------------------------------------
/nebula/frontend/static/js/custom.js:
--------------------------------------------------------------------------------
1 | (function() {
2 | "use strict";
3 |
4 | const select = (el, all = false) => {
5 | el = el.trim()
6 | if (all) {
7 | return [...document.querySelectorAll(el)]
8 | } else {
9 | return document.querySelector(el)
10 | }
11 | }
12 |
13 | const on = (type, el, listener, all = false) => {
14 | let selectEl = select(el, all)
15 | if (selectEl) {
16 | if (all) {
17 | selectEl.forEach(e => e.addEventListener(type, listener))
18 | } else {
19 | selectEl.addEventListener(type, listener)
20 | }
21 | }
22 | }
23 |
24 | document.addEventListener('DOMContentLoaded', function() {
25 | const copyright = document.getElementById("copyright");
26 | var date = new Date();
27 | var year = date.getFullYear();
28 | copyright.innerHTML = `© ${year} NEBULA. All rights reserved. Documentation | Source code`;
29 | });
30 |
31 | on('click', '.mobile-nav-toggle', function(e) {
32 | select('#navbar').classList.toggle('navbar-mobile')
33 | this.classList.toggle('bi-list')
34 | this.classList.toggle('bi-x')
35 | })
36 |
37 | on('click', '.navbar .dropdown > a', function(e) {
38 | if (select('#navbar').classList.contains('navbar-mobile')) {
39 | e.preventDefault()
40 | this.nextElementSibling.classList.toggle('dropdown-active')
41 | }
42 | }, true)
43 |
44 | let preloader = select('#preloader');
45 | if (preloader) {
46 | window.addEventListener('load', () => {
47 | preloader.remove()
48 | });
49 | }
50 |
51 | })()
52 |
--------------------------------------------------------------------------------
/nebula/frontend/static/js/dashboard/config-manager.js:
--------------------------------------------------------------------------------
1 | // Configuration Manager Module
2 | const ConfigManager = {
3 | init() {
4 | this.bindEvents();
5 | },
6 |
7 | bindEvents() {
8 | document.querySelectorAll('[id^=config-btn]').forEach(button => {
9 | button.addEventListener('click', () => {
10 | this.toggleConfigRow(button.dataset.scenarioName);
11 | });
12 | });
13 | },
14 |
15 | async toggleConfigRow(scenarioName) {
16 | const configRow = document.getElementById(`config-row-${scenarioName}`);
17 | const configTextElement = document.getElementById(`config-text-${scenarioName}`);
18 |
19 | if (configRow.style.display === 'none') {
20 | try {
21 | const response = await fetch(`/platform/dashboard/${scenarioName}/config`);
22 | const data = await response.json();
23 |
24 | if (data.status === 'success') {
25 | configTextElement.value = JSON.stringify(data.config, null, 2);
26 | } else {
27 | configTextElement.value = 'No configuration available.';
28 | }
29 | } catch (error) {
30 | console.error('Error:', error);
31 | alert('An error occurred while retrieving the configuration.');
32 | return;
33 | }
34 | }
35 | configRow.style.display = configRow.style.display === 'none' ? '' : 'none';
36 | }
37 | };
38 |
39 | export default ConfigManager;
40 |
--------------------------------------------------------------------------------
/nebula/frontend/static/js/dashboard/dashboard.js:
--------------------------------------------------------------------------------
1 | import ScenarioActions from './scenario-actions.js';
2 | import NotesManager from './notes-manager.js';
3 | import ConfigManager from './config-manager.js';
4 |
5 | // Main Dashboard Module
6 | const Dashboard = {
7 | init() {
8 | this.initializeModules();
9 | // Only show demo message if user is not logged in
10 | if (typeof window.userLoggedIn === 'boolean' && !window.userLoggedIn) {
11 | this.checkDemoMode();
12 | }
13 | },
14 |
15 | initializeModules() {
16 | ScenarioActions.init();
17 | NotesManager.init();
18 | ConfigManager.init();
19 | },
20 |
21 | checkDemoMode() {
22 | showAlert('info', 'Some functionalities are disabled in the demo version.');
23 | }
24 | };
25 |
26 | // Initialize dashboard when DOM is ready
27 | document.addEventListener('DOMContentLoaded', () => {
28 | Dashboard.init();
29 | });
30 |
31 | export default Dashboard;
32 |
--------------------------------------------------------------------------------
/nebula/frontend/static/js/dashboard/notes-manager.js:
--------------------------------------------------------------------------------
1 | // Notes Manager Module
2 | const NotesManager = {
3 | init() {
4 | this.bindEvents();
5 | },
6 |
7 | bindEvents() {
8 | document.querySelectorAll('[id^=note-btn]').forEach(button => {
9 | button.addEventListener('click', () => {
10 | this.toggleNotesRow(button.dataset.scenarioName);
11 | });
12 | });
13 |
14 | document.querySelectorAll('[id^=save-note]').forEach(button => {
15 | button.addEventListener('click', () => {
16 | this.saveNotes(button.dataset.scenarioName);
17 | });
18 | });
19 | },
20 |
21 | async toggleNotesRow(scenarioName) {
22 | const notesRow = document.getElementById(`notes-row-${scenarioName}`);
23 | const notesTextElement = document.getElementById(`notes-text-${scenarioName}`);
24 |
25 | if (notesRow.style.display === 'none') {
26 | try {
27 | const response = await fetch(`/platform/dashboard/${scenarioName}/notes`);
28 | const data = await response.json();
29 |
30 | if (data.status === 'success') {
31 | notesTextElement.value = data.notes;
32 | } else {
33 | notesTextElement.value = '';
34 | }
35 | } catch (error) {
36 | console.error('Error:', error);
37 | alert('An error occurred while retrieving the notes.');
38 | return;
39 | }
40 | }
41 |
42 | notesRow.style.display = notesRow.style.display === 'none' ? '' : 'none';
43 | },
44 |
45 | async saveNotes(scenarioName) {
46 | const notesText = document.getElementById(`notes-text-${scenarioName}`).value;
47 |
48 | try {
49 | const response = await fetch(`/platform/dashboard/${scenarioName}/save_note`, {
50 | method: 'POST',
51 | headers: {
52 | 'Content-Type': 'application/json',
53 | },
54 | body: JSON.stringify({ notes: notesText }),
55 | });
56 |
57 | const data = await response.json();
58 |
59 | if (data.status === 'success') {
60 | showAlert('success', 'Notes saved successfully');
61 | } else {
62 | if (data.code === 401) {
63 | showAlert('info', 'Some functionalities are disabled in the demo version.');
64 | } else {
65 | showAlert('error', 'Failed to save notes');
66 | }
67 | }
68 | } catch (error) {
69 | console.error('Error:', error);
70 | showAlert('error', 'Failed to save notes');
71 | }
72 | }
73 | };
74 |
75 | export default NotesManager;
76 |
--------------------------------------------------------------------------------
/nebula/frontend/static/js/dashboard/scenario-actions.js:
--------------------------------------------------------------------------------
1 | // Scenario Actions Module
2 | const ScenarioActions = {
3 | init() {
4 | this.bindEvents();
5 | },
6 |
7 | bindEvents() {
8 | $(document).on('click', '#relaunch-btn', this.handleRelaunch.bind(this));
9 | $(document).on('click', '#remove-btn', this.handleRemove.bind(this));
10 | },
11 |
12 | handleRelaunch(event) {
13 | const scenarioName = $(event.currentTarget).data('scenario-name');
14 | const scenarioTitle = $(event.currentTarget).data('scenario-title');
15 |
16 | $('#confirm-modal').modal('show');
17 | $('#confirm-modal .modal-title').text('Relaunch scenario');
18 | $('#confirm-modal #confirm-modal-body').html(`Are you sure you want to relaunch the scenario ${scenarioTitle}?`);
19 |
20 | $('#confirm-modal #yes-button').off('click').on('click', () => {
21 | this.executeRelaunch(scenarioName);
22 | });
23 | },
24 |
25 | handleRemove(event) {
26 | const scenarioName = $(event.currentTarget).data('scenario-name');
27 |
28 | $('#confirm-modal').modal('show');
29 | $('#confirm-modal .modal-title').text('Remove scenario');
30 | $('#confirm-modal #confirm-modal-body').html(
31 | `Are you sure you want to remove the scenario ${scenarioName}? ` +
32 | `Warning: you will remove the scenario from the database`
33 | );
34 |
35 | $('#confirm-modal #yes-button').off('click').on('click', () => {
36 | this.executeRemove(scenarioName);
37 | });
38 | },
39 |
40 | async executeRelaunch(scenarioName) {
41 | try {
42 | const response = await fetch(`/platform/dashboard/${scenarioName}/relaunch`, {
43 | method: 'GET'
44 | });
45 |
46 | if (response.redirected) {
47 | window.location.href = response.url;
48 | } else {
49 | $('#confirm-modal').modal('hide');
50 | $('#confirm-modal').on('hidden.bs.modal', () => {
51 | $('#info-modal-body').html('You are not allowed to relaunch a scenario with demo role.');
52 | $('#info-modal').modal('show');
53 | });
54 | }
55 | } catch (error) {
56 | console.error('Error:', error);
57 | }
58 | },
59 |
60 | async executeRemove(scenarioName) {
61 | try {
62 | const response = await fetch(`/platform/dashboard/${scenarioName}/remove`, {
63 | method: 'GET'
64 | });
65 |
66 | if (response.redirected) {
67 | window.location.href = response.url;
68 | } else {
69 | $('#confirm-modal').modal('hide');
70 | $('#confirm-modal').on('hidden.bs.modal', () => {
71 | $('#info-modal-body').html('You are not allowed to remove a scenario with demo role.');
72 | $('#info-modal').modal('show');
73 | });
74 | }
75 | } catch (error) {
76 | console.error('Error:', error);
77 | }
78 | }
79 | };
80 |
81 | export default ScenarioActions;
82 |
--------------------------------------------------------------------------------
/nebula/frontend/static/js/deployment/graph-settings.js:
--------------------------------------------------------------------------------
1 | // Graph Settings Module
2 | const GraphSettings = (function() {
3 | const Settings = {
4 | solidDistance: 50,
5 | Distance: 50
6 | };
7 |
8 | function initializeDistanceControls() {
9 | const distanceInput = document.getElementById('distanceInput');
10 | const distanceValue = document.getElementById('distanceValue');
11 |
12 | distanceInput.addEventListener('input', function() {
13 | distanceValue.value = distanceInput.value;
14 | Settings.Distance = distanceInput.value;
15 | updateLinkDistance();
16 | });
17 |
18 | distanceValue.addEventListener('input', function() {
19 | distanceInput.value = distanceValue.value;
20 | Settings.Distance = distanceValue.value;
21 | updateLinkDistance();
22 | });
23 | }
24 |
25 | function updateLinkDistance() {
26 | const Graph = window.TopologyManager.getGraph();
27 | if (Graph) {
28 | Graph.d3Force('link')
29 | .distance(link => link.color ? Settings.solidDistance : Settings.Distance);
30 | Graph.numDimensions(3); // Re-heat simulation
31 | }
32 | }
33 |
34 | return {
35 | initializeDistanceControls,
36 | updateLinkDistance,
37 | getSettings: () => Settings
38 | };
39 | })();
40 |
41 | export default GraphSettings;
42 |
--------------------------------------------------------------------------------
/nebula/frontend/static/js/deployment/utils.js:
--------------------------------------------------------------------------------
1 | // Utility Functions Module
2 | const Utils = (function() {
3 | function greaterThan0(input) {
4 | const value = parseInt(input.value);
5 | if(value < 1 && !isNaN(value)) {
6 | input.value = 1;
7 | }
8 | }
9 |
10 | function isInRange(input, min, max) {
11 | const value = parseFloat(input.value);
12 | if(isNaN(value)) {
13 | input.value = min;
14 | } else {
15 | input.value = Math.min(Math.max(value, min), max);
16 | }
17 | }
18 |
19 | function handleProbabilityChange(input) {
20 | let value = parseFloat(input.value);
21 | if (isNaN(value)) {
22 | value = 0.5;
23 | } else {
24 | value = Math.min(Math.max(value, 0), 1);
25 | }
26 | input.value = value.toFixed(1);
27 |
28 | // Trigger topology update if Random is selected
29 | const topologySelect = document.getElementById('predefined-topology-select');
30 | if (topologySelect && topologySelect.value === 'Random') {
31 | window.TopologyManager.generatePredefinedTopology();
32 | }
33 | }
34 |
35 | function atLeastOneChecked(checkboxIds) {
36 | return checkboxIds.some(function(id) {
37 | const checkbox = document.getElementById(id);
38 | return checkbox && checkbox.checked;
39 | });
40 | }
41 |
42 | function selectALL(checkboxIds, checked) {
43 | for (let i = 0; i < checkboxIds.length; i++) {
44 | document.getElementById(checkboxIds[i]).checked = checked;
45 | }
46 | }
47 |
48 | return {
49 | showAlert,
50 | greaterThan0,
51 | isInRange,
52 | atLeastOneChecked,
53 | selectALL,
54 | handleProbabilityChange
55 | };
56 | })();
57 |
58 | export default Utils;
59 |
--------------------------------------------------------------------------------
/nebula/frontend/static/js/particles.json:
--------------------------------------------------------------------------------
1 | {
2 | "particles": {
3 | "number": {
4 | "value": 40,
5 | "density": {
6 | "enable": true,
7 | "value_area": 800
8 | }
9 | },
10 | "color": {
11 | "value": "#1d2253"
12 | },
13 | "shape": {
14 | "type": "circle",
15 | "stroke": {
16 | "width": 0,
17 | "color": "#000000"
18 | },
19 | "polygon": {
20 | "nb_sides": 5
21 | },
22 | "image": {
23 | "src": "img/github.svg",
24 | "width": 100,
25 | "height": 100
26 | }
27 | },
28 | "opacity": {
29 | "value": 0.5,
30 | "random": false,
31 | "anim": {
32 | "enable": false,
33 | "speed": 1,
34 | "opacity_min": 0.1,
35 | "sync": false
36 | }
37 | },
38 | "size": {
39 | "value": 5,
40 | "random": true,
41 | "anim": {
42 | "enable": false,
43 | "speed": 40,
44 | "size_min": 0.1,
45 | "sync": false
46 | }
47 | },
48 | "line_linked": {
49 | "enable": true,
50 | "distance": 150,
51 | "color": "#1d2253",
52 | "opacity": 0.4,
53 | "width": 1
54 | },
55 | "move": {
56 | "enable": true,
57 | "speed": 2,
58 | "direction": "none",
59 | "random": false,
60 | "straight": false,
61 | "out_mode": "out",
62 | "attract": {
63 | "enable": false,
64 | "rotateX": 600,
65 | "rotateY": 1200
66 | }
67 | }
68 | },
69 | "interactivity": {
70 | "detect_on": "window",
71 | "events": {
72 | "onhover": {
73 | "enable": true,
74 | "mode": "repulse"
75 | },
76 | "onclick": {
77 | "enable": true,
78 | "mode": "push"
79 | },
80 | "resize": true
81 | },
82 | "modes": {
83 | "grab": {
84 | "distance": 400,
85 | "line_linked": {
86 | "opacity": 1
87 | }
88 | },
89 | "bubble": {
90 | "distance": 400,
91 | "size": 40,
92 | "duration": 2,
93 | "opacity": 8,
94 | "speed": 3
95 | },
96 | "repulse": {
97 | "distance": 100
98 | },
99 | "push": {
100 | "particles_nb": 4
101 | },
102 | "remove": {
103 | "particles_nb": 2
104 | }
105 | }
106 | },
107 | "retina_detect": true,
108 | "config_demo": {
109 | "hide_card": false,
110 | "background_color": "#1d2253",
111 | "background_image": "",
112 | "background_position": "50% 50%",
113 | "background_repeat": "no-repeat",
114 | "background_size": "cover"
115 | }
116 | }
117 |
--------------------------------------------------------------------------------
/nebula/frontend/static/maintenance.html:
--------------------------------------------------------------------------------
1 |
2 | Site Maintenance
3 |
4 |
15 |
16 |
17 |
18 | We’ll be back soon!
19 |
20 | Sorry for the inconvenience. We’re performing some maintenance at the moment. If you need to, you can always contact us by email for updates; otherwise we’ll be back up shortly!
21 | — NEBULA team
22 |
23 |
24 |
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/bootstrap-icons/fonts/bootstrap-icons.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/bootstrap-icons/fonts/bootstrap-icons.woff
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/bootstrap-icons/fonts/bootstrap-icons.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/bootstrap-icons/fonts/bootstrap-icons.woff2
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/boxicons/css/transformations.css:
--------------------------------------------------------------------------------
1 | .bx-rotate-90
2 | {
3 | transform: rotate(90deg);
4 |
5 | -ms-filter: 'progid:DXImageTransform.Microsoft.BasicImage(rotation=1)';
6 | }
7 | .bx-rotate-180
8 | {
9 | transform: rotate(180deg);
10 |
11 | -ms-filter: 'progid:DXImageTransform.Microsoft.BasicImage(rotation=2)';
12 | }
13 | .bx-rotate-270
14 | {
15 | transform: rotate(270deg);
16 |
17 | -ms-filter: 'progid:DXImageTransform.Microsoft.BasicImage(rotation=3)';
18 | }
19 | .bx-flip-horizontal
20 | {
21 | transform: scaleX(-1);
22 |
23 | -ms-filter: 'progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)';
24 | }
25 | .bx-flip-vertical
26 | {
27 | transform: scaleY(-1);
28 |
29 | -ms-filter: 'progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)';
30 | }
31 |
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/boxicons/fonts/boxicons.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/boxicons/fonts/boxicons.eot
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/boxicons/fonts/boxicons.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/boxicons/fonts/boxicons.ttf
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/boxicons/fonts/boxicons.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/boxicons/fonts/boxicons.woff
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/boxicons/fonts/boxicons.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/boxicons/fonts/boxicons.woff2
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/php-email-form/validate.js:
--------------------------------------------------------------------------------
1 | /**
2 | * PHP Email Form Validation - v3.7
3 | * URL: https://bootstrapmade.com/php-email-form/
4 | * Author: BootstrapMade.com
5 | */
6 | (function () {
7 | "use strict";
8 |
9 | let forms = document.querySelectorAll('.php-email-form');
10 |
11 | forms.forEach( function(e) {
12 | e.addEventListener('submit', function(event) {
13 | event.preventDefault();
14 |
15 | let thisForm = this;
16 |
17 | let action = thisForm.getAttribute('action');
18 | let recaptcha = thisForm.getAttribute('data-recaptcha-site-key');
19 |
20 | if( ! action ) {
21 | displayError(thisForm, 'The form action property is not set!');
22 | return;
23 | }
24 | thisForm.querySelector('.loading').classList.add('d-block');
25 | thisForm.querySelector('.error-message').classList.remove('d-block');
26 | thisForm.querySelector('.sent-message').classList.remove('d-block');
27 |
28 | let formData = new FormData( thisForm );
29 |
30 | if ( recaptcha ) {
31 | if(typeof grecaptcha !== "undefined" ) {
32 | grecaptcha.ready(function() {
33 | try {
34 | grecaptcha.execute(recaptcha, {action: 'php_email_form_submit'})
35 | .then(token => {
36 | formData.set('recaptcha-response', token);
37 | php_email_form_submit(thisForm, action, formData);
38 | })
39 | } catch(error) {
40 | displayError(thisForm, error);
41 | }
42 | });
43 | } else {
44 | displayError(thisForm, 'The reCaptcha javascript API url is not loaded!')
45 | }
46 | } else {
47 | php_email_form_submit(thisForm, action, formData);
48 | }
49 | });
50 | });
51 |
52 | function php_email_form_submit(thisForm, action, formData) {
53 | fetch(action, {
54 | method: 'POST',
55 | body: formData,
56 | headers: {'X-Requested-With': 'XMLHttpRequest'}
57 | })
58 | .then(response => {
59 | if( response.ok ) {
60 | return response.text();
61 | } else {
62 | throw new Error(`${response.status} ${response.statusText} ${response.url}`);
63 | }
64 | })
65 | .then(data => {
66 | thisForm.querySelector('.loading').classList.remove('d-block');
67 | if (data.trim() == 'OK') {
68 | thisForm.querySelector('.sent-message').classList.add('d-block');
69 | thisForm.reset();
70 | } else {
71 | throw new Error(data ? data : 'Form submission failed and no error message returned from: ' + action);
72 | }
73 | })
74 | .catch((error) => {
75 | displayError(thisForm, error);
76 | });
77 | }
78 |
79 | function displayError(thisForm, error) {
80 | thisForm.querySelector('.loading').classList.remove('d-block');
81 | thisForm.querySelector('.error-message').innerHTML = error;
82 | thisForm.querySelector('.error-message').classList.add('d-block');
83 | }
84 |
85 | })();
86 |
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/remixicon/remixicon.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/remixicon/remixicon.eot
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/remixicon/remixicon.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/remixicon/remixicon.ttf
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/remixicon/remixicon.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/remixicon/remixicon.woff
--------------------------------------------------------------------------------
/nebula/frontend/static/vendor/remixicon/remixicon.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CyberDataLab/nebula/b85d196cd9927503ed3514ad80684882d17fb018/nebula/frontend/static/vendor/remixicon/remixicon.woff2
--------------------------------------------------------------------------------
/nebula/frontend/templates/401.html:
--------------------------------------------------------------------------------
1 | {% extends "layout.html" %}
2 | {% block body %}
3 | {{ super() }}
4 |
5 |
6 | Unauthorized (401)
7 | You're not allowed to access.
8 |
9 |
10 | {% endblock %}
11 |
--------------------------------------------------------------------------------
/nebula/frontend/templates/403.html:
--------------------------------------------------------------------------------
1 | {% extends "layout.html" %}
2 | {% block body %}
3 | {{ super() }}
4 |
5 |
6 | Forbidden (403)
7 | This operation is forbidden.
8 |
9 |
10 | {% endblock %}
11 |
--------------------------------------------------------------------------------
/nebula/frontend/templates/404.html:
--------------------------------------------------------------------------------
1 | {% extends "layout.html" %}
2 | {% block body %}
3 | {{ super() }}
4 |
5 |
6 | Not Found (404)
7 | The resource cannot be found.
8 |
9 |
10 | {% endblock %}
11 |
--------------------------------------------------------------------------------
/nebula/frontend/templates/405.html:
--------------------------------------------------------------------------------
1 | {% extends "layout.html" %}
2 | {% block body %}
3 | {{ super() }}
4 |
5 |
6 | Method not allowed (405)
7 | The method of your request is not allowed.
8 |
9 |
10 | {% endblock %}
11 |
--------------------------------------------------------------------------------
/nebula/frontend/templates/413.html:
--------------------------------------------------------------------------------
1 | {% extends "layout.html" %}
2 | {% block body %}
3 | {{ super() }}
4 |
5 |
6 | Request is too big (413)
7 | Please check the size of the file you're uploading.
8 |
9 |
10 | {% endblock %}
11 |
--------------------------------------------------------------------------------
/nebula/frontend/templates/private.html:
--------------------------------------------------------------------------------
1 | {% extends "layout.html" %}
2 | {% block body %}
3 | {{ super() }}
4 |
5 |
6 |
7 | Private Area
8 | This is the private area of the scenario {{ scenario_name }}.
9 |
10 |
11 |
12 | {% endblock %}
13 |
--------------------------------------------------------------------------------
/nebula/frontend/templates/statistics.html:
--------------------------------------------------------------------------------
1 | {% extends "layout.html" %}
2 | {% block body %}
3 |
4 |
5 |
6 |
11 | {% endblock %}
12 |
13 | {% block footer %}
14 | {% endblock %}
15 |
--------------------------------------------------------------------------------