├── scrapers ├── firmware_files │ └── .gitkeep ├── lfwc_scraper │ ├── __init__.py │ ├── spiders │ │ ├── __init__.py │ │ ├── archive_linksys.py │ │ ├── ubiquiti.py │ │ ├── archive_avm.py │ │ ├── engenius.py │ │ ├── asus.py │ │ ├── dlink.py │ │ ├── edimax.py │ │ ├── trendnet.py │ │ ├── netgear.py │ │ ├── linksys.py │ │ ├── avm.py │ │ └── tplink.py │ ├── custom_requests.py │ ├── items.py │ ├── pipelines.py │ ├── settings.py │ ├── custom_spiders.py │ ├── handlers.py │ └── middlewares.py ├── scrape ├── scrapy.cfg ├── prepare ├── pyproject.toml ├── LICENSE └── README.md ├── replication ├── replicate-lfwc ├── replicate_lfwc │ ├── __init__.py │ ├── utils.py │ ├── types.py │ ├── fact.py │ └── __main__.py ├── prepare ├── pyproject.toml ├── README.md ├── scripts │ └── fetch-wayback-urls.py └── LICENSES │ ├── CC0-1.0.txt │ ├── CC-BY-4.0.txt │ └── CC-BY-SA-4.0.txt ├── downscaling ├── build_corpus ├── prepare ├── README.md └── _build_corpus.py ├── notebooks ├── figures │ ├── f1_challenges.pdf │ ├── f2_requirements.pdf │ ├── f8_corpus_classes.pdf │ ├── f10_corpus_architectures.pdf │ ├── f7_corpus_release_dates.pdf │ ├── f9_corpus_linux_banners.pdf │ ├── f3_literature_methodology.pdf │ ├── f5_requirement_score_literature_tricolor.pdf │ └── f4_relative_degree_of_measure_documentation_across_papers.pdf ├── jupyter ├── requirements.txt ├── prepare ├── public_data │ ├── lfwc-failed-masked.csv │ ├── literature_overview.csv │ └── literature_results.csv ├── README.md └── notebooks │ ├── _mask_lfwc.ipynb │ └── V_lfwc.ipynb ├── prepare ├── Vagrantfile ├── README.md └── .gitignore /scrapers/firmware_files/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/scrapers/scrape: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./.venv/bin/activate" 4 | scrapy crawl $1 -o "$1".json 5 | deactivate 6 | -------------------------------------------------------------------------------- /replication/replicate-lfwc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./.venv/bin/activate" 4 | replicate-lfwc "$@" 5 | deactivate 6 | -------------------------------------------------------------------------------- /downscaling/build_corpus: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./.venv/bin/activate" 4 | python3 _build_corpus.py "$@" 5 | deactivate 6 | -------------------------------------------------------------------------------- /notebooks/figures/f1_challenges.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f1_challenges.pdf -------------------------------------------------------------------------------- /notebooks/jupyter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python3 -m venv .venv 4 | source .venv/bin/activate 5 | 6 | jupyter lab 7 | 8 | deactivate 9 | -------------------------------------------------------------------------------- /downscaling/prepare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python3 -m venv .venv 4 | source "./.venv/bin/activate" 5 | 6 | pip install pandas 7 | 8 | 9 | -------------------------------------------------------------------------------- /notebooks/figures/f2_requirements.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f2_requirements.pdf -------------------------------------------------------------------------------- /notebooks/figures/f8_corpus_classes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f8_corpus_classes.pdf -------------------------------------------------------------------------------- /notebooks/figures/f10_corpus_architectures.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f10_corpus_architectures.pdf -------------------------------------------------------------------------------- /notebooks/figures/f7_corpus_release_dates.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f7_corpus_release_dates.pdf -------------------------------------------------------------------------------- /notebooks/figures/f9_corpus_linux_banners.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f9_corpus_linux_banners.pdf -------------------------------------------------------------------------------- /notebooks/figures/f3_literature_methodology.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f3_literature_methodology.pdf -------------------------------------------------------------------------------- /notebooks/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas 2 | matplotlib 3 | numpy 4 | jupyterlab 5 | isort 6 | seaborn 7 | 
black 8 | isort 9 | jupyterlab-code-formatter 10 | pandoc 11 | tabulate 12 | -------------------------------------------------------------------------------- /notebooks/figures/f5_requirement_score_literature_tricolor.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f5_requirement_score_literature_tricolor.pdf -------------------------------------------------------------------------------- /notebooks/prepare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python3 -m venv .venv 4 | source .venv/bin/activate 5 | 6 | sudo apt update && sudo apt install texlive-full -y 7 | pip install -r requirements.txt 8 | -------------------------------------------------------------------------------- /replication/replicate_lfwc/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 Fraunhofer FKIE 2 | # SPDX-FileContributor: Marten Ringwelski 3 | # 4 | # SPDX-License-Identifier: GPL-3.0-or-later 5 | -------------------------------------------------------------------------------- /prepare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | root=$(pwd) 4 | 5 | cd "${root}/replication" && ./prepare 6 | cd "${root}/scrapers" && ./prepare 7 | cd "${root}/notebooks" && ./prepare 8 | cd "${root}/downscaling" && ./prepare 9 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/__init__.py: -------------------------------------------------------------------------------- 1 | # This package will contain the spiders of your Scrapy project 2 | # 3 | # Please refer to the documentation for information on how to create and manage 4 | # your spiders. 
5 | -------------------------------------------------------------------------------- /notebooks/figures/f4_relative_degree_of_measure_documentation_across_papers.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fkie-cad/linux-firmware-corpus/HEAD/notebooks/figures/f4_relative_degree_of_measure_documentation_across_papers.pdf -------------------------------------------------------------------------------- /replication/prepare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf .venv 4 | python3 -m venv .venv 5 | source "./.venv/bin/activate" 6 | 7 | pip install . 8 | sudo apt update && sudo apt install aria2 -y 9 | sudo apt install jq -y 10 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/custom_requests.py: -------------------------------------------------------------------------------- 1 | from scrapy.http import Request 2 | 3 | 4 | class FTPRequest(Request): 5 | pass 6 | 7 | 8 | class FTPFileRequest(FTPRequest): 9 | pass 10 | 11 | 12 | class FTPListRequest(FTPRequest): 13 | pass 14 | -------------------------------------------------------------------------------- /scrapers/scrapy.cfg: -------------------------------------------------------------------------------- 1 | # Automatically created by: scrapy startproject 2 | # 3 | # For more information about the [deploy] section see: 4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html 5 | 6 | [settings] 7 | default = lfwc_scraper.settings 8 | 9 | [deploy] 10 | project = lfwc_scraper 11 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/items.py: -------------------------------------------------------------------------------- 1 | from scrapy.item import Field, Item 2 | 3 | 4 | class FirmwareItem(Item): 5 | vendor = Field(default=None) 6 | device_name = Field(default=None) 
7 | firmware_version = Field(default=None) 8 | device_class = Field(default=None) 9 | release_date = Field(default=None) 10 | source = Field(default=None) 11 | 12 | files = Field() 13 | file_urls = Field() 14 | -------------------------------------------------------------------------------- /scrapers/prepare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf .venv 4 | python3 -m venv .venv 5 | source "./.venv/bin/activate" 6 | 7 | pip install scrapy 8 | pip install . 9 | 10 | sudo add-apt-repository ppa:mozillateam/ppa -y 11 | echo ' 12 | Package: * 13 | Pin: release o=LP-PPA-mozillateam 14 | Pin-Priority: 1001 15 | ' | sudo tee /etc/apt/preferences.d/mozilla-firefox 16 | 17 | sudo apt update && sudo apt install firefox -y 18 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/pipelines.py: -------------------------------------------------------------------------------- 1 | from scrapy.pipelines.files import FilesPipeline 2 | 3 | 4 | class FirmwarePipeline(FilesPipeline): 5 | def file_path(self, request, response=None, info=None, *, item=None): 6 | return request.url.split("/")[-1] 7 | 8 | 9 | class HpPipeline(FirmwarePipeline): 10 | pass 11 | 12 | 13 | class LinksysPipeline(FirmwarePipeline): 14 | pass 15 | 16 | 17 | class AvmPipeline(FirmwarePipeline): 18 | pass 19 | 20 | 21 | class AsusPipeline(FirmwarePipeline): 22 | pass 23 | -------------------------------------------------------------------------------- /scrapers/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "lfwc_scraper" 3 | authors = [ 4 | {name = "René Helmke", email = "rene.helmke@fkie.fraunhofer.de"} 5 | ] 6 | readme = "README.md" 7 | requires-python = ">=3.10" 8 | license = {file = "LICENSE"} 9 | version = "0.1.0" 10 | 11 | dependencies = [ 12 | "scrapy==2.11.2", 13 | "selenium==4.21.0", 14 | 
"webdriver-manager==4.0.1", 15 | ] 16 | 17 | [tool.setuptools] 18 | py-modules = [] 19 | 20 | [tool.black] 21 | line-length = 119 22 | target-version = ["py312"] 23 | include = "firmware" 24 | -------------------------------------------------------------------------------- /notebooks/public_data/lfwc-failed-masked.csv: -------------------------------------------------------------------------------- 1 | id,manufacturer,device_name,firmware_version,release_date,device_class,filename,source_type,source_link,files_in_firmware,compressed_firmware_size,firmware_mime,top_level_unpacker,md5,sha1,sha256,sha512,ssdeep,tlsh,elf_architectures,linux_banners,wayback 2 | 0,manufacturer,device,1.0,1970-01-01,corpus,lfwc.zip,github,https://doi.org/10.5281/zenodo.12659436,3,TODO_SIZE,application/zip,7z,TODO_md5,TODO_sha1,TODO_sha256,TODO_sha512,TODO_ssdeep,TODO_tlsh,mips|arm|esp|m68k|ppc|riscv|s/390|SPARC|x86,Linux kernel version 1.0 - 5.4,https://archive.org/TODO_WAYBACK_LINK 3 | -------------------------------------------------------------------------------- /replication/replicate_lfwc/utils.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 Fraunhofer FKIE 2 | # SPDX-FileContributor: Marten Ringwelski 3 | # 4 | # SPDX-License-Identifier: GPL-3.0-or-later 5 | 6 | 7 | def _encode_forwared_slash(string: str): 8 | return string.replace("/", "%2f") 9 | 10 | 11 | def image_path_from_firmware_image(image: "FirmwareImage") -> str: # noqa: F821 12 | return "/".join( 13 | _encode_forwared_slash(name) 14 | for name in [ 15 | image.class_, 16 | image.vendor, 17 | image.device, 18 | f"{image.sha256}:{image.filename}", 19 | ] 20 | ) 21 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/settings.py: -------------------------------------------------------------------------------- 1 | # Scrapy settings for firmware project 2 | 3 | BOT_NAME = "lfwc_scraper" 4 | 5 | 
SPIDER_MODULES = ["lfwc_scraper.spiders"] 6 | NEWSPIDER_MODULE = "lfwc_scraper.spiders" 7 | 8 | FILES_STORE = "firmware_files/" 9 | 10 | # Obey robots.txt rules 11 | ROBOTSTXT_OBEY = True 12 | DOWNLOAD_TIMEOUT = 320 13 | LOG_LEVEL = "DEBUG" 14 | FTP_USER = "anonymous" 15 | FTP_PASSWORD = "guest" 16 | 17 | DOWNLOAD_HANDLERS = {"ftp": "lfwc_scraper.handlers.FTPHandler"} 18 | 19 | DOWNLOADER_MIDDLEWARES = { 20 | "lfwc_scraper.middlewares.FirmwareDownloaderMiddleware": 543, 21 | } 22 | 23 | ITEM_PIPELINES = { 24 | "lfwc_scraper.pipelines.HpPipeline": 300, 25 | "lfwc_scraper.pipelines.AsusPipeline": 300, 26 | "lfwc_scraper.pipelines.AvmPipeline": 1, 27 | "lfwc_scraper.pipelines.LinksysPipeline": 1, 28 | } 29 | -------------------------------------------------------------------------------- /replication/pyproject.toml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 Fraunhofer FKIE 2 | # SPDX-FileContributor: Marten Ringwelski 3 | # 4 | # SPDX-License-Identifier: GPL-3.0-or-later 5 | 6 | [tool.poetry] 7 | name = "replicate-lfwc" 8 | version = "0.1.0" 9 | description = "" 10 | authors = ["Marten Ringwelski "] 11 | readme = "README.md" 12 | license = "GPL-3.0-or-later" 13 | 14 | [tool.poetry.scripts] 15 | replicate-lfwc = 'replicate_lfwc.__main__:cli' 16 | 17 | [tool.poetry.dependencies] 18 | scrapy = "^2.11.2" 19 | python = "^3.10" 20 | click = "^8.1.7" 21 | pandas = "^2.2.1" 22 | requests = "^2.31.0" 23 | attrs = "^23.2.0" 24 | 25 | 26 | [tool.poetry.group.dev.dependencies] 27 | ruff = "^0.3.2" 28 | pre-commit = "^3.7.0" 29 | reuse = "^3.0.1" 30 | 31 | [build-system] 32 | requires = ["poetry-core"] 33 | build-backend = "poetry.core.masonry.api" 34 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/custom_spiders.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta 2 | from typing import 
Generator 3 | 4 | from scrapy import Spider 5 | from scrapy.loader import ItemLoader 6 | 7 | from lfwc_scraper.custom_requests import FTPFileRequest, FTPListRequest 8 | from lfwc_scraper.items import FirmwareItem 9 | 10 | 11 | class FirmwareSpider(Spider, metaclass=ABCMeta): 12 | 13 | @staticmethod 14 | def item_pipeline(meta_data: dict) -> Generator[FirmwareItem, None, None]: 15 | loader = ItemLoader(item=FirmwareItem(), selector=meta_data["file_urls"]) 16 | for key, value in meta_data.items(): 17 | loader.add_value(key, value) 18 | yield loader.load_item() 19 | 20 | 21 | class FTPSpider(FirmwareSpider, metaclass=ABCMeta): 22 | 23 | def start_requests(self): 24 | for url in self.start_urls: 25 | yield FTPListRequest(url) if url.endswith("/") else FTPFileRequest(url) 26 | -------------------------------------------------------------------------------- /scrapers/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024 Fraunhofer FKIE 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 Fraunhofer FKIE 2 | # SPDX-FileContributor: Marten Ringwelski 3 | # 4 | # SPDX-License-Identifier: GPL-3.0-or-later 5 | 6 | # The port that should expose the FACT frontend 7 | PORT = 5000 8 | # The number of CPUs to assign to the virtual machine 9 | CPUS = 4 10 | # The amount of RAM in megabytes 11 | RAM = 16 * 1024 12 | # The disksize to use. If you intend to upload the whole corpus, 13 | # you need about 5,000GB. 14 | DISK = "100GB" 15 | 16 | Vagrant.configure("2") do |config| 17 | config.vagrant.plugins = "vagrant-disksize" 18 | 19 | config.vm.box = "fact-cad/FACT-master" 20 | config.vm.box_version = "20231223" 21 | 22 | config.vm.network "forwarded_port", guest: 5000, host: PORT # FACT port 23 | 24 | config.vm.synced_folder ".", "/vagrant", disabled: true 25 | config.vm.synced_folder ".", "/home/vagrant/linux-firmware-corpus" 26 | config.disksize.size = DISK 27 | 28 | config.ssh.extra_args = ["-L", "8888:localhost:8888", "-L" "8889:localhost:8889"] 29 | 30 | config.vm.provision "shell", inline: "cd ~/linux-firmware-corpus/ && ./prepare", privileged: false 31 | 32 | config.vm.provider "virtualbox" do |vb| 33 | vb.gui = false 34 | vb.cpus = CPUS 35 | vb.memory = RAM 36 | end 37 | end 38 | 39 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/handlers.py: -------------------------------------------------------------------------------- 1 | from json import dumps 2 | 3 | from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler 4 | from 
scrapy.http import TextResponse 5 | from twisted.protocols.ftp import FTPFileListProtocol 6 | 7 | from lfwc_scraper.custom_requests import FTPListRequest 8 | 9 | # Thanks to https://gearheart.io/articles/crawling-ftp-server-with-scrapy/ 10 | 11 | 12 | class FTPHandler(FTPDownloadHandler): 13 | 14 | def __init__(self, settings): 15 | self.result = None 16 | super().__init__(settings) 17 | 18 | def gotClient(self, client, request, filepath): 19 | # download file 20 | if isinstance(request, FTPListRequest): 21 | # ftp listings 22 | proto = FTPFileListProtocol() 23 | result = client.list(filepath, proto).addCallbacks( 24 | callback=self._build_listing_response, 25 | callbackArgs=[request, proto], 26 | errback=self._failed, 27 | errbackArgs=[request], 28 | ) 29 | client.quit() 30 | return result 31 | 32 | result = super().gotClient(client, request, filepath) 33 | client.quit() 34 | return result 35 | 36 | def _build_listing_response(self, result, request, protocol): 37 | # encode ftp listings in TextResponse JSON structure 38 | self.result = result 39 | return TextResponse(url=request.url, status=200, body=dumps(protocol.files), encoding="utf-8") 40 | -------------------------------------------------------------------------------- /scrapers/README.md: -------------------------------------------------------------------------------- 1 | # scrapers 2 | 3 | This subfolder archives all scrapers used to obtain the raw samples from all 10 vendors included in LFwC: 4 | 5 | 1. [ASUS](https://www.asus.com) 6 | 2. [AVM](https://avm.de) 7 | 3. [D-Link](https://www.dlink.com/) 8 | 4. [EDIMAX](https://www.edimax.com/edimax/global/) 9 | 5. [ENGENIUS](https://www.engeniustech.com) 10 | 6. [Linksys](https://www.linksys.com/) 11 | 7. [NETGEAR](https://netgear.com) 12 | 8. [TP-Link](https://www.tp-link.com/) 13 | 9. [TRENDnet](https://www.trendnet.com) 14 | 10. 
[Ubiquiti](https://www.ui.com) 15 | 16 | ## Note 17 | 18 | The scrapers in this directory are for archival purposes and their use is discouraged. 19 | They are not an appropriate tool to replicate LFWC because website layouts change and sample availability fluctuates over time. 20 | Thus, it is likely that various scrapers in this project do not work anymore. 21 | 22 | To replicate the corpus, please refer to the autodownloader tools that work in conjunction with the `.csv` metadata we distribute. 23 | They use the official direct download links and fall back to archive.org when the original source no longer exists. Thanks! 24 | 25 | ## Setup 26 | 27 | ```bash 28 | python3 -m venv .venv 29 | source .venv/bin/activate 30 | pip install . 31 | ``` 32 | 33 | For more information about scrapy, [see here.](https://docs.scrapy.org/en/latest/intro/install.html#intro-install) 34 | 35 | ## Use 36 | 37 | ```bash 38 | scrapy crawl -o 39 | ``` 40 | 41 | ## Available Scrapers 42 | 43 | ```plain 44 | archive_avm 45 | archive_linksys 46 | asus 47 | avm 48 | edimax 49 | engenius 50 | linksys 51 | netgear 52 | tplink 53 | trendnet 54 | ubiquiti 55 | ``` 56 | -------------------------------------------------------------------------------- /downscaling/README.md: -------------------------------------------------------------------------------- 1 | # downscaling 2 | 3 | This folder holds all scripts to create downscaled, mini versions of LFwC. The function is intuitive: take `lfwc-full.csv` and pseudorandomly pick a set of samples from it for each manufacturer to create `lfwc-mini.csv`. 4 | 5 | The default selects five samples from each manufacturer and excludes firmware images larger than `30 MiB`. Use a `--seed` to share and/or persist random picks. 
6 | 7 | ## Install dependencies 8 | 9 | ```sh 10 | ./prepare 11 | ``` 12 | 13 | ## Synopsis 14 | 15 | ```sh 16 | ./build_corpus --help 17 | usage: build_corpus [-h] [--full_corpus FULL_CORPUS] [--output OUTPUT] [--samples_per_manufacturer SAMPLES_PER_MANUFACTURER] [--seed SEED] [--max_fw_size MAX_FW_SIZE] 18 | [--overwrite | --no-overwrite] 19 | 20 | options: 21 | -h, --help show this help message and exit 22 | --full_corpus FULL_CORPUS 23 | path to full corpus csv 24 | --output OUTPUT path to output corpus csv (default: '../../notebooks/public_data/lfwc-mini.csv') 25 | --samples_per_manufacturer SAMPLES_PER_MANUFACTURER 26 | samples per manufacturer (default: 5) 27 | --seed SEED PRNG seed (default: 0) 28 | --max_fw_size MAX_FW_SIZE 29 | max firmware size (bytes, default: 31457280 (30MB)) 30 | --overwrite, --no-overwrite 31 | Overwrite if output file already exists (default: false) 32 | ``` 33 | 34 | ## Folder Structure Deciphered 35 | 36 | 37 | ```plain 38 | . 39 | ├── build_corpus # wrapper script for _build_corpus.py in an isolated venv 40 | ├── _build_corpus.py # corpus creation script 41 | └── prepare # install dependencies and create venv 42 | ``` 43 | -------------------------------------------------------------------------------- /replication/README.md: -------------------------------------------------------------------------------- 1 | # replication 2 | 3 | This folder contains a replication tool for LFwC. 4 | 5 | It contains two functionalities: 6 | * A lightweight wrapper around [aria2][aria2] to download the firmware corpus 7 | * An upload script to pass firmware to [FACT][fact] 8 | 9 | The paper describes two files containing firmware meta data for replication: 10 | 11 | - `lfwc-full.csv` - 10,913 fully unpackable linux firmware images 12 | - `lfwc-failed.csv` - 3,670 firmware images that failed unpacking verification 13 | 14 | These files work in conjunction with the tool, **but are not part of this repository**. 
Please request the corpus meta data via Zenodo. 15 | 16 | ## Installation 17 | 18 | First, install the system dependencies: 19 | 20 | - [aria2][aria2]: Required for the `download` command (version 1.37.0 at the time of writing) 21 | - [FACT][fact]: Required for the `upload-to-fact` command (See Section FACT Vagrant Image). 22 | 23 | The tool can be installed with `pip`. 24 | 25 | ```sh 26 | $ pip install . 27 | ``` 28 | 29 | ## Usage 30 | After installing as described above, the script is exposed as 31 | `replicate-lfwc`. 32 | ``` 33 | Usage: python -m replicate_lfwc [OPTIONS] COMMAND [ARGS]... 34 | 35 | Use aria2 to download LFWC 36 | 37 | Options: 38 | --corpus-csv PATH Path to corpus.csv. Either use the file vendored with the paper, or a compatible file. [required] 39 | --help Show this message and exit. 40 | 41 | Commands: 42 | download Download all missing files. 43 | dump-aria2-input Dump the input file that can be used directly with aria2c. 44 | upload-to-fact Upload the firmware corpus to fact. 45 | verify Prints the path and status of all firmwares that have a status other than 'success' to stderr. 
46 | ``` 47 | 48 | [aria2]: https://github.com/aria2/aria2 49 | [poetry]: https://python-poetry.org/ 50 | [fact]: https://github.com/fkie-cad/FACT_core/ 51 | -------------------------------------------------------------------------------- /notebooks/public_data/literature_overview.csv: -------------------------------------------------------------------------------- 1 | "Paper","Conference","Year","Type","Method","Scalable" 2 | "Cui et al.","NDSS","2013","S","P","Y" 3 | "Costin et al.","USENIX","2014","S","P","Y" 4 | "Avatar","NDSS","2014","H","SE;HIL;E","N" 5 | "Pewny et al.","SP","2015","S","CS","U" 6 | "Firmalice","NDSS","2015","S","SE;FA","U" 7 | "PIE","ACSAC","2015","S","FA;ML","U" 8 | "FIRMADYNE","NDSS","2016","D","E","Y" 9 | "discovRE","NDSS","2016","S","CS","Y" 10 | "Costin et al.","Asia CCS","2016","H","P;E","Y" 11 | "Genius","CCS","2016","S","CS;ML","Y" 12 | "BootStomp","USENIX","2017","S","SE;FA","U" 13 | "FirmUSB","CCS","2017","S","SE","U" 14 | "Gemini","CCS","2017","S","CS;ML","Y" 15 | "Muench et al.","NDSS","2018","H","E;HIL;F","N" 16 | "DTaint","DSN","2018","S","FA","Y" 17 | "Tian et al.","USENIX","2018","S","P","Y" 18 | "VulSeeker","ASE","2018","S","CS;ML","Y" 19 | "FirmUp","ASPLOS","2018","S","CS","Y" 20 | "IoTFuzzer","NDSS","2018","D","HIL;F","N" 21 | "FIRM-AFL","USENIX","2019","D","E;F","Y" 22 | "FirmFuzz","IoT SP","2019","H","E;F","Y" 23 | "SRFuzzer","ACSAC","2019","D","HIL;F","N" 24 | "Pretender","RAID","2019","D","E;HIL","N" 25 | "HALucinator","USENIX","2020","H","E;F","U" 26 | "FirmScope","USENIX","2020","S","FA","Y" 27 | "PDiff","CCS","2020","S","SA","Y" 28 | "P$^2$IM","USENIX","2020","H","E;F","U" 29 | "Karonte","SP","2020","S","FA","Y" 30 | "Laelaps","ACSAC","2020","H","E;SE;F","Y" 31 | "FirmAE","ACSAC","2020","H","E;F","Y" 32 | "CPscan","CCS","2021","S","FA","Y" 33 | "Diane","SP","2021","H","HIL;FA;F","N" 34 | "DICE","SP","2021","D","E;F","Y" 35 | "ECMO","CCS","2021","H","E","Y" 36 | "iFIZZ","ASE","2021","H","E;HIL;F","U" 37 | 
"Jetset","USENIX","2021","H","SE;E","U" 38 | "SaTC","USENIX","2021","S","FA","Y" 39 | "Snipuzz","CCS","2021","D","HIL;F","N" 40 | "$\mu$Emu","USENIX","2021","H","SE;E;F","Y" 41 | "SymLM","CCS","2022","S","ML","Y" 42 | "Marcelli et al.","USENIX","2022","S","ML","Y" 43 | "Greenhouse","USENIX","2023","H","E;FA;F","Y" 44 | "FirmSolo","USENIX","2023","H","E;F","Y" 45 | "VulHawk","NDSS","2023","S","CS;ML","Y" 46 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/archive_linksys.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime 3 | from json import loads 4 | 5 | from scrapy.http import Response 6 | 7 | from lfwc_scraper.custom_spiders import FirmwareSpider 8 | 9 | 10 | class ArchiveLinksys(FirmwareSpider): 11 | name = "archive_linksys" 12 | allowed_domains = ["web.archive.org"] 13 | start_urls = [ 14 | "https://web.archive.org/cdx/search/cdx?url=downloads.linksys.com&matchType=prefix&limit=10000&output=json" 15 | "&filter=urlkey:.*firmware.*&filter=!urlkey:.*(txt|pdf)$&filter=mimetype:application.*" 16 | ] 17 | 18 | custom_settings = { 19 | "ROBOTSTXT_OBEY": True, 20 | "CONCURRENT_REQUESTS": 10, 21 | "CONCURRENT_ITEMS": 1, 22 | "DOWNLOAD_DELAY": 0.75, 23 | "RANDOMIZE_DOWNLOAD_DELAY": True, 24 | "REFERER_ENABLED": False, 25 | } 26 | 27 | meta_regex = { 28 | "device_name": re.compile(r"^(?:Produkt|Controller)\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 29 | "firmware_version": re.compile(r"^Version\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 30 | "release_date": re.compile(r"^(?:Release-Datum|Build)\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 31 | } 32 | 33 | def parse(self, response: Response, **_): 34 | images_in_archive = loads(response.text)[1:] 35 | 36 | for _, archive_timestamp, original_url, _, _, _, _ in images_in_archive: 37 | image_url = 
f"https://web.archive.org/web/{archive_timestamp}if_/{original_url}" 38 | meta_data = { 39 | "vendor": "linksys", 40 | "source": ["archive.org"], 41 | "file_urls": [image_url], 42 | "device_name": image_url.split("/")[-1], 43 | "device_class": "manual", 44 | "firmware_version": "manual", 45 | "release_date": [datetime.strptime(archive_timestamp, "%Y%m%d%H%M%S").isoformat()], 46 | } 47 | 48 | yield from self.item_pipeline(meta_data) 49 | -------------------------------------------------------------------------------- /replication/scripts/fetch-wayback-urls.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 Fraunhofer FKIE 2 | # SPDX-FileContributor: Marten Ringwelski 3 | # 4 | # SPDX-License-Identifier: CC0-1.0 5 | 6 | """Usage: 7 | fetch-wayback-urls.py corpus.csv 8 | 9 | Adds a column "wayback" to the given csv and writes the csv to corpus.csv-wayback. 10 | """ 11 | 12 | import sys 13 | import time 14 | import urllib.parse 15 | 16 | import pandas as pd 17 | import requests 18 | 19 | 20 | def wayback_url_from_url(url: str) -> str | None: 21 | parsed = urllib.parse.urlparse(url) 22 | if parsed.scheme not in ["http", "https"]: 23 | return None 24 | 25 | if parsed.hostname in ["archive.org", "www.archive.org"]: 26 | return url 27 | 28 | response = requests.get( 29 | "https://archive.org/wayback/available", 30 | params={ 31 | "url": url, 32 | }, 33 | ) 34 | response.raise_for_status() 35 | data = response.json() 36 | archived_snapshots = data["archived_snapshots"] 37 | if len(archived_snapshots) == 0: 38 | return None 39 | 40 | closest = archived_snapshots.get("closest") 41 | 42 | return closest["url"] 43 | 44 | 45 | def main(): 46 | if len(sys.argv) != 2: # noqa: PLR2004 47 | print("Please provide the path to corpus.csv as the only cli argument") 48 | sys.exit(1) 49 | 50 | path = sys.argv[1] 51 | df = pd.read_csv( 52 | path, 53 | index_col=0, 54 | ) 55 | 56 | BACKOFF = 60 57 | 58 | def wrapper(url): 
59 | while True: 60 | try: 61 | ret = wayback_url_from_url(url) 62 | return ret 63 | except requests.HTTPError as e: 64 | if e.response.status != 429: # noqa: PLR2004 65 | print() 66 | print(url) 67 | print(e) 68 | return None 69 | 70 | print("Backing off") 71 | time.sleep(BACKOFF) 72 | except Exception as e: 73 | print() 74 | print(url) 75 | print(e) 76 | return None 77 | 78 | df["wayback"] = df["source_link"].apply(wrapper) 79 | df.to_csv(path + "-wayback") 80 | 81 | 82 | if __name__ == "__main__": 83 | main() 84 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/ubiquiti.py: -------------------------------------------------------------------------------- 1 | import json 2 | from datetime import datetime 3 | from json import JSONDecodeError 4 | from typing import Generator 5 | 6 | from scrapy import Request 7 | from scrapy.http import Response 8 | from typing_extensions import override 9 | 10 | from lfwc_scraper.custom_spiders import FirmwareSpider 11 | from lfwc_scraper.items import FirmwareItem 12 | 13 | 14 | class Ubiquiti(FirmwareSpider): 15 | name = "ubiquiti" 16 | manufacturer = "ubiquiti" 17 | 18 | custom_settings = { 19 | "ROBOTSTXT_OBEY": True, 20 | "CONCURRENT_REQUESTS": 1, 21 | "CONCURRENT_ITEMS": 1, 22 | "DOWNLOAD_DELAY": 0.75, 23 | "RANDOMIZE_DOWNLOAD_DELAY": True, 24 | "REFERER_ENABLED": False, 25 | } 26 | 27 | @override 28 | def start_requests(self): 29 | yield Request( 30 | url=f"https://download.svc.ui.com/v1/downloads?page=1", 31 | callback=self.parse_page, 32 | cb_kwargs={"current_page": 1}, 33 | ) 34 | 35 | def parse_page(self, response: Response, current_page: int, **_) -> Generator[Request | FirmwareItem, None, None]: 36 | try: 37 | data = json.loads(response.body.decode()) 38 | except (UnicodeDecodeError, JSONDecodeError): 39 | return 40 | 41 | for item in data["downloads"]: 42 | if item["category"]["name"].lower() != "firmware": 43 | continue 44 | download_link = 
item["file_path"] 45 | release_date = datetime.fromisoformat(item["date_published"]).isoformat() 46 | firmware_version = item["version"] 47 | device_name = item["name"] 48 | device_class = device_name 49 | meta_data = { 50 | "vendor": "ubiquiti", 51 | "release_date": release_date, 52 | "device_name": device_name, 53 | "firmware_version": firmware_version, 54 | "device_class": device_class, 55 | "file_urls": download_link, 56 | } 57 | yield from self.item_pipeline(meta_data) 58 | 59 | next_page = current_page + 1 60 | if next_page > data["pagination"]["totalPages"]: 61 | return 62 | yield Request( 63 | url=f"https://download.svc.ui.com/v1/downloads?page={next_page}", 64 | callback=self.parse_page, 65 | cb_kwargs={ 66 | "current_page": next_page, 67 | }, 68 | ) 69 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/archive_avm.py: -------------------------------------------------------------------------------- 1 | import re 2 | from json import loads 3 | 4 | from scrapy.http import Response 5 | 6 | from lfwc_scraper.custom_spiders import FirmwareSpider 7 | 8 | 9 | class ArchiveAVM(FirmwareSpider): 10 | name = "archive_avm" 11 | allowed_domains = ["web.archive.org"] 12 | start_urls = [ 13 | "https://web.archive.org/cdx/search/cdx?url=download.avm.de&matchType=prefix&limit=10000" 14 | "&filter=urlkey:.*(image|zip|bin|raw)$&output=json&filter=!urlkey:.*(misc|other|english|englisch).*" 15 | "&filter=statuscode:200" 16 | ] 17 | 18 | custom_settings = { 19 | "ROBOTSTXT_OBEY": True, 20 | "CONCURRENT_REQUESTS": 10, 21 | "CONCURRENT_ITEMS": 1, 22 | "DOWNLOAD_DELAY": 0.75, 23 | "RANDOMIZE_DOWNLOAD_DELAY": True, 24 | "REFERER_ENABLED": False, 25 | } 26 | 27 | meta_regex = { 28 | "device_name": re.compile(r"^(?:Produkt|Controller)\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 29 | "firmware_version": re.compile(r"^Version\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 30 | "release_date": 
re.compile(r"^(?:Release-Datum|Build)\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 31 | } 32 | 33 | def parse(self, response: Response, **_): 34 | images_in_archive = loads(response.text)[1:] 35 | 36 | for _, archive_timestamp, original_url, _, _, _, _ in images_in_archive: 37 | if "develper" in original_url: 38 | continue 39 | image_url = f"https://web.archive.org/web/{archive_timestamp}if_/{original_url}" 40 | meta_data = { 41 | "vendor": "AVM", 42 | "source": "archive.org", 43 | "file_urls": [image_url], 44 | "device_name": image_url.split("/")[-1], 45 | "device_class": self.map_device_class(image_path=image_url), 46 | "firmware_version": "manual", 47 | "release_date": "", 48 | } 49 | 50 | yield from self.item_pipeline(meta_data) 51 | 52 | @staticmethod 53 | def map_device_class(image_path: str) -> str: 54 | # /fritzbox///fritz.os/ 55 | if any(substr in image_path.lower() for substr in ["repeater", "repeater"]): 56 | return "repeater" 57 | if "fritzwlan-usb" in image_path.lower(): 58 | return "wifi-usb" 59 | if "powerline" in image_path.lower(): 60 | return "powerline" 61 | if any(substr in image_path.lower() for substr in ["box.", "box_"]): 62 | return "router" 63 | return "unknown" 64 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/engenius.py: -------------------------------------------------------------------------------- 1 | import json 2 | from datetime import datetime 3 | from json import JSONDecodeError 4 | from typing import Generator 5 | 6 | from scrapy import Request 7 | from scrapy.http import Response 8 | from typing_extensions import override 9 | 10 | from lfwc_scraper.custom_spiders import FirmwareSpider 11 | from lfwc_scraper.items import FirmwareItem 12 | 13 | 14 | class Engenius(FirmwareSpider): 15 | name = "engenius" 16 | manufacturer = "engenius" 17 | 18 | custom_settings = { 19 | "ROBOTSTXT_OBEY": True, 20 | "CONCURRENT_REQUESTS": 1, 21 | "CONCURRENT_ITEMS": 1, 22 | 
"DOWNLOAD_DELAY": 0.75, 23 | "RANDOMIZE_DOWNLOAD_DELAY": True, 24 | "REFERER_ENABLED": False, 25 | } 26 | 27 | @override 28 | def start_requests(self): 29 | yield Request( 30 | url=f"https://www.engeniusnetworks.eu/wp-json/wp/v2/file?status=publish&page=1&per_page=100&type=file", 31 | callback=self.parse_page, 32 | cb_kwargs={"current_page": 1}, 33 | ) 34 | 35 | def parse_page(self, response: Response, current_page: int, **_) -> Generator[Request | FirmwareItem, None, None]: 36 | try: 37 | data = json.loads(response.body.decode()) 38 | except (UnicodeDecodeError, JSONDecodeError): 39 | return 40 | 41 | for item in data: 42 | if item["acf"]["type"].lower() != "firmware": 43 | continue 44 | download_link = item["acf"]["download_link"]["url"] 45 | release_date = datetime.strptime( 46 | item["acf"]["download_link"]["date"].split(" ")[0], "%Y-%m-%d" 47 | ).isoformat() 48 | firmware_version = item["acf"]["version"] 49 | try: 50 | device_class = item["pure_taxonomies"]["categories"][0]["category_nicename"] 51 | except Exception: 52 | device_class = "unknown" 53 | device_name = item["acf"]["download_link"]["name"] 54 | meta_data = { 55 | "vendor": "engenius", 56 | "release_date": release_date, 57 | "device_name": device_name, 58 | "firmware_version": firmware_version, 59 | "device_class": device_class, 60 | "file_urls": download_link, 61 | } 62 | yield from self.item_pipeline(meta_data) 63 | 64 | yield Request( 65 | url="https://www.engeniusnetworks.eu/wp-json/wp/v2/file?status=publish" 66 | f"&page={current_page + 1}&per_page=100&type=file", 67 | callback=self.parse_page, 68 | cb_kwargs={"current_page": current_page + 1}, 69 | ) 70 | -------------------------------------------------------------------------------- /downscaling/_build_corpus.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | from pathlib import Path 5 | 6 | import pandas as pd 7 | 8 | 9 | def parse_args(): 10 | parser = 
argparse.ArgumentParser("build_corpus") 11 | parser.add_argument( 12 | "--full_corpus", 13 | required=True, 14 | type=Path, 15 | help="path to full corpus csv", 16 | ) 17 | parser.add_argument( 18 | "--output", 19 | default=Path("../../notebooks/public_data/lfwc-mini.csv"), 20 | required=False, 21 | type=Path, 22 | help="path to output corpus csv (default: '../../notebooks/public_data/lfwc-mini.csv')", 23 | ) 24 | parser.add_argument( 25 | "--samples_per_manufacturer", 26 | default=5, 27 | type=int, 28 | required=False, 29 | help="samples per manufacturer (default: 5)", 30 | ) 31 | parser.add_argument( 32 | "--seed", default=0, type=int, required=False, help="PRNG seed (default: 0)" 33 | ) 34 | parser.add_argument( 35 | "--max_fw_size", 36 | default=31457280, 37 | type=int, 38 | required=False, 39 | help="max firmware size (bytes, default: 31457280 (30MB))", 40 | ) 41 | 42 | parser.add_argument( 43 | "--overwrite", 44 | default=False, 45 | type=bool, 46 | required=False, 47 | action=argparse.BooleanOptionalAction, 48 | help="Overwrite if output file already exists (default: false)", 49 | ) 50 | return parser.parse_args() 51 | 52 | 53 | def main(): 54 | args = parse_args() 55 | if args.output.exists() and not args.overwrite: 56 | print( 57 | f"WARNING: output file '{args.output}' already exists. If you are sure you want to overwrite it, restart with '--overwrite' flag." 
58 | ) 59 | return 60 | 61 | df = pd.read_csv(args.full_corpus, index_col="id") 62 | 63 | print("Full Corpus File Preview") 64 | print("=========================") 65 | print(df) 66 | print("=========================") 67 | 68 | sampled_subsets = [] 69 | for manufacturer in df["manufacturer"].unique(): 70 | fixed_manufacturer = df["manufacturer"] == manufacturer 71 | size_constraint = df["compressed_firmware_size"] <= args.max_fw_size 72 | subset = df[fixed_manufacturer & size_constraint] 73 | samples = subset.sample(args.samples_per_manufacturer, random_state=args.seed) 74 | sampled_subsets += [samples] 75 | print(f"Sampled Subset for {manufacturer}") 76 | print("=========================") 77 | print(samples) 78 | print("=========================") 79 | 80 | print("Done!") 81 | mini_corpus = pd.concat(sampled_subsets) 82 | print(f"Final mini corpus shape: {mini_corpus.shape}") 83 | print("=========================") 84 | print(mini_corpus) 85 | print("=========================") 86 | mini_corpus.to_csv(args.output) 87 | print(f"Saved mini corpus to {args.output}") 88 | 89 | 90 | if __name__ == "__main__": 91 | main() 92 | -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # notebooks 2 | 3 | This folder holds all notebooks that were used in our paper. Feel free to interactively explore the data contained here! 4 | 5 | ## Install dependenices 6 | 7 | ```sh 8 | ./prepare 9 | ``` 10 | 11 | ## Run Jupyter Lab 12 | 13 | ```sh 14 | ./jupyter 15 | ``` 16 | 17 | ## Folder Structure Deciphered 18 | 19 | 20 | ```plain 21 | . 22 | ├── figures 23 | │   ├── f10_corpus_architectures.pdf # corpus architecture distribution figure 24 | │   ├── f1_challenges.pdf # firmware corpus creation challenges from Sec. II (InkScape) 25 | │   ├── f2_requirements.pdf # Corpus Requirements from Sec. 
III (InkScape) 26 | │   ├── f3_literature_methodology.pdf # Literature Methodology from Sec. IV (InkScape) 27 | │   ├── f4_relative_degree_of_measure_documentation_across_papers.pdf # measure score results from Sec. IV 28 | │   ├── f5_requirement_score_literature_tricolor.pdf # requirement score results from Sec. IV 29 | │   ├── f7_corpus_release_dates.pdf # corpus release date distribution figure 30 | │   ├── f8_corpus_classes.pdf # corpus class distribution figure 31 | │   └── f9_corpus_linux_banners.pdf # linux banner analysis figure 32 | ├── jupyter # run jupyter lab 33 | ├── notebooks # holds all Jupyter Notebooks 34 | │   ├── IV_literature_review.ipynb # literature review analysis from our paper (Sec. IV) 35 | │   ├── V_lfwc.ipynb # LFwC meta data analysis from our paper (Sec. V) 36 | │   ├── _mask_lfwc.ipynb # (internal use, for documentation: mask full lfwc versions) 37 | │   └── html_output # pre-rendered versions of the jupyter notebooks 38 | │   ├── IV_literature_review.html # IV_literature_review.ipynb 39 | │   ├── V_lfwc.html # V_lfwc.ipynb 40 | │   └── _mask_lfwc.html # _mask_lfwc.ipynb 41 | ├── prepare # install dependencies script for the notebooks. Works with Ubuntu. 42 | ├── public_data # publicly shared data sets 43 | │   ├── lfwc-failed-masked.csv # masked version of lfwc-failed with all replication data removed. Request full version at Zenodo. 44 | │   ├── lfwc-masked.csv # masked version of lfwc-full with all replication data removed. Request full version at Zenodo. 45 | │   ├── literature_overview.csv # raw data for Tab. I in the paper 46 | │   ├── literature_results.csv # raw data for Tab. 
II in the paper 47 | │   └── routersploit_mapping.json # routersploit ground truth mapping to lfwc samples 48 | └── requirements.txt # python requirements 49 | ``` 50 | 51 | ## Additional Resources 52 | 53 | - [Jupyter Documentation & Getting Started](https://docs.jupyter.org/en/latest/#) 54 | - [pandas Documentation & Getting Started](https://pandas.pydata.org/docs/getting_started/index.html) 55 | 56 | -------------------------------------------------------------------------------- /notebooks/public_data/literature_results.csv: -------------------------------------------------------------------------------- 1 | "Paper","Packed","Unpacked","Deduplication","Unpack Proc.","Reasoning","Acquisition","Vulnerabilities","Rel. Dates","Versions","Links","Hashes","Manufacturer","Models","Dev. Classes","ISAs","FW Types" 2 | "Cui et al.","373","N","N","Y","Y","N","Y","Y","U","N","N","1","63","1","2","II" 3 | "Costin et al.","32356","26275","N","Y","U","S","U","N","N","Y","Y","U","U","U","U","U" 4 | "Avatar","3","3","Y","N","Y","M","U","N","N","N","N","3","3","3","1","II;III" 5 | "Pewny et al.","6","6","Y","N","Y","M","Y","N","Y","Y","N","6","6","3","3","0;I" 6 | "PIE","4","4","Y","N","U","N","N","N","N","N","N","U","4","4","1","III" 7 | "Firmalice","3","3","Y","N","Y","M","Y","N","N","N","N","3","3","3","2","I" 8 | "FIRMADYNE","23035","9486","Y","Y","U","S","Y","Y","Y","Y","Y","42","U","U","7","I;II" 9 | "discovRE","3","3","Y","N","Y","M","Y","N","Y","Y","Y","3","3","3","4","0;I" 10 | "Costin et al.","1925","1925","N","N","Y","N","U","N","U","N","N","U","U","U","9","I" 11 | "Genius","33045","8126","N","N","N","S;R","U","N","N","Y","N","26","U","U","U","U" 12 | "BootStomp","5","5","Y","U","Y","M","Y","N","U","N","N","4","4","1","1","III" 13 | "FirmUSB","2","2","Y","U","N","M","N","N","N","N","N","2","2","1","U","III" 14 | "Gemini","33045","8126","N","N","N","R","U","N","N","Y","N","26","U","U","U","U" 15 | "Muench et 
al.","4","4","Y","N","Y","M","U","N","N","N","N","4","4","4","1","0;I;II;III" 16 | "DTaint","6","6","Y","N","Y","N","Y","N","Y","N","N","4","6","U","2","I" 17 | "Tian et al.","2018","U","N","Y","Y","S","NA","N","Y","U","N","11","U","1","NA","I" 18 | "VulSeeker","4643","N","N","U","N","R","U","N","N","Y","N","U","U","U","U","U" 19 | "FirmUp","U 5000","U","N","Y","N","S","Y","N","N","N","N","U","U","U","U","U" 20 | "IoTFuzzer","17","NA","Y","NA","Y","N","Y","N","Y","N","N","12","17","10","U","U" 21 | "FIRM-AFL","11","11","Y","N","N","M;R","Y","N","Y","N","N","5","11","2","U","I" 22 | "FirmFuzz","6427","1013","Y","N","Y","S","Y","N","N","N","N","3","U","1","2","I" 23 | "SRFuzzer","10","NA","Y","NA","N","M","N","N","Y","N","N","5","10","1","2","U" 24 | "Pretender","6","NA","Y","NA","U","M","N","N","N","Y","Y","2","3","1","1","III" 25 | "HALucinator","16","16","Y","N","Y","M","U","N","N","Y","N","3","4","1","1","III" 26 | "FirmScope","2017","U","N","Y","U","S","Y","N","N","U","N","99+","U","1","NA","I" 27 | "PDiff","715","N","N","N","N","N","Y","N","N","N","N","8","U","3","2","I" 28 | "P$^2$IM","10","10","Y","U","Y","M","N","N","N","Y","N","3","4","10","1","II;;III" 29 | "Karonte","53;899","U","Y","Y","Y","S;R","Y","Y","Y","Y","Y","25","U","U","3","I;II;III" 30 | "Laelaps","30","NA","Y","U","Y","N","N","N","N","N","N","2","4","24","1","II;III" 31 | "FirmAE","1306","1124","Y","Y","Y","S","Y","Y","N","Y","Y","8","U","2","2","I" 32 | "CPscan","28","28","Y","N","N","N","N","N","Y","N","N","10","28","U","U","I" 33 | "Diane","11","NA","Y","NA","N","N","Y","N","Y","N","N","9","11","4","U","U" 34 | "DICE","7","NA","Y","NA","Y","M","N","N","N","Y","N","6","7","7","1","II;III" 35 | "ECMO","815","815","N","Y","U","N","N","N","N","N","N","2","37","1","1","I" 36 | "iFIZZ","10","10","Y","Y","Y","N","U","N","Y","N","N","7","10","4","2","I" 37 | "Jetset","13","13","Y","N","U","M;R","N","N","N","N","N","4","13","3","3","I;II;III" 38 | 
"SaTC","39;49","39;49","Y","Y","N","N;R","N","N","Y","Y","Y","6;4","6;U","2;U","2;3","U" 39 | "Snipuzz","20","NA","N","NA","Y","M","N","N","Y","N","N","17","20","8","U","U" 40 | "$\mu$Emu","21","21","Y","N","N","M;R","Y","N","Y","Y","N","U","21","U","1","II;III" 41 | "SymLM","8","8","Y","N","U","R","NA","N","N","N","N","U","8","U","1","II;III" 42 | "Marcelli et al.","2","2","Y","N","Y","M","Y","N","N","N","N","2","2","1","2","I" 43 | "Greenhouse","7141","5690","Y","Y","Y","S;R","Y","N","N","N","N","9","1764","2","3","I" 44 | "FirmSolo","8787","1470","Y","U","Y","N;R","Y","N","U","U","N","U","U","U","2","I" 45 | "VulHawk","20","20","N","N","N","N","Y","N","N","N","N","3","20","U","U","U" 46 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/asus.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | from contextlib import suppress 4 | from datetime import datetime 5 | from json import JSONDecodeError 6 | from typing import Generator, List 7 | 8 | from scrapy import Request 9 | from scrapy.http import Response 10 | from typing_extensions import override 11 | 12 | from lfwc_scraper.custom_spiders import FirmwareSpider 13 | from lfwc_scraper.items import FirmwareItem 14 | 15 | 16 | class Asus(FirmwareSpider): 17 | name = "asus" 18 | manufacturer = "ASUS" 19 | 20 | custom_settings = { 21 | "ROBOTSTXT_OBEY": True, 22 | "CONCURRENT_REQUESTS": 1, 23 | "CONCURRENT_ITEMS": 1, 24 | "DOWNLOAD_DELAY": 0.75, 25 | "RANDOMIZE_DOWNLOAD_DELAY": True, 26 | "REFERER_ENABLED": False, 27 | } 28 | 29 | @override 30 | def start_requests(self): 31 | product_type_ids = [ 32 | ("Business Switches", "switch", 24591), 33 | ("Gaming Networking", "router", 24506), 34 | ("Gaming Wireless-Router", "router", 25908), 35 | ("ROG Raming WLAN-Router", "router", 25740), 36 | ("4G LTE/3G-Router", "router", 3158), 37 | ("Kabel-Modems & Router", "router", 21981), 38 | 
("Media-Bridge, Repeater und mehr", "repeater", 2619), 39 | ("Powerline-Adapter", "powerline", 2850), 40 | ("Wireless-Router", "router", 2542), 41 | ("WLAN-Mesh-Systeme", "mesh", 25266), 42 | ("xDSL-Modem-Router", "router", 3081), 43 | ("Business Switches", "switches", 24676), 44 | ("Business WLAN Router", "router", 25017), 45 | ] 46 | 47 | for _, device_class, type_id in product_type_ids: 48 | yield Request( 49 | f"https://www.asus.com/support/api/product.asmx/GetPDLevel?website=de&type=2&typeid={type_id}" 50 | "&productflag=1", 51 | callback=self.parse_pdlevel, 52 | cb_kwargs={"device_class": device_class}, 53 | ) 54 | 55 | def parse_pdlevel(self, response: Response, device_class: str, **_) -> Generator[Request, None, None]: 56 | try: 57 | data = json.loads(response.body.decode()) 58 | print(data) 59 | except (UnicodeDecodeError, JSONDecodeError): 60 | yield from [] 61 | return 62 | 63 | for product in data["Result"]["Product"]: 64 | pd_hashed_id = product["PDHashedId"] 65 | device_name = re.sub("<[^<]+?>", "", product["PDName"]).strip() 66 | yield Request( 67 | url=f"https://www.asus.com/support/api/product.asmx/GetPDBIOS?website=de&model={device_name}" 68 | f"&pdhashedid={pd_hashed_id}", 69 | callback=self.parse_pdbios, 70 | cb_kwargs={"device_name": device_name, "device_class": device_class}, 71 | ) 72 | 73 | def parse_pdbios( 74 | self, response: Response, device_name: str, device_class: str 75 | ) -> Generator[FirmwareItem, None, None]: 76 | try: 77 | data = json.loads(response.body.decode()) 78 | except (UnicodeDecodeError, JSONDecodeError): 79 | return 80 | 81 | if not data["Result"]: 82 | return 83 | 84 | firmware = self.extract_firmware_files(data) 85 | 86 | for item in firmware: 87 | meta_data = { 88 | "vendor": "asus", 89 | "release_date": datetime.strptime(item["ReleaseDate"], "%Y/%m/%d").isoformat(), 90 | "device_name": device_name, 91 | "firmware_version": item["Version"], 92 | "device_class": device_class, 93 | "file_urls": 
item["DownloadUrl"]["Global"], 94 | } 95 | yield from self.item_pipeline(meta_data) 96 | 97 | @staticmethod 98 | def extract_firmware_files(data: dict) -> List[dict]: 99 | with suppress(KeyError): 100 | for obj in data["Result"]["Obj"]: 101 | if obj["Name"] == "Firmware": 102 | return obj["Files"] 103 | return [] 104 | -------------------------------------------------------------------------------- /notebooks/notebooks/_mask_lfwc.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "541e35d9-8282-4a64-a390-6da30c1e79cd", 6 | "metadata": {}, 7 | "source": [ 8 | "# Pseudonymize LFwC for Review." 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": null, 14 | "id": "749dbda1-cb32-4952-894c-d0d0f66c6e6e", 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import secrets\n", 19 | "from pathlib import Path\n", 20 | "\n", 21 | "import pandas as pd\n", 22 | "import ssdeep" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "0ebee9b3-46d4-4781-a76c-1946a9935dd9", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "CORPUS_PATH: Path = Path(\"lfwc.csv\")" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "id": "518ee792-996c-4307-9c98-d3dae795fc82", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "df: pd.DataFrame = pd.read_csv(CORPUS_PATH, index_col=0)" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "id": "3ee71f4f-0d49-48f7-812f-3e539c96182a", 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "length_mapping: dict[\"str\", int] = {\"md5\": 16, \"sha1\": 20, \"sha256\": 32, \"sha512\": 64}\n", 53 | "\n", 54 | "\n", 55 | "def replace_hash(_: str, kind: str) -> str:\n", 56 | " if kind == \"tlsh\":\n", 57 | " return f\"T1{secrets.token_hex(nbytes=35).upper()}\"\n", 58 | " if kind == \"ssdeep\":\n", 59 | 
" return ssdeep.hash(secrets.token_bytes(nbytes=1024))\n", 60 | " return secrets.token_hex(nbytes=length_mapping[kind])" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "id": "0c14b5cb-2825-4d6d-8712-8241820c5d70", 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "for column in [\"md5\", \"sha1\", \"sha256\", \"sha512\", \"ssdeep\", \"tlsh\"]:\n", 71 | " df[column] = df[column].apply(lambda x: replace_hash(x, column))" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "id": "f11e5603-ffa2-4537-919f-440c001b80e0", 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "pseudo_name_map: dict[str, str] = {}\n", 82 | "\n", 83 | "\n", 84 | "def pseudonymize_device_names(name: str) -> str:\n", 85 | " if name not in pseudo_name_map:\n", 86 | " pseudo_name_map[name] = secrets.token_hex(nbytes=12)\n", 87 | " return pseudo_name_map[name]" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": null, 93 | "id": "c36ffd5c-a0a0-401c-b589-76d8cdc3188b", 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "df[\"device_name\"] = df[\"device_name\"].apply(pseudonymize_device_names)\n", 98 | "df[\"filename\"] = \"*****\"\n", 99 | "df[\"source_link\"] = \"https://*****\"\n", 100 | "df[\"wayback\"] = \"https://web.archive.org/*****\"" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "id": "3644b991-b04b-4d92-b2ff-81aa8c920653", 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "df" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "id": "f8384946-9577-4802-a1a9-802c4f58f65f", 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "df.to_csv(\"../public_data/lfwc_masked.csv\")" 121 | ] 122 | } 123 | ], 124 | "metadata": { 125 | "kernelspec": { 126 | "display_name": "Python 3 (ipykernel)", 127 | "language": "python", 128 | "name": "python3" 129 | }, 130 | 
"language_info": { 131 | "codemirror_mode": { 132 | "name": "ipython", 133 | "version": 3 134 | }, 135 | "file_extension": ".py", 136 | "mimetype": "text/x-python", 137 | "name": "python", 138 | "nbconvert_exporter": "python", 139 | "pygments_lexer": "ipython3", 140 | "version": "3.12.3" 141 | } 142 | }, 143 | "nbformat": 4, 144 | "nbformat_minor": 5 145 | } 146 | -------------------------------------------------------------------------------- /replication/replicate_lfwc/types.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 Fraunhofer FKIE 2 | # SPDX-FileContributor: Marten Ringwelski 3 | # 4 | # SPDX-License-Identifier: GPL-3.0-or-later 5 | 6 | import datetime 7 | import enum 8 | import hashlib 9 | import pathlib as pl 10 | 11 | import attrs 12 | import pandas as pd 13 | 14 | from . import utils 15 | 16 | 17 | @attrs.frozen 18 | class FirmwareImage: 19 | vendor: str 20 | device: str 21 | version: str | None 22 | release: datetime.date | None 23 | class_: str 24 | # Derived from url 25 | filename: str 26 | size: int 27 | sha256: str 28 | url: str 29 | source: str 30 | 31 | @classmethod 32 | def from_row(cls, row) -> "FirmwareImage": 33 | return cls( 34 | vendor=row["vendor"], 35 | device=row["device"], 36 | version=row["version"], 37 | release=( 38 | datetime.date.fromisoformat(row["release"]) if row["release"] else None 39 | ), 40 | class_=row["class"], 41 | filename=row["filename"], 42 | size=row["size"], 43 | sha256=row["sha256"], 44 | url=row["url"], 45 | source=row["source_type"], 46 | ) 47 | 48 | def __repr__(self) -> str: 49 | return f"{self.class_}:{self.vendor}:{self.device}:{self.version}" 50 | 51 | 52 | class FirmwareImageStatus(str, enum.Enum): 53 | MISSING = "missing" 54 | DOWNLOAD_STARTED = "download-started" 55 | HASH_MISMATCH = "hash-mismatch" 56 | SUCCESS = "success" 57 | 58 | @classmethod 59 | def download_started(cls, image: FirmwareImage, corpus: "Corpus") -> bool: 60 | 
return pl.Path(str(corpus.image_path(image)) + ".aria2").exists() 61 | 62 | @classmethod 63 | def missing(cls, image: FirmwareImage, corpus: "Corpus") -> bool: 64 | return not corpus.image_path(image).exists() 65 | 66 | @classmethod 67 | def hash_mismatch(cls, image: FirmwareImage, corpus: "Corpus") -> bool: 68 | if cls.missing(image, corpus): 69 | return False 70 | if cls.download_started(image, corpus): 71 | return False 72 | 73 | image_path = corpus.image_path(image) 74 | 75 | downloaded_sha256 = hashlib.sha256(image_path.read_bytes()).hexdigest() 76 | 77 | return image.sha256 != downloaded_sha256 78 | 79 | @classmethod 80 | def success(cls, image: FirmwareImage, corpus: "Corpus") -> bool: 81 | if cls.missing(image, corpus): 82 | return False 83 | if cls.download_started(image, corpus): 84 | return False 85 | if cls.hash_mismatch(image, corpus): 86 | return False 87 | return True 88 | 89 | @classmethod 90 | def image_has_status( 91 | cls, image: FirmwareImage, corpus: "Corpus", status: "FirmwareImageStatus" 92 | ) -> bool: # noqa: PLR0911 93 | match status: 94 | case FirmwareImageStatus.MISSING: 95 | return cls.missing(image, corpus) 96 | case FirmwareImageStatus.DOWNLOAD_STARTED: 97 | return cls.download_started(image, corpus) 98 | case FirmwareImageStatus.HASH_MISMATCH: 99 | return cls.hash_mismatch(image, corpus) 100 | case FirmwareImageStatus.SUCCESS: 101 | return cls.success(image, corpus) 102 | case _: 103 | assert False 104 | 105 | @classmethod 106 | def from_image( 107 | cls, image: FirmwareImage, corpus: "Corpus" 108 | ) -> "FirmwareImageStatus": # noqa: PLR0911 109 | if cls.missing(image, corpus): 110 | return cls.MISSING 111 | 112 | if cls.download_started(image, corpus): 113 | return cls.DOWNLOAD_STARTED 114 | 115 | if cls.hash_mismatch(image, corpus): 116 | return cls.HASH_MISMATCH 117 | 118 | if cls.success(image, corpus): 119 | return cls.SUCCESS 120 | 121 | assert False, corpus.image_path(image) 122 | 123 | 124 | @attrs.frozen(slots=False) 125 | 
class Corpus:
    """A firmware corpus: a root directory plus its metadata dataframe.

    Attributes:
        path: Root directory under which the firmware files live.
        dataframe: One row per firmware image (LFwC metadata schema).
    """

    path: pl.Path
    dataframe: pd.DataFrame

    def image_path(self, image: "FirmwareImage") -> pl.Path:
        """Absolute on-disk location for *image* inside this corpus."""
        relative = utils.image_path_from_firmware_image(image)
        return self.path / relative

    def iter_images(self, status: "FirmwareImageStatus|None" = None):
        """Iterate over all images, optionally filtered by *status*.

        With ``status=None`` the unfiltered iterator is returned directly;
        otherwise only images currently in the given status are yielded.
        """
        all_images = self.dataframe.apply(FirmwareImage.from_row, axis=1)
        if status is None:
            return all_images

        def _matching():
            for candidate in all_images:
                if FirmwareImageStatus.image_has_status(candidate, self, status):
                    yield candidate

        return _matching()
class FirmwareDownloaderMiddleware(object):
    """Downloader middleware that renders selected pages with Selenium.

    Requests opt in via ``request.meta["selenium"]``; ``meta["hp"]``
    additionally runs the HP download-page interaction before the DOM is
    captured. A single headless Firefox instance is shared for the whole
    crawl and closed in :meth:`spider_closed`.
    """

    def __init__(self):
        options = webdriver.FirefoxOptions()
        options.add_argument("--headless")
        service = Service(GeckoDriverManager().install())
        driver_cls = webdriver.Firefox
        self.driver = driver_cls(options=options, service=service)  # pyright:ignore
        self.wait = WebDriverWait(self.driver, 15)

    def process_request(self, request, spider):
        # Only handle requests that explicitly ask for Selenium rendering;
        # returning None lets Scrapy's default downloader take over.
        if "selenium" not in request.meta:
            return None
        self.driver.get(request.url)

        if "hp" in request.meta:
            body = self.hp_processor()
        else:
            sleep(2)  # give client-side JS a moment to build the DOM
            body = str.encode(self.driver.page_source)

        return HtmlResponse(self.driver.current_url, body=body, encoding="utf-8", request=request)

    def asus_processor(self):
        """Wait for the ASUS DOWNLOAD link, then return the rendered DOM.

        Raises:
            IgnoreRequest: if the DOWNLOAD element never becomes present.
        """
        try:
            self.wait.until(expected_conditions.presence_of_element_located((By.LINK_TEXT, "DOWNLOAD")))
        except TimeoutException:
            print(
                "No DOWNLOAD Field accessible for {}\nStop processing of {}".format(
                    self.driver.current_url, self.driver.current_url
                )
            )
            # BUG FIX: the original used ``finally: return ...`` here, which
            # swallowed this IgnoreRequest and returned the page anyway.
            raise IgnoreRequest
        return str.encode(self.driver.page_source)

    def hp_processor(self):
        """Drive the HP support page to an OS-independent download view."""
        self.driver.fullscreen_window()
        self.handle_404()
        self.choose_country()
        self.choose_os()
        self.choose_version()
        self.update_os_version()

        return str.encode(self.driver.page_source)

    def handle_404(self):
        # BUG FIX: ``find_element_by_xpath`` was removed in Selenium 4 (which
        # this file already requires via Service/keyword-only Firefox()).
        if "Oops!" in self.driver.find_element(By.XPATH, "//h1").text or "Error 404" in self.driver.page_source:
            print(self.driver.current_url, ": 404 Page Not Found - no firmware to find here")
            raise IgnoreRequest

    def choose_country(self):
        element = self.wait.until(expected_conditions.element_to_be_clickable((By.LINK_TEXT, "Australia")))
        element.click()
        try:
            self.wait.until(expected_conditions.invisibility_of_element_located(element))
        except TimeoutException:
            # The overlay sometimes needs a second click to dismiss.
            element.click()

    def choose_os(self):
        if self.wait.until(expected_conditions.element_to_be_clickable((By.ID, "SelectDiffOS"))):
            self.driver.find_element(By.ID, "SelectDiffOS").click()
        self.wait.until(expected_conditions.element_to_be_clickable((By.ID, "platform_dd_headerLink"))).click()

        for element in self.driver.find_elements(By.XPATH, '//ul[@id="platform_dd_list"]/li'):
            if element.text == "OS Independent":
                element.click()
                break

    def choose_version(self):
        self.driver.find_element(By.ID, "versionnew_dd_headerValue").click()
        for element in self.driver.find_elements(
            By.XPATH, '//ul[@id="versionnew_dd_list" and @class="dropdown-menu"]/li'
        ):
            if element.text == "OS Independent":
                element.click()
                break

    def update_os_version(self):
        element = self.driver.find_element(By.ID, "os-update")
        element.click()
        # Wait for the update overlay to disappear before grabbing the DOM.
        self.wait.until(expected_conditions.invisibility_of_element_located(element))

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)

    def spider_closed(self):
        # NOTE(review): nothing visible here connects this to the
        # spider_closed signal -- confirm the driver is actually quit.
        self.driver.quit()
class Plugin(str, enum.Enum):
    """FACT analysis plugins, with the exact names the FACT REST API expects.

    Values are passed verbatim as ``requested_analysis_systems`` when
    uploading firmware (see :func:`upload`).
    """

    FILE_SYSTEM_METADATA = "file_system_metadata"
    # NOTE(review): this value contains a space ("file name"), unlike every
    # other plugin identifier here -- confirm against FACT's plugin list.
    FIL = "file name"
    CVE_LOOKUP = "cve_lookup"
    KERNEL_CONFIG = "kernel_config"
    CRYPTO_MATERIAL = "crypto_material"
    ELF_ANALYSIS = "elf_analysis"
    USERS_AND_PASSWORDS = "users_and_passwords"
    SOURCE_CODE_ANALYSIS = "source_code_analysis"
    BINWALK = "binwalk"
    DUMMY_PLUGIN_FOR_TESTING_ONLY = "dummy_plugin_for_testing_only"
    IP_AND_URI_FINDER = "ip_and_uri_finder"
    INTERESTING_URIS = "interesting_uris"
    CRYPTO_HINTS = "crypto_hints"
    INPUT_VECTORS = "input_vectors"
    TLSH = "tlsh"
    SOFTWARE_COMPONENTS = "software_components"
    INFORMATION_LEAKS = "information_leaks"
    PRINTABLE_STRINGS = "printable_strings"
    DEVICE_TREE = "device_tree"
    IPC_ANALYZER = "ipc_analyzer"
    HASHLOOKUP = "hashlookup"
    EXPLOIT_MITIGATIONS = "exploit_mitigations"
    CPU_ARCHITECTURE = "cpu_architecture"
    CWE_CHECKER = "cwe_checker"
    INIT_SYSTEMS = "init_systems"
    FILE_TYPE = "file_type"
    HARDWARE_ANALYSIS = "hardware_analysis"
    STRING_EVALUATOR = "string_evaluator"
    FILE_HASHES = "file_hashes"
    KNOWN_VULNERABILITIES = "known_vulnerabilities"
    QEMU_EXEC = "qemu_exec"
def upload(image: FirmwareImage, plugins: list[str], ctx: Context) -> str:
    """Upload a firmware image to FACT and return the uid FACT assigned.

    Args:
        image: Metadata row describing the firmware to upload.
        plugins: FACT plugin names to request as analysis systems.
        ctx: FACT endpoint URL plus the corpus holding the binary.

    Raises:
        FileNotFoundError: if the image file is absent from the corpus.
        UploadFailedError: if FACT answers with a non-2xx status.
    """
    # Corpus.image_path already returns a pathlib.Path, so the former
    # pl.Path(...) re-wrapping was redundant and has been dropped.
    image_path = ctx.corpus.image_path(image)
    if not image_path.exists():
        raise FileNotFoundError(f"{image_path} does not exist.")

    image_data = image_path.read_bytes()
    payload = {
        "binary": base64.b64encode(image_data).decode(),
        "device_class": image.class_,
        "device_name": image.device,
        "device_part": "Unknown",
        "file_name": image.filename,
        "requested_analysis_systems": plugins,
        "vendor": image.vendor,
        "version": image.version if image.version else "Unknown",
        "tags": f"source:{image.source}",
    }
    if image.release:
        # Only set when known; FACT treats the key as optional.
        payload["release_date"] = image.release.isoformat()

    r = requests.put(
        ctx.url + "/rest/firmware",
        json=payload,
    )

    try:
        r.raise_for_status()
    except requests.HTTPError as e:
        raise UploadFailedError() from e

    answer = r.json()
    return answer["uid"]
def get_firmware_analysis_progress(ctx: Context) -> dict[str, FirmwareAnalysisProgress]:
    """Return the progress of all running analyses, keyed by firmware uid.

    Raises:
        UnknownStatusError: if the FACT status endpoint answers non-2xx.
    """
    response = requests.get(ctx.url + "/rest/status")

    try:
        response.raise_for_status()
    except requests.HTTPError as e:
        raise UnknownStatusError() from e

    progress_by_uid: dict[str, FirmwareAnalysisProgress] = {}
    for progress in FirmwareAnalysisProgress.iter_from_status_response(response):
        progress_by_uid[progress.uid] = progress
    return progress_by_uid
"https://eu.dlink.com/de/de/for-home/smart-home?page=-1&mode=ajax&filters=&categories=&target=products", 20 | "https://eu.dlink.com/de/de/for-home/switches?page=-1&mode=ajax&filters=&categories=&target=products", 21 | ] 22 | 23 | custom_settings = { 24 | "ROBOTSTXT_OBEY": True, 25 | "CONCURRENT_REQUESTS": 1, 26 | "CONCURRENT_ITEMS": 1, 27 | "DOWNLOAD_DELAY": 0.75, 28 | "RANDOMIZE_DOWNLOAD_DELAY": True, 29 | "REFERER_ENABLED": True, 30 | } 31 | 32 | xpath = { 33 | "product_names_in_category": '//div[@class="product-item__number"]/text()', 34 | "detail_pages_in_category": '//div[@class="product-item__number"]/parent::a/@href', 35 | "detail_latest_revision_name": '//select[@id="supportRevision"]/option[last()]/text()', 36 | "detail_latest_revision_param": '//select[@id="supportRevision"]/option[last()]/@value', 37 | "version": '//div[@id="firmware"]//td[@data-table-header="Version"]/text()', 38 | "date": '//div[@id="firmware"]//td[@data-table-header="Datum"]/text()', 39 | "download_link": '//div[@id="firmware"]//td[@data-table-header=""]/a/@href', 40 | } 41 | 42 | device_classes_dict = { 43 | "dba": "Access Point", 44 | "dap": "Access Point", 45 | "dis": "Converter", 46 | "dmc": "Converter", 47 | "dge": "PCIe-Networkcard", 48 | "dwa": "PCIe-Networkcard", 49 | "dxe": "PCIe-Networkcard", 50 | "dps": "Redundant Power Supply", 51 | "dsr": "Router (Business)", 52 | "dwr": "Router (mobile)", 53 | "dwm": "Router (mobile)", 54 | "dsl": "Router (Modem)", 55 | "covr": "Router (Home)", 56 | "dir": "Router (Home)", 57 | "dva": "Router (Home)", 58 | "go": "Router (Home)", 59 | "dsp": "Smart Plug", 60 | "dcs": "Smart Wi-Fi Camera", 61 | "dsh": "Smart Wi-Fi Camera", 62 | "des": "Switch", 63 | "dgs": "Switch", 64 | "dkvm": "Switch", 65 | "dqs": "Switch", 66 | "dxs": "Switch", 67 | "dem": "Transceiver", 68 | "dub": "USB Extensions", 69 | "dnr": "Video Recorder", 70 | "dwc": "Wireless Controller", 71 | "dwl": "other", 72 | } 73 | 74 | def parse(self, response: Response, **_) -> 
Generator[Request, None, None]: 75 | names = response.xpath(self.xpath["product_names_in_category"]).extract() 76 | detail_links = response.xpath(self.xpath["detail_pages_in_category"]).extract() 77 | for name, detail_link in zip(names, detail_links): 78 | yield Request( 79 | url=response.urljoin(detail_link), callback=self.process_detail_page, cb_kwargs=dict(product_name=name) 80 | ) 81 | 82 | def process_detail_page( 83 | self, response: Response, product_name: str, product_revision: str = "" 84 | ) -> Generator[Union[Request, FirmwareItem], None, None]: 85 | if product_revision == "": 86 | latest_revision_on_page = response.xpath(self.xpath["detail_latest_revision_name"]).extract() 87 | latest_revision_query_param = response.xpath(self.xpath["detail_latest_revision_param"]).extract() 88 | 89 | if len(latest_revision_on_page + latest_revision_query_param) > 0: 90 | yield Request( 91 | url=response.urljoin(f"?revision={latest_revision_query_param[0]}"), 92 | callback=self.process_detail_page, 93 | cb_kwargs=dict(product_name=product_name, product_revision=latest_revision_on_page[0]), 94 | ) 95 | return 96 | 97 | version = response.xpath(self.xpath["version"]).extract() 98 | release_date = response.xpath(self.xpath["date"]).extract() 99 | download_link = response.xpath(self.xpath["download_link"]).extract() 100 | 101 | if len(download_link + release_date + version) != 3: 102 | yield from [] 103 | return 104 | 105 | meta_data = { 106 | "vendor": "DLink", 107 | "file_urls": [download_link[0]], 108 | "device_name": f"{product_name} {product_revision}".strip(), 109 | "device_class": self.map_device_class(download_link[0]), 110 | "firmware_version": version[0], 111 | "release_date": datetime.strptime(release_date[0].strip(), "%d.%m.%Y").strftime("%d-%m-%Y"), 112 | } 113 | 114 | yield from self.item_pipeline(meta_data) 115 | 116 | @classmethod 117 | def map_device_class(cls, image_path: str) -> str: 118 | device_class = "unknown" 119 | for key, value in 
cls.device_classes_dict.items(): 120 | if key in image_path.lower(): 121 | device_class = value 122 | break 123 | return device_class 124 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/edimax.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime 3 | from typing import Generator 4 | 5 | from scrapy import Request 6 | from scrapy.http import Response 7 | 8 | from lfwc_scraper.custom_spiders import FirmwareSpider 9 | from lfwc_scraper.items import FirmwareItem 10 | 11 | 12 | class Edimax(FirmwareSpider): 13 | name = "edimax" 14 | manufacturer = "edimax" 15 | 16 | start_urls = ["https://www.edimax.com/edimax/download/download/data/edimax/global/download/"] 17 | 18 | custom_settings = { 19 | "ROBOTSTXT_OBEY": True, 20 | "CONCURRENT_REQUESTS": 1, 21 | "CONCURRENT_ITEMS": 1, 22 | "DOWNLOAD_DELAY": 0.75, 23 | "RANDOMIZE_DOWNLOAD_DELAY": True, 24 | "REFERER_ENABLED": False, 25 | } 26 | 27 | xpath = { 28 | "option_links": '//select[@class="step1_select_cb drop_select"]/option[not(@disabled)]/@value', 29 | "device_names": "//select/option[not(@disabled)]/text()", 30 | "option_links_2": "//select/option[not(@disabled)]/@value", 31 | } 32 | 33 | def parse(self, response: Response, **_) -> Generator[Request, None, None]: 34 | 35 | solution_links = response.xpath(self.xpath["option_links"]).extract() 36 | for solution in solution_links: 37 | yield Request( 38 | url=response.urljoin( 39 | f"/edimax/product/ajax_product_admin/get_catagory_list_cb/2/{solution}/0/show_all/" 40 | ), 41 | callback=self.solution_parse, 42 | ) 43 | 44 | def solution_parse(self, response: Response) -> Generator[Request, None, None]: 45 | category_links = response.xpath(self.xpath["option_links_2"]).extract() 46 | for category in category_links: 47 | device_class = category 48 | yield Request( 49 | url=response.urljoin( 50 | 
f"/edimax/product/ajax_product_admin/get_product_list_cb/2/{category}/0/show_all/" 51 | ), 52 | callback=self.class_parse, 53 | cb_kwargs={"device_class": device_class}, 54 | ) 55 | 56 | def class_parse(self, response: Response, device_class: str) -> Generator[Request, None, None]: 57 | device_links = response.xpath(self.xpath["option_links_2"]).extract() 58 | device_names = response.xpath(self.xpath["device_names"]).extract() 59 | for device_name, device_link in zip(device_names, device_links): 60 | yield Request( 61 | url=response.urljoin( 62 | f"/edimax/download/ajax_download/get_download_list/2/global/download/{device_link}/{device_link}/3/" 63 | ), 64 | callback=self.device_parse, 65 | cb_kwargs={"device_class": device_class, "device_name": device_name}, 66 | ) 67 | 68 | def device_parse( 69 | self, response: Response, device_class: str, device_name: str 70 | ) -> Generator[FirmwareItem, None, None]: 71 | table_row_selectors = response.xpath( 72 | '//h3[text()="Firmware"]/following-sibling::div[@class="datagrid_tablesorter"][1]//tr' 73 | ) 74 | 75 | date_re = re.compile(r"(\d+\-\d+\-\d+).*") 76 | version_re = re.compile(r".*\(Version\s?\:?\s?(.*)\).*", flags=re.MULTILINE) 77 | 78 | if not table_row_selectors: 79 | return 80 | 81 | for row in table_row_selectors[1:]: 82 | download_link = row.xpath("./td[4]//a/@href").get() 83 | release_dates_dirty = row.xpath("./td[1]//span/text()").extract() 84 | info_rows = row.xpath("./td[1]/text()").extract() 85 | 86 | release_date = datetime.strptime("1970-01-01", "%Y-%m-%d").isoformat() 87 | for date_dirty in release_dates_dirty: 88 | search_results = date_re.findall(date_dirty) 89 | 90 | if search_results: 91 | release_date = datetime.strptime(search_results[0].strip(), "%Y-%m-%d").isoformat() 92 | 93 | firmware_version = "1.0.0" 94 | for info in info_rows: 95 | search_results = version_re.findall(info) 96 | 97 | if search_results: 98 | firmware_version = search_results[0] 99 | 100 | meta_data = { 101 | "vendor": 
"edimax", 102 | "release_date": release_date, 103 | "device_name": device_name, 104 | "firmware_version": firmware_version, 105 | "device_class": device_class, 106 | "file_urls": [response.urljoin(download_link)], 107 | } 108 | yield from self.item_pipeline(meta_data) 109 | 110 | @staticmethod 111 | def map_device_class(device_title: str) -> str: 112 | if "switch" in device_title.lower(): 113 | return "switch" 114 | if "access" in device_title.lower(): 115 | return "accesspoint" 116 | if "repeater" in device_title.lower(): 117 | return "repeater" 118 | if "powerline" in device_title.lower(): 119 | return "powerline" 120 | if "router" in device_title.lower(): 121 | return "router" 122 | if "modem" in device_title.lower(): 123 | return "modem" 124 | if "mesh" in device_title.lower(): 125 | return "mesh" 126 | if "camera" in device_title.lower(): 127 | return "ipcam" 128 | if "phone" in device_title.lower(): 129 | return "phone" 130 | return "unknown" 131 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/trendnet.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime 3 | from typing import Generator 4 | 5 | from scrapy import Request 6 | from scrapy.http import Response 7 | 8 | from lfwc_scraper.custom_spiders import FirmwareSpider 9 | from lfwc_scraper.items import FirmwareItem 10 | 11 | 12 | class Trendnet(FirmwareSpider): 13 | name = "trendnet" 14 | manufacturer = "trendnet" 15 | 16 | start_urls = ["https://www.trendnet.com/support/"] 17 | 18 | custom_settings = { 19 | "ROBOTSTXT_OBEY": True, 20 | "CONCURRENT_REQUESTS": 1, 21 | "CONCURRENT_ITEMS": 1, 22 | "DOWNLOAD_DELAY": 0.75, 23 | "RANDOMIZE_DOWNLOAD_DELAY": True, 24 | "REFERER_ENABLED": False, 25 | } 26 | 27 | xpath = { 28 | "product_links": '//option[starts-with(@value,"support-detail.asp")]/@value', 29 | "device_class_hint": '//h1[@class="g-font-weight-300 
mb-0"]/text()', 30 | "device_name": '//h2[contains(@class, "g-mb-10 g-font-size-18")]/text()', 31 | "release_dates": "//pre/text()", 32 | "firmware_links": '//a[contains(@href, "Firmware") or contains(@href, "firmware")]/@href', 33 | "firmware_filenames": '//a[contains(@href, "Firmware") or contains(@href, "firmware")]/text()', 34 | "download_folder_hint": '//div[contains(@class, "card-header") and contains(text(), "Firmware")]//parent::*//a[@id="download"]/@data-src[1]', 35 | "download_folder": '//a[contains(@href, "https://downloads.trendnet.com")]/@href[1]', 36 | } 37 | 38 | regex = {"firmware_version": re.compile(r".*Firmware(?:\s?\-?[vV]ersion)?\s([\d\.]+).*$", flags=re.MULTILINE)} 39 | 40 | def parse(self, response: Response, **_) -> Generator[Request, None, None]: 41 | product_support_links = response.xpath(self.xpath["product_links"]).extract() 42 | for link in product_support_links: 43 | yield Request(url=response.urljoin(link), callback=self.support_page) 44 | 45 | def support_page(self, response: Response) -> Generator[Request, None, None]: 46 | device_class_hint = response.xpath(self.xpath["device_class_hint"]).get() 47 | 48 | if not device_class_hint: 49 | device_class_hint = "unknown" 50 | 51 | device_class = self.map_device_class(device_class_hint) 52 | 53 | device_name_dirty = response.xpath(self.xpath["device_name"]).get() 54 | 55 | if not device_name_dirty: 56 | return 57 | 58 | device_name_dirty = device_name_dirty.strip().replace("\n", "").replace("\xa0", "") 59 | device_name = re.sub(r"\s\s+", " ", device_name_dirty) 60 | 61 | download_folder_hint = response.xpath(self.xpath["download_folder_hint"]).get() 62 | 63 | if not download_folder_hint: 64 | return 65 | 66 | yield Request( 67 | url=f"{response.urljoin(download_folder_hint)}&button=Continue+with+Download&Continue=yes", 68 | callback=self.extract_download_folder_hint, 69 | cb_kwargs={ 70 | "device_name": device_name, 71 | "device_class": device_class, 72 | }, 73 | meta={"dont_redirect": 
True}, 74 | ) 75 | 76 | def extract_download_folder_hint( 77 | self, response: Response, device_name: str, device_class: str 78 | ) -> Generator[Request, None, None]: 79 | download_folder_dirty = response.xpath(self.xpath["download_folder"]).get() 80 | if not download_folder_dirty: 81 | return 82 | download_folder_link = "/".join(download_folder_dirty.split("/")[:-1]) 83 | yield Request( 84 | url=download_folder_link, 85 | callback=self.directory_listing, 86 | cb_kwargs={ 87 | "device_name": device_name, 88 | "device_class": device_class, 89 | }, 90 | ) 91 | 92 | def directory_listing( 93 | self, response: Response, device_name: str, device_class: str 94 | ) -> Generator[FirmwareItem, None, None]: 95 | text_nodes_with_release_date = response.xpath(self.xpath["release_dates"]).extract() 96 | 97 | dates = [ 98 | datetime.strptime(d.strip().split(" ")[0], "%m/%d/%Y").isoformat() for d in text_nodes_with_release_date 99 | ] 100 | 101 | download_links = response.xpath(self.xpath["firmware_links"]).extract() 102 | fw_filenames = response.xpath(self.xpath["firmware_filenames"]).extract() 103 | 104 | for fn, link, date in zip(fw_filenames, download_links, dates): 105 | meta_data = { 106 | "vendor": "trendnet", 107 | "release_date": date, 108 | "device_name": device_name, 109 | "firmware_version": fn, 110 | "device_class": device_class, 111 | "file_urls": [response.urljoin(link)], 112 | } 113 | yield from self.item_pipeline(meta_data) 114 | 115 | @staticmethod 116 | def map_device_class(device_title: str) -> str: 117 | if "switch" in device_title.lower(): 118 | return "switch" 119 | if "access" in device_title.lower(): 120 | return "accesspoint" 121 | if "repeater" in device_title.lower(): 122 | return "repeater" 123 | if "powerline" in device_title.lower(): 124 | return "powerline" 125 | if "router" in device_title.lower(): 126 | return "router" 127 | if "modem" in device_title.lower(): 128 | return "modem" 129 | if "mesh" in device_title.lower(): 130 | return "mesh" 131 | 
if "camera" in device_title.lower(): 132 | return "ipcam" 133 | if "phone" in device_title.lower(): 134 | return "phone" 135 | return "unknown" 136 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/netgear.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | from datetime import datetime 4 | from typing import Generator 5 | 6 | from scrapy import Request 7 | from scrapy.http import Response 8 | 9 | from lfwc_scraper.custom_spiders import FirmwareSpider 10 | from lfwc_scraper.items import FirmwareItem 11 | 12 | 13 | class Netgear(FirmwareSpider): 14 | name = "netgear" 15 | manufacturer = "NETGEAR" 16 | 17 | start_urls = ["https://www.netgear.de/system/supportModels.json"] 18 | 19 | custom_settings = { 20 | "ROBOTSTXT_OBEY": True, 21 | "CONCURRENT_REQUESTS": 10, 22 | "CONCURRENT_ITEMS": 10, 23 | "DOWNLOAD_DELAY": 0.75, 24 | "RANDOMIZE_DOWNLOAD_DELAY": True, 25 | "REFERER_ENABLED": False, 26 | } 27 | 28 | xpath = { 29 | "get_kb_article": '//h1[contains(text(), "Firmware")]/parent::a/parent::div/div[@class="accordion-content"]//a[contains(@href, "kb.netgear.com")]/@href', 30 | "get_download_link": '//h1[contains(text(), "Firmware")]/parent::a/parent::div/div[@class="accordion-content"]/div[@class="links"]/a/@href', 31 | "get_firmware_text": '//h1[contains(text(), "Firmware")]', 32 | "get_release_date": '//p[@class="last-updated"]/text()', 33 | } 34 | 35 | regex = {"firmware_version": re.compile(r".*Firmware(?:\s?\-?[vV]ersion)?\s([\d\.]+).*$", flags=re.MULTILINE)} 36 | 37 | def parse(self, response: Response, **kwargs) -> Generator[Request, None, None]: 38 | all_products = json.loads(response.text) 39 | for product in all_products: 40 | device_class = self.map_device_class(product["title"]) 41 | if device_class == "unknown" or product["external"] != "": 42 | continue 43 | support_url = response.urljoin(product["url"]) 44 | device_name = 
product["model"] 45 | yield Request( 46 | url=support_url, 47 | callback=self.consult_support_pages, 48 | cb_kwargs={ 49 | "device_name": device_name, 50 | "device_class": device_class, 51 | }, 52 | meta={"selenium": True}, # required because the xpath queries need a properly built DOM tree via JS 53 | ) 54 | 55 | def consult_support_pages( 56 | self, response: Response, device_name: str, device_class: str 57 | ) -> Generator[Request | FirmwareItem, None, None]: 58 | fw_text_selectors = response.xpath(self.xpath["get_firmware_text"]) 59 | 60 | for sel in fw_text_selectors: 61 | dirty_firmware_version = sel.xpath("./text()").get() 62 | content_sel = sel.xpath('./parent::a/parent::div/div[@class="accordion-content"]') 63 | download_link = content_sel.xpath('./div[@class="links"]/a/@href').get() 64 | kb_article_link = content_sel.xpath('.//a[contains(@href, "kb.netgear.com")]/@href').get() 65 | firmware_version = self.regex["firmware_version"].findall(dirty_firmware_version) 66 | if len(firmware_version) == 1: 67 | firmware_version = firmware_version[0] 68 | else: 69 | firmware_version = "0.0.0.0" 70 | if kb_article_link is not None: 71 | yield Request( 72 | url=kb_article_link, 73 | callback=self.parse_kb_article, 74 | cb_kwargs={ 75 | "firmware_version": firmware_version, 76 | "download_link": download_link, 77 | "device_name": device_name, 78 | "device_class": device_class, 79 | }, 80 | ) 81 | else: 82 | meta_data = { 83 | "vendor": "netgear", 84 | "release_date": datetime.strptime("01-01-1970", "%m-%d-%Y").isoformat(), 85 | "device_name": device_name, 86 | "firmware_version": firmware_version, 87 | "device_class": device_class, 88 | "file_urls": [download_link.strip()], 89 | } 90 | yield from self.item_pipeline(meta_data) 91 | 92 | def parse_kb_article( 93 | self, response: Response, device_name: str, firmware_version: str, download_link: str, device_class: str 94 | ) -> Generator[FirmwareItem, None, None]: 95 | 96 | dirty_release_date = 
response.xpath(self.xpath["get_release_date"]).get() 97 | 98 | release_date = dirty_release_date.split(":")[-1].strip().replace("/", "-") 99 | 100 | meta_data = { 101 | "vendor": "netgear", 102 | "release_date": datetime.strptime(release_date, "%m-%d-%Y").isoformat(), 103 | "device_name": device_name, 104 | "firmware_version": firmware_version, 105 | "device_class": device_class, 106 | "file_urls": [download_link.strip()], 107 | } 108 | yield from self.item_pipeline(meta_data) 109 | 110 | @staticmethod 111 | def map_device_class(device_title: str) -> str: 112 | if any(substr in device_title.lower() for substr in ["usb", "unmanaged"]): 113 | return "unknown" 114 | if "switch" in device_title.lower(): 115 | return "switch" 116 | if "access" in device_title.lower(): 117 | return "accesspoint" 118 | if "repeater" in device_title.lower(): 119 | return "repeater" 120 | if "powerline" in device_title.lower(): 121 | return "powerline" 122 | if "router" in device_title.lower(): 123 | return "router" 124 | if "modem" in device_title.lower(): 125 | return "modem" 126 | if "mesh" in device_title.lower(): 127 | return "mesh" 128 | return "unknown" 129 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/linksys.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime 3 | from typing import Generator, Optional, Tuple 4 | 5 | from scrapy import Request 6 | from scrapy.http import Response 7 | 8 | from lfwc_scraper.custom_spiders import FirmwareSpider 9 | from lfwc_scraper.items import FirmwareItem 10 | 11 | 12 | class ClassIdentifier: 13 | def __init__(self, shortcuts: tuple): 14 | self.shortcuts: tuple = shortcuts 15 | 16 | 17 | class Linksys(FirmwareSpider): 18 | name = "linksys" 19 | 20 | device_classes = { 21 | ClassIdentifier(("AM",)): "modem", 22 | ClassIdentifier(("CIT",)): "phone", 23 | ClassIdentifier(("EF", "EP", "PPS", "PSU", "WPS")): 
"printer", 24 | ClassIdentifier(("DMP", "DMC", "DMR", "DMS", "KWH", "MCC")): "media", 25 | ClassIdentifier(("DMA",)): "media", 26 | ClassIdentifier(("LAPN", "LAPAC")): "accesspoint", 27 | ClassIdentifier(("LCA",)): "ipcam", 28 | ClassIdentifier(("LMR", "LNR")): "video_recorder", 29 | ClassIdentifier(("LRT",)): "router", 30 | ClassIdentifier(("LGS",)): "switch", 31 | ClassIdentifier(("MR", "EA", "WRT", "E", "BEF", "WKU", "WRK")): "router", 32 | ClassIdentifier(("M10", "M20")): "accesspoint", 33 | ClassIdentifier(("PL",)): "powerline", 34 | ClassIdentifier(("RE", "WRE")): "repeater", 35 | ClassIdentifier(("SE", "EZX")): "switch", 36 | ClassIdentifier(("WAP",)): "accesspoint", 37 | ClassIdentifier(("WET", "WUM", "WES")): "repeater", 38 | ClassIdentifier(("WHW", "VLP", "MX")): "mesh", 39 | ClassIdentifier(("WMC", "WVC")): "ipcam", 40 | ClassIdentifier(("WML",)): "media", 41 | ClassIdentifier(("X", "AG", "WAG")): "router", 42 | } 43 | 44 | custom_settings = { 45 | "ROBOTSTXT_OBEY": True, 46 | "CONCURRENT_REQUESTS": 1, 47 | "CONCURRENT_ITEMS": 1, 48 | "DOWNLOAD_DELAY": 0.75, 49 | "RANDOMIZE_DOWNLOAD_DELAY": True, 50 | "REFERER_ENABLED": True, 51 | } 52 | 53 | xpath = { 54 | "device_names": '//li[@class="sitemap-list__item"]/a/text()', 55 | "support_pages": '//li[@class="sitemap-list__item"]/a/@href', 56 | "download_page": '//a[contains(@title, "FIRMWARE")]/@href', 57 | "hardware_version_selectors": '//div[starts-with(@id, "version")]', 58 | } 59 | 60 | start_urls = ["https://www.linksys.com/sitemap"] 61 | 62 | def parse(self, response: Response, **_) -> Generator[Request, None, None]: 63 | 64 | device_names = response.xpath(self.xpath["device_names"]).extract() 65 | support_pages = response.xpath(self.xpath["support_pages"]).extract() 66 | 67 | for name_dirty, url in zip(device_names, support_pages): 68 | yield Request( 69 | url=response.urljoin(url), 70 | callback=self.parse_support_page, 71 | cb_kwargs={"device_name": name_dirty.split(".")[0].strip()}, 72 | ) 73 | 74 | 
def parse_download_page(self, response: Response, device_name: str) -> Generator[FirmwareItem, None, None]:
    """Yield firmware items for every firmware file on a device's download page.

    One ``<div id="version...">`` section exists per hardware revision; each
    section carries firmware download links plus ``Ver. x.y`` / release-date
    text paragraphs that are zipped together into item meta data.
    """
    hw_version_selectors = response.xpath(self.xpath["hardware_version_selectors"])

    # Matches e.g. "Latest Date: 03/15/2021" -> "03/15/2021".
    # Hoisted out of the loop: it is invariant across revisions.
    date_finder = re.compile(r".*:\s*(\d+/\d+/\d{4}).*")

    for sel in hw_version_selectors:
        hw_version = "ver. 1.0"  # fallback when the section has no id attribute
        hw_version_dirty = sel.xpath("./@id").get()
        if hw_version_dirty is not None:
            hw_version = f'ver. {hw_version_dirty.replace("version_", "").replace("_", ".")}'

        # BUG FIX: the original rebound `device_name` here, so every subsequent
        # hardware revision accumulated the previous revisions' suffixes
        # (e.g. "EA6350 ver. 1.0 ver. 2.0"). Use a per-revision local instead.
        versioned_device_name = f"{device_name} {hw_version}"

        firmware_download_urls = sel.xpath('.//p//a[contains(@href, "firmware")]/@href').extract()
        versions = []
        release_dates = []

        for text in sel.xpath('.//p[contains(text(), "Ver.")]/text()').extract():
            if "Ver." in text:
                versions += [text.replace("Ver.", "").replace(" ", "")]
                continue

            dates = date_finder.findall(text)
            if dates:
                release_dates += [datetime.strptime(dates[0], "%m/%d/%Y").isoformat()]

        # NOTE(review): zip() silently truncates if the numbers of links,
        # versions, and dates disagree -- presumably they are parallel lists
        # on this page; verify against a live download page.
        for url, version, date in zip(firmware_download_urls, versions, release_dates):
            meta_data = {
                "vendor": "linksys",
                "source": "vendor",
                "file_urls": [url],
                "device_name": versioned_device_name,
                "device_class": self.map_device_class(versioned_device_name),
                "firmware_version": version,
                "release_date": date,
            }

            yield from self.item_pipeline(meta_data)
10 | We tear down the firmware unpacking barrier with an open source process for verified unpacking success based on the [Firmware Analysis and Comparison Tool (FACT)](https://github.com/fkie-cad/FACT_core). 11 | 12 | ## Read & Cite the Paper 13 | 14 | The paper was published at the [Network and Distributed System Security (NDSS) Symposium 2025](https://www.ndss-symposium.org/ndss2025/) and can be downloaded [here](https://www.ndss-symposium.org/wp-content/uploads/2025-669-paper-1.pdf). 15 | 16 | ```bibtex 17 | @inproceedings{helmkeSoundFirmwareCorpora, 18 | author = {Helmke, René and Padilla, Elmar and Aschenbruck, Nils}, 19 | title = {{Mens Sana In Corpore Sano: Sound Firmware Corpora for Vulnerability Research}}, 20 | booktitle = {{Proceedings of the Network and Distributed System Security Symposium (NDSS'25)}}, 21 | year = {2025}, 22 | publisher = {{The Internet Society}}, 23 | address = {{San Diego, California, USA}}, 24 | url = {https://www.ndss-symposium.org/wp-content/uploads/2025-669-paper-1.pdf} 25 | } 26 | ``` 27 | 28 | ## Get the Corpus 29 | 30 | Access to LFwC is gated for scientific purposes. Request the meta data for corpus replication [here](https://doi.org/10.5281/zenodo.12659436). 31 | As per our ethical discussion in the paper, please state your academic affiliation, position, and roughly explain to us how you want to use the corpus in your research. 32 | By gaining access to the corpus, you also vouch that you handle the meta data discretely and do your best, also for the sake of replicability for other researchers, to not leak the meta data to the public. 33 | 34 | ## Corpus Updates 35 | 36 | As LFwC is an important resource for our day-to-day research and firmware analysis development, we plan to publish, at least, yearly updates in this repository to ensure replicability and sample actuality. 
| **Component** | **Specifications** |
├── scrapers # Scrapers, the original source of LFwC. Only included for transparency, as most of them do not work anymore due to changed manufacturer websites.
117 | ``` 118 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/avm.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | from contextlib import suppress 4 | from datetime import datetime 5 | from json import loads 6 | from typing import Generator, Union 7 | 8 | from scrapy import Request 9 | from scrapy.http import Response 10 | 11 | from lfwc_scraper.custom_requests import FTPFileRequest, FTPListRequest 12 | from lfwc_scraper.custom_spiders import FTPSpider 13 | from lfwc_scraper.items import FirmwareItem 14 | 15 | 16 | class AVM(FTPSpider): 17 | """ 18 | This is the original scraper for ftp://ftp.avm.de. It won't work anymore. 19 | 20 | The FTP service has been shut down since. You can obtain all images from https://download.avm.de 21 | """ 22 | 23 | handle_httpstatus_list = [404] 24 | name = "avm" 25 | allowed_domains = ["ftp.avm.de", "avm.de"] 26 | start_urls = ["ftp://ftp.avm.de/"] 27 | 28 | custom_settings = { 29 | "ROBOTSTXT_OBEY": True, 30 | "CONCURRENT_REQUESTS": 1, 31 | "CONCURRENT_ITEMS": 1, 32 | "DOWNLOAD_DELAY": 0.75, 33 | "RANDOMIZE_DOWNLOAD_DELAY": True, 34 | "REFERER_ENABLED": False, 35 | } 36 | 37 | filter_eol_products = False 38 | 39 | meta_regex = { 40 | "device_name": re.compile(r"^(?:Produkt|Controller)\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 41 | "firmware_version": re.compile(r"^Version\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 42 | "release_date": re.compile(r"^(?:Release-Datum|Build)\s*:\s+(.*)$", flags=re.MULTILINE | re.IGNORECASE), 43 | } 44 | 45 | def parse(self, response: Response, **_): 46 | folder = loads(response.body) 47 | 48 | yield from self.recurse_sub_folders(folder, base_url=response.url) 49 | yield from self.search_firmware_images(folder, base_url=response.url) 50 | 51 | def parse_metadata_and_download_image( 52 | self, response: Response, image_path, release_date, **_ 53 | ) -> 
def search_firmware_images(self, folder: list, base_url: str) -> Generator[FTPFileRequest, None, None]:
    """For each firmware image in an FTP directory listing, request the sibling
    info_de.txt so metadata can be parsed before downloading the image.

    The release date is parsed best-effort from the listing entry; ``None`` is
    passed on when no format matches, and the metadata callback falls back to
    the date inside info_de.txt.
    """
    for image in self._image_file_filter(folder):
        image_path = os.path.join(base_url, image["filename"])
        release_date: datetime | None = None

        # FTP listings pad columns with runs of spaces; collapse them first.
        date_str = re.sub(r"\s\s+", " ", image["date"])

        # BUG FIX: the third format was "%b %D %H:%M" -- "%D" is not a valid
        # strptime directive, so it always raised ValueError (silently
        # suppressed) and "Mon DD HH:MM" style entries never parsed. It must
        # be the day-of-month directive "%d".
        date_formats: list[str] = ["%b %d %Y", "%d-%b-%Y %H:%M", "%b %d %H:%M"]
        for fmt in date_formats:
            with suppress(ValueError):
                release_date = datetime.strptime(date_str, fmt)
                break
        # Year-less listing entries parse with strptime's default year 1900;
        # such entries denote files from the current year.
        if release_date is not None and release_date.year == 1900:
            release_date = release_date.replace(year=datetime.now().year)

        info_path = os.path.join(base_url, "info_de.txt")
        yield FTPFileRequest(
            info_path,
            callback=self.parse_metadata_and_download_image,
            cb_kwargs={"image_path": image_path, "release_date": release_date},
        )
https://www.toptal.com/developers/gitignore/api/python,windows,linux,vim,visualstudiocode,macos 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=python,windows,linux,vim,visualstudiocode,macos 3 | 4 | ### Linux ### 5 | *~ 6 | 7 | # temporary files which can be created if a process still has a handle open of a deleted file 8 | .fuse_hidden* 9 | 10 | # KDE directory preferences 11 | .directory 12 | 13 | # Linux trash folder which might appear on any partition or disk 14 | .Trash-* 15 | 16 | # .nfs files are created when an open file is removed but is still being accessed 17 | .nfs* 18 | 19 | ### macOS ### 20 | # General 21 | .DS_Store 22 | .AppleDouble 23 | .LSOverride 24 | 25 | # Icon must end with two \r 26 | Icon 27 | 28 | 29 | # Thumbnails 30 | ._* 31 | 32 | # Files that might appear in the root of a volume 33 | .DocumentRevisions-V100 34 | .fseventsd 35 | .Spotlight-V100 36 | .TemporaryItems 37 | .Trashes 38 | .VolumeIcon.icns 39 | .com.apple.timemachine.donotpresent 40 | 41 | # Directories potentially created on remote AFP share 42 | .AppleDB 43 | .AppleDesktop 44 | Network Trash Folder 45 | Temporary Items 46 | .apdisk 47 | 48 | ### macOS Patch ### 49 | # iCloud generated files 50 | *.icloud 51 | 52 | ### Python ### 53 | # Byte-compiled / optimized / DLL files 54 | __pycache__/ 55 | *.py[cod] 56 | *$py.class 57 | 58 | # C extensions 59 | *.so 60 | 61 | # Distribution / packaging 62 | .Python 63 | build/ 64 | develop-eggs/ 65 | dist/ 66 | downloads/ 67 | eggs/ 68 | .eggs/ 69 | lib/ 70 | lib64/ 71 | parts/ 72 | sdist/ 73 | var/ 74 | wheels/ 75 | share/python-wheels/ 76 | *.egg-info/ 77 | .installed.cfg 78 | *.egg 79 | MANIFEST 80 | 81 | # PyInstaller 82 | # Usually these files are written by a python script from a template 83 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
84 | *.manifest 85 | *.spec 86 | 87 | # Installer logs 88 | pip-log.txt 89 | pip-delete-this-directory.txt 90 | 91 | # Unit test / coverage reports 92 | htmlcov/ 93 | .tox/ 94 | .nox/ 95 | .coverage 96 | .coverage.* 97 | .cache 98 | nosetests.xml 99 | coverage.xml 100 | *.cover 101 | *.py,cover 102 | .hypothesis/ 103 | .pytest_cache/ 104 | cover/ 105 | 106 | # Translations 107 | *.mo 108 | *.pot 109 | 110 | # Django stuff: 111 | *.log 112 | local_settings.py 113 | db.sqlite3 114 | db.sqlite3-journal 115 | 116 | # Flask stuff: 117 | instance/ 118 | .webassets-cache 119 | 120 | # Scrapy stuff: 121 | .scrapy 122 | 123 | # Sphinx documentation 124 | docs/_build/ 125 | 126 | # PyBuilder 127 | .pybuilder/ 128 | target/ 129 | 130 | # Jupyter Notebook 131 | .ipynb_checkpoints 132 | 133 | # IPython 134 | profile_default/ 135 | ipython_config.py 136 | 137 | # pyenv 138 | # For a library or package, you might want to ignore these files since the code is 139 | # intended to run in multiple environments; otherwise, check them in: 140 | # .python-version 141 | 142 | # pipenv 143 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 144 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 145 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 146 | # install all needed dependencies. 147 | #Pipfile.lock 148 | 149 | # poetry 150 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 151 | # This is especially recommended for binary packages to ensure reproducibility, and is more 152 | # commonly ignored for libraries. 153 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 154 | #poetry.lock 155 | 156 | # pdm 157 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
158 | #pdm.lock 159 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 160 | # in version control. 161 | # https://pdm.fming.dev/#use-with-ide 162 | .pdm.toml 163 | 164 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 165 | __pypackages__/ 166 | 167 | # Celery stuff 168 | celerybeat-schedule 169 | celerybeat.pid 170 | 171 | # SageMath parsed files 172 | *.sage.py 173 | 174 | # Environments 175 | .env 176 | .venv 177 | env/ 178 | venv/ 179 | ENV/ 180 | env.bak/ 181 | venv.bak/ 182 | 183 | # Spyder project settings 184 | .spyderproject 185 | .spyproject 186 | 187 | # Rope project settings 188 | .ropeproject 189 | 190 | # mkdocs documentation 191 | /site 192 | 193 | # mypy 194 | .mypy_cache/ 195 | .dmypy.json 196 | dmypy.json 197 | 198 | # Pyre type checker 199 | .pyre/ 200 | 201 | # pytype static type analyzer 202 | .pytype/ 203 | 204 | # Cython debug symbols 205 | cython_debug/ 206 | 207 | # PyCharm 208 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 209 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 210 | # and can be added to the global gitignore or merged into this file. For a more nuclear 211 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
212 | #.idea/ 213 | 214 | ### Python Patch ### 215 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration 216 | poetry.toml 217 | 218 | # ruff 219 | .ruff_cache/ 220 | 221 | # LSP config files 222 | pyrightconfig.json 223 | 224 | ### Vim ### 225 | # Swap 226 | [._]*.s[a-v][a-z] 227 | !*.svg # comment out if you don't need vector files 228 | [._]*.sw[a-p] 229 | [._]s[a-rt-v][a-z] 230 | [._]ss[a-gi-z] 231 | [._]sw[a-p] 232 | 233 | # Session 234 | Session.vim 235 | Sessionx.vim 236 | 237 | # Temporary 238 | .netrwhist 239 | # Auto-generated tag files 240 | tags 241 | # Persistent undo 242 | [._]*.un~ 243 | 244 | ### VisualStudioCode ### 245 | .vscode/* 246 | !.vscode/settings.json 247 | !.vscode/tasks.json 248 | !.vscode/launch.json 249 | !.vscode/extensions.json 250 | !.vscode/*.code-snippets 251 | 252 | # Local History for Visual Studio Code 253 | .history/ 254 | 255 | # Built Visual Studio Code Extensions 256 | *.vsix 257 | 258 | ### VisualStudioCode Patch ### 259 | # Ignore all local history of files 260 | .history 261 | .ionide 262 | 263 | ### Windows ### 264 | # Windows thumbnail cache files 265 | Thumbs.db 266 | Thumbs.db:encryptable 267 | ehthumbs.db 268 | ehthumbs_vista.db 269 | 270 | # Dump file 271 | *.stackdump 272 | 273 | # Folder config file 274 | [Dd]esktop.ini 275 | 276 | # Recycle Bin used on file shares 277 | $RECYCLE.BIN/ 278 | 279 | # Windows Installer files 280 | *.cab 281 | *.msi 282 | *.msix 283 | *.msm 284 | *.msp 285 | 286 | # Windows shortcuts 287 | *.lnk 288 | 289 | # End of https://www.toptal.com/developers/gitignore/api/python,windows,linux,vim,visualstudiocode,macos 290 | 291 | .vagrant 292 | -------------------------------------------------------------------------------- /replication/LICENSES/CC0-1.0.txt: -------------------------------------------------------------------------------- 1 | Creative Commons Legal Code 2 | 3 | CC0 1.0 Universal 4 | 5 | CREATIVE COMMONS CORPORATION IS NOT A 
LAW FIRM AND DOES NOT PROVIDE 6 | LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN 7 | ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS 8 | INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES 9 | REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS 10 | PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM 11 | THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED 12 | HEREUNDER. 13 | 14 | Statement of Purpose 15 | 16 | The laws of most jurisdictions throughout the world automatically confer 17 | exclusive Copyright and Related Rights (defined below) upon the creator 18 | and subsequent owner(s) (each and all, an "owner") of an original work of 19 | authorship and/or a database (each, a "Work"). 20 | 21 | Certain owners wish to permanently relinquish those rights to a Work for 22 | the purpose of contributing to a commons of creative, cultural and 23 | scientific works ("Commons") that the public can reliably and without fear 24 | of later claims of infringement build upon, modify, incorporate in other 25 | works, reuse and redistribute as freely as possible in any form whatsoever 26 | and for any purposes, including without limitation commercial purposes. 27 | These owners may contribute to the Commons to promote the ideal of a free 28 | culture and the further production of creative, cultural and scientific 29 | works, or to gain reputation or greater distribution for their Work in 30 | part through the use and efforts of others. 
31 | 32 | For these and/or other purposes and motivations, and without any 33 | expectation of additional consideration or compensation, the person 34 | associating CC0 with a Work (the "Affirmer"), to the extent that he or she 35 | is an owner of Copyright and Related Rights in the Work, voluntarily 36 | elects to apply CC0 to the Work and publicly distribute the Work under its 37 | terms, with knowledge of his or her Copyright and Related Rights in the 38 | Work and the meaning and intended legal effect of CC0 on those rights. 39 | 40 | 1. Copyright and Related Rights. A Work made available under CC0 may be 41 | protected by copyright and related or neighboring rights ("Copyright and 42 | Related Rights"). Copyright and Related Rights include, but are not 43 | limited to, the following: 44 | 45 | i. the right to reproduce, adapt, distribute, perform, display, 46 | communicate, and translate a Work; 47 | ii. moral rights retained by the original author(s) and/or performer(s); 48 | iii. publicity and privacy rights pertaining to a person's image or 49 | likeness depicted in a Work; 50 | iv. rights protecting against unfair competition in regards to a Work, 51 | subject to the limitations in paragraph 4(a), below; 52 | v. rights protecting the extraction, dissemination, use and reuse of data 53 | in a Work; 54 | vi. database rights (such as those arising under Directive 96/9/EC of the 55 | European Parliament and of the Council of 11 March 1996 on the legal 56 | protection of databases, and under any national implementation 57 | thereof, including any amended or successor version of such 58 | directive); and 59 | vii. other similar, equivalent or corresponding rights throughout the 60 | world based on applicable law or treaty, and any national 61 | implementations thereof. 62 | 63 | 2. Waiver. 
To the greatest extent permitted by, but not in contravention 64 | of, applicable law, Affirmer hereby overtly, fully, permanently, 65 | irrevocably and unconditionally waives, abandons, and surrenders all of 66 | Affirmer's Copyright and Related Rights and associated claims and causes 67 | of action, whether now known or unknown (including existing as well as 68 | future claims and causes of action), in the Work (i) in all territories 69 | worldwide, (ii) for the maximum duration provided by applicable law or 70 | treaty (including future time extensions), (iii) in any current or future 71 | medium and for any number of copies, and (iv) for any purpose whatsoever, 72 | including without limitation commercial, advertising or promotional 73 | purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each 74 | member of the public at large and to the detriment of Affirmer's heirs and 75 | successors, fully intending that such Waiver shall not be subject to 76 | revocation, rescission, cancellation, termination, or any other legal or 77 | equitable action to disrupt the quiet enjoyment of the Work by the public 78 | as contemplated by Affirmer's express Statement of Purpose. 79 | 80 | 3. Public License Fallback. Should any part of the Waiver for any reason 81 | be judged legally invalid or ineffective under applicable law, then the 82 | Waiver shall be preserved to the maximum extent permitted taking into 83 | account Affirmer's express Statement of Purpose. 
In addition, to the 84 | extent the Waiver is so judged Affirmer hereby grants to each affected 85 | person a royalty-free, non transferable, non sublicensable, non exclusive, 86 | irrevocable and unconditional license to exercise Affirmer's Copyright and 87 | Related Rights in the Work (i) in all territories worldwide, (ii) for the 88 | maximum duration provided by applicable law or treaty (including future 89 | time extensions), (iii) in any current or future medium and for any number 90 | of copies, and (iv) for any purpose whatsoever, including without 91 | limitation commercial, advertising or promotional purposes (the 92 | "License"). The License shall be deemed effective as of the date CC0 was 93 | applied by Affirmer to the Work. Should any part of the License for any 94 | reason be judged legally invalid or ineffective under applicable law, such 95 | partial invalidity or ineffectiveness shall not invalidate the remainder 96 | of the License, and in such case Affirmer hereby affirms that he or she 97 | will not (i) exercise any of his or her remaining Copyright and Related 98 | Rights in the Work or (ii) assert any associated claims and causes of 99 | action with respect to the Work, in either case contrary to Affirmer's 100 | express Statement of Purpose. 101 | 102 | 4. Limitations and Disclaimers. 103 | 104 | a. No trademark or patent rights held by Affirmer are waived, abandoned, 105 | surrendered, licensed or otherwise affected by this document. 106 | b. Affirmer offers the Work as-is and makes no representations or 107 | warranties of any kind concerning the Work, express, implied, 108 | statutory or otherwise, including without limitation warranties of 109 | title, merchantability, fitness for a particular purpose, non 110 | infringement, or the absence of latent or other defects, accuracy, or 111 | the present or absence of errors, whether or not discoverable, all to 112 | the greatest extent permissible under applicable law. 113 | c. 
Affirmer disclaims responsibility for clearing rights of other persons 114 | that may apply to the Work or any use thereof, including without 115 | limitation any person's Copyright and Related Rights in the Work. 116 | Further, Affirmer disclaims responsibility for obtaining any necessary 117 | consents, permissions or other rights required for any use of the 118 | Work. 119 | d. Affirmer understands and acknowledges that Creative Commons is not a 120 | party to this document and has no duty or obligation with respect to 121 | this CC0 or use of the Work. 122 | -------------------------------------------------------------------------------- /scrapers/lfwc_scraper/spiders/tplink.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import Generator 3 | 4 | from scrapy import Request 5 | from scrapy.http import Response 6 | 7 | from lfwc_scraper.custom_spiders import FirmwareSpider 8 | from lfwc_scraper.items import FirmwareItem 9 | 10 | 11 | class TPLink(FirmwareSpider): 12 | handle_httpstatus_list = [404] 13 | name = "tplink" 14 | 15 | allowed_domains = ["www.tp-link.com", "static.tp-link.com"] 16 | 17 | start_urls = [ 18 | "https://www.tp-link.com/de/support/download/", 19 | ] 20 | 21 | custom_settings = { 22 | "ROBOTSTXT_OBEY": True, 23 | "CONCURRENT_REQUESTS": 1, 24 | "CONCURRENT_ITEMS": 1, 25 | "DOWNLOAD_DELAY": 0.75, 26 | "RANDOMIZE_DOWNLOAD_DELAY": True, 27 | "REFERER_ENABLED": True, 28 | } 29 | 30 | xpath = { 31 | "products_on_page": '//a[contains(@class,"tp-product-link")]/@href', 32 | "product_pages": '//li[@class="tp-product-pagination-item"]/a[@class="tp-product-pagination-btn"]/@href', 33 | "product_name": '//h2[@class="product-name"]/text()|//label[@class="model-select"]/p/span/text()', 34 | "product_support_link": '//a[contains(@class,"support")]/@href', 35 | "firmware_download_link": '//tr[@class="basic-info"][1]//a[contains(@class, "download") and ' 36 | 
def parse(self, response: Response, **_: dict) -> Generator[Request, None, None]:
    """Parse the TP-Link download-center overview page.

    Each product category box is mapped to a corpus device class; categories
    mapping to "unknown" are skipped. For every product link in a kept
    category, one request to its download page is scheduled.
    """
    category_names = response.xpath('//span[@class="tp-m-show"]/text()').extract()
    category_xpath_selectors = response.xpath('//div[@class="item-box"]')

    for name, selector in zip(category_names, category_xpath_selectors):
        device_class = self.map_device_class(name)
        if device_class == "unknown":
            continue
        # Replaced leftover debug print() calls with the spider's logger so
        # scraping output stays on the configured log channel.
        self.logger.debug("category %s -> device class %s", name, device_class)
        links = selector.xpath('.//a[@class="ga-click" and contains(@href, "download")]/@href').extract()
        device_names = selector.xpath('.//a[@class="ga-click" and contains(@href, "download")]/text()').extract()
        for device_name, link in zip(device_names, links):
            self.logger.debug("%s -> %s", device_name, link)
            yield Request(
                url=response.urljoin(link),
                callback=self.select_hardware_revision,
                cb_kwargs={
                    "device_name": device_name,
                    "device_class": device_class,
                },
            )
@classmethod 86 | def get_firmware_items( 87 | cls, support_page: Response, device_name: str, device_class: str 88 | ) -> Generator[FirmwareItem, None, None]: 89 | file_urls = cls.extract_firmware_download_links(support_page) 90 | release_dates = cls.extract_firmware_release_dates(support_page) 91 | 92 | for link, release_date in zip(file_urls, release_dates): 93 | meta_data = cls.prepare_meta_data(device_name, device_class, link, release_date) 94 | yield from cls.item_pipeline(meta_data) 95 | 96 | @staticmethod 97 | def prepare_meta_data(device_name: str, device_class: str, file_url: str, firmware_release_date) -> dict: 98 | return { 99 | "file_urls": [file_url], 100 | "vendor": "TP-Link", 101 | "device_name": f"{device_name}", 102 | "firmware_version": file_url.replace(".zip", "").split("_")[-1], 103 | "device_class": device_class, 104 | "release_date": datetime.strptime(firmware_release_date.strip(), "%Y-%m-%d").isoformat(), 105 | } 106 | 107 | @classmethod 108 | def extract_firmware_download_links(cls, support_page: Response) -> list[str]: 109 | return [ 110 | support_page.urljoin(link) for link in support_page.xpath(cls.xpath["firmware_download_link"]).extract() 111 | ] 112 | 113 | @classmethod 114 | def extract_firmware_release_dates(cls, support_page: Response) -> str: 115 | return support_page.xpath(cls.xpath["firmware_release_date"]).extract() 116 | 117 | @staticmethod 118 | def map_device_class(category: str) -> str: 119 | if "repeater" in category.lower(): 120 | return "repeater" 121 | if "deco" in category.lower(): 122 | return "mesh" 123 | if "wlan-router" in category.lower(): 124 | return "router" 125 | if "mobile 3g/4g-router" in category.lower(): 126 | return "router" 127 | if "managed switches" in category.lower(): 128 | return "switch" 129 | if "powerline-adapter" in category.lower(): 130 | return "powerline" 131 | if "dsl-router" in category.lower(): 132 | return "router" 133 | if "mobil (3g/4g)" in category.lower(): 134 | return "router" 135 | 
if "soho switches" in category.lower(): 136 | return "switch" 137 | if "3g/4g-router" in category.lower(): 138 | return "router" 139 | if "router" in category.lower(): 140 | return "router" 141 | if "poe-switches" in category.lower(): 142 | return "switch" 143 | if "accesspoints" in category.lower(): 144 | return "accesspoint" 145 | if "business-wlan" in category.lower(): 146 | return "accesspoint" 147 | if "cloud-kameras" in category.lower(): 148 | return "ipcam" 149 | if "omada-cloud-sdn > controller" in category.lower(): 150 | return "controller" 151 | if "vpn-router" in category.lower(): 152 | return "router" 153 | if "aps zur deckenmontage" in category.lower(): 154 | return "accesspoint" 155 | if "omada-cloud-sdn > switches" in category.lower(): 156 | return "switch" 157 | if "smart-switches" in category.lower(): 158 | return "switch" 159 | if "outdoor-aps" in category.lower(): 160 | return "accesspoint" 161 | if "loadbalance-router" in category.lower(): 162 | return "router" 163 | if "omada-cloud-sdn > router" in category.lower(): 164 | return "router" 165 | if "easy-smart-switches" in category.lower(): 166 | return "switch" 167 | if "aps zur wandmontage" in category.lower(): 168 | return "accesspoint" 169 | if "outdoor-wlan" in category.lower(): 170 | return "accesspoint" 171 | if "outdoor-aps" in category.lower(): 172 | return "accesspoint" 173 | if "hardware-controller" in category.lower(): 174 | return "controller" 175 | if "vigi netzwerkkameras" in category.lower(): 176 | return "ipcam" 177 | if "vigi netzwerkvideorecorder" in category.lower(): 178 | return "ipcam-recorder" 179 | return "unknown" 180 | -------------------------------------------------------------------------------- /replication/replicate_lfwc/__main__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 Fraunhofer FKIE 2 | # SPDX-FileContributor: Marten Ringwelski 3 | # 4 | # SPDX-License-Identifier: GPL-3.0-or-later 5 | 
"""Command line interface for downloading, verifying, and uploading the
LFWC firmware corpus described by a ``corpus.csv`` file."""

import json
import pathlib as pl
import subprocess as sp
import time
import urllib.parse

import click
import pandas as pd

from . import fact, utils
from .types import Corpus, FirmwareImage, FirmwareImageStatus


def opinionated_dataframe_from_csv(csv_path: pl.Path) -> pd.DataFrame:
    """Load ``corpus.csv`` and normalize it to the internal column layout.

    Keeps only the columns this tool needs, renames them to the internal
    vocabulary, URL-decodes filenames, nulls out placeholder release dates,
    and derives a ``host`` column from each download URL.
    """
    df = pd.read_csv(
        csv_path,
        index_col=0,
    )

    # Restrict to the columns the tool actually uses, in a stable order.
    df = df[
        [
            "manufacturer",
            "device_name",
            "firmware_version",
            "release_date",
            "device_class",
            "filename",
            "compressed_firmware_size",
            "sha256",
            "source_link",
            "source_type",
            "wayback",
        ]
    ]
    # Rename to the internal (shorter) column vocabulary; identity mappings
    # from the original ("filename" -> "filename" etc.) were dropped as no-ops.
    df = df.rename(
        columns={
            "manufacturer": "vendor",
            "device_name": "device",
            "firmware_version": "version",
            "release_date": "release",
            "device_class": "class",
            "compressed_firmware_size": "size",
            "source_link": "url",
        },
    ).copy()

    # Filenames are stored URL-encoded in the CSV; decode for on-disk use.
    df["filename"] = df["filename"].astype(str).apply(urllib.parse.unquote)
    # "1970-01-01" is the placeholder for "release date unknown".
    df["release"] = df["release"].apply(
        lambda release: None if release == "1970-01-01" else release
    )
    # Derive the download host from each URL.
    df["host"] = (
        df["url"]
        .astype(str)
        .apply(urllib.parse.urlparse)
        .apply(lambda parsed: parsed.netloc)
    )

    return df


def aria2c_options_from_firmware_image(image) -> str:
    """Render one aria2c input-file entry (URL, output path, checksum)."""
    # fmt: off
    return (
        f"{image.url}\n"
        f" out={utils.image_path_from_firmware_image(image)}\n"
        f" checksum=sha-256={image.sha256}\n"
    )
    # fmt: on


@click.group(
    name="replicate_lfwc",
    # Ref: https://github.com/pallets/click/issues/486
    # Ref: https://github.com/ansible/molecule/commit/1de5946f606ab16be168c29eec7e8eb687a9698f
    context_settings=dict(max_content_width=9999),
)
@click.option(
    "--corpus-csv",
    type=click.Path(exists=True, path_type=pl.Path),
    help="Path to corpus.csv. Either use the file vendored with the paper, or a compatible file.",
    required=True,
)
@click.pass_context
def cli(click_ctx, corpus_csv: pl.Path):
    """Use aria2 to download LFWC"""
    # Parse the CSV once and share the dataframe with all subcommands.
    click_ctx.ensure_object(dict)
    corpus_df = opinionated_dataframe_from_csv(corpus_csv)
    click_ctx.obj["corpus-df"] = corpus_df


@cli.command()
@click.option(
    "--jobs", type=click.INT, default=1, help="The number concurrent downloads"
)
@click.option(
    "--corpus-dir",
    type=click.Path(file_okay=False, dir_okay=True, exists=False, path_type=pl.Path),
    help="Path to the corpus directory.",
    required=True,
)
@click.option(
    "--continue",
    "continue_",
    is_flag=True,
    help=("Continue downloading into an existing corpus directory."),
    default=False,
)
@click.option(
    "--use-wayback-machine",
    is_flag=True,
    help=(
        "Prefer the archive.org links, if available."
        " To download all available firmwares first, invoke the script without"
        " this option. Then invoke the script a second time with --use-wayback-machine"
        " set. Due to ratelimiting this will download 5 firmwares per minute."
    ),
    default=False,
)
@click.pass_context
def download(
    click_ctx,
    corpus_dir: pl.Path,
    jobs: int,
    continue_: bool,
    use_wayback_machine: bool,
):
    """Download all missing files."""
    corpus = Corpus(
        path=corpus_dir,
        dataframe=click_ctx.obj.get("corpus-df"),
    )
    del corpus_dir

    # Refuse to write into a non-empty directory unless --continue was given.
    if corpus.path.exists() and not continue_:
        if any(corpus.path.iterdir()):
            raise click.ClickException(
                f"{corpus.path} exists and is not empty."
                " Use --continue or specify an empty/non-existent directory as --corpus-dir."
            )

    corpus.path.mkdir(
        exist_ok=True,
        parents=False,
    )

    # Hashes of images that are already fully downloaded (neither missing
    # nor only partially downloaded); these are skipped below.
    downloaded_images_hashes = [
        image.sha256
        for image in corpus.iter_images()
        if not (
            FirmwareImageStatus.image_has_status(
                image, corpus, FirmwareImageStatus.MISSING
            )
            or FirmwareImageStatus.image_has_status(
                image, corpus, FirmwareImageStatus.DOWNLOAD_STARTED
            )
        )
    ]

    df = corpus.dataframe
    missing_df = df[~df["sha256"].isin(downloaded_images_hashes)].copy()
    if use_wayback_machine:
        # Images without an archive.org mirror are fetched from the vendor.
        normal_df = missing_df[missing_df["wayback"].isnull()]
        _download_dataframe_to(normal_df, jobs=jobs, dest=corpus.path)

        # Mirrored images are fetched from the wayback machine instead,
        # throttled to respect archive.org's rate limiting.
        wayback_df = missing_df[~missing_df["wayback"].isnull()].copy()
        wayback_df["url"] = wayback_df["wayback"]
        # See here for information about the rate limiting.
        # https://rationalwiki.org/wiki/Internet_Archive#Restrictions
        BACKOFF = 60
        DLS_PER_BACKOFF = 5
        for i in range(0, len(wayback_df), DLS_PER_BACKOFF):
            todo_df = wayback_df.iloc[i : i + DLS_PER_BACKOFF]
            _download_dataframe_to(todo_df, jobs=1, dest=corpus.path)
            time.sleep(BACKOFF)
    else:
        _download_dataframe_to(missing_df, jobs=jobs, dest=corpus.path)

    click.echo("Downloading finished, check failed files with\n" f" {cli.name} verify")


def _download_dataframe_to(df, jobs: int, dest: pl.Path) -> None:
    """Download every firmware image in *df* into *dest* via aria2c."""
    aria2_input_file = aria2_input_from_dataframe(df)

    sp.run(
        [
            "aria2c",
            "--connect-timeout=30",
            "--max-file-not-found=2",
            "--max-tries=5",
            "--lowest-speed-limit=1K",
            "--timeout=30",
            "--auto-file-renaming=false",
            "--check-certificate=false",  # linksys does not properly send x509 chain...
            f"--max-concurrent-downloads={jobs}",
            "--split=1",
            f"--dir={dest}",
            "--input-file=-",
        ],
        input=aria2_input_file.encode(),
        # Individual download failures are expected; they are reported
        # afterwards by the `verify` subcommand, so don't raise here.
        check=False,
    )


@cli.command()
@click.option(
    "--corpus-dir",
    type=click.Path(
        file_okay=False,
        dir_okay=True,
        exists=True,
        path_type=pl.Path,
    ),
    help="Path to the corpus directory.",
    required=True,
)
@click.option(
    "--json",
    "json_flag",
    is_flag=True,
    help=(
        "Print a json report of the download status of the firmware corpus to stdout."
        " The status report is a flat dictionary where keys are paths relative to"
        " the corpus directory and values are the firmware image's download status."
        " Possible values for status are: missing, download-started, hash-mismatch, and success.\n"
        " Note that missing means either that download failed,"
        " or that the file is not downloaded yet."
        " This is a limitation of aria2's api."
    ),
)
@click.pass_context
def verify(click_ctx, corpus_dir: pl.Path, json_flag: bool):
    """Prints the path and status of all firmwares that have a status other
    than 'success' to stderr.
    See --json for a description of possible values for status."""
    corpus = Corpus(
        path=corpus_dir,
        dataframe=click_ctx.obj.get("corpus-df"),
    )

    # Keys are relative paths and values are FirmwareImageStatus
    report = {}
    for image in corpus.dataframe.apply(FirmwareImage.from_row, axis=1):
        image_rel_path = utils.image_path_from_firmware_image(image)
        status = FirmwareImageStatus.from_image(image, corpus)
        if status != FirmwareImageStatus.SUCCESS:
            click.echo(
                f"{image_rel_path}:{status}",
                err=True,
            )

        report[image_rel_path] = status

    if json_flag:
        click.echo(json.dumps(report))


def aria2_input_from_dataframe(corpus_df) -> str:
    """Render the aria2c input file for every image in *corpus_df*."""
    return "".join(
        corpus_df.apply(FirmwareImage.from_row, axis=1)
        .apply(aria2c_options_from_firmware_image)
        .values
    )


@cli.command()
@click.pass_context
def dump_aria2_input(click_ctx):
    """Dump the input file that can be used directly with aria2c."""
    corpus_df = click_ctx.obj.get("corpus-df")

    header = (
        "# This file can be used with the aria2 download utility [1].\n"
        "# To download the firmware corpus, we suggest the following invocation of aria2:\n"
        "# ```\n"
        "# aria2c \\\n"
        "#     --connect-timeout=5 \\\n"
        "#     --max-file-not-found=2 \\\n"
        "#     --max-tries=2 \\\n"
        "#     --lowest-speed-limit=1K \\\n"
        "#     --timeout=5 \\\n"
        "#     --auto-file-renaming=false \\\n"
        "#     --save-session=session.aria2 \\\n"
        "#     --dir=path/to/corpus \\\n"
        "#     --input-file=firmwares.aria2c \n"
        "# ```\n"
        "#\n"
        "# [1]: https://aria2.github.io/\n"
    )

    aria2_input_file = aria2_input_from_dataframe(corpus_df)

    click.echo(header + aria2_input_file)


@cli.command()
@click.option(
    "--corpus-dir",
    type=click.Path(
        file_okay=False,
        dir_okay=True,
        exists=True,
        path_type=pl.Path,
    ),
    help="Path to the corpus directory.",
    required=True,
)
@click.option(
    "--url",
    type=click.STRING,
    help="The url to the FACT instance.",
    default="http://localhost:5000",
    required=True,
)
@click.option(
    "--plugins",
    help="A list of analysis plugins that should be analyzed on the given images.",
    type=click.Choice([plugin.value for plugin in fact.Plugin]),
    multiple=True,
    show_choices=False,
    prompt=False,
)
@click.pass_context
def upload_to_fact(click_ctx, corpus_dir: pl.Path, url: str, plugins: list[str]):
    """Upload the firmware corpus to fact. Takes a very long time (multiple months) to complete.
    Note that you can cancel this command anytime and resume by simply starting it again.
    """
    corpus = Corpus(
        path=corpus_dir,
        dataframe=click_ctx.obj.get("corpus-df"),
    )
    ctx = fact.Context(
        url=url,
        corpus=corpus,
    )

    if not corpus.path.exists():
        raise click.ClickException(
            f"Cannot upload non-existing corpus directory {corpus.path}"
        )

    POLL_INTERVAL = 5
    MAX_CONCURRENT_ANALYSIS = 1
    GIVE_FACT_A_REST = 7
    # Sometimes analyses can get stuck as reported by various issues in FACT:
    # https://github.com/fkie-cad/FACT_core/issues/1206
    # https://github.com/fkie-cad/FACT_core/issues/1178
    # https://github.com/fkie-cad/FACT_core/issues/1163
    # [...]
    #
    # To account for this, we check how much is still to do and when nothing changes after
    # some time, we mark the analysis as stuck and continue uploading.
    # Note that we have to ensure that ALL uploaded firmwares are stuck.
    # Otherwise we cannot be sure if they are stuck or whether they are simply not
    # scheduled.
    STUCK_ANALYSES_TIMEOUT = 60

    uploaded_count = 0
    # Only consider images that are fully downloaded.
    images = [
        image
        for image in corpus.iter_images()
        if not (
            FirmwareImageStatus.image_has_status(
                image, corpus, FirmwareImageStatus.MISSING
            )
            or FirmwareImageStatus.image_has_status(
                image, corpus, FirmwareImageStatus.DOWNLOAD_STARTED
            )
        )
    ]
    for image in images:
        # Resumability: skip anything FACT already knows about.
        if fact.firmware_is_uploaded(image, ctx):
            continue

        # Wait until FACT has capacity, bailing out if progress stalls
        # for longer than STUCK_ANALYSES_TIMEOUT (stuck analyses, see above).
        last_changed_time = time.time()
        while (
            len(uid2progress := fact.get_firmware_analysis_progress(ctx))
            > MAX_CONCURRENT_ANALYSIS
        ):
            time.sleep(POLL_INTERVAL)

            now = time.time()
            if uid2progress != fact.get_firmware_analysis_progress(ctx):
                last_changed_time = now
            elif now - last_changed_time > STUCK_ANALYSES_TIMEOUT:
                click.echo(f"Analyses are stuck: {uid2progress}", err=True)
                break

        click.echo(f"Uploading: {image}")

        try:
            fact.upload(image, plugins, ctx)
            click.echo(f"Uploading successful (uid: {fact.uid_from_image(image)}).")
            uploaded_count += 1
        except fact.UploadFailedError:
            click.echo("Uploading failed.", err=True)

        # Give FACT a moment between uploads.
        time.sleep(GIVE_FACT_A_REST)

    click.echo(f"Uploaded: {uploaded_count}")
    click.echo(f"Failed: {len(images) - uploaded_count}")


# Guard the entry point so importing this module (e.g. from a console-script
# wrapper or in tests) does not launch the CLI; `python -m replicate_lfwc`
# still works as before.
if __name__ == "__main__":
    cli()
Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 4 | 5 | Using Creative Commons Public Licenses 6 | 7 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 8 | 9 | Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors. 10 | 11 | Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. 
Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public. 12 | 13 | Creative Commons Attribution 4.0 International Public License 14 | 15 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 16 | 17 | Section 1 – Definitions. 18 | 19 | a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 20 | 21 | b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 22 | 23 | c. 
Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 24 | 25 | d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 26 | 27 | e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 28 | 29 | f. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 30 | 31 | g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 32 | 33 | h. Licensor means the individual(s) or entity(ies) granting rights under this Public License. 34 | 35 | i. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 36 | 37 | j. 
Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 38 | 39 | k. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 40 | 41 | Section 2 – Scope. 42 | 43 | a. License grant. 44 | 45 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 46 | 47 | A. reproduce and Share the Licensed Material, in whole or in part; and 48 | 49 | B. produce, reproduce, and Share Adapted Material. 50 | 51 | 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 52 | 53 | 3. Term. The term of this Public License is specified in Section 6(a). 54 | 55 | 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 56 | 57 | 5. Downstream recipients. 58 | 59 | A. Offer from the Licensor – Licensed Material. 
Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 60 | 61 | B. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 62 | 63 | 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 64 | 65 | b. Other rights. 66 | 67 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 68 | 69 | 2. Patent and trademark rights are not licensed under this Public License. 70 | 71 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. 72 | 73 | Section 3 – License Conditions. 74 | 75 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 76 | 77 | a. Attribution. 78 | 79 | 1. If You Share the Licensed Material (including in modified form), You must: 80 | 81 | A. 
retain the following if it is supplied by the Licensor with the Licensed Material: 82 | 83 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 84 | 85 | ii. a copyright notice; 86 | 87 | iii. a notice that refers to this Public License; 88 | 89 | iv. a notice that refers to the disclaimer of warranties; 90 | 91 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 92 | 93 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 94 | 95 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 96 | 97 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 98 | 99 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 100 | 101 | 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. 102 | 103 | Section 4 – Sui Generis Database Rights. 104 | 105 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 106 | 107 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; 108 | 109 | b. 
if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and 110 | 111 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 112 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 113 | 114 | Section 5 – Disclaimer of Warranties and Limitation of Liability. 115 | 116 | a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You. 117 | 118 | b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You. 119 | 120 | c. 
The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 121 | 122 | Section 6 – Term and Termination. 123 | 124 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 125 | 126 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 127 | 128 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 129 | 130 | 2. upon express reinstatement by the Licensor. 131 | 132 | c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 133 | 134 | d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 135 | 136 | e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 137 | 138 | Section 7 – Other Terms and Conditions. 139 | 140 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 141 | 142 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 143 | 144 | Section 8 – Interpretation. 145 | 146 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 
147 | 148 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 149 | 150 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 151 | 152 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 153 | 154 | Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 155 | 156 | Creative Commons may be contacted at creativecommons.org. 
157 | -------------------------------------------------------------------------------- /replication/LICENSES/CC-BY-SA-4.0.txt: -------------------------------------------------------------------------------- 1 | Creative Commons Attribution-ShareAlike 4.0 International 2 | 3 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 4 | 5 | Using Creative Commons Public Licenses 6 | 7 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 8 | 9 | Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors. 
10 | 11 | Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. 12 | 13 | Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public. 14 | 15 | Creative Commons Attribution-ShareAlike 4.0 International Public License 16 | 17 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 18 | 19 | Section 1 – Definitions. 20 | 21 | a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. 
For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 22 | 23 | b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 24 | 25 | c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. 26 | 27 | d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 28 | 29 | e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 30 | 31 | f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 32 | 33 | g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike. 34 | 35 | h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 36 | 37 | i. 
Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 38 | 39 | j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. 40 | 41 | k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 42 | 43 | l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 44 | 45 | m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 46 | 47 | Section 2 – Scope. 48 | 49 | a. License grant. 50 | 51 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 52 | 53 | A. reproduce and Share the Licensed Material, in whole or in part; and 54 | 55 | B. produce, reproduce, and Share Adapted Material. 56 | 57 | 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 58 | 59 | 3. Term. The term of this Public License is specified in Section 6(a). 60 | 61 | 4. 
Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 62 | 63 | 5. Downstream recipients. 64 | 65 | A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 66 | 67 | B. Additional offer from the Licensor – Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. 68 | 69 | C. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 70 | 71 | 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 72 | 73 | b. Other rights. 74 | 75 | 1. 
Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 76 | 77 | 2. Patent and trademark rights are not licensed under this Public License. 78 | 79 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. 80 | 81 | Section 3 – License Conditions. 82 | 83 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 84 | 85 | a. Attribution. 86 | 87 | 1. If You Share the Licensed Material (including in modified form), You must: 88 | 89 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 90 | 91 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 92 | 93 | ii. a copyright notice; 94 | 95 | iii. a notice that refers to this Public License; 96 | 97 | iv. a notice that refers to the disclaimer of warranties; 98 | 99 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 100 | 101 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 102 | 103 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 104 | 105 | 2. 
You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 106 | 107 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 108 | 109 | b. ShareAlike.In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 110 | 111 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License. 112 | 113 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 114 | 115 | 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. 116 | 117 | Section 4 – Sui Generis Database Rights. 118 | 119 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 120 | 121 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; 122 | 123 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and 124 | 125 | c. 
You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 126 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 127 | 128 | Section 5 – Disclaimer of Warranties and Limitation of Liability. 129 | 130 | a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You. 131 | 132 | b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You. 133 | 134 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 135 | 136 | Section 6 – Term and Termination. 137 | 138 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. 
However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 139 | 140 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 141 | 142 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 143 | 144 | 2. upon express reinstatement by the Licensor. 145 | 146 | c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 147 | 148 | d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 149 | 150 | e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 151 | 152 | Section 7 – Other Terms and Conditions. 153 | 154 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 155 | 156 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 157 | 158 | Section 8 – Interpretation. 159 | 160 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 161 | 162 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 163 | 164 | c. 
No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 165 | 166 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 167 | 168 | Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 169 | 170 | Creative Commons may be contacted at creativecommons.org. 171 | -------------------------------------------------------------------------------- /notebooks/notebooks/V_lfwc.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "6e6a56a3-92e8-4ff2-8d86-21a4b510aee0", 6 | "metadata": {}, 7 | "source": [ 8 | "# V. LFwC: A New Corpus to Demonstrate the Practicability of the Proposed Requirements\n", 9 | "\n", 10 | "We created a Linux Firmware Corpus (LFwC) to assess the practicability of our requirements. 
It is based on data until June 2023 and consists of 10,913 deduplicated and unpacked firmware images from ten known manufacturers. It includes both actual and historical firmware samples, covering 2,365\n", 11 | "unique devices across 22 classes. To provide an overview of LFwC, we added corpus data points to the bottom of Table II. We share as much data as legally possible and publish all scripts, tools, and virtual machines for replicability. We tear down LFwC’s unpacking barrier with an open source process\n", 12 | "for verified unpacking success." 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "id": "98db0f08-c1f4-46b7-8da1-5f28e40f4e60", 18 | "metadata": {}, 19 | "source": [ 20 | "## Preparations\n", 21 | "\n", 22 | "Below you will find preparatory stuff such as imports and constant definitions for use down the road." 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "id": "30046f07-d0da-4315-87cb-4928908a2922", 28 | "metadata": {}, 29 | "source": [ 30 | "### Imports" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": null, 36 | "id": "3b6c7834-38e9-487e-809d-a927bb594718", 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "import json\n", 41 | "from collections import deque\n", 42 | "from pathlib import Path\n", 43 | "\n", 44 | "import matplotlib.pyplot as plt\n", 45 | "import numpy as np\n", 46 | "import pandas as pd\n", 47 | "import seaborn as sns\n", 48 | "from matplotlib import rc\n", 49 | "from matplotlib.ticker import ScalarFormatter\n", 50 | "from packaging.version import Version, parse" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "id": "90f55d1e-34c2-4c6e-ab27-61553ad9ac69", 56 | "metadata": {}, 57 | "source": [ 58 | "### Constants" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "id": "390bba8f-c1c5-4e0e-b29f-c0c7ef084427", 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "CMAP: list[int] = deque(sns.color_palette(\"colorblind\", 
as_cmap=True))\n", 69 | "CMAP.rotate(-4)\n", 70 | "CMAP = list(CMAP)\n", 71 | "\n", 72 | "CMAP_2 = deque(CMAP.copy())\n", 73 | "CMAP_2.rotate(1)\n", 74 | "CMAP_2 = list(CMAP_2)\n", 75 | "\n", 76 | "CORPUS_PATH: Path = Path(\"../public_data/lfwc-full.csv\")\n", 77 | "FIGURE_DEST: Path = Path(\"../figures\")\n", 78 | "\n", 79 | "Y_LABELS: list[str] = [\n", 80 | " \"Ubiquiti\",\n", 81 | " \"TRENDnet\",\n", 82 | " \"NETGEAR\",\n", 83 | " \"Linksys\",\n", 84 | " \"EnGenius\",\n", 85 | " \"EDIMAX\",\n", 86 | " \"D-Link\",\n", 87 | " \"ASUS\",\n", 88 | " \"TP-Link\",\n", 89 | " \"AVM\",\n", 90 | "]" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "id": "183ae7f8-692b-437f-b874-4771717260c0", 96 | "metadata": {}, 97 | "source": [ 98 | "### Matplotlib Settings" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "id": "23595ffb-5540-40d4-9467-9a2136e9cfbe", 105 | "metadata": {}, 106 | "outputs": [], 107 | "source": [ 108 | "rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Times\"], \"size\": 15})\n", 109 | "rc(\"text\", usetex=True)\n", 110 | "pd.set_option(\"display.max_colwidth\", None)" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "id": "815a25d9-b801-49d6-bd34-580ccd83ccbc", 116 | "metadata": {}, 117 | "source": [ 118 | "### Read Data" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": null, 124 | "id": "9c626c04-d724-4518-8e66-a861c2947a43", 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "df = pd.read_csv(CORPUS_PATH, index_col=0)" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "id": "07cdc487-eb55-41f0-8b21-30bff2e5da68", 134 | "metadata": {}, 135 | "source": [ 136 | "## Peek Into Raw Data" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": null, 142 | "id": "3c805dcb-e002-4d68-90ac-222a4340f7d3", 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [ 146 | "df" 147 | ] 148 | }, 149 | { 150 | "cell_type": 
"markdown", 151 | "id": "0d496555-9e98-4515-8e87-97a7c2b09962", 152 | "metadata": {}, 153 | "source": [ 154 | "## Table III - LFwC: Corpus Statistics Overview" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": null, 160 | "id": "439f0bf0-c151-44d8-a759-869c5f99eee1", 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "def corpus_statistics_overview(df: pd.DataFrame) -> pd.DataFrame:\n", 165 | " df_stats: pd.DataFrame = (\n", 166 | " df.groupby([\"manufacturer\"], as_index=False)\n", 167 | " .nunique()[[\"manufacturer\", \"sha256\", \"device_name\"]]\n", 168 | " .rename(columns={\"manufacturer\": \"Manufact.\", \"sha256\": \"Samples\", \"device_name\": \"Devices\"})\n", 169 | " )\n", 170 | "\n", 171 | " df_stats[\"Mean Samples per Device\"] = (df_stats[\"Samples\"] / df_stats[\"Devices\"]).round(2)\n", 172 | " df_stats[\"Mean Size per Sample\"] = (\n", 173 | " df[[\"manufacturer\", \"compressed_firmware_size\"]]\n", 174 | " .groupby([\"manufacturer\"], as_index=False)\n", 175 | " .mean()[\"compressed_firmware_size\"]\n", 176 | " / 1024**2\n", 177 | " ).round(0)\n", 178 | "\n", 179 | " df_stats[\"Mean Files per Sample\"] = (\n", 180 | " df[[\"manufacturer\", \"files_in_firmware\"]].groupby([\"manufacturer\"], as_index=False).mean()[\"files_in_firmware\"]\n", 181 | " ).round(2)\n", 182 | "\n", 183 | " return df_stats" 184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": null, 189 | "id": "b1d820db-adfa-4b2f-8040-3b36484d9ce2", 190 | "metadata": {}, 191 | "outputs": [], 192 | "source": [ 193 | "df_stats: pd.DataFrame = corpus_statistics_overview(df)\n", 194 | "df_stats" 195 | ] 196 | }, 197 | { 198 | "cell_type": "markdown", 199 | "id": "b82d78f9-8679-41c4-a846-31fd89f4419e", 200 | "metadata": {}, 201 | "source": [ 202 | "## Figure 7 - LFwC firmware distribution per release date. For 747 samples, our scrapers could not extract any release date from the sources." 
203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": null, 208 | "id": "7243e296-e5bf-4044-a48f-d325f806b815", 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "def create_figure_7_firmware_distribution_per_release_date(df: pd.DataFrame) -> None:\n", 213 | " df_removed_day_from_date = df.copy()\n", 214 | " rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Times\"], \"size\": 16})\n", 215 | " df_removed_day_from_date[\"release_date\"] = (\n", 216 | " df_removed_day_from_date[\"release_date\"].str.split(\"-\").str[:-2].str.join(\"-\")\n", 217 | " )\n", 218 | " df_history = (\n", 219 | " df_removed_day_from_date.groupby([\"release_date\", \"manufacturer\"], as_index=False)\n", 220 | " .nunique()\n", 221 | " .pivot(index=\"release_date\", columns=[\"manufacturer\"], values=\"md5\")\n", 222 | " .fillna(value=0.0)\n", 223 | " )\n", 224 | " ax = df_history.plot(\n", 225 | " kind=\"bar\",\n", 226 | " grid=True,\n", 227 | " stacked=True,\n", 228 | " logy=False,\n", 229 | " figsize=(8, 6),\n", 230 | " rot=50,\n", 231 | " legend=False,\n", 232 | " edgecolor=\"black\",\n", 233 | " color=CMAP_2,\n", 234 | " )\n", 235 | " ax.set_xticklabels([\"unk.\"] + [str(i) for i in range(2005, 2024)])\n", 236 | " ax.set_ylabel(\"Sample Quantity [\\\\#]\")\n", 237 | " ax.set_xlabel(\"Release Year\")\n", 238 | " ax.legend(ncols=4, bbox_to_anchor=(0.9475, 1.275), labels=Y_LABELS[::-1], fontsize=13)\n", 239 | " ax.set_axisbelow(True)\n", 240 | " ax.yaxis.set_major_formatter(ScalarFormatter())\n", 241 | " plt.tight_layout()\n", 242 | " plt.savefig(FIGURE_DEST / \"f7_corpus_release_dates.pdf\", bbox_inches=\"tight\")\n", 243 | " rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Times\"], \"size\": 15})\n", 244 | " plt.show()" 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": null, 250 | "id": "93829801-57cf-4ef4-b7ff-aa0ca7997967", 251 | "metadata": {}, 252 | "outputs": [], 253 | "source": [ 254 | 
"create_figure_7_firmware_distribution_per_release_date(df)" 255 | ] 256 | }, 257 | { 258 | "cell_type": "markdown", 259 | "id": "cb22a80a-480c-4f7d-8795-799723fe14a1", 260 | "metadata": {}, 261 | "source": [ 262 | "## Figure 8 - Distribution of device classes in LFwC. \n", 263 | "\n", 264 | "The three most prevalent classes are routers (49%), switches (14%), and access points (12%). We bundled device classes with less than 150 samples into the meta class misc. It contains: controller, board, converter, encoder, gateway, kvm, media, nas, phone, power supply, printer, recorder, san, and wifi-usb." 265 | ] 266 | }, 267 | { 268 | "cell_type": "code", 269 | "execution_count": null, 270 | "id": "f8aaa311-f50e-493f-b875-41c057a68fad", 271 | "metadata": {}, 272 | "outputs": [], 273 | "source": [ 274 | "def create_figure_8_distribution_of_device_classes_in_lfwc(df: pd.DataFrame) -> None:\n", 275 | " df_corpus_misc_classes = df.copy()\n", 276 | "\n", 277 | " flt = df_corpus_misc_classes[\"device_class\"].str.contains(\n", 278 | " \"controller|board|converter|encoder|gateway|kvm|media|nas|phone|power_supply|printer|recorder|san|wifi-usb\"\n", 279 | " )\n", 280 | " df_corpus_misc_classes.loc[flt, \"device_class\"] = \"misc\"\n", 281 | "\n", 282 | " by_classes = (\n", 283 | " df_corpus_misc_classes.groupby([\"device_class\", \"manufacturer\"], as_index=False)\n", 284 | " .nunique()\n", 285 | " .pivot(index=\"device_class\", columns=[\"manufacturer\"], values=\"md5\")\n", 286 | " .fillna(value=0.0)\n", 287 | " )\n", 288 | "\n", 289 | " rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Times\"], \"size\": 18})\n", 290 | " ax = by_classes.plot(\n", 291 | " kind=\"bar\",\n", 292 | " grid=True,\n", 293 | " stacked=False,\n", 294 | " logy=True,\n", 295 | " figsize=(20, 4.5),\n", 296 | " color=CMAP_2,\n", 297 | " edgecolor=\"black\",\n", 298 | " legend=False,\n", 299 | " width=0.8,\n", 300 | " rot=0,\n", 301 | " )\n", 302 | " ax.set_axisbelow(True)\n", 303 | " 
ax.yaxis.set_major_formatter(ScalarFormatter())\n", 304 | " ax.set_ylabel(\"Sample Quantity [\\\\#, log]\")\n", 305 | " ax.set_xlabel(\"Device Class\")\n", 306 | " ax.set_xlim(-0.41, 8.49)\n", 307 | " ax.legend(ncols=10, bbox_to_anchor=(1.0375, 1.2), labels=Y_LABELS[::-1], fontsize=16)\n", 308 | " for i in range(0, 11):\n", 309 | " ax.axvline(i + 0.500, color=\"black\", linewidth=1)\n", 310 | " plt.tight_layout()\n", 311 | " plt.savefig(FIGURE_DEST / \"f8_corpus_classes.pdf\", bbox_inches=\"tight\")\n", 312 | " rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Times\"], \"size\": 15})\n", 313 | " plt.show()" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": null, 319 | "id": "e5b46197-b183-4c9f-b785-30ae3395d4e3", 320 | "metadata": {}, 321 | "outputs": [], 322 | "source": [ 323 | "create_figure_8_distribution_of_device_classes_in_lfwc(df)" 324 | ] 325 | }, 326 | { 327 | "cell_type": "markdown", 328 | "id": "29dec476-8056-4d7c-b90a-4068736c6f34", 329 | "metadata": {}, 330 | "source": [ 331 | "## Figure 9 - Detected Linux kernel banners in LFwC samples." 
332 | ] 333 | }, 334 | { 335 | "cell_type": "code", 336 | "execution_count": null, 337 | "id": "e46409cc-a1b6-469b-9f37-305aa973b710", 338 | "metadata": {}, 339 | "outputs": [], 340 | "source": [ 341 | "def create_figure_9_detected_linux_kernel_banners_in_lfwc_samples(df: pd.DataFrame) -> None:\n", 342 | " df_linux_prep = df.copy()\n", 343 | " linux_series = df_linux_prep[df_linux_prep[\"linux_banners\"].notnull()][\"linux_banners\"].apply(\n", 344 | " lambda x: x.split(\"|\")\n", 345 | " )\n", 346 | " df_linux_prep[\"linux_banners\"] = linux_series\n", 347 | " df_linux_prep = df_linux_prep.explode(\"linux_banners\", ignore_index=True)\n", 348 | "\n", 349 | " def bucketize(ver_str):\n", 350 | " if isinstance(ver_str, float):\n", 351 | " return parse(\"0.0\")\n", 352 | " prepared = ver_str.split(\" \")[-1].split(\".\")[0:2]\n", 353 | " ver = parse(\".\".join(prepared))\n", 354 | " return ver\n", 355 | "\n", 356 | " df_bucketize_version = df_linux_prep.copy()\n", 357 | " df_bucketize_version[\"linux_banners\"] = df_bucketize_version[\"linux_banners\"].apply(bucketize)\n", 358 | " df_bucketize_version\n", 359 | " rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Times\"], \"size\": 22})\n", 360 | " df_linux_banners = (\n", 361 | " df_bucketize_version.groupby([\"linux_banners\", \"manufacturer\"], as_index=False)\n", 362 | " .nunique()\n", 363 | " .pivot(index=\"linux_banners\", columns=[\"manufacturer\"], values=\"md5\")\n", 364 | " .fillna(value=0.0)\n", 365 | " )\n", 366 | "\n", 367 | " ax = df_linux_banners.plot(\n", 368 | " kind=\"barh\",\n", 369 | " grid=True,\n", 370 | " stacked=True,\n", 371 | " logx=True,\n", 372 | " figsize=(21, 7),\n", 373 | " rot=0,\n", 374 | " legend=False,\n", 375 | " edgecolor=\"black\",\n", 376 | " color=[\"grey\"],\n", 377 | " )\n", 378 | " ax.set_ylabel(None)\n", 379 | " ax.set_xlabel(\"Detected Linux Kernel Version Banners [Grouped by Major.Minor, log]\")\n", 380 | " ax.set_yticklabels([\"unk.\"] + ax.get_yticklabels()[1:], 
ha=\"left\", va=\"center\", position=(-0.0275, 0))\n", 381 | " ax.set_axisbelow(True)\n", 382 | " for i in range(0, 19):\n", 383 | " x = df_linux_banners.iloc[i].sum()\n", 384 | " plt.text(x + 5, i, int(x), va=\"center\")\n", 385 | " ax.xaxis.set_major_formatter(ScalarFormatter())\n", 386 | " plt.tight_layout()\n", 387 | " plt.savefig(FIGURE_DEST / \"f9_corpus_linux_banners.pdf\", bbox_inches=\"tight\")\n", 388 | " plt.show()" 389 | ] 390 | }, 391 | { 392 | "cell_type": "code", 393 | "execution_count": null, 394 | "id": "b812b893-76d5-4c65-b58f-3d05f163be5b", 395 | "metadata": {}, 396 | "outputs": [], 397 | "source": [ 398 | "create_figure_9_detected_linux_kernel_banners_in_lfwc_samples(df)" 399 | ] 400 | }, 401 | { 402 | "cell_type": "markdown", 403 | "id": "b09bdc97-fe03-4318-be41-a7cdffa4d9fd", 404 | "metadata": {}, 405 | "source": [ 406 | "## Figure 10 - Distribution of the nine detected ISAs in LFwC across all vendors\n", 407 | "\n", 408 | "The three most prevalent ISA families are MIPS (5,993 samples), ARM (4,764), and x86 (2,095). There are 13,429 unique findings on ISAs across all samples, because included subsystems do not necessarily run the same ISA as the main system."
409 | ] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "execution_count": null, 414 | "id": "8b93dc03-4df3-4636-a5ba-2582fe7ce466", 415 | "metadata": {}, 416 | "outputs": [], 417 | "source": [ 418 | "def create_figure_10_isa_distribution(df: pd.DataFrame) -> None:\n", 419 | " df_arch_prep = df.copy()\n", 420 | " arch_series = df_arch_prep[df_arch_prep[\"elf_architectures\"].notnull()][\"elf_architectures\"].apply(\n", 421 | " lambda x: x.split(\"|\")\n", 422 | " )\n", 423 | " df_arch_prep[\"elf_architectures\"] = arch_series\n", 424 | " df_arch_prep\n", 425 | "\n", 426 | " rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Times\"], \"size\": 18})\n", 427 | "\n", 428 | " by_arch = df_arch_prep.explode(\"elf_architectures\", ignore_index=True)\n", 429 | " by_arch = (\n", 430 | " by_arch.groupby([\"elf_architectures\", \"manufacturer\"], as_index=False)\n", 431 | " .nunique()\n", 432 | " .pivot(index=\"elf_architectures\", columns=[\"manufacturer\"], values=\"md5\")\n", 433 | " .fillna(value=0.0)\n", 434 | " )\n", 435 | " # by_arch\n", 436 | " ax = by_arch.plot(\n", 437 | " kind=\"bar\",\n", 438 | " grid=True,\n", 439 | " stacked=False,\n", 440 | " logy=True,\n", 441 | " figsize=(20, 4),\n", 442 | " color=CMAP_2,\n", 443 | " edgecolor=\"black\",\n", 444 | " legend=False,\n", 445 | " width=0.8,\n", 446 | " rot=0,\n", 447 | " )\n", 448 | " ax.set_axisbelow(True)\n", 449 | " ax.yaxis.set_major_formatter(ScalarFormatter())\n", 450 | " ax.set_ylabel(\"Sample Quantity [\\\\#, log]\")\n", 451 | " ax.set_xlabel(\"Detected Architecture\")\n", 452 | " ax.set_xlim(-0.5, 8.5)\n", 453 | " ax.set_xticklabels([\"ARM\", \"ESP\", \"M68K\", \"MIPS\", \"PPC\", \"RISCV\", \"s/390\", \"SPARC\", \"x86\"])\n", 454 | " for i in range(0, 11):\n", 455 | " ax.axvline(i + 0.505, color=\"black\", linewidth=1)\n", 456 | " plt.tight_layout()\n", 457 | " plt.savefig(FIGURE_DEST / \"f10_corpus_architectures.pdf\", bbox_inches=\"tight\")\n", 458 | " rc(\"font\", **{\"family\": \"serif\", 
\"serif\": [\"Times\"], \"size\": 15})\n", 459 | " plt.show()" 460 | ] 461 | }, 462 | { 463 | "cell_type": "code", 464 | "execution_count": null, 465 | "id": "5a7890f9-7b73-49c9-937c-430c49afa464", 466 | "metadata": {}, 467 | "outputs": [], 468 | "source": [ 469 | "create_figure_10_isa_distribution(df)" 470 | ] 471 | }, 472 | { 473 | "cell_type": "markdown", 474 | "id": "14cee044-ef30-42a2-91cd-89f8b8c9f926", 475 | "metadata": {}, 476 | "source": [ 477 | "### Interactive\n", 478 | "\n", 479 | "Now it's your turn to play with the corpus! Are you comfortable with [pandas](https://pandas.pydata.org/docs/user_guide/index.html)? You can do some amazing stuff to query the data!" 480 | ] 481 | }, 482 | { 483 | "cell_type": "code", 484 | "execution_count": null, 485 | "id": "43cedbb4-ee94-4dec-b6b7-62b81715e8fd", 486 | "metadata": {}, 487 | "outputs": [], 488 | "source": [ 489 | "print(\"e.g., only show firmware samples where a MIPS architecture was found:\")\n", 490 | "\n", 491 | "df[\n", 492 | " df[\"elf_architectures\"]. # take the \"elf_architectures\" column\n", 493 | " fillna(\"\"). # replace all NULL values, where no architecture was found, with an empty string\n", 494 | " str.contains(\"mips\") # get all rows in the dataframe that contain the \"mips\" keyword in column \"elf_architectures\"\n", 495 | "]" 496 | ] 497 | } 498 | ], 499 | "metadata": { 500 | "kernelspec": { 501 | "display_name": "Python 3 (ipykernel)", 502 | "language": "python", 503 | "name": "python3" 504 | }, 505 | "language_info": { 506 | "codemirror_mode": { 507 | "name": "ipython", 508 | "version": 3 509 | }, 510 | "file_extension": ".py", 511 | "mimetype": "text/x-python", 512 | "name": "python", 513 | "nbconvert_exporter": "python", 514 | "pygments_lexer": "ipython3", 515 | "version": "3.10.12" 516 | } 517 | }, 518 | "nbformat": 4, 519 | "nbformat_minor": 5 520 | } 521 | --------------------------------------------------------------------------------