├── .github
└── workflows
│ └── jekyll.yml
├── .gitignore
├── 404.html
├── CNAME
├── CODE_OF_CONDUCT.md
├── Gemfile
├── Makefile
├── README.md
├── _config.yml
├── _data
└── navigation.yml
├── _includes
├── details.html
├── ip-table.html
├── location-table.html
├── machines-map.html
├── pool-table.html
└── use-table.html
├── _machines
├── ainia.md
├── armyofdockerness.md
├── asteria.md
├── autumn.md
├── bremusa.md
├── carpenter.md
├── ci-mirage-io.md
├── ci3.md
├── ci4.md
├── ci5.md
├── ci6.md
├── clete.md
├── dev1.md
├── dev2.md
├── doc.md
├── docs-staging.md
├── dopey.md
├── doris.md
├── eumache.md
├── grumpy.md
├── happy.md
├── hopi.md
├── i7-worker-01.md
├── i7-worker-02.md
├── i7-worker-03.md
├── i7-worker-04.md
├── iphito.md
├── kydoime.md
├── laodoke.md
├── leafcutter.md
├── m1-worker-01.md
├── m1-worker-02.md
├── m1-worker-03.md
├── m1-worker-04.md
├── marpe.md
├── michael.md
├── molpadia.md
├── navajo.md
├── ocaml-1.md
├── ocaml-2.md
├── odawa.md
├── okypous.md
├── opam-4.md
├── opam-5.md
├── oregano.md
├── orithia.md
├── pascal.md
├── phoebe.md
├── raphael.md
├── riscv-bm-a1.md
├── riscv-bm-a2.md
├── riscv-bm-a3.md
├── riscv-bm-a4.md
├── roo.md
├── rosemary.md
├── s390x-2.md
├── s390x.md
├── scyleia.md
├── sleepy.md
├── spring.md
├── staging.md
├── summer.md
├── tigger.md
├── toxis.md
├── turing.md
├── v2.md
├── v3b.md
├── v3c.md
├── watch.md
├── winter.md
├── x86-bm-c1.md
├── x86-bm-c2.md
├── x86-bm-c3.md
├── x86-bm-c4.md
├── x86-bm-c5.md
├── x86-bm-c6.md
├── x86-bm-c7.md
├── x86-bm-c8.md
└── x86-bm-c9.md
├── _posts
├── 2022-11-04-relocating-ocaml-org.md
├── 2022-11-08-tarsnap-backups.md
├── 2023-02-27-watch-ocaml-org.md
├── 2023-03-09-moving-opam-ocaml-org.md
├── 2023-03-10-opam-repository-mingw.md
├── 2023-03-17-opam-ci-ocaml-org.md
├── 2023-04-06-maintenance-operations.md
├── 2023-04-25-updated-images.md
├── 2023-04-26-check-ci-ocaml-org.md
├── 2023-05-05-opam-repo-ci.md
├── 2023-05-30-emissions-monitoring.md
├── 2023-06-09-grafana-changes.md
├── 2023-06-15-opam-repo-ci-ocaml-ci.md
├── 2023-06-28-upgrading-linux-distros.md
├── 2023-08-08-freebsd-testing.md
├── 2023-09-21-more-freebsd-news.md
├── 2023-11-06-current-bench-maintenance.md
├── 2023-11-09-macos-sonoma.md
├── 2023-12-04-services-moved.md
├── 2024-01-14-electrical-work.md
└── 2024-02-19-current-bench-maintenance.md
├── about.md
├── big.md
├── by-ip
├── 128.232.md
├── 136.144.md
├── 147.75.md
├── 148.100.md
├── 163.172.md
├── 212.47.md
├── 51.159.md
├── 54.146.md
└── 54.224.md
├── by-location.md
├── by-location
├── aws.md
├── caelum.md
├── custodian.md
├── equinix.md
├── iitm.md
├── marist-college.md
└── scaleway.md
├── by-use
├── benchmarking.md
├── freebsd-x86_64.md
├── general.md
├── linux-arm64.md
├── linux-ppc64.md
├── linux-riscv64.md
├── linux-s390x.md
├── linux-x86_64.md
├── macos-arm64.md
├── macos-x86_64.md
├── openbsd-amd64.md
├── windows-amd64.md
└── windows-x86_64.md
├── index.md
├── machines.csv
├── machines.md
├── opam-ocaml-org.md
├── scripts
└── generate-template.sh
├── summary.html
├── watch-ocaml-org.md
├── www-ocaml-org.md
└── zero.md
/.github/workflows/jekyll.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # Sample workflow for building and deploying a Jekyll site to GitHub Pages
7 | name: Deploy Jekyll site to Pages
8 |
9 | on:
10 | # Runs on pushes targeting the default branch
11 | push:
12 | branches: ["main"]
13 |
14 | # Allows you to run this workflow manually from the Actions tab
15 | workflow_dispatch:
16 |
17 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
18 | permissions:
19 | contents: read
20 | pages: write
21 | id-token: write
22 |
23 | # Allow one concurrent deployment
24 | concurrency:
25 | group: "pages"
26 | cancel-in-progress: true
27 |
28 | jobs:
29 | # Build job
30 | build:
31 | runs-on: ubuntu-latest
32 | steps:
33 | - name: Checkout
34 | uses: actions/checkout@v4
35 | - name: Setup Ruby
36 | uses: ruby/setup-ruby@0a29871fe2b0200a17a4497bae54fe5df0d973aa # v1.115.3
37 | with:
38 | ruby-version: '3.0' # Not needed with a .ruby-version file
39 | bundler-cache: true # runs 'bundle install' and caches installed gems automatically
40 | cache-version: 0 # Increment this number if you need to re-download cached gems
41 | - name: Setup Pages
42 | id: pages
43 | uses: actions/configure-pages@v4
44 | - name: Build with Jekyll
45 | # Outputs to the './_site' directory by default
46 | run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
47 | env:
48 | JEKYLL_ENV: production
49 | - name: Upload artifact
50 | # Automatically uploads an artifact from the './_site' directory by default
51 | uses: actions/upload-pages-artifact@v3
52 |
53 | # Deployment job
54 | deploy:
55 | environment:
56 | name: github-pages
57 | url: ${{ steps.deployment.outputs.page_url }}
58 | runs-on: ubuntu-latest
59 | needs: build
60 | steps:
61 | - name: Deploy to GitHub Pages
62 | id: deployment
63 | uses: actions/deploy-pages@v4
64 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .*.swp
2 | _site
3 | Gemfile.lock
4 | .sass-cache
5 | .bundle
6 | vendor
7 |
--------------------------------------------------------------------------------
/404.html:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | ---
4 |
5 |
18 |
19 |
20 |
404
21 |
22 |
Page not found :(
23 |
The requested page could not be found.
24 |
25 |
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | infra.ocaml.org
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | This project has adopted the [OCaml Code of Conduct](https://github.com/ocaml/code-of-conduct/blob/main/CODE_OF_CONDUCT.md).
4 |
5 | # Enforcement
6 |
7 | This project follows the OCaml Code of Conduct [enforcement policy](https://github.com/ocaml/code-of-conduct/blob/main/CODE_OF_CONDUCT.md#enforcement).
8 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source "https://rubygems.org"
2 |
3 | # Hello! This is where you manage which Jekyll version is used to run.
4 | # When you want to use a different version, change it below, save the
5 | # file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
6 | #
7 | # bundle exec jekyll serve
8 | #
9 | # This will help ensure the proper Jekyll version is running.
10 | # Happy Jekylling!
11 | gem "jekyll", "~> 3.9.0"
12 |
13 | gem "jekyll-include-cache", group: :jekyll_plugins
14 |
15 | # If you want to use GitHub Pages, remove the "gem "jekyll"" above and
16 | # uncomment the line below. To upgrade, run `bundle update github-pages`.
17 | gem "github-pages", group: :jekyll_plugins # NOTE(review): the comment on lines 15-16 says to remove the explicit "jekyll" gem above when enabling this — both are currently active; confirm which is intended
18 |
19 | # If you have any plugins, put them here!
20 | group :jekyll_plugins do
21 | gem "jekyll-feed", "~> 0.6"
22 | end
23 |
24 | # Windows does not include zoneinfo files, so bundle the tzinfo-data gem
25 | # and associated library.
26 | platforms :mingw, :x64_mingw, :mswin, :jruby do
27 | gem "tzinfo", "~> 1.2"
28 | gem "tzinfo-data"
29 | end
30 |
31 | # Performance-booster for watching directories on Windows
32 | gem "wdm", "~> 0.1.0", :platforms => [:mingw, :x64_mingw, :mswin]
33 |
34 | # kramdown v2 ships without the gfm parser by default. If you're using
35 | # kramdown v1, comment out this line.
36 | gem "kramdown-parser-gfm"
37 |
38 |
39 | gem "webrick", "~> 1.7"
40 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | serve:
2 | bundle exec -- jekyll serve --host 0.0.0.0 --port 8080
3 |
4 | build:
5 | bundle exec -- jekyll build
6 |
7 | push: build
8 | git push -v origin
9 |
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Documentation about the OCaml community's infrastructure. This repo
2 | is used for the [wiki](https://github.com/ocaml/infrastructure/wiki)
3 | and to serve [GitHub Pages](https://ocaml.github.io/infrastructure).
4 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | # Welcome to Jekyll!
2 | #
3 | # This config file is meant for settings that affect your whole blog, values
4 | # which you are expected to set up once and rarely edit after that. If you find
5 | # yourself editing this file very often, consider using Jekyll's data files
6 | # feature for the data you need to update frequently.
7 | #
8 | # For technical reasons, this file is *NOT* reloaded automatically when you use
9 | # 'bundle exec jekyll serve'. If you change this file, please restart the server process.
10 |
11 | # Site settings
12 | # These are used to personalize your new site. If you look in the HTML files,
13 | # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
14 | # You can create any custom variable you would like, and they will be accessible
15 | # in the templates via {{ site.myvariable }}.
16 | title: OCaml Infrastructure
17 | email: mark@tarides.com
18 | baseurl: "" # the subpath of your site, e.g. /blog
19 | url: "" # the base hostname & protocol for your site, e.g. http://example.com
20 |
21 | # Build settings
22 | markdown: kramdown
23 | remote_theme: "mmistakes/minimal-mistakes@4.24.0"
24 | plugins:
25 | - jekyll-feed
26 | - jekyll-include-cache
27 |
28 | header_pages:
29 | - about.md
30 | - machines.md
31 | - by-location.md
32 |
33 | # Exclude from processing.
34 | # The following items will not be processed, by default. Create a custom list
35 | # to override the default setting.
36 | # exclude:
37 | # - Gemfile
38 | # - Gemfile.lock
39 | # - node_modules
40 | # - vendor/bundle/
41 | # - vendor/cache/
42 | # - vendor/gems/
43 | # - vendor/ruby/
44 |
45 | collections:
46 | machines:
47 | output: true
48 |
49 | defaults:
50 | - scope:
51 | path: ""
52 | values:
53 | layout: single
54 | nav: main
55 | toc: true
56 | show_date: true
57 | toc_sticky: true
58 | sidebar:
59 | nav: left
60 | - scope:
61 | type: "machines"
62 | values:
63 | layout: single
64 | sidebar:
65 | nav: by-use
66 | - scope:
67 | path: "by-use"
68 | values:
69 | layout: single
70 | sidebar:
71 | nav: by-use
72 | - scope:
73 | path: "by-location"
74 | values:
75 | layout: single
76 | sidebar:
77 | nav: by-location
78 | - scope:
79 | path: "by-ip"
80 | values:
81 | layout: single
82 | sidebar:
83 | nav: by-ip
84 |
85 |
86 |
--------------------------------------------------------------------------------
/_data/navigation.yml:
--------------------------------------------------------------------------------
1 | main:
2 | - title: Machines
3 | url: /by-use/general
4 | - title: By Location
5 | url: /by-location/caelum
6 | - title: By IP
7 | url: /by-ip/128.232
8 |
9 | left:
10 | - title: www.ocaml.org
11 | url: www-ocaml-org
12 | - title: opam.ocaml.org
13 | url: opam-ocaml-org
14 | - title: watch.ocaml.org
15 | url: watch-ocaml-org
16 |
17 | by-use:
18 | - title: General
19 | url: /by-use/general
20 | - title: Benchmarking
21 | url: /by-use/benchmarking
22 | - title: Worker Pools
23 | children:
24 | - title: Linux-x86_64
25 | url: /by-use/linux-x86_64
26 | - title: Linux-arm64
27 | url: /by-use/linux-arm64
28 | - title: Linux-ppc64
29 | url: /by-use/linux-ppc64
30 | - title: Windows-x86_64
31 | url: /by-use/windows-x86_64
32 | - title: Linux-s390x
33 | url: /by-use/linux-s390x
34 | - title: Linux-riscv64
35 | url: /by-use/linux-riscv64
36 | - title: MacOS-x86_64
37 | url: /by-use/macos-x86_64
38 | - title: MacOS-arm64
39 | url: /by-use/macos-arm64
40 | - title: FreeBSD-x86_64
41 | url: /by-use/freebsd-x86_64
42 | - title: Windows-amd64
43 | url: /by-use/windows-amd64
44 | - title: OpenBSD-amd64
45 | url: /by-use/openbsd-amd64
46 |
47 | by-location:
48 | - title: Locations
49 | children:
50 | - title: Caelum
51 | url: /by-location/caelum
52 | - title: Equinix
53 | url: /by-location/equinix
54 | - title: Scaleway
55 | url: /by-location/scaleway
56 | - title: AWS
57 | url: /by-location/aws
58 | - title: Custodian
59 | url: /by-location/custodian
60 | - title: Marist College
61 | url: /by-location/marist-college
62 | - title: IIT Madras
63 | url: /by-location/iitm
64 |
65 | by-ip:
66 | - title: Subnets
67 | children:
68 | - title: "128.232"
69 | url: /by-ip/128.232
70 | - title: "136.144"
71 | url: /by-ip/136.144
72 | - title: "147.75"
73 | url: /by-ip/147.75
74 | - title: "148.100"
75 | url: /by-ip/148.100
76 | - title: "163.172"
77 | url: /by-ip/163.172
78 | - title: "212.47"
79 | url: /by-ip/212.47
80 | - title: "51.159"
81 | url: /by-ip/51.159
82 | - title: "54.146"
83 | url: /by-ip/54.146
84 | - title: "54.224"
85 | url: /by-ip/54.224
86 |
87 |
--------------------------------------------------------------------------------
/_includes/details.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | IP Address | {{page.ip}} |
4 | Manufacturer | {{page.manufacturer}} |
5 | Model | {{page.model}} |
6 | Serial | {{page.serial}} |
7 | OS | {{page.os}} |
8 | Processor | {{page.processor}} |
9 | Total Threads | {{page.threads}} |
10 | Memory | {{page.memory}} |
11 | Disks | {% for disk in page.disks %} - {{disk}}
{% endfor %} |
12 | Location | {{page.location}} |
13 |
14 |
15 | {{page.notes}}
16 |
--------------------------------------------------------------------------------
/_includes/ip-table.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | IP |
5 | Machine |
6 | Model |
7 | Threads |
8 | Notes |
9 |
10 | {% assign sorted_machines = site.machines | sort: 'ip' %}
11 | {% for item in sorted_machines %}
12 | {% if item.ip contains page.title %}
13 |
14 | {{item.ip}} |
15 | {{item.name}} |
16 | {{item.manufacturer}} {{item.model}} |
17 | {{item.threads}} |
18 | {{item.notes}} |
19 |
20 | {% endif %}
21 | {% endfor %}
22 |
23 |
24 |
--------------------------------------------------------------------------------
/_includes/location-table.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Machine |
5 | Model |
6 | Threads |
7 | Notes |
8 |
9 | {% assign lower_title = page.title | downcase %}
10 | {% for item in site.machines %}
11 | {% assign lower_location = item.location | downcase %}
12 | {% if lower_location == lower_title %}
13 |
14 | {{item.name}} |
15 | {{item.manufacturer}} {{item.model}} |
16 | {{item.threads}} |
17 | {{item.notes}} |
18 |
19 | {% endif %}
20 | {% endfor %}
21 |
22 |
23 |
--------------------------------------------------------------------------------
/_includes/machines-map.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
7 |
8 |
9 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
105 |
--------------------------------------------------------------------------------
/_includes/pool-table.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Machine |
5 | Model |
6 | OS |
7 | Threads |
8 | Location |
9 |
10 | {% assign lower_title = page.title | downcase %}
11 | {% for item in site.machines %}
12 | {% assign lower_pool = item.pool | downcase %}
13 | {% if lower_pool == lower_title %}
14 |
15 | {{item.name}} |
16 | {{item.manufacturer}} {{item.model}} |
17 | {{item.os}} |
18 | {{item.threads}} |
19 | {{item.location}} |
20 |
21 | {% endif %}
22 | {% endfor %}
23 |
24 |
25 |
--------------------------------------------------------------------------------
/_includes/use-table.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Machine |
5 | Model |
6 | OS |
7 | Threads |
8 | Location |
9 | Service |
10 |
11 | {% assign lower_title = page.title | downcase %}
12 | {% for item in site.machines %}
13 | {% assign lower_use = item.use | downcase %}
14 | {% if lower_use == lower_title %}
15 |
16 | {{item.name}} |
17 | {{item.manufacturer}} {{item.model}} |
18 | {{item.os}} |
19 | {{item.threads}} |
20 | {{item.location}} |
21 | {{item.service}} |
22 |
23 | {% endif %}
24 | {% endfor %}
25 |
26 |
27 |
--------------------------------------------------------------------------------
/_machines/ainia.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ainia
3 | ip: 128.232.124.247
4 | fqdn: ainia.ocamllabs.io
5 | manufacturer: Avantek
6 | model: Ampere(TM) Mt Snow
7 | os: Ubuntu 22.04
8 | threads: 80
9 | memory: 256GB
10 | processor: Ampere Altra Processor ARMv8
11 | location: Caelum
12 | notes: Cluster worker + Arthur has a login for ARM benchmarking. 8 cores isolated
13 | pool: linux-arm64
14 | disks:
15 | - 900GB NVMe
16 | - 2 x 3.5TB NVMe
17 | latitude: 52.2109
18 | longitude: 0.0917
19 | ---
20 | {% include details.html %}
21 |
22 | In `/etc/default/grub`
23 | ```
24 | GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS1,115200n8 isolcpus=72,73,74,75,76,77,78,79"
25 | ```
26 |
27 |
--------------------------------------------------------------------------------
/_machines/armyofdockerness.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: armyofdockerness
3 | ip:
4 | fqdn: armyofdockerness.ocamllabs.io
5 | model:
6 | os:
7 | threads:
8 | serial: C01-F60163
9 | location: Caelum
10 | notes: Powered off
11 | latitude: 52.2109
12 | longitude: 0.0917
13 | ---
14 | {% include details.html %}
15 |
16 |
--------------------------------------------------------------------------------
/_machines/asteria.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: asteria
3 | ip: 128.232.124.191
4 | fqdn: asteria.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R6525
7 | os: Ubuntu 22.04
8 | threads: 256
9 | location: Caelum
10 | notes: Cluster worker
11 | pool: linux-x86_64
12 | serial: C26L6L3
13 | disks:
14 | - 400GB SSD
15 | - 1.6TB NVMe
16 | - 1.6TB NVMe
17 | processor: 'AMD EPYC 7H12 64-Core Processor'
18 | latitude: 52.2109
19 | longitude: 0.0917
20 | ---
21 | {% include details.html %}
22 |
23 |
--------------------------------------------------------------------------------
/_machines/autumn.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: autumn
3 | ip: 128.232.124.187
4 | fqdn: autumn.ocamllabs.io
5 | model: Super Server
6 | processor: Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz
7 | os: Ubuntu 22.04.3 LTS
8 | threads: 8
9 | location: Caelum
10 | notes: Benchmarking team. Current-bench and OCaml benchmarking projects.
11 | serial: NM18CS008840
12 | ssh: mte24@autumn.ocamllabs.io
13 | use: benchmarking
14 | service: current-bench
15 | latitude: 52.2109
16 | longitude: 0.0917
17 | ---
18 | {% include details.html %}
19 |
--------------------------------------------------------------------------------
/_machines/bremusa.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: bremusa
3 | ip: 128.232.80.167
4 | fqdn: bremusa.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R630
7 | serial: DLDZGL2
8 | os: Ubuntu 22.04.3 LTS
9 | threads: 72
10 | location: Caelum
11 | pool: linux-x86_64
12 | disks:
13 | - 400GB SSD
14 | - 400GB SSD
15 | - 1.6TB NVMe
16 | latitude: 52.2109
17 | longitude: 0.0917
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/carpenter.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: carpenter
3 | fqdn: carpenter.ocamllabs.io
4 | notes:
5 | location: Caelum
6 | manufacturer: SiFive
7 | model: U74-MC
8 | processor: RISC-V U74
9 | threads: 4
10 | os: Ubuntu 22.04.1
11 | pool: linux-riscv64
12 | # NOTE(review): removed duplicate 'fqdn' key (was leafcutter.caelum.tarides.com); with YAML last-key-wins it silently overrode carpenter.ocamllabs.io declared on line 3
13 | ip: 128.232.124.206 # NOTE(review): same IP as leafcutter.md — looks like a copy-paste from that file; verify
14 | latitude: 52.2109
15 | longitude: 0.0917
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/ci-mirage-io.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ci.mirage.io
3 | ip: 51.159.31.165
4 | fqdn: ci-mirage-io.ocamllabs.io
5 | threads: 8
6 | model: QuantaMicro X10E-9N
7 | manufacturer: Quanta Cloud Technology Inc.
8 | processor: 'Intel(R) Xeon(R) CPU E3-1240 v6 @ 3.70GHz'
9 | notes: ocurrent/mirage-ci
10 | os: Ubuntu 20.04.4
11 | location: Scaleway
12 | latitude: 48.8591
13 | longitude: 2.2935
14 | ---
15 | {% include details.html %}
16 |
17 |
--------------------------------------------------------------------------------
/_machines/ci3.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ci3
3 | ip: 163.172.138.177
4 | fqdn: ci3.ocamllabs.io
5 | manufacturer: KVM
6 | model: VM
7 | os: Debian Buster
8 | threads: 4
9 | location: Scaleway
10 | notes: ci3.ocamllabs.io - ocluster scheduler
11 | serial: 7NDNYQ2
12 | processor: 'AMD EPYC 7401P 24-Core Processor'
13 | latitude: 48.8591
14 | longitude: 2.2935
15 | ---
16 | {% include details.html %}
17 |
18 |
--------------------------------------------------------------------------------
/_machines/ci4.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ci4
3 | ip: 51.159.157.216
4 | fqdn: ci4.ocamllabs.io
5 | manufacturer: KVM
6 | model: VM
7 | os: Ubuntu 22.04.1
8 | threads: 4
9 | location: Scaleway
10 | notes: ocurrent/multicore-ci and ocurrent/solver-service
11 | serial: 7NDNYQ2
12 | processor: 'AMD EPYC 7402P 24-Core Processor'
13 | latitude: 48.8591
14 | longitude: 2.2935
15 | ---
16 | {% include details.html %}
17 |
18 |
--------------------------------------------------------------------------------
/_machines/ci5.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ci5
3 | ip: 51.159.167.254
4 | fqdn: ci5.ocamllabs.io
5 | threads: 4
6 | model: Virtual
7 | notes: ocurrent/ci.ocamllabs.io-deployer aka deploy.ci.ocaml.org
8 | processor: 'AMD EPYC 7282 16-Core Processor'
9 | os: Ubuntu 20.04.3
10 | location: Scaleway
11 | latitude: 48.8591
12 | longitude: 2.2935
13 | ---
14 | {% include details.html %}
15 |
16 |
--------------------------------------------------------------------------------
/_machines/ci6.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ci6
3 | ip: 51.15.222.237
4 | fqdn: ci6.ocamllabs.io
5 | manufacturer: KVM
6 | model: VM
7 | location: Scaleway
8 | notes: looks unused -- was probably docs.ci.ocamllabs.io
9 | # NOTE(review): removed duplicate 'location' key (same value already set on line 7)
10 | latitude: 48.8591
11 | longitude: 2.2935
12 | ---
13 | {% include details.html %}
14 |
15 |
--------------------------------------------------------------------------------
/_machines/clete.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: clete
3 | ip: 128.232.124.246
4 | fqdn: clete.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R630
7 | os: Ubuntu 22.04
8 | threads: 72
9 | location: Caelum
10 | notes: Cluster worker
11 | pool: linux-x86_64
12 | serial: DLDVGL2
13 | processor: 'Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz'
14 | latitude: 52.2109
15 | longitude: 0.0917
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/dev1.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: dev1
3 | ip: 212.47.237.247
4 | fqdn: dev1.ocamllabs.io
5 | os: Ubuntu 22.04.1
6 | notes: kit-ty-kate opam testing
7 | location: Scaleway
8 | model: VM
9 | memory: 32GB
10 | threads: 8
11 | processor: 'AMD EPYC 7401P 24-Core Processor'
12 | disks:
13 | - 280GB
14 | latitude: 48.8591
15 | longitude: 2.2935
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/dev2.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: dev2
3 | ip:
4 | fqdn: dev2.ocamllabs.io
5 | notes: Naomi/Jan
6 | location: Caelum
7 | model: VM
8 | ssh: root@dev-02.ocamllabs.io
9 | latitude: 52.2109
10 | longitude: 0.0917
11 | ---
12 | {% include details.html %}
13 |
14 |
--------------------------------------------------------------------------------
/_machines/doc.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: doc
3 | ip: 128.232.124.240
4 | fqdn: doc.ocamllabs.io
5 | os: Ubuntu 22.04
6 | location: Caelum
7 | notes: BIND, netbootxyz and teleport
8 | manufacturer: Dell
9 | model: PowerEdge R210 II
10 | serial: 6WGLV02
11 | processor: 'Intel(R) Xeon(R) CPU E3-1220 V2 @ 3.10GHz'
12 | threads: 4
13 | disks:
14 | - 1TB
15 | latitude: 52.2109
16 | longitude: 0.0917
17 | ---
18 | {% include details.html %}
19 |
20 |
--------------------------------------------------------------------------------
/_machines/docs-staging.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: docs-staging
3 | ip: 51.158.234.115
4 | fqdn: docs-staging.sw.ocaml.org
5 | manufacturer: Scaleway
6 | model: SCW-PRO2-M
7 | os: Ubuntu 22.04.2
8 | threads: 16
9 | processor: 'AMD EPYC 7543 32-Core Processor'
10 | memory: 64GB
11 | location: Scaleway
12 | disks:
13 | - 18GB virtual disk
14 | - 230GB virtual disk
15 | notes: Staging site for docs.ocaml.org. Review in September 2023.
16 | # NOTE(review): removed duplicate 'location' key (same value already set on line 11)
17 | latitude: 48.8591
18 | longitude: 2.2935
19 | ---
20 | {% include details.html %}
21 |
22 |
--------------------------------------------------------------------------------
/_machines/dopey.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: dopey
3 | ip:
4 | fqdn: dopey.ocamllabs.io
5 | model: PowerEdge R210 II
6 | serial: CWGLV02
7 | os:
8 | threads: 8
9 | location: Caelum
10 | notes: Console ports for mini-itx
11 | latitude: 52.2109
12 | longitude: 0.0917
13 | ---
14 | {% include details.html %}
15 |
16 |
--------------------------------------------------------------------------------
/_machines/doris.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: doris
3 | ip: 128.232.124.221
4 | fqdn: doris.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R6525
7 | os: Ubuntu 22.04
8 | threads: 256
9 | location: Caelum
10 | notes: Cluster worker
11 | pool: linux-x86_64
12 | serial: G26L6L3
13 | disks:
14 | - 400GB SSD
15 | - 1.6TB NVMe
16 | - 1.6TB NVMe
17 | processor: 'AMD EPYC 7H12 64-Core Processor'
18 | latitude: 52.2109
19 | longitude: 0.0917
20 | ---
21 | {% include details.html %}
22 |
23 |
--------------------------------------------------------------------------------
/_machines/eumache.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: eumache
3 | ip: 128.232.124.233
4 | fqdn: eumache.ocamllabs.io
5 | notes: docs.ci.ocaml.org and docs-data.ocaml.org
6 | location: Caelum
7 | os: Ubuntu 21.04
8 | serial: DLDXGL2
9 | manufacturer: Dell
10 | model: PowerEdge R630
11 | threads: 72
12 | # NOTE(review): removed duplicate 'ip' key; its value was consolidated into the (previously empty) 'ip' on line 3
13 | processor: 'Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz'
14 | latitude: 52.2109
15 | longitude: 0.0917
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/grumpy.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: grumpy
3 | ip: 128.232.124.200
4 | fqdn: grumpy.ocamllabs.io
5 | model: PowerEdge R210 II
6 | os: Ubuntu 22.04
7 | threads: 4
8 | location: Caelum
9 | notes: Git Repo
10 | serial:
11 | processor: 'Intel(R) Xeon(R) CPU E3-1220 V2 @ 3.10GHz'
12 | latitude: 52.2109
13 | longitude: 0.0917
14 | ---
15 | {% include details.html %}
16 |
17 |
--------------------------------------------------------------------------------
/_machines/happy.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: happy
3 | ip: 128.232.96.21
4 | fqdn: happy.cl.cam.ac.uk
5 | notes: Router
6 | os: EdgeOS
7 | location: Caelum
8 | latitude: 52.2109
9 | longitude: 0.0917
10 | ---
11 | {% include details.html %}
12 |
13 |
--------------------------------------------------------------------------------
/_machines/hopi.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: hopi
3 | ip: 128.232.124.183
4 | fqdn: hopi.ocamllabs.io
5 | notes: OCaml on Windows Desktops
6 | threads: 128
7 | manufacturer: Dell
8 | model: PowerEdge R7425
9 | os: Ubuntu 22.04
10 | location: Caelum
11 | latitude: 52.2109
12 | longitude: 0.0917
13 | ---
14 | {% include details.html %}
15 |
16 |
--------------------------------------------------------------------------------
/_machines/i7-worker-01.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: i7-worker-01
3 | ip:
4 | fqdn: i7-worker-01.ocamllabs.io
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 12
8 | location: Custodian
9 | pool: macos-x86_64
10 | notes: Cluster worker
11 | processor: 'Intel i7'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/i7-worker-02.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: i7-worker-02
3 | ip:
4 | fqdn: i7-worker-02.ocamllabs.io
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 12
8 | location: Custodian
9 | pool: macos-x86_64
10 | notes: Cluster worker
11 | processor: 'Intel i7'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/i7-worker-03.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: i7-worker-03
3 | ip:
4 | fqdn: i7-worker-03.ocamllabs.io
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 12
8 | location: Custodian
9 | pool: macos-x86_64
10 | notes: Cluster worker
11 | processor: 'Intel i7'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/i7-worker-04.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: i7-worker-04
3 | ip:
4 | fqdn: i7-worker-04.ocamllabs.io
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 12
8 | location: Custodian
9 | pool: macos-x86_64
10 | notes: Cluster worker
11 | processor: 'Intel i7'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/iphito.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: iphito
3 | ip: 128.232.124.244
4 | fqdn: iphito.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R6525
7 | os: Ubuntu 22.04
8 | threads: 256
9 | location: Caelum
10 | notes: Cluster worker
11 | pool: linux-x86_64
12 | serial: D26L6L3
13 | disks:
14 | - 400GB SSD
15 | - 1.6TB NVMe
16 | - 1.6TB NVMe
17 | processor: 'AMD EPYC 7H12 64-Core Processor'
18 | latitude: 52.2109
19 | longitude: 0.0917
20 | ---
21 | {% include details.html %}
22 |
23 |
--------------------------------------------------------------------------------
/_machines/kydoime.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: kydoime
3 | ip: 128.232.124.230
4 | fqdn: kydoime.ocamllabs.io
5 | manufacturer: Avantek
6 | model: Ampere(TM) Mt Snow
7 | os: Ubuntu 22.04
8 | threads: 80
9 | memory: 256GB
10 | processor: Ampere Altra Processor ARMv8
11 | location: Caelum
12 | notes: Cluster worker
13 | pool: linux-arm64
14 | disks:
15 | - 900GB NVMe
16 | - 2 x 3.5TB NVMe
17 | latitude: 52.2109
18 | longitude: 0.0917
19 | ---
20 | {% include details.html %}
21 |
22 |
--------------------------------------------------------------------------------
/_machines/laodoke.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: laodoke
3 | ip: 128.232.124.239
4 | fqdn: laodoke.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R630
7 | os: Ubuntu 22.04
8 | threads: 72
9 | location: Caelum
10 | notes: Cluster worker
11 | pool: linux-x86_64
12 | serial: DLDSGL2
13 | processor: 'Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz'
14 | disks:
15 | - 120GB SSD
16 | - 1.5TB NVMe
17 | latitude: 52.2109
18 | longitude: 0.0917
19 | ---
20 | {% include details.html %}
21 |
22 |
--------------------------------------------------------------------------------
/_machines/leafcutter.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: leafcutter
3 | ip: 128.232.124.206
4 | fqdn: leafcutter.ocamllabs.io
5 | notes:
6 | location: Caelum
7 | manufacturer: SiFive
8 | model: U74-MC
9 | processor: RISC-V U74
10 | threads: 4
11 | os: Ubuntu 22.04.1
12 | pool: linux-riscv64
13 | latitude: 52.2109
14 | longitude: 0.0917
15 | ---
16 | {% include details.html %}
17 |
18 |
--------------------------------------------------------------------------------
/_machines/m1-worker-01.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: m1-worker-01
3 | ip:
4 | fqdn: m1-worker-01.macos.ci.dev
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 8
8 | location: Custodian
9 | pool: macos-arm64
10 | notes: Cluster worker
11 | processor: 'Apple M1'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/m1-worker-02.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: m1-worker-02
3 | ip:
4 | fqdn: m1-worker-02.macos.ci.dev
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 8
8 | location: Custodian
9 | pool: macos-arm64
10 | notes: Cluster worker
11 | processor: 'Apple M1'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/m1-worker-03.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: m1-worker-03
3 | ip:
4 | fqdn: m1-worker-03.macos.ci.dev
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 8
8 | location: Custodian
9 | pool: macos-arm64
10 | notes: Cluster worker
11 | processor: 'Apple M1'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/m1-worker-04.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: m1-worker-04
3 | ip:
4 | fqdn: m1-worker-04.macos.ci.dev
5 | model: Apple Mac Mini
6 | os: Monterey 12.6
7 | threads: 8
8 | location: Custodian
9 | pool: macos-arm64
10 | notes: Cluster worker
11 | processor: 'Apple M1'
12 | disks:
13 | - 512GB SSD
14 | latitude: 51.2798
15 | longitude: 0.5487
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/marpe.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: marpe
3 | ip: 128.232.124.222
4 | fqdn: marpe.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R6525
7 | os: Ubuntu 22.04
8 | threads: 256
9 | location: Caelum
10 | notes: Cluster worker
11 | pool: linux-x86_64
12 | serial: F26L6L3
13 | disks:
14 | - 400GB SSD
15 | - 1.6TB NVMe
16 | - 1.6TB NVMe
17 | processor: 'AMD EPYC 7H12 64-Core Processor'
18 | latitude: 52.2109
19 | longitude: 0.0917
20 | ---
21 | {% include details.html %}
22 |
23 |
--------------------------------------------------------------------------------
/_machines/michael.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: michael
3 | ip:
4 | fqdn: michael.caelum.ci.dev
5 | notes: OCaml on Windows benchmarking (@eutro)
6 | location: Caelum
7 | manufacturer: Dell
8 | model: PowerEdge R420
9 | serial: GG8TC5J
10 | threads: 24
11 | processor: Intel(R) Xeon(R) CPU E5-2420 0 @ 1.90GHz
12 | disks:
13 | - 1TB SSD
14 | latitude: 52.2109
15 | longitude: 0.0917
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/molpadia.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: molpadia
3 | ip: 128.232.124.172
4 | fqdn: molpadia.ocamllabs.io
5 | manufacturer: Avantek
6 | model: Ampere(TM) Mt Snow
7 | os: Ubuntu 22.04
8 | threads: 80
9 | memory: 256GB
10 | processor: Ampere Altra Processor ARMv8
11 | location: Caelum
12 | notes: Cluster worker
13 | pool: linux-arm64
14 | disks:
15 | - 900GB NVMe
16 | - 2 x 3.5TB NVMe
17 | latitude: 52.2109
18 | longitude: 0.0917
19 | ---
20 | {% include details.html %}
21 |
22 |
--------------------------------------------------------------------------------
/_machines/navajo.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: navajo
3 | ip: 128.232.124.245
4 | fqdn: navajo.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R7425
7 | processor: AMD EPYC 7551 32-Core Processor
8 | os: Ubuntu 22.04.3 LTS
9 | threads: 128
10 | location: Caelum
11 | use: benchmarking
12 | service: Sandmark Nightly
13 | notes: Benchmark Tooling Team. Sandmark benchmarking.
14 | serial: 7NCSYQ2
15 | ssh: mte24@navajo.ocamllabs.io
16 | latitude: 52.2109
17 | longitude: 0.0917
18 | ---
19 | {% include details.html %}
20 |
--------------------------------------------------------------------------------
/_machines/ocaml-1.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ocaml-1
3 | ip: 140.211.11.97
4 | fqdn: ocaml-1.osuosl.ci.dev
5 | os: Ubuntu 24.04
6 | threads: 16
7 | memory: 32GB
8 | processor: ARMv8
9 | location: OSUOSL
10 | notes: Cluster worker
11 | pool: linux-arm64
12 | disks:
13 | - 320GB SSD
14 | latitude: 44.566337
15 | longitude: -123.2780719
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/ocaml-2.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: ocaml-2
3 | ip: 140.211.11.61
4 | fqdn: ocaml-2.osuosl.ci.dev
5 | os: Ubuntu 24.04
6 | threads: 16
7 | memory: 32GB
8 | processor: ARMv8
9 | location: OSUOSL
10 | notes: Cluster worker
11 | pool: linux-arm64
12 | disks:
13 | - 320GB SSD
14 | latitude: 44.566337
15 | longitude: -123.2780719
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/odawa.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: odawa
3 | # NOTE(review): duplicate 'ip' key removed here (was 128.232.124.232); the later entry (128.232.124.247) took effect under last-wins parsing and is kept below — confirm which address is current
4 | fqdn: odawa.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R6525
7 | serial: 47B6KH3
8 | notes: Windows Base Image Builder
9 | pool: windows-amd64
10 | location: Caelum
11 | ip: 128.232.124.247
12 | os: Ubuntu 24.04
13 | threads: 256
14 | processor: 'AMD EPYC 7763 64-Core Processor'
15 | latitude: 52.2109
16 | longitude: 0.0917
17 | ---
18 | {% include details.html %}
19 |
20 |
--------------------------------------------------------------------------------
/_machines/okypous.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: okypous
3 | ip: 128.232.124.241
4 | fqdn: okypous.ocamllabs.io
5 | manufacturer: Avantek
6 | model: Ampere(TM) Mt Snow
7 | os: Ubuntu 22.04
8 | threads: 80
9 | memory: 256GB
10 | processor: Ampere Altra Processor ARMv8
11 | location: Caelum
12 | notes: Cluster worker
13 | pool: linux-arm64
14 | disks:
15 | - 900GB NVMe
16 | - 2 x 3.5TB NVMe
17 | latitude: 52.2109
18 | longitude: 0.0917
19 | ---
20 | {% include details.html %}
21 |
22 |
--------------------------------------------------------------------------------
/_machines/opam-4.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: opam-4
3 | fqdn: opam-4.ocaml.org
4 | ip: 51.158.232.133
5 | os: Ubuntu 22.04.1
6 | manufacturer: Scaleway
7 | model: VM
8 | threads: 2
9 | location: Scaleway
10 | memory: 8GB
11 | disks:
12 | - 18GB
13 | - 140GB
14 | notes: opam.ocaml.org
15 | latitude: 48.8591
16 | longitude: 2.2935
17 | ---
18 | {% include details.html %}
19 |
20 |
--------------------------------------------------------------------------------
/_machines/opam-5.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: opam-5
3 | fqdn: opam-5.ocaml.org
4 | ip: 151.115.76.159
5 | os: Ubuntu 22.04.1
6 | manufacturer: Scaleway
7 | model: VM
8 | threads: 2
9 | location: Scaleway
10 | memory: 8GB
11 | disks:
12 | - 18GB
13 | - 140GB
14 | notes: opam.ocaml.org
15 | latitude: 48.8591
16 | longitude: 2.2935
17 | ---
18 | {% include details.html %}
19 |
20 |
--------------------------------------------------------------------------------
/_machines/oregano.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: oregano
3 | ip: 128.232.124.178
4 | fqdn: oregano.caelum.ci.dev
5 | manufacturer: Supermicro
6 | os: Ubuntu 24.04 LTS
7 | threads: 40
8 | location: Caelum
9 | pool: openbsd-amd64
10 | processor: 'Intel(R) Xeon(R) CPU E5-2640 v4 @ 2.40GHz'
11 | disks:
12 | - 1.7TB SSD
13 | latitude: 52.2109
14 | longitude: 0.0917
15 | ---
16 | {% include details.html %}
17 |
18 |
--------------------------------------------------------------------------------
/_machines/orithia.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: orithia
3 | ip: 128.232.124.217
4 | fqdn: orithia.ocamllabs.io
5 | manufacturer: Raptor
6 | model: Talos II
7 | serial: W1000194
8 | os: Ubuntu 22.04
9 | threads: 176
10 | location: Caelum
11 | notes: Cluster worker
12 | pool: linux-ppc64
13 | disks:
14 | - 500GB NVMe
15 | - 1.8TB NVMe
16 | - 1.8TB NVMe
17 | processor: Power9
18 | latitude: 52.2109
19 | longitude: 0.0917
20 | ---
21 | {% include details.html %}
22 |
23 |
--------------------------------------------------------------------------------
/_machines/pascal.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: pascal
3 | ip: 51.158.60.34
4 | fqdn: x86-bm-d1.sw.ocaml.org
5 | model: PowerEdge C6525
6 | processor: AMD EPYC 7313P 16-Core Processor
7 | memory: 125GiB
8 | disks:
9 | - 1.7TB
10 | os: Ubuntu 22.04.4 LTS
11 | threads: 32
12 | location: Scaleway
13 | notes: Benchmarking team. Current-bench and OCaml benchmarking projects.
14 | serial: C015BW3
15 | ssh: current-bench@x86-bm-d1.sw.ocaml.org
16 | use: benchmarking
17 | service: current-bench
18 | latitude: 48.8591
19 | longitude: 2.2935
20 | ---
21 | {% include details.html %}
22 |
--------------------------------------------------------------------------------
/_machines/phoebe.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: phoebe
3 | ip: 128.232.124.231
4 | fqdn: phoebe.ocamllabs.io
5 | manufacturer: Dell
6 | serial: DLDTGL2
7 | model: PowerEdge R630
8 | os: Ubuntu 22.04
9 | threads: 72
10 | location: Caelum
11 | processor: Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
12 | notes: Cluster worker
13 | pool: linux-x86_64
14 | disks:
15 | - 2 x 400GB SSD
16 | - 1.5TB NVMe
17 | latitude: 52.2109
18 | longitude: 0.0917
19 | ---
20 | {% include details.html %}
21 |
22 |
--------------------------------------------------------------------------------
/_machines/raphael.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: raphael
3 | ip:
4 | fqdn: raphael.ocamllabs.io
5 | notes: Powered off - serious hardware fault
6 | location: Caelum
7 | threads: 12
8 | latitude: 52.2109
9 | longitude: 0.0917
10 | ---
11 | {% include details.html %}
12 |
13 |
--------------------------------------------------------------------------------
/_machines/riscv-bm-a1.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: riscv-bm-a1
3 | fqdn: riscv-bm-a1.sw.ocaml.org
4 | model: RV1
5 | processor: rv64imafdcvsu
6 | memory: 15GiB
7 | disks:
8 | - mmcblk0: 116.5GB
9 | - mmcblk0boot0: 4MB
10 | - mmcblk0boot1: 4MB
11 | os: Ubuntu 24.04 LTS
12 | threads: 4
13 | location: Scaleway
14 | ssh: root@riscv-bm-a1
15 | use: RISCV worker
16 | pool: linux-riscv64
17 | latitude: 48.8591
18 | longitude: 2.2935
19 | ---
20 | {% include details.html %}
21 |
--------------------------------------------------------------------------------
/_machines/riscv-bm-a2.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: riscv-bm-a2
3 | fqdn: riscv-bm-a2.sw.ocaml.org
4 | model: RV1
5 | processor: rv64imafdcvsu
6 | memory: 15GiB
7 | disks:
8 | - mmcblk0: 116.5GB
9 | - mmcblk0boot0: 4MB
10 | - mmcblk0boot1: 4MB
11 | os: Ubuntu 24.04 LTS
12 | threads: 4
13 | location: Scaleway
14 | ssh: root@riscv-bm-a2
15 | use: RISCV worker
16 | pool: linux-riscv64
17 | latitude: 48.8591
18 | longitude: 2.2935
19 | ---
20 | {% include details.html %}
21 |
--------------------------------------------------------------------------------
/_machines/riscv-bm-a3.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: riscv-bm-a3
3 | fqdn: riscv-bm-a3.sw.ocaml.org
4 | model: RV1
5 | processor: rv64imafdcvsu
6 | memory: 15GiB
7 | disks:
8 | - mmcblk0: 116.5GB
9 | - mmcblk0boot0: 4MB
10 | - mmcblk0boot1: 4MB
11 | os: Ubuntu 24.04 LTS
12 | threads: 4
13 | location: Scaleway
14 | ssh: root@riscv-bm-a3
15 | use: RISCV worker
16 | pool: linux-riscv64
17 | latitude: 48.8591
18 | longitude: 2.2935
19 | ---
20 | {% include details.html %}
21 |
--------------------------------------------------------------------------------
/_machines/riscv-bm-a4.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: riscv-bm-a4
3 | fqdn: riscv-bm-a4.sw.ocaml.org
4 | model: RV1
5 | processor: rv64imafdcvsu
6 | memory: 15GiB
7 | disks:
8 | - mmcblk0: 116.5GB
9 | - mmcblk0boot0: 4MB
10 | - mmcblk0boot1: 4MB
11 | os: Ubuntu 24.04 LTS
12 | threads: 4
13 | location: Scaleway
14 | ssh: root@riscv-bm-a4
15 | use: RISCV worker
16 | pool: linux-riscv64
17 | latitude: 48.8591
18 | longitude: 2.2935
19 | ---
20 | {% include details.html %}
21 |
--------------------------------------------------------------------------------
/_machines/roo.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: roo
3 | ip: 128.232.124.253
4 | fqdn: roo.ocamllabs.io
5 | notes: Benchmarking -- fermat.ocamllabs.io -- parallel benchmarks
6 | manufacturer: SuperMicro
7 | model: h8qg6/h8qgi
8 | os: Ubuntu 22.04.3 LTS
9 | location: Caelum
10 | use: benchmarking
11 | service: current-bench
12 | threads: 48
13 | memory: 64GB
14 | processor: AMD Opteron Processor 6344
15 | disks:
16 | - 256GB SSD
17 | - 1TB
18 | - 1TB
19 | latitude: 52.2109
20 | longitude: 0.0917
21 | ---
22 | {% include details.html %}
23 |
24 |
--------------------------------------------------------------------------------
/_machines/rosemary.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: rosemary
3 | ip: 128.232.124.157
4 | fqdn: rosemary.caelum.ci.dev
5 | model: Supermicro
6 | os: FreeBSD 14.2
7 | threads: 40
8 | location: Caelum
9 | notes: FreeBSD worker
10 | processor: 'Intel(R) Xeon(R) CPU E5-2640 v4 @ 2.40GHz'
11 | pool: freebsd-x86_64
12 | disks:
13 | - 1.7TB SSD
14 | latitude: 52.2109
15 | longitude: 0.0917
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/s390x-2.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: s390x-2
3 | ip: 148.100.84.181
4 | fqdn: s390x-2.ci.ocamllabs.io
5 | model: IBM/S390
6 | os: Ubuntu 22.04
7 | threads: 4
8 | memory: 16GB
9 | location: Marist College
10 | notes: Cluster worker
11 | pool: linux-s390x
12 | disks:
13 | - 2 x 100GB
14 | latitude: 41.725974
15 | longitude: -73.93354
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/s390x.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: s390x
3 | ip: 148.100.84.120
4 | fqdn: s390x.ci.ocamllabs.io
5 | model: IBM/S390
6 | os: Ubuntu 22.04
7 | threads: 4
8 | memory: 16GB
9 | location: Marist College
10 | notes: Cluster worker
11 | pool: linux-s390x
12 | disks:
13 | - 500GB
14 | latitude: 41.725974
15 | longitude: -73.93354
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/scyleia.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: scyleia
3 | ip: 128.232.124.212
4 | fqdn: scyleia.ocamllabs.io
5 | manufacturer: Raptor
6 | model: Talos II
7 | serial: W1000194
8 | os: Ubuntu 22.04
9 | threads: 176
10 | location: Caelum
11 | notes: Cluster worker
12 | pool: linux-ppc64
13 | disks:
14 | - 500GB NVMe
15 | - 1.8TB NVMe
16 | - 1.8TB NVMe
17 | processor: Power9
18 | latitude: 52.2109
19 | longitude: 0.0917
20 | ---
21 | {% include details.html %}
22 |
23 |
--------------------------------------------------------------------------------
/_machines/sleepy.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: sleepy
3 | ip:
4 | fqdn: sleepy.ocamllabs.io
5 | manufacturer: Dell
6 | model: PowerEdge R210 II
7 | serial: 5XGLV02
8 | os: Ubuntu 18.04.2
9 | location: Caelum
10 | notes: Doesn't appear to be doing anything
11 | processor: 'Intel(R) Xeon(R) CPU E3-220 v3 @ 3.10GHz' # NOTE(review): 'E3-220 v3' is not a known Xeon SKU — likely a typo for 'E3-1220 v3'; confirm on the machine
12 | threads: 4
13 | latitude: 52.2109
14 | longitude: 0.0917
15 | ---
16 | {% include details.html %}
17 |
18 |
--------------------------------------------------------------------------------
/_machines/spring.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: spring
3 | ip: 128.232.124.188
4 | fqdn: spring.caelum.ci.dev
5 | model: Super Server
6 | os: FreeBSD 13.2
7 | threads: 8
8 | location: Caelum
9 | use: benchmarking
10 | notes: FreeBSD worker + some user data in /home (check)
11 | serial: 0123456789
12 | processor: 'Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz'
13 | ssh: mte24@spring.ocamllabs.io
14 | pool: freebsd-x86_64
15 | disks:
16 | - 200GB SSD
17 | - 1.0TB
18 | latitude: 52.2109
19 | longitude: 0.0917
20 | ---
21 | {% include details.html %}
22 |
23 |
--------------------------------------------------------------------------------
/_machines/staging.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: staging
3 | ip: 51.159.79.64
4 | fqdn: staging.ocaml.org
5 | os: Ubuntu 20.04.2
6 | location: Scaleway
7 | notes: ocurrent/ocaml.org
8 | manufacturer: Quanta Cloud Technology Inc.
9 | model: QuantaMicro X10E-9N
10 | serial: QTFCQ5804012D
11 | threads: 8
12 | processor: AMD EPYC 7282 16-Core Processor
13 | disks:
14 | - 240GB
15 | - 240GB
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/summer.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: summer
3 | ip: 128.232.124.251
4 | fqdn: summer.ocamllabs.io
5 | model: Super Server
6 | os: FreeBSD 13.2
7 | threads: 16
8 | location: Caelum
9 | notes: FreeBSD worker + some user data in /home (check)
10 | serial: 0123456789
11 | processor: 'Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz'
12 | pool: freebsd-x86_64
13 | disks:
14 | - 200GB SSD
15 | - 1.0TB
16 | latitude: 52.2109
17 | longitude: 0.0917
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/tigger.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: tigger
3 | ip: 128.232.124.211
4 | fqdn: tigger.ocamllabs.io
5 | notes: Powered off. Frequent reboots due to watchdog timer events.
6 | location: Caelum
7 | manufacturer: SuperMicro
8 | model: h8qg6/h8qgi
9 | threads: 48
10 | memory: 64GB
11 | processor: AMD Opteron Processor 6344
12 | disks:
13 | - 64GB SSD
14 | - 1TB
15 | - 1TB
16 | # duplicate 'ip' key removed (same value as the 'ip' entry above)
17 | latitude: 52.2109
18 | longitude: 0.0917
19 | ---
20 | {% include details.html %}
21 |
22 |
--------------------------------------------------------------------------------
/_machines/toxis.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: toxis
3 | ip: 128.232.124.213
4 | fqdn: toxis.ocamllabs.io
5 | os: Ubuntu 22.04.4
6 | manufacturer: Dell
7 | model: PowerEdge R630
8 | serial: DLF0HL2
9 | processor: Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
10 | threads: 72
11 | location: Caelum
12 | notes: Cluster worker. ci.ocamllabs.io. /dev/sdb is unused
13 | pool: linux-x86_64
14 | # duplicate 'notes' key merged into the single entry above
15 | disks:
16 | - 400GB SSD
17 | - 400GB SSD
18 | - 1.6TB NVMe
19 | latitude: 52.2109
20 | longitude: 0.0917
21 | ---
22 | {% include details.html %}
23 |
24 |
--------------------------------------------------------------------------------
/_machines/turing.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: turing
3 | ip: 10.147.20.85
4 | fqdn: turing.ocamllabs.io
5 | model: Precision 7820 Tower
6 | processor: Intel(R) Xeon(R) Gold 5120 CPU @ 2.20GHz
7 | os: Ubuntu 22.04.3 LTS
8 | threads: 28
9 | location: IIT Madras
10 | use: benchmarking
11 | service: Sandmark Nightly
12 | notes: Benchmarking Team -- Used for Sandmark; requires ZeroTier connection
13 | serial: 4YGD013
14 | latitude: 12.994745
15 | longitude: 80.233408
16 | ---
17 | {% include details.html %}
18 |
--------------------------------------------------------------------------------
/_machines/v2.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: v2
3 | ip: 51.159.152.205
4 | fqdn: v2.ocaml.org
5 | os: Ubuntu 20.04.3
6 | location: Scaleway
7 | notes: ocurrent/v2.ocaml.org
8 | manufacturer: Scaleway
9 | model: VM
10 | threads: 4
11 | processor: AMD EPYC 7282 16-Core Processor
12 | disks:
13 | - 75GB
14 | latitude: 48.8591
15 | longitude: 2.2935
16 | ---
17 | {% include details.html %}
18 |
19 |
--------------------------------------------------------------------------------
/_machines/v3b.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: v3b
3 | ip: 51.159.83.169
4 | fqdn: v3b.ocaml.org
5 | notes: staging.ocaml.org
6 | location: Scaleway
7 | manufacturer: Scaleway
8 | model: SCW-PRO2-XS
9 | processor: 'AMD EPYC 7543 32-Core Processor'
10 | threads: 4
11 | os: Ubuntu 22.04.1 LTS
12 | memory: 16GB
13 | disks:
14 | - 28GB
15 | - 110GB
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/v3c.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: v3c
3 | ip: 51.159.190.183
4 | fqdn: v3c.ocaml.org
5 | notes: www.ocaml.org
6 | location: Scaleway
7 | manufacturer: Scaleway
8 | model: SCW-PRO2-XS
9 | processor: 'AMD EPYC 7543 32-Core Processor'
10 | threads: 2
11 | os: Ubuntu 22.04.1 LTS
12 | memory: 8GB
13 | disks:
14 | - 28GB
15 | - 110GB
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/watch.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: watch
3 | ip: 163.172.166.101
4 | fqdn: watch.ocaml.org
5 | manufacturer: Scaleway
6 | model: SCW-GP1-XS
7 | os: Ubuntu 20.04.5
8 | threads: 4
9 | processor: 'AMD EPYC 7401P 24-Core Processor'
10 | memory: 16GB
11 | location: Scaleway
12 | disks:
13 | - 140GB virtual disk
14 | notes: PeerTube host at https://watch.ocaml.org
15 | # duplicate 'location' key removed (same value as the 'location' entry above)
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/winter.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: winter
3 | ip: 128.232.124.181
4 | fqdn: winter.ocamllabs.io
5 | model: Super Server
6 | os: Ubuntu 22.04.3 LTS
7 | threads: 16
8 | location: Caelum
9 | use: benchmarking
10 | service: Sandmark
11 | notes: General purpose machine, typically for benchmarking by compiler developers. Primary users nickbarnes, polytypic, fabrice, sadiq, shakthi
12 | serial: NM18CS008836
13 | processor: 'Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz'
14 | ssh: mte24@winter.ocamllabs.io
15 | latitude: 52.2109
16 | longitude: 0.0917
17 | ---
18 | {% include details.html %}
19 |
20 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c1.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c1
3 | fqdn: x86-bm-c1.sw.ocaml.org
4 | manufacturer: Dell
5 | model: PowerEdge R720
6 | os: Ubuntu 22.04
7 | threads: 32
8 | memory: 256GB
9 | location: Scaleway
10 | pool: linux-x86_64
11 | notes: Cluster worker
12 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
13 | disks:
14 | - 2 x 950GB SSD
15 | latitude: 48.8591
16 | longitude: 2.2935
17 | ---
18 | {% include details.html %}
19 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c2.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c2
3 | fqdn: x86-bm-c2.sw.ocaml.org
4 | manufacturer: Dell
5 | model: PowerEdge R720
6 | os: Ubuntu 22.04
7 | threads: 32
8 | memory: 256GB
9 | location: Scaleway
10 | pool: linux-x86_64
11 | notes: Cluster worker
12 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
13 | disks:
14 | - 2 x 950GB SSD
15 | latitude: 48.8591
16 | longitude: 2.2935
17 | ---
18 | {% include details.html %}
19 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c3.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c3
3 | fqdn: x86-bm-c3.sw.ocaml.org
4 | manufacturer: Dell
5 | model: PowerEdge R720
6 | os: Ubuntu 22.04
7 | threads: 32
8 | memory: 256GB
9 | location: Scaleway
10 | pool: linux-x86_64
11 | notes: Cluster worker
12 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
13 | disks:
14 | - 2 x 950GB SSD
15 | latitude: 48.8591
16 | longitude: 2.2935
17 | ---
18 | {% include details.html %}
19 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c4.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c4
3 | ip: 51.159.214.53
4 | fqdn: x86-bm-c4.sw.ocaml.org
5 | manufacturer: Dell
6 | model: PowerEdge R720
7 | os: Ubuntu 22.04
8 | threads: 32
9 | memory: 256GB
10 | location: Scaleway
11 | pool: linux-x86_64
12 | notes: Cluster worker
13 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
14 | disks:
15 | - 2 x 950GB SSD
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c5.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c5
3 | ip: 51.159.214.52
4 | fqdn: x86-bm-c5.sw.ocaml.org
5 | manufacturer: Dell
6 | model: PowerEdge R720
7 | os: Ubuntu 22.04
8 | threads: 32
9 | memory: 256GB
10 | location: Scaleway
11 | pool: linux-x86_64
12 | notes: Cluster worker
13 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
14 | disks:
15 | - 2 x 950GB SSD
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c6.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c6
3 | ip: 51.159.214.57
4 | fqdn: x86-bm-c6.sw.ocaml.org
5 | manufacturer: Dell
6 | model: PowerEdge R720
7 | os: Ubuntu 22.04
8 | threads: 32
9 | memory: 256GB
10 | location: Scaleway
11 | pool: linux-x86_64
12 | notes: Cluster worker
13 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
14 | disks:
15 | - 2 x 950GB SSD
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c7.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c7
3 | ip: 51.159.214.58
4 | fqdn: x86-bm-c7.sw.ocaml.org
5 | manufacturer: Dell
6 | model: PowerEdge R720
7 | os: Ubuntu 22.04
8 | threads: 32
9 | memory: 256GB
10 | location: Scaleway
11 | pool: linux-x86_64
12 | notes: Cluster worker
13 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
14 | disks:
15 | - 2 x 950GB SSD
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c8.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c8
3 | ip: 51.159.110.118
4 | fqdn: x86-bm-c8.sw.ocaml.org
5 | manufacturer: Dell
6 | model: PowerEdge R720
7 | os: Ubuntu 22.04
8 | threads: 32
9 | memory: 256GB
10 | location: Scaleway
11 | pool: linux-x86_64
12 | notes: Cluster worker
13 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
14 | disks:
15 | - 2 x 950GB SSD
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_machines/x86-bm-c9.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: x86-bm-c9
3 | ip: 51.159.109.177
4 | fqdn: x86-bm-c9.sw.ocaml.org
5 | manufacturer: Dell
6 | model: PowerEdge R720
7 | os: Ubuntu 22.04
8 | threads: 32
9 | memory: 256GB
10 | location: Scaleway
11 | pool: linux-x86_64
12 | notes: Cluster worker
13 | processor: 'Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz'
14 | disks:
15 | - 2 x 7.1TB
16 | latitude: 48.8591
17 | longitude: 2.2935
18 | ---
19 | {% include details.html %}
20 |
21 |
--------------------------------------------------------------------------------
/_posts/2022-11-04-relocating-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Relocating OCaml.org
3 | ---
4 |
5 | Equinix is closing their data centre in Amsterdam where we currently host [www.ocaml.org](https://www.ocaml.org) and [staging.ocaml.org](https://staging.ocaml.org) on an `m1.xlarge.x86`. These websites will be moved to virtual machines on the Scaleway cluster.
6 |
7 |
8 |
--------------------------------------------------------------------------------
/_posts/2022-11-08-tarsnap-backups.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Now using Tarsnap for VM backups
3 | ---
4 |
5 | Some of our ocaml.org services, such as [watch.ocaml.org](https://watch.ocaml.org), involve storing user-uploaded content. We need a way to make sure these are backed up in case of information loss, and to date this has been ad hoc (involving rsyncing to another machine).
6 |
7 | We now have a [Tarsnap](https://tarsnap.com) account as `tarsnap@ocaml.org`, and it is first being used to store backups of the videos uploaded to the service. We'll expand its use to other infrastructures that also have precious data.
8 |
9 | Other suggestions for backup services are welcome. In general, we're looking for solutions that do not involve a lot of key management, and a reasonable amount of redundancy (but backing up across 2-3 other machines in different datacentres is probably sufficient).
10 |
--------------------------------------------------------------------------------
/_posts/2023-02-27-watch-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Promote watch.ocaml.org to non-beta
3 | ---
4 |
5 | [watch.ocaml.org](https://watch.ocaml.org) has been updated to run as a Docker service stack rather than via docker-compose. This change allowed an [OCurrent](https://github.com/ocurrent/ocurrent) pipeline to monitor the Docker repository and update the image via `docker service update` when a new version is available.
6 |
7 | We have several other services updated via OCurrent: [deploy.ci.ocaml.org](https://deploy.ci.ocaml.org)
8 |
9 | In OCurrent, we can create a schedule node that triggers every seven days and invokes a `docker pull`, yielding the current image SHA. If this has changed, run `docker service update` with the new image.
10 |
11 | ```
12 | let peertube =
13 | let weekly = Current_cache.Schedule.v ~valid_for:(Duration.of_day 7) () in
14 | let image = Cluster.Watch_docker.pull ~schedule:weekly "chocobozzz/peertube:production-bullseye" in
15 | Cluster.Watch_docker.service ~name:"infra_peertube" ~image ()
16 | ```
17 |
18 | The deployment is achieved through an Ansible Playbook. Further details are available [here](/watch-ocaml-org).
19 |
20 | The second part of the update was to improve the visibility of the backups for watch.ocaml.org. As noted [previously](/2022/11/08/tarsnap-backups.html), these use [Tarsnap](https://www.tarsnap.com) running monthly via `CRON`.
21 |
22 | For this, a new plugin was added to OCurrent called [ocurrent_ssh](https://github.com/ocurrent/ocurrent/tree/master/plugins/ssh). This plugin allows arbitrary SSH commands to be executed as part of an OCurrent pipeline.
23 |
24 | Again using a schedule node, the `Current_ssh.run` node will be triggered on a 30-day schedule, and the logs for each run will be available on [deploy.ci.ocaml.org](https://deploy.ci.ocaml.org).
25 |
26 | ```
27 | let monthly = Current_cache.Schedule.v ~valid_for:(Duration.of_day 30) () in
28 | let tarsnap = Current_ssh.run ~schedule:monthly "watch.ocaml.org" ~key:"tarsnap" (Current.return ["./tarsnap-backup.sh"])
29 | ```
30 |
31 |
--------------------------------------------------------------------------------
/_posts/2023-03-09-moving-opam-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: opam.ocaml.org move to Scaleway
3 | ---
4 |
5 | Following @avsm's post on [discuss.ocaml.org](https://discuss.ocaml.org/t/migration-opam-ocaml-org-moving-providers-this-week/11606), we are pleased to announce that the DNS names have now been switched over.
6 |
7 | > We are moving the opam.ocaml.org servers between hosting providers, and wanted to give everyone clear notice that this is happening. Over the next 24-48 hours, if you notice any unexpected changes in the way your opam archives work (for example, in your CI or packaging systems), then please do let us know immediately, either here or in ocaml/infrastructure#19.
8 | >
9 | > The reason for the move is to take advantage of Scaleway’s generous sponsorship of ocaml.org, and to use their energy efficient renewable infrastructure for our machines.
10 | >
11 | > This also marks a move to building the opam website via the ocurrent infrastructure, which leads to faster and more reliable updates to the hosted package archives (see here for the service graph and build logs). There are also now multiple machines behind the opam.ocaml.org DNS (via round-robin DNS), and this makes it easier for us to publish the archives to a global CDN in the future.
12 | >
13 | > But in the very short term, if something explodes unexpectedly, please do let us know.
14 |
--------------------------------------------------------------------------------
/_posts/2023-03-10-opam-repository-mingw.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Sunsetting opam-repository-mingw
3 | ---
4 |
5 | As [previously announced](https://fdopen.github.io/opam-repository-mingw/2021/02/26/repo-discontinued), "opam-repository-mingw" is no longer receiving updates.
6 |
7 | We're actively working on getting the Windows compiler packages into [ocaml/opam-repository](https://github.com/ocaml/opam-repository). There are two issues which are taking (me) a little while to finish solving, but more on that further below.
8 |
9 | In the gap - of hopefully only a month or so - before this is ready, there is an issue that new releases are of course not available when opam-repository-mingw is being used with [`ocaml/setup-ocaml@v2`](https://github.com/ocaml/setup-ocaml) GitHub actions workflows. I'm hoping here to set out what's happening, and what steps you may need to take to keep your GitHub Actions Windows workflows running smoothly over the next few months.
10 |
11 | ## What's happening right now?
12 |
13 | We've updated setup-ocaml to use [ocaml-opam/opam-repository-mingw](https://github.com/ocaml-opam/opam-repository-mingw) instead of [fdopen/opam-repository-mingw](https://github.com/fdopen/opam-repository-mingw) (see [ocaml/setup-ocaml#651](https://github.com/ocaml/setup-ocaml/pull/651)). This clone has been augmented with:
14 | - OCaml 4.14.1 packages, in the same style as the 4.14.0 forked packages (the "pre-compiled" package variants exist, but they're not pre-compiled)
15 | - Changes to the constraints for _existing_ packages only
16 |
17 | If you're using setup-ocaml in its default configuration, you should notice no change except that `4.14.x` builds should now use 4.14.1 and the initial build will be a little slower as it builds from sources (GitHub Actions caching will then take over for subsequent runs).
18 |
19 | For new releases of packages, it's necessary to _add_ opam-repository to the repositories selections for the switches. It's important that opam-repository is at a _lower priority_ than opam-repository-mingw for existing packages, so it's better to use these lines in your `ocaml/setup-ocaml@v2` step than to issue `opam repo add --rank=1000` later:
20 |
21 | ```
22 | uses: ocaml/setup-ocaml@v2
23 | with:
24 | opam-repositories: |
25 | opam-repository-mingw: https://github.com/ocaml-opam/opam-repository-mingw.git#sunset
26 | default: https://github.com/ocaml/opam-repository.git
27 | ```
28 |
29 | ## What do I do when things are broken?
30 |
31 | There's an issue tracker on [ocaml-opam/opam-repository-mingw](https://github.com/ocaml-opam/opam-repository-mingw/issues), and this is a very good place to start.
32 |
33 | If a version of a package isn't building, there are three possible remedies:
34 |
35 | - Previous versions of the package may have carried non-upstreamed patches in opam-repository-mingw. opam-repository's policy is not to carry such patches. In this case, the package actually doesn't work on Windows.
36 | - opam-repository should be updated to have `os != "win32"` added to the `available` field for the package
37 | - An issue on the package's upstream repo should be opened highlighting the need to upstream patches (or even a pull request with them!)
38 | - The patches in opam-repository-mingw make changes which may not necessarily be accepted/acceptable upstream in their current form, so the issue may be a better starting point than simply taking a patch and opening a pull request for it (for example, the `utop` package contains patches which may require further work and review)
39 | - The package relies on environment changes in "OCaml for Windows". For example, the Zarith package works in "OCaml for Windows" because the compiler packages unconditionally set the `CC` environment variable. This change is both not particularly desirable change to upstream (it is _very_ confusing, for example, when working on the compiler itself) and also extremely difficult to upstream, so the fix here is instead to change the package's availability with `(os != "win32" | os-distribution = "cygwinports")` and constrain away OCaml 5 on Windows (`"ocaml" {< "5.0" | os != "win32"}`)
40 | - Package constraints on _existing packages_ need updating in ocaml-opam/opam-repository-mingw. For example, the release of ppxlib 0.29 required some existing packages to have upperbounds added.
41 |
42 | ## What about OCaml 5.0.0?
43 |
44 | OCaml 5.0.0 was released with support for the mingw-w64 port only, however, there's a quite major bug which wasn't caught by OCaml's testsuite, but is relatively easily triggered by opam packages. I've [previously announced](https://discuss.ocaml.org/t/pre-ann-installing-windows-ocaml-5-0-0-in-opam/11150) how to add OCaml 5 to a workflow. For the time being, the packages for OCaml 5 aren't automatically made available.
45 |
46 | ## What's next?
47 |
48 | The ultimate goal is to be using an upstream build of `opam.exe` with ocaml/opam-repository, just as on Unix. Once opam 2.2 is generally available (we're aiming for an alpha release at the end of March) and the compiler packages in opam-repository support the Windows builds, we will recommend stopping use of opam-repository-mingw completely. The default in setup-ocaml won't change straight away, since that risks breaking existing workflows.
49 |
50 | With upstream compiler support, we'll be able to extend some of the existing bulk build support already being done for Linux to Windows and start to close the gap of patches in opam-repository-mingw.
51 |
52 | ## Windows compiler packages
53 |
54 | I mentioned earlier the problems with moving the compiler packages into opam-repository, and just for general interest this elaborates on them.
55 |
56 | The first issue affects the use of the Visual Studio port ("MSVC") and is a consequence of the somewhat strange way that the C compiler is added to the environment when using the Visual Studio C compiler. "OCaml for Windows" (as well as Diskuv) use a wrapper command (it's `ocaml-env` in "OCaml for Windows" and `with-dkml` in Diskuv). Those commands are Windows-specific, which is an issue for upstream opam. There's an alternate way which sets the environment variables in a more opam-like way. Doing it that way, though, requires an improvement to opam's environment handling which is in opam 2.2, otherwise there's an easy risk of "blowing" the environment.
57 |
58 | The second issue is selecting the C compiler. On Unix, this is easy
59 | with `ocaml-base-compiler` because there is only one "system" C compiler. Windows has two ports of OCaml, and the configuration requires it to be explicitly selected. That requires input from the user on switch creation for a Windows switch.
60 |
61 | "OCaml for Windows" solves this by packaging the Windows compilers with the variant name appended, just as opam-repository used to, so `ocaml-variants.4.14.1+mingw64` selects the mingw-w64 port and `ocaml-variants.4.14.1+msvc64` selects the MSVC64 port. The problem, as we already had in opam-repository, is that this adds 4 packages for each release of OCaml in `ocaml-variants`, and leads to a combinatorial explosion when we start considering flambda and other relevant compiler options.
62 |
63 | opam-repository switched to using the `ocaml-option-` packages to solve the combinatorial explosion which was already present in opam-repository. The demonstration repo for OCaml 5 on Windows is already using an adapted version of this so that `ocaml-option-mingw` selects the mingw-w64 port (by default 64-bit, with `ocaml-option-32bit` then selecting the 32-bit port).
64 |
65 | This work is all in progress and being tested alongside changes in opam 2.2 to support the _depext_ experience on Windows. The only reason that's not being upstreamed piecemeal is that changes to the compiler packages in opam-repository trigger switch rebuilds all over the world, so we don't want to do that until we're sure that the packages are correct. The intention is to do this around the time of the alpha release of opam 2.2, once the work in opam itself has settled down.
66 |
67 | Thanks for getting to the end, and happy Windows CI testing!
68 |
69 | *[Discuss this post further](https://discuss.ocaml.org/t/sunsetting-opam-repository-mingw/11632) on the forums.*
70 |
--------------------------------------------------------------------------------
/_posts/2023-03-17-opam-ci-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Renaming opam.ci.ocaml.org / opam.ci3.ocamllabs.io
3 | ---
4 |
5 | The intention is to retire the _ocamllabs.io_ domain.
6 | Therefore any services using the domain will be redirected.
7 | From today, the Web UI for Opam Repo CI is available on both
8 | [opam-ci.ci3.ocamllabs.io](https://opam-ci.ci3.ocamllabs.io)
9 | and [opam.ci.ocaml.org](https://opam.ci.ocaml.org)
10 | with the service available at both
11 | [opam-repo-ci.ci3.ocamllabs.io](https://opam-repo-ci.ci3.ocamllabs.io)
12 | and [opam-repo.ci.ocaml.org](https://opam-repo.ci.ocaml.org). In time,
13 | the ocamllabs.io sites will issue HTTP 301 permanent redirect messages.
14 |
15 | Previously, [opam.ci.ocaml.org](https://opam.ci.ocaml.org)
16 | targeted a web server which issued an HTTP 302 redirect to
17 | [opam.ci3.ocamllabs.io](https://opam.ci3.ocamllabs.io). This redirection
18 | has been removed. [opam.ci.ocaml.org](https://opam.ci.ocaml.org) points
19 | to the actual site.
20 |
21 |
--------------------------------------------------------------------------------
/_posts/2023-04-06-maintenance-operations.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Maintenance Operations
3 | ---
4 |
5 | Due to maintenance operations on Tuesday 11th April between 7:30am and 11:00am (UTC+1), the build system will be running at approximately 50% capacity. You may experience some build delays in Opam Repo CI and OCaml CI.
6 |
7 | Thank you for your patience and understanding during this time.
8 |
9 | Update: This work was completed on schedule and the cluster is back to full capacity.
--------------------------------------------------------------------------------
/_posts/2023-04-25-updated-images.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Updated Linux Distros and OCaml versions
3 | ---
4 |
5 | New Ubuntu 23.04 and Fedora 38 distributions have been added to the docker base image builder, following their respective releases:
6 | * Ubuntu 23.04 was released 20 April 2023
7 | * Fedora 38 was released 18 April 2023
8 | These images started building from 25 April 2023 and have been pushed to [hub.docker.com/r/ocaml/opam](https://hub.docker.com/r/ocaml/opam) with a full range of OCaml versions and variants. As always the status of the images can be viewed on [images.ci.ocaml.org](https://images.ci.ocaml.org).
9 |
10 | Alongside the Linux updates, base images containing OCaml 5.1 have also been [published](https://hub.docker.com/r/ocaml/opam/tags?page=1&name=5.1) for supported Linux platforms, following the OCaml 5.1 alpha release announcement on [discuss.ocaml.org](https://discuss.ocaml.org/t/first-alpha-release-of-ocaml-5-1-0/11949). Enjoy and please report any issues on [ocurrent/docker-base-images/issues](https://github.com/ocurrent/docker-base-images/issues).
11 |
--------------------------------------------------------------------------------
/_posts/2023-04-26-check-ci-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: check.ocamllabs.io moved to check.ci.ocaml.org
3 | ---
4 |
5 | [check.ocamllabs.io](http://check.ocamllabs.io) has moved to [check.ci.ocaml.org](https://check.ci.ocaml.org). Furthermore, HTTP connections are now upgraded to HTTPS.
6 |
7 | There is an HTTP 301 permanent redirect in place for the old address.
8 |
--------------------------------------------------------------------------------
/_posts/2023-05-05-opam-repo-ci.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Temporary relocation of opam.ci.ocaml.org
3 | ---
4 |
5 | Due to operational difficulties, [opam.ci.ocaml.org](https://opam.ci.ocaml.org) has temporarily moved to [toxis.caelum.ci.dev](https://toxis.caelum.ci.dev). An HTTP redirect is in place. Please do not update your bookmarks.
6 |
7 | This issue is being tracked on GitHub [ocurrent/opam-repo-ci#220](https://github.com/ocurrent/opam-repo-ci/issues/220).
8 |
9 |
--------------------------------------------------------------------------------
/_posts/2023-05-30-emissions-monitoring.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: An Update on the Environmental Impact of the OCaml.org Cluster
3 | ---
4 |
5 | *TL;DR For the 19 machines we monitor in the OCaml.org cluster we are seeing a figure in the ball park of 70kg of CO2e per week. [Discussion thread here](https://discuss.ocaml.org/t/initial-emissions-monitoring-of-the-ocaml-org-infrastructure/12335).*
6 |
7 | Since the OCaml.org redesign we have, as a community, [committed](https://discuss.ocaml.org/t/ocaml-org-recapping-2022-and-queries-on-the-fediverse/11099/21) to being [accountable for our impact on the environment](https://ocaml.org/policies/carbon-footprint). As a first step we aimed to accurately quantify our impact by calculating the total amount of energy we are using. It is necessary to establish a baseline of present activity to determine whether any changes we make in the future are reducing our CO2e emissions.
8 |
9 | This has not been a straightforward number to identify as there is a [large cluster of machines](https://infra.ocaml.org/by-use/general) running day and night providing services such as [ocaml-ci][], [opam-repo-ci][], the [docker base images](https://images.ci.ocaml.org) and services like ocaml.org itself and the federated watch.ocaml.org. These machines span numerous architectures, operating systems, and types (e.g. virtual vs. bare-metal) making it difficult to build portable tools to monitor each machine.
10 |
11 | We've used the past few months to build new tools and deploy monitoring devices, to give us as accurate a figure as possible.
12 |
13 | ## Monitoring Using IPMI-enabled Machines
14 |
15 | One method for collecting power usage numbers is to use the Intelligent Platform Management Interface (IPMI). IPMI is a specification for a set of interfaces that allow for the management and monitoring of computers. Quite often the controller used by the IPMI will be the Baseboard Management Controller (BMC). Sensors monitoring the server (including power consumption, temperature and fan speed) often report to the BMC.
16 |
17 | [Clarke][] is a tool we've built that provides a common interface for different methods of monitoring and reporting power usage. It can use [ipmitool][] to collect power consumption numbers and report this over to [Prometheus][].
18 |
19 | We have installed [Clarke][] on a number of machines that support IPMI and have made modifications to [OCluster][] to pick up the resulting Prometheus outputs for each machine.
20 |
21 | ## Quantifying the Carbon Intensity of Electricity
22 |
23 | Many systems that require electricity to run are powered from a national grid. The electricity from the grid is usually created from a variety of electricity-generating activities and the combination of different activities is usually referred to as the [grid's energy mix](https://www.nationalgrideso.com/electricity-explained/electricity-and-me/great-britains-monthly-electricity-stats).
24 |
25 | From this mix, the average *carbon intensity* can be calculated. Carbon intensity is the amount of Carbon Dioxide Equivalent (CO2e) emissions produced in order to supply a kilowatt hour of electricity. The units are grams of CO2e per kilowatt hour (gCO2e/kWh). Why CO2e? Carbon dioxide (CO2) is not the only Greenhouse Gas (for example there is water vapour, methane etc.). The [Carbon Dioxide Equivalent](https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Glossary:Carbon_dioxide_equivalent) unit gives us a way to convert between the various Greenhouse Gases allowing for a fair way to compare different emitting activities.
26 |
27 |
28 | Fortunately, the machines we can monitor using IPMI are all located in Cambridge. We can use the [carbon-intensity][] API for accessing real-time numbers for the national grid in Great Britain. In fact, we developed a tool of the same name, [carbon-intensity](https://github.com/geocaml/carbon-intensity), to abstract over the various APIs, providing a single common interface for fetching carbon intensity values.
29 |
30 | ```ocaml
31 | type t
32 | (** A configuration parameter for the API *)
33 |
34 | val get_intensity : t -> int option
35 | (** [get_intensity t] returns the current gCO2/kWh if available. *)
36 | ```
37 |
38 | ## A value for total emissions
39 |
40 | With both the cluster's total power draw and a value for the carbon intensity, we can now calculate a value for total emissions from energy use. For a single machine, let's assume a power reading of `250W`. We also know how frequently we are sampling the power reading, for example every `10s`. We can use these values to calculate the `kWh` value.
41 |
42 | ```
43 | (250 / 1000) * (10 / 3600) = 0.000694
44 | ```
45 |
46 | If the grid is currently reporting a carbon intensity of `100gCO2e/kWh` then the emissions at this particular point in time are:
47 |
48 | ```
49 | 0.000694 * 100 = 0.0694gCO2e
50 | ```
51 |
52 | To put that in perspective, for one person, the single journey from [Heathrow airport in London to Belfast International Airport](https://www.atmosfair.de/en/offset/flight/) (a distance of about 500km) works out at about `160kg` of CO2e. If the fictitious machine above were to always draw `250W` and the grid always had the same intensity, it would take about 266 days to equate to the same amount.
53 |
54 | We can perform this calculation for every machine, every ten seconds and add them all together to arrive at the total emissions for these machines over some arbitrary amount of time (e.g. a week).
55 |
56 | Currently, for the 19 machines we monitor we are seeing a figure in the ball park of **`70kg` of CO2e per week**. This fluctuates depending on the current load on the cluster and the carbon intensity of the grid. At the time of writing, the maximum value for the carbon intensity of the grid today was `125gCO2e/kWh` and a minimum of `55gCO2e/kWh`.
57 |
58 | ## Next Steps
59 |
60 | ### Publicly Available Dashboard
61 | The numbers behind the emissions are currently not publicly available. We would like to provide a simple dashboard for interested people to see the real-time numbers.
62 |
63 | ### Machines without IPMI
64 |
65 | Not all machines support IPMI. One possible solution is to obtain power information directly from the hardware. [Variorum][] is a "vendor-neutral library for exposing power and performance capabilities". Using Variorum we can access information such as [Intel's RAPL](https://01.org/blogs/2014/running-average-power-limit-–-rapl) interface. We've worked on [OCaml bindings to this library](https://github.com/patricoferris/ocaml-variorum) which are already incorporated into [Clarke][]. Whilst the power values reported by Variorum might not be perfect or total, they would be a good proxy to enable more types of machines to be monitored.
66 |
67 | ### Other Emissions
68 |
69 | [Embodied energy and carbon](https://principles.green/principles/embodied-carbon/) also make up a large proportion of our impact on the environment. Embodied carbon reflects the amount of carbon pollution required to create and eventually dispose of the machines that run our software. These numbers are not necessarily the easiest to calculate, but we could make a start at trying to work out some figures for the embodied carbon of the hardware we use and also for the rate at which new hardware replaces slow or broken hardware (and what happens to that older hardware).
70 |
71 | By combining these figures we would get a more realistic idea of our carbon footprint, giving us a better chance of minimising our impact in the long term.
72 |
73 | ### Carbon-aware Solutions
74 |
75 | Now that we have some idea of our environmental impact, it is a good time to start thinking of ways to minimise it. For example:
76 |
77 | - Carbon-aware scheduling for low-priority jobs. The [carbon-intensity][] API supports fairly accurate forecasts of the grid's carbon intensity, using this we could schedule the Docker base-image builds to only happen when the carbon intensity is low.
78 | - Reducing the number of builds using better solving. Currently many packages have their dependencies installed in one go during a build. This means that a [solver-service][] works out the exact packages and dependencies needed and installs them in a single build step. This also means that if one down-stream package changes, the entire build step is invalidated resulting in a full rebuild. We could instead split the installation process into multiple steps to help mitigate this problem – particularly for large, more stable packages like dune.
79 |
80 |
81 |
82 | ## In Conclusion
83 | The community behind OCaml.org has been working hard to make good on our commitment to sustainability. Our first goal was to establish a way of measuring how much CO2e the OCaml.org cluster consumes over a specific period of time. We have spent the past couple of months developing a method that gives us a good estimate of that number.
84 |
85 | There is work left to do to get more accurate results, such as accounting for embodied energy and carbon as well as being able to measure the energy consumption of more types of machines. The goal is to create carbon-aware solutions that minimise the impact of the OCaml.org cluster on the environment.
86 |
87 | [ocaml-ci]: https://github.com/ocurrent/ocaml-ci
88 | [opam-repo-ci]: https://github.com/ocurrent/opam-repo-ci
89 | [carbon-intensity]: https://carbonintensity.org.uk
90 | [Clarke]: https://github.com/ocurrent/clarke
91 | [ipmitool]: https://github.com/ipmitool/ipmitool
92 | [Prometheus]: https://prometheus.io
93 | [OCluster]: https://github.com/ocurrent/ocluster
94 | [Variorum]: https://variorum.readthedocs.io/en/latest/
95 | [solver-service]: https://github.com/ocurrent/solver-service
96 |
--------------------------------------------------------------------------------
/_posts/2023-06-09-grafana-changes.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Grafana Dashboard Changes
3 | ---
4 |
5 | The Grafana dashboards at status.ci3.ocamllabs.io and
6 | status.ci.ocamllabs.io have been merged into a single dashboard at
7 | [status.ocaml.ci.dev](https://status.ocaml.ci.dev). HTTP redirects
8 | are in place for the old addresses.
9 |
10 |
--------------------------------------------------------------------------------
/_posts/2023-06-15-opam-repo-ci-ocaml-ci.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Relocation of opam.ci.ocaml.org and ocaml.ci.dev
3 | ---
4 |
5 | The server `toxis` where [Opam-Repo-CI](https://opam.ci.ocaml.org) and [OCaml-CI](https://ocaml.ci.dev) were deployed suffered hardware difficulties yesterday, resulting in BTRFS filesystem corruption and memory issues. These issues are tracked on [ocaml/infrastructure#51](https://github.com/ocaml/infrastructure/issues/51). Services were restored temporarily using a spare spinning disk, but we continued to see ECC memory issues.
6 |
7 | All services have now been redeployed on new ARM64 hardware. We retained the databases for Prometheus, OCaml-CI and Opam-Repo-CI, but unfortunately, older job logs have been lost.
8 |
9 | The external URLs for these services are unchanged.
10 |
--------------------------------------------------------------------------------
/_posts/2023-06-28-upgrading-linux-distros.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Upgrading to Debian 12 for ocaml docker images
3 | ---
4 |
5 | The OCaml infrastructure team is going to move to Debian 12 as the main distribution from Debian 11. We will continue to provide Debian 11 and 10 images while they are supported, dropping Debian 10 when it reaches end of life on 2024-06-30. In addition to these changes we are deprecating the Ubuntu 18.04, Alpine 3.16/3.17, OL7, and OpenSuse 15.2 distributions as they have reached end of life. We strongly recommend updating to a newer version if you are still using them.
6 |
7 | Please get in touch on https://github.com/ocaml/infrastructure/issues if you have questions or requests for additional support.
8 |
--------------------------------------------------------------------------------
/_posts/2023-08-08-freebsd-testing.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: FreeBSD is now tested in opam-repo-ci
3 | ---
4 |
5 | [OBuilder](https://github.com/ocurrent/obuilder/issues/109)
6 | now supports [FreeBSD](https://www.freebsd.org) which has allowed
7 | [opam-repo-ci](https://opam.ci.ocaml.org) to be extended to test against
8 | it. Currently, we are testing Opam 2.1.5 with OCaml 4.14.1 and OCaml
9 | 5.0 with FreeBSD 13.2 (AMD64).
10 |
--------------------------------------------------------------------------------
/_posts/2023-09-21-more-freebsd-news.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: FreeBSD is Available in More Places
3 | ---
4 |
5 | Previously the infrastructure team had made FreeBSD available for [`opam-repo-ci`](https://opam.ci.ocaml.org).
6 | Now we can announce that the same support has been added to [`ocaml-ci`](https://ocaml.ci.dev), giving coverage for both OCaml
7 | 4.14 and the new OCaml 5.1 release. `opam-repo-ci` has also been upgraded to support OCaml 5.1. We aim to support both 4.14 as
8 | the Long Term Support release and the latest 5.* release.
9 |
10 | Additionally an [`opam-health-check` instance](http://freebsd-health-check.ocamllabs.io:8080) has been setup to provide
11 | continuous checking of opam repository packages against FreeBSD 13.2 x86_64 for both the 4.14 and 5.1 releases of OCaml.
12 | This will allow the community to check whether packages work on FreeBSD and provide fixes to `opam-repository` that will
13 | then get tested on FreeBSD. Closing the loop and giving the community the tools to support OCaml on FreeBSD effectively.
14 |
15 | We hope the community finds the FreeBSD support useful.
16 |
--------------------------------------------------------------------------------
/_posts/2023-11-06-current-bench-maintenance.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Current Bench Maintenance
3 | ---
4 |
5 | Due to storage related maintenance operations on Wednesday 8th November '23 between 12:00pm and 3:00pm (UTC+1), the current-bench benchmarking service will be unavailable.
6 |
7 | Thank you for your patience and understanding during this time.
8 |
--------------------------------------------------------------------------------
/_posts/2023-11-09-macos-sonoma.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: macOS Sonoma Update
3 | date: "2023-11-09"
4 | ---
5 |
6 | [OBuilder](https://github.com/ocurrent/obuilder) workers for both Intel and Apple Silicon have been updated from macOS Ventura to [macOS Sonoma](https://www.apple.com/macos/sonoma/) 14.1.1.
7 |
8 | From today, [ocaml-ci](https://ocaml.ci.dev) and [opam-repo-ci](https://opam.ci.ocaml.org) will test against Sonoma rather than Ventura.
9 |
--------------------------------------------------------------------------------
/_posts/2023-12-04-services-moved.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Relocating opam.ci.ocaml.org and ocaml.ci.dev
3 | ---
4 |
5 | About six months ago, [`opam-repo-ci` (opam.ci.ocaml.org)](https://opam.ci.ocaml.org) suffered from a lack of system memory ([issue 220](https://github.com/ocurrent/opam-repo-ci/issues/220)) which caused it to be moved to the machine hosting [`ocaml-ci` (ocaml.ci.dev)](https://ocaml.ci.dev).
6 |
7 | Subsequently, that machine suffered from BTRFS volume corruption ([issue 51](https://github.com/ocaml/infrastructure/issues/51)). Therefore, we moved both services to a larger new server. The data was efficiently migrated using BTRFS tools: `btrfs send | btrfs receive`.
8 |
9 | Since the move, we have seen issues with BTRFS metadata. Plus, we have suffered from a build-up of subvolumes, as reported by other users: [Docker gradually exhausts disk space on BTRFS](https://github.com/moby/moby/issues/27653).
10 |
11 | Unfortunately, both services went down on Friday evening ([issue 85](https://github.com/ocaml/infrastructure/issues/85)). Analysis showed over 500 BTRFS subvolumes, a shortage of metadata space, and insufficient space to perform a BTRFS _rebalance_.
12 |
13 | Returning to the original configuration of splitting the `ci.dev` and OCaml.org services, they have been moved onto new and separate hardware. The underlying filesystem is now a RAID1-backed ext4, formatted with `-i 8192` in order to ensure the availability of sufficient inodes. Docker uses Overlayfs. RSYNC was used to copy the databases and logs from the old server. This change should add resilience and has doubled the capacity for storing history logs.
14 |
15 |
--------------------------------------------------------------------------------
/_posts/2024-01-14-electrical-work.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Electrical Work in the Datacentre
3 | date: "2024-01-14"
4 | ---
5 |
6 | There is an electrical shutdown for essential maintenance this weekend, which will affect large parts of the cluster. All Power9, ARM64, RISC-V, Windows and FreeBSD workers and docs.ci.ocaml.org, docs-data.ocaml.org and the benchmarking servers will be unavailable. x86_64 workers will be substantially reduced. Service is expected to resume late on Sunday evening.
7 |
--------------------------------------------------------------------------------
/_posts/2024-02-19-current-bench-maintenance.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Current Bench Maintenance
3 | date: "2024-02-19"
4 | ---
5 |
6 | Due to storage related maintenance operations on Monday 19th February '24
7 | between 6:30am and 9:30am (UTC+1), the current-bench benchmarking service will
8 | be unavailable.
9 |
10 | Thank you for your patience and understanding during this time.
11 |
--------------------------------------------------------------------------------
/about.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: About
3 | permalink: /about/
4 | ---
5 |
6 | Disks
7 |
8 | ```sh
9 | lsblk
10 | ```
11 |
12 | And then
13 |
14 | ```sh
15 | apt install smartmontools
16 | smartctl -i /dev/sdb
17 | ```
18 |
19 | Processor
20 |
21 | ```sh
22 | nproc
23 | ```
24 |
25 | Ubuntu release
26 |
27 | ```sh
28 | lsb_release -a
29 | ```
30 |
31 | Machine model
32 | ```sh
33 | sudo dmidecode | less
34 | ```
35 |
36 |
--------------------------------------------------------------------------------
/big.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Machines where nproc > 50"
3 | ---
4 |
5 |
6 |
7 | Machine |
8 | Model |
9 | OS |
10 | Threads |
11 | Location |
12 | Worker Pool |
13 | Notes |
14 |
15 | {% for item in site.machines %}
16 | {% if item.threads > 50 %}
17 |
18 | {{item.name}} |
19 | {{item.manufacturer}} {{item.model}} |
20 | {{item.os}} |
21 | {{item.threads}} |
22 | {{item.location}} |
23 | {{item.pool}} |
24 | {{item.notes}} |
25 |
26 | {% endif %}
27 | {% endfor %}
28 |
29 |
30 |
--------------------------------------------------------------------------------
/by-ip/128.232.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 128.232
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/136.144.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 136.144
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/147.75.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 147.75
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/148.100.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 148.100
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/163.172.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 163.172
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/212.47.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 212.47
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/51.159.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 51.159
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/54.146.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 54.146
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-ip/54.224.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 54.224
3 | ---
4 |
5 | {% include ip-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-location.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "By Location"
3 | permalink: /location/
4 | ---
5 |
6 | {% assign locations = "Caelum,Equinix,Scaleway,AWS,Marist College,Custodian" | split: ',' %}
7 | {% for location in locations %}
8 |
9 | # {{location}}
10 |
11 |
12 |
13 | Machine |
14 | Model |
15 | Threads |
16 | Notes |
17 |
18 | {% for item in site.machines %}
19 | {% if item.location contains location %}
20 |
21 | {{item.name}} |
22 | {{item.manufacturer}} {{item.model}} |
23 | {{item.threads}} |
24 | {{item.notes}} |
25 |
26 | {% endif %}
27 | {% endfor %}
28 |
29 |
30 | {% endfor %}
31 |
32 |
--------------------------------------------------------------------------------
/by-location/aws.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: AWS
3 | ---
4 |
5 | {% include location-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-location/caelum.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Caelum
3 | ---
4 |
5 | {% include location-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-location/custodian.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Custodian
3 | ---
4 |
5 | {% include location-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-location/equinix.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Equinix
3 | ---
4 |
5 | {% include location-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-location/iitm.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: IIT Madras
3 | ---
4 |
5 | {% include location-table.html %}
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/by-location/marist-college.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Marist College
3 | ---
4 |
5 | {% include location-table.html %}
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/by-location/scaleway.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Scaleway
3 | ---
4 |
5 | {% include location-table.html %}
6 |
7 |
8 |
--------------------------------------------------------------------------------
/by-use/benchmarking.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Benchmarking
3 | ---
4 |
5 | {% include use-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/freebsd-x86_64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: FreeBSD-x86_64
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/general.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: General
3 | ---
4 |
5 |
6 |
7 | Machine |
8 | Model |
9 | OS |
10 | Threads |
11 | Location |
12 | Notes |
13 |
14 | {% for item in site.machines %}
15 | {% if item.pool == nil and item.use == nil %}
16 |
17 | {{item.name}} |
18 | {{item.manufacturer}} {{item.model}} |
19 | {{item.os}} |
20 | {{item.threads}} |
21 | {{item.location}} |
22 | {{item.notes}} |
23 |
24 | {% endif %}
25 | {% endfor %}
26 |
27 |
28 |
--------------------------------------------------------------------------------
/by-use/linux-arm64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Linux-arm64
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/linux-ppc64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Linux-ppc64
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/linux-riscv64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Linux-riscv64
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/linux-s390x.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Linux-s390x
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/linux-x86_64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Linux-x86_64
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/macos-arm64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: MacOS-arm64
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/macos-x86_64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: MacOS-x86_64
3 | ---
4 |
5 | {% include pool-table.html %}
6 |
7 |
--------------------------------------------------------------------------------
/by-use/openbsd-amd64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: OpenBSD-amd64
3 | ---
4 |
5 |
6 |
7 | Machine |
8 | Model |
9 | OS |
10 | Threads |
11 | Location |
12 |
13 | {% assign lower_title = page.title | downcase %}
14 | {% for item in site.machines %}
15 | {% assign lower_pool = item.pool | downcase %}
16 | {% if lower_pool == lower_title %}
17 |
18 | {{item.name}} |
19 | {{item.manufacturer}} {{item.model}} |
20 | {{item.os}} |
21 | {{item.threads}} |
22 | {{item.location}} |
23 |
24 | {% endif %}
25 | {% endfor %}
26 |
27 |
28 |
--------------------------------------------------------------------------------
/by-use/windows-amd64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Windows-amd64
3 | ---
4 |
5 |
6 |
7 | Machine |
8 | Model |
9 | OS |
10 | Threads |
11 | Location |
12 |
13 | {% assign lower_title = page.title | downcase %}
14 | {% for item in site.machines %}
15 | {% assign lower_pool = item.pool | downcase %}
16 | {% if lower_pool == lower_title %}
17 |
18 | {{item.name}} |
19 | {{item.manufacturer}} {{item.model}} |
20 | {{item.os}} |
21 | {{item.threads}} |
22 | {{item.location}} |
23 |
24 | {% endif %}
25 | {% endfor %}
26 |
27 |
28 |
--------------------------------------------------------------------------------
/by-use/windows-x86_64.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Windows-x86_64
3 | ---
4 |
5 |
6 |
7 | Machine |
8 | Model |
9 | OS |
10 | Threads |
11 | Location |
12 |
13 | {% assign lower_title = page.title | downcase %}
14 | {% for item in site.machines %}
15 | {% assign lower_pool = item.pool | downcase %}
16 | {% if lower_pool == lower_title %}
17 |
18 | {{item.name}} |
19 | {{item.manufacturer}} {{item.model}} |
20 | {{item.os}} |
21 | {{item.threads}} |
22 | {{item.location}} |
23 |
24 | {% endif %}
25 | {% endfor %}
26 |
27 |
28 |
--------------------------------------------------------------------------------
/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: home
3 | title: OCaml.org Infrastructure
4 | ---
5 |
6 |
--------------------------------------------------------------------------------
/machines.csv:
--------------------------------------------------------------------------------
1 | ---
2 | layout: null
3 | ---
4 |
5 | fqdn,ip,manufacturer,model,os,location,pool
6 | {% for item in site.machines -%}
7 | {{item.fqdn}},{{item.ip}},{{item.manufacturer}},{{item.model}},{{item.os}},{{item.location}},{{item.pool}}
8 | {% endfor %}
9 |
10 |
--------------------------------------------------------------------------------
/machines.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Machines
3 | toc: false
4 | classes: wide
5 | ---
6 |
7 | {% include machines-map.html %}
8 |
9 |
10 |
11 | Machine |
12 | Model |
13 | OS |
14 | Threads |
15 | Location |
16 | Notes |
17 |
18 | {% for item in site.machines %}
19 | {% if item.pool == nil %}
20 |
21 | {{item.name}} |
22 | {{item.manufacturer}} {{item.model}} |
23 | {{item.os}} |
24 | {{item.threads}} |
25 | {{item.location}} |
26 | {{item.notes}} |
27 |
28 | {% endif %}
29 | {% endfor %}
30 |
31 |
32 | {% assign pools = "linux-x86_64,linux-ppc64,linux-arm64,windows-x86_64,linux-s390x,linux-riscv64,macos-x86_64,macos-arm64,windows-amd64,openbsd-amd64" | split: ',' %}
33 | {% for pool in pools %}
34 |
35 | # Worker Pool {{pool}}
36 |
37 |
38 |
39 | Machine |
40 | Model |
41 | OS |
42 | Threads |
43 | Location |
44 |
45 | {% for item in site.machines %}
46 | {% if item.pool == pool %}
47 |
48 | {{item.name}} |
49 | {{item.manufacturer}} {{item.model}} |
50 | {{item.os}} |
51 | {{item.threads}} |
52 | {{item.location}} |
53 |
54 | {% endif %}
55 | {% endfor %}
56 |
57 |
58 | {% endfor %}
59 |
60 |
--------------------------------------------------------------------------------
/opam-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: opam.ocaml.org Deployment
3 | ---
4 |
5 | # Basic Deployment
6 |
7 | Opam is a single Docker container which exposes a website on port 80. HTTPS is added using a reverse proxy such as NGINX or Caddy and an ACME provider such as Let's Encrypt.
8 |
9 | Caddy offers automatic certificate generation and renewal, thus given a `Caddyfile` like this:
10 |
11 | ```
12 | opam.ocaml.org {
13 | reverse_proxy opam_live:80
14 | }
15 | ```
16 |
17 | And a `docker-compose.yml` file, such as
18 |
19 | ```
20 | version: "3.7"
21 | services:
22 | caddy:
23 | image: caddy
24 | ports:
25 | - "80:80"
26 | - "443:443"
27 | volumes:
28 | - /etc/caddy:/etc/caddy:ro
29 | - caddy_data:/data
30 | - caddy_config:/config
31 | opam_live:
32 | image: ocurrent/opam.ocaml.org:live
33 | sysctls:
34 | - 'net.ipv4.tcp_keepalive_time=60'
35 | volumes:
36 | caddy_data:
37 | caddy_config:
38 | ```
39 |
40 | The service can be made available with Docker Compose:
41 |
42 | ```shell
43 | docker compose up
44 | ```
45 |
46 | # Round-Robin DNS
47 |
48 | Our deployment uses round-robin DNS with HTTP-01 challenges. When the ACME client requests the validation challenge, the request may go to either machine, not necessarily the originator. This is addressed by configuring each machine to forward failed requests for `/.well-known/acme-challenge/` to the other machine.
49 |
50 | NGINX allows this to be configured via the `try_files` directive, which supports a list of places to check. Below is the configuration of `opam-4`, which shows that failed requests will be redirected to `opam-5`. The configuration is reversed on `opam-5`.
51 |
52 | ```
53 | server {
54 | server_name _;
55 |
56 | location @proxy {
57 | proxy_pass http://opam-5.ocaml.org;
58 | }
59 |
60 | location ^~ /.well-known/acme-challenge/ {
61 | default_type "text/plain";
62 | root /var/www/html;
63 | try_files $uri @proxy;
64 | break;
65 | }
66 |
67 | location = /.well-known/acme-challenge/ {
68 | return 404;
69 | }
70 |
71 | location / {
72 | return 301 https://$host$request_uri;
73 | }
74 | }
75 | ```
76 |
77 | # Initial certificate setup for each machine
78 |
79 | Copy the HTTP configuration file for NGINX (shown above) to both machines. These files differ only in the `proxy_pass` configuration, with each machine pointing to the other.
80 |
81 | ```sh
82 | scp nginx/opam-4-http.conf opam-4.ocaml.org:
83 | scp nginx/opam-5-http.conf opam-5.ocaml.org:
84 | ```
85 |
86 | On both machines, start NGINX
87 |
88 | ```
89 | docker run --rm -it -p 80:80 -p 443:443 -v ~:/etc/nginx/conf.d:ro -v wwwroot:/var/www/html nginx
90 | ```
91 |
92 | Create the certificates on both servers using Certbot. The `--webroot` invocation tells Certbot to write the well-known challenge to the path provided `/var/www/html` which will be served over HTTP by NGINX. As both are Docker containers, they communicate via a shared volume, `wwwroot`. On `opam-5` create the certificate for `opam-5.ocaml.org` rather than `opam-4`.
93 |
94 | ```sh
95 | for fqdn in opam-4.ocaml.org opam.ocaml.org staging.opam.ocaml.org ; do
96 | docker run --rm -it -v wwwroot:/var/www/html -v letsencrypt:/etc/letsencrypt certbot/certbot certonly --webroot -m mark@tarides.com --agree-tos --no-eff-email -d $fqdn --webroot-path /var/www/html
97 | done
98 | ```
99 |
100 | Verify success by checking the certificates on the disk.
101 |
102 | {% raw %}
103 | ls $(docker volume inspect --format '{{ .Mountpoint }}' letsencrypt)/live
104 | {% endraw %}
105 |
106 | The temporary NGINX containers can now be stopped.
107 |
108 | # Certificate renewal
109 |
110 | Using NGINX, we must renew the certificates manually. This can be achieved via a daily cron job which runs `certbot renew`. As we are running Certbot within a Docker container, we need to hook the deployment using an external file. The renewal process is, therefore:
111 |
112 | 1) Remove `deploy` file from the Let's Encrypt Docker volume
113 | 2) Run `certbot renew` with `--deploy-hook "touch /etc/letsencrypt/deploy"`
114 | 3) Reload the NGINX configuration if the `deploy` file has been created
115 |
116 | {% raw %}
117 | #!/bin/bash
118 | set -eux
119 |
120 | LEV="$(docker volume inspect --format '{{ .Mountpoint }}' letsencrypt)"
121 | rm -f $LEV/deploy
122 |
123 | systemd-cat -t "certbot" docker run --rm -v wwwroot:/var/www/html -v letsencrypt:/etc/letsencrypt certbot/certbot renew --webroot -m mark@tarides.com --agree-tos --no-eff-email --webroot-path /var/www/html --deploy-hook "touch /etc/letsencrypt/deploy"
124 |
125 | if [ -f $LEV/deploy ] ; then
126 | PS=$(docker ps --filter=name=infra_nginx -q)
127 | if [ -n "$PS" ] ; then
128 | docker exec $PS nginx -s reload
129 | fi
130 | fi
131 | {% endraw %}
132 |
133 | # NGINX and new Docker containers
134 |
135 | If the NGINX `proxy_pass` directive points to a specific host such as `proxy_pass http://opam_live`, then this is evaluated once when NGINX starts. When the Docker container for the backend service is updated, the IP address will change, and NGINX will fail to reach the new container. However, when you use a variable to specify the domain name in the `proxy_pass` directive, NGINX re‑resolves the domain name when its TTL expires. You must include the `resolver` directive to explicitly specify the name server. Here 127.0.0.11 is the Docker name server.
136 |
137 | ```
138 | location / {
139 | resolver 127.0.0.11 [::1]:5353 valid=15s;
140 | set $opam_live "opam_live";
141 | proxy_pass http://$opam_live;
142 | }
143 | ```
144 |
145 |
146 | # IPv6 Considerations
147 |
148 | As noted for [www.ocaml.org](/www-ocaml-org), Docker does not listen on IPv6 addresses in swarm mode, and ACME providers check both A and AAAA records. As we have NGINX operating as a reverse proxy, we can define NGINX as a global service with exactly one container per swarm node, and that the ports are in host mode, which publishes a host port on the node which does listen on IPv6. In the Docker service configuration shown in the next section note `deploy: mode: global` and `ports: mode: host`.
149 |
150 | # TCP BBR Congestion Control
151 |
152 | Cubic has been the default TCP congestion algorithm on Linux since 2.6.19 with both macOS and Microsoft also using it as the default. Google proposed Bottleneck Bandwidth and Round-trip propagation time (BBR) in 2016 which has been available in the Linux kernel since 4.9. It has been shown to generally achieve higher bandwidths and lower latencies. Thanks to @jpds for the implementation details:
153 |
154 | Add these two configuration commands to `/etc/sysctl.d/01-bbr.conf`:
155 |
156 | ```
157 | net.core.default_qdisc=fq
158 | net.ipv4.tcp_congestion_control=bbr
159 | ```
160 |
161 | and set them with
162 |
163 | ```sh
164 | sudo sysctl -p /etc/sysctl.d/01-bbr.conf
165 | ```
166 |
167 | # Ansible deployment
168 |
169 | We use an Ansible playbook to manage the deployment to the two hosts.
170 |
171 | {% raw %}
172 | - hosts: all
173 | name: Set up SwarmKit
174 | tasks:
175 | - docker_swarm:
176 | listen_addr: "127.0.0.1:2377"
177 | advertise_addr: "127.0.0.1:2377"
178 |
179 | - hosts: opam-4.ocaml.org:opam-5.ocaml.org
180 | tasks:
181 | - name: configure sysctl
182 | copy:
183 | src: "{{ item }}"
184 | dest: /etc/sysctl.d
185 | loop:
186 | - "sysctl/01-bbr.conf"
187 | notify:
188 | - reload sysctl settings
189 |     - name: create nginx directory
190 |       file:
191 |         path: /etc/nginx/conf.d
192 |         state: directory
193 |     - name: configure nginx
194 |       copy:
195 |         src: "{{ item }}"
196 |         dest: /etc/nginx/conf.d
197 |       loop:
203 | - "nginx/{{ inventory_hostname_short }}-http.conf"
204 | - "nginx/{{ inventory_hostname_short }}.conf"
205 | - "nginx/opam.conf"
206 | - "nginx/staging.conf"
207 | notify:
208 | - restart nginx
209 | - name: install certbot renewal script
210 | copy:
211 | src: letsencrypt-renew
212 | dest: /etc/cron.daily/letsencrypt-renew
213 | mode: u=rwx,g=rx,o=rx
214 | - name: Set up docker services
215 | docker_stack:
216 | name: infra
217 | compose:
218 | - version: "3.8"
219 | services:
220 | nginx:
221 | deploy:
222 | mode: global
223 | ports:
224 | - target: 80
225 | published: 80
226 | protocol: tcp
227 | mode: host
228 | - target: 443
229 | published: 443
230 | protocol: tcp
231 | mode: host
232 | image: nginx
233 | volumes:
234 | - /etc/nginx/conf.d:/etc/nginx/conf.d:ro
235 | - wwwroot:/var/www/html
236 | - letsencrypt:/etc/letsencrypt:ro
237 | opam_live:
238 | image: ocurrent/opam.ocaml.org:live
239 | command: --root /usr/share/caddy
240 | volumes:
241 | wwwroot:
242 | external: true
243 | letsencrypt:
244 | external: true
245 | handlers:
246 | - name: restart nginx
247 | shell:
248 | cmd: PS=$(docker ps --filter=name=infra_nginx -q) && if [ -n "$PS" ] ; then docker exec $PS nginx -s reload; fi
249 | - name: reload sysctl settings
250 | shell:
251 | cmd: sysctl --system
252 | {% endraw %}
253 |
--------------------------------------------------------------------------------
/scripts/generate-template.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Script to quickly generate a template for a new machine
4 | # NOTE: The script needs to be run on the machine
5 |
6 | # Get system information
7 | hostname=$(hostname)
8 | ip=$(hostname -I | awk '{print $1}')
9 | fqdn=$(hostname --fqdn)
10 | model=$(dmidecode -s system-product-name)
11 | processor=$(grep -m 1 'model name' /proc/cpuinfo | cut -d: -f2 | sed 's/^ *//')
12 | memory=$(free -mh | awk '/^Mem:/{print $2"B"}')
13 | disks=$(lsblk -d -n -o NAME,SIZE | grep -v '^loop' | awk '{print " - "$1": "$2"B"}')
14 | os=$(lsb_release -d | cut -f2)
15 | threads=$(grep -c ^processor /proc/cpuinfo)
16 | location="TODO: Caelum"
17 | notes="TODO: User visible description"
18 | serial=$(dmidecode -s system-serial-number)
19 | ssh="${USER}@${fqdn}"
20 | use="TODO: use?"
21 | service="TODO: service name?"
22 |
23 | # Create the template
24 | template=$(cat < "${hostname}.md"
49 |
50 | echo "Template saved as ${hostname}.md"
51 |
--------------------------------------------------------------------------------
/summary.html:
--------------------------------------------------------------------------------
1 | ---
2 | # Feel free to add content and custom Front Matter to this file.
3 | # To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults
4 |
5 | layout: home
6 | ---
7 |
8 | {% assign pools = "linux-x86_64,linux-ppc64,linux-arm64,windows-x86_64,linux-s390x" | split: ',' %}
9 | {% for pool in pools %}
10 |
11 | - {{pool}}
12 | {%- for item in site.machines -%}
13 | {% if item.pool == pool %}
14 | * {{item.name}} ({{item.manufacturer}} {{item.model}}): {{item.threads}} threads
15 | {%- endif -%}
16 | {%- endfor -%}
17 |
18 | {% endfor %}
19 |
20 |
--------------------------------------------------------------------------------
/watch-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: watch.ocaml.org
3 | ---
4 |
5 | watch.ocaml.org is a deployment of [PeerTube](https://joinpeertube.org).
6 |
7 | Docker deployments of PeerTube typically use this
8 | [docker-compose.yml](https://github.com/Chocobozzz/PeerTube/blob/develop/support/docker/production/docker-compose.yml)
9 | file and following the
10 | [instructions](https://docs.joinpeertube.org/install/docker), however
11 | this limits our ability to run `docker service update` to refresh the
12 | Docker image when a new version is released.
13 |
14 | We will use Ansible to deploy a Docker service stack which will be periodically updated
15 | using [ocurrent deployer](https://deploy.ci.ocaml.org).
16 |
17 | The Ansible playbook is shown below. There are some initial setup
18 | steps to prepopulate the SSL certificate and secrets followed by a
19 | docker stack which implements the `docker-compose.yml` file.
20 |
21 | ```
22 | - hosts: watch.ocaml.org
23 | name: Install Peer Tube
24 | tasks:
25 | - name: Query certbot volume
26 | docker_volume_info:
27 | name: certbot-conf
28 | register: result
29 | - name: Create certbot volume
30 | shell:
31 | cmd: docker volume create certbot-conf
32 | when: not result.exists
33 | - name: Initialise a certbot certificate
34 | shell:
35 | cmd: docker run --rm --name certbot -p 80:80 -v "certbot-conf:/etc/letsencrypt" certbot/certbot certonly --standalone -d watch.ocaml.org --expand -n --agree-tos -m mark@tarides.com
36 | when: not result.exists
37 | - name: Download the nginx configuration file from the template
38 | shell:
39 | cmd: docker run --rm -v nginx-conf:/etc/nginx/conf.d bash wget https://raw.githubusercontent.com/Chocobozzz/PeerTube/master/support/nginx/peertube -O /etc/nginx/conf.d/peertube.template
40 | - name: Copy environment
41 | copy:
42 | src: secrets/env
43 | dest: /root/.env
44 | mode: 0600
45 | - name: set up deployer stack
46 | docker_stack:
47 | name: infra
48 | prune: yes
49 | compose:
50 | - version: "3.3"
51 | services:
52 | webserver:
53 | image: chocobozzz/peertube-webserver:latest
54 | env_file:
55 | - /root/.env
56 | deploy:
57 | mode: global
58 | ports:
59 | - target: 80
60 | published: 80
61 | protocol: tcp
62 | mode: host
63 | - target: 443
64 | published: 443
65 | protocol: tcp
66 | mode: host
67 | volumes:
68 | - nginx-conf:/etc/nginx/conf.d
69 | - peertube-assets:/var/www/peertube/peertube-latest/client/dist:ro
70 | - peertube-data:/var/www/peertube/storage
71 | - certbot-www:/var/www/certbot
72 | - certbot-conf:/etc/letsencrypt
73 | depends_on:
74 | - peertube
75 | restart: "always"
76 | certbot:
77 | container_name: certbot
78 | image: certbot/certbot
79 | volumes:
80 | - certbot-conf:/etc/letsencrypt
81 | - certbot-www:/var/www/certbot
82 | restart: unless-stopped
83 | entrypoint: /bin/sh -c "trap exit TERM; while :; do certbot renew --webroot -w /var/www/certbot; sleep 12h & wait $${!}; done;"
84 | depends_on:
85 | - webserver
86 | peertube:
87 | image: chocobozzz/peertube:production-bullseye
88 | env_file:
89 | - /root/.env
90 | ports:
91 | - "1935:1935"
92 | volumes:
93 | - peertube-assets:/app/client/dist
94 | - peertube-data:/data
95 | - peertube-conf:/config
96 | depends_on:
97 | - postgres
98 | - redis
99 | - postfix
100 | restart: "always"
101 | postgres:
102 | env_file:
103 | - /root/.env
104 | image: postgres:13-alpine
105 | volumes:
106 | - postgres:/var/lib/postgresql/data
107 | restart: "always"
108 | redis:
109 | image: redis:6-alpine
110 | volumes:
111 | - redis:/data
112 | restart: "always"
113 | postfix:
114 | image: mwader/postfix-relay
115 | env_file:
116 | - /root/.env
117 | volumes:
118 | - opendkim:/etc/opendkim/keys
119 | restart: "always"
120 | volumes:
121 | peertube-assets:
122 | external: true
123 | peertube-data:
124 | external: true
125 | peertube-conf:
126 | external: true
127 | nginx-conf:
128 | external: true
129 | certbot-conf:
130 | external: true
131 | certbot-www:
132 | external: true
133 | opendkim:
134 | external: true
135 | redis:
136 | external: true
137 | postgres:
138 | external: true
139 | ```
140 |
141 | The website is backed up using [Tarsnap](https://www.tarsnap.com).
142 | The Ansible playbook below installs Tarsnap on Ubuntu.
143 |
144 | The backup script is periodically run
145 | using [ocurrent deployer](https://deploy.ci.ocaml.org).
146 |
147 | ```
148 | - hosts: watch.ocaml.org
149 | name: Install Tarsnap
150 | tasks:
151 | - name: Download Tarsnap's PGP public key
152 | apt_key:
153 | url: https://pkg.tarsnap.com/tarsnap-deb-packaging-key.asc
154 | keyring: /usr/share/keyrings/tarsnap-archive-keyring.gpg
155 | state: present
156 | - name: Add Tarsnap Repository
157 | apt_repository:
158 | repo: "deb [signed-by=/usr/share/keyrings/tarsnap-archive-keyring.gpg] http://pkg.tarsnap.com/deb/{{ ansible_distribution_release|lower }} ./"
159 | filename: tarsnap
160 | state: present
161 | update_cache: yes
162 | - name: Install Tarsnap
163 | package:
164 | name: tarsnap
165 | state: present
166 | - name: Copy tarsnap key
167 | copy:
168 | src: secrets/tarsnap.key
169 | dest: /root/tarsnap.key
170 | mode: 0600
171 | ```
172 |
173 |
--------------------------------------------------------------------------------
/www-ocaml-org.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: OCaml.org Deployment
3 | ---
4 |
5 | OCaml.org is a single Docker container which exposes the website on port 8080. Therefore, the simplest deployment is to just run:
6 |
7 | ```shell
8 | docker run --rm -it -p 8080:8080 ocurrent/v3.ocaml.org-server:live
9 | ```
10 |
11 | This makes the website available at http://127.0.0.1:8080.
12 |
13 | To provide HTTPS, a reverse proxy can be used such as Nginx or Caddy. We use Caddy as it has automatic certificate provisioning and renewal.
14 |
15 | The `Caddyfile` lists the expected domain names and the internal name of the Docker container. The complete file is shown below.
16 |
17 | ```
18 | v3a.ocaml.org, v3b.ocaml.org, v3.ocaml.org, ocaml.org, www.ocaml.org {
19 | reverse_proxy www:8080
20 | }
21 | ```
22 |
23 | Both Caddy and the website itself can be deployed using Docker Compose with a `docker-compose.yml` file as below.
24 |
25 | ```
26 | version: "3.7"
27 | services:
28 | caddy:
29 | image: caddy
30 | ports:
31 | - "80:80"
32 | - "443:443"
33 | volumes:
34 | - /etc/caddy:/etc/caddy:ro
35 | - caddy_data:/data
36 | - caddy_config:/config
37 | www:
38 | image: ocurrent/v3.ocaml.org-server:live
39 | sysctls:
40 | - 'net.ipv4.tcp_keepalive_time=60'
41 | volumes:
42 | caddy_data:
43 | caddy_config:
44 | ```
45 |
46 | Create the service as follows:
47 |
48 | ```shell
49 | docker compose up
50 | ```
51 |
52 | We use [OCurrent Deployer](https://deploy.ci.ocaml.org) to build the website Docker image from GitHub and deploy the update to the machine.
53 |
54 | The update process uses `docker service update --image ocurrent/v3.ocaml.org-server` which requires Docker to be in swarm mode. If the machine has an IPv6 address published in DNS, special care is needed as `docker swarm init` [does not listen on IPv6 addresses](https://github.com/moby/moby/issues/24379) and ACME providers check that the published AAAA records point to the target machine. As we have Caddy operating as a reverse proxy, we can define Caddy as a global service with exactly one container per swarm node, with its ports in host mode, which publishes each port directly on the node. The default of a replicated service with distributed ingress ports does not listen on IPv6. Docker Compose listens on both IPv4 and IPv6 by default.
55 |
56 | The initial configuration is performed using an Ansible Playbook as follows:
57 |
58 | ```
59 | ---
60 | - hosts: all
61 | name: Set up SwarmKit
62 | tasks:
63 | - docker_swarm:
64 | listen_addr: "127.0.0.1:2377"
65 | advertise_addr: "127.0.0.1:2377"
66 |
67 | - hosts: v3b.ocaml.org
68 | name: Configure controller host
69 | tasks:
70 | - name: create caddy directory
71 | file:
72 | path: /etc/caddy
73 | state: directory
74 | - name: configure caddy
75 | copy:
76 | src: Caddyfile
77 | dest: /etc/caddy/Caddyfile
78 | notify:
79 | - restart caddy
80 | - name: set up infrastructure stack
81 | docker_stack:
82 | name: infra
83 | prune: yes
84 | compose:
85 | - version: "3.7"
86 | services:
87 | caddy:
88 | deploy:
89 | mode: global
90 | ports:
91 | - target: 80
92 | published: 80
93 | protocol: tcp
94 | mode: host
95 | - target: 443
96 | published: 443
97 | protocol: tcp
98 | mode: host
99 | image: caddy
100 | volumes:
101 | - /etc/caddy:/etc/caddy:ro
102 | - caddy_data:/data
103 | - caddy_config:/config
104 | www:
105 | image: ocurrent/v3.ocaml.org-server:live
106 | sysctls:
107 | - 'net.ipv4.tcp_keepalive_time=60'
108 | volumes:
109 | caddy_data:
110 | caddy_config:
111 | handlers:
112 | - name: restart caddy
113 | shell:
114 | cmd: PS=$(docker ps --filter=name=infra_caddy -q) && if [ -n "$PS" ] ; then docker exec -w /etc/caddy $PS caddy reload ; fi
115 | ```
116 |
117 |
--------------------------------------------------------------------------------
/zero.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Machine with no thread count"
3 | ---
4 |
5 |
6 |
7 | Machine |
8 | Model |
9 | OS |
10 | Threads |
11 | Location |
12 | Pool |
13 | Notes |
14 |
15 | {% for item in site.machines %}
16 | {% if item.threads == nil %}
17 |
18 | {{item.name}} |
19 | {{item.manufacturer}} {{item.model}} |
20 | {{item.os}} |
21 | {{item.threads}} |
22 | {{item.location}} |
23 | {{item.pool}} |
24 | {{item.notes}} |
25 |
26 | {% endif %}
27 | {% endfor %}
28 |
29 |
30 |
--------------------------------------------------------------------------------