├── .github └── workflows │ └── build-release.yml ├── .gitignore ├── LICENSE ├── README.md ├── clients.yml ├── cmd └── decrypt │ ├── README.md │ └── main.go ├── config └── config.go ├── docs ├── api.md ├── config.md ├── img │ └── flow.svg └── metrics.md ├── eldim.service ├── eldim.yml ├── go.mod ├── go.sum ├── http.go ├── internal ├── backend │ └── backend.go ├── gcs │ └── gcs.go ├── s3 │ └── s3.go └── swift │ └── swift.go ├── main.go ├── metrics.go └── util.go /.github/workflows/build-release.yml: -------------------------------------------------------------------------------- 1 | name: Build Release 2 | 3 | on: create 4 | 5 | jobs: 6 | release: 7 | name: Build Release 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Go Release Binary 11 | uses: ngs/go-release.action@v1.0.2 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | eldim 2 | *.DS_Store 3 | tests 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018-2021, Antonios A. Chariton 2 | Copyright (c) 2020-2021, Google LLC 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright 10 | notice, this list of conditions and the following disclaimer in the 11 | documentation and/or other materials provided with the distribution. 
12 | * Neither the name of the contributing organizations nor the 13 | names of its contributors may be used to endorse or promote products 14 | derived from this software without specific prior written permission. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY 20 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 23 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # eldim 2 | A Secure File Upload Proxy 3 | 4 | ## Description 5 | eldim is a web server that accepts file uploads from a particular set of 6 | hosts, and its job is to encrypt them, and then store them in an Object 7 | Storage backend system. 8 | 9 | ![The eldim flow of data](docs/img/flow.svg) 10 | 11 | It has a preconfigured ACL that only allows specific IP Addresses, or token 12 | bearers to access the file upload service. After a file is uploaded, it is 13 | encrypted, and then uploaded to a configured provider. 14 | 15 | It has been designed to work as a standalone application, which means it must 16 | not sit behind a proxy, but instead be exposed directly to the Internet. 
17 | 
18 | ## Groups & Mailing Lists
19 | Currently the project has two mailing lists, in Google Groups, that are used
20 | for communication:
21 | 
22 | ### eldim-announce
23 | The [eldim-announce](https://groups.google.com/forum/#!forum/eldim-announce)
24 | group is **recommended** for all users of eldim. It includes announcements
25 | for new versions, a changelog, as well as breaking changes that may occur
26 | in the future. Moreover, it is the place that will be used for security
27 | announcements in the future, if and when they come.
28 | 
29 | This is a very low volume list, and it is read-only. That is, only eldim
30 | updates are posted there, and you cannot send e-mails to other members.
31 | 
32 | ### eldim-dev
33 | The [eldim-dev](https://groups.google.com/forum/#!forum/eldim-dev) group
34 | tries to address that final point above, and it is the technical mailing
35 | list of the eldim project.
36 | 
37 | This group can be used to report problems, share news, exchange ideas, etc.
38 | Basically it exists for communication about technical matters related to
39 | eldim, between the users, the contributors, or the developers.
40 | 
41 | ## Design Decisions
42 | The design of eldim is data agnostic, and tries to push the relevant logic
43 | of all operations to the proper server. For example, the service itself does
44 | not care what types of files are uploaded, or when they're uploaded, or what
45 | they are. It simply receives a file and a file name, and then encrypts and
46 | uploads this file under a specific name to the Object Storage.
47 | 
48 | In eldim's configuration file you can add a list of hosts, as well as their
49 | (host)names, and eldim makes sure that all files uploaded from a particular
50 | host will always have that host's name in their name. For example, files from
51 | the host `mail.example.com`, will always have a file name starting with
52 | `mail.example.com/`.
53 | 
54 | The data collection part is left to the servers sending data to it. It is
55 | they who decide what to send, when to send it, and what operations, such as
56 | compression for example, must be applied to the file.
57 | 
58 | ## Security
59 | In order for every server to be able to upload logs or backups to a central
60 | object storage bucket, they need to have some secrets stored in them. For
61 | example, in Swift, each server needs to have a username and an API key. This
62 | is something that is not really secure, as compromising any server would give
63 | full access to the backup repository. An attacker could download files, delete
64 | files, change them, etc.
65 | 
66 | In eldim, the servers do not have any stored information, and instead just
67 | upload the files to a single server. This server is the one with the access,
68 | and can control what operations are being performed, and by whom.
69 | 
70 | The way eldim works, no server is allowed to mess with another server's files.
71 | Server `mail.example.com` cannot upload files as `ftp.example.com`, even if
72 | they upload to the very same bucket. eldim automatically prepends all file
73 | uploads with the server hostname, which is inside its configuration file, and
74 | not sent by the servers themselves.
75 | 
76 | Moreover, eldim will reject files that already exist. If the file
77 | `mail.example.com/2018-01-01/mail.log.tgz` already exists in the object store,
78 | it will not allow for it to be overwritten. This check is in place to prevent
79 | a hacked server from overwriting all previous log entries with empty data,
80 | effectively deleting everything.
81 | 
82 | Finally, eldim works only over HTTPS. This decision is hard coded inside the
83 | server itself, and cannot be changed by the configuration file. A code change
84 | is required.
It is configured to only work with at least TLSv1.2, the only 85 | currently secure versions of TLS, but currently it may accept some more weak 86 | ciphers and not only the most secure ones. 87 | 88 | ### Encryption 89 | Since version v0.6.0, eldim uses [age](https://age-encryption.org/) for file 90 | encryption. It is a well defined protocol, with multiple implementations, a 91 | very good CLI tool, and is already part of some operating system distributions. 92 | More importantly, it is modern, well-designed, and opinionated, with one and 93 | only one purpose in mind: encrypt files. It uses state of the art practices 94 | and algorithms, and is also very flexible. 95 | 96 | age is using asymmetric encryption, which means that eldim only needs to know 97 | about the **public** keys in its configuration file, and never needs or has 98 | access to the *private* keys. This vastly reduces the risk of a compromised 99 | eldim server, as files uploaded cannot be decrypted by the attacker. 100 | 101 | With age, eldim supports multiple public keys, so you can use more than one, 102 | and have the files encrypted with all of them. That means that files can be 103 | decrypted with **any** of the keys. You can use this functionality to have 104 | backup keys, or give access to multiple people, each one holding their own 105 | key pair. Unfortunately, eldim currently does not support *M of N* so you 106 | need to keep this in mind while threat modelling. 107 | 108 | To generate an age keypair, you can use the `age-keygen` CLI tool. However, 109 | a very nice feature is that eldim also supports SSH keys! You can use your 110 | RSA or Ed25519 SSH keys in addition to the age keys. A single eldim server 111 | supports multiple keys, of different types. 112 | 113 | ## How to run eldim 114 | eldim runs as a daemon, since it has to listen for HTTPS requests 115 | continuously. For this reason, you need to ensure that the binary is 116 | running all the time. 
The recommended way of achieving this is through your 117 | operating system's startup / init system. If you are using `systemd`, a basic 118 | unit file is provided in this repository for you to use. 119 | 120 | As with any software, it is **not** recommended to run eldim as `root`. For 121 | this reason, you should create an `eldim` user. The included `systemd` unit 122 | file assumes the `eldim` user exists in the system. 123 | 124 | You can create such user by running: 125 | 126 | ```bash 127 | sudo useradd -s /usr/sbin/nologin -r -M eldim 128 | ``` 129 | 130 | When executed, eldim has two command line flags that you can use to configure 131 | it before it even reads the configuration file. They are: 132 | 133 | * `-j`: When set, it will output all logs in JSON format, instead of plaintext 134 | * `-c`: The path to the configuration file 135 | 136 | ## Metrics 137 | As of `eldim v0.2.0`, eldim supports metrics exporting using 138 | [Prometheus](https://prometheus.io/). You can find more information about the 139 | metrics currently supported and exported [here](docs/metrics.md). 140 | 141 | ## Configuration 142 | In order to read the full documentation on how to configure `eldim`, click 143 | [here](docs/config.md). 144 | 145 | ## The HTTP API 146 | You can find the full specification of the HTTP API of `eldim` by clicking 147 | [here](docs/api.md). 148 | 149 | ## How to upload data from a server 150 | You can basically upload files to eldim in any way you like, as long as you 151 | follow the above API, but here are some examples. 
This code can be for example 152 | in a daily or weekly cron job: 153 | 154 | ```bash 155 | # Compress nginx' access.log 156 | tar -zcf /tmp/nginx.access.log.tgz /var/log/nginx/access.log /var/log/nginx/access.log.1 157 | # Upload to eldim 158 | curl -F filename=$(date +%F-%H-%M)/access.log -F file=@/tmp/nginx.access.log.tgz https://eldim.example.com/api/v1/file/upload/ 159 | ``` 160 | 161 | The `$(date +%F-%H-%M)` part will automatically print the date in the 162 | `2018-01-01-13-37` format (`YYYY-MM-DD-HH-MM`). 163 | 164 | If you are testing eldim, you may use `-k` in `curl`, to skip certificate 165 | checks, as you may be using a self-signed certificate. However, deploying 166 | this to production without a trusted certificate is **not** recommended. 167 | 168 | For production workloads, you may want to use the `--retry N` flag of `curl`, 169 | to retry the request up to `N` times, if it fails. It is recommended to also 170 | set the `--retry-connrefused` flag as well. You can combine the above with 171 | `--retry-delay X`, so `curl` will sleep `X` seconds between retries. Good 172 | values for `X` are eldim's domain TTL * 2, or something similar. 173 | 174 | eldim is designed to work without placing trust on the file upload servers. 175 | If, however, you want to not have to trust the eldim server either, you can 176 | optionally encrypt all data sent to eldim with `age` (or `gpg`). That way 177 | eldim won't be able to decrypt them, but neither will the sender alone. 178 | 179 | To encrypt files with `age`, use: 180 | 181 | ```bash 182 | cat file.tgz | age -r "AgeID" > out.tgz.enc 183 | ``` 184 | 185 | Of course, you need to replace "AgeID" with an age recipient address. 186 | 187 | ## eldim Logs 188 | Currently eldim logs a lot of information in detail. This is done on purpose 189 | and is not a debugging leftover. 
Since it is a tool that is related to 190 | security, it is always good to have a lot of information to be able to go back 191 | to in case something happens. 192 | 193 | It is totally normal for eldim to log up to 20 lines per successful upload 194 | request, or even more, depending on the configuration. 195 | 196 | During service startup, all information logged is related to actions and 197 | the configuration file, and is in plain text. After the service is started, 198 | all logs start with a UUID. This is called the Request ID. During the 199 | arrival of every request, eldim generates a unique identifier for this 200 | request. This identifier is included in every future log file entry that 201 | is related to this request. 202 | 203 | By default eldim logs to `stdout` and `stderr`, so if you are using the 204 | provided `systemd` unit file, all its logs will be available in `syslog`. 205 | -------------------------------------------------------------------------------- /clients.yml: -------------------------------------------------------------------------------- 1 | # All servers that can upload data to eldim 2 | 3 | - 4 | name: "mail.example.com" 5 | ipv4: 6 | - "192.0.2.1" 7 | ipv6: 8 | - 9 | name: "eldim.example.com" 10 | ipv4: 11 | - "192.0.2.100" 12 | - "192.0.2.200" 13 | ipv6: 14 | - "2001:db8::1" 15 | - "2001:db8::2" 16 | - 17 | name: "does-not-have-to-be-a-hostname" 18 | ipv4: 19 | - "127.0.0.1" 20 | ipv6: 21 | - "::1" 22 | - 23 | name: "password-match" 24 | password: "this-is-a-very-secure-password" 25 | -------------------------------------------------------------------------------- /cmd/decrypt/README.md: -------------------------------------------------------------------------------- 1 | # eldim decypt tool 2 | 3 | The `decrypt` tool of eldim is used to decrypt data that have been encrypted 4 | during upload, of all versions of eldim prior to v0.6.0 that use the TripleSec 5 | encryption algorithm. 
It cannot download data from the various backends, it reads 6 | the files from the local file system, so you need to download them before 7 | using it. For all files encrypted with eldim version v0.6.0 or later, you 8 | need to use [age](https://age-encryption.org/) and its tools to decrypt. 9 | 10 | ## Usage 11 | Using `decrypt` is really simple. After you have the `decrypt` binary, just 12 | run `decrypt -h` to see some help information: 13 | 14 | ``` 15 | $ decrypt -h 16 | Usage of decrypt: 17 | -in string 18 | The encrypted file to decrypt. (default "input.dat") 19 | -key string 20 | The encryption password to decrypt the data. (default "Insecure") 21 | -out string 22 | The file to save the decrypted data. (default "output.dat") 23 | ``` 24 | 25 | There are three command line flags, and all three are **required** for the 26 | tool to work. They are explained here: 27 | 28 | ### in 29 | The `in` argument contains the path to the encrypted file, that needs to be 30 | decrypted by `decrypt`. 31 | 32 | ### out 33 | The `out` argument contains the path to the output file of `decrypt`, which 34 | will be the plaintext file. 35 | 36 | ### key 37 | The `key` argument is the encryption password / key used and configured in 38 | eldim during the encryption phase, inside `eldim.yml`. 39 | 40 | ## Logging 41 | Currently `decrypt` logs a bit of information so you can know what is going 42 | on, as well as how much time it takes for various operations. It has been 43 | designed to log some unique parameters of each run, such as the input and 44 | output file, so when used in a script with many files, you can then have 45 | usable logs of what happened in each decryption. 
-------------------------------------------------------------------------------- /cmd/decrypt/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "io/ioutil" 6 | "os" 7 | 8 | "github.com/keybase/go-triplesec" 9 | 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | func main() { 14 | 15 | /* Output tool start log */ 16 | logrus.Printf("Starting the eldim decryption tool...") 17 | 18 | /* Command line flags with all needed data */ 19 | inputFile := flag.String("in", "input.dat", "The encrypted file to decrypt.") 20 | outputFile := flag.String("out", "output.dat", "The file to save the decrypted data.") 21 | encryptionKey := flag.String("key", "Insecure", "The encryption password to decrypt the data.") 22 | 23 | flag.Parse() 24 | 25 | /* Print input and output file to log for documentation purposes */ 26 | logrus.Printf("Started with input file \"%s\" and output file \"%s\"", *inputFile, *outputFile) 27 | 28 | /* Delete any file that exists in the outputFile */ 29 | os.Remove(*outputFile) 30 | 31 | /* Read the encrypted file to RAM */ 32 | logrus.Printf("Reading encrypted file to memory...") 33 | encData, err := ioutil.ReadFile(*inputFile) 34 | if err != nil { 35 | logrus.Fatalf("Failed to read input file: %v", err) 36 | } 37 | logrus.Printf("File in memory. Size: %d bytes", len(encData)) 38 | 39 | /* Create an output file */ 40 | f, err := os.Create(*outputFile) 41 | if err != nil { 42 | logrus.Fatalf("Failed to create output file: %v", err) 43 | } 44 | 45 | logrus.Printf("Decrypting data...") 46 | 47 | /* 48 | Create a new TripleSec cipher 49 | 50 | The number 4 being passed is the Cipher version, which is 51 | currently the latest version supported by TripleSec. 
52 | */ 53 | cipher, err := triplesec.NewCipher([]byte(*encryptionKey), nil, 4) 54 | if err != nil { 55 | logrus.Fatalf("Failed to initialize the cryptographic engine: %v", err) 56 | } 57 | 58 | /* Decrypt the data in memory */ 59 | dec, err := cipher.Decrypt(encData) 60 | if err != nil { 61 | logrus.Fatalf("Decryption failed: %v", err) 62 | } 63 | 64 | logrus.Printf("Decryption completed.") 65 | logrus.Printf("Writting to file...") 66 | 67 | /* Write decrypted data to the output file */ 68 | _, err = f.Write(dec) 69 | if err != nil { 70 | logrus.Fatalf("Failed to write to file: %v", err) 71 | } 72 | 73 | /* Close the output file */ 74 | err = f.Close() 75 | if err != nil { 76 | logrus.Fatalf("Failed to finalize file write: %v", err) 77 | } 78 | 79 | /* Done */ 80 | logrus.Printf("Done.") 81 | 82 | } 83 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/ioutil" 7 | "net" 8 | "os" 9 | "regexp" 10 | "strings" 11 | 12 | "filippo.io/age" 13 | "filippo.io/age/agessh" 14 | "github.com/daknob/eldim/internal/gcs" 15 | "github.com/daknob/eldim/internal/s3" 16 | 17 | "github.com/daknob/eldim/internal/backend" 18 | 19 | "github.com/daknob/eldim/internal/swift" 20 | "gopkg.in/yaml.v2" 21 | ) 22 | 23 | /* 24 | Config is the data structure outlying the configuration file of eldim 25 | */ 26 | type Config struct { 27 | /* Web Server Settings */ 28 | ListenPort int `yaml:"listenport"` 29 | ServerTokens bool `yaml:"servertokens"` 30 | MaxUploadRAM int64 `yaml:"maxuploadram"` 31 | 32 | /* TLS Settings */ 33 | TLSChainPath string `yaml:"tlschain"` 34 | TLSKeyPath string `yaml:"tlskey"` 35 | 36 | /* Backend Server */ 37 | SwiftBackends []swift.BackendConfig `yaml:"swiftbackends"` 38 | GCSBackends []gcs.BackendConfig `yaml:"gcsbackends"` 39 | S3Backends []s3.BackendConfig 
`yaml:"s3backends"`

	/* Clients */
	ClientFile string `yaml:"clientfile"`

	/* Encryption */
	EncryptionKey string `yaml:"encryptionkey"` // Deprecated in eldim v0.6.0
	Encryption    struct {
		AgeID  []string `yaml:"age-id"`
		AgeSSH []string `yaml:"age-ssh"`
	} `yaml:"encryption"`

	/* Prometheus Metrics */
	PrometheusEnabled  bool   `yaml:"prometheusenabled"`
	PrometheusAuthUser string `yaml:"prometheusauthuser"`
	PrometheusAuthPass string `yaml:"prometheusauthpass"`
}

/*
Validate validates the eldim configuration file and returns the
first error that occurred
*/
func (conf *Config) Validate() error {
	/* Validate Listening Port */
	if conf.ListenPort < 0 {
		return fmt.Errorf("TCP Listening Port must be positive number")
	}
	if conf.ListenPort > 65535 {
		return fmt.Errorf("TCP Listening Port must be below 65535")
	}

	/* Validate TLS Chain File: open/close to prove it exists and is readable */
	if conf.TLSChainPath == "" {
		return fmt.Errorf("TLS Chain File is required. eldim works only with HTTPS")
	}
	f, err := os.Open(conf.TLSChainPath)
	if err != nil {
		return fmt.Errorf("Failed to open TLS Chain File: %v", err)
	}
	err = f.Close()
	if err != nil {
		return fmt.Errorf("Failed to close TLS Chain File: %v", err)
	}

	/* Validate TLS Key File */
	if conf.TLSKeyPath == "" {
		return fmt.Errorf("TLS Key File is required. eldim works only with HTTPS")
	}
	f, err = os.Open(conf.TLSKeyPath)
	if err != nil {
		return fmt.Errorf("Failed to open TLS Key File: %v", err)
	}
	err = f.Close()
	if err != nil {
		return fmt.Errorf("Failed to close TLS Key File: %v", err)
	}

	/* Validate Backends: each backend type validates its own settings */
	for _, b := range conf.SwiftBackends {
		err = b.Validate()
		if err != nil {
			return fmt.Errorf("Failed to validate OpenStack Swift Backend '%s': %v", b.Name(), err)
		}
	}
	for _, b := range conf.GCSBackends {
		err := b.Validate()
		if err != nil {
			return fmt.Errorf("Failed to validate Google Cloud Storage Backend '%s': %v", b.Name(), err)
		}
	}
	for _, b := range conf.S3Backends {
		err := b.Validate()
		if err != nil {
			return fmt.Errorf("Failed to validate S3 Backend '%s': %v", b.Name(), err)
		}
	}

	/* Ensure there is at least one backend */
	if len(conf.SwiftBackends)+len(conf.GCSBackends)+len(conf.S3Backends) == 0 {
		return fmt.Errorf("eldim needs at least one backend to operate, 0 found")
	}

	/* Validate Max Upload RAM (in MB) */
	if conf.MaxUploadRAM <= 0 {
		return fmt.Errorf("Maximum Upload RAM must be a positive number")
	}

	/* Reject the legacy (pre-v0.6.0 TripleSec) encryption key setting */
	if conf.EncryptionKey != "" {
		return fmt.Errorf("Use of encryption key is deprecated since v0.6.0. Please consult the docs")
	}
	/* At least one age recipient (native or SSH) is required for encryption */
	if len(conf.Encryption.AgeID)+len(conf.Encryption.AgeSSH) == 0 {
		return fmt.Errorf("Please configure at least one age encryption key")
	}
	for _, r := range conf.Encryption.AgeID {
		_, err := age.ParseX25519Recipient(r)
		if err != nil {
			return fmt.Errorf("Failed to parse age Identity '%s': %v", r, err)
		}
	}
	for _, r := range conf.Encryption.AgeSSH {
		_, err := agessh.ParseRecipient(r)
		if err != nil {
			return fmt.Errorf("Failed to parse age ssh key Identity '%s': %v", r, err)
		}
	}

	/* Validate Prometheus Settings */
	if conf.PrometheusEnabled {
		/* Only check Prometheus Configuration if Prometheus is enabled */

		/* Shared credential policy, compiled once for both user and pass */
		authRe := regexp.MustCompile("^[a-zA-Z0-9]{20,128}$")

		if conf.PrometheusAuthUser == "" {
			return fmt.Errorf("You need to set the prometheusauthuser in the configuration file. eldim only works with HTTP Basic Auth for Prometheus Metrics")
		}
		if !authRe.MatchString(conf.PrometheusAuthUser) {
			return fmt.Errorf("The prometheusauthuser must contain a-z, A-Z, and 0-9, and must be 20-128 characters long")
		}
		if conf.PrometheusAuthPass == "" {
			return fmt.Errorf("You need to set the prometheusauthpass in the configuration file. eldim only works with HTTP Basic Auth for Prometheus Metrics")
		}
		if !authRe.MatchString(conf.PrometheusAuthPass) {
			return fmt.Errorf("The prometheusauthpass must contain a-z, A-Z, and 0-9, and must be 20-128 characters long")
		}
	}

	/*************************
	 * Validate Client Config *
	 *************************/

	if conf.ClientFile == "" {
		return fmt.Errorf("Did not supply a clients config file")
	}

	/* Attempt to read the Clients File */
	fc, err := ioutil.ReadFile(conf.ClientFile)
	if err != nil {
		return fmt.Errorf("Failed to open clients file: %v", err)
	}

	/* Unmarshal the YAML Clients File */
	var clients []ClientConfig

	err = yaml.Unmarshal(fc, &clients)
	if err != nil {
		return fmt.Errorf("Unable to decode client file YAML: %v", err)
	}

	/* Check if clients have been supplied */
	if len(clients) == 0 {
		return fmt.Errorf("No clients have been supplied. eldim will not work")
	}

	/* Validate clients individually */
	for i, c := range clients {
		err = c.Validate()
		if err != nil {
			return fmt.Errorf("Client '%s' (%d) is invalid: %v", c.Name(), i+1, err)
		}
	}

	/* Check for duplicate names / passwords / IPs across all clients */
	var nameSet = make(map[string]bool)
	var passSet = make(map[string]bool)
	var ipSet = make(map[string]bool)
	for i, c := range clients {
		/* Duplicate Name Check */
		if nameSet[c.Name()] {
			return fmt.Errorf("Client %d does not have a unique name: %s", i+1, c.Name())
		}
		nameSet[c.Name()] = true

		/* Duplicate Password Check (only for clients that set one) */
		if c.Password != "" {
			if passSet[c.Password] {
				return fmt.Errorf("Client %d does not have a unique password: %s", i+1, c.Name())
			}
			passSet[c.Password] = true
		}

		/* Duplicate IP Check, over both address families */
		for _, ip := range append(c.IPv6(), c.IPv4()...) {
			if ipSet[ip.String()] {
				return fmt.Errorf("Client '%s' (%d) reuses an IP Address: %s", c.Name(), i+1, ip.String())
			}
			ipSet[ip.String()] = true
		}
	}

	return nil
}

/*
Clients returns all configured Backend Clients
*/
func (conf *Config) Clients() []backend.Client {
	var ret []backend.Client

	/* OpenStack Swift */
	for _, be := range conf.SwiftBackends {
		ret = append(ret,
			swift.New(context.Background(),
				be,
			),
		)
	}

	/* Google Cloud Storage */
	for _, be := range conf.GCSBackends {
		ret = append(ret,
			gcs.New(context.Background(),
				be,
			),
		)
	}

	/* S3 */
	for _, be := range conf.S3Backends {
		ret = append(ret,
			s3.New(context.Background(),
				be,
			),
		)
	}

	/* Return all clients */
	return ret
}

/*
ClientConfig is the data structure
containing all information about
a client that can connect to the eldim service
*/
type ClientConfig struct {
	ClientName string   `yaml:"name"`
	IPv4Addr   []string `yaml:"ipv4"`
	IPv6Addr   []string `yaml:"ipv6"`
	Password   string   `yaml:"password"`
}

/*
Validate validates a single client entry from the client configuration
file
*/
func (client *ClientConfig) Validate() error {
	/* A client entry must carry a name */
	if client.ClientName == "" {
		return fmt.Errorf("Client has no name")
	}

	/* Every configured address, in either family, must parse as an IP */
	allAddrs := append(client.IPv6(), client.IPv4()...)
	for _, addr := range allAddrs {
		if addr == nil {
			return fmt.Errorf("Client contains an invalid IP Address")
		}
	}

	/* The v4 list may only hold dotted-quad addresses... */
	for _, addr := range client.IPv4() {
		if !strings.Contains(addr.String(), ".") {
			return fmt.Errorf("Client contains a non-IPv4 in IPv4 list: %s", addr.String())
		}
	}
	/* ...and the v6 list only colon-separated, non-dotted ones */
	for _, addr := range client.IPv6() {
		s := addr.String()
		if !strings.Contains(s, ":") || strings.Contains(s, ".") {
			return fmt.Errorf("Client contains a non-IPv6 in IPv6 list: %s", s)
		}
	}

	/* Some authentication method must exist: a password or an address */
	if client.Password == "" && len(client.IPv4()) == 0 && len(client.IPv6()) == 0 {
		return fmt.Errorf("Client does not have at least one of (password, IPv6, IPv4)")
	}

	/* Enforce the 32-128 character password policy, when a password is set */
	if l := len(client.Password); l > 0 && l < 32 {
		return fmt.Errorf("Client has a password shorter than 32 characters: 32-128 are acceptable")
	} else if l > 128 {
		return fmt.Errorf("Client has a password longer than 128 characters: 32-128 are acceptable")
	}

	return nil
}

/*
Name returns the name of the client,
as configured
*/
func (client *ClientConfig) Name() string {
	/* Fall back to a placeholder, and remember it, when no name was given */
	if client.ClientName == "" {
		client.ClientName = "Unnamed Client"
	}
	return client.ClientName
}

/*
IPv4 returns the list of client IPv4 Addresses
*/
func (client *ClientConfig) IPv4() []net.IP {
	return parseIPList(client.IPv4Addr)
}

/*
IPv6 returns the list of client IPv6 Addresses
*/
func (client *ClientConfig) IPv6() []net.IP {
	return parseIPList(client.IPv6Addr)
}

/*
parseIPList runs net.ParseIP over every entry of the given list, keeping
the original order; entries that do not parse yield nil elements.
*/
func parseIPList(addrs []string) []net.IP {
	var parsed []net.IP
	for _, a := range addrs {
		parsed = append(parsed, net.ParseIP(a))
	}
	return parsed
}
--------------------------------------------------------------------------------
/docs/api.md:
--------------------------------------------------------------------------------
# API Documentation

eldim exposes an HTTP API for all servers to upload data. Here you can find
all the currently supported calls to this API:

## GET /
By sending a `GET` request to `/`, eldim will either print some information
about it, or nothing, depending on the value of `servertokens` in the
configuration file.

## POST /api/v1/file/upload/
By sending a `POST` request to `/api/v1/file/upload/`, you can upload files
to eldim. Currently there are two parameters that are required:

### filename
This is of type `string`, and must contain the desired name of the file.
This can be anything, but spaces or symbols that are not normal for files
are not recommended, since they may not be supported by the backends.

### file
This `POST` parameter is the actual file. Send the entire file here that has
to be uploaded here.

This API call will return `HTTP 200` and print `Ok` if the upload succeeded.
Any other HTTP Status Code or message is an error.
26 | 27 | ### password 28 | This `POST` parameter is a string that specifies a password, which will be 29 | checked against `eldim`'s `clients.yml` and will identify hosts based on their 30 | password key, instead of their IP Address. Password checks take precedence over 31 | IP Address checks. The password must be between 32 and 128 characters for 32 | security reasons. -------------------------------------------------------------------------------- /docs/config.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | This document covers all the configuration options for eldim. There is a main 4 | configuration file which can control the behavior and settings of the server, 5 | as well as a secondary one that contains all the hosts who are authorized to 6 | upload data. 7 | 8 | ## eldim.yml 9 | This is the primary configuration file. It is recommended to store this in 10 | `/etc/eldim/eldim.yml`, have it owned by `eldim:eldim`, and with permissions 11 | `0400`. 12 | 13 | Here you can find all the options of this file: 14 | 15 | ### listenport 16 | The `listenport` parameter accepts an integer number. This is the port number 17 | the server will listen on for TLS connections. 18 | 19 | ### servertokens 20 | The `servertokens` parameter is a boolean and if set to `true`, eldim will not 21 | try to hide that it is running in the system. For example, it will send the 22 | `Server` HTTP header in its responses, and will print its version on `GET /`. 23 | 24 | If it is set to `false`, it will not send the `Server` header, nor will it 25 | print anything in its home page. However, if someone wants to figure out if 26 | this server is running eldim, it is still trivial to do so. 27 | 28 | ### maxuploadram 29 | The `maxuploadram` parameter controls how many MBs of RAM should eldim 30 | allocate to new file uploads, before it starts saving the file to the disk 31 | directly. 
If a file is uploaded and is above this number, processing it may 32 | be slower. It is recommended to set this to about the largest file you can 33 | expect, plus some more, but not to something over 10% of the total server 34 | RAM. 35 | 36 | ### tlschain 37 | The `tlschain` parameter is the path to the TLS certificate chain file. If 38 | you are using Let's Encrypt, this is the `fullchain.pem` file. Make sure 39 | that this file also contains any intermediate certificates, and not only 40 | your certificate, as some clients may not like this. 41 | 42 | ### tlskey 43 | The `tlskey` parameter is the path to the TLS certificate private key. If 44 | you are using Let's Encrypt, this is the `privkey.pem` file. 45 | 46 | ### clientfile 47 | The `clientfile` parameter contains the path to the configuration file that 48 | includes all clients who are authorized to upload data to eldim. More on 49 | that file below. 50 | 51 | ### encryption 52 | The `encryption` part of the configuration file contains all the required 53 | information in order to encrypt the files uploaded to eldim. Essentially it 54 | contains all the public keys that will be used to encrypt the files uploaded. 55 | Please note that **ANY** key will be able to decrypt the file, and the number 56 | of them does not increase encryption time, but increases the uploaded file's 57 | size by a few bytes. 58 | It has the following two configuration data points: 59 | 60 | #### age-id 61 | The `age-id` is a list of strings that contains all 62 | [age](https://age-encryption.org/) identities which will be able to decrypt 63 | files uploaded to eldim. They start with `age` and can be generated using 64 | `age-keygen`. Make sure to use the **Public Key** and not the private key. 65 | 66 | #### age-ssh 67 | The `age-ssh` is a list of strings that contains SSH keys of type RSA 68 | (`ssh-rsa`) or Ed25519 (`ssh-ed25519`) which will be able to decrypt files 69 | uploaded to eldim. 
The full SSH key needs to be included, such as, for 70 | example 71 | `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMAvJvMq0gej+uXqINFrrbqAElw2h32qyxGEBG8ef7vn comment`. 72 | As you can see, comments can be included as well. 73 | 74 | ### prometheusenabled 75 | The `prometheusenabled` is a boolean value. If it has the value `true`, it 76 | enables exporting of Prometheus metrics. If it's `false` (default if missing), 77 | then Prometheus metrics export is disabled. 78 | 79 | ### prometheusauthuser 80 | The `prometheusauthuser` is a string that includes the HTTP Basic Auth Username 81 | for the Prometheus metrics endpoint (`/metrics`). It needs to be a-z, A-Z, 0-9, 82 | and 20-128 characters long, for security reasons. 83 | 84 | ### prometheusauthpass 85 | The `prometheusauthpass` is a string that contains the HTTP Basic Auth Password 86 | for the Prometheus metrics endpoint (`/metrics`). It needs to be a-z, A-Z, 0-9, 87 | and 20-128 characters long, for security reasons. 88 | 89 | ### swiftbackends 90 | The `swiftbackends` is an array, which contains a list of all OpenStack Swift 91 | backends that eldim will upload data to. More than one data storage is 92 | supported, but if you add too many it may take excessive amounts of bandwidth 93 | and time to complete the operations. 94 | 95 | The fields of each array element are below: 96 | 97 | #### name 98 | The `name` parameter is a friendly name that you set to identify this backend 99 | in eldim's logs, as well as its configuration file. It can be any string. 100 | 101 | #### username 102 | The `username` parameter is the OpenStack Swift v3 Username to authenticate to 103 | the backend. 104 | 105 | #### apikey 106 | The `apikey` parameter is the OpenStack Swift v3 API Key (or, in some clouds, 107 | password), to authenticate to the backend. 108 | 109 | #### authurl 110 | The `authurl` parameter is the OpenStack Swift v3 URL that eldim needs to 111 | communicate with to connect and upload data. 
It must include the scheme 112 | (`https://`). 113 | 114 | #### region 115 | The `region` parameter is the OpenStack Swift v3 Region. In some clouds this 116 | value is case sensitive, so try both `xxx1` and `XXX1` if it doesn't work. 117 | 118 | #### container 119 | The `container` parameter is the OpenStack Swift v3 container, or bucket, in 120 | which the data will be uploaded. This container must already exist before 121 | any data is uploaded to it. 122 | 123 | #### expireseconds 124 | The `expireseconds` parameter is a special header sent with the file upload. 125 | It is not supported by all clouds, but in the ones that do support it, eldim 126 | will ask for this file to be deleted after so many seconds. 127 | 128 | If you'd like to only keep your files for 90 days for example, and then have 129 | them deleted, you can set this to `7776000`. 130 | 131 | Since many providers offer a hot and a cold storage, you may want to add the 132 | same provider two times, one with a hot storage container, and an expiry of 133 | a week for example, or a month, and one with a cold storage container, and an 134 | expiry of months or years. That way you can keep the most recent files 135 | immediately available, while older files will take more time to be retrieved. 136 | 137 | ### gcsbackends 138 | The `gcsbackends` is an array that contains a list of all Google Cloud Storage 139 | backends that eldim will upload data to. You can specify more than one backend 140 | if you want, such as one per region. Unlike the `swiftbackends`, this does not 141 | support `eldim`-based file expiration and instead it must be configured from 142 | the Google Cloud Console. There you can find a much more flexible way which 143 | also includes storage class options, and gives you the ability to keep files 144 | in hot storage for 30 days, and then progressively move to colder and colder 145 | storage types, until their eventual deletion. 
146 | 147 | The fields of each array element are: 148 | 149 | #### name 150 | The `name` parameter is a friendly name that you set to identify this backend 151 | in eldim's logs, as well as its configuration file. It can be any string. 152 | 153 | #### bucketname 154 | The `bucketname` parameter is the Google Cloud Storage bucket's name that you 155 | intend to upload all the data to. This must already exist and be configured 156 | before you start using it. 157 | 158 | #### credsfile 159 | The `credsfile` parameter includes the full path to the location of your 160 | service account's Google Cloud Storage credentials. It should be a JSON file 161 | that contains inside it all the information needed for eldim to establish a 162 | connection and authenticate properly. You can obtain this file by going to 163 | *IAM & Admin* in Google Cloud Console, clicking *Service Accounts*, and then 164 | creating one. When prompted, download the JSON secret file, and use this to 165 | deploy eldim with. 166 | 167 | ### s3backends 168 | The `s3backends` is an array that contains a list of all S3 backends that 169 | eldim will upload data to. You can specify more than one backend if you want, 170 | such as one per region. Like `gcsbackends`, this does not support eldim-based 171 | file expiration, and instead it must be configured from the S3 provider web 172 | console. Most providers offer lifecycle options for their service. 173 | 174 | The fields of each array element are: 175 | 176 | #### name 177 | The `name` parameter is a friendly name that you can set to identify this 178 | backend in eldim's logs, as well as its configuration file. It can be any 179 | string. 180 | 181 | #### endpoint 182 | The `endpoint` parameter is a string containing the domain name at which the 183 | S3 backend server is available. It should not include protocols like `https://` 184 | in front of it. The endpoint **MUST** support TLS, as eldim only makes 185 | encrypted connections to this server. 
This behavior cannot be changed. 186 | 187 | #### region 188 | The `region` parameter is a string that contains the name of the Region that 189 | the S3 bucket is using. In some servers, like Amazon's, this is what is used 190 | to differentiate between regions, and the endpoint is the same. In some other 191 | ones like Scaleway, the endpoint is different for every region, but the region 192 | still must be set, otherwise they will reject the request. And finally, in 193 | others, like Minio, the region string is disregarded completely. 194 | 195 | #### bucketname 196 | The `bucketname` parameter is a string that contains the S3 Bucket name. This 197 | must already exist beforehand and must be configured properly. 198 | 199 | #### accesskey 200 | #### secretkey 201 | The `accesskey` and `secretkey` parameters are what's used to authenticate 202 | and authorize eldim to the S3 server. They are provided together as a pair 203 | from the S3 operator. You need to ensure that eldim has appropriate 204 | permissions under the user it is running as, otherwise it will not work. 205 | 206 | #### sendcontentmd5 207 | The `sendcontentmd5` parameter is a boolean that controls whether `eldim` will 208 | send the `Content-MD5` HTTP Header to the S3 backend. Typically this is not 209 | required since `eldim` uses HTTPS, but some backends like the Backblaze B2 210 | service require it, depending on your configuration (e.g. if you use "Object 211 | Lock"). You can enable it regardless of whether it is needed or not, and 212 | `eldim` will calculate the MD5 sum of the file to be uploaded and then attach 213 | it with the request to upload the file. In theory this can detect corrupted 214 | files between `eldim` and the S3 provider, but this is already being taken care 215 | of by TLS. If the server supports and checks it, it will detect corruption in 216 | uploaded files. If it doesn't check it, it will be ignored. 
Enabling this 217 | setting will increase CPU usage (an average Intel core can do ~2 GB/s of MD5) as 218 | well as memory usage (as this happens in RAM). It is disabled by default and 219 | only recommended to be enabled if required by the provider. 220 | 221 | ## clients.yml 222 | This configuration file contains all the hosts that are authorized to upload 223 | data to eldim. It is recommended to store this in `/etc/eldim/clients.yml`, 224 | have it owned by `eldim:eldim`, and with permissions `0400`. 225 | 226 | This file contains a YAML array, where each element contains the following 227 | fields: 228 | 229 | ### name 230 | The `name` parameter contains this host's name. This will be prepended to 231 | all file uploads from this host. If you set this to `example.com`, all files 232 | uploaded by this host will start with `example.com/`. 233 | 234 | ### ipv4 235 | The `ipv4` array is a list of strings with IPv4 addresses that belong to this 236 | host. They are stored in the normal version (`192.0.2.1`), and can be as many 237 | as required. 238 | 239 | ### ipv6 240 | The `ipv6` array is a list of strings with IPv6 addresses that belong to this 241 | host. The shortest format must be used, so `2001:db8::1` will work, but 242 | `2001:0db8:0000:0000:0000:0000:0000:0001` will not. They can be more than one. 243 | 244 | ### password 245 | The `password` string is a password that can be supplied by the `password` POST 246 | form element that can be used to look up clients instead of by their IP Address. 247 | The `password` is checked before the IP Address of the host. For security 248 | reasons, the field **must** be between 32 and 128 characters in size. 249 | 250 | ## Example Configuration Files 251 | There are example configuration files that include all of the above commands 252 | in this repository. Feel free to start with them as your base, and then make 253 | all necessary changes to them. 
254 | -------------------------------------------------------------------------------- /docs/img/flow.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 |
OpenStack
Swift
OpenSt...
eldim
eldim
Server 1
Server...
Server 2
Server...
Server 3
Server...
Cloud
Storage
Cloud...
1. Servers upload
their files
1. Servers upload...
2. eldim checks ACL
3. eldim encrypts files
2. eldim checks ACL...
4. Files are stored in
one or more backends
4. Files are stored i...
Viewer does not support full SVG 1.1
-------------------------------------------------------------------------------- /docs/metrics.md: -------------------------------------------------------------------------------- 1 | # Metrics 2 | 3 | As of `eldim v0.2.0`, eldim supports metrics exporting using 4 | [Prometheus](https://prometheus.io/). In 5 | order to access the metrics, Prometheus has to be enabled from the 6 | configuration file. eldim **requires** HTTP Basic Authentication on the 7 | Metrics URL, and it is only available over HTTPS, through the same TCP port as 8 | the public API. For security reasons, both the username and password must be 9 | 20-128 characters long. 10 | 11 | Currently the following metrics are exposed by `eldim`: 12 | 13 | ## HTTP Requests Served 14 | eldim exports `eldim_http_requests_served`, which is a counter vector, with 15 | the following labels: 16 | 17 | ### method 18 | The `method` label contains the HTTP method that was used for this particular 19 | HTTP request, and common values can be `GET` and `POST`. 20 | 21 | ### path 22 | The `path` label contains the URL of this HTTP Request, such as `/` or even 23 | `/api/v1/file/upload/`. 24 | 25 | ### status 26 | The `status` label contains the HTTP Request Status Code that was returned, 27 | i.e. `200` or `400`. 28 | 29 | ## Prometheus Metrics HTTP Basic Auth 30 | eldim exports `eldim_prometheus_metrics_scrape_auth`, which is a counter 31 | vector, and measures successful or unsuccessful scrapes of the Prometheus 32 | endpoint, based on their HTTP Basic Authentication Check. Through this you 33 | can monitor successful scrapes, scrape attempts without HTTP Basic Auth 34 | provided, as well as incorrect username or password attempts. It exposes 35 | the following labels: 36 | 37 | ### success 38 | Set to `true` or `false` depending on whether the scrape was successful. 
39 | 40 | ### error 41 | Set to `HTTP-Basic-Auth-Not-Ok` during errors with HTTP Basic Auth, such as 42 | when no credentials were supplied, to `Incorrect-Username` when the username 43 | provided by the user is incorrect, or to `Incorrect-Password` when the password 44 | supplied is not correct. 45 | 46 | ## File Upload Error Metrics 47 | eldim exports `eldim_file_upload_errors_occured`, which is a counter vector, 48 | and essentially counts all errors that occurred during file upload requests. You 49 | can use this to see what errors come up, or if there is a spike in errors 50 | recently, and, in coordination with `eldim_http_requests_served`, identify 51 | problems in your eldim setup. 52 | 53 | ## Successful File Upload Time Histogram 54 | eldim exports `eldim_file_upload_request_time`, which is a histogram of the 55 | time it took to successfully serve a file upload request. The request time is 56 | measured in seconds, and the buckets are one for every minute, up to two hours. 57 | 58 | ## Available Clients 59 | eldim exports `eldim_loaded_clients`, which is a gauge vector that contains 60 | how many clients are available and loaded from the configuration file to the 61 | system and have `ipv6` and `ipv4` addresses. This metric only changes when 62 | the configuration file is loaded, but can be useful to track historical changes 63 | in `eldim` hosts. This field may also contain `password` for clients that are 64 | being identified by a password. 65 | eldim also exports `eldim_loaded_ip_addressess`, which is a gauge vector, 66 | containing information on how many IP addresses, and their version (`6`/`4`), 67 | have been loaded to `eldim`. Like above, this is only loaded when the 68 | `clients.yml` file is loaded, so it's also used for mostly historical reasons. 
69 | 70 | ## Uploaded Bytes 71 | eldim exports `eldim_files_uploaded_bytes_successful`, which is a gauge, 72 | whose value contains the total amount of bytes since eldim launch that have 73 | been successfully uploaded and processed by eldim. This includes the sum of 74 | the size of all files uploaded **to** eldim. 75 | In addition to that, there's also `eldim_files_uploaded_bytes`, which 76 | includes the total amount of bytes that **eldim** uploaded, to the backends, 77 | tracked per backend type. This number is different from the previous one since it 78 | includes encryption overhead, as well as the possibility of multiple backends, 79 | causing more data to be uploaded. 80 | 81 | ## Client Identification Type 82 | eldim exports `eldim_client_id_type`, which is a counter vector, with the 83 | following label: 84 | 85 | ### type 86 | The `type` label contains the type of each successful authentication done 87 | against `eldim`. It contains two possible values: `ipaddr` and `password`. The 88 | first value is assigned every time a successful authentication is performed 89 | with an IP Address, and the second is assigned every time a successful 90 | authentication is performed with a password. 91 | 92 | ## Successful Authentications by Host Name 93 | eldim exports `eldim_host_authentications`, which is a counter vector, counting 94 | how many successful authentication attempts have happened so far, with the 95 | following label: 96 | 97 | ### hostname 98 | The `hostname` label contains the host name that has performed the 99 | authentication successfully. 100 | 101 | ## Successful File Uploads by Host Name 102 | eldim exports `eldim_host_uploads`, which is a counter vector, counting how 103 | many successful file uploads have happened so far, with the following label: 104 | 105 | ### hostname 106 | The `hostname` label contains the host name that has performed the successful 107 | file upload. 
108 | 109 | ## eldim Server Version 110 | eldim exports the `eldim_server_version`, which is always set to `1`. This 111 | metric is a vector, with the label `eldimversion`, which is always set to 112 | the current version that the server is running. This will allow the operator 113 | to collect a time series metric of which eldim version was running at a 114 | particular time, and also, if there are more than one eldim servers being 115 | used and monitored, to show the distribution of versions across the server 116 | cluster. 117 | 118 | ## Default Prometheus for Go Metrics 119 | The Prometheus Client Library for Go exports a heap of metrics by default, 120 | which include, among others, Go Garbage Collection metrics, Goroutine Info, 121 | Go compiler version, Application Memory Info, Running Threads, as well as 122 | Exporter Info, such as how many times the application data has been scraped 123 | by Prometheus. 124 | -------------------------------------------------------------------------------- /eldim.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=eldim Data Storage Proxy 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | ExecStart=/usr/bin/eldim -c /etc/eldim/eldim.yml 8 | Restart=always 9 | User=eldim 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /eldim.yml: -------------------------------------------------------------------------------- 1 | ##### 2 | # Web Server Settings 3 | ##### 4 | 5 | # The port to listen for HTTPS requests on 6 | listenport: 31337 7 | 8 | # Show or hide eldim information from HTTP headers 9 | servertokens: true 10 | 11 | # Maximum uploaded file size to keep in RAM before using disk 12 | maxuploadram: 100 13 | 14 | ##### 15 | # TLS Configuration 16 | ##### 17 | 18 | # The TLS Certificate Chain File 19 | tlschain: "/etc/letsencrypt/live/eldim.example.com/fullchain.pem" 
20 | 21 | # The TLS Private Key File 22 | tlskey: "/etc/letsencrypt/live/eldim.example.com/privkey.pem" 23 | 24 | ##### 25 | # Clients 26 | ##### 27 | 28 | # The file to load the clients from 29 | clientfile: "/etc/eldim/clients.yml" 30 | 31 | ##### 32 | # Encryption 33 | ##### 34 | 35 | # The encryption password to use for all file uploads 36 | # encryptionkey: "Insecure" (DEPRECATED) 37 | 38 | # Public Keys that will be able to decrypt uploaded files 39 | encryption: 40 | age-id: 41 | - "age17w2a6ad6cccx5kenanamr72e9qzgt5d7vqhvq8rrxx8pc3qt53vq70un5p" 42 | age-ssh: 43 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMAvJvMq0gej+uXqINFrrbqAElw2h32qyxGEBG8ef7vn a-comment" 44 | - "ssh-rsa AAAAB3RSAKeysAreTooLongSoISnippedThisSorrytlCx+PAaDpsD3yQ== another-comment" 45 | 46 | ##### 47 | # Prometheus 48 | ##### 49 | 50 | # Enable or disable Prometheus metrics 51 | prometheusenabled: true 52 | 53 | # Prometheus Endpoint (/metrics) HTTP Basic Auth Username 54 | prometheusauthuser: "username" 55 | 56 | # Prometheus Endpoint (/metrics) HTTP Basic Auth Password 57 | prometheusauthpass: "password" 58 | 59 | ##### 60 | # Backends 61 | ##### 62 | 63 | # All the OpenStack Swift Backends 64 | swiftbackends: 65 | - 66 | name: "OVH-Cold" 67 | username: "openStackUser" 68 | apikey: "openStackPassword" 69 | authurl: "https://auth.cloud.ovh.net/v3" 70 | region: "GRA3" 71 | container: "my-logs-cold" 72 | expireseconds: 63072000 73 | - 74 | name: "OVH-Hot" 75 | username: "openStackUser" 76 | apikey: "openStackPassword" 77 | authurl: "https://auth.cloud.ovh.net/v3" 78 | region: "SBG3" 79 | container: "my-logs-hot" 80 | expireseconds: 2592000 81 | 82 | # All the Google Cloud Storage Backends 83 | gcsbackends: 84 | - 85 | name: "Example Google Cloud Storage Backend" 86 | bucketname: "eldim-is-awesome" 87 | credsfile: "/etc/eldim/gcs-credentials.json" 88 | 89 | # All S3 Storage Backends 90 | s3backends: 91 | - 92 | name: "Minio Play" 93 | endpoint: "play.minio.io" 94 | region: "does-not-matter" 95 
| bucketname: "0000eldim" 96 | accesskey: "Q3AM3UQ867SPQQA43P2F" 97 | secretkey: "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" 98 | - 99 | name: "Scaleway Example" 100 | endpoint: "s3.fr-par.scw.cloud" 101 | region: "fr-par" 102 | bucketname: "eldim-bucket" 103 | accesskey: "keyGoesHere" 104 | secretkey: "secretGoesHere" 105 | - 106 | name: "Amazon S3 Example" 107 | endpoint: "s3.amazonaws.com" 108 | region: "eu-central-1" 109 | bucketname: "eldim-in-amazon" 110 | accesskey: "AccessKey" 111 | secretkey: "SecretKey" 112 | - 113 | name: "Backblaze B2 Example" 114 | endpoint: "s3.eu-central-003.backblazeb2.com" 115 | region: "eu-central-003" 116 | bucketname: "b2-bucket-name" 117 | accesskey: "AppKey-keyID" 118 | secretkey: "AppKey-applicationKey" 119 | sendcontentmd5: true # If you use Object Lock (>=1d), this is mandatory 120 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/daknob/eldim 2 | 3 | go 1.14 4 | 5 | require ( 6 | cloud.google.com/go v0.110.0 // indirect 7 | cloud.google.com/go/iam v0.12.0 // indirect 8 | cloud.google.com/go/storage v1.29.0 9 | filippo.io/age v1.1.1 10 | github.com/daknob/hlog v0.1.0 11 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 12 | github.com/julienschmidt/httprouter v1.3.0 13 | github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4 // indirect 14 | github.com/keybase/go-triplesec v0.0.0-20200218020411-6687d79e9f55 15 | github.com/minio/minio-go/v7 v7.0.49 16 | github.com/ncw/swift v1.0.53 17 | github.com/prometheus/client_golang v1.14.0 18 | github.com/prometheus/common v0.39.0 // indirect 19 | github.com/prometheus/procfs v0.9.0 // indirect 20 | github.com/sirupsen/logrus v1.9.0 21 | google.golang.org/api v0.110.0 22 | gopkg.in/yaml.v2 v2.4.0 23 | ) 24 | -------------------------------------------------------------------------------- /http.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io" 8 | "net" 9 | "net/http" 10 | "os" 11 | "strings" 12 | "time" 13 | 14 | "filippo.io/age" 15 | "filippo.io/age/agessh" 16 | "github.com/daknob/eldim/internal/backend" 17 | 18 | "github.com/daknob/hlog" 19 | p "github.com/prometheus/client_golang/prometheus" 20 | 21 | "github.com/julienschmidt/httprouter" 22 | "github.com/sirupsen/logrus" 23 | ) 24 | 25 | /* 26 | index handles GET requests to / 27 | */ 28 | func index(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { 29 | hlog.LogRequest(r) 30 | 31 | /* If it's okay to print information about the software, show some basic info */ 32 | if conf.ServerTokens { 33 | /* Set the Server HTTP Header */ 34 | w.Header().Set("Server", fmt.Sprintf("eldim %s", version)) 35 | w.Header().Set("Content-Type", "text/plain") 36 | 37 | /* Print eldim information */ 38 | fmt.Fprintf(w, "eldim %s\n", version) 39 | fmt.Fprintf(w, "\n") 40 | fmt.Fprintf(w, "GitHub: https://github.com/daknob/eldim/\n") 41 | } else { 42 | /* Print only a space to avoid showing an empty reponse error in some browsers */ 43 | w.Header().Set("Content-Type", "text/plain") 44 | fmt.Fprintf(w, " ") 45 | } 46 | 47 | promReqServed.With(p.Labels{"method": "GET", "path": "/", "status": "200"}).Inc() 48 | } 49 | 50 | /* 51 | v1fileUpload handles POST requests to /api/v1/file/upload/ 52 | */ 53 | func v1fileUpload(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { 54 | /* Normal HTTP Procedure */ 55 | rid := hlog.LogRequest(r) 56 | if conf.ServerTokens { 57 | w.Header().Set("Server", fmt.Sprintf("eldim %s", version)) 58 | } 59 | 60 | /* Start Request Service Timer */ 61 | now := time.Now().Unix() 62 | 63 | /* Get IP Address of Request */ 64 | ipAddr, _, err := net.SplitHostPort(r.RemoteAddr) 65 | if err != nil { 66 | logrus.Errorf("%s: failed to parse Remote IP of request: %v", rid, err) 
67 | } 68 | 69 | /* Check if the Password supplied is allowed and matches a host */ 70 | hostname, err := getPassName(r.PostFormValue("password")) 71 | if err != nil { 72 | /* 73 | This is the case in which the password that was supplied by 74 | the user did not match any of the passwords in the database. 75 | 76 | This means we need to do an IP-based check, and try and see 77 | if this IP Address is allowed as a client. 78 | */ 79 | logrus.Printf("%s: client at %s did not supply a known password. Checking by IP", rid, ipAddr) 80 | 81 | hostname, err = getIPName(ipAddr) 82 | if err != nil { 83 | logrus.Printf("%s: IP Address %s is not a known client: %v", rid, ipAddr, err) 84 | w.WriteHeader(http.StatusForbidden) 85 | w.Header().Set("Content-Type", "text/plain") 86 | fmt.Fprintf(w, "IP Address not in access list") 87 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "403"}).Inc() 88 | promFileUpErrors.With(p.Labels{"error": "IP-Not-Allowed"}).Inc() 89 | return 90 | } 91 | 92 | promClientIDs.With(p.Labels{"type": "ipaddr"}).Inc() 93 | } else { 94 | promClientIDs.With(p.Labels{"type": "password"}).Inc() 95 | } 96 | 97 | /* Authentication has happened successfully */ 98 | logrus.Printf("%s: detected Hostname: %s", rid, hostname) 99 | promHostAuths.With(p.Labels{"hostname": hostname}).Inc() 100 | 101 | /* Begin file processing */ 102 | logrus.Printf("%s: parsing upload from %s [%s]", rid, hostname, ipAddr) 103 | err = r.ParseMultipartForm(conf.MaxUploadRAM * 1024 * 1024) 104 | if err != nil { 105 | if err == io.EOF { 106 | logrus.Errorf("%s: upload cancelled", rid) 107 | } else { 108 | logrus.Errorf("%s: unable to parse multipart form: %v", rid, err) 109 | } 110 | w.WriteHeader(http.StatusInternalServerError) 111 | w.Header().Set("Content-Type", "text/plain") 112 | fmt.Fprintf(w, "Error while processing upload request") 113 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 114 
| promFileUpErrors.With(p.Labels{"error": "Multipart-Form-Parse-Error"}).Inc() 115 | return 116 | } 117 | defer r.MultipartForm.RemoveAll() 118 | logrus.Printf("%s: done parsing upload", rid) 119 | 120 | /* Check if a file name has been provided */ 121 | if r.PostFormValue("filename") == "" { 122 | logrus.Errorf("%s: did not provide a file name to save the file as", rid) 123 | w.WriteHeader(http.StatusBadRequest) 124 | w.Header().Set("Content-Type", "text/plain") 125 | fmt.Fprintf(w, "File name not supplied") 126 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "400"}).Inc() 127 | promFileUpErrors.With(p.Labels{"error": "File-Name-Not-Provided"}).Inc() 128 | return 129 | } 130 | 131 | /* Connect to all Backends */ 132 | logrus.Printf("%s: connecting to Backends...", rid) 133 | var backends []backend.Client 134 | for _, be := range conf.Clients() { 135 | logrus.Printf("%s: connecting to '%s'", rid, be.Name()) 136 | 137 | err := be.Connect(r.Context()) 138 | if err != nil { 139 | logrus.Errorf("%s: unable to connect to %s Backend '%s': %v", rid, be.BackendName(), be.Name(), err) 140 | promFileUpErrors.With(p.Labels{ 141 | "error": fmt.Sprintf( 142 | "%s-Backend-Connection-Error", 143 | strings.ReplaceAll(be.BackendName(), " ", "-"), 144 | ), 145 | }).Inc() 146 | continue 147 | } 148 | logrus.Printf("%s: successfully connected to %s Backend: %s", rid, be.BackendName(), be.Name()) 149 | backends = append(backends, be) 150 | } 151 | 152 | if len(backends) == 0 { 153 | logrus.Errorf("%s: no Backends available to handle requests", rid) 154 | w.WriteHeader(http.StatusInternalServerError) 155 | w.Header().Set("Content-Type", "text/plain") 156 | fmt.Fprintf(w, "An error occured while processing the uploaded file") 157 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 158 | promFileUpErrors.With(p.Labels{"error": "No-Backends-Available"}).Inc() 159 | return 160 | } 161 | 
logrus.Printf("%s: connection successful. %d Backends live.", rid, len(backends)) 162 | 163 | /* Check if file exists in all available containers */ 164 | logrus.Printf("%s: checking if file exists already in any Backend...", rid) 165 | for _, be := range backends { 166 | exists, err := be.ObjectExists(r.Context(), fmt.Sprintf("%s/%s", hostname, r.PostFormValue("filename"))) 167 | if err != nil { 168 | logrus.Errorf("%s: failed to check if object exists for backend %s: %v", rid, be.Name(), err) 169 | w.WriteHeader(http.StatusInternalServerError) 170 | w.Header().Set("Content-Type", "text/plain") 171 | fmt.Fprintf(w, "An error occured while processing the uploaded file") 172 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 173 | promFileUpErrors.With(p.Labels{"error": "Error-Check-If-Object-Exists-In-Backend"}).Inc() 174 | return 175 | } 176 | if exists { 177 | logrus.Errorf("%s: file '%s' already exists", rid, r.PostFormValue("filename")) 178 | w.WriteHeader(http.StatusBadRequest) 179 | w.Header().Set("Content-Type", "text/plain") 180 | fmt.Fprintf(w, "File already exists.") 181 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "400"}).Inc() 182 | promFileUpErrors.With(p.Labels{"error": "File-Already-Exists"}).Inc() 183 | return 184 | } 185 | } 186 | logrus.Printf("%s: file does not exist in any Backend.", rid) 187 | 188 | /* Process uploaded file */ 189 | file, _, err := r.FormFile("file") 190 | if err != nil { 191 | logrus.Errorf("%s: uploaded File Error: %v", rid, err) 192 | w.WriteHeader(http.StatusBadRequest) 193 | w.Header().Set("Content-Type", "text/plain") 194 | fmt.Fprintf(w, "Did not supply a valid file.") 195 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "400"}).Inc() 196 | promFileUpErrors.With(p.Labels{"error": "File-Invalid-Or-Missing"}).Inc() 197 | return 198 | } 199 | 200 | /* 201 | * Determine the uploaded file 
size in bytes 202 | * In this case "file" is not an *os.File, unless it did not 203 | * fit in memory and had to be written to disk. So using stat 204 | * to determine the size is not reliable. We have to use seek 205 | * instead, and then restore to the beginning of the file. 206 | */ 207 | uploadSize, err := file.Seek(0, os.SEEK_END) 208 | if err != nil { 209 | logrus.Fatalf("failed to get file size: %v", err) 210 | w.WriteHeader(http.StatusInternalServerError) 211 | w.Header().Set("Content-Type", "text/plain") 212 | fmt.Fprintf(w, "An error occurred while processing the uploaded file.") 213 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 214 | promFileUpErrors.With(p.Labels{"error": "File-Seek-To-End-Failed"}).Inc() 215 | return 216 | } 217 | 218 | _, err = file.Seek(0, os.SEEK_SET) 219 | if err != nil { 220 | logrus.Fatalf("failed to get file size: %v", err) 221 | w.WriteHeader(http.StatusInternalServerError) 222 | w.Header().Set("Content-Type", "text/plain") 223 | fmt.Fprintf(w, "An error occurred while processing the uploaded file.") 224 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 225 | promFileUpErrors.With(p.Labels{"error": "File-Seek-To-Start-Failed"}).Inc() 226 | return 227 | } 228 | 229 | logrus.Printf("%s: file uploaded. 
Size: %d bytes.", rid, uploadSize) 230 | promBytesUploadedSuc.Add(float64(uploadSize)) 231 | logrus.Printf("%s: encrypting file...", rid) 232 | 233 | /* Load all Recipients from configuration */ 234 | var rcpt []age.Recipient 235 | for _, r := range conf.Encryption.AgeID { 236 | ar, err := age.ParseX25519Recipient(r) 237 | if err != nil { 238 | logrus.Errorf("%s: failed to parse age ID Recipient: %v", rid, err) 239 | continue 240 | } 241 | rcpt = append(rcpt, age.Recipient(ar)) 242 | } 243 | for _, r := range conf.Encryption.AgeSSH { 244 | ar, err := agessh.ParseRecipient(r) 245 | if err != nil { 246 | logrus.Errorf("%s: failed to parse age SSH Recipient: %v", rid, err) 247 | continue 248 | } 249 | rcpt = append(rcpt, age.Recipient(ar)) 250 | } 251 | 252 | if len(rcpt) == 0 { 253 | logrus.Errorf("%s: no recipients could be parsed from the configuration file", rid) 254 | w.WriteHeader(http.StatusInternalServerError) 255 | w.Header().Set("Content-Type", "text/plain") 256 | fmt.Fprintf(w, "Failed to encrypt file.") 257 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 258 | promFileUpErrors.With(p.Labels{"error": "No-Valid-Age-Recipients-Found"}).Inc() 259 | return 260 | } 261 | 262 | /* Initialize age for encryption */ 263 | encBuff := &bytes.Buffer{} 264 | 265 | ew, err := age.Encrypt(encBuff, rcpt...) 
266 | if err != nil { 267 | logrus.Errorf("%s: failed to initialize encryption: %v", rid, err) 268 | w.WriteHeader(http.StatusInternalServerError) 269 | w.Header().Set("Content-Type", "text/plain") 270 | fmt.Fprintf(w, "Failed to encrypt file.") 271 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 272 | promFileUpErrors.With(p.Labels{"error": "Failed-To-Initialize-Age-Encryption"}).Inc() 273 | return 274 | } 275 | 276 | /* Encrypt file */ 277 | wb, err := io.Copy(ew, file) 278 | if err != nil || wb != uploadSize { 279 | logrus.Errorf("%s: encryption failed, expected %d bytes ciphertext, got %d: %v", rid, uploadSize, wb, err) 280 | w.WriteHeader(http.StatusInternalServerError) 281 | w.Header().Set("Content-Type", "text/plain") 282 | fmt.Fprintf(w, "Failed to encrypt file.") 283 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 284 | promFileUpErrors.With(p.Labels{"error": "Failed-To-Encrypt-File"}).Inc() 285 | return 286 | } 287 | 288 | if ew.Close() != nil { 289 | logrus.Errorf("%s: encryption failed: %v", rid, err) 290 | w.WriteHeader(http.StatusInternalServerError) 291 | w.Header().Set("Content-Type", "text/plain") 292 | fmt.Fprintf(w, "Failed to encrypt file.") 293 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 294 | promFileUpErrors.With(p.Labels{"error": "Failed-To-Encrypt-File-Close"}).Inc() 295 | return 296 | } 297 | 298 | encrSize := int64(encBuff.Len()) 299 | 300 | logrus.Printf("%s: encryption completed", rid) 301 | logrus.Printf("%s: uploading encrypted file to all Backends...", rid) 302 | 303 | /* All files are / */ 304 | uploadFileName := fmt.Sprintf("%s/%s", hostname, r.PostFormValue("filename")) 305 | /* Counts successful uploads */ 306 | uploads := 0 307 | 308 | /* For every backend */ 309 | for _, be := range backends { 310 | logrus.Printf("%s: uploading %s to %s", rid, uploadFileName, 
be.Name()) 311 | 312 | err := be.UploadFile(context.Background(), uploadFileName, bytes.NewReader(encBuff.Bytes()), encrSize) 313 | if err != nil { 314 | logrus.Errorf("%s: failed to upload %s to %s: %v", rid, uploadFileName, be.Name(), err) 315 | promFileUpErrors.With(p.Labels{ 316 | "error": fmt.Sprintf( 317 | "%s-Upload-Failed", strings.ReplaceAll(be.BackendName(), " ", "-"), 318 | ), 319 | }).Inc() 320 | } else { 321 | uploads++ 322 | promBytesUploaded.With( 323 | p.Labels{"backendtype": strings.ReplaceAll(be.BackendName(), " ", "-")}, 324 | ).Add(float64(encrSize)) 325 | } 326 | 327 | /* Disconnect from Backend */ 328 | be.Disconnect(r.Context()) 329 | } 330 | 331 | /* Reset the file buffer */ 332 | encBuff.Reset() 333 | 334 | /* Check if at least one file was uploaded */ 335 | if uploads == 0 { 336 | logrus.Errorf("%s: did not manage to upload to any Backends!", rid) 337 | w.WriteHeader(http.StatusInternalServerError) 338 | w.Header().Set("Content-Type", "text/plain") 339 | fmt.Fprintf(w, "Failed to store file.") 340 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "500"}).Inc() 341 | promFileUpErrors.With(p.Labels{"error": "All-Uploads-Failed"}).Inc() 342 | return 343 | } 344 | 345 | /* All good, finally it's over */ 346 | logrus.Printf("%s: uploaded encrypted file to %d Backends", rid, uploads) 347 | w.WriteHeader(http.StatusOK) 348 | w.Header().Set("Content-Type", "text/plain") 349 | fmt.Fprintf(w, "Ok") 350 | 351 | /* Update Prometheus on the successful request handling */ 352 | promReqServed.With(p.Labels{"method": "POST", "path": "/api/v1/file/upload/", "status": "200"}).Inc() 353 | promReqServTimeHist.Observe(float64(time.Now().Unix() - now)) 354 | promHostUploads.With(p.Labels{"hostname": hostname}).Inc() 355 | 356 | } 357 | -------------------------------------------------------------------------------- /internal/backend/backend.go: 
--------------------------------------------------------------------------------
package backend

import (
	"context"
	"io"
)

/*
Config is the interface that is required by
eldim backend configuration nodes. Validate returns the first
configuration error found; Name returns the operator-assigned
name of the backend.
*/
type Config interface {
	Validate() error
	Name() string
}

/*
Client is the interface that is required by
eldim backend clients. A Client is opened with Connect, used to
probe buckets/objects and to upload files, and released with
Disconnect.
*/
type Client interface {
	Connect(ctx context.Context) error
	Disconnect(ctx context.Context) error

	BucketExists(ctx context.Context, name string) (bool, error)
	ObjectExists(ctx context.Context, name string) (bool, error)

	Name() string
	Bucket() string
	BackendName() string

	UploadFile(ctx context.Context, name string, file io.Reader, filesize int64) error
}
--------------------------------------------------------------------------------
/internal/gcs/gcs.go:
--------------------------------------------------------------------------------
package gcs

import (
	"context"
	"fmt"
	"io"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

/*
BackendConfig is the data structure containing all information
required to connect to a Google Cloud Storage account.
*/
type BackendConfig struct {
	BackendName     string `yaml:"name"`       // operator-assigned backend name
	CredentialsFile string `yaml:"credsfile"`  // path to a GCP service-account JSON key file
	Bucket          string `yaml:"bucketname"` // target GCS bucket
}

/*
Validate validates the Google Cloud Storage Backend and
returns the first error that occurred during validation.
It also performs a live connection and bucket-existence test.
*/
func (conf *BackendConfig) Validate() error {
	/* Check if backend has a name */
	if conf.BackendName == "" {
		return fmt.Errorf("Google Cloud Storage Backend requires a name")
	}

	/* Check if all details are supplied */
	if conf.CredentialsFile == "" || conf.Bucket == "" {
		return fmt.Errorf("all fields are required for Google Cloud Storage backends to work")
	}

	/* Attempt to connect to the Backend */
	client := New(context.Background(), *conf)

	err := client.Connect(context.Background())
	if err != nil {
		return fmt.Errorf("failed to authenticate to Backend: %v", err)
	}

	/* Check if the bucket exists */
	/* NOTE(review): client is not disconnected on the two error returns
	   below — confirm whether the leaked connection matters here */
	exists, err := client.BucketExists(context.Background(), conf.Bucket)
	if err != nil {
		return fmt.Errorf("Google Cloud Storage Backend Bucket Error: %v", err)
	}
	if !exists {
		return fmt.Errorf("Google Cloud Storage Container does not exist: %s", conf.Bucket)
	}

	/* Disconnect from Google Cloud Storage */
	client.Disconnect(context.Background())

	return nil
}

/*
Name returns the name configured for this Google Cloud Storage Backend.
If none is configured, a placeholder name is lazily assigned.
*/
func (conf *BackendConfig) Name() string {
	if conf.BackendName == "" {
		conf.BackendName = "Unnamed Google Cloud Storage Backend"
	}
	return conf.BackendName
}

/*
Client is a Google Cloud Storage Client Object.
Conn is nil until Connect succeeds.
*/
type Client struct {
	Conn   *storage.Client
	Config BackendConfig
}

/*
New creates a new Google Cloud Storage client. The ctx parameter
is currently unused; the connection is established later, in Connect.
*/
func New(ctx context.Context, conf BackendConfig) *Client {
	var ret Client
	ret.Config = conf
	ret.Conn = nil

	return &ret
}

/*
Connect connects the Google Cloud Storage client to
the backend service and authenticates.
*/
func (c *Client) Connect(ctx context.Context) error {
	gcl, err := storage.NewClient(ctx,
		option.WithCredentialsFile(c.Config.CredentialsFile),
		option.WithUserAgent("eldim"))
	if err != nil {
		return fmt.Errorf("failed to connect to GCS: %v", err)
	}
	c.Conn = gcl
	return nil
}

/*
Disconnect terminates the connection of the Google
Cloud Storage client with the backend server.
Must only be called after a successful Connect (c.Conn is nil before).
*/
func (c *Client) Disconnect(ctx context.Context) error {
	return c.Conn.Close()
}

/*
BucketExists returns if a particular bucket exists and is
reachable by the Google Cloud Storage client.
*/
func (c *Client) BucketExists(ctx context.Context, name string) (bool, error) {
	/* NOTE(review): the == sentinel comparison assumes the SDK returns
	   ErrBucketNotExist unwrapped; errors.Is would be more robust */
	_, err := c.Conn.Bucket(name).Attrs(ctx)
	if err == storage.ErrBucketNotExist {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("failed to check if bucket exists: %v", err)
	}

	return true, nil
}

/*
ObjectExists returns if a particular object exists and is
reachable by the Google Cloud Storage client.
*/
func (c *Client) ObjectExists(ctx context.Context, name string) (bool, error) {
	/* NOTE(review): same unwrapped-sentinel assumption as BucketExists */
	_, err := c.Conn.Bucket(c.Bucket()).Object(name).Attrs(ctx)
	if err == storage.ErrObjectNotExist {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("failed to check if object exists: %v", err)
	}

	return true, nil
}

/*
Name returns the Google Cloud Storage Client Name.
*/
func (c *Client) Name() string {
	return c.Config.Name()
}

/*
Bucket returns the Google Cloud Storage Bucket Name.
*/
func (c *Client) Bucket() string {
	return c.Config.Bucket
}

/*
BackendName returns 'Google Cloud Storage'.
*/
func (c *Client) BackendName() string {
	return "Google Cloud Storage"
}

/*
UploadFile uploads a file to the Google Cloud Storage
Backend, with a name of name. filesize is used to verify that the
complete payload was written before the object is finalized.
*/
func (c *Client) UploadFile(ctx context.Context, name string, file io.Reader, filesize int64) error {

	w := c.Conn.Bucket(c.Bucket()).Object(name).NewWriter(ctx)
	w.ObjectAttrs.ContentType = "application/vnd.age"
	wb, err := io.Copy(w, file)
	if err != nil || wb != filesize {
		return fmt.Errorf("failed to write to object (expected: %d B, written: %d B): %v", filesize, wb, err)
	}
	/* Close finalizes the upload; errors here mean the object was not stored */
	err = w.Close()
	if err != nil {
		return fmt.Errorf("failed to upload file: %v", err)
	}

	return nil
}
--------------------------------------------------------------------------------
/internal/s3/s3.go:
--------------------------------------------------------------------------------
package s3

import (
	"context"
	"fmt"
	"io"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

/*
BackendConfig is the data structure containing all information
required to connect to an S3 backend.
*/
type BackendConfig struct {
	BackendName string `yaml:"name"`           // operator-assigned backend name
	Endpoint    string `yaml:"endpoint"`       // S3 API endpoint (host[:port])
	Bucket      string `yaml:"bucketname"`     // target bucket
	Region      string `yaml:"region"`         // bucket region
	AccessKey   string `yaml:"accesskey"`      // static credentials: access key
	SecretKey   string `yaml:"secretkey"`      // static credentials: secret key
	SendMD5     bool   `yaml:"sendcontentmd5"` // send Content-MD5 on uploads
}

/*
Validate validates the S3 Storage Backend and returns the
first error that occurred during validation.
29 | */ 30 | func (conf *BackendConfig) Validate() error { 31 | /* Check if backend has a name */ 32 | if conf.BackendName == "" { 33 | return fmt.Errorf("S3 Backend requires a name") 34 | } 35 | 36 | /* Check if all details are supplied */ 37 | if conf.Endpoint == "" || conf.Bucket == "" || conf.AccessKey == "" || conf.SecretKey == "" || conf.Region == "" { 38 | return fmt.Errorf("all fields are required for S3 backends to work") 39 | } 40 | 41 | /* Attempt to connect to the Backend */ 42 | mc, err := minio.New(conf.Endpoint, &minio.Options{ 43 | Creds: credentials.NewStaticV4( 44 | conf.AccessKey, 45 | conf.SecretKey, ""), 46 | Secure: true, 47 | Region: conf.Region, 48 | }) 49 | if err != nil { 50 | return fmt.Errorf("failed to connect to Backend: %v", err) 51 | } 52 | 53 | /* Set appropriate app information */ 54 | mc.SetAppInfo("eldim", "") 55 | 56 | /* Check if bucket exists */ 57 | exists, err := mc.BucketExists(context.Background(), conf.Bucket) 58 | if err != nil { 59 | return fmt.Errorf("failed to check if bucket exists: %v", err) 60 | } 61 | if !exists { 62 | return fmt.Errorf("bucket does not exist: %s", conf.Bucket) 63 | } 64 | 65 | return nil 66 | } 67 | 68 | /* 69 | Name returns the name configured for this S3 Backend 70 | */ 71 | func (conf *BackendConfig) Name() string { 72 | if conf.BackendName == "" { 73 | conf.BackendName = "Unnamed S3 Backend" 74 | } 75 | return conf.BackendName 76 | } 77 | 78 | /* 79 | Client is an S3 Backend Client Object 80 | */ 81 | type Client struct { 82 | Conn *minio.Client 83 | Config BackendConfig 84 | } 85 | 86 | /* 87 | New creates a new S3 Backend client 88 | */ 89 | func New(ctx context.Context, conf BackendConfig) *Client { 90 | var ret Client 91 | ret.Config = conf 92 | ret.Conn = nil 93 | 94 | return &ret 95 | } 96 | 97 | /* 98 | Connect connects the S3 Backend client to 99 | the backend service and authenticates 100 | */ 101 | func (c *Client) Connect(ctx context.Context) error { 102 | /* Create new S3 Client 
*/ 103 | mc, err := minio.New(c.Config.Endpoint, &minio.Options{ 104 | Creds: credentials.NewStaticV4( 105 | c.Config.AccessKey, 106 | c.Config.SecretKey, ""), 107 | Secure: true, 108 | Region: c.Config.Region, 109 | }) 110 | if err != nil { 111 | return fmt.Errorf("failed to connect to S3: %v", err) 112 | } 113 | 114 | /* Set App Info */ 115 | mc.SetAppInfo("eldim", "") 116 | 117 | c.Conn = mc 118 | return nil 119 | } 120 | 121 | /* 122 | Disconnect terminates the connection of the 123 | S3 client with the backend server 124 | */ 125 | func (c *Client) Disconnect(ctx context.Context) error { 126 | return nil 127 | } 128 | 129 | /* 130 | BucketExists returns if a particular bucket exists and is 131 | reachable by the S3 client 132 | */ 133 | func (c *Client) BucketExists(ctx context.Context, name string) (bool, error) { 134 | return c.Conn.BucketExists(ctx, name) 135 | } 136 | 137 | /* 138 | ObjectExists returns if a particular object exists and is 139 | reachable by the S3 client 140 | */ 141 | func (c *Client) ObjectExists(ctx context.Context, name string) (bool, error) { 142 | 143 | _, err := c.Conn.StatObject(ctx, c.Config.Bucket, name, minio.StatObjectOptions{}) 144 | if err != nil { 145 | switch minio.ToErrorResponse(err).Code { 146 | case "NoSuchKey": 147 | return false, nil 148 | default: 149 | return false, fmt.Errorf("failed checking if object exists: %v", err) 150 | } 151 | } 152 | 153 | return true, nil 154 | } 155 | 156 | /* 157 | Name returns the S3 Client Name 158 | */ 159 | func (c *Client) Name() string { 160 | return c.Config.Name() 161 | } 162 | 163 | /* 164 | Bucket returns the S3 Bucket Name 165 | */ 166 | func (c *Client) Bucket() string { 167 | return c.Config.Bucket 168 | } 169 | 170 | /* 171 | BackendName returns 'S3' 172 | */ 173 | func (c *Client) BackendName() string { 174 | return "S3" 175 | } 176 | 177 | /* 178 | UploadFile uploads a file to the 179 | S3 Backend, with a name of name. 
180 | */ 181 | func (c *Client) UploadFile(ctx context.Context, name string, file io.Reader, filesize int64) error { 182 | 183 | uinfo, err := c.Conn.PutObject(ctx, c.Config.Bucket, name, file, filesize, minio.PutObjectOptions{ 184 | ContentType: "application/vnd.age", 185 | SendContentMd5: c.Config.SendMD5, 186 | }) 187 | if err != nil { 188 | return fmt.Errorf("failed to upload file: %v", err) 189 | } 190 | if filesize != uinfo.Size { 191 | return fmt.Errorf("bytes uploaded is not the same as file size: %d vs %d", uinfo.Size, filesize) 192 | } 193 | 194 | return nil 195 | } 196 | -------------------------------------------------------------------------------- /internal/swift/swift.go: -------------------------------------------------------------------------------- 1 | package swift 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/ncw/swift" 9 | ) 10 | 11 | /* 12 | BackendConfig is the data structure containing all information 13 | required to connect to a single OpenStack Swift server 14 | */ 15 | type BackendConfig struct { 16 | BackendName string `yaml:"name"` 17 | Username string `yaml:"username"` 18 | APIKey string `yaml:"apikey"` 19 | AuthURL string `yaml:"authurl"` 20 | Region string `yaml:"region"` 21 | Container string `yaml:"container"` 22 | ExpireSeconds int `yaml:"expireseconds"` 23 | } 24 | 25 | /* 26 | Validate validates the OpenStack Swift Backend and returns the 27 | first error that occured during validation. 
*/
func (conf *BackendConfig) Validate() error {
	/* Check if backend has a name */
	if conf.BackendName == "" {
		return fmt.Errorf("OpenStack Swift Backend requires a name")
	}

	/* Check if all details are supplied */
	if conf.Username == "" || conf.APIKey == "" ||
		conf.AuthURL == "" || conf.Region == "" ||
		conf.Container == "" {
		return fmt.Errorf("all fields are required for OpenStack Swift backends to work")
	}

	/* Ensure expire seconds is not negative */
	if conf.ExpireSeconds < 0 {
		return fmt.Errorf("expiry Seconds cannot be a negative number")
	}

	/* Attempt to connect to OpenStack Swift Backend */
	client := New(context.Background(), *conf)

	err := client.Connect(context.Background())
	if err != nil {
		return fmt.Errorf("failed to authenticate to Backend: %v", err)
	}

	/* Check if container (bucket) exists */
	exists, err := client.BucketExists(context.Background(), conf.Container)
	if err != nil {
		return fmt.Errorf("OpenStack Swift Backend Container Error: %v", err)
	}
	if !exists {
		return fmt.Errorf("OpenStack Swift Container does not exist: %s", conf.Container)
	}

	/* Disconnect from OpenStack */
	client.Disconnect(context.Background())

	return nil
}

/*
Name returns the name configured for this OpenStack Swift Backend.
If none is configured, a placeholder name is lazily assigned.
*/
func (conf *BackendConfig) Name() string {
	if conf.BackendName == "" {
		conf.BackendName = "Unnamed Openstack Swift Backend"
	}
	return conf.BackendName
}

/*
Client is an OpenStack Swift Client Object. Conn is a value (not a
pointer) and is initialized in New; authentication happens in Connect.
*/
type Client struct {
	Conn   swift.Connection
	Config BackendConfig
}

/*
New creates a new OpenStack Swift client. The ctx parameter is
currently unused; authentication happens later, in Connect.
*/
func New(ctx context.Context, conf BackendConfig) *Client {
	var ret Client
	ret.Config = conf
	ret.Conn = swift.Connection{
		UserName:     conf.Username,
		ApiKey:       conf.APIKey,
		AuthUrl:      conf.AuthURL,
		Domain:       "default",
		Region:       conf.Region,
		AuthVersion:  3,
		EndpointType: swift.EndpointTypePublic,
		UserAgent:    "eldim",
	}
	return &ret
}

/*
Connect connects the OpenStack Swift client to the backend
service and authenticates.
*/
func (c *Client) Connect(ctx context.Context) error {
	return c.Conn.Authenticate()
}

/*
Disconnect terminates the connection of the OpenStack Swift
client with the backend server by discarding the auth token.
*/
func (c *Client) Disconnect(ctx context.Context) error {
	c.Conn.UnAuthenticate()
	return nil
}

/*
BucketExists returns if a particular bucket (container) exists and
is reachable by the OpenStack Swift client.
*/
func (c *Client) BucketExists(ctx context.Context, name string) (bool, error) {
	/* NOTE(review): the == sentinel comparison assumes ncw/swift returns
	   ContainerNotFound unwrapped; errors.Is would be more robust */
	_, _, err := c.Conn.Container(name)
	if err == swift.ContainerNotFound {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("failed to check if bucket exists: %v", err)
	}

	return true, nil
}

/*
ObjectExists returns if a particular object exists and is
reachable by the OpenStack Swift client.
*/
func (c *Client) ObjectExists(ctx context.Context, name string) (bool, error) {
	_, _, err := c.Conn.Object(c.Bucket(), name)
	if err == swift.ObjectNotFound {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("failed to check if object exists: %v", err)
	}

	return true, nil
}

/*
Name returns the OpenStack Swift Client Name.
*/
func (c *Client) Name() string {
	return c.Config.Name()
}

/*
Bucket returns the OpenStack Swift Container Name.
*/
func (c *Client) Bucket() string {
	return c.Config.Container
}

/*
BackendName returns 'OpenStack Swift'.
*/
func (c *Client) BackendName() string {
	return "OpenStack Swift"
}

/*
UploadFile uploads a file to the OpenStack Swift Backend, with
a name of name. The object carries an X-Delete-After header derived
from the configured ExpireSeconds.
*/
func (c *Client) UploadFile(ctx context.Context, name string, file io.Reader, filesize int64) error {

	_, err := c.Conn.ObjectPut(c.Bucket(), name, file, false, "",
		"application/vnd.age", map[string]string{
			"X-Delete-After": fmt.Sprintf("%d", c.Config.ExpireSeconds),
		})
	if err != nil {
		return fmt.Errorf("failed to upload file: %v", err)
	}

	return nil
}
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
package main

import (
	"crypto/tls"
	"flag"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/daknob/eldim/config"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/julienschmidt/httprouter"
	"github.com/sirupsen/logrus"
	yaml "gopkg.in/yaml.v2"
)

/* Package-level state shared by the HTTP handlers */
var (
	conf    config.Config         // parsed server configuration
	clients []config.ClientConfig // parsed client ACL (IPs / passwords)
)

const (
	version = "v0.6.0"
)

/*
main parses flags and configuration, registers Prometheus metrics,
builds the HTTP router, and serves the API over TLS until failure.
*/
func main() {

	/* Output logs in JSON or Text */
	logFormat := flag.Bool("j", false, "Output logs in JSON")

	/* Configuration File Path */
	configPath := flag.String("c", "/etc/eldim/eldim.yml", "Path to the configuration file")

	/* Parse flags */
	flag.Parse()

	/* Set the log format to JSON if requested */
	if *logFormat == true {
		logrus.SetFormatter(&logrus.JSONFormatter{})
	} else {
		logrus.SetFormatter(&logrus.TextFormatter{})
	}

	/* Startup logs */
	logrus.Printf("starting eldim...")
	logrus.Printf("log in JSON: %v", *logFormat)
	logrus.Printf("configuration file: %s", *configPath)

	/* Parse the configuration file */
	logrus.Printf("parsing the configuration file...")

	/* Open the configuration file, and read contents to RAM */
	confb, err := ioutil.ReadFile(*configPath)
	if err != nil {
		logrus.Fatalf("could not open configuration file: %v", err)
	}

	/* Attempt to parse it for YAML */
	err = yaml.Unmarshal(confb, &conf)
	if err != nil {
		logrus.Fatalf("could not parse the YAML configuration file: %v", err)
	}

	logrus.Printf("configuration file loaded.")

	/* Validate configuration by appropriate function call */
	logrus.Printf("validating parameters...")
	err = conf.Validate()
	if err != nil {
		logrus.Fatalf("invalid configuration: %v", err)
	}
	logrus.Printf("configuration file validated.")

	/* Load client file (the upload ACL) */
	clib, err := ioutil.ReadFile(conf.ClientFile)
	if err != nil {
		logrus.Fatalf("could not open clients file: %v", err)
	}
	err = yaml.Unmarshal(clib, &clients)
	if err != nil {
		logrus.Fatalf("could not parse clients YML file: %v", err)
	}

	/* Register Prometheus Metrics */
	registerPromMetrics()

	/* Update configuration-based Metrics */
	updateConfMetrics()

	/* Various web server configurations */
	logrus.Printf("configuring the HTTP Server...")

	/* Create an HTTP Router */
	router := httprouter.New()
	router.GET("/", index)
	router.POST("/api/v1/file/upload/", v1fileUpload)

	/* Only enable Prometheus metrics if configured */
	if conf.PrometheusEnabled {
		router.GET(
			"/metrics",
			requestBasicAuth(
				conf.PrometheusAuthUser,
				conf.PrometheusAuthPass,
				"Prometheus Metrics",
				*promMetricsAuth,
				httpHandlerToHTTPRouterHandler(
					promhttp.Handler(),
				),
			),
		)
	}

	/* Configure TLS: TLS 1.2+, P-256/X25519 only */
	tlsConfig := &tls.Config{
		PreferServerCipherSuites: true,
		CurvePreferences: []tls.CurveID{
			tls.CurveP256,
			tls.X25519,
		},
		MinVersion: tls.VersionTLS12,
	}

	/* Configure HTTP: generous write/idle timeouts for large uploads */
	server := http.Server{
		ReadHeaderTimeout: 5 * time.Second,
		WriteTimeout:      120 * time.Second,
		IdleTimeout:       180 * time.Second,
		TLSConfig:         tlsConfig,
		Handler:           router,
		Addr:              fmt.Sprintf(":%d", conf.ListenPort),
	}

	logrus.Printf("HTTP Server Configured.")

	/* Start serving TLS */
	logrus.Printf("serving on :%d ...", conf.ListenPort)

	err = server.ListenAndServeTLS(
		conf.TLSChainPath,
		conf.TLSKeyPath,
	)
	if err != nil {
		logrus.Fatalf("failed to start HTTP Server: %v", err)
	}

	/* Exit */
	/* NOTE(review): unreachable in practice — ListenAndServeTLS always
	   returns a non-nil error, so the Fatalf above exits first */
	logrus.Printf("eldim quitting...")

}
--------------------------------------------------------------------------------
/metrics.go:
--------------------------------------------------------------------------------
package main

import p "github.com/prometheus/client_golang/prometheus"

/* Prometheus Metrics Declarations */
var (
	promReqServed = p.NewCounterVec(
		p.CounterOpts{
			Name: "eldim_http_requests_served",
			Help: "HTTP Requests Served by eldim, with corresponding types and status codes, per path",
		},
		[]string{
			"method",
			"path",
			"status",
		},
	)
	promMetricsAuth = p.NewCounterVec(
		p.CounterOpts{
			Name: "eldim_prometheus_metrics_scrape_auth",
			Help: "HTTP Requests to the Prometheus Metrics Endpoint and their Authentication Status",
		},
		[]string{
			"success",
			"error",
		},
	)
	/* NOTE(review): the metric name carries the typo "occured"; renaming
	   it would break existing dashboards, so it is left as-is */
	promFileUpErrors = p.NewCounterVec(
		p.CounterOpts{
			Name: "eldim_file_upload_errors_occured",
			Help: "Types of errors occured during file uploads",
		},
		[]string{
			"error",
		},
	)
	promReqServTimeHist = p.NewHistogram(
		p.HistogramOpts{
			Name:    "eldim_file_upload_request_time",
			Help:    "Histogram of time of successful file uploads to eldim",
			Buckets: p.LinearBuckets(0, 60, 120),
		},
	)
	promClients = p.NewGaugeVec(
		p.GaugeOpts{
			Name: "eldim_loaded_clients",
			Help: "Clients that are allowed to upload files to eldim",
		},
		[]string{
			"type",
		},
	)
	promIPs = p.NewGaugeVec(
		p.GaugeOpts{
			Name: "eldim_loaded_ip_addressess",
			Help: "IP Addressess that are allowed to upload files to eldim",
		},
		[]string{
			"version",
		},
	)
	promBytesUploadedSuc = p.NewCounter(
		p.CounterOpts{
			Name: "eldim_files_uploaded_bytes_successful",
			Help: "Amount of bytes of files uploaded to eldim successfully",
		},
	)
	promBytesUploaded = p.NewCounterVec(
		p.CounterOpts{
			Name: "eldim_files_uploaded_bytes",
			Help: "Amount of bytes of files uploaded from eldim per Backend",
		},
		[]string{
			"backendtype",
		},
	)
	promClientIDs = p.NewCounterVec(
		p.CounterOpts{
			Name: "eldim_client_id_type",
			Help: "Type of Client Identification used (Password vs IP Address)",
		},
		[]string{
			"type",
		},
	)
	promHostAuths = p.NewCounterVec(
		p.CounterOpts{
			Name: "eldim_host_authentications",
			Help: "Successful authentications to eldim by hostname",
		},
		[]string{
			"hostname",
		},
	)
	promHostUploads = p.NewCounterVec(
		p.CounterOpts{
			Name: "eldim_host_uploads",
			Help: "Successful file uploads to eldim by hostname",
		},
		[]string{
			"hostname",
		},
	)
	promEldimVersion = p.NewGaugeVec(
		p.GaugeOpts{
			Name: "eldim_server_version",
			Help: "Each eldim server returns '1', with the software version as a tag",
		},
		[]string{
			"eldimversion",
		},
	)
)

/* Register Prometheus Metrics with the default registry; panics on
   duplicate registration, so call exactly once at startup */
func registerPromMetrics() {
	/* Initialize Prometheus */
	p.MustRegister(promReqServed)
	p.MustRegister(promMetricsAuth)
	p.MustRegister(promFileUpErrors)
	p.MustRegister(promReqServTimeHist)
	p.MustRegister(promClients)
	p.MustRegister(promIPs)
	p.MustRegister(promBytesUploadedSuc)
	p.MustRegister(promBytesUploaded)
	p.MustRegister(promClientIDs)
	p.MustRegister(promHostAuths)
	p.MustRegister(promHostUploads)
	p.MustRegister(promEldimVersion)
}

/* Update configuration-based Metrics: per-type client counts, per-family
   IP counts, and the server version gauge, derived from the loaded
   clients list */
func updateConfMetrics() {
	/* Set Prometheus Loaded Clients Metric */
	var v4 float64 = 0   // clients with at least one IPv4 address
	var v6 float64 = 0   // clients with at least one IPv6 address
	var pass float64 = 0 // clients with a password configured
	var v4a float64 = 0  // total IPv4 addresses across clients
	var v6a float64 = 0  // total IPv6 addresses across clients
	for _, c := range clients {
		if len(c.IPv4()) >= 1 {
			v4++
			v4a += float64(len(c.IPv4()))
		}
		if len(c.IPv6()) >= 1 {
			v6++
			v6a += float64(len(c.IPv6()))
		}
		if c.Password != "" {
			pass++
		}
	}
	promClients.With(p.Labels{"type": "ipv6"}).Set(v6)
	promClients.With(p.Labels{"type": "ipv4"}).Set(v4)
	promClients.With(p.Labels{"type": "password"}).Set(pass)
	promIPs.With(p.Labels{"version": "6"}).Set(v6a)
	promIPs.With(p.Labels{"version": "4"}).Set(v4a)

	/* Set eldim version */
	promEldimVersion.With(p.Labels{"eldimversion": version}).Set(1)
}
--------------------------------------------------------------------------------
/util.go:
--------------------------------------------------------------------------------
package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"fmt"
	"net"
	"net/http"

	"github.com/julienschmidt/httprouter"
	p "github.com/prometheus/client_golang/prometheus"
)

/*
getIPName returns the client name of a given IP Address ip. If it is not found,
an error is returned.
17 | */ 18 | func getIPName(ip string) (string, error) { 19 | naddr := net.ParseIP(ip) 20 | if naddr == nil { 21 | return "", fmt.Errorf("invalid IP Address given: %s", ip) 22 | } 23 | 24 | for _, c := range clients { 25 | for _, ip := range append(c.IPv6(), c.IPv4()...) { 26 | if ip.Equal(naddr) { 27 | return c.Name(), nil 28 | } 29 | } 30 | } 31 | 32 | return "", fmt.Errorf("IP Address not a client") 33 | } 34 | 35 | /* 36 | getPassName returns the client name for a given Password password. If it is not 37 | found, an error is returned. 38 | */ 39 | func getPassName(password string) (string, error) { 40 | 41 | if password == "" { 42 | return "", fmt.Errorf("password was empty. did not match") 43 | } 44 | 45 | for _, c := range clients { 46 | if c.Password == password { 47 | return c.Name(), nil 48 | } 49 | } 50 | 51 | return "", fmt.Errorf("password did not match client database") 52 | } 53 | 54 | /* 55 | requestBasicAuth is an HTTP Handler wrapper that will require the passed 56 | handler to be served only if the HTTP Basic Authentication Credentials are 57 | correct. 58 | */ 59 | func requestBasicAuth(username, password, realm string, pa p.CounterVec, handler httprouter.Handle) httprouter.Handle { 60 | 61 | /* Calculate the SHA-256 Hash of the Required Username and Password */ 62 | RequiredUserNameHash := sha256.Sum256([]byte(username)) 63 | RequiredPasswordHash := sha256.Sum256([]byte(password)) 64 | 65 | return func(w http.ResponseWriter, r *http.Request, Params httprouter.Params) { 66 | 67 | user, pass, ok := r.BasicAuth() 68 | 69 | /* Calculate the SHA-256 Hash of the Given Username and Password */ 70 | PassedUsername := sha256.Sum256([]byte(user)) 71 | PassedPassword := sha256.Sum256([]byte(pass)) 72 | 73 | /* 74 | subtle.ConstantTimeCompare is used so the username and password 75 | comparison take constant time, and therefore do not leak 76 | information about the length of the password, or allow time-based 77 | side channel attacks. 
However, in order to prevent password length 78 | guessing, SHA-256 is used, which is always a constant size. 79 | Calculation of the SHA-256 hash can still be attacked, but isn't as 80 | likely, since the inputs are constants. 81 | */ 82 | if !ok { 83 | pa.With(p.Labels{"success": "false", "error": "HTTP-Basic-Auth-Not-Ok"}).Inc() 84 | w.Header().Set("WWW-Authenticate", `Basic realm="`+realm+`"`) 85 | w.WriteHeader(401) 86 | w.Write([]byte("You need to supply the correct credentials for this page.\n")) 87 | return 88 | } 89 | if subtle.ConstantTimeCompare(PassedUsername[:], RequiredUserNameHash[:]) != 1 { 90 | pa.With(p.Labels{"success": "false", "error": "Incorrect-Username"}).Inc() 91 | w.Header().Set("WWW-Authenticate", `Basic realm="`+realm+`"`) 92 | w.WriteHeader(401) 93 | w.Write([]byte("You need to supply the correct credentials for this page.\n")) 94 | return 95 | } 96 | if subtle.ConstantTimeCompare(PassedPassword[:], RequiredPasswordHash[:]) != 1 { 97 | pa.With(p.Labels{"success": "false", "error": "Incorrect-Password"}).Inc() 98 | w.Header().Set("WWW-Authenticate", `Basic realm="`+realm+`"`) 99 | w.WriteHeader(401) 100 | w.Write([]byte("You need to supply the correct credentials for this page.\n")) 101 | return 102 | } 103 | 104 | pa.With(p.Labels{"success": "true", "error": ""}).Inc() 105 | handler(w, r, Params) 106 | } 107 | } 108 | 109 | /* 110 | httpHandlerToHTTPRouterHandler is a function that converts an HTTP Handler to 111 | an HTTPRouter Handler, ignoring the Params field and assuming it is not used 112 | */ 113 | func httpHandlerToHTTPRouterHandler(h http.Handler) httprouter.Handle { 114 | return func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { 115 | h.ServeHTTP(w, r) 116 | } 117 | } 118 | --------------------------------------------------------------------------------