├── .github
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   └── feature_request.md
│   ├── logo.svg
│   └── workflows
│       ├── builds.yml
│       ├── codeql-analysis.yml
│       ├── dco.yml
│       └── tests.yml
├── .gitignore
├── .golangci.yml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── CREDITS.md
├── Dockerfile
├── Dockerfile.dirty
├── LICENSE
├── Makefile
├── README.md
├── VERSION
├── app.go
├── config
│   ├── config.env
│   ├── config.yaml
│   └── dir
│       ├── pprof.yaml
│       └── prometheus.yaml
├── debian
│   ├── changelog
│   ├── control
│   ├── copyright
│   ├── frostfs-http-gw.dirs
│   ├── frostfs-http-gw.docs
│   ├── frostfs-http-gw.examples
│   ├── frostfs-http-gw.install
│   ├── frostfs-http-gw.postinst
│   ├── frostfs-http-gw.postrm
│   ├── frostfs-http-gw.preinst
│   ├── frostfs-http-gw.prerm
│   ├── frostfs-http-gw.service
│   ├── rules
│   └── source
│       └── format
├── docs
│   ├── api.md
│   ├── building-deb-package.md
│   └── gate-configuration.md
├── downloader
│   ├── download.go
│   ├── download_test.go
│   ├── head.go
│   └── reader_test.go
├── go.mod
├── go.sum
├── help.mk
├── integration_test.go
├── main.go
├── metrics
│   ├── metrics.go
│   ├── pprof.go
│   └── service.go
├── misc.go
├── resolver
│   ├── frostfs.go
│   └── resolver.go
├── response
│   └── utils.go
├── server.go
├── settings.go
├── tokens
│   ├── bearer-token.go
│   └── bearer-token_test.go
├── uploader
│   ├── filter.go
│   ├── filter_test.go
│   ├── multipart.go
│   ├── multipart
│   │   └── multipart.go
│   ├── multipart_test.go
│   └── upload.go
└── utils
    ├── attributes.go
    ├── params.go
    └── util.go
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @alexvanin @KirillovDenis
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: community, triage
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
12 | ## Expected Behavior
13 |
14 |
15 |
16 | ## Current Behavior
17 |
18 |
19 |
20 | ## Possible Solution
21 |
22 |
23 |
24 | ## Steps to Reproduce (for bugs)
25 |
26 |
27 |
28 | 1.
29 | 2.
30 | 3.
31 | 4.
32 |
33 | ## Context
34 |
35 |
36 |
37 | ## Regression
38 |
39 |
40 |
41 | ## Your Environment
42 |
43 | * Version used:
44 | * Server setup and configuration:
45 | * Operating System and version (`uname -a`):
46 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: community, triage
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
71 |
--------------------------------------------------------------------------------
/.github/workflows/builds.yml:
--------------------------------------------------------------------------------
1 | name: Builds
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - master
7 | - 'support/*'
8 | types: [opened, synchronize]
9 | paths-ignore:
10 | - '**/*.md'
11 |
12 | jobs:
13 | build_cli:
14 | name: Build CLI
15 | runs-on: ubuntu-20.04
16 |
17 | steps:
18 | - uses: actions/checkout@v2
19 | with:
20 | fetch-depth: 0
21 |
22 | - name: Setup Go
23 | uses: actions/setup-go@v2
24 | with:
25 | go-version: 1.19
26 |
27 | - name: Restore Go modules from cache
28 | uses: actions/cache@v2
29 | with:
30 | path: /home/runner/go/pkg/mod
31 | key: deps-${{ hashFiles('go.sum') }}
32 |
33 | - name: Update Go modules
34 | run: make dep
35 |
36 | - name: Build CLI
37 | run: make
38 |
39 | - name: Check version
40 | run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi
41 |
42 | build_image:
43 | needs: build_cli
44 | name: Build Docker image
45 | runs-on: ubuntu-20.04
46 |
47 | steps:
48 | - uses: actions/checkout@v2
49 | with:
50 | fetch-depth: 0
51 |
52 | - name: Set up Go
53 | uses: actions/setup-go@v2
54 | with:
55 | go-version: 1.19
56 |
57 | - name: Restore Go modules from cache
58 | uses: actions/cache@v2
59 | with:
60 | path: /home/runner/go/pkg/mod
61 | key: deps-${{ hashFiles('go.sum') }}
62 |
63 | - name: Update Go modules
64 | run: make dep
65 |
66 | - name: Build Docker image
67 | run: make image
68 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ master, 'support/*' ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ master, 'support/*' ]
20 | schedule:
21 | - cron: '35 8 * * 1'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 |
28 | strategy:
29 | fail-fast: false
30 | matrix:
31 | language: [ 'go' ]
32 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
33 | # Learn more:
34 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
35 |
36 | steps:
37 | - name: Checkout repository
38 | uses: actions/checkout@v2
39 |
40 | # Initializes the CodeQL tools for scanning.
41 | - name: Initialize CodeQL
42 | uses: github/codeql-action/init@v2
43 | with:
44 | languages: ${{ matrix.language }}
45 | # If you wish to specify custom queries, you can do so here or in a config file.
46 | # By default, queries listed here will override any specified in a config file.
47 | # Prefix the list here with "+" to use these queries and those in the config file.
48 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
49 |
50 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
51 | # If this step fails, then you should remove it and run the build manually (see below)
52 | - name: Autobuild
53 | uses: github/codeql-action/autobuild@v2
54 |
55 | # ℹ️ Command-line programs to run using the OS shell.
56 | # 📚 https://git.io/JvXDl
57 |
58 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
59 | # and modify them (or add more) to build your code if your project
60 | # uses a compiled language
61 |
62 | #- run: |
63 | # make bootstrap
64 | # make release
65 |
66 | - name: Perform CodeQL Analysis
67 | uses: github/codeql-action/analyze@v2
68 |
--------------------------------------------------------------------------------
/.github/workflows/dco.yml:
--------------------------------------------------------------------------------
1 | name: DCO check
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - master
7 | - 'support/*'
8 |
9 | jobs:
10 | commits_check_job:
11 | runs-on: ubuntu-latest
12 | name: Commits Check
13 | steps:
14 | - name: Get PR Commits
15 | id: 'get-pr-commits'
16 | uses: tim-actions/get-pr-commits@master
17 | with:
18 | token: ${{ secrets.GITHUB_TOKEN }}
19 | - name: DCO Check
20 | uses: tim-actions/dco@master
21 | with:
22 | commits: ${{ steps.get-pr-commits.outputs.commits }}
23 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - master
7 | - 'support/*'
8 | types: [opened, synchronize]
9 | paths-ignore:
10 | - '**/*.md'
11 |
12 | jobs:
13 | lint:
14 | name: Lint
15 | runs-on: ubuntu-latest
16 |
17 | steps:
18 | - uses: actions/checkout@v2
19 | - name: golangci-lint
20 | uses: golangci/golangci-lint-action@v2
21 | with:
22 | version: latest
23 |
24 | cover:
25 | name: Coverage
26 | runs-on: ubuntu-20.04
27 |
28 | env:
29 | CGO_ENABLED: 1
30 | steps:
31 | - uses: actions/checkout@v2
32 | with:
33 | fetch-depth: 0
34 |
35 | - name: Set up Go
36 | uses: actions/setup-go@v2
37 | with:
38 | go-version: 1.19
39 |
40 | - name: Restore Go modules from cache
41 | uses: actions/cache@v2
42 | with:
43 | path: /home/runner/go/pkg/mod
44 | key: deps-${{ hashFiles('go.sum') }}
45 |
46 | - name: Update Go modules
47 | run: make dep
48 |
49 | - name: Test and write coverage profile
50 | run: make cover
51 |
52 | - name: Upload coverage results to Codecov
53 | uses: codecov/codecov-action@v1
54 | with:
55 | fail_ci_if_error: false
56 | path_to_write_report: ./coverage.txt
57 | verbose: true
58 |
59 | tests:
60 | name: Tests
61 | runs-on: ubuntu-20.04
62 | strategy:
63 | matrix:
64 | go_versions: [ '1.18', '1.19' ]
65 | fail-fast: false
66 | steps:
67 | - uses: actions/checkout@v2
68 | with:
69 | fetch-depth: 0
70 |
71 | - name: Set up Go
72 | uses: actions/setup-go@v2
73 | with:
74 | go-version: '${{ matrix.go_versions }}'
75 |
76 | - name: Restore Go modules from cache
77 | uses: actions/cache@v2
78 | with:
79 | path: /home/runner/go/pkg/mod
80 | key: deps-${{ hashFiles('go.sum') }}
81 |
82 | - name: Update Go modules
83 | run: make dep
84 |
85 | - name: Run tests
86 | run: make test
87 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | bin
3 | temp
4 | /plugins/
5 | /vendor/
6 |
7 | .test.env
8 | *~
9 | *.log
10 | test.sh
11 | testfile
12 | .blast.yml
13 | .frostfs-cli.yml
14 |
15 | .cache
16 |
17 | coverage.txt
18 | coverage.html
19 |
20 | # debhelpers
21 | **/.debhelper
22 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | # This file contains all available configuration options
2 | # with their default values.
3 |
4 | # options for analysis running
5 | run:
6 | # timeout for analysis, e.g. 30s, 5m, default is 1m
7 | timeout: 5m
8 |
9 | # include test files or not, default is true
10 | tests: true
11 |
12 | # output configuration options
13 | output:
14 | # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
15 | format: tab
16 |
17 | # all available settings of specific linters
18 | linters-settings:
19 | exhaustive:
20 | # indicates that switch statements are to be considered exhaustive if a
21 | # 'default' case is present, even if all enum members aren't listed in the
22 | # switch
23 | default-signifies-exhaustive: true
24 | govet:
25 | # report about shadowed variables
26 | check-shadowing: false
27 |
28 | linters:
29 | enable:
30 | # mandatory linters
31 | - govet
32 | - revive
33 |
34 | # some default golangci-lint linters
35 | - errcheck
36 | - gosimple
37 | - ineffassign
38 | - staticcheck
39 | - typecheck
40 | - unused
41 |
42 | # extra linters
43 | - exhaustive
44 | - godot
45 | - gofmt
46 | - whitespace
47 | - goimports
48 | disable-all: true
49 | fast: false
50 |
51 | issues:
52 | include:
53 | - EXC0002 # should have a comment
54 | - EXC0003 # test/Test ... consider calling this
55 | - EXC0004 # govet
56 | - EXC0005 # C-style breaks
57 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | This document outlines major changes between releases.
4 |
5 | ## [Unreleased]
6 |
7 | ### Added
8 | - Multiple configs support (TrueCloudLab#12)
9 |
10 | ### Changed
11 | - Update go version to 1.18 (TrueCloudLab#9)
12 | - Update neo-go to v0.101.0 (#8)
13 | - Update viper to v1.15.0 (#8)
14 |
15 | ## [0.26.0] - 2022-12-28
16 |
17 | ### Fixed
18 | - ENV config example (#236)
19 |
20 | ### Added
21 | - Support the `Date` header on upload (#214)
22 | - Available routes specification (#216)
23 | - Mention caching strategy in docs (#215)
24 | - Add error response on attribute duplicates (#221)
25 | - Multiple server listeners (#228)
26 |
27 | ### Removed
28 | - Deprecated linters (#239)
29 |
30 | ### Updating from v0.25.1
31 | Make sure your configuration is valid:
32 |
33 | If you configure the application using environment variables, change:
34 | * `HTTP_GW_LISTEN_ADDRESS` -> `HTTP_GW_SERVER_0_ADDRESS`
35 | * `HTTP_GW_TLS_CERT_FILE` -> `HTTP_GW_SERVER_0_TLS_CERT_FILE` (and set `HTTP_GW_SERVER_0_TLS_ENABLED=true`)
36 | * `HTTP_GW_TLS_KEY_FILE` -> `HTTP_GW_SERVER_0_TLS_KEY_FILE` (and set `HTTP_GW_SERVER_0_TLS_ENABLED=true`)
37 |
38 | If you configure the application using a `.yaml` file, change:
39 | * `listen_address` -> `server.0.address`
40 | * `tls.cert_file` -> `server.0.tls.cert_file` (and set `server.0.tls.enabled: true`)
41 | * `tls.key_file` -> `server.0.tls.key_file` (and set `server.0.tls.enabled: true`)
42 |
43 | ## [0.25.1] - 2022-11-30
44 |
45 | ### Fixed
46 | - Download zip archive when `FilePath` is invalid (#222)
47 | - Only one peer must be healthy to init pool (#233)
48 |
49 | ### Added
50 | - Debian packaging (#223)
51 | - Timeout for individual operations in streaming RPC (#234)
52 |
53 | ## [0.25.0] - 2022-10-31
54 |
55 | ### Added
56 | - Config reloading on SIGHUP (#200, #208)
57 | - Stop pool dial on SIGINT (#212)
58 | - Makefile help (#213)
59 |
60 | ### Changed
61 | - Update NeoFS error handling (#206)
62 | - GitHub actions updates (#205, #209)
63 | - Unified system attribute format for GET and HEAD (#213)
64 |
65 | ## [0.24.0] - 2022-09-14
66 |
67 | ### Fixed
68 | - Fix expiration epoch calculation (#198)
69 | - Fix panic on go1.19 (#188)
70 |
71 | ### Added
72 | - Exposure of pool metrics (#179, #194)
73 |
74 | ### Changed
75 | - Help doesn't print empty parameters (#186)
76 | - Update version calculation (#190, #199)
77 | - Update neofs-sdk-go (#196)
78 | - Update go version in CI and docker (#197, #202)
79 |
80 | ## [0.23.0] - 2022-08-02
81 |
82 | ### Added
83 | - New param to configure pool error threshold (#184)
84 |
85 | ### Changed
86 | - Pprof and prometheus metrics configuration (#171)
87 | - Drop GO111MODULES from builds (#182)
88 |
89 | ### Updating from v0.22.0
90 | 1. To enable pprof, use `pprof.enabled` instead of `pprof` in config.
91 | To enable prometheus metrics, use `prometheus.enabled` instead of `metrics` in config.
92 | If you are using the command line flags, you can skip this step.
93 |
94 | ## [0.22.0] - 2022-07-25
95 |
96 | ### Added
97 | - Default params documentation (#172)
98 | - Health metric (#175)
99 |
100 | ### Changed
101 | - Version output (#169)
102 | - Updated SDK Version (#178)
103 |
104 | ## [0.21.0] - 2022-06-20
105 |
106 | ### Fixed
107 | - Downloading ZIP archive using streaming (#163)
108 |
109 | ### Added
110 | - New make target to build app in docker (#159)
111 |
112 | ### Changed
113 | - Increased buffer size for file uploading (#148)
114 | - Updated linter version to v1.46.2 (#161)
115 | - Updated CodeQL version to v2 (#158)
116 |
117 |
118 | ## [0.20.0] - 2022-04-29
119 |
120 | ### Fixed
121 | - Get rid of data race on server shutdown (#145)
122 | - Improved English in docs and comments (#153)
123 | - Use `FilePath` to download zip (#150)
124 |
125 | ### Added
126 | - Support container name NNS resolving (#142)
127 |
128 | ### Changed
129 | - Updated docs (#133, #140)
130 | - Increased default read/write timeouts (#154)
131 | - Updated SDK (#137, #139)
132 | - Updated go version to 1.17 (#143)
133 | - Improved error messages (#144)
134 |
135 | ## [0.19.0] - 2022-03-16
136 |
137 | ### Fixed
138 | - Uploading object with zero payload (#122)
139 | - Different headers format in GET and HEAD (#125)
140 | - Fixed project name in docs (#120)
141 |
142 | ### Added
143 | - Support object attributes with spaces (#123)
144 |
145 | ### Changed
146 | - Updated fasthttp to v1.34.0 (#129)
147 | - Updated NeoFS SDK to v1.0.0-rc.3 (#126, #132)
148 | - Refactored content type detecting (#128)
149 |
150 |
151 | ## [0.18.0] - 2021-12-10
152 |
153 | ### Fixed
154 | - System headers format (#111)
155 |
156 | ### Added
157 | - Different formats to set object's expiration: in epoch, duration, timestamp,
158 | RFC3339 (#108)
159 | - Support of nodes priority (#115)
160 |
161 | ### Changed
162 | - Updated testcontainers dependency (#100)
163 |
164 | ## [0.17.0] - 2021-11-15
165 |
166 | Support of bulk file download with zip streams and various bug fixes.
167 |
168 | ### Fixed
169 | - Allow canonical `X-Attribute-Neofs-*` headers (#87)
170 | - Responses with error message now end with `\n` character (#105)
171 | - Application does not require all neofs endpoints to be healthy at start now
172 | (#103)
173 | - Application now tracks session token errors and recreates tokens in runtime
174 | (#95)
175 |
176 | ### Added
177 | - Integration tests with [all-in-one](https://github.com/nspcc-dev/neofs-aio/)
178 | test containers (#85, #94)
179 | - Bulk download support with zip streams (#92, #96)
180 |
181 | ## 0.16.1 (28 Jul 2021)
182 |
183 | New features:
184 | * logging requests (#77)
185 | * HEAD methods for download routes (#76)
186 |
187 | Improvements:
188 | * updated sdk-go dependency (#82)
189 |
190 | Bugs fixed:
191 | * wrong NotFound status was used (#30)
192 |
193 | ## 0.16.0 (29 Jun 2021)
194 |
195 | We update HTTP gateway with NEP-6 wallets support, YAML configuration files
196 | and small fixes.
197 |
198 | New features:
199 | * YAML configuration file (#71)
200 |
201 | Behavior changes:
202 | * gateway key needs to be stored in a proper NEP-6 wallet now, `-k` option is
203 | no longer available, see `-w` and `-a` (#68)
204 |
205 | Bugs fixed:
206 | * downloads were not streamed leading to excessive memory usage (#67)
207 | * Last-Modified header incorrectly used local time (#75)
208 |
209 | ## 0.15.2 (22 Jun 2021)
210 |
211 | New features:
212 | * Content-Type returned for object GET requests can now be taken from
213 | attributes (overriding autodetection, #65)
214 |
215 | Behavior changes:
216 | * grpc keepalive options can no longer be changed (#60)
217 |
218 | Improvements:
219 | * code refactoring (more reuse between different gateways, moved some code to
220 | sdk-go, #47, #46, #51, #62, #63)
221 | * documentation updates and fixes (#53, #49, #55, #59)
222 | * updated api-go dependency (#57)
223 |
224 | Bugs fixed:
225 | * `-k` option wasn't accepted for key although it was documented (#50)
226 |
227 | ## 0.15.1 (24 May 2021)
228 |
229 | This important release makes HTTP gateway compatible with NeoFS node version
230 | 0.20.0.
231 |
232 | Behavior changes:
233 | * neofs-api-go was updated to 1.26.1, which contains some incompatible
234 | changes in underlying components (#39, #44)
235 | * `neofs-http-gw` is consistently used now for repository, binary and image
236 | names (#43)
237 |
238 | Improvements:
239 | * minor code cleanups based on stricter set of linters (#41)
240 | * updated README (#42)
241 |
242 | ## 0.15.0 (30 Apr 2021)
243 |
244 | This is the first public release incorporating latest NeoFS protocol support
245 | and fixing some bugs.
246 |
247 | New features:
248 | * upload support (#14, #13, #29)
249 | * ephemeral keys (#26)
250 | * TLS server support (#28)
251 |
252 | Behavior changes:
253 | * node weights can now be specified as simple numbers instead of percentages
254 | and gateway will calculate the proportion automatically (#27)
255 | * attributes are converted now to `X-Attribute-*` headers when retrieving
256 | object from gate instead of `X-*` (#29)
257 |
258 | Improvements:
259 | * better Makefile (#16, #24, #33, #34)
260 | * updated documentation (#16, #29, #35, #36)
261 | * updated neofs-api-go to v1.25.0 (#17, #20)
262 | * updated fasthttp to v1.23.0+ (#17, #29)
263 | * refactoring, eliminating some dependencies (#20, #29)
264 |
265 | Bugs fixed:
266 | * gateway attempted to work with no NeoFS peers configured (#29)
267 | * some invalid headers could be sent for attributes using non-ASCII or
268 | non-printable characters (#29)
269 |
270 | ## Older versions
271 |
272 | Please refer to [Github
273 | releases](https://github.com/nspcc-dev/neofs-http-gw/releases/) for older
274 | releases.
275 |
276 | [0.17.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.16.1...v0.17.0
277 | [0.18.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.17.0...v0.18.0
278 | [0.19.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.18.0...v0.19.0
279 | [0.20.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.19.0...v0.20.0
280 | [0.21.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.20.0...v0.21.0
281 | [0.22.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.21.0...v0.22.0
282 | [0.23.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.22.0...v0.23.0
283 | [0.24.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.23.0...v0.24.0
284 | [0.25.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.24.0...v0.25.0
285 | [0.25.1]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.25.0...v0.25.1
286 | [0.26.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.25.1...v0.26.0
287 | [Unreleased]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.26.0...master
288 |
--------------------------------------------------------------------------------
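
The "Updating from v0.25.1" section of the changelog above boils down to moving the single listener settings under an indexed `server` list. A minimal before/after sketch in YAML, assuming only the key names listed in that section and in `config/config.yaml`:

```yaml
# v0.25.1 and earlier: single listener
listen_address: 0.0.0.0:443
tls:
  cert_file: /path/to/tls/cert
  key_file: /path/to/tls/key

# v0.26.0 and later: indexed server list
server:
  - address: 0.0.0.0:443
    tls:
      enabled: true               # must now be set explicitly
      cert_file: /path/to/tls/cert
      key_file: /path/to/tls/key
```
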
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contribution guide
2 |
3 | First, thank you for contributing! We love and encourage pull requests from
4 | everyone. Please follow the guidelines:
5 |
6 | - Check the open [issues](https://github.com/TrueCloudLab/frostfs-http-gw/issues) and
7 | [pull requests](https://github.com/TrueCloudLab/frostfs-http-gw/pulls) for existing
8 | discussions.
9 |
10 | - Open an issue first, to discuss a new feature or enhancement.
11 |
12 | - Write tests and make sure the test suite passes locally and on CI.
13 |
14 | - Open a pull request and reference the relevant issue(s).
15 |
16 | - Make sure your commits are logically separated and have good comments
17 | explaining the details of your change.
18 |
19 | - After receiving feedback, amend your commits or add new ones as
20 | appropriate.
21 |
22 | - **Have fun!**
23 |
24 | ## Development Workflow
25 |
26 | Start by forking the `frostfs-http-gw` repository, make changes in a branch and then
27 | send a pull request. We encourage pull requests to discuss code changes. Here
28 | are the steps in detail:
29 |
30 | ### Set up your GitHub Repository
31 | Fork [FrostFS HTTP Gateway
32 | upstream](https://github.com/TrueCloudLab/frostfs-http-gw/fork) source repository
33 | to your own personal repository. Copy the URL of your fork (you will need it for
34 | the `git clone` command below).
35 |
36 | ```sh
37 | $ git clone https://github.com/TrueCloudLab/frostfs-http-gw
38 | ```
39 |
40 | ### Set up git remote as ``upstream``
41 | ```sh
42 | $ cd frostfs-http-gw
43 | $ git remote add upstream https://github.com/TrueCloudLab/frostfs-http-gw
44 | $ git fetch upstream
45 | $ git merge upstream/master
46 | ...
47 | ```
48 |
49 | ### Create your feature branch
50 | Before making code changes, make sure you create a separate branch for these
51 | changes. You may find it convenient to name the branch in
52 | `type/issue-description` format, as in the example below.
53 |
54 | ```
55 | $ git checkout -b feature/123-something_awesome
56 | ```
57 |
58 | ### Test your changes
59 | After your code changes, make sure
60 |
61 | - To add test cases for the new code.
62 | - To run `make lint`
63 | - To squash your commits into a single commit or a series of logically separated
64 | commits run `git rebase -i`. It's okay to force update your pull request.
65 | - To verify that `make test` and `make all` complete successfully.
66 |
67 | ### Commit changes
68 | After verification, commit your changes. This is a [great
69 | post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
70 | messages. Try following this template:
71 |
72 | ```
73 | [#Issue] Summary
74 |
75 | Description
76 |
77 |
78 |
79 |
80 | ```
81 |
82 | ```
83 | $ git commit -am '[#123] Add some feature'
84 | ```
85 |
86 | ### Push to the branch
87 | Push your locally committed changes to the remote origin (your fork)
88 | ```
89 | $ git push origin feature/123-something_awesome
90 | ```
91 |
92 | ### Create a Pull Request
93 | Pull requests can be created via GitHub. Refer to [this
94 | document](https://help.github.com/articles/creating-a-pull-request/) for
95 | detailed steps on how to create a pull request. After a Pull Request gets peer
96 | reviewed and approved, it will be merged.
97 |
98 | ## DCO Sign off
99 |
100 | All authors to the project retain copyright to their work. However, to ensure
101 | that they are only submitting work that they have rights to, we require
102 | everyone to acknowledge this by signing their work.
103 |
104 | Any copyright notices in this repository should specify the authors as "the
105 | contributors".
106 |
107 | To sign your work, just add a line like this at the end of your commit message:
108 |
109 | ```
110 | Signed-off-by: Samii Sakisaka
111 | ```
112 |
113 | This can be easily done with the `--signoff` option to `git commit`.
114 |
115 | By doing this you state that you can certify the following (from [The Developer
116 | Certificate of Origin](https://developercertificate.org/)):
117 |
118 | ```
119 | Developer Certificate of Origin
120 | Version 1.1
121 |
122 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
123 | 1 Letterman Drive
124 | Suite D4700
125 | San Francisco, CA, 94129
126 |
127 | Everyone is permitted to copy and distribute verbatim copies of this
128 | license document, but changing it is not allowed.
129 |
130 |
131 | Developer's Certificate of Origin 1.1
132 |
133 | By making a contribution to this project, I certify that:
134 |
135 | (a) The contribution was created in whole or in part by me and I
136 | have the right to submit it under the open source license
137 | indicated in the file; or
138 |
139 | (b) The contribution is based upon previous work that, to the best
140 | of my knowledge, is covered under an appropriate open source
141 | license and I have the right under that license to submit that
142 | work with modifications, whether created in whole or in part
143 | by me, under the same open source license (unless I am
144 | permitted to submit under a different license), as indicated
145 | in the file; or
146 |
147 | (c) The contribution was provided directly to me by some other
148 | person who certified (a), (b) or (c) and I have not modified
149 | it.
150 |
151 | (d) I understand and agree that this project and the contribution
152 | are public and that a record of the contribution (including all
153 | personal information I submit with it, including my sign-off) is
154 | maintained indefinitely and may be redistributed consistent with
155 | this project or the open source license(s) involved.
156 | ```
157 |
--------------------------------------------------------------------------------
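
Putting the commit template and the DCO sign-off together, a hypothetical commit message (the issue number, summary and e-mail below are invented for illustration) would look like:

```
[#123] Add some feature

Describe what the change does and why it is needed,
referencing the issue where it was discussed.

Signed-off-by: Your Name <your.email@example.com>
```
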
/CREDITS.md:
--------------------------------------------------------------------------------
1 | # Credits
2 |
3 | In alphabetical order:
4 |
5 | - Alexey Vanin
6 | - Angira Kekteeva
7 | - Denis Kirillov
8 | - Evgeniy Kulikov
9 | - Pavel Korotkov
10 | - Roman Khimov
11 |
12 | # Contributors
13 |
14 | In chronological order:
15 |
16 | - Anatoly Bogatyrev
17 | - Stanislav Bogatyrev
18 | - Anastasia Prasolova
19 | - Leonard Liubich
20 | - Elizaveta Chichindaeva
21 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.19-alpine as basebuilder
2 | RUN apk add --update make bash ca-certificates
3 |
4 | FROM basebuilder as builder
5 | ENV GOGC off
6 | ENV CGO_ENABLED 0
7 | ARG BUILD=now
8 | ARG VERSION=dev
9 | ARG REPO=repository
10 | WORKDIR /src
11 | COPY . /src
12 |
13 | RUN make
14 |
15 | # Executable image
16 | FROM scratch
17 |
18 | WORKDIR /
19 |
20 | COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
21 | COPY --from=builder /src/bin/frostfs-http-gw /bin/frostfs-http-gw
22 |
23 | ENTRYPOINT ["/bin/frostfs-http-gw"]
24 |
--------------------------------------------------------------------------------
/Dockerfile.dirty:
--------------------------------------------------------------------------------
1 | FROM alpine
2 | RUN apk add --update --no-cache bash ca-certificates
3 |
4 | WORKDIR /
5 |
6 | COPY bin/frostfs-http-gw /bin/frostfs-http-gw
7 |
8 | CMD ["frostfs-http-gw"]
9 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | #!/usr/bin/make -f
2 |
3 | REPO ?= $(shell go list -m)
4 | VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
5 | GO_VERSION ?= 1.19
6 | LINT_VERSION ?= 1.49.0
7 | BUILD ?= $(shell date -u --iso=seconds)
8 |
9 | HUB_IMAGE ?= truecloudlab/frostfs-http-gw
10 | HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
11 |
12 | # List of binaries to build. For now just one.
13 | BINDIR = bin
14 | DIRS = $(BINDIR)
15 | BINS = $(BINDIR)/frostfs-http-gw
16 |
17 | .PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint version clean
18 |
19 | # .deb package versioning
20 | OS_RELEASE = $(shell lsb_release -cs)
21 | PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
22 | sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
23 | sed "s/-/~/")-${OS_RELEASE}
24 | .PHONY: debpackage debclean
25 |
26 | # Make all binaries
27 | all: $(BINS)
28 |
29 | $(BINS): $(DIRS) dep
30 | @echo "⇒ Build $@"
31 | CGO_ENABLED=0 \
32 | go build -v -trimpath \
33 | -ldflags "-X main.Version=$(VERSION)" \
34 | -o $@ ./
35 |
36 | $(DIRS):
37 | @echo "⇒ Ensure dir: $@"
38 | @mkdir -p $@
39 |
40 | # Pull go dependencies
41 | dep:
42 | @printf "⇒ Download requirements: "
43 | @CGO_ENABLED=0 \
44 | go mod download && echo OK
45 | @printf "⇒ Tidy requirements: "
46 | @CGO_ENABLED=0 \
47 | go mod tidy -v && echo OK
48 |
49 | # Run `make %` in Golang container, for more information run `make help.docker/%`
50 | docker/%:
51 | $(if $(filter $*,all $(BINS)), \
52 | @echo "=> Running 'make $*' in clean Docker environment" && \
53 | docker run --rm -t \
54 | -v `pwd`:/src \
55 | -w /src \
56 | -u `stat -c "%u:%g" .` \
57 | --env HOME=/src \
58 | golang:$(GO_VERSION) make $*,\
59 | @echo "supported docker targets: all $(BINS) lint")
60 |
61 | # Run tests
62 | test:
63 | @go test ./... -cover
64 |
65 | # Run tests with race detection and produce coverage output
66 | cover:
67 | @go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
68 | @go tool cover -html=coverage.txt -o coverage.html
69 |
70 | # Reformat code
71 | fmt:
72 | @echo "⇒ Processing gofmt check"
73 | @gofmt -s -w ./
74 |
75 | # Build clean Docker image
76 | image:
77 | @echo "⇒ Build FrostFS HTTP Gateway docker image "
78 | @docker build \
79 | --build-arg REPO=$(REPO) \
80 | --build-arg VERSION=$(VERSION) \
81 | --rm \
82 | -f Dockerfile \
83 | -t $(HUB_IMAGE):$(HUB_TAG) .
84 |
85 | # Push Docker image to the hub
86 | image-push:
87 | @echo "⇒ Publish image"
88 | @docker push $(HUB_IMAGE):$(HUB_TAG)
89 |
90 | # Build dirty Docker image
91 | dirty-image:
92 | @echo "⇒ Build FrostFS HTTP Gateway dirty docker image "
93 | @docker build \
94 | --build-arg REPO=$(REPO) \
95 | --build-arg VERSION=$(VERSION) \
96 | --rm \
97 | -f Dockerfile.dirty \
98 | -t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
99 |
100 | # Run linters
101 | lint:
102 | @golangci-lint --timeout=5m run
103 |
104 | # Run linters in Docker
105 | docker/lint:
106 | docker run --rm -it \
107 | -v `pwd`:/src \
108 | -u `stat -c "%u:%g" .` \
109 | --env HOME=/src \
110 | golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
111 |
112 | # Print version
113 | version:
114 | @echo $(VERSION)
115 |
116 | # Clean up
117 | clean:
118 | rm -rf vendor
119 | rm -rf $(BINDIR)
120 |
121 | # Package for Debian
122 | debpackage:
123 | dch --package frostfs-http-gw \
124 | --controlmaint \
125 | --newversion $(PKG_VERSION) \
126 | --distribution $(OS_RELEASE) \
127 | "Please see CHANGELOG.md for code changes for $(VERSION)"
128 | dpkg-buildpackage --no-sign -b
129 |
130 | debclean:
131 | dh clean
132 |
133 | include help.mk
134 |
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | v0.26.0
2 |
--------------------------------------------------------------------------------
/app.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "crypto/ecdsa"
6 | "fmt"
7 | "net/http"
8 | "os"
9 | "os/signal"
10 | "strconv"
11 | "sync"
12 | "syscall"
13 |
14 | "github.com/TrueCloudLab/frostfs-http-gw/downloader"
15 | "github.com/TrueCloudLab/frostfs-http-gw/metrics"
16 | "github.com/TrueCloudLab/frostfs-http-gw/resolver"
17 | "github.com/TrueCloudLab/frostfs-http-gw/response"
18 | "github.com/TrueCloudLab/frostfs-http-gw/uploader"
19 | "github.com/TrueCloudLab/frostfs-http-gw/utils"
20 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
21 | "github.com/TrueCloudLab/frostfs-sdk-go/user"
22 | "github.com/fasthttp/router"
23 | "github.com/nspcc-dev/neo-go/cli/flags"
24 | "github.com/nspcc-dev/neo-go/cli/input"
25 | "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
26 | "github.com/nspcc-dev/neo-go/pkg/util"
27 | "github.com/nspcc-dev/neo-go/pkg/wallet"
28 | "github.com/spf13/viper"
29 | "github.com/valyala/fasthttp"
30 | "go.uber.org/zap"
31 | )
32 |
33 | type (
34 | app struct {
35 | log *zap.Logger
36 | logLevel zap.AtomicLevel
37 | pool *pool.Pool
38 | owner *user.ID
39 | cfg *viper.Viper
40 | webServer *fasthttp.Server
41 | webDone chan struct{}
42 | resolver *resolver.ContainerResolver
43 | metrics *gateMetrics
44 | services []*metrics.Service
45 | settings *appSettings
46 | servers []Server
47 | }
48 |
49 | appSettings struct {
50 | Uploader *uploader.Settings
51 | Downloader *downloader.Settings
52 | }
53 |
54 | // App is an interface for the main gateway function.
55 | App interface {
56 | Wait()
57 | Serve(context.Context)
58 | }
59 |
60 | // Option is an application option.
61 | Option func(a *app)
62 |
63 | gateMetrics struct {
64 | logger *zap.Logger
65 | provider GateMetricsProvider
66 | mu sync.RWMutex
67 | enabled bool
68 | }
69 |
70 | GateMetricsProvider interface {
71 | SetHealth(int32)
72 | Unregister()
73 | }
74 | )
75 |
76 | // WithLogger returns Option to set a specific logger.
77 | func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
78 | return func(a *app) {
79 | if l == nil {
80 | return
81 | }
82 | a.log = l
83 | a.logLevel = lvl
84 | }
85 | }
86 |
87 | // WithConfig returns Option to use specific Viper configuration.
88 | func WithConfig(c *viper.Viper) Option {
89 | return func(a *app) {
90 | if c == nil {
91 | return
92 | }
93 | a.cfg = c
94 | }
95 | }
96 |
97 | func newApp(ctx context.Context, opt ...Option) App {
98 | var (
99 | key *ecdsa.PrivateKey
100 | err error
101 | )
102 |
103 | a := &app{
104 | log: zap.L(),
105 | cfg: viper.GetViper(),
106 | webServer: new(fasthttp.Server),
107 | webDone: make(chan struct{}),
108 | }
109 | for i := range opt {
110 | opt[i](a)
111 | }
112 |
113 | // -- setup FastHTTP server --
114 | 	a.webServer.Name = "frostfs-http-gw"
115 | a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
116 | a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
117 | a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
118 | a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
119 | a.webServer.DisableHeaderNamesNormalizing = true
120 | a.webServer.NoDefaultServerHeader = true
121 | a.webServer.NoDefaultContentType = true
122 | a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
123 | a.webServer.DisablePreParseMultipartForm = true
124 | a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
125 | // -- -- -- -- -- -- -- -- -- -- -- -- -- --
126 | key, err = getFrostFSKey(a)
127 | if err != nil {
128 | a.log.Fatal("failed to get frostfs credentials", zap.Error(err))
129 | }
130 |
131 | var owner user.ID
132 | user.IDFromKey(&owner, key.PublicKey)
133 | a.owner = &owner
134 |
135 | var prm pool.InitParameters
136 | prm.SetKey(key)
137 | prm.SetNodeDialTimeout(a.cfg.GetDuration(cfgConTimeout))
138 | prm.SetNodeStreamTimeout(a.cfg.GetDuration(cfgStreamTimeout))
139 | prm.SetHealthcheckTimeout(a.cfg.GetDuration(cfgReqTimeout))
140 | prm.SetClientRebalanceInterval(a.cfg.GetDuration(cfgRebalance))
141 | prm.SetErrorThreshold(a.cfg.GetUint32(cfgPoolErrorThreshold))
142 |
143 | for i := 0; ; i++ {
144 | address := a.cfg.GetString(cfgPeers + "." + strconv.Itoa(i) + ".address")
145 | weight := a.cfg.GetFloat64(cfgPeers + "." + strconv.Itoa(i) + ".weight")
146 | priority := a.cfg.GetInt(cfgPeers + "." + strconv.Itoa(i) + ".priority")
147 | if address == "" {
148 | break
149 | }
150 | if weight <= 0 { // unspecified or wrong
151 | weight = 1
152 | }
153 | if priority <= 0 { // unspecified or wrong
154 | priority = 1
155 | }
156 | prm.AddNode(pool.NewNodeParam(priority, address, weight))
157 | a.log.Info("add connection", zap.String("address", address),
158 | zap.Float64("weight", weight), zap.Int("priority", priority))
159 | }
160 |
161 | a.pool, err = pool.NewPool(prm)
162 | if err != nil {
163 | a.log.Fatal("failed to create connection pool", zap.Error(err))
164 | }
165 |
166 | err = a.pool.Dial(ctx)
167 | if err != nil {
168 | a.log.Fatal("failed to dial pool", zap.Error(err))
169 | }
170 |
171 | a.initAppSettings()
172 | a.initResolver()
173 | a.initMetrics()
174 |
175 | return a
176 | }
177 |
178 | func (a *app) initAppSettings() {
179 | a.settings = &appSettings{
180 | Uploader: &uploader.Settings{},
181 | Downloader: &downloader.Settings{},
182 | }
183 |
184 | a.updateSettings()
185 | }
186 |
187 | func (a *app) initResolver() {
188 | var err error
189 | a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
190 | if err != nil {
191 | a.log.Fatal("failed to create resolver", zap.Error(err))
192 | }
193 | }
194 |
195 | func (a *app) getResolverConfig() ([]string, *resolver.Config) {
196 | resolveCfg := &resolver.Config{
197 | FrostFS: resolver.NewFrostFSResolver(a.pool),
198 | RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
199 | }
200 |
201 | order := a.cfg.GetStringSlice(cfgResolveOrder)
202 | if resolveCfg.RPCAddress == "" {
203 | order = remove(order, resolver.NNSResolver)
204 | a.log.Warn(fmt.Sprintf("resolver '%s' won't be used since '%s' isn't provided", resolver.NNSResolver, cfgRPCEndpoint))
205 | }
206 |
207 | if len(order) == 0 {
208 | 		a.log.Info("container resolver will be disabled because 'resolve_order' is empty")
209 | }
210 |
211 | return order, resolveCfg
212 | }
213 |
214 | func (a *app) initMetrics() {
215 | gateMetricsProvider := metrics.NewGateMetrics(a.pool)
216 | a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
217 | }
218 |
219 | func newGateMetrics(logger *zap.Logger, provider GateMetricsProvider, enabled bool) *gateMetrics {
220 | if !enabled {
221 | logger.Warn("metrics are disabled")
222 | }
223 | return &gateMetrics{
224 | logger: logger,
225 | provider: provider,
226 | }
227 | }
228 |
229 | func (m *gateMetrics) SetEnabled(enabled bool) {
230 | if !enabled {
231 | m.logger.Warn("metrics are disabled")
232 | }
233 |
234 | m.mu.Lock()
235 | m.enabled = enabled
236 | m.mu.Unlock()
237 | }
238 |
239 | func (m *gateMetrics) SetHealth(status int32) {
240 | m.mu.RLock()
241 | if !m.enabled {
242 | m.mu.RUnlock()
243 | return
244 | }
245 | m.mu.RUnlock()
246 |
247 | m.provider.SetHealth(status)
248 | }
249 |
250 | func (m *gateMetrics) Shutdown() {
251 | m.mu.Lock()
252 | if m.enabled {
253 | m.provider.SetHealth(0)
254 | m.enabled = false
255 | }
256 | m.provider.Unregister()
257 | m.mu.Unlock()
258 | }
259 |
260 | func remove(list []string, element string) []string {
261 | for i, item := range list {
262 | if item == element {
263 | return append(list[:i], list[i+1:]...)
264 | }
265 | }
266 | return list
267 | }
268 |
269 | func getFrostFSKey(a *app) (*ecdsa.PrivateKey, error) {
270 | walletPath := a.cfg.GetString(cfgWalletPath)
271 |
272 | if len(walletPath) == 0 {
273 | a.log.Info("no wallet path specified, creating ephemeral key automatically for this run")
274 | key, err := keys.NewPrivateKey()
275 | if err != nil {
276 | return nil, err
277 | }
278 | return &key.PrivateKey, nil
279 | }
280 | w, err := wallet.NewWalletFromFile(walletPath)
281 | if err != nil {
282 | return nil, err
283 | }
284 |
285 | var password *string
286 | if a.cfg.IsSet(cfgWalletPassphrase) {
287 | pwd := a.cfg.GetString(cfgWalletPassphrase)
288 | password = &pwd
289 | }
290 |
291 | address := a.cfg.GetString(cfgWalletAddress)
292 |
293 | return getKeyFromWallet(w, address, password)
294 | }
295 |
296 | func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*ecdsa.PrivateKey, error) {
297 | var addr util.Uint160
298 | var err error
299 |
300 | if addrStr == "" {
301 | addr = w.GetChangeAddress()
302 | } else {
303 | addr, err = flags.ParseAddress(addrStr)
304 | if err != nil {
305 | return nil, fmt.Errorf("invalid address")
306 | }
307 | }
308 |
309 | acc := w.GetAccount(addr)
310 | if acc == nil {
311 | return nil, fmt.Errorf("couldn't find wallet account for %s", addrStr)
312 | }
313 |
314 | if password == nil {
315 | pwd, err := input.ReadPassword("Enter password > ")
316 | if err != nil {
317 | return nil, fmt.Errorf("couldn't read password")
318 | }
319 | password = &pwd
320 | }
321 |
322 | if err := acc.Decrypt(*password, w.Scrypt); err != nil {
323 | return nil, fmt.Errorf("couldn't decrypt account: %w", err)
324 | }
325 |
326 | return &acc.PrivateKey().PrivateKey, nil
327 | }
328 |
329 | func (a *app) Wait() {
330 | a.log.Info("starting application", zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
331 |
332 | a.setHealthStatus()
333 |
334 | <-a.webDone // wait for web-server to be stopped
335 | }
336 |
337 | func (a *app) setHealthStatus() {
338 | a.metrics.SetHealth(1)
339 | }
340 |
341 | func (a *app) Serve(ctx context.Context) {
342 | uploadRoutes := uploader.New(ctx, a.AppParams(), a.settings.Uploader)
343 | downloadRoutes := downloader.New(ctx, a.AppParams(), a.settings.Downloader)
344 |
345 | // Configure router.
346 | a.configureRouter(uploadRoutes, downloadRoutes)
347 |
348 | a.startServices()
349 | a.initServers(ctx)
350 |
351 | for i := range a.servers {
352 | go func(i int) {
353 | a.log.Info("starting server", zap.String("address", a.servers[i].Address()))
354 | if err := a.webServer.Serve(a.servers[i].Listener()); err != nil && err != http.ErrServerClosed {
355 | a.log.Fatal("listen and serve", zap.Error(err))
356 | }
357 | }(i)
358 | }
359 |
360 | sigs := make(chan os.Signal, 1)
361 | signal.Notify(sigs, syscall.SIGHUP)
362 |
363 | LOOP:
364 | for {
365 | select {
366 | case <-ctx.Done():
367 | break LOOP
368 | case <-sigs:
369 | a.configReload()
370 | }
371 | }
372 |
373 | a.log.Info("shutting down web server", zap.Error(a.webServer.Shutdown()))
374 |
375 | a.metrics.Shutdown()
376 | a.stopServices()
377 |
378 | close(a.webDone)
379 | }
380 |
381 | func (a *app) configReload() {
382 | a.log.Info("SIGHUP config reload started")
383 | if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
384 | 		a.log.Warn("failed to reload config because it's missing")
385 | return
386 | }
387 | if err := readInConfig(a.cfg); err != nil {
388 | a.log.Warn("failed to reload config", zap.Error(err))
389 | return
390 | }
391 |
392 | if lvl, err := getLogLevel(a.cfg); err != nil {
393 | a.log.Warn("log level won't be updated", zap.Error(err))
394 | } else {
395 | a.logLevel.SetLevel(lvl)
396 | }
397 |
398 | if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
399 | a.log.Warn("failed to update resolvers", zap.Error(err))
400 | }
401 |
402 | if err := a.updateServers(); err != nil {
403 | a.log.Warn("failed to reload server parameters", zap.Error(err))
404 | }
405 |
406 | a.stopServices()
407 | a.startServices()
408 |
409 | a.updateSettings()
410 |
411 | a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
412 | a.setHealthStatus()
413 |
414 | a.log.Info("SIGHUP config reload completed")
415 | }
416 |
417 | func (a *app) updateSettings() {
418 | a.settings.Uploader.SetDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
419 | a.settings.Downloader.SetZipCompression(a.cfg.GetBool(cfgZipCompression))
420 | }
421 |
422 | func (a *app) startServices() {
423 | pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
424 | pprofService := metrics.NewPprofService(a.log, pprofConfig)
425 | a.services = append(a.services, pprofService)
426 | go pprofService.Start()
427 |
428 | prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
429 | prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
430 | a.services = append(a.services, prometheusService)
431 | go prometheusService.Start()
432 | }
433 |
434 | func (a *app) stopServices() {
435 | ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout)
436 | defer cancel()
437 |
438 | for _, svc := range a.services {
439 | svc.ShutDown(ctx)
440 | }
441 | }
442 |
443 | func (a *app) configureRouter(uploadRoutes *uploader.Uploader, downloadRoutes *downloader.Downloader) {
444 | r := router.New()
445 | r.RedirectTrailingSlash = true
446 | r.NotFound = func(r *fasthttp.RequestCtx) {
447 | response.Error(r, "Not found", fasthttp.StatusNotFound)
448 | }
449 | r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
450 | response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
451 | }
452 | r.POST("/upload/{cid}", a.logger(uploadRoutes.Upload))
453 | a.log.Info("added path /upload/{cid}")
454 | r.GET("/get/{cid}/{oid}", a.logger(downloadRoutes.DownloadByAddress))
455 | r.HEAD("/get/{cid}/{oid}", a.logger(downloadRoutes.HeadByAddress))
456 | a.log.Info("added path /get/{cid}/{oid}")
457 | r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.DownloadByAttribute))
458 | r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.HeadByAttribute))
459 | a.log.Info("added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}")
460 | r.GET("/zip/{cid}/{prefix:*}", a.logger(downloadRoutes.DownloadZipped))
461 | a.log.Info("added path /zip/{cid}/{prefix}")
462 |
463 | a.webServer.Handler = r.Handler
464 | }
465 |
466 | func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
467 | return func(ctx *fasthttp.RequestCtx) {
468 | a.log.Info("request", zap.String("remote", ctx.RemoteAddr().String()),
469 | zap.ByteString("method", ctx.Method()),
470 | zap.ByteString("path", ctx.Path()),
471 | zap.ByteString("query", ctx.QueryArgs().QueryString()),
472 | zap.Uint64("id", ctx.ID()))
473 | h(ctx)
474 | }
475 | }
476 |
477 | func (a *app) AppParams() *utils.AppParams {
478 | return &utils.AppParams{
479 | Logger: a.log,
480 | Pool: a.pool,
481 | Owner: a.owner,
482 | Resolver: a.resolver,
483 | }
484 | }
485 |
486 | func (a *app) initServers(ctx context.Context) {
487 | serversInfo := fetchServers(a.cfg)
488 |
489 | a.servers = make([]Server, 0, len(serversInfo))
490 | for _, serverInfo := range serversInfo {
491 | fields := []zap.Field{
492 | zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
493 | zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
494 | }
495 | srv, err := newServer(ctx, serverInfo)
496 | if err != nil {
497 | a.log.Warn("failed to add server", append(fields, zap.Error(err))...)
498 | continue
499 | }
500 |
501 | a.servers = append(a.servers, srv)
502 | a.log.Info("add server", fields...)
503 | }
504 |
505 | if len(a.servers) == 0 {
506 | a.log.Fatal("no healthy servers")
507 | }
508 | }
509 |
510 | func (a *app) updateServers() error {
511 | serversInfo := fetchServers(a.cfg)
512 |
513 | var found bool
514 | for _, serverInfo := range serversInfo {
515 | index := a.serverIndex(serverInfo.Address)
516 | if index == -1 {
517 | continue
518 | }
519 |
520 | if serverInfo.TLS.Enabled {
521 | if err := a.servers[index].UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
522 | return fmt.Errorf("failed to update tls certs: %w", err)
523 | }
524 | }
525 | found = true
526 | }
527 |
528 | if !found {
529 | return fmt.Errorf("invalid servers configuration: no known server found")
530 | }
531 |
532 | return nil
533 | }
534 |
535 | func (a *app) serverIndex(address string) int {
536 | for i := range a.servers {
537 | if a.servers[i].Address() == address {
538 | return i
539 | }
540 | }
541 | return -1
542 | }
543 |
--------------------------------------------------------------------------------
/config/config.env:
--------------------------------------------------------------------------------
1 | # Wallet section.
2 |
3 | # Path to wallet.
4 | HTTP_GW_WALLET_PATH=/path/to/wallet.json
5 | # Account address. If omitted, the default one will be used.
6 | HTTP_GW_WALLET_ADDRESS=NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
7 | # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
8 | HTTP_GW_WALLET_PASSPHRASE=pwd
9 |
10 | # Enable metrics.
11 | HTTP_GW_PPROF_ENABLED=true
12 | HTTP_GW_PPROF_ADDRESS=localhost:8083
13 |
14 | HTTP_GW_PROMETHEUS_ENABLED=true
15 | HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084
16 |
17 | # Log level.
18 | HTTP_GW_LOGGER_LEVEL=debug
19 |
20 | HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
21 | HTTP_GW_SERVER_0_TLS_ENABLED=false
22 | HTTP_GW_SERVER_0_TLS_CERT_FILE=/path/to/tls/cert
23 | HTTP_GW_SERVER_0_TLS_KEY_FILE=/path/to/tls/key
24 | HTTP_GW_SERVER_1_ADDRESS=0.0.0.0:444
25 | HTTP_GW_SERVER_1_TLS_ENABLED=true
26 | HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
27 | HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
28 |
29 | # Nodes configuration.
30 | # This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
31 | # while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
32 | # for 10% of requests and the third node for 90% of requests.
33 |
34 | # Peer 1.
35 | # Endpoint.
36 | HTTP_GW_PEERS_0_ADDRESS=grpc://s01.frostfs.devenv:8080
37 | # As long as nodes with the same priority level are healthy,
38 | # nodes with other priorities are not used.
39 | # The lower the value, the higher the priority.
40 | HTTP_GW_PEERS_0_PRIORITY=1
41 | # Load distribution proportion for nodes with the same priority.
42 | HTTP_GW_PEERS_0_WEIGHT=1
43 | # Peer 2.
44 | HTTP_GW_PEERS_1_ADDRESS=grpc://s02.frostfs.devenv:8080
45 | HTTP_GW_PEERS_1_PRIORITY=2
46 | HTTP_GW_PEERS_1_WEIGHT=1
47 | # Peer 3.
48 | HTTP_GW_PEERS_2_ADDRESS=grpc://s03.frostfs.devenv:8080
49 | HTTP_GW_PEERS_2_PRIORITY=2
50 | HTTP_GW_PEERS_2_WEIGHT=9
51 |
52 | # Per-connection buffer size for requests' reading.
53 | # This also limits the maximum header size.
54 | HTTP_GW_WEB_READ_BUFFER_SIZE=4096
55 | # Per-connection buffer size for responses' writing.
56 | HTTP_GW_WEB_WRITE_BUFFER_SIZE=4096
57 | # ReadTimeout is the amount of time allowed to read
58 | # the full request including body. The connection's read
59 | # deadline is reset when the connection opens, or for
60 | # keep-alive connections after the first byte has been read.
61 | HTTP_GW_WEB_READ_TIMEOUT=10m
62 | # WriteTimeout is the maximum duration before timing out
63 | # writes of the response. It is reset after the request handler
64 | # has returned.
65 | HTTP_GW_WEB_WRITE_TIMEOUT=5m
66 | # StreamRequestBody enables request body streaming,
67 | # and calls the handler sooner when given body is
68 | # larger than the current limit.
69 | HTTP_GW_WEB_STREAM_REQUEST_BODY=true
70 | # Maximum request body size.
71 | # The server rejects requests with bodies exceeding this limit.
72 | HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE=4194304
73 |
74 | # RPC endpoint, required to use NNS container resolving.
75 | HTTP_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333
76 | # The order in which resolvers are used to find a container id by name.
77 | HTTP_GW_RESOLVE_ORDER="nns dns"
78 |
79 | # Create timestamp for object if it isn't provided by header.
80 | HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP=false
81 |
82 | # Timeout to dial node.
83 | HTTP_GW_CONNECT_TIMEOUT=5s
84 | # Timeout for individual operations in streaming RPC.
85 | HTTP_GW_STREAM_TIMEOUT=10s
86 | # Timeout to check node health during rebalance.
87 | HTTP_GW_REQUEST_TIMEOUT=5s
88 | # Interval to check nodes health.
89 | HTTP_GW_REBALANCE_TIMER=30s
90 | # The number of errors on connection after which the node is considered unhealthy.
91 | HTTP_GW_POOL_ERROR_THRESHOLD=100
92 |
93 | # Enable zip compression to download files by common prefix.
94 | HTTP_GW_ZIP_COMPRESSION=false
95 |
--------------------------------------------------------------------------------
/config/config.yaml:
--------------------------------------------------------------------------------
1 | wallet:
2 | path: /path/to/wallet.json # Path to wallet.
3 |   address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP # Account address. If omitted, the default one will be used.
4 | passphrase: pwd # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
5 |
6 | pprof:
7 | enabled: false # Enable pprof.
8 | address: localhost:8083
9 | prometheus:
10 | enabled: false # Enable metrics.
11 | address: localhost:8084
12 |
13 | logger:
14 | level: debug # Log level.
15 |
16 | server:
17 | - address: 0.0.0.0:8080
18 | tls:
19 | enabled: false
20 | cert_file: /path/to/cert
21 | key_file: /path/to/key
22 | - address: 0.0.0.0:8081
23 | tls:
24 | enabled: false
25 | cert_file: /path/to/cert
26 | key_file: /path/to/key
27 |
28 | # Nodes configuration.
29 | # This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
30 | # while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
31 | # for 10% of requests and the third node for 90% of requests.
32 | peers:
33 | 0:
34 | # Endpoint.
35 | address: grpc://s01.frostfs.devenv:8080
36 |
37 |     # As long as nodes with the same priority level are healthy,
38 |     # nodes with other priorities are not used.
39 | # The lower the value, the higher the priority.
40 | priority: 1
41 |
42 | # Load distribution proportion for nodes with the same priority.
43 | weight: 1
44 | 1:
45 | address: grpc://s02.frostfs.devenv:8080
46 | priority: 2
47 | weight: 1
48 | 2:
49 | address: grpc://s03.frostfs.devenv:8080
50 | priority: 2
51 | weight: 9
52 |
53 |
54 | web:
55 | # Per-connection buffer size for requests' reading.
56 | # This also limits the maximum header size.
57 | read_buffer_size: 4096
58 |
59 | # Per-connection buffer size for responses' writing.
60 | write_buffer_size: 4096
61 |
62 | # ReadTimeout is the amount of time allowed to read
63 | # the full request including body. The connection's read
64 | # deadline is reset when the connection opens, or for
65 | # keep-alive connections after the first byte has been read.
66 | read_timeout: 10m
67 |
68 | # WriteTimeout is the maximum duration before timing out
69 | # writes of the response. It is reset after the request handler
70 | # has returned.
71 | write_timeout: 5m
72 |
73 | # StreamRequestBody enables request body streaming,
74 | # and calls the handler sooner when the given body is
75 | # larger than the current limit.
76 | stream_request_body: true
77 |
78 | # Maximum request body size.
79 | # The server rejects requests with bodies exceeding this limit.
80 | max_request_body_size: 4194304
81 |
82 | # RPC endpoint required to resolve NNS container names.
83 | rpc_endpoint: http://morph-chain.frostfs.devenv:30333
84 | # The order in which resolvers are used to find a container ID by name.
85 | resolve_order:
86 | - nns
87 | - dns
88 |
89 | upload_header:
90 | use_default_timestamp: false # Create timestamp for object if it isn't provided by header.
91 |
92 | connect_timeout: 5s # Timeout to dial node.
93 | stream_timeout: 10s # Timeout for individual operations in streaming RPC.
94 | request_timeout: 5s # Timeout to check node health during rebalance.
95 | rebalance_timer: 30s # Interval to check nodes health.
96 | pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.
97 |
98 | zip:
99 | compression: false # Enable zip compression to download files by common prefix.
100 |
--------------------------------------------------------------------------------
/config/dir/pprof.yaml:
--------------------------------------------------------------------------------
1 | pprof:
2 | enabled: true
3 | address: localhost:8083
4 |
--------------------------------------------------------------------------------
/config/dir/prometheus.yaml:
--------------------------------------------------------------------------------
1 | prometheus:
2 | enabled: true
3 | address: localhost:8084
4 |
--------------------------------------------------------------------------------
/debian/changelog:
--------------------------------------------------------------------------------
1 | frostfs-http-gw (0.0.0) stable; urgency=medium
2 |
3 | * Please see CHANGELOG.md
4 |
5 | -- TrueCloudLab Wed, 24 Aug 2022 18:29:49 +0300
6 |
--------------------------------------------------------------------------------
/debian/control:
--------------------------------------------------------------------------------
1 | Source: frostfs-http-gw
2 | Section: frostfs
3 | Priority: optional
4 | Maintainer: TrueCloudLab
5 | Build-Depends: debhelper-compat (= 13), dh-sysuser, git, devscripts
6 | Standards-Version: 4.5.1
7 | Homepage: https://frostfs.info/
8 | Vcs-Git: https://github.com/TrueCloudLab/frostfs-http-gw.git
9 | Vcs-Browser: https://github.com/TrueCloudLab/frostfs-http-gw
10 |
11 | Package: frostfs-http-gw
12 | Architecture: any
13 | Depends: ${misc:Depends}
14 | Description: FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.
15 |
16 |
--------------------------------------------------------------------------------
/debian/copyright:
--------------------------------------------------------------------------------
1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
2 | Upstream-Name: frostfs-http-gw
3 | Upstream-Contact: tech@frostfs.info
4 | Source: https://github.com/TrueCloudLab/frostfs-http-gw
5 |
6 | Files: *
7 | Copyright: 2018-2022 NeoSPCC (@nspcc-dev), contributors of neofs-http-gw project
8 | (https://github.com/nspcc-dev/neofs-http-gw/blob/master/CREDITS.md)
9 | 2022 True Cloud Lab (@TrueCloudLab), contributors of frostfs-http-gw project
10 | (https://github.com/TrueCloudLab/frostfs-http-gw/blob/master/CREDITS.md)
11 |
12 |
13 | License: GPL-3
14 | This program is free software: you can redistribute it and/or modify it
15 | under the terms of the GNU General Public License as published
16 | by the Free Software Foundation; version 3.
17 |
18 | This program is distributed in the hope that it will be useful,
19 | but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 | General Public License for more details.
22 |
23 | You should have received a copy of the GNU General Public License
24 | along with this program or at /usr/share/common-licenses/GPL-3.
25 | If not, see <https://www.gnu.org/licenses/>.
26 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.dirs:
--------------------------------------------------------------------------------
1 | etc/frostfs
2 | srv/frostfs_cache
3 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.docs:
--------------------------------------------------------------------------------
1 | docs/gate-configuration.md
2 | README.md
3 | CREDITS.md
4 | CONTRIBUTING.md
5 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.examples:
--------------------------------------------------------------------------------
1 | config/*
2 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.install:
--------------------------------------------------------------------------------
1 | bin/frostfs-http-gw usr/bin
2 | config/config.yaml etc/frostfs/http
3 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.postinst:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # postinst script for frostfs-http-gw
3 | #
4 | # see: dh_installdeb(1)
5 |
6 | set -e
7 |
8 | # summary of how this script can be called:
9 | # * `configure'
10 | # * `abort-upgrade'
11 | # * `abort-remove' `in-favour'
12 | #
13 | # * `abort-remove'
14 | # * `abort-deconfigure' `in-favour'
15 | # `removing'
16 | #
17 | # for details, see https://www.debian.org/doc/debian-policy/ or
18 | # the debian-policy package
19 |
20 |
21 | case "$1" in
22 | configure)
23 | USERNAME=http
24 | id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /srv/frostfs_cache --system -M -U -c "FrostFS HTTP gateway" frostfs-$USERNAME
25 | if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
26 | chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
27 | chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yaml || true
28 | chmod -f 0750 /etc/frostfs/$USERNAME
29 | chmod -f 0640 /etc/frostfs/$USERNAME/config.yaml || true
30 | fi
31 | USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
32 | if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then
33 | chown -f frostfs-$USERNAME: $USERDIR
34 | fi
35 | ;;
36 |
37 | abort-upgrade|abort-remove|abort-deconfigure)
38 | ;;
39 |
40 | *)
41 | echo "postinst called with unknown argument \`$1'" >&2
42 | exit 1
43 | ;;
44 | esac
45 |
46 | # dh_installdeb will replace this with shell code automatically
47 | # generated by other debhelper scripts.
48 |
49 | #DEBHELPER#
50 |
51 | exit 0
52 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.postrm:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # postrm script for frostfs-http-gw
3 | #
4 | # see: dh_installdeb(1)
5 |
6 | set -e
7 |
8 | # summary of how this script can be called:
9 | # * `remove'
10 | # * `purge'
11 | # * `upgrade'
12 | # * `failed-upgrade'
13 | # * `abort-install'
14 | # * `abort-install'
15 | # * `abort-upgrade'
16 | # * `disappear'
17 | #
18 | # for details, see https://www.debian.org/doc/debian-policy/ or
19 | # the debian-policy package
20 |
21 |
22 | case "$1" in
23 | purge)
24 | rm -rf /srv/frostfs_cache
25 | ;;
26 |
27 | remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
28 | ;;
29 |
30 | *)
31 | echo "postrm called with unknown argument \`$1'" >&2
32 | exit 1
33 | ;;
34 | esac
35 |
36 | # dh_installdeb will replace this with shell code automatically
37 | # generated by other debhelper scripts.
38 |
39 | #DEBHELPER#
40 |
41 | exit 0
42 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.preinst:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # preinst script for frostfs-http-gw
3 | #
4 | # see: dh_installdeb(1)
5 |
6 | set -e
7 |
8 | # summary of how this script can be called:
9 | # * `install'
10 | # * `install'
11 | # * `upgrade'
12 | # * `abort-upgrade'
13 | # for details, see https://www.debian.org/doc/debian-policy/ or
14 | # the debian-policy package
15 |
16 |
17 | case "$1" in
18 | install|upgrade)
19 | ;;
20 |
21 | abort-upgrade)
22 | ;;
23 |
24 | *)
25 | echo "preinst called with unknown argument \`$1'" >&2
26 | exit 1
27 | ;;
28 | esac
29 |
30 | # dh_installdeb will replace this with shell code automatically
31 | # generated by other debhelper scripts.
32 |
33 | #DEBHELPER#
34 |
35 | exit 0
36 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.prerm:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # prerm script for frostfs-http-gw
3 | #
4 | # see: dh_installdeb(1)
5 |
6 | set -e
7 |
8 | # summary of how this script can be called:
9 | # * `remove'
10 | # * `upgrade'
11 | # * `failed-upgrade'
12 | # * `remove' `in-favour'
13 | # * `deconfigure' `in-favour'
14 | # `removing'
15 | #
16 | # for details, see https://www.debian.org/doc/debian-policy/ or
17 | # the debian-policy package
18 |
19 |
20 | case "$1" in
21 | remove|upgrade|deconfigure)
22 | ;;
23 |
24 | failed-upgrade)
25 | ;;
26 |
27 | *)
28 | echo "prerm called with unknown argument \`$1'" >&2
29 | exit 1
30 | ;;
31 | esac
32 |
33 | # dh_installdeb will replace this with shell code automatically
34 | # generated by other debhelper scripts.
35 |
36 | #DEBHELPER#
37 |
38 | exit 0
39 |
--------------------------------------------------------------------------------
/debian/frostfs-http-gw.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=FrostFS HTTP Gateway
3 | Requires=network.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/bin/frostfs-http-gw --config /etc/frostfs/http/config.yaml
8 | User=frostfs-http
9 | Group=frostfs-http
10 | WorkingDirectory=/srv/frostfs_cache
11 | Restart=always
12 | RestartSec=5
13 | PrivateTmp=true
14 |
15 | [Install]
16 | WantedBy=multi-user.target
17 |
--------------------------------------------------------------------------------
/debian/rules:
--------------------------------------------------------------------------------
1 | #!/usr/bin/make -f
2 |
3 | # Do not try to strip Go binaries and do not run test
4 | export DEB_BUILD_OPTIONS := nostrip nocheck
5 | SERVICE = frostfs-http-gw
6 |
7 | %:
8 | dh $@
9 |
10 | override_dh_installsystemd:
11 | dh_installsystemd --no-enable --no-start $(SERVICE).service
12 |
13 | override_dh_installchangelogs:
14 | dh_installchangelogs -k CHANGELOG.md
15 |
16 |
17 |
--------------------------------------------------------------------------------
/debian/source/format:
--------------------------------------------------------------------------------
1 | 3.0 (quilt)
2 |
--------------------------------------------------------------------------------
/docs/api.md:
--------------------------------------------------------------------------------
1 | # HTTP Gateway Specification
2 |
3 | | Route | Description |
4 | |-------------------------------------------------|----------------------------------------------|
5 | | `/upload/{cid}` | [Put object](#put-object) |
6 | | `/get/{cid}/{oid}` | [Get object](#get-object) |
7 | | `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
8 | | `/zip/{cid}/{prefix}` | [Download objects in archive](#download-zip) |
9 |
10 | **Note:** `cid` parameter can be base58 encoded container ID or container name
11 | (the name must be registered in NNS, see appropriate section in [README](../README.md#nns)).
12 |
13 | Route parameters can be:
14 |
15 | * `Single` - match a single path segment (cannot contain `/` or be empty)
16 | * `Catch-All` - match everything (such a parameter is usually the last one in a route)
17 | * `Query` - regular query parameter
18 |
19 | ### Bearer token
20 |
21 | All routes can accept [bearer token](../README.md#authentication) from:
22 |
23 | * `Authorization` header with `Bearer` type and base64-encoded token in
24 | credentials field
25 | * `Bearer` cookie with base64-encoded token contents
26 |
27 | Example:
28 |
29 | Header:
30 |
31 | ```
32 | Authorization: Bearer ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
33 | ```
34 |
35 | Cookie:
36 |
37 | ```
38 | cookie: Bearer=ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
39 | ```
40 |
41 | ## Put object
42 |
43 | Route: `/upload/{cid}`
44 |
45 | | Route parameter | Type | Description |
46 | |-----------------|--------|---------------------------------------------------------|
47 | | `cid` | Single | Base58 encoded container ID or container name from NNS. |
48 |
49 | ### Methods
50 |
51 | #### POST
52 |
53 | Upload a file to FrostFS as an object with attributes.
54 |
55 | ##### Request
56 |
57 | ###### Headers
58 |
59 | | Header | Description |
60 | |-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
61 | | Common headers | See [bearer token](#bearer-token). |
62 | | `X-Attribute-Neofs-*` | Used to set system NeoFS object attributes (e.g. use "X-Attribute-Neofs-Expiration-Epoch" to set `__NEOFS__EXPIRATION_EPOCH` attribute). |
63 | | `X-Attribute-*`       | Used to set regular object attributes (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
64 | | `Date` | This header is used to calculate the right `__NEOFS__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
65 |
66 | There are some reserved headers of type `X-Attribute-Neofs-*` (listed in descending order of priority):
67 |
68 | 1. `X-Attribute-Neofs-Expiration-Epoch: 100`
69 | 2. `X-Attribute-Neofs-Expiration-Duration: 24h30m`
70 | 3. `X-Attribute-Neofs-Expiration-Timestamp: 1637574797`
71 | 4. `X-Attribute-Neofs-Expiration-RFC3339: 2021-11-22T09:55:49Z`
72 |
73 | All of them are transformed to `X-Attribute-Neofs-Expiration-Epoch`, so you can provide the expiration in any convenient way.
74 |
75 | If you don't specify the `X-Attribute-Timestamp` header, the `Timestamp` attribute can still be set
76 | (see http-gw [configuration](gate-configuration.md#upload-header-section)).
77 |
78 | The `X-Attribute-*` headers must be unique. If you provide several headers with the same name, only one of them will be used.
79 | Attribute keys and values must be valid UTF-8 strings. The total size of all attributes must not exceed 3 MB.
80 |
81 | ###### Body
82 |
83 | The body must contain a multipart form with a file.
84 | The `filename` field from the multipart form will be set as the `FileName` attribute of the object
85 | (it can be overridden by the `X-Attribute-FileName` header).
86 |
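For illustration only, a minimal upload request with `curl` might look like the following, assuming
the gateway listens on `localhost:8080`; the container ID is a placeholder and the multipart field
name `file` is an arbitrary choice here:

```shell
# Placeholder container ID; replace with your own container ID (or an NNS container name).
$ CID=HXSaMJXk2g8C14ht8HSi7BBaiYZ1HeWh2xnWPGQCg4H6
# Upload cat.jpg; the filename in the form becomes the FileName attribute,
# and X-Attribute-* headers become object attributes.
$ curl -F 'file=@cat.jpg;filename=cat.jpg' \
    -H 'X-Attribute-My-Tag: cats' \
    http://localhost:8080/upload/$CID
```
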
87 | ##### Response
88 |
89 | ###### Status codes
90 |
91 | | Status | Description |
92 | |--------|----------------------------------------------|
93 | | 200 | Object created successfully. |
94 | | 400 | Some error occurred during object uploading. |
95 |
96 | ## Get object
97 |
98 | Route: `/get/{cid}/{oid}?[download=true]`
99 |
100 | | Route parameter | Type | Description |
101 | |-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
102 | | `cid` | Single | Base58 encoded container ID or container name from NNS. |
103 | | `oid` | Single | Base58 encoded object ID. |
104 | | `download` | Query | Set the `Content-Disposition` header to `attachment` in the response. This makes the browser download the object as a file instead of showing it on the page. |
105 |
106 | ### Methods
107 |
108 | #### GET
109 |
110 | Get an object (payload and attributes) by an address.
111 |
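For illustration only (placeholder host and IDs), such a request can be made with `curl`; with
`download=true`, the `-OJ` flags save the payload under the filename from `Content-Disposition`:

```shell
# Placeholder container and object IDs.
$ CID=HXSaMJXk2g8C14ht8HSi7BBaiYZ1HeWh2xnWPGQCg4H6
$ OID=8N3o7Dtr6T1xLhuSHzB6JKpUHzVk5CSCno7WCWCmrPM6
# -O saves to a local file, -J takes the name from Content-Disposition.
$ curl -OJ "http://localhost:8080/get/$CID/$OID?download=true"
```
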
112 | ##### Request
113 |
114 | ###### Headers
115 |
116 | | Header | Description |
117 | |----------------|------------------------------------|
118 | | Common headers | See [bearer token](#bearer-token). |
119 |
120 | ##### Response
121 |
122 | ###### Headers
123 |
124 | | Header | Description |
125 | |-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
126 | | `X-Attribute-Neofs-*` | System NeoFS object attributes (e.g. `__NEOFS__EXPIRATION_EPOCH` sets the "X-Attribute-Neofs-Expiration-Epoch" header). |
127 | | `X-Attribute-*`       | Regular object attributes (e.g. `My-Tag` sets the "X-Attribute-My-Tag" header). |
128 | | `Content-Disposition` | Indicates how browsers should treat the file. `filename` is set to the base part of the `FileName` object attribute (empty if it isn't set). |
129 | | `Content-Type`        | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
130 | | `Content-Length`      | Size of the object payload. |
131 | | `Last-Modified`       | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC 7231, RFC 1123). |
132 | | `X-Owner-Id` | Base58 encoded owner ID. |
133 | | `X-Container-Id` | Base58 encoded container ID. |
134 | | `X-Object-Id` | Base58 encoded object ID. |
135 |
136 | ###### Status codes
137 |
138 | | Status | Description |
139 | |--------|------------------------------------------------|
140 | | 200 | Object received successfully. |
141 | | 400 | Some error occurred during object downloading. |
142 | | 404 | Container or object not found. |
143 |
144 | #### HEAD
145 |
146 | Get object attributes by address.
147 |
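For illustration only (placeholder host and IDs, reusing the variables from the GET example above),
attributes can be inspected with a HEAD request:

```shell
# -I sends a HEAD request; attributes are returned as X-Attribute-* headers.
$ curl -I "http://localhost:8080/get/$CID/$OID"
```
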
148 | ##### Request
149 |
150 | ###### Headers
151 |
152 | | Header | Description |
153 | |----------------|------------------------------------|
154 | | Common headers | See [bearer token](#bearer-token). |
155 |
156 | ##### Response
157 |
158 | ###### Headers
159 |
160 | | Header | Description |
161 | |-----------------------|--------------------------------------------------------------------------------------------------------------------------|
162 | | `X-Attribute-Neofs-*` | System NeoFS object attributes (e.g. `__NEOFS__EXPIRATION_EPOCH` sets the "X-Attribute-Neofs-Expiration-Epoch" header). |
163 | | `X-Attribute-*`       | Regular object attributes (e.g. `My-Tag` sets the "X-Attribute-My-Tag" header). |
164 | | `Content-Type`        | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
165 | | `Content-Length`      | Size of the object payload. |
166 | | `Last-Modified`       | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC 7231, RFC 1123). |
167 | | `X-Owner-Id` | Base58 encoded owner ID. |
168 | | `X-Container-Id` | Base58 encoded container ID. |
169 | | `X-Object-Id` | Base58 encoded object ID. |
170 |
171 | ###### Status codes
172 |
173 | | Status | Description |
174 | |--------|---------------------------------------------------|
175 | | 200 | Object HEAD request completed successfully. |
176 | | 400 | Some error occurred during object HEAD operation. |
177 | | 404 | Container or object not found. |
178 |
179 | ## Search object
180 |
181 | Route: `/get_by_attribute/{cid}/{attr_key}/{attr_val}?[download=true]`
182 |
183 | | Route parameter | Type | Description |
184 | |-----------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
185 | | `cid` | Single | Base58 encoded container ID or container name from NNS. |
186 | | `attr_key` | Single | Object attribute key to search. |
187 | | `attr_val` | Catch-All | Object attribute value to match. |
188 | | `download` | Query | Set the `Content-Disposition` header to `attachment` in the response. This makes the browser download the object as a file instead of showing it on the page. |
189 |
190 | ### Methods
191 |
192 | #### GET
193 |
194 | Find and get an object (payload and attributes) by a specific attribute.
195 | If more than one object is found, an arbitrary one will be returned.
196 |
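For illustration only (placeholder host, container ID, and attribute pair), a search request might
look like this:

```shell
# Placeholder container ID; attr_key/attr_val here are FileName/cat.jpg.
$ CID=HXSaMJXk2g8C14ht8HSi7BBaiYZ1HeWh2xnWPGQCg4H6
$ curl "http://localhost:8080/get_by_attribute/$CID/FileName/cat.jpg"
```
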
197 | ##### Request
198 |
199 | ###### Headers
200 |
201 | | Header | Description |
202 | |----------------|------------------------------------|
203 | | Common headers | See [bearer token](#bearer-token). |
204 |
205 | ##### Response
206 |
207 | ###### Headers
208 |
209 | | Header | Description |
210 | |-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
211 | | `X-Attribute-Neofs-*` | System NeoFS object attributes (e.g. `__NEOFS__EXPIRATION_EPOCH` sets the "X-Attribute-Neofs-Expiration-Epoch" header). |
212 | | `X-Attribute-*`       | Regular object attributes (e.g. `My-Tag` sets the "X-Attribute-My-Tag" header). |
213 | | `Content-Disposition` | Indicates how browsers should treat the file. `filename` is set to the base part of the `FileName` object attribute (empty if it isn't set). |
214 | | `Content-Type`        | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
215 | | `Content-Length`      | Size of the object payload. |
216 | | `Last-Modified`       | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC 7231, RFC 1123). |
217 | | `X-Owner-Id` | Base58 encoded owner ID. |
218 | | `X-Container-Id` | Base58 encoded container ID. |
219 | | `X-Object-Id` | Base58 encoded object ID. |
220 |
221 | ###### Status codes
222 |
223 | | Status | Description |
224 | |--------|------------------------------------------------|
225 | | 200 | Object received successfully. |
226 | | 400 | Some error occurred during object downloading. |
227 | | 404 | Container or object not found. |
228 |
229 | #### HEAD
230 |
231 | Get object attributes by a specific attribute.
232 | If more than one object is found, an arbitrary one will be used to get attributes.
233 |
234 | ##### Request
235 |
236 | ###### Headers
237 |
238 | | Header | Description |
239 | |----------------|------------------------------------|
240 | | Common headers | See [bearer token](#bearer-token). |
241 |
242 | ##### Response
243 |
244 | ###### Headers
245 |
246 | | Header | Description |
247 | |-----------------------|--------------------------------------------------------------------------------------------------------------------------|
248 | | `X-Attribute-Neofs-*` | System NeoFS object attributes (e.g. `__NEOFS__EXPIRATION_EPOCH` sets the "X-Attribute-Neofs-Expiration-Epoch" header). |
249 | | `X-Attribute-*`       | Regular object attributes (e.g. `My-Tag` sets the "X-Attribute-My-Tag" header). |
250 | | `Content-Type`        | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
251 | | `Content-Length`      | Size of the object payload. |
252 | | `Last-Modified`       | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC 7231, RFC 1123). |
253 | | `X-Owner-Id` | Base58 encoded owner ID. |
254 | | `X-Container-Id` | Base58 encoded container ID. |
255 | | `X-Object-Id` | Base58 encoded object ID. |
256 |
257 | ###### Status codes
258 |
259 | | Status | Description |
260 | |--------|---------------------------------------|
261 | | 200 | Object HEAD request completed successfully. |
262 | | 400 | Some error occurred during operation. |
263 | | 404 | Container or object not found. |
264 |
265 | ## Download zip
266 |
267 | Route: `/zip/{cid}/{prefix}`
268 |
269 | | Route parameter | Type | Description |
270 | |-----------------|-----------|---------------------------------------------------------|
271 | | `cid` | Single | Base58 encoded container ID or container name from NNS. |
272 | | `prefix` | Catch-All | Prefix for object attribute `FilePath` to match. |
273 |
274 | ### Methods
275 |
276 | #### GET
277 |
278 | Find objects whose `FilePath` attribute matches the given prefix and return them in a zip archive.
279 | File names in the archive are set to the `FilePath` attribute of the objects.
280 | File modification times are set to the time when the object download started.
281 | You can download all files in a container that have the `FilePath` attribute via the `/zip/{cid}/` route.
282 |
283 | The archive can be compressed (see http-gw [configuration](gate-configuration.md#zip-section)).
284 |
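For illustration only (placeholder host, container ID, and prefix), a request for a zip archive
might look like this:

```shell
# Placeholder container ID; downloads every object whose FilePath starts with "images/".
$ CID=HXSaMJXk2g8C14ht8HSi7BBaiYZ1HeWh2xnWPGQCg4H6
$ curl -o archive.zip "http://localhost:8080/zip/$CID/images/"
```
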
285 | ##### Request
286 |
287 | ###### Headers
288 |
289 | | Header | Description |
290 | |----------------|------------------------------------|
291 | | Common headers | See [bearer token](#bearer-token). |
292 |
293 | ##### Response
294 |
295 | ###### Headers
296 |
297 | | Header | Description |
298 | |-----------------------|-------------------------------------------------------------------------------------------------------------------|
299 | | `Content-Disposition` | Indicates how browsers should treat the file (`attachment`). `filename` is set to `archive.zip`. |
300 | | `Content-Type`        | Indicates the content type of the response. Set to `application/zip`. |
301 |
302 | ###### Status codes
303 |
304 | | Status | Description |
305 | |--------|-----------------------------------------------------|
306 | | 200 | Archive returned successfully. |
307 | | 400 | Some error occurred during object downloading. |
308 | | 404 | Container or objects not found. |
309 | | 500 | Some internal error (e.g. an error while streaming objects). |
310 |
--------------------------------------------------------------------------------
/docs/building-deb-package.md:
--------------------------------------------------------------------------------
1 | # Building Debian package on host
2 |
3 | ## Prerequisites
4 |
5 | For now, we're assuming building for Debian 11 (stable) x86_64.
6 |
7 | Go version 1.18.4 or later should already be installed, i.e. this runs
8 | successfully:
9 |
10 | * `make all`
11 |
12 | ## Installing packaging dependencies
13 |
14 | ```shell
15 | $ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
16 | ```
17 |
18 | Warning: the number of packages installed is pretty large considering dependencies.
19 |
20 | ## Package building
21 |
22 | ```shell
23 | $ make debpackage
24 | ```
25 |
26 | ## Leftovers cleaning
27 |
28 | ```shell
29 | $ make debclean
30 | ```
31 | or
32 | ```shell
33 | $ dh clean
34 | ```
35 |
36 | # Package versioning
37 |
38 | By default, the package version is based on the product version and may also contain git
39 | tags and hashes.
40 |
41 | The package version can be overridden by setting the `PKG_VERSION` variable before the
42 | build; Debian package versioning rules should be respected.
43 |
44 | ```shell
45 | $ PKG_VERSION=0.32.0 make debpackage
46 | ```
47 |
--------------------------------------------------------------------------------
/docs/gate-configuration.md:
--------------------------------------------------------------------------------
1 | # FrostFS HTTP Gateway configuration file
2 |
3 | This section contains a detailed description of the FrostFS HTTP Gateway configuration file,
4 | including default config values and some tips for setting configurable values.
5 |
6 | There are some custom types used for brevity:
7 |
8 | * `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` (
9 | milliseconds).
10 |
11 |
12 | # Reload on SIGHUP
13 |
14 | Some config values can be reloaded on the SIGHUP signal.
15 | Such parameters have a special mark in the tables below.
16 |
17 | You can send the SIGHUP signal to the app using the following command:
18 |
19 | ```shell
20 | $ kill -s SIGHUP <app_pid>
21 | ```
22 |
23 | Example:
24 |
25 | ```shell
26 | $ ./bin/frostfs-http-gw --config config.yaml &> http.log &
27 | [1] 998346
28 |
29 | $ cat http.log
30 | # ...
31 | 2022-10-03T09:37:25.826+0300 info frostfs-http-gw/app.go:332 starting application {"app_name": "frostfs-http-gw", "version": "v0.24.0"}
32 | # ...
33 |
34 | $ kill -s SIGHUP 998346
35 |
36 | $ cat http.log
37 | # ...
38 | 2022-10-03T09:38:16.205+0300 info frostfs-http-gw/app.go:470 SIGHUP config reload completed
39 | ```
40 |
41 | # Structure
42 |
43 | | Section | Description |
44 | |-----------------|-------------------------------------------------------|
45 | | no section | [General parameters](#general-section) |
46 | | `wallet` | [Wallet configuration](#wallet-section) |
47 | | `peers` | [Nodes configuration](#peers-section) |
48 | | `logger` | [Logger configuration](#logger-section) |
49 | | `web` | [Web configuration](#web-section) |
50 | | `server` | [Server configuration](#server-section) |
51 | | `upload-header` | [Upload header configuration](#upload-header-section) |
52 | | `zip` | [ZIP configuration](#zip-section) |
53 | | `pprof` | [Pprof configuration](#pprof-section) |
54 | | `prometheus` | [Prometheus configuration](#prometheus-section) |
55 |
56 |
57 | # General section
58 |
59 | ```yaml
60 | rpc_endpoint: http://morph-chain.frostfs.devenv:30333
61 | resolve_order:
62 | - nns
63 | - dns
64 |
65 | connect_timeout: 5s
66 | stream_timeout: 10s
67 | request_timeout: 5s
68 | rebalance_timer: 30s
69 | pool_error_threshold: 100
70 | ```
71 |
72 | | Parameter | Type | SIGHUP reload | Default value | Description |
73 | |------------------------|------------|---------------|----------------|------------------------------------------------------------------------------------|
74 | | `rpc_endpoint`         | `string`   | yes           |                | The address of the RPC host to which the gateway connects to resolve container names. |
75 | | `resolve_order`        | `[]string` | yes           | `[nns, dns]`   | Order of container name resolvers to use. |
76 | | `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
77 | | `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
78 | | `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
79 | | `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
80 | | `pool_error_threshold` | `uint32`   |               | `100`          | The number of connection errors after which the node is considered unhealthy. |
81 |
82 | # `wallet` section
83 |
84 | ```yaml
85 | wallet:
86 | path: /path/to/wallet.json
87 | address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
88 | passphrase: pwd
89 | ```
90 |
91 | | Parameter | Type | Default value | Description |
92 | |--------------|----------|---------------|--------------------------------------------------------------------------|
93 | | `path` | `string` | | Path to the wallet. |
94 | | `address`    | `string` |               | Account address to get from wallet. If omitted, the default one will be used. |
95 | | `passphrase` | `string` | | Passphrase to decrypt wallet. |
96 |
97 | # `peers` section
98 |
99 | ```yaml
100 | # Nodes configuration
101 | # This configuration makes the gateway use the first node (node1.frostfs:8080)
102 | # while it's healthy. Otherwise, the gateway uses the second node (node2.frostfs:8080)
103 | # for 10% of requests and the third node (node3.frostfs:8080) for 90% of requests.
104 | # As long as nodes with the same priority level are healthy,
105 | # nodes with other priorities are not used.
106 | # The lower the value, the higher the priority.
107 | peers:
108 | 0:
109 | address: node1.frostfs:8080
110 | priority: 1
111 | weight: 1
112 | 1:
113 | address: node2.frostfs:8080
114 | priority: 2
115 | weight: 0.1
116 | 2:
117 | address: node3.frostfs:8080
118 | priority: 2
119 | weight: 0.9
120 | ```
121 |
122 | | Parameter | Type | Default value | Description |
123 | |------------|----------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
124 | | `address` | `string` | | Address of storage node. |
125 | | `priority` | `int`    | `1`           | Allows grouping nodes: the gateway doesn't switch to another group until all nodes with the same priority become unhealthy. The lower the value, the higher the priority. |
126 | | `weight`   | `float`  | `1`           | Weight of the node within the group of the same priority. Requests are distributed among nodes proportionally to these values. |
127 |
128 | # `server` section
129 |
130 | You can specify several listeners for the server, for example, for `http` and `https`.
131 |
132 | ```yaml
133 | server:
134 | - address: 0.0.0.0:8080
135 | tls:
136 | enabled: false
137 | cert_file: /path/to/cert
138 | key_file: /path/to/key
139 | - address: 0.0.0.0:8081
140 | tls:
141 | enabled: true
142 | cert_file: /path/to/another/cert
143 | key_file: /path/to/another/key
144 | ```
145 |
146 | | Parameter | Type | SIGHUP reload | Default value | Description |
147 | |-----------------|----------|---------------|----------------|-----------------------------------------------|
148 | | `address` | `string` | | `0.0.0.0:8080` | The address that the gateway is listening on. |
149 | | `tls.enabled` | `bool` | | false | Enable TLS or not. |
150 | | `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
151 | | `tls.key_file` | `string` | yes | | Path to the key. |
152 |
153 |
154 | # `logger` section
155 |
156 | ```yaml
157 | logger:
158 | level: debug
159 | ```
160 |
161 | | Parameter | Type | SIGHUP reload | Default value | Description |
162 | |-----------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
163 | | `level`   | `string` | yes           | `debug`       | Logging level. Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
164 |
165 |
166 | # `web` section
167 |
168 | ```yaml
169 | web:
170 | read_buffer_size: 4096
171 | write_buffer_size: 4096
172 | read_timeout: 10m
173 | write_timeout: 5m
174 | stream_request_body: true
175 | max_request_body_size: 4194304
176 | ```
177 |
178 | | Parameter | Type | Default value | Description |
179 | |-------------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
180 | | `read_buffer_size` | `int` | `4096` | Per-connection buffer size for requests' reading. This also limits the maximum header size. |
181 | | `write_buffer_size` | `int` | `4096` | Per-connection buffer size for responses' writing. |
182 | | `read_timeout` | `duration` | `10m` | The amount of time allowed to read the full request including body. The connection's read deadline is reset when the connection opens, or for keep-alive connections after the first byte has been read. |
183 | | `write_timeout` | `duration` | `5m` | The maximum duration before timing out writes of the response. It is reset after the request handler has returned. |
184 | | `stream_request_body` | `bool` | `true` | Enables request body streaming, and calls the handler sooner when given body is larger than the current limit. |
185 | | `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. |
186 |
187 |
188 | # `upload-header` section
189 |
190 | ```yaml
191 | upload_header:
192 | use_default_timestamp: false
193 | ```
194 |
195 | | Parameter | Type | SIGHUP reload | Default value | Description |
196 | |-------------------------|--------|---------------|---------------|-------------------------------------------------------------|
197 | | `use_default_timestamp` | `bool` | yes | `false` | Create timestamp for object if it isn't provided by header. |
198 |
199 |
200 | # `zip` section
201 |
202 | ```yaml
203 | zip:
204 | compression: false
205 | ```
206 |
207 | | Parameter | Type | SIGHUP reload | Default value | Description |
208 | |---------------|--------|---------------|---------------|--------------------------------------------------------------|
209 | | `compression` | `bool` | yes           | `false`       | Enable zip compression when downloading files by a common prefix. |
210 |
211 |
212 | # `pprof` section
213 |
214 | Contains configuration for the `pprof` profiler.
215 |
216 | ```yaml
217 | pprof:
218 | enabled: true
219 | address: localhost:8083
220 | ```
221 |
222 | | Parameter | Type | SIGHUP reload | Default value | Description |
223 | |-----------|----------|---------------|------------------|-----------------------------------------|
224 | | `enabled` | `bool` | yes | `false` | Flag to enable the service. |
225 | | `address` | `string` | yes | `localhost:8083` | Address that service listener binds to. |
226 |
227 | # `prometheus` section
228 |
229 | Contains configuration for the `prometheus` metrics service.
230 |
231 | ```yaml
232 | prometheus:
233 | enabled: true
234 | address: localhost:8084
235 | ```
236 |
237 | | Parameter | Type | SIGHUP reload | Default value | Description |
238 | |-----------|----------|---------------|------------------|-----------------------------------------|
239 | | `enabled` | `bool` | yes | `false` | Flag to enable the service. |
240 | | `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. |
241 |
--------------------------------------------------------------------------------
/downloader/download.go:
--------------------------------------------------------------------------------
1 | package downloader
2 |
3 | import (
4 | "archive/zip"
5 | "bufio"
6 | "bytes"
7 | "context"
8 | "errors"
9 | "fmt"
10 | "io"
11 | "net/http"
12 | "net/url"
13 | "path"
14 | "strconv"
15 | "strings"
16 | "time"
17 | "unicode"
18 | "unicode/utf8"
19 |
20 | "github.com/TrueCloudLab/frostfs-http-gw/resolver"
21 | "github.com/TrueCloudLab/frostfs-http-gw/response"
22 | "github.com/TrueCloudLab/frostfs-http-gw/tokens"
23 | "github.com/TrueCloudLab/frostfs-http-gw/utils"
24 | "github.com/TrueCloudLab/frostfs-sdk-go/bearer"
25 | "github.com/TrueCloudLab/frostfs-sdk-go/client"
26 | "github.com/TrueCloudLab/frostfs-sdk-go/container"
27 | cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
28 | "github.com/TrueCloudLab/frostfs-sdk-go/object"
29 | oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
30 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
31 | "github.com/valyala/fasthttp"
32 | "go.uber.org/atomic"
33 | "go.uber.org/zap"
34 | )
35 |
36 | type request struct {
37 | *fasthttp.RequestCtx
38 | appCtx context.Context
39 | log *zap.Logger
40 | }
41 |
42 | func isValidToken(s string) bool {
43 | for _, c := range s {
44 | if c <= ' ' || c > 127 {
45 | return false
46 | }
47 | if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
48 | return false
49 | }
50 | }
51 | return true
52 | }
53 |
54 | func isValidValue(s string) bool {
55 | for _, c := range s {
56 | // The HTTP specification technically allows for more, but we don't want to escape things.
57 | if c < ' ' || c > 127 || c == '"' {
58 | return false
59 | }
60 | }
61 | return true
62 | }
63 |
64 | type readCloser struct {
65 | io.Reader
66 | io.Closer
67 | }
68 |
69 | // initializes io.Reader with the limited size and detects Content-Type from it.
70 | // Returns r's error directly. Also returns the processed data.
71 | func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
72 | if maxSize > sizeToDetectType {
73 | maxSize = sizeToDetectType
74 | }
75 |
76 | buf := make([]byte, maxSize) // maybe sync-pool the slice?
77 |
78 | r, err := rInit(maxSize)
79 | if err != nil {
80 | return "", nil, err
81 | }
82 |
83 | n, err := r.Read(buf)
84 | if err != nil && err != io.EOF {
85 | return "", nil, err
86 | }
87 |
88 | buf = buf[:n]
89 |
90 | return http.DetectContentType(buf), buf, err // to not lose io.EOF
91 | }
92 |
93 | func (r request) receiveFile(clnt *pool.Pool, objectAddress oid.Address) {
94 | var (
95 | err error
96 | dis = "inline"
97 | start = time.Now()
98 | filename string
99 | )
100 | if err = tokens.StoreBearerToken(r.RequestCtx); err != nil {
101 | r.log.Error("could not fetch and store bearer token", zap.Error(err))
102 | response.Error(r.RequestCtx, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
103 | return
104 | }
105 |
106 | var prm pool.PrmObjectGet
107 | prm.SetAddress(objectAddress)
108 | if btoken := bearerToken(r.RequestCtx); btoken != nil {
109 | prm.UseBearer(*btoken)
110 | }
111 |
112 | rObj, err := clnt.GetObject(r.appCtx, prm)
113 | if err != nil {
114 | r.handleFrostFSErr(err, start)
115 | return
116 | }
117 |
118 | // we can't close reader in this function, so how to do it?
119 |
120 | if r.Request.URI().QueryArgs().GetBool("download") {
121 | dis = "attachment"
122 | }
123 |
124 | payloadSize := rObj.Header.PayloadSize()
125 |
126 | r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
127 | var contentType string
128 | for _, attr := range rObj.Header.Attributes() {
129 | key := attr.Key()
130 | val := attr.Value()
131 | if !isValidToken(key) || !isValidValue(val) {
132 | continue
133 | }
134 | if strings.HasPrefix(key, utils.SystemAttributePrefix) {
135 | key = systemBackwardTranslator(key)
136 | }
137 | r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
138 | switch key {
139 | case object.AttributeFileName:
140 | filename = val
141 | case object.AttributeTimestamp:
142 | value, err := strconv.ParseInt(val, 10, 64)
143 | if err != nil {
144 | r.log.Info("couldn't parse creation date",
145 | zap.String("key", key),
146 | zap.String("val", val),
147 | zap.Error(err))
148 | continue
149 | }
150 | r.Response.Header.Set(fasthttp.HeaderLastModified,
151 | time.Unix(value, 0).UTC().Format(http.TimeFormat))
152 | case object.AttributeContentType:
153 | contentType = val
154 | }
155 | }
156 |
157 | idsToResponse(&r.Response, &rObj.Header)
158 |
159 | if len(contentType) == 0 {
160 | // determine the Content-Type from the payload head
161 | var payloadHead []byte
162 |
163 | contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
164 | return rObj.Payload, nil
165 | })
166 | if err != nil && err != io.EOF {
167 | r.log.Error("could not detect Content-Type from payload", zap.Error(err))
168 | response.Error(r.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
169 | return
170 | }
171 |
172 | // reset payload reader since a part of the data has been read
173 | var headReader io.Reader = bytes.NewReader(payloadHead)
174 |
175 | if err != io.EOF { // otherwise, we've already read full payload
176 | headReader = io.MultiReader(headReader, rObj.Payload)
177 | }
178 |
179 | // note: we could do with io.Reader, but SetBodyStream below closes body stream
180 | // if it implements io.Closer and that's useful for us.
181 | rObj.Payload = readCloser{headReader, rObj.Payload}
182 | }
183 | r.SetContentType(contentType)
184 |
185 | r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
186 |
187 | r.Response.SetBodyStream(rObj.Payload, int(payloadSize))
188 | }
189 |
190 | // systemBackwardTranslator is used to convert headers looking like '__NEOFS__ATTR_NAME' to 'Neofs-Attr-Name'.
191 | func systemBackwardTranslator(key string) string {
192 | // trim specified prefix '__NEOFS__'
193 | key = strings.TrimPrefix(key, utils.SystemAttributePrefix)
194 |
195 | var res strings.Builder
196 | res.WriteString("Neofs-")
197 |
198 | strs := strings.Split(key, "_")
199 | for i, s := range strs {
200 | s = title(strings.ToLower(s))
201 | res.WriteString(s)
202 | if i != len(strs)-1 {
203 | res.WriteString("-")
204 | }
205 | }
206 |
207 | return res.String()
208 | }
209 |
210 | func title(str string) string {
211 | if str == "" {
212 | return ""
213 | }
214 |
215 | r, size := utf8.DecodeRuneInString(str)
216 | r0 := unicode.ToTitle(r)
217 | return string(r0) + str[size:]
218 | }
219 |
220 | func bearerToken(ctx context.Context) *bearer.Token {
221 | if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
222 | return tkn
223 | }
224 | return nil
225 | }
226 |
227 | func (r *request) handleFrostFSErr(err error, start time.Time) {
228 | r.log.Error(
229 | "could not receive object",
230 | zap.Stringer("elapsed", time.Since(start)),
231 | zap.Error(err),
232 | )
233 |
234 | if client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err) {
235 | response.Error(r.RequestCtx, "Not Found", fasthttp.StatusNotFound)
236 | return
237 | }
238 |
239 | msg := fmt.Sprintf("could not receive object: %v", err)
240 | response.Error(r.RequestCtx, msg, fasthttp.StatusBadRequest)
241 | }
242 |
243 | // Downloader is a download request handler.
244 | type Downloader struct {
245 | appCtx context.Context
246 | log *zap.Logger
247 | pool *pool.Pool
248 | containerResolver *resolver.ContainerResolver
249 | settings *Settings
250 | }
251 |
252 | // Settings stores reloading parameters, so it has to provide atomic getters and setters.
253 | type Settings struct {
254 | zipCompression atomic.Bool
255 | }
256 |
257 | func (s *Settings) ZipCompression() bool {
258 | return s.zipCompression.Load()
259 | }
260 |
261 | func (s *Settings) SetZipCompression(val bool) {
262 | s.zipCompression.Store(val)
263 | }
264 |
265 | // New creates an instance of Downloader using specified options.
266 | func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Downloader {
267 | return &Downloader{
268 | appCtx: ctx,
269 | log: params.Logger,
270 | pool: params.Pool,
271 | settings: settings,
272 | containerResolver: params.Resolver,
273 | }
274 | }
275 |
276 | func (d *Downloader) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
277 | return &request{
278 | RequestCtx: ctx,
279 | appCtx: d.appCtx,
280 | log: log,
281 | }
282 | }
283 |
284 | // DownloadByAddress handles download requests using simple cid/oid format.
285 | func (d *Downloader) DownloadByAddress(c *fasthttp.RequestCtx) {
286 | d.byAddress(c, request.receiveFile)
287 | }
288 |
289 | // byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
290 | // prepares request and object address to it.
291 | func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
292 | var (
293 | idCnr, _ = c.UserValue("cid").(string)
294 | idObj, _ = c.UserValue("oid").(string)
295 | log = d.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
296 | )
297 |
298 | cnrID, err := utils.GetContainerID(d.appCtx, idCnr, d.containerResolver)
299 | if err != nil {
300 | log.Error("wrong container id", zap.Error(err))
301 | response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
302 | return
303 | }
304 |
305 | objID := new(oid.ID)
306 | if err = objID.DecodeString(idObj); err != nil {
307 | log.Error("wrong object id", zap.Error(err))
308 | response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
309 | return
310 | }
311 |
312 | var addr oid.Address
313 | addr.SetContainer(*cnrID)
314 | addr.SetObject(*objID)
315 |
316 | f(*d.newRequest(c, log), d.pool, addr)
317 | }
318 |
319 | // DownloadByAttribute handles attribute-based download requests.
320 | func (d *Downloader) DownloadByAttribute(c *fasthttp.RequestCtx) {
321 | d.byAttribute(c, request.receiveFile)
322 | }
323 |
324 | // byAttribute is a wrapper similar to byAddress.
325 | func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
326 | var (
327 | scid, _ = c.UserValue("cid").(string)
328 | key, _ = url.QueryUnescape(c.UserValue("attr_key").(string))
329 | val, _ = url.QueryUnescape(c.UserValue("attr_val").(string))
330 | log = d.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
331 | )
332 |
333 | containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
334 | if err != nil {
335 | log.Error("wrong container id", zap.Error(err))
336 | response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
337 | return
338 | }
339 |
340 | res, err := d.search(c, containerID, key, val, object.MatchStringEqual)
341 | if err != nil {
342 | log.Error("could not search for objects", zap.Error(err))
343 | response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
344 | return
345 | }
346 |
347 | defer res.Close()
348 |
349 | buf := make([]oid.ID, 1)
350 |
351 | n, err := res.Read(buf)
352 | if n == 0 {
353 | if errors.Is(err, io.EOF) {
354 | log.Error("object not found", zap.Error(err))
355 | response.Error(c, "object not found", fasthttp.StatusNotFound)
356 | return
357 | }
358 |
359 | log.Error("read object list failed", zap.Error(err))
360 | response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
361 | return
362 | }
363 |
364 | var addrObj oid.Address
365 | addrObj.SetContainer(*containerID)
366 | addrObj.SetObject(buf[0])
367 |
368 | f(*d.newRequest(c, log), d.pool, addrObj)
369 | }
370 |
371 | func (d *Downloader) search(c *fasthttp.RequestCtx, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
372 | filters := object.NewSearchFilters()
373 | filters.AddRootFilter()
374 | filters.AddFilter(key, val, op)
375 |
376 | var prm pool.PrmObjectSearch
377 | prm.SetContainerID(*cid)
378 | prm.SetFilters(filters)
379 | if btoken := bearerToken(c); btoken != nil {
380 | prm.UseBearer(*btoken)
381 | }
382 |
383 | return d.pool.SearchObjects(d.appCtx, prm)
384 | }
385 |
386 | func (d *Downloader) getContainer(cnrID cid.ID) (container.Container, error) {
387 | var prm pool.PrmContainerGet
388 | prm.SetContainerID(cnrID)
389 |
390 | return d.pool.GetContainer(d.appCtx, prm)
391 | }
392 |
393 | func (d *Downloader) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
394 | method := zip.Store
395 | if d.settings.ZipCompression() {
396 | method = zip.Deflate
397 | }
398 |
399 | filePath := getZipFilePath(obj)
400 | if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
401 | return nil, fmt.Errorf("invalid filepath '%s'", filePath)
402 | }
403 |
404 | return zw.CreateHeader(&zip.FileHeader{
405 | Name: filePath,
406 | Method: method,
407 | Modified: time.Now(),
408 | })
409 | }
410 |
411 | // DownloadZipped handles zip by prefix requests.
412 | func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
413 | scid, _ := c.UserValue("cid").(string)
414 | prefix, _ := url.QueryUnescape(c.UserValue("prefix").(string))
415 | log := d.log.With(zap.String("cid", scid), zap.String("prefix", prefix))
416 |
417 | containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
418 | if err != nil {
419 | log.Error("wrong container id", zap.Error(err))
420 | response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
421 | return
422 | }
423 |
424 | if err = tokens.StoreBearerToken(c); err != nil {
425 | log.Error("could not fetch and store bearer token", zap.Error(err))
426 | response.Error(c, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
427 | return
428 | }
429 |
430 | // check if container exists here to be able to return 404 error,
431 | // otherwise we get this error only in object iteration step
432 | // and client get 200 OK.
433 | if _, err = d.getContainer(*containerID); err != nil {
434 | log.Error("could not check container existence", zap.Error(err))
435 | if client.IsErrContainerNotFound(err) {
436 | response.Error(c, "Not Found", fasthttp.StatusNotFound)
437 | return
438 | }
439 | response.Error(c, "could not check container existence: "+err.Error(), fasthttp.StatusBadRequest)
440 | return
441 | }
442 |
443 | resSearch, err := d.search(c, containerID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
444 | if err != nil {
445 | log.Error("could not search for objects", zap.Error(err))
446 | response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
447 | return
448 | }
449 |
450 | c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
451 | c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
452 | c.Response.SetStatusCode(http.StatusOK)
453 |
454 | c.SetBodyStreamWriter(func(w *bufio.Writer) {
455 | defer resSearch.Close()
456 |
457 | zipWriter := zip.NewWriter(w)
458 |
459 | var bufZip []byte
460 | var addr oid.Address
461 |
462 | empty := true
463 | called := false
464 | btoken := bearerToken(c)
465 | addr.SetContainer(*containerID)
466 |
467 | errIter := resSearch.Iterate(func(id oid.ID) bool {
468 | called = true
469 |
470 | if empty {
471 | bufZip = make([]byte, 3<<20) // the same as for upload
472 | }
473 | empty = false
474 |
475 | addr.SetObject(id)
476 | if err = d.zipObject(zipWriter, addr, btoken, bufZip); err != nil {
477 | log.Error("failed to add object to archive", zap.String("oid", id.EncodeToString()), zap.Error(err))
478 | }
479 |
480 | return false
481 | })
482 | if errIter != nil {
483 | log.Error("iterating over selected objects failed", zap.Error(errIter))
484 | } else if !called {
485 | log.Error("objects not found")
486 | }
487 |
488 | if err = zipWriter.Close(); err != nil {
489 | log.Error("close zip writer", zap.Error(err))
490 | }
491 | })
492 | }
493 |
494 | func (d *Downloader) zipObject(zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
495 | var prm pool.PrmObjectGet
496 | prm.SetAddress(addr)
497 | if btoken != nil {
498 | prm.UseBearer(*btoken)
499 | }
500 |
501 | resGet, err := d.pool.GetObject(d.appCtx, prm)
502 | if err != nil {
503 | return fmt.Errorf("get FrostFS object: %v", err)
504 | }
505 |
506 | objWriter, err := d.addObjectToZip(zipWriter, &resGet.Header)
507 | if err != nil {
508 | return fmt.Errorf("zip create header: %v", err)
509 | }
510 |
511 | if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
512 | return fmt.Errorf("copy object payload to zip file: %v", err)
513 | }
514 |
515 | if err = resGet.Payload.Close(); err != nil {
516 | return fmt.Errorf("object body close error: %w", err)
517 | }
518 |
519 | if err = zipWriter.Flush(); err != nil {
520 | return fmt.Errorf("flush zip writer: %v", err)
521 | }
522 |
523 | return nil
524 | }
525 |
526 | func getZipFilePath(obj *object.Object) string {
527 | for _, attr := range obj.Attributes() {
528 | if attr.Key() == object.AttributeFilePath {
529 | return attr.Value()
530 | }
531 | }
532 |
533 | return ""
534 | }
535 |
--------------------------------------------------------------------------------
/downloader/download_test.go:
--------------------------------------------------------------------------------
1 | package downloader
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/require"
7 | )
8 |
9 | func TestSystemBackwardTranslator(t *testing.T) {
10 | input := []string{
11 | "__NEOFS__EXPIRATION_EPOCH",
12 | "__NEOFS__RANDOM_ATTR",
13 | }
14 | expected := []string{
15 | "Neofs-Expiration-Epoch",
16 | "Neofs-Random-Attr",
17 | }
18 |
19 | for i, str := range input {
20 | res := systemBackwardTranslator(str)
21 | require.Equal(t, expected[i], res)
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/downloader/head.go:
--------------------------------------------------------------------------------
1 | package downloader
2 |
3 | import (
4 | "io"
5 | "net/http"
6 | "strconv"
7 | "strings"
8 | "time"
9 |
10 | "github.com/TrueCloudLab/frostfs-http-gw/response"
11 | "github.com/TrueCloudLab/frostfs-http-gw/tokens"
12 | "github.com/TrueCloudLab/frostfs-http-gw/utils"
13 | "github.com/TrueCloudLab/frostfs-sdk-go/object"
14 | oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
15 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
16 | "github.com/valyala/fasthttp"
17 | "go.uber.org/zap"
18 | )
19 |
20 | // max bytes needed to detect content type according to http.DetectContentType docs.
21 | const sizeToDetectType = 512
22 |
23 | const (
24 | hdrObjectID = "X-Object-Id"
25 | hdrOwnerID = "X-Owner-Id"
26 | hdrContainerID = "X-Container-Id"
27 | )
28 |
29 | func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
30 | var start = time.Now()
31 | if err := tokens.StoreBearerToken(r.RequestCtx); err != nil {
32 | r.log.Error("could not fetch and store bearer token", zap.Error(err))
33 | response.Error(r.RequestCtx, "could not fetch and store bearer token", fasthttp.StatusBadRequest)
34 | return
35 | }
36 |
37 | btoken := bearerToken(r.RequestCtx)
38 |
39 | var prm pool.PrmObjectHead
40 | prm.SetAddress(objectAddress)
41 | if btoken != nil {
42 | prm.UseBearer(*btoken)
43 | }
44 |
45 | obj, err := clnt.HeadObject(r.appCtx, prm)
46 | if err != nil {
47 | r.handleFrostFSErr(err, start)
48 | return
49 | }
50 |
51 | r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
52 | var contentType string
53 | for _, attr := range obj.Attributes() {
54 | key := attr.Key()
55 | val := attr.Value()
56 | if !isValidToken(key) || !isValidValue(val) {
57 | continue
58 | }
59 | if strings.HasPrefix(key, utils.SystemAttributePrefix) {
60 | key = systemBackwardTranslator(key)
61 | }
62 | r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
63 | switch key {
64 | case object.AttributeTimestamp:
65 | value, err := strconv.ParseInt(val, 10, 64)
66 | if err != nil {
67 | r.log.Info("couldn't parse creation date",
68 | zap.String("key", key),
69 | zap.String("val", val),
70 | zap.Error(err))
71 | continue
72 | }
73 | r.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
74 | case object.AttributeContentType:
75 | contentType = val
76 | }
77 | }
78 |
79 | idsToResponse(&r.Response, &obj)
80 |
81 | if len(contentType) == 0 {
82 | contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
83 | var prmRange pool.PrmObjectRange
84 | prmRange.SetAddress(objectAddress)
85 | prmRange.SetLength(sz)
86 | if btoken != nil {
87 | prmRange.UseBearer(*btoken)
88 | }
89 |
90 | resObj, err := clnt.ObjectRange(r.appCtx, prmRange)
91 | if err != nil {
92 | return nil, err
93 | }
94 | return &resObj, nil
95 | })
96 | if err != nil && err != io.EOF {
97 | r.handleFrostFSErr(err, start)
98 | return
99 | }
100 | }
101 | r.SetContentType(contentType)
102 | }
103 |
104 | func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
105 | objID, _ := obj.ID()
106 | cnrID, _ := obj.ContainerID()
107 | resp.Header.Set(hdrObjectID, objID.String())
108 | resp.Header.Set(hdrOwnerID, obj.OwnerID().String())
109 | resp.Header.Set(hdrContainerID, cnrID.String())
110 | }
111 |
112 | // HeadByAddress handles head requests using simple cid/oid format.
113 | func (d *Downloader) HeadByAddress(c *fasthttp.RequestCtx) {
114 | d.byAddress(c, request.headObject)
115 | }
116 |
117 | // HeadByAttribute handles attribute-based head requests.
118 | func (d *Downloader) HeadByAttribute(c *fasthttp.RequestCtx) {
119 | d.byAttribute(c, request.headObject)
120 | }
121 |
--------------------------------------------------------------------------------
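
head.go exposes object metadata purely through response headers: X-Object-Id, X-Owner-Id and X-Container-Id from idsToResponse, X-Attribute-* for object attributes, Last-Modified derived from the Timestamp attribute, and Content-Type taken from the attribute or sniffed from the first 512 bytes of payload. The client-side sketch below is not part of the repository; the /get/{cid}/{oid} path is only assumed from the integration tests, since the actual routes are registered in app.go, which is not shown in this listing.

package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    // Placeholder address and IDs; route shape assumed from the integration tests.
    url := "http://localhost:8080/get/<container-id>/<object-id>"

    resp, err := http.Head(url)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    // Identifiers set by idsToResponse.
    fmt.Println("object:       ", resp.Header.Get("X-Object-Id"))
    fmt.Println("owner:        ", resp.Header.Get("X-Owner-Id"))
    fmt.Println("container:    ", resp.Header.Get("X-Container-Id"))

    // Derived from object attributes or, for Content-Type, payload sniffing.
    fmt.Println("size:         ", resp.Header.Get("Content-Length"))
    fmt.Println("content-type: ", resp.Header.Get("Content-Type"))
    fmt.Println("last-modified:", resp.Header.Get("Last-Modified"))
}
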
/downloader/reader_test.go:
--------------------------------------------------------------------------------
1 | package downloader
2 |
3 | import (
4 | "io"
5 | "strings"
6 | "testing"
7 |
8 | "github.com/stretchr/testify/require"
9 | )
10 |
11 | func TestDetector(t *testing.T) {
12 | txtContentType := "text/plain; charset=utf-8"
13 | sb := strings.Builder{}
14 | for i := 0; i < 10; i++ {
15 | sb.WriteString("Some txt content. Content-Type must be detected properly by detector.")
16 | }
17 |
18 | for _, tc := range []struct {
19 | Name string
20 | ContentType string
21 | Expected string
22 | }{
23 | {
24 | Name: "less than 512b",
25 | ContentType: txtContentType,
26 | Expected: sb.String()[:256],
27 | },
28 | {
29 | Name: "more than 512b",
30 | ContentType: txtContentType,
31 | Expected: sb.String(),
32 | },
33 | } {
34 | t.Run(tc.Name, func(t *testing.T) {
35 | contentType, data, err := readContentType(uint64(len(tc.Expected)),
36 | func(sz uint64) (io.Reader, error) {
37 | return strings.NewReader(tc.Expected), nil
38 | },
39 | )
40 |
41 | require.NoError(t, err)
42 | require.Equal(t, tc.ContentType, contentType)
43 | require.True(t, strings.HasPrefix(tc.Expected, string(data)))
44 | })
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
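
readContentType (tested above, defined elsewhere in the downloader package) hands at most sizeToDetectType bytes to the standard-library sniffer, which is why both test cases resolve to text/plain. A standalone illustration of that standard-library behaviour, independent of the gateway:

package main

import (
    "fmt"
    "net/http"
    "strings"
)

func main() {
    payload := strings.Repeat("Some txt content. Content-Type must be detected properly by detector.", 10)

    // Only the first 512 bytes reach the sniffer (sizeToDetectType in downloader/head.go).
    const sizeToDetectType = 512
    sample := payload
    if len(sample) > sizeToDetectType {
        sample = sample[:sizeToDetectType]
    }

    fmt.Println(http.DetectContentType([]byte(sample))) // text/plain; charset=utf-8
}
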
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/TrueCloudLab/frostfs-http-gw
2 |
3 | go 1.18
4 |
5 | require (
6 | github.com/TrueCloudLab/frostfs-api-go/v2 v2.0.0-20221212144048-1351b6656d68
7 | github.com/TrueCloudLab/frostfs-sdk-go v0.0.0-20230130120602-cf64ddfb143c
8 | github.com/fasthttp/router v1.4.1
9 | github.com/nspcc-dev/neo-go v0.101.0
10 | github.com/prometheus/client_golang v1.13.0
11 | github.com/spf13/pflag v1.0.5
12 | github.com/spf13/viper v1.15.0
13 | github.com/stretchr/testify v1.8.1
14 | github.com/testcontainers/testcontainers-go v0.13.0
15 | github.com/valyala/fasthttp v1.34.0
16 | go.uber.org/atomic v1.10.0
17 | go.uber.org/zap v1.24.0
18 | )
19 |
20 | require (
21 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
22 | github.com/Microsoft/go-winio v0.5.2 // indirect
23 | github.com/Microsoft/hcsshim v0.9.2 // indirect
24 | github.com/TrueCloudLab/frostfs-contract v0.0.0-20221213081248-6c805c1b4e42 // indirect
25 | github.com/TrueCloudLab/frostfs-crypto v0.5.0 // indirect
26 | github.com/TrueCloudLab/hrw v1.1.0 // indirect
27 | github.com/TrueCloudLab/rfc6979 v0.3.0 // indirect
28 | github.com/TrueCloudLab/tzhash v1.7.0 // indirect
29 | github.com/andybalholm/brotli v1.0.4 // indirect
30 | github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 // indirect
31 | github.com/beorn7/perks v1.0.1 // indirect
32 | github.com/cenkalti/backoff/v4 v4.1.3 // indirect
33 | github.com/cespare/xxhash/v2 v2.1.2 // indirect
34 | github.com/containerd/cgroups v1.0.3 // indirect
35 | github.com/containerd/containerd v1.6.2 // indirect
36 | github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
37 | github.com/davecgh/go-spew v1.1.1 // indirect
38 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
39 | github.com/docker/distribution v2.8.1+incompatible // indirect
40 | github.com/docker/docker v20.10.14+incompatible // indirect
41 | github.com/docker/go-connections v0.4.0 // indirect
42 | github.com/docker/go-units v0.4.0 // indirect
43 | github.com/fsnotify/fsnotify v1.6.0 // indirect
44 | github.com/gogo/protobuf v1.3.2 // indirect
45 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
46 | github.com/golang/protobuf v1.5.2 // indirect
47 | github.com/google/uuid v1.3.0 // indirect
48 | github.com/gorilla/mux v1.8.0 // indirect
49 | github.com/gorilla/websocket v1.4.2 // indirect
50 | github.com/hashicorp/golang-lru v0.6.0 // indirect
51 | github.com/hashicorp/hcl v1.0.0 // indirect
52 | github.com/klauspost/compress v1.15.0 // indirect
53 | github.com/magiconair/properties v1.8.7 // indirect
54 | github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
55 | github.com/mitchellh/mapstructure v1.5.0 // indirect
56 | github.com/moby/sys/mount v0.3.2 // indirect
57 | github.com/moby/sys/mountinfo v0.6.1 // indirect
58 | github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
59 | github.com/morikuni/aec v1.0.0 // indirect
60 | github.com/mr-tron/base58 v1.2.0 // indirect
61 | github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
62 | github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect
63 | github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
64 | github.com/opencontainers/go-digest v1.0.0 // indirect
65 | github.com/opencontainers/image-spec v1.0.2 // indirect
66 | github.com/opencontainers/runc v1.1.1 // indirect
67 | github.com/pelletier/go-toml/v2 v2.0.6 // indirect
68 | github.com/pkg/errors v0.9.1 // indirect
69 | github.com/pmezard/go-difflib v1.0.0 // indirect
70 | github.com/prometheus/client_model v0.2.0 // indirect
71 | github.com/prometheus/common v0.37.0 // indirect
72 | github.com/prometheus/procfs v0.8.0 // indirect
73 | github.com/russross/blackfriday/v2 v2.1.0 // indirect
74 | github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
75 | github.com/sirupsen/logrus v1.8.1 // indirect
76 | github.com/spaolacci/murmur3 v1.1.0 // indirect
77 | github.com/spf13/afero v1.9.3 // indirect
78 | github.com/spf13/cast v1.5.0 // indirect
79 | github.com/spf13/jwalterweatherman v1.1.0 // indirect
80 | github.com/subosito/gotenv v1.4.2 // indirect
81 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
82 | github.com/urfave/cli v1.22.5 // indirect
83 | github.com/valyala/bytebufferpool v1.0.0 // indirect
84 | go.opencensus.io v0.24.0 // indirect
85 | go.uber.org/multierr v1.9.0 // indirect
86 | golang.org/x/crypto v0.4.0 // indirect
87 | golang.org/x/exp v0.0.0-20221227203929-1b447090c38c // indirect
88 | golang.org/x/net v0.4.0 // indirect
89 | golang.org/x/sync v0.1.0 // indirect
90 | golang.org/x/sys v0.3.0 // indirect
91 | golang.org/x/term v0.3.0 // indirect
92 | golang.org/x/text v0.5.0 // indirect
93 | google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
94 | google.golang.org/grpc v1.52.0 // indirect
95 | google.golang.org/protobuf v1.28.1 // indirect
96 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
97 | gopkg.in/ini.v1 v1.67.0 // indirect
98 | gopkg.in/yaml.v3 v3.0.1 // indirect
99 | )
100 |
--------------------------------------------------------------------------------
/help.mk:
--------------------------------------------------------------------------------
1 | .PHONY: help
2 |
3 | # Show this help prompt
4 | help:
5 | @echo ' Usage:'
6 | @echo ''
7 | @echo ' make <target>'
8 | @echo ''
9 | @echo ' Targets:'
10 | @echo ''
11 | @awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9.%_/-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
12 |
13 | # Show help for docker/% IGNORE
14 | help.docker/%:
15 | $(eval TARGETS:=$(notdir all lint) ${BINS})
16 | @echo ' Usage:'
17 | @echo ''
18 | @echo ' make docker/% -- Run `make %` in Golang container'
19 | @echo ''
20 | @echo ' Supported docker targets:'
21 | @echo ''
22 | @$(foreach bin, $(TARGETS), echo ' ' $(bin);)
23 |
--------------------------------------------------------------------------------
/integration_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "archive/zip"
5 | "bytes"
6 | "context"
7 | "encoding/json"
8 | "fmt"
9 | "io"
10 | "mime/multipart"
11 | "net/http"
12 | "sort"
13 | "testing"
14 | "time"
15 |
16 | "github.com/TrueCloudLab/frostfs-sdk-go/container"
17 | "github.com/TrueCloudLab/frostfs-sdk-go/container/acl"
18 | cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
19 | "github.com/TrueCloudLab/frostfs-sdk-go/netmap"
20 | "github.com/TrueCloudLab/frostfs-sdk-go/object"
21 | oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
22 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
23 | "github.com/TrueCloudLab/frostfs-sdk-go/user"
24 | "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
25 | "github.com/spf13/viper"
26 | "github.com/stretchr/testify/require"
27 | "github.com/testcontainers/testcontainers-go"
28 | "github.com/testcontainers/testcontainers-go/wait"
29 | )
30 |
31 | type putResponse struct {
32 | CID string `json:"container_id"`
33 | OID string `json:"object_id"`
34 | }
35 |
36 | const (
37 | testContainerName = "friendly"
38 | versionWithNativeNames = "0.27.5"
39 | testListenAddress = "localhost:8082"
40 | testHost = "http://" + testListenAddress
41 | )
42 |
43 | func TestIntegration(t *testing.T) {
44 | rootCtx := context.Background()
45 | aioImage := "nspccdev/neofs-aio-testcontainer:"
46 | versions := []string{
47 | "0.29.0",
48 | "0.30.0",
49 | "0.32.0",
50 | "0.34.0",
51 | "latest",
52 | }
53 | key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
54 | require.NoError(t, err)
55 |
56 | var ownerID user.ID
57 | user.IDFromKey(&ownerID, key.PrivateKey.PublicKey)
58 |
59 | for _, version := range versions {
60 | ctx, cancel2 := context.WithCancel(rootCtx)
61 |
62 | aioContainer := createDockerContainer(ctx, t, aioImage+version)
63 | server, cancel := runServer()
64 | clientPool := getPool(ctx, t, key)
65 | CID, err := createContainer(ctx, t, clientPool, ownerID, version)
66 | require.NoError(t, err, version)
67 |
68 | t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
69 | t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
70 | t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
71 | t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
72 | t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
73 |
74 | cancel()
75 | server.Wait()
76 | err = aioContainer.Terminate(ctx)
77 | require.NoError(t, err)
78 | cancel2()
79 | }
80 | }
81 |
82 | func runServer() (App, context.CancelFunc) {
83 | cancelCtx, cancel := context.WithCancel(context.Background())
84 |
85 | v := getDefaultConfig()
86 | l, lvl := newLogger(v)
87 | application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
88 | go application.Serve(cancelCtx)
89 |
90 | return application, cancel
91 | }
92 |
93 | func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, version string) {
94 | url := testHost + "/upload/" + CID.String()
95 | makePutRequestAndCheck(ctx, t, p, CID, url)
96 |
97 | if version >= versionWithNativeNames {
98 | url = testHost + "/upload/" + testContainerName
99 | makePutRequestAndCheck(ctx, t, p, CID, url)
100 | }
101 | }
102 |
103 | func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
104 | content := "content of file"
105 | keyAttr, valAttr := "User-Attribute", "user value"
106 | attributes := map[string]string{
107 | object.AttributeFileName: "newFile.txt",
108 | keyAttr: valAttr,
109 | }
110 |
111 | var buff bytes.Buffer
112 | w := multipart.NewWriter(&buff)
113 | fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
114 | require.NoError(t, err)
115 | _, err = io.Copy(fw, bytes.NewBufferString(content))
116 | require.NoError(t, err)
117 | err = w.Close()
118 | require.NoError(t, err)
119 |
120 | request, err := http.NewRequest(http.MethodPost, url, &buff)
121 | require.NoError(t, err)
122 | request.Header.Set("Content-Type", w.FormDataContentType())
123 | request.Header.Set("X-Attribute-"+keyAttr, valAttr)
124 |
125 | resp, err := http.DefaultClient.Do(request)
126 | require.NoError(t, err)
127 |
128 | defer func() {
129 | err := resp.Body.Close()
130 | require.NoError(t, err)
131 | }()
132 |
133 | body, err := io.ReadAll(resp.Body)
134 | require.NoError(t, err)
135 |
136 | if resp.StatusCode != http.StatusOK {
137 | fmt.Println(string(body))
138 | }
139 | require.Equal(t, http.StatusOK, resp.StatusCode)
140 |
141 | addr := &putResponse{}
142 | err = json.Unmarshal(body, addr)
143 | require.NoError(t, err)
144 |
145 | err = cnrID.DecodeString(addr.CID)
146 | require.NoError(t, err)
147 |
148 | var id oid.ID
149 | err = id.DecodeString(addr.OID)
150 | require.NoError(t, err)
151 |
152 | var objectAddress oid.Address
153 | objectAddress.SetContainer(cnrID)
154 | objectAddress.SetObject(id)
155 |
156 | payload := bytes.NewBuffer(nil)
157 |
158 | var prm pool.PrmObjectGet
159 | prm.SetAddress(objectAddress)
160 |
161 | res, err := p.GetObject(ctx, prm)
162 | require.NoError(t, err)
163 |
164 | _, err = io.Copy(payload, res.Payload)
165 | require.NoError(t, err)
166 |
167 | require.Equal(t, content, payload.String())
168 |
169 | for _, attribute := range res.Header.Attributes() {
170 | require.Equal(t, attributes[attribute.Key()], attribute.Value())
171 | }
172 | }
173 |
174 | func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
175 | url := testHost + "/upload/" + CID.String()
176 |
177 | attr := "X-Attribute-User-Attribute"
178 | content := "content of file"
179 | valOne, valTwo := "first_value", "second_value"
180 | fileName := "newFile.txt"
181 |
182 | var buff bytes.Buffer
183 | w := multipart.NewWriter(&buff)
184 | fw, err := w.CreateFormFile("file", fileName)
185 | require.NoError(t, err)
186 | _, err = io.Copy(fw, bytes.NewBufferString(content))
187 | require.NoError(t, err)
188 | err = w.Close()
189 | require.NoError(t, err)
190 |
191 | request, err := http.NewRequest(http.MethodPost, url, &buff)
192 | require.NoError(t, err)
193 | request.Header.Set("Content-Type", w.FormDataContentType())
194 | request.Header.Add(attr, valOne)
195 | request.Header.Add(attr, valTwo)
196 |
197 | resp, err := http.DefaultClient.Do(request)
198 | require.NoError(t, err)
199 |
200 | defer func() {
201 | err := resp.Body.Close()
202 | require.NoError(t, err)
203 | }()
204 |
205 | body, err := io.ReadAll(resp.Body)
206 | require.NoError(t, err)
207 | require.Equal(t, "key duplication error: "+attr+"\n", string(body))
208 | require.Equal(t, http.StatusBadRequest, resp.StatusCode)
209 | }
210 |
211 | func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
212 | content := "content of file"
213 | attributes := map[string]string{
214 | "some-attr": "some-get-value",
215 | }
216 |
217 | id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
218 |
219 | resp, err := http.Get(testHost + "/get/" + CID.String() + "/" + id.String())
220 | require.NoError(t, err)
221 | checkGetResponse(t, resp, content, attributes)
222 |
223 | if version >= versionWithNativeNames {
224 | resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
225 | require.NoError(t, err)
226 | checkGetResponse(t, resp, content, attributes)
227 | }
228 | }
229 |
230 | func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
231 | defer func() {
232 | err := resp.Body.Close()
233 | require.NoError(t, err)
234 | }()
235 |
236 | data, err := io.ReadAll(resp.Body)
237 | require.NoError(t, err)
238 | require.Equal(t, content, string(data))
239 |
240 | for k, v := range attributes {
241 | require.Equal(t, v, resp.Header.Get("X-Attribute-"+k))
242 | }
243 | }
244 |
245 | func checkGetByAttrResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
246 | defer func() {
247 | err := resp.Body.Close()
248 | require.NoError(t, err)
249 | }()
250 |
251 | data, err := io.ReadAll(resp.Body)
252 | require.NoError(t, err)
253 | require.Equal(t, content, string(data))
254 |
255 | for k, v := range attributes {
256 | require.Equal(t, v, resp.Header.Get(k))
257 | }
258 | }
259 |
260 | func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
261 | keyAttr, valAttr := "some-attr", "some-get-by-attr-value"
262 | content := "content of file"
263 | attributes := map[string]string{keyAttr: valAttr}
264 |
265 | id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
266 |
267 | expectedAttr := map[string]string{
268 | "X-Attribute-" + keyAttr: valAttr,
269 | "x-object-id": id.String(),
270 | "x-container-id": CID.String(),
271 | }
272 |
273 | resp, err := http.Get(testHost + "/get_by_attribute/" + CID.String() + "/" + keyAttr + "/" + valAttr)
274 | require.NoError(t, err)
275 | checkGetByAttrResponse(t, resp, content, expectedAttr)
276 |
277 | if version >= versionWithNativeNames {
278 | resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
279 | require.NoError(t, err)
280 | checkGetByAttrResponse(t, resp, content, expectedAttr)
281 | }
282 | }
283 |
284 | func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
285 | names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
286 | contents := []string{"content of file1", "content of file2"}
287 | attributes1 := map[string]string{object.AttributeFilePath: names[0]}
288 | attributes2 := map[string]string{object.AttributeFilePath: names[1]}
289 |
290 | putObject(ctx, t, clientPool, ownerID, CID, contents[0], attributes1)
291 | putObject(ctx, t, clientPool, ownerID, CID, contents[1], attributes2)
292 |
293 | baseURL := testHost + "/zip/" + CID.String()
294 | makeZipTest(t, baseURL, names, contents)
295 |
296 | if version >= versionWithNativeNames {
297 | baseURL = testHost + "/zip/" + testContainerName
298 | makeZipTest(t, baseURL, names, contents)
299 | }
300 | }
301 |
302 | func makeZipTest(t *testing.T, baseURL string, names, contents []string) {
303 | url := baseURL + "/zipfolder"
304 | makeZipRequest(t, url, names, contents)
305 |
306 | // check nested folder
307 | url = baseURL + "/zipfolder/dir"
308 | makeZipRequest(t, url, names[:1], contents[:1])
309 | }
310 |
311 | func makeZipRequest(t *testing.T, url string, names, contents []string) {
312 | resp, err := http.Get(url)
313 | require.NoError(t, err)
314 | defer func() {
315 | err := resp.Body.Close()
316 | require.NoError(t, err)
317 | }()
318 |
319 | data, err := io.ReadAll(resp.Body)
320 | require.NoError(t, err)
321 | checkZip(t, data, int64(len(data)), names, contents)
322 | }
323 |
324 | func checkZip(t *testing.T, data []byte, length int64, names, contents []string) {
325 | readerAt := bytes.NewReader(data)
326 |
327 | zipReader, err := zip.NewReader(readerAt, length)
328 | require.NoError(t, err)
329 |
330 | require.Equal(t, len(names), len(zipReader.File))
331 |
332 | sort.Slice(zipReader.File, func(i, j int) bool {
333 | return zipReader.File[i].FileHeader.Name < zipReader.File[j].FileHeader.Name
334 | })
335 |
336 | for i, f := range zipReader.File {
337 | require.Equal(t, names[i], f.FileHeader.Name)
338 |
339 | rc, err := f.Open()
340 | require.NoError(t, err)
341 |
342 | all, err := io.ReadAll(rc)
343 | require.NoError(t, err)
344 | require.Equal(t, contents[i], string(all))
345 |
346 | err = rc.Close()
347 | require.NoError(t, err)
348 | }
349 | }
350 |
351 | func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
352 | req := testcontainers.ContainerRequest{
353 | Image: image,
354 | WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(30 * time.Second),
355 | Name: "aio",
356 | Hostname: "aio",
357 | NetworkMode: "host",
358 | }
359 | aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
360 | ContainerRequest: req,
361 | Started: true,
362 | })
363 | require.NoError(t, err)
364 |
365 | return aioC
366 | }
367 |
368 | func getDefaultConfig() *viper.Viper {
369 | v := settings()
370 | v.SetDefault(cfgPeers+".0.address", "localhost:8080")
371 | v.SetDefault(cfgPeers+".0.weight", 1)
372 | v.SetDefault(cfgPeers+".0.priority", 1)
373 |
374 | v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
375 | v.SetDefault("server.0.address", testListenAddress)
376 |
377 | return v
378 | }
379 |
380 | func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool {
381 | var prm pool.InitParameters
382 | prm.SetKey(&key.PrivateKey)
383 | prm.SetNodeDialTimeout(5 * time.Second)
384 | prm.AddNode(pool.NewNodeParam(1, "localhost:8080", 1))
385 |
386 | clientPool, err := pool.NewPool(prm)
387 | require.NoError(t, err)
388 |
389 | err = clientPool.Dial(ctx)
390 | require.NoError(t, err)
391 | return clientPool
392 | }
393 |
394 | func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) (cid.ID, error) {
395 | var policy netmap.PlacementPolicy
396 | err := policy.DecodeString("REP 1")
397 | require.NoError(t, err)
398 |
399 | var cnr container.Container
400 | cnr.Init()
401 | cnr.SetPlacementPolicy(policy)
402 | cnr.SetBasicACL(acl.PublicRWExtended)
403 | cnr.SetOwner(ownerID)
404 |
405 | container.SetCreationTime(&cnr, time.Now())
406 |
407 | if version >= versionWithNativeNames {
408 | var domain container.Domain
409 | domain.SetName(testContainerName)
410 | container.WriteDomain(&cnr, domain)
411 | }
412 |
413 | var waitPrm pool.WaitParams
414 | waitPrm.SetTimeout(15 * time.Second)
415 | waitPrm.SetPollInterval(3 * time.Second)
416 |
417 | var prm pool.PrmContainerPut
418 | prm.SetContainer(cnr)
419 | prm.SetWaitParams(waitPrm)
420 |
421 | CID, err := clientPool.PutContainer(ctx, prm)
422 | if err != nil {
423 | return cid.ID{}, err
424 | }
425 | fmt.Println(CID.String())
426 |
427 | return CID, err
428 | }
429 |
430 | func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, content string, attributes map[string]string) oid.ID {
431 | obj := object.New()
432 | obj.SetContainerID(CID)
433 | obj.SetOwnerID(&ownerID)
434 |
435 | var attrs []object.Attribute
436 | for key, val := range attributes {
437 | attr := object.NewAttribute()
438 | attr.SetKey(key)
439 | attr.SetValue(val)
440 | attrs = append(attrs, *attr)
441 | }
442 | obj.SetAttributes(attrs...)
443 |
444 | var prm pool.PrmObjectPut
445 | prm.SetHeader(*obj)
446 | prm.SetPayload(bytes.NewBufferString(content))
447 |
448 | id, err := clientPool.PutObject(ctx, prm)
449 | require.NoError(t, err)
450 |
451 | return id
452 | }
453 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "os/signal"
6 | "syscall"
7 | )
8 |
9 | func main() {
10 | globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
11 | v := settings()
12 | logger, atomicLevel := newLogger(v)
13 |
14 | application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
15 | go application.Serve(globalContext)
16 | application.Wait()
17 | }
18 |
--------------------------------------------------------------------------------
/metrics/metrics.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | import (
4 | "net/http"
5 |
6 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
7 | "github.com/prometheus/client_golang/prometheus"
8 | "github.com/prometheus/client_golang/prometheus/promhttp"
9 | "go.uber.org/zap"
10 | )
11 |
12 | const (
13 | namespace = "frostfs_http_gw"
14 | stateSubsystem = "state"
15 | poolSubsystem = "pool"
16 |
17 | methodGetBalance = "get_balance"
18 | methodPutContainer = "put_container"
19 | methodGetContainer = "get_container"
20 | methodListContainer = "list_container"
21 | methodDeleteContainer = "delete_container"
22 | methodGetContainerEacl = "get_container_eacl"
23 | methodSetContainerEacl = "set_container_eacl"
24 | methodEndpointInfo = "endpoint_info"
25 | methodNetworkInfo = "network_info"
26 | methodPutObject = "put_object"
27 | methodDeleteObject = "delete_object"
28 | methodGetObject = "get_object"
29 | methodHeadObject = "head_object"
30 | methodRangeObject = "range_object"
31 | methodCreateSession = "create_session"
32 | )
33 |
34 | type GateMetrics struct {
35 | stateMetrics
36 | poolMetricsCollector
37 | }
38 |
39 | type stateMetrics struct {
40 | healthCheck prometheus.Gauge
41 | }
42 |
43 | type poolMetricsCollector struct {
44 | pool *pool.Pool
45 | overallErrors prometheus.Gauge
46 | overallNodeErrors *prometheus.GaugeVec
47 | overallNodeRequests *prometheus.GaugeVec
48 | currentErrors *prometheus.GaugeVec
49 | requestDuration *prometheus.GaugeVec
50 | }
51 |
52 | // NewGateMetrics creates new metrics for the HTTP gateway.
53 | func NewGateMetrics(p *pool.Pool) *GateMetrics {
54 | stateMetric := newStateMetrics()
55 | stateMetric.register()
56 |
57 | poolMetric := newPoolMetricsCollector(p)
58 | poolMetric.register()
59 |
60 | return &GateMetrics{
61 | stateMetrics: *stateMetric,
62 | poolMetricsCollector: *poolMetric,
63 | }
64 | }
65 |
66 | func (g *GateMetrics) Unregister() {
67 | g.stateMetrics.unregister()
68 | prometheus.Unregister(&g.poolMetricsCollector)
69 | }
70 |
71 | func newStateMetrics() *stateMetrics {
72 | return &stateMetrics{
73 | healthCheck: prometheus.NewGauge(prometheus.GaugeOpts{
74 | Namespace: namespace,
75 | Subsystem: stateSubsystem,
76 | Name: "health",
77 | Help: "Current HTTP gateway state",
78 | }),
79 | }
80 | }
81 |
82 | func (m stateMetrics) register() {
83 | prometheus.MustRegister(m.healthCheck)
84 | }
85 |
86 | func (m stateMetrics) unregister() {
87 | prometheus.Unregister(m.healthCheck)
88 | }
89 |
90 | func (m stateMetrics) SetHealth(s int32) {
91 | m.healthCheck.Set(float64(s))
92 | }
93 |
94 | func newPoolMetricsCollector(p *pool.Pool) *poolMetricsCollector {
95 | overallErrors := prometheus.NewGauge(
96 | prometheus.GaugeOpts{
97 | Namespace: namespace,
98 | Subsystem: poolSubsystem,
99 | Name: "overall_errors",
100 | Help: "Total number of errors in pool",
101 | },
102 | )
103 |
104 | overallNodeErrors := prometheus.NewGaugeVec(
105 | prometheus.GaugeOpts{
106 | Namespace: namespace,
107 | Subsystem: poolSubsystem,
108 | Name: "overall_node_errors",
109 | Help: "Total number of errors for connection in pool",
110 | },
111 | []string{
112 | "node",
113 | },
114 | )
115 |
116 | overallNodeRequests := prometheus.NewGaugeVec(
117 | prometheus.GaugeOpts{
118 | Namespace: namespace,
119 | Subsystem: poolSubsystem,
120 | Name: "overall_node_requests",
121 | Help: "Total number of requests to specific node in pool",
122 | },
123 | []string{
124 | "node",
125 | },
126 | )
127 |
128 | currentErrors := prometheus.NewGaugeVec(
129 | prometheus.GaugeOpts{
130 | Namespace: namespace,
131 | Subsystem: poolSubsystem,
132 | Name: "current_errors",
133 | Help: "Number of errors on current connections that will be reset after the threshold",
134 | },
135 | []string{
136 | "node",
137 | },
138 | )
139 |
140 | requestsDuration := prometheus.NewGaugeVec(
141 | prometheus.GaugeOpts{
142 | Namespace: namespace,
143 | Subsystem: poolSubsystem,
144 | Name: "avg_request_duration",
145 | Help: "Average request duration (in milliseconds) for specific method on node in pool",
146 | },
147 | []string{
148 | "node",
149 | "method",
150 | },
151 | )
152 |
153 | return &poolMetricsCollector{
154 | pool: p,
155 | overallErrors: overallErrors,
156 | overallNodeErrors: overallNodeErrors,
157 | overallNodeRequests: overallNodeRequests,
158 | currentErrors: currentErrors,
159 | requestDuration: requestsDuration,
160 | }
161 | }
162 |
163 | func (m *poolMetricsCollector) Collect(ch chan<- prometheus.Metric) {
164 | m.updateStatistic()
165 | m.overallErrors.Collect(ch)
166 | m.overallNodeErrors.Collect(ch)
167 | m.overallNodeRequests.Collect(ch)
168 | m.currentErrors.Collect(ch)
169 | m.requestDuration.Collect(ch)
170 | }
171 |
172 | func (m *poolMetricsCollector) Describe(descs chan<- *prometheus.Desc) {
173 | m.overallErrors.Describe(descs)
174 | m.overallNodeErrors.Describe(descs)
175 | m.overallNodeRequests.Describe(descs)
176 | m.currentErrors.Describe(descs)
177 | m.requestDuration.Describe(descs)
178 | }
179 |
180 | func (m *poolMetricsCollector) register() {
181 | prometheus.MustRegister(m)
182 | }
183 |
184 | func (m *poolMetricsCollector) updateStatistic() {
185 | stat := m.pool.Statistic()
186 |
187 | m.overallNodeErrors.Reset()
188 | m.overallNodeRequests.Reset()
189 | m.currentErrors.Reset()
190 | m.requestDuration.Reset()
191 |
192 | for _, node := range stat.Nodes() {
193 | m.overallNodeErrors.WithLabelValues(node.Address()).Set(float64(node.OverallErrors()))
194 | m.overallNodeRequests.WithLabelValues(node.Address()).Set(float64(node.Requests()))
195 |
196 | m.currentErrors.WithLabelValues(node.Address()).Set(float64(node.CurrentErrors()))
197 | m.updateRequestsDuration(node)
198 | }
199 |
200 | m.overallErrors.Set(float64(stat.OverallErrors()))
201 | }
202 |
203 | func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
204 | m.requestDuration.WithLabelValues(node.Address(), methodGetBalance).Set(float64(node.AverageGetBalance().Milliseconds()))
205 | m.requestDuration.WithLabelValues(node.Address(), methodPutContainer).Set(float64(node.AveragePutContainer().Milliseconds()))
206 | m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
207 | m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
208 | m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
209 | m.requestDuration.WithLabelValues(node.Address(), methodGetContainerEacl).Set(float64(node.AverageGetContainerEACL().Milliseconds()))
210 | m.requestDuration.WithLabelValues(node.Address(), methodSetContainerEacl).Set(float64(node.AverageSetContainerEACL().Milliseconds()))
211 | m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
212 | m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
213 | m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))
214 | m.requestDuration.WithLabelValues(node.Address(), methodDeleteObject).Set(float64(node.AverageDeleteObject().Milliseconds()))
215 | m.requestDuration.WithLabelValues(node.Address(), methodGetObject).Set(float64(node.AverageGetObject().Milliseconds()))
216 | m.requestDuration.WithLabelValues(node.Address(), methodHeadObject).Set(float64(node.AverageHeadObject().Milliseconds()))
217 | m.requestDuration.WithLabelValues(node.Address(), methodRangeObject).Set(float64(node.AverageRangeObject().Milliseconds()))
218 | m.requestDuration.WithLabelValues(node.Address(), methodCreateSession).Set(float64(node.AverageCreateSession().Milliseconds()))
219 | }
220 |
221 | // NewPrometheusService creates a new service for gathering Prometheus metrics.
222 | func NewPrometheusService(log *zap.Logger, cfg Config) *Service {
223 | if log == nil {
224 | return nil
225 | }
226 |
227 | return &Service{
228 | Server: &http.Server{
229 | Addr: cfg.Address,
230 | Handler: promhttp.Handler(),
231 | },
232 | enabled: cfg.Enabled,
233 | serviceType: "Prometheus",
234 | log: log.With(zap.String("service", "Prometheus")),
235 | }
236 | }
237 |
--------------------------------------------------------------------------------
/metrics/pprof.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | import (
4 | "net/http"
5 | "net/http/pprof"
6 |
7 | "go.uber.org/zap"
8 | )
9 |
10 | // NewPprofService creates a new service for gathering pprof metrics.
11 | func NewPprofService(l *zap.Logger, cfg Config) *Service {
12 | handler := http.NewServeMux()
13 | handler.HandleFunc("/debug/pprof/", pprof.Index)
14 | handler.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
15 | handler.HandleFunc("/debug/pprof/profile", pprof.Profile)
16 | handler.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
17 | handler.HandleFunc("/debug/pprof/trace", pprof.Trace)
18 |
19 | // Manually add support for paths linked to by index page at /debug/pprof/
20 | for _, item := range []string{"allocs", "block", "heap", "goroutine", "mutex", "threadcreate"} {
21 | handler.Handle("/debug/pprof/"+item, pprof.Handler(item))
22 | }
23 |
24 | return &Service{
25 | Server: &http.Server{
26 | Addr: cfg.Address,
27 | Handler: handler,
28 | },
29 | enabled: cfg.Enabled,
30 | serviceType: "Pprof",
31 | log: l.With(zap.String("service", "Pprof")),
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/metrics/service.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | import (
4 | "context"
5 | "net/http"
6 |
7 | "go.uber.org/zap"
8 | )
9 |
10 | // Service serves metrics.
11 | type Service struct {
12 | *http.Server
13 | enabled bool
14 | log *zap.Logger
15 | serviceType string
16 | }
17 |
18 | // Config contains parameters to configure a metrics service.
19 | type Config struct {
20 | Address string
21 | Enabled bool
22 | }
23 |
24 | // Start runs the HTTP service and exposes its endpoint on the configured address.
25 | func (ms *Service) Start() {
26 | if ms.enabled {
27 | ms.log.Info("service is running", zap.String("endpoint", ms.Addr))
28 | err := ms.ListenAndServe()
29 | if err != nil && err != http.ErrServerClosed {
30 | ms.log.Warn("service couldn't start on configured port")
31 | }
32 | } else {
33 | ms.log.Info("service hasn't started since it's disabled")
34 | }
35 | }
36 |
37 | // ShutDown stops the service.
38 | func (ms *Service) ShutDown(ctx context.Context) {
39 | ms.log.Info("shutting down service", zap.String("endpoint", ms.Addr))
40 | err := ms.Shutdown(ctx)
41 | if err != nil {
42 | ms.log.Panic("can't shut down service")
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
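
The metrics package splits into collectors (GateMetrics with its state and pool metrics) and the HTTP Service that exposes them. How app.go wires these together is not shown in this listing, so the following is only a sketch of the exported API: the address matches the prometheus.address default in settings.go, Enabled mirrors the --metrics flag, and the pool and logger are assumed to be built elsewhere. Such a file would sit alongside app.go in package main, which already provides func main.

package main // hypothetical file alongside app.go

import (
    "context"
    "time"

    "github.com/TrueCloudLab/frostfs-http-gw/metrics"
    "github.com/TrueCloudLab/frostfs-sdk-go/pool"
    "go.uber.org/zap"
)

// runMetrics registers the gateway collectors and serves them until ctx is done.
// log must be non-nil (NewPrometheusService returns nil otherwise).
func runMetrics(ctx context.Context, log *zap.Logger, clientPool *pool.Pool) {
    gateMetrics := metrics.NewGateMetrics(clientPool)
    gateMetrics.SetHealth(1) // flips the frostfs_http_gw_state_health gauge
    defer gateMetrics.Unregister()

    svc := metrics.NewPrometheusService(log, metrics.Config{
        Address: "localhost:8084", // default prometheus.address from settings.go
        Enabled: true,
    })
    go svc.Start()

    <-ctx.Done()

    shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    svc.ShutDown(shutdownCtx)
}
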
/misc.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | // Prefix is a prefix used for environment variables containing gateway
4 | // configuration.
5 | const Prefix = "HTTP_GW"
6 |
7 | var (
8 | // Version is the gateway version.
9 | Version = "dev"
10 | )
11 |
--------------------------------------------------------------------------------
/resolver/frostfs.go:
--------------------------------------------------------------------------------
1 | package resolver
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 |
8 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
9 | )
10 |
11 | // FrostFSResolver represents a virtual connection to the FrostFS network.
12 | // It implements resolver.FrostFS.
13 | type FrostFSResolver struct {
14 | pool *pool.Pool
15 | }
16 |
17 | // NewFrostFSResolver creates a new FrostFSResolver using the provided pool.Pool.
18 | func NewFrostFSResolver(p *pool.Pool) *FrostFSResolver {
19 | return &FrostFSResolver{pool: p}
20 | }
21 |
22 | // SystemDNS implements resolver.FrostFS interface method.
23 | func (x *FrostFSResolver) SystemDNS(ctx context.Context) (string, error) {
24 | networkInfo, err := x.pool.NetworkInfo(ctx)
25 | if err != nil {
26 | return "", fmt.Errorf("read network info via client: %w", err)
27 | }
28 |
29 | domain := networkInfo.RawNetworkParameter("SystemDNS")
30 | if domain == nil {
31 | return "", errors.New("system DNS parameter not found or empty")
32 | }
33 |
34 | return string(domain), nil
35 | }
36 |
--------------------------------------------------------------------------------
/resolver/resolver.go:
--------------------------------------------------------------------------------
1 | package resolver
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "sync"
8 |
9 | "github.com/TrueCloudLab/frostfs-sdk-go/container"
10 | cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
11 | "github.com/TrueCloudLab/frostfs-sdk-go/ns"
12 | )
13 |
14 | const (
15 | NNSResolver = "nns"
16 | DNSResolver = "dns"
17 | )
18 |
19 | // ErrNoResolvers is returned when trying to resolve a container without any resolvers configured.
20 | var ErrNoResolvers = errors.New("no resolvers")
21 |
22 | // FrostFS represents a virtual connection to the FrostFS network.
23 | type FrostFS interface {
24 | // SystemDNS reads the system DNS network parameter of FrostFS.
25 | //
26 | // Returns exactly one non-zero value. Returns any error encountered
27 | // that prevented the parameter from being read.
28 | SystemDNS(context.Context) (string, error)
29 | }
30 |
31 | type Config struct {
32 | FrostFS FrostFS
33 | RPCAddress string
34 | }
35 |
36 | type ContainerResolver struct {
37 | mu sync.RWMutex
38 | resolvers []*Resolver
39 | }
40 |
41 | type Resolver struct {
42 | Name string
43 | resolve func(context.Context, string) (*cid.ID, error)
44 | }
45 |
46 | func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (*cid.ID, error)) {
47 | r.resolve = fn
48 | }
49 |
50 | func (r *Resolver) Resolve(ctx context.Context, name string) (*cid.ID, error) {
51 | return r.resolve(ctx, name)
52 | }
53 |
54 | func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) {
55 | resolvers, err := createResolvers(resolverNames, cfg)
56 | if err != nil {
57 | return nil, err
58 | }
59 |
60 | return &ContainerResolver{
61 | resolvers: resolvers,
62 | }, nil
63 | }
64 |
65 | func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
66 | resolvers := make([]*Resolver, len(resolverNames))
67 | for i, name := range resolverNames {
68 | cnrResolver, err := newResolver(name, cfg)
69 | if err != nil {
70 | return nil, err
71 | }
72 | resolvers[i] = cnrResolver
73 | }
74 |
75 | return resolvers, nil
76 | }
77 |
78 | func (r *ContainerResolver) Resolve(ctx context.Context, cnrName string) (*cid.ID, error) {
79 | r.mu.RLock()
80 | defer r.mu.RUnlock()
81 |
82 | var err error
83 | for _, resolver := range r.resolvers {
84 | cnrID, resolverErr := resolver.Resolve(ctx, cnrName)
85 | if resolverErr != nil {
86 | resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
87 | if err == nil {
88 | err = resolverErr
89 | } else {
90 | err = fmt.Errorf("%s: %w", err.Error(), resolverErr)
91 | }
92 | continue
93 | }
94 | return cnrID, nil
95 | }
96 |
97 | if err != nil {
98 | return nil, err
99 | }
100 |
101 | return nil, ErrNoResolvers
102 | }
103 |
104 | func (r *ContainerResolver) UpdateResolvers(resolverNames []string, cfg *Config) error {
105 | r.mu.Lock()
106 | defer r.mu.Unlock()
107 |
108 | if r.equals(resolverNames) {
109 | return nil
110 | }
111 |
112 | resolvers, err := createResolvers(resolverNames, cfg)
113 | if err != nil {
114 | return err
115 | }
116 |
117 | r.resolvers = resolvers
118 |
119 | return nil
120 | }
121 |
122 | func (r *ContainerResolver) equals(resolverNames []string) bool {
123 | if len(r.resolvers) != len(resolverNames) {
124 | return false
125 | }
126 |
127 | for i := 0; i < len(resolverNames); i++ {
128 | if r.resolvers[i].Name != resolverNames[i] {
129 | return false
130 | }
131 | }
132 | return true
133 | }
134 |
135 | func newResolver(name string, cfg *Config) (*Resolver, error) {
136 | switch name {
137 | case DNSResolver:
138 | return NewDNSResolver(cfg.FrostFS)
139 | case NNSResolver:
140 | return NewNNSResolver(cfg.RPCAddress)
141 | default:
142 | return nil, fmt.Errorf("unknown resolver: %s", name)
143 | }
144 | }
145 |
146 | func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
147 | if frostFS == nil {
148 | return nil, fmt.Errorf("pool must not be nil for DNS resolver")
149 | }
150 |
151 | var dns ns.DNS
152 |
153 | resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
154 | domain, err := frostFS.SystemDNS(ctx)
155 | if err != nil {
156 | return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
157 | }
158 |
159 | domain = name + "." + domain
160 | cnrID, err := dns.ResolveContainerName(domain)
161 | if err != nil {
162 | return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
163 | }
164 | return &cnrID, nil
165 | }
166 |
167 | return &Resolver{
168 | Name: DNSResolver,
169 | resolve: resolveFunc,
170 | }, nil
171 | }
172 |
173 | func NewNNSResolver(rpcAddress string) (*Resolver, error) {
174 | var nns ns.NNS
175 |
176 | if err := nns.Dial(rpcAddress); err != nil {
177 | return nil, fmt.Errorf("could not dial nns: %w", err)
178 | }
179 |
180 | resolveFunc := func(_ context.Context, name string) (*cid.ID, error) {
181 | var d container.Domain
182 | d.SetName(name)
183 |
184 | cnrID, err := nns.ResolveContainerDomain(d)
185 | if err != nil {
186 | return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
187 | }
188 | return &cnrID, nil
189 | }
190 |
191 | return &Resolver{
192 | Name: NNSResolver,
193 | resolve: resolveFunc,
194 | }, nil
195 | }
196 |
--------------------------------------------------------------------------------
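
ContainerResolver tries each configured resolver in order and chains their errors into one message. The sketch below is not taken from app.go (which is not shown here): the RPC endpoint is a placeholder that NewNNSResolver dials at construction time, the pool is assumed to be already created and dialled, and the resolver order mirrors the resolve_order default in settings.go (nns, then dns).

package main // hypothetical file alongside app.go

import (
    "context"
    "fmt"

    "github.com/TrueCloudLab/frostfs-http-gw/resolver"
    "github.com/TrueCloudLab/frostfs-sdk-go/pool"
)

// resolveContainerName resolves a human-readable container name to its ID.
func resolveContainerName(ctx context.Context, p *pool.Pool, name string) (string, error) {
    cfg := &resolver.Config{
        FrostFS:    resolver.NewFrostFSResolver(p), // backs the "dns" resolver via SystemDNS
        RPCAddress: "http://localhost:30333",       // placeholder rpc_endpoint for the "nns" resolver
    }

    r, err := resolver.NewContainerResolver([]string{resolver.NNSResolver, resolver.DNSResolver}, cfg)
    if err != nil {
        return "", err
    }

    cnrID, err := r.Resolve(ctx, name)
    if err != nil {
        return "", fmt.Errorf("resolve %q: %w", name, err)
    }
    return cnrID.String(), nil
}
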
/response/utils.go:
--------------------------------------------------------------------------------
1 | package response
2 |
3 | import "github.com/valyala/fasthttp"
4 |
5 | func Error(r *fasthttp.RequestCtx, msg string, code int) {
6 | r.Error(msg+"\n", code)
7 | }
8 |
--------------------------------------------------------------------------------
/server.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "crypto/tls"
6 | "errors"
7 | "fmt"
8 | "net"
9 | "sync"
10 | )
11 |
12 | type (
13 | ServerInfo struct {
14 | Address string
15 | TLS ServerTLSInfo
16 | }
17 |
18 | ServerTLSInfo struct {
19 | Enabled bool
20 | CertFile string
21 | KeyFile string
22 | }
23 |
24 | Server interface {
25 | Address() string
26 | Listener() net.Listener
27 | UpdateCert(certFile, keyFile string) error
28 | }
29 |
30 | server struct {
31 | address string
32 | listener net.Listener
33 | tlsProvider *certProvider
34 | }
35 |
36 | certProvider struct {
37 | Enabled bool
38 |
39 | mu sync.RWMutex
40 | certPath string
41 | keyPath string
42 | cert *tls.Certificate
43 | }
44 | )
45 |
46 | func (s *server) Address() string {
47 | return s.address
48 | }
49 |
50 | func (s *server) Listener() net.Listener {
51 | return s.listener
52 | }
53 |
54 | func (s *server) UpdateCert(certFile, keyFile string) error {
55 | return s.tlsProvider.UpdateCert(certFile, keyFile)
56 | }
57 |
58 | func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
59 | var lic net.ListenConfig
60 | ln, err := lic.Listen(ctx, "tcp", serverInfo.Address)
61 | if err != nil {
62 | return nil, fmt.Errorf("could not prepare listener: %w", err)
63 | }
64 |
65 | tlsProvider := &certProvider{
66 | Enabled: serverInfo.TLS.Enabled,
67 | }
68 |
69 | if serverInfo.TLS.Enabled {
70 | if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
71 | return nil, fmt.Errorf("failed to update cert: %w", err)
72 | }
73 |
74 | ln = tls.NewListener(ln, &tls.Config{
75 | GetCertificate: tlsProvider.GetCertificate,
76 | })
77 | }
78 |
79 | return &server{
80 | address: serverInfo.Address,
81 | listener: ln,
82 | tlsProvider: tlsProvider,
83 | }, nil
84 | }
85 |
86 | func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
87 | if !p.Enabled {
88 | return nil, errors.New("cert provider: disabled")
89 | }
90 |
91 | p.mu.RLock()
92 | defer p.mu.RUnlock()
93 | return p.cert, nil
94 | }
95 |
96 | func (p *certProvider) UpdateCert(certPath, keyPath string) error {
97 | if !p.Enabled {
98 | return fmt.Errorf("tls disabled")
99 | }
100 |
101 | cert, err := tls.LoadX509KeyPair(certPath, keyPath)
102 | if err != nil {
103 | return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err)
104 | }
105 |
106 | p.mu.Lock()
107 | p.certPath = certPath
108 | p.keyPath = keyPath
109 | p.cert = &cert
110 | p.mu.Unlock()
111 | return nil
112 | }
113 |
114 | func (p *certProvider) FilePaths() (string, string) {
115 | if !p.Enabled {
116 | return "", ""
117 | }
118 |
119 | p.mu.RLock()
120 | defer p.mu.RUnlock()
121 | return p.certPath, p.keyPath
122 | }
123 |
--------------------------------------------------------------------------------
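
server.go keeps TLS material behind certProvider, so a certificate can be replaced at runtime without recreating the listener: GetCertificate re-reads the current certificate under the provider's lock on every handshake. The sketch below is a hypothetical helper in the same package main (newServer, ServerInfo and ServerTLSInfo are package-local); the address, certificate paths, handler wiring and fasthttp.Server options used in app.go are assumptions.

package main // hypothetical file alongside server.go

import (
    "context"

    "github.com/valyala/fasthttp"
)

// serveWithTLS creates a TLS listener once and serves fasthttp traffic on it.
func serveWithTLS(ctx context.Context, handler fasthttp.RequestHandler) error {
    srv, err := newServer(ctx, ServerInfo{
        Address: "0.0.0.0:8443", // placeholder listen address
        TLS: ServerTLSInfo{
            Enabled:  true,
            CertFile: "/path/to/tls.crt", // placeholder paths
            KeyFile:  "/path/to/tls.key",
        },
    })
    if err != nil {
        return err
    }

    // Later, on certificate rotation, the listener stays up:
    //   _ = srv.UpdateCert("/path/to/new.crt", "/path/to/new.key")

    httpServer := &fasthttp.Server{Handler: handler}
    return httpServer.Serve(srv.Listener())
}
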
/settings.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "path"
7 | "runtime"
8 | "sort"
9 | "strconv"
10 | "strings"
11 | "time"
12 |
13 | "github.com/TrueCloudLab/frostfs-http-gw/resolver"
14 | "github.com/spf13/pflag"
15 | "github.com/spf13/viper"
16 | "github.com/valyala/fasthttp"
17 | "go.uber.org/zap"
18 | "go.uber.org/zap/zapcore"
19 | )
20 |
21 | const (
22 | defaultRebalanceTimer = 60 * time.Second
23 | defaultRequestTimeout = 15 * time.Second
24 | defaultConnectTimeout = 10 * time.Second
25 | defaultStreamTimeout = 10 * time.Second
26 |
27 | defaultShutdownTimeout = 15 * time.Second
28 |
29 | defaultPoolErrorThreshold uint32 = 100
30 |
31 | cfgServer = "server"
32 | cfgTLSEnabled = "tls.enabled"
33 | cfgTLSCertFile = "tls.cert_file"
34 | cfgTLSKeyFile = "tls.key_file"
35 |
36 | // Web.
37 | cfgWebReadBufferSize = "web.read_buffer_size"
38 | cfgWebWriteBufferSize = "web.write_buffer_size"
39 | cfgWebReadTimeout = "web.read_timeout"
40 | cfgWebWriteTimeout = "web.write_timeout"
41 | cfgWebStreamRequestBody = "web.stream_request_body"
42 | cfgWebMaxRequestBodySize = "web.max_request_body_size"
43 |
44 | // Metrics / Profiler.
45 | cfgPrometheusEnabled = "prometheus.enabled"
46 | cfgPrometheusAddress = "prometheus.address"
47 | cfgPprofEnabled = "pprof.enabled"
48 | cfgPprofAddress = "pprof.address"
49 |
50 | // Pool config.
51 | cfgConTimeout = "connect_timeout"
52 | cfgStreamTimeout = "stream_timeout"
53 | cfgReqTimeout = "request_timeout"
54 | cfgRebalance = "rebalance_timer"
55 | cfgPoolErrorThreshold = "pool_error_threshold"
56 |
57 | // Logger.
58 | cfgLoggerLevel = "logger.level"
59 |
60 | // Wallet.
61 | cfgWalletPassphrase = "wallet.passphrase"
62 | cfgWalletPath = "wallet.path"
63 | cfgWalletAddress = "wallet.address"
64 |
65 | // Uploader Header.
66 | cfgUploaderHeaderEnableDefaultTimestamp = "upload_header.use_default_timestamp"
67 |
68 | // Peers.
69 | cfgPeers = "peers"
70 |
71 | // NeoGo.
72 | cfgRPCEndpoint = "rpc_endpoint"
73 |
74 | // Resolving.
75 | cfgResolveOrder = "resolve_order"
76 |
77 | // Zip compression.
78 | cfgZipCompression = "zip.compression"
79 |
80 | // Command line args.
81 | cmdHelp = "help"
82 | cmdVersion = "version"
83 | cmdPprof = "pprof"
84 | cmdMetrics = "metrics"
85 | cmdWallet = "wallet"
86 | cmdAddress = "address"
87 | cmdConfig = "config"
88 | cmdConfigDir = "config-dir"
89 | cmdListenAddress = "listen_address"
90 | )
91 |
92 | var ignore = map[string]struct{}{
93 | cfgPeers: {},
94 | cmdHelp: {},
95 | cmdVersion: {},
96 | }
97 |
98 | func settings() *viper.Viper {
99 | v := viper.New()
100 | v.AutomaticEnv()
101 | v.SetEnvPrefix(Prefix)
102 | v.AllowEmptyEnv(true)
103 | v.SetConfigType("yaml")
104 | v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
105 |
106 | // flags setup:
107 | flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
108 | flags.SetOutput(os.Stdout)
109 | flags.SortFlags = false
110 |
111 | flags.Bool(cmdPprof, false, "enable pprof")
112 | flags.Bool(cmdMetrics, false, "enable prometheus")
113 |
114 | help := flags.BoolP(cmdHelp, "h", false, "show help")
115 | version := flags.BoolP(cmdVersion, "v", false, "show version")
116 |
117 | flags.StringP(cmdWallet, "w", "", `path to the wallet`)
118 | flags.String(cmdAddress, "", `address of wallet account`)
119 | flags.StringArray(cmdConfig, nil, "config paths")
120 | flags.String(cmdConfigDir, "", "config dir path")
121 | flags.Duration(cfgConTimeout, defaultConnectTimeout, "gRPC connect timeout")
122 | flags.Duration(cfgStreamTimeout, defaultStreamTimeout, "gRPC individual message timeout")
123 | flags.Duration(cfgReqTimeout, defaultRequestTimeout, "gRPC request timeout")
124 | flags.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")
125 |
126 | flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
127 | flags.String(cfgTLSCertFile, "", "TLS certificate path")
128 | flags.String(cfgTLSKeyFile, "", "TLS key path")
129 | peers := flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
130 |
131 | resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
132 |
133 | // set defaults:
134 |
135 | // logger:
136 | v.SetDefault(cfgLoggerLevel, "debug")
137 |
138 | // pool:
139 | v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
140 |
141 | // web-server:
142 | v.SetDefault(cfgWebReadBufferSize, 4096)
143 | v.SetDefault(cfgWebWriteBufferSize, 4096)
144 | v.SetDefault(cfgWebReadTimeout, time.Minute*10)
145 | v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
146 | v.SetDefault(cfgWebStreamRequestBody, true)
147 | v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
148 |
149 | // upload header
150 | v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
151 |
152 | // zip:
153 | v.SetDefault(cfgZipCompression, false)
154 |
155 | // metrics
156 | v.SetDefault(cfgPprofAddress, "localhost:8083")
157 | v.SetDefault(cfgPrometheusAddress, "localhost:8084")
158 |
159 | // Binding flags
160 | if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
161 | panic(err)
162 | }
163 | if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
164 | panic(err)
165 | }
166 |
167 | if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
168 | panic(err)
169 | }
170 |
171 | if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
172 | panic(err)
173 | }
174 |
175 | if err := v.BindPFlags(flags); err != nil {
176 | panic(err)
177 | }
178 |
179 | if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
180 | panic(err)
181 | }
182 | if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
183 | panic(err)
184 | }
185 | if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
186 | panic(err)
187 | }
188 |
189 | if err := flags.Parse(os.Args); err != nil {
190 | panic(err)
191 | }
192 |
193 | if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
194 | v.Set(cfgServer+".0."+cfgTLSEnabled, true)
195 | }
196 |
197 | if resolveMethods != nil {
198 | v.SetDefault(cfgResolveOrder, *resolveMethods)
199 | }
200 |
201 | switch {
202 | case help != nil && *help:
203 | fmt.Printf("FrostFS HTTP Gateway %s\n", Version)
204 | flags.PrintDefaults()
205 |
206 | fmt.Println()
207 | fmt.Println("Default environments:")
208 | fmt.Println()
209 | keys := v.AllKeys()
210 | sort.Strings(keys)
211 |
212 | for i := range keys {
213 | if _, ok := ignore[keys[i]]; ok {
214 | continue
215 | }
216 |
217 | defaultValue := v.GetString(keys[i])
218 | if len(defaultValue) == 0 {
219 | continue
220 | }
221 |
222 | k := strings.Replace(keys[i], ".", "_", -1)
223 | fmt.Printf("%s_%s = %s\n", Prefix, strings.ToUpper(k), defaultValue)
224 | }
225 |
226 | fmt.Println()
227 | fmt.Println("Peers preset:")
228 | fmt.Println()
229 |
230 | fmt.Printf("%s_%s_[N]_ADDRESS = string\n", Prefix, strings.ToUpper(cfgPeers))
231 | fmt.Printf("%s_%s_[N]_WEIGHT = float\n", Prefix, strings.ToUpper(cfgPeers))
232 |
233 | os.Exit(0)
234 | case version != nil && *version:
235 | fmt.Printf("FrostFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version())
236 | os.Exit(0)
237 | }
238 |
239 | if err := readInConfig(v); err != nil {
240 | panic(err)
241 | }
242 |
243 | if peers != nil && len(*peers) > 0 {
244 | for i := range *peers {
245 | v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
246 | v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
247 | v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
248 | }
249 | }
250 |
251 | return v
252 | }
253 |
254 | func readInConfig(v *viper.Viper) error {
255 | if v.IsSet(cmdConfig) {
256 | if err := readConfig(v); err != nil {
257 | return err
258 | }
259 | }
260 |
261 | if v.IsSet(cmdConfigDir) {
262 | if err := readConfigDir(v); err != nil {
263 | return err
264 | }
265 | }
266 |
267 | return nil
268 | }
269 |
270 | func readConfigDir(v *viper.Viper) error {
271 | cfgSubConfigDir := v.GetString(cmdConfigDir)
272 | entries, err := os.ReadDir(cfgSubConfigDir)
273 | if err != nil {
274 | return err
275 | }
276 |
277 | for _, entry := range entries {
278 | if entry.IsDir() {
279 | continue
280 | }
281 | ext := path.Ext(entry.Name())
282 | if ext != ".yaml" && ext != ".yml" {
283 | continue
284 | }
285 |
286 | if err = mergeConfig(v, path.Join(cfgSubConfigDir, entry.Name())); err != nil {
287 | return err
288 | }
289 | }
290 |
291 | return nil
292 | }
293 |
294 | func readConfig(v *viper.Viper) error {
295 | for _, fileName := range v.GetStringSlice(cmdConfig) {
296 | if err := mergeConfig(v, fileName); err != nil {
297 | return err
298 | }
299 | }
300 | return nil
301 | }
302 |
303 | func mergeConfig(v *viper.Viper, fileName string) error {
304 | cfgFile, err := os.Open(fileName)
305 | if err != nil {
306 | return err
307 | }
308 |
309 | defer func() {
310 | if errClose := cfgFile.Close(); errClose != nil {
311 | panic(errClose)
312 | }
313 | }()
314 |
315 | if err = v.MergeConfig(cfgFile); err != nil {
316 | return err
317 | }
318 |
319 | return nil
320 | }
321 |
322 | // newLogger constructs a zap.Logger instance for the current application.
323 | // Panics on failure.
324 | //
325 | // Logger is built from zap's production logging configuration with:
326 | // - parameterized level (debug by default)
327 | // - console encoding
328 | // - ISO8601 time encoding
329 | //
330 | // Logger records a stack trace for all messages at or above fatal level.
331 | //
332 | // See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
333 | func newLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
334 | lvl, err := getLogLevel(v)
335 | if err != nil {
336 | panic(err)
337 | }
338 |
339 | c := zap.NewProductionConfig()
340 | c.Level = zap.NewAtomicLevelAt(lvl)
341 | c.Encoding = "console"
342 | c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
343 |
344 | l, err := c.Build(
345 | zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
346 | )
347 | if err != nil {
348 | panic(fmt.Sprintf("build zap logger instance: %v", err))
349 | }
350 |
351 | return l, c.Level
352 | }
353 |
354 | func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
355 | var lvl zapcore.Level
356 | lvlStr := v.GetString(cfgLoggerLevel)
357 | err := lvl.UnmarshalText([]byte(lvlStr))
358 | if err != nil {
359 | return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
360 | "value should be one of %v", lvlStr, err, [...]zapcore.Level{
361 | zapcore.DebugLevel,
362 | zapcore.InfoLevel,
363 | zapcore.WarnLevel,
364 | zapcore.ErrorLevel,
365 | zapcore.DPanicLevel,
366 | zapcore.PanicLevel,
367 | zapcore.FatalLevel,
368 | })
369 | }
370 | return lvl, nil
371 | }
372 |
373 | func fetchServers(v *viper.Viper) []ServerInfo {
374 | var servers []ServerInfo
375 |
376 | for i := 0; ; i++ {
377 | key := cfgServer + "." + strconv.Itoa(i) + "."
378 |
379 | var serverInfo ServerInfo
380 | serverInfo.Address = v.GetString(key + "address")
381 | serverInfo.TLS.Enabled = v.GetBool(key + cfgTLSEnabled)
382 | serverInfo.TLS.KeyFile = v.GetString(key + cfgTLSKeyFile)
383 | serverInfo.TLS.CertFile = v.GetString(key + cfgTLSCertFile)
384 |
385 | if serverInfo.Address == "" {
386 | break
387 | }
388 |
389 | servers = append(servers, serverInfo)
390 | }
391 |
392 | return servers
393 | }
394 |
--------------------------------------------------------------------------------
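Editor's note: as a quick illustration of the "Default environments" mapping printed above, here is a minimal sketch of how a nested config key becomes an environment variable name. The "HTTP_GW" prefix is an assumption standing in for the Prefix constant defined earlier in settings.go.

package main

import (
	"fmt"
	"strings"
)

// envName mirrors the derivation used by the help output above:
// dots become underscores and the whole key is uppercased under the prefix.
func envName(prefix, key string) string {
	return prefix + "_" + strings.ToUpper(strings.Replace(key, ".", "_", -1))
}

func main() {
	fmt.Println(envName("HTTP_GW", "peers.0.address")) // HTTP_GW_PEERS_0_ADDRESS
	fmt.Println(envName("HTTP_GW", "logger.level"))    // HTTP_GW_LOGGER_LEVEL
}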
/tokens/bearer-token.go:
--------------------------------------------------------------------------------
1 | package tokens
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/base64"
7 | "errors"
8 | "fmt"
9 |
10 | "github.com/TrueCloudLab/frostfs-sdk-go/bearer"
11 | "github.com/valyala/fasthttp"
12 | )
13 |
14 | type fromHandler = func(h *fasthttp.RequestHeader) []byte
15 |
16 | const (
17 | bearerTokenHdr = "Bearer"
18 | bearerTokenKey = "__context_bearer_token_key"
19 | )
20 |
21 | // BearerToken usage:
22 | //
23 | // if err = storeBearerToken(ctx); err != nil {
24 | // log.Error("could not fetch bearer token", zap.Error(err))
25 | // c.Error("could not fetch bearer token", fasthttp.StatusBadRequest)
26 | // return
27 | // }
28 |
29 | // BearerTokenFromHeader extracts a bearer token from Authorization request header.
30 | func BearerTokenFromHeader(h *fasthttp.RequestHeader) []byte {
31 | auth := h.Peek(fasthttp.HeaderAuthorization)
32 | if auth == nil || !bytes.HasPrefix(auth, []byte(bearerTokenHdr)) {
33 | return nil
34 | }
35 | if auth = bytes.TrimPrefix(auth, []byte(bearerTokenHdr+" ")); len(auth) == 0 {
36 | return nil
37 | }
38 | return auth
39 | }
40 |
41 | // BearerTokenFromCookie extracts a bearer token from cookies.
42 | func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {
43 | auth := h.Cookie(bearerTokenHdr)
44 | if len(auth) == 0 {
45 | return nil
46 | }
47 |
48 | return auth
49 | }
50 |
51 | // StoreBearerToken extracts a bearer token from the header or cookie and stores
52 | // it in the request context.
53 | func StoreBearerToken(ctx *fasthttp.RequestCtx) error {
54 | tkn, err := fetchBearerToken(ctx)
55 | if err != nil {
56 | return err
57 | }
58 | // This is an analog of context.WithValue.
59 | ctx.SetUserValue(bearerTokenKey, tkn)
60 | return nil
61 | }
62 |
63 | // LoadBearerToken returns a bearer token stored in the context given (if it's
64 | // present there).
65 | func LoadBearerToken(ctx context.Context) (*bearer.Token, error) {
66 | if tkn, ok := ctx.Value(bearerTokenKey).(*bearer.Token); ok && tkn != nil {
67 | return tkn, nil
68 | }
69 | return nil, errors.New("found empty bearer token")
70 | }
71 |
72 | func fetchBearerToken(ctx *fasthttp.RequestCtx) (*bearer.Token, error) {
73 | // ignore empty value
74 | if ctx == nil {
75 | return nil, nil
76 | }
77 | var (
78 | lastErr error
79 |
80 | buf []byte
81 | tkn = new(bearer.Token)
82 | )
83 | for _, parse := range []fromHandler{BearerTokenFromHeader, BearerTokenFromCookie} {
84 | if buf = parse(&ctx.Request.Header); buf == nil {
85 | continue
86 | } else if data, err := base64.StdEncoding.DecodeString(string(buf)); err != nil {
87 | lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err)
88 | continue
89 | } else if err = tkn.Unmarshal(data); err != nil {
90 | lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
91 | continue
92 | }
93 |
94 | return tkn, nil
95 | }
96 |
97 | return nil, lastErr
98 | }
99 |
--------------------------------------------------------------------------------
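Editor's note: a minimal client-side sketch of how a request would carry a bearer token so that BearerTokenFromHeader or BearerTokenFromCookie can pick it up. The token bytes are hypothetical; a real client would base64-encode a marshalled bearer.Token.

package main

import (
	"encoding/base64"

	"github.com/valyala/fasthttp"
)

func main() {
	// rawToken stands in for a marshalled bearer.Token; these bytes are
	// hypothetical and only demonstrate the base64 step.
	rawToken := []byte("marshalled-bearer-token")
	t64 := base64.StdEncoding.EncodeToString(rawToken)

	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)

	// Either of these is recognized by BearerTokenFromHeader / BearerTokenFromCookie.
	req.Header.Set(fasthttp.HeaderAuthorization, "Bearer "+t64)
	req.Header.SetCookie("Bearer", t64)
}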
/tokens/bearer-token_test.go:
--------------------------------------------------------------------------------
1 | package tokens
2 |
3 | import (
4 | "encoding/base64"
5 | "testing"
6 |
7 | "github.com/TrueCloudLab/frostfs-sdk-go/bearer"
8 | "github.com/TrueCloudLab/frostfs-sdk-go/user"
9 | "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
10 | "github.com/stretchr/testify/require"
11 | "github.com/valyala/fasthttp"
12 | )
13 |
14 | func makeTestCookie(value []byte) *fasthttp.RequestHeader {
15 | header := new(fasthttp.RequestHeader)
16 | header.SetCookie(bearerTokenHdr, string(value))
17 | return header
18 | }
19 |
20 | func makeTestHeader(value []byte) *fasthttp.RequestHeader {
21 | header := new(fasthttp.RequestHeader)
22 | if value != nil {
23 | header.Set(fasthttp.HeaderAuthorization, bearerTokenHdr+" "+string(value))
24 | }
25 | return header
26 | }
27 |
28 | func Test_fromCookie(t *testing.T) {
29 | cases := []struct {
30 | name string
31 | actual []byte
32 | expect []byte
33 | }{
34 | {name: "empty"},
35 | {name: "normal", actual: []byte("TOKEN"), expect: []byte("TOKEN")},
36 | }
37 |
38 | for _, tt := range cases {
39 | t.Run(tt.name, func(t *testing.T) {
40 | require.Equal(t, tt.expect, BearerTokenFromCookie(makeTestCookie(tt.actual)))
41 | })
42 | }
43 | }
44 |
45 | func Test_fromHeader(t *testing.T) {
46 | cases := []struct {
47 | name string
48 | actual []byte
49 | expect []byte
50 | }{
51 | {name: "empty"},
52 | {name: "normal", actual: []byte("TOKEN"), expect: []byte("TOKEN")},
53 | }
54 |
55 | for _, tt := range cases {
56 | t.Run(tt.name, func(t *testing.T) {
57 | require.Equal(t, tt.expect, BearerTokenFromHeader(makeTestHeader(tt.actual)))
58 | })
59 | }
60 | }
61 |
62 | func Test_fetchBearerToken(t *testing.T) {
63 | key, err := keys.NewPrivateKey()
64 | require.NoError(t, err)
65 | var uid user.ID
66 | user.IDFromKey(&uid, key.PrivateKey.PublicKey)
67 |
68 | tkn := new(bearer.Token)
69 | tkn.ForUser(uid)
70 |
71 | t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
72 | require.NotEmpty(t, t64)
73 |
74 | cases := []struct {
75 | name string
76 |
77 | cookie string
78 | header string
79 |
80 | error string
81 | expect *bearer.Token
82 | }{
83 | {name: "empty"},
84 |
85 | {name: "bad base64 header", header: "WRONG BASE64", error: "can't base64-decode bearer token"},
86 | {name: "bad base64 cookie", cookie: "WRONG BASE64", error: "can't base64-decode bearer token"},
87 |
88 | {name: "header token unmarshal error", header: "dGVzdAo=", error: "can't unmarshal bearer token"},
89 | {name: "cookie token unmarshal error", cookie: "dGVzdAo=", error: "can't unmarshal bearer token"},
90 |
91 | {
92 | name: "bad header and cookie",
93 | header: "WRONG BASE64",
94 | cookie: "dGVzdAo=",
95 | error: "can't unmarshal bearer token",
96 | },
97 |
98 | {
99 | name: "bad header, but good cookie",
100 | header: "dGVzdAo=",
101 | cookie: t64,
102 | expect: tkn,
103 | },
104 |
105 | {name: "ok for header", header: t64, expect: tkn},
106 | {name: "ok for cookie", cookie: t64, expect: tkn},
107 | }
108 |
109 | for _, tt := range cases {
110 | t.Run(tt.name, func(t *testing.T) {
111 | ctx := makeTestRequest(tt.cookie, tt.header)
112 | actual, err := fetchBearerToken(ctx)
113 |
114 | if tt.error == "" {
115 | require.NoError(t, err)
116 | require.Equal(t, tt.expect, actual)
117 |
118 | return
119 | }
120 |
121 | require.Contains(t, err.Error(), tt.error)
122 | })
123 | }
124 | }
125 |
126 | func makeTestRequest(cookie, header string) *fasthttp.RequestCtx {
127 | ctx := new(fasthttp.RequestCtx)
128 |
129 | if cookie != "" {
130 | ctx.Request.Header.SetCookie(bearerTokenHdr, cookie)
131 | }
132 |
133 | if header != "" {
134 | ctx.Request.Header.Set(fasthttp.HeaderAuthorization, bearerTokenHdr+" "+header)
135 | }
136 | return ctx
137 | }
138 |
139 | func Test_checkAndPropagateBearerToken(t *testing.T) {
140 | key, err := keys.NewPrivateKey()
141 | require.NoError(t, err)
142 | var uid user.ID
143 | user.IDFromKey(&uid, key.PrivateKey.PublicKey)
144 |
145 | tkn := new(bearer.Token)
146 | tkn.ForUser(uid)
147 |
148 | t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
149 | require.NotEmpty(t, t64)
150 |
151 | ctx := makeTestRequest(t64, "")
152 |
153 | // Expect to see the token within the context.
154 | require.NoError(t, StoreBearerToken(ctx))
155 |
156 | // Expect to see the same token without errors.
157 | actual, err := LoadBearerToken(ctx)
158 | require.NoError(t, err)
159 | require.Equal(t, tkn, actual)
160 | }
161 |
--------------------------------------------------------------------------------
/uploader/filter.go:
--------------------------------------------------------------------------------
1 | package uploader
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "math"
7 | "strconv"
8 | "time"
9 |
10 | "github.com/TrueCloudLab/frostfs-api-go/v2/object"
11 | "github.com/TrueCloudLab/frostfs-http-gw/utils"
12 | "github.com/valyala/fasthttp"
13 | "go.uber.org/zap"
14 | )
15 |
16 | var frostfsAttributeHeaderPrefixes = [...][]byte{[]byte("Neofs-"), []byte("NEOFS-"), []byte("neofs-")}
17 |
18 | func systemTranslator(key, prefix []byte) []byte {
19 | // replace the specified prefix with `__NEOFS__`
20 | key = bytes.Replace(key, prefix, []byte(utils.SystemAttributePrefix), 1)
21 |
22 | // replace `-` with `_`
23 | key = bytes.ReplaceAll(key, []byte("-"), []byte("_"))
24 |
25 | // replace with uppercase
26 | return bytes.ToUpper(key)
27 | }
28 |
29 | func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]string, error) {
30 | var err error
31 | result := make(map[string]string)
32 | prefix := []byte(utils.UserAttributeHeaderPrefix)
33 |
34 | header.VisitAll(func(key, val []byte) {
35 | 		// checks that both the key and the value are not empty
36 | if len(key) == 0 || len(val) == 0 {
37 | return
38 | }
39 |
40 | 		// checks that the key has the user attribute prefix
41 | if !bytes.HasPrefix(key, prefix) {
42 | return
43 | }
44 |
45 | // removing attribute prefix
46 | clearKey := bytes.TrimPrefix(key, prefix)
47 |
48 | // checks that it's a system NeoFS header
49 | for _, system := range frostfsAttributeHeaderPrefixes {
50 | if bytes.HasPrefix(clearKey, system) {
51 | clearKey = systemTranslator(clearKey, system)
52 | break
53 | }
54 | }
55 |
56 | // checks that the attribute key is not empty
57 | if len(clearKey) == 0 {
58 | return
59 | }
60 |
61 | 		// check whether the key is duplicated;
62 | 		// if so, return an error containing the full key name (with prefix)
63 | if _, ok := result[string(clearKey)]; ok {
64 | err = fmt.Errorf("key duplication error: %s", string(key))
65 | return
66 | }
67 |
68 | // make string representation of key / val
69 | k, v := string(clearKey), string(val)
70 |
71 | result[k] = v
72 |
73 | l.Debug("add attribute to result object",
74 | zap.String("key", k),
75 | zap.String("val", v))
76 | })
77 |
78 | return result, err
79 | }
80 |
81 | func prepareExpirationHeader(headers map[string]string, epochDurations *epochDurations, now time.Time) error {
82 | expirationInEpoch := headers[object.SysAttributeExpEpoch]
83 |
84 | if timeRFC3339, ok := headers[utils.ExpirationRFC3339Attr]; ok {
85 | expTime, err := time.Parse(time.RFC3339, timeRFC3339)
86 | if err != nil {
87 | return fmt.Errorf("couldn't parse value %s of header %s", timeRFC3339, utils.ExpirationRFC3339Attr)
88 | }
89 |
90 | if expTime.Before(now) {
91 | return fmt.Errorf("value %s of header %s must be in the future", timeRFC3339, utils.ExpirationRFC3339Attr)
92 | }
93 | updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
94 | delete(headers, utils.ExpirationRFC3339Attr)
95 | }
96 |
97 | if timestamp, ok := headers[utils.ExpirationTimestampAttr]; ok {
98 | value, err := strconv.ParseInt(timestamp, 10, 64)
99 | if err != nil {
100 | return fmt.Errorf("couldn't parse value %s of header %s", timestamp, utils.ExpirationTimestampAttr)
101 | }
102 | expTime := time.Unix(value, 0)
103 |
104 | if expTime.Before(now) {
105 | return fmt.Errorf("value %s of header %s must be in the future", timestamp, utils.ExpirationTimestampAttr)
106 | }
107 | updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
108 | delete(headers, utils.ExpirationTimestampAttr)
109 | }
110 |
111 | if duration, ok := headers[utils.ExpirationDurationAttr]; ok {
112 | expDuration, err := time.ParseDuration(duration)
113 | if err != nil {
114 | return fmt.Errorf("couldn't parse value %s of header %s", duration, utils.ExpirationDurationAttr)
115 | }
116 | if expDuration <= 0 {
117 | return fmt.Errorf("value %s of header %s must be positive", expDuration, utils.ExpirationDurationAttr)
118 | }
119 | updateExpirationHeader(headers, epochDurations, expDuration)
120 | delete(headers, utils.ExpirationDurationAttr)
121 | }
122 |
123 | if expirationInEpoch != "" {
124 | headers[object.SysAttributeExpEpoch] = expirationInEpoch
125 | }
126 |
127 | return nil
128 | }
129 |
130 | func updateExpirationHeader(headers map[string]string, durations *epochDurations, expDuration time.Duration) {
131 | epochDuration := uint64(durations.msPerBlock) * durations.blockPerEpoch
132 | currentEpoch := durations.currentEpoch
133 | numEpoch := uint64(expDuration.Milliseconds()) / epochDuration
134 |
135 | if uint64(expDuration.Milliseconds())%epochDuration != 0 {
136 | numEpoch++
137 | }
138 |
139 | expirationEpoch := uint64(math.MaxUint64)
140 | if numEpoch < math.MaxUint64-currentEpoch {
141 | expirationEpoch = currentEpoch + numEpoch
142 | }
143 |
144 | headers[object.SysAttributeExpEpoch] = strconv.FormatUint(expirationEpoch, 10)
145 | }
146 |
--------------------------------------------------------------------------------
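Editor's note: a minimal sketch of the translation that filterHeaders and systemTranslator above perform for one hypothetical request header: the user prefix is stripped, the recognized system prefix is rewritten, dashes become underscores and the key is uppercased.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	key := []byte("X-Attribute-Neofs-Expiration-Epoch")

	key = bytes.TrimPrefix(key, []byte("X-Attribute-"))                // Neofs-Expiration-Epoch
	key = bytes.Replace(key, []byte("Neofs-"), []byte("__NEOFS__"), 1) // __NEOFS__Expiration-Epoch
	key = bytes.ReplaceAll(key, []byte("-"), []byte("_"))              // __NEOFS__Expiration_Epoch

	fmt.Println(string(bytes.ToUpper(key))) // __NEOFS__EXPIRATION_EPOCH
}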
/uploader/filter_test.go:
--------------------------------------------------------------------------------
1 | package uploader
2 |
3 | import (
4 | "math"
5 | "strconv"
6 | "testing"
7 | "time"
8 |
9 | "github.com/TrueCloudLab/frostfs-api-go/v2/object"
10 | "github.com/TrueCloudLab/frostfs-http-gw/utils"
11 | "github.com/stretchr/testify/require"
12 | "github.com/valyala/fasthttp"
13 | "go.uber.org/zap"
14 | )
15 |
16 | func TestFilter(t *testing.T) {
17 | log := zap.NewNop()
18 |
19 | t.Run("duplicate keys error", func(t *testing.T) {
20 | req := &fasthttp.RequestHeader{}
21 | req.DisableNormalizing()
22 | req.Add("X-Attribute-DupKey", "first-value")
23 | req.Add("X-Attribute-DupKey", "second-value")
24 | _, err := filterHeaders(log, req)
25 | require.Error(t, err)
26 | })
27 |
28 | t.Run("duplicate system keys error", func(t *testing.T) {
29 | req := &fasthttp.RequestHeader{}
30 | req.DisableNormalizing()
31 | req.Add("X-Attribute-Neofs-DupKey", "first-value")
32 | req.Add("X-Attribute-Neofs-DupKey", "second-value")
33 | _, err := filterHeaders(log, req)
34 | require.Error(t, err)
35 | })
36 |
37 | req := &fasthttp.RequestHeader{}
38 | req.DisableNormalizing()
39 |
40 | req.Set("X-Attribute-Neofs-Expiration-Epoch1", "101")
41 | req.Set("X-Attribute-NEOFS-Expiration-Epoch2", "102")
42 | req.Set("X-Attribute-neofs-Expiration-Epoch3", "103")
43 | req.Set("X-Attribute-MyAttribute", "value")
44 |
45 | expected := map[string]string{
46 | "__NEOFS__EXPIRATION_EPOCH1": "101",
47 | "MyAttribute": "value",
48 | "__NEOFS__EXPIRATION_EPOCH3": "103",
49 | "__NEOFS__EXPIRATION_EPOCH2": "102",
50 | }
51 |
52 | result, err := filterHeaders(log, req)
53 | require.NoError(t, err)
54 |
55 | require.Equal(t, expected, result)
56 | }
57 |
58 | func TestPrepareExpirationHeader(t *testing.T) {
59 | tomorrow := time.Now().Add(24 * time.Hour)
60 | tomorrowUnix := tomorrow.Unix()
61 | tomorrowUnixNano := tomorrow.UnixNano()
62 | tomorrowUnixMilli := tomorrowUnixNano / 1e6
63 |
64 | epoch := "100"
65 | duration := "24h"
66 | timestampSec := strconv.FormatInt(tomorrowUnix, 10)
67 | timestampMilli := strconv.FormatInt(tomorrowUnixMilli, 10)
68 | timestampNano := strconv.FormatInt(tomorrowUnixNano, 10)
69 |
70 | defaultDurations := &epochDurations{
71 | currentEpoch: 10,
72 | msPerBlock: 1000,
73 | blockPerEpoch: 101,
74 | }
75 |
76 | msPerBlock := defaultDurations.blockPerEpoch * uint64(defaultDurations.msPerBlock)
77 | epochPerDay := uint64((24 * time.Hour).Milliseconds()) / msPerBlock
78 | if uint64((24*time.Hour).Milliseconds())%msPerBlock != 0 {
79 | epochPerDay++
80 | }
81 |
82 | defaultExpEpoch := strconv.FormatUint(defaultDurations.currentEpoch+epochPerDay, 10)
83 |
84 | for _, tc := range []struct {
85 | name string
86 | headers map[string]string
87 | durations *epochDurations
88 | err bool
89 | expected map[string]string
90 | }{
91 | {
92 | name: "valid epoch",
93 | headers: map[string]string{object.SysAttributeExpEpoch: epoch},
94 | expected: map[string]string{object.SysAttributeExpEpoch: epoch},
95 | },
96 | {
97 | name: "valid epoch, valid duration",
98 | headers: map[string]string{
99 | object.SysAttributeExpEpoch: epoch,
100 | utils.ExpirationDurationAttr: duration,
101 | },
102 | durations: defaultDurations,
103 | expected: map[string]string{object.SysAttributeExpEpoch: epoch},
104 | },
105 | {
106 | name: "valid epoch, valid rfc3339",
107 | headers: map[string]string{
108 | object.SysAttributeExpEpoch: epoch,
109 | utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339),
110 | },
111 | durations: defaultDurations,
112 | expected: map[string]string{object.SysAttributeExpEpoch: epoch},
113 | },
114 | {
115 | name: "valid epoch, valid timestamp sec",
116 | headers: map[string]string{
117 | object.SysAttributeExpEpoch: epoch,
118 | utils.ExpirationTimestampAttr: timestampSec,
119 | },
120 | durations: defaultDurations,
121 | expected: map[string]string{object.SysAttributeExpEpoch: epoch},
122 | },
123 | {
124 | name: "valid epoch, valid timestamp milli",
125 | headers: map[string]string{
126 | object.SysAttributeExpEpoch: epoch,
127 | utils.ExpirationTimestampAttr: timestampMilli,
128 | },
129 | durations: defaultDurations,
130 | expected: map[string]string{object.SysAttributeExpEpoch: epoch},
131 | },
132 | {
133 | name: "valid epoch, valid timestamp nano",
134 | headers: map[string]string{
135 | object.SysAttributeExpEpoch: epoch,
136 | utils.ExpirationTimestampAttr: timestampNano,
137 | },
138 | durations: defaultDurations,
139 | expected: map[string]string{object.SysAttributeExpEpoch: epoch},
140 | },
141 | {
142 | name: "valid timestamp sec",
143 | headers: map[string]string{utils.ExpirationTimestampAttr: timestampSec},
144 | durations: defaultDurations,
145 | expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
146 | },
147 | {
148 | name: "valid duration",
149 | headers: map[string]string{utils.ExpirationDurationAttr: duration},
150 | durations: defaultDurations,
151 | expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
152 | },
153 | {
154 | name: "valid rfc3339",
155 | headers: map[string]string{utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339)},
156 | durations: defaultDurations,
157 | expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
158 | },
159 | {
160 | name: "valid max uint 64",
161 | headers: map[string]string{utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339)},
162 | durations: &epochDurations{
163 | currentEpoch: math.MaxUint64 - 1,
164 | msPerBlock: defaultDurations.msPerBlock,
165 | blockPerEpoch: defaultDurations.blockPerEpoch,
166 | },
167 | expected: map[string]string{object.SysAttributeExpEpoch: strconv.FormatUint(uint64(math.MaxUint64), 10)},
168 | },
169 | {
170 | name: "invalid timestamp sec",
171 | headers: map[string]string{utils.ExpirationTimestampAttr: "abc"},
172 | err: true,
173 | },
174 | {
175 | name: "invalid timestamp sec zero",
176 | headers: map[string]string{utils.ExpirationTimestampAttr: "0"},
177 | err: true,
178 | },
179 | {
180 | name: "invalid duration",
181 | headers: map[string]string{utils.ExpirationDurationAttr: "1d"},
182 | err: true,
183 | },
184 | {
185 | name: "invalid duration negative",
186 | headers: map[string]string{utils.ExpirationDurationAttr: "-5h"},
187 | err: true,
188 | },
189 | {
190 | name: "invalid rfc3339",
191 | headers: map[string]string{utils.ExpirationRFC3339Attr: "abc"},
192 | err: true,
193 | },
194 | {
195 | name: "invalid rfc3339 zero",
196 | headers: map[string]string{utils.ExpirationRFC3339Attr: time.RFC3339},
197 | err: true,
198 | },
199 | } {
200 | t.Run(tc.name, func(t *testing.T) {
201 | err := prepareExpirationHeader(tc.headers, tc.durations, time.Now())
202 | if tc.err {
203 | require.Error(t, err)
204 | } else {
205 | require.NoError(t, err)
206 | require.Equal(t, tc.expected, tc.headers)
207 | }
208 | })
209 | }
210 | }
211 |
--------------------------------------------------------------------------------
/uploader/multipart.go:
--------------------------------------------------------------------------------
1 | package uploader
2 |
3 | import (
4 | "io"
5 |
6 | "github.com/TrueCloudLab/frostfs-http-gw/uploader/multipart"
7 | "go.uber.org/zap"
8 | )
9 |
10 | // MultipartFile provides the standard ReadCloser interface and also allows one
11 | // to get the file name; it is used for multipart uploads.
12 | type MultipartFile interface {
13 | io.ReadCloser
14 | FileName() string
15 | }
16 |
17 | func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
18 | 	// A custom multipart reader is used to get a larger peek buffer (3 MiB).
19 | // https://github.com/nspcc-dev/neofs-http-gw/issues/148
20 | reader := multipart.NewReader(r, boundary)
21 |
22 | for {
23 | part, err := reader.NextPart()
24 | if err != nil {
25 | return nil, err
26 | }
27 |
28 | name := part.FormName()
29 | if name == "" {
30 | l.Debug("ignore part, empty form name")
31 | continue
32 | }
33 |
34 | filename := part.FileName()
35 |
36 | // ignore multipart/form-data values
37 | if filename == "" {
38 | l.Debug("ignore part, empty filename", zap.String("form", name))
39 |
40 | continue
41 | }
42 |
43 | return part, nil
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/uploader/multipart/multipart.go:
--------------------------------------------------------------------------------
1 | // Copyright 2010 The Go Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 | //
5 |
6 | /*
7 | Package multipart implements MIME multipart parsing, as defined in RFC
8 | 2046.
9 |
10 | The implementation is sufficient for HTTP (RFC 2388) and the multipart
11 | bodies generated by popular browsers.
12 | */
13 | package multipart
14 |
15 | import (
16 | "bufio"
17 | "bytes"
18 | "fmt"
19 | "io"
20 | "mime"
21 | "mime/quotedprintable"
22 | "net/textproto"
23 | "strings"
24 | )
25 |
26 | var emptyParams = make(map[string]string)
27 |
28 | // This constant needs to be at least 76 for this package to work correctly.
29 | // This is because \r\n--separator_of_len_70- would fill the buffer and it
30 | // wouldn't be safe to consume a single byte from it.
31 | // This constant is different from the constant in stdlib. The standard value is 4096.
32 | const peekBufferSize = 3 << 20
33 |
34 | // A Part represents a single part in a multipart body.
35 | type Part struct {
36 | // The headers of the body, if any, with the keys canonicalized
37 | // in the same fashion that the Go http.Request headers are.
38 | // For example, "foo-bar" changes case to "Foo-Bar"
39 | Header textproto.MIMEHeader
40 |
41 | mr *Reader
42 |
43 | disposition string
44 | dispositionParams map[string]string
45 |
46 | // r is either a reader directly reading from mr, or it's a
47 | // wrapper around such a reader, decoding the
48 | // Content-Transfer-Encoding
49 | r io.Reader
50 |
51 | n int // known data bytes waiting in mr.bufReader
52 | total int64 // total data bytes read already
53 | err error // error to return when n == 0
54 | readErr error // read error observed from mr.bufReader
55 | }
56 |
57 | // FormName returns the name parameter if p has a Content-Disposition
58 | // of type "form-data". Otherwise it returns the empty string.
59 | func (p *Part) FormName() string {
60 | // See https://tools.ietf.org/html/rfc2183 section 2 for EBNF
61 | // of Content-Disposition value format.
62 | if p.dispositionParams == nil {
63 | p.parseContentDisposition()
64 | }
65 | if p.disposition != "form-data" {
66 | return ""
67 | }
68 | return p.dispositionParams["name"]
69 | }
70 |
71 | // FileName returns the filename parameter of the Part's
72 | // Content-Disposition header.
73 | func (p *Part) FileName() string {
74 | if p.dispositionParams == nil {
75 | p.parseContentDisposition()
76 | }
77 | return p.dispositionParams["filename"]
78 | }
79 |
80 | func (p *Part) parseContentDisposition() {
81 | v := p.Header.Get("Content-Disposition")
82 | var err error
83 | p.disposition, p.dispositionParams, err = mime.ParseMediaType(v)
84 | if err != nil {
85 | p.dispositionParams = emptyParams
86 | }
87 | }
88 |
89 | // NewReader creates a new multipart Reader reading from r using the
90 | // given MIME boundary.
91 | //
92 | // The boundary is usually obtained from the "boundary" parameter of
93 | // the message's "Content-Type" header. Use mime.ParseMediaType to
94 | // parse such headers.
95 | func NewReader(r io.Reader, boundary string) *Reader {
96 | b := []byte("\r\n--" + boundary + "--")
97 | return &Reader{
98 | bufReader: bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize),
99 | nl: b[:2],
100 | nlDashBoundary: b[:len(b)-2],
101 | dashBoundaryDash: b[2:],
102 | dashBoundary: b[2 : len(b)-2],
103 | }
104 | }
105 |
106 | // stickyErrorReader is an io.Reader which never calls Read on its
107 | // underlying Reader once an error has been seen. (the io.Reader
108 | // interface's contract promises nothing about the return values of
109 | // Read calls after an error, yet this package does do multiple Reads
110 | // after error).
111 | type stickyErrorReader struct {
112 | r io.Reader
113 | err error
114 | }
115 |
116 | func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
117 | if r.err != nil {
118 | return 0, r.err
119 | }
120 | n, r.err = r.r.Read(p)
121 | return n, r.err
122 | }
123 |
124 | func newPart(mr *Reader, rawPart bool) (*Part, error) {
125 | bp := &Part{
126 | Header: make(map[string][]string),
127 | mr: mr,
128 | }
129 | if err := bp.populateHeaders(); err != nil {
130 | return nil, err
131 | }
132 | bp.r = partReader{bp}
133 |
134 | // rawPart is used to switch between Part.NextPart and Part.NextRawPart.
135 | if !rawPart {
136 | const cte = "Content-Transfer-Encoding"
137 | if strings.EqualFold(bp.Header.Get(cte), "quoted-printable") {
138 | bp.Header.Del(cte)
139 | bp.r = quotedprintable.NewReader(bp.r)
140 | }
141 | }
142 | return bp, nil
143 | }
144 |
145 | func (p *Part) populateHeaders() error {
146 | r := textproto.NewReader(p.mr.bufReader)
147 | header, err := r.ReadMIMEHeader()
148 | if err == nil {
149 | p.Header = header
150 | }
151 | return err
152 | }
153 |
154 | // Read reads the body of a part, after its headers and before the
155 | // next part (if any) begins.
156 | func (p *Part) Read(d []byte) (n int, err error) {
157 | return p.r.Read(d)
158 | }
159 |
160 | // partReader implements io.Reader by reading raw bytes directly from the
161 | // wrapped *Part, without doing any Transfer-Encoding decoding.
162 | type partReader struct {
163 | p *Part
164 | }
165 |
166 | func (pr partReader) Read(d []byte) (int, error) {
167 | p := pr.p
168 | br := p.mr.bufReader
169 |
170 | // Read into buffer until we identify some data to return,
171 | // or we find a reason to stop (boundary or read error).
172 | for p.n == 0 && p.err == nil {
173 | peek, _ := br.Peek(br.Buffered())
174 | p.n, p.err = scanUntilBoundary(peek, p.mr.dashBoundary, p.mr.nlDashBoundary, p.total, p.readErr)
175 | if p.n == 0 && p.err == nil {
176 | // Force buffered I/O to read more into buffer.
177 | _, p.readErr = br.Peek(len(peek) + 1)
178 | if p.readErr == io.EOF {
179 | p.readErr = io.ErrUnexpectedEOF
180 | }
181 | }
182 | }
183 |
184 | // Read out from "data to return" part of buffer.
185 | if p.n == 0 {
186 | return 0, p.err
187 | }
188 | n := len(d)
189 | if n > p.n {
190 | n = p.n
191 | }
192 | n, _ = br.Read(d[:n])
193 | p.total += int64(n)
194 | p.n -= n
195 | if p.n == 0 {
196 | return n, p.err
197 | }
198 | return n, nil
199 | }
200 |
201 | // scanUntilBoundary scans buf to identify how much of it can be safely
202 | // returned as part of the Part body.
203 | // dashBoundary is "--boundary".
204 | // nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
205 | // The comments below (and the name) assume "\n--boundary", but either is accepted.
206 | // total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
207 | // readErr is the read error, if any, that followed reading the bytes in buf.
208 | // scanUntilBoundary returns the number of data bytes from buf that can be
209 | // returned as part of the Part body and also the error to return (if any)
210 | // once those data bytes are done.
211 | func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
212 | if total == 0 {
213 | // At beginning of body, allow dashBoundary.
214 | if bytes.HasPrefix(buf, dashBoundary) {
215 | switch matchAfterPrefix(buf, dashBoundary, readErr) {
216 | case -1:
217 | return len(dashBoundary), nil
218 | case 0:
219 | return 0, nil
220 | case +1:
221 | return 0, io.EOF
222 | }
223 | }
224 | if bytes.HasPrefix(dashBoundary, buf) {
225 | return 0, readErr
226 | }
227 | }
228 |
229 | // Search for "\n--boundary".
230 | if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
231 | switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
232 | case -1:
233 | return i + len(nlDashBoundary), nil
234 | case 0:
235 | return i, nil
236 | case +1:
237 | return i, io.EOF
238 | }
239 | }
240 | if bytes.HasPrefix(nlDashBoundary, buf) {
241 | return 0, readErr
242 | }
243 |
244 | // Otherwise, anything up to the final \n is not part of the boundary
245 | // and so must be part of the body.
246 | // Also if the section from the final \n onward is not a prefix of the boundary,
247 | // it too must be part of the body.
248 | i := bytes.LastIndexByte(buf, nlDashBoundary[0])
249 | if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
250 | return i, nil
251 | }
252 | return len(buf), readErr
253 | }
254 |
255 | // matchAfterPrefix checks whether buf should be considered to match the boundary.
256 | // The prefix is "--boundary" or "\r\n--boundary" or "\n--boundary",
257 | // and the caller has verified already that bytes.HasPrefix(buf, prefix) is true.
258 | //
259 | // matchAfterPrefix returns +1 if the buffer does match the boundary,
260 | // meaning the prefix is followed by a dash, space, tab, cr, nl, or end of input.
261 | // It returns -1 if the buffer definitely does NOT match the boundary,
262 | // meaning the prefix is followed by some other character.
263 | // For example, "--foobar" does not match "--foo".
264 | // It returns 0 if more input needs to be read to make the decision,
265 | // meaning that len(buf) == len(prefix) and readErr == nil.
266 | func matchAfterPrefix(buf, prefix []byte, readErr error) int {
267 | if len(buf) == len(prefix) {
268 | if readErr != nil {
269 | return +1
270 | }
271 | return 0
272 | }
273 | c := buf[len(prefix)]
274 | if c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '-' {
275 | return +1
276 | }
277 | return -1
278 | }
279 |
280 | func (p *Part) Close() error {
281 | _, _ = io.Copy(io.Discard, p)
282 | return nil
283 | }
284 |
285 | // Reader is an iterator over parts in a MIME multipart body.
286 | // Reader's underlying parser consumes its input as needed. Seeking
287 | // isn't supported.
288 | type Reader struct {
289 | bufReader *bufio.Reader
290 |
291 | currentPart *Part
292 | partsRead int
293 |
294 | nl []byte // "\r\n" or "\n" (set after seeing first boundary line)
295 | nlDashBoundary []byte // nl + "--boundary"
296 | dashBoundaryDash []byte // "--boundary--"
297 | dashBoundary []byte // "--boundary"
298 | }
299 |
300 | // NextPart returns the next part in the multipart or an error.
301 | // When there are no more parts, the error io.EOF is returned.
302 | //
303 | // As a special case, if the "Content-Transfer-Encoding" header
304 | // has a value of "quoted-printable", that header is instead
305 | // hidden and the body is transparently decoded during Read calls.
306 | func (r *Reader) NextPart() (*Part, error) {
307 | return r.nextPart(false)
308 | }
309 |
310 | // NextRawPart returns the next part in the multipart or an error.
311 | // When there are no more parts, the error io.EOF is returned.
312 | //
313 | // Unlike NextPart, it does not have special handling for
314 | // "Content-Transfer-Encoding: quoted-printable".
315 | func (r *Reader) NextRawPart() (*Part, error) {
316 | return r.nextPart(true)
317 | }
318 |
319 | func (r *Reader) nextPart(rawPart bool) (*Part, error) {
320 | if r.currentPart != nil {
321 | r.currentPart.Close()
322 | }
323 | if string(r.dashBoundary) == "--" {
324 | return nil, fmt.Errorf("multipart: boundary is empty")
325 | }
326 | expectNewPart := false
327 | for {
328 | line, err := r.bufReader.ReadSlice('\n')
329 |
330 | if err == io.EOF && r.isFinalBoundary(line) {
331 | // If the buffer ends in "--boundary--" without the
332 | // trailing "\r\n", ReadSlice will return an error
333 | // (since it's missing the '\n'), but this is a valid
334 | // multipart EOF so we need to return io.EOF instead of
335 | // a fmt-wrapped one.
336 | return nil, io.EOF
337 | }
338 | if err != nil {
339 | return nil, fmt.Errorf("multipart: NextPart: %v", err)
340 | }
341 |
342 | if r.isBoundaryDelimiterLine(line) {
343 | r.partsRead++
344 | bp, err := newPart(r, rawPart)
345 | if err != nil {
346 | return nil, err
347 | }
348 | r.currentPart = bp
349 | return bp, nil
350 | }
351 |
352 | if r.isFinalBoundary(line) {
353 | // Expected EOF
354 | return nil, io.EOF
355 | }
356 |
357 | if expectNewPart {
358 | return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
359 | }
360 |
361 | if r.partsRead == 0 {
362 | // skip line
363 | continue
364 | }
365 |
366 | // Consume the "\n" or "\r\n" separator between the
367 | // body of the previous part and the boundary line we
368 | // now expect will follow. (either a new part or the
369 | // end boundary)
370 | if bytes.Equal(line, r.nl) {
371 | expectNewPart = true
372 | continue
373 | }
374 |
375 | return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
376 | }
377 | }
378 |
379 | // isFinalBoundary reports whether line is the final boundary line
380 | // indicating that all parts are over.
381 | // It matches `^--boundary--[ \t]*(\r\n)?$`.
382 | func (r *Reader) isFinalBoundary(line []byte) bool {
383 | if !bytes.HasPrefix(line, r.dashBoundaryDash) {
384 | return false
385 | }
386 | rest := line[len(r.dashBoundaryDash):]
387 | rest = skipLWSPChar(rest)
388 | return len(rest) == 0 || bytes.Equal(rest, r.nl)
389 | }
390 |
391 | func (r *Reader) isBoundaryDelimiterLine(line []byte) (ret bool) {
392 | // https://tools.ietf.org/html/rfc2046#section-5.1
393 | // The boundary delimiter line is then defined as a line
394 | // consisting entirely of two hyphen characters ("-",
395 | // decimal value 45) followed by the boundary parameter
396 | // value from the Content-Type header field, optional linear
397 | // whitespace, and a terminating CRLF.
398 | if !bytes.HasPrefix(line, r.dashBoundary) {
399 | return false
400 | }
401 | rest := line[len(r.dashBoundary):]
402 | rest = skipLWSPChar(rest)
403 |
404 | 	// On the first part, check whether our lines are ending in \n instead of \r\n
405 | // and switch into that mode if so. This is a violation of the spec,
406 | // but occurs in practice.
407 | if r.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' {
408 | r.nl = r.nl[1:]
409 | r.nlDashBoundary = r.nlDashBoundary[1:]
410 | }
411 | return bytes.Equal(rest, r.nl)
412 | }
413 |
414 | // skipLWSPChar returns b with leading spaces and tabs removed.
415 | // RFC 822 defines:
416 | //
417 | // LWSP-char = SPACE / HTAB
418 | func skipLWSPChar(b []byte) []byte {
419 | for len(b) > 0 && (b[0] == ' ' || b[0] == '\t') {
420 | b = b[1:]
421 | }
422 | return b
423 | }
424 |
--------------------------------------------------------------------------------
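Editor's note: a minimal sketch of driving this forked Reader directly, in the same way fetchMultipartFile does. The body and the "xxx" boundary are hypothetical.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/TrueCloudLab/frostfs-http-gw/uploader/multipart"
)

func main() {
	// A tiny hand-written multipart body with a single file part.
	body := "--xxx\r\n" +
		"Content-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\n" +
		"\r\n" +
		"hello\r\n" +
		"--xxx--\r\n"

	r := multipart.NewReader(strings.NewReader(body), "xxx")
	for {
		part, err := r.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		data, _ := io.ReadAll(part)
		fmt.Println(part.FileName(), string(data)) // hello.txt hello
	}
}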
/uploader/multipart_test.go:
--------------------------------------------------------------------------------
1 | package uploader
2 |
3 | import (
4 | "crypto/rand"
5 | "fmt"
6 | "io"
7 | "mime/multipart"
8 | "os"
9 | "testing"
10 |
11 | "github.com/stretchr/testify/require"
12 | "go.uber.org/zap"
13 | )
14 |
15 | func generateRandomFile(size int64) (string, error) {
16 | file, err := os.CreateTemp("", "data")
17 | if err != nil {
18 | return "", err
19 | }
20 |
21 | _, err = io.CopyN(file, rand.Reader, size)
22 | if err != nil {
23 | return "", err
24 | }
25 |
26 | return file.Name(), file.Close()
27 | }
28 |
29 | func BenchmarkAll(b *testing.B) {
30 | fileName, err := generateRandomFile(1024 * 1024 * 256)
31 | require.NoError(b, err)
32 | fmt.Println(fileName)
33 | defer os.Remove(fileName)
34 |
35 | b.Run("bare", func(b *testing.B) {
36 | for i := 0; i < b.N; i++ {
37 | err := bareRead(fileName)
38 | require.NoError(b, err)
39 | }
40 | })
41 |
42 | b.Run("default", func(b *testing.B) {
43 | for i := 0; i < b.N; i++ {
44 | err := defaultMultipart(fileName)
45 | require.NoError(b, err)
46 | }
47 | })
48 |
49 | b.Run("custom", func(b *testing.B) {
50 | for i := 0; i < b.N; i++ {
51 | err := customMultipart(fileName)
52 | require.NoError(b, err)
53 | }
54 | })
55 | }
56 |
57 | func defaultMultipart(filename string) error {
58 | r, bound := multipartFile(filename)
59 |
60 | logger, err := zap.NewProduction()
61 | if err != nil {
62 | return err
63 | }
64 |
65 | file, err := fetchMultipartFileDefault(logger, r, bound)
66 | if err != nil {
67 | return err
68 | }
69 |
70 | _, err = io.Copy(io.Discard, file)
71 | return err
72 | }
73 |
74 | func TestName(t *testing.T) {
75 | fileName, err := generateRandomFile(1024 * 1024 * 256)
76 | require.NoError(t, err)
77 | fmt.Println(fileName)
78 | defer os.Remove(fileName)
79 |
80 | err = defaultMultipart(fileName)
81 | require.NoError(t, err)
82 | }
83 |
84 | func customMultipart(filename string) error {
85 | r, bound := multipartFile(filename)
86 |
87 | logger, err := zap.NewProduction()
88 | if err != nil {
89 | return err
90 | }
91 |
92 | file, err := fetchMultipartFile(logger, r, bound)
93 | if err != nil {
94 | return err
95 | }
96 |
97 | _, err = io.Copy(io.Discard, file)
98 | return err
99 | }
100 |
101 | func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
102 | reader := multipart.NewReader(r, boundary)
103 |
104 | for {
105 | part, err := reader.NextPart()
106 | if err != nil {
107 | return nil, err
108 | }
109 |
110 | name := part.FormName()
111 | if name == "" {
112 | l.Debug("ignore part, empty form name")
113 | continue
114 | }
115 |
116 | filename := part.FileName()
117 |
118 | // ignore multipart/form-data values
119 | if filename == "" {
120 | l.Debug("ignore part, empty filename", zap.String("form", name))
121 |
122 | continue
123 | }
124 |
125 | return part, nil
126 | }
127 | }
128 |
129 | func bareRead(filename string) error {
130 | r, _ := multipartFile(filename)
131 |
132 | _, err := io.Copy(io.Discard, r)
133 | return err
134 | }
135 |
136 | func multipartFile(filename string) (*io.PipeReader, string) {
137 | r, w := io.Pipe()
138 | m := multipart.NewWriter(w)
139 | go func() {
140 | defer w.Close()
141 | defer m.Close()
142 | part, err := m.CreateFormFile("myFile", "foo.txt")
143 | if err != nil {
144 | fmt.Println(err)
145 | return
146 | }
147 |
148 | file, err := os.Open(filename)
149 | if err != nil {
150 | fmt.Println(err)
151 | return
152 | }
153 | defer file.Close()
154 | if _, err = io.Copy(part, file); err != nil {
155 | fmt.Println(err)
156 | return
157 | }
158 | }()
159 |
160 | return r, m.Boundary()
161 | }
162 |
--------------------------------------------------------------------------------
/uploader/upload.go:
--------------------------------------------------------------------------------
1 | package uploader
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "strconv"
10 | "time"
11 |
12 | "github.com/TrueCloudLab/frostfs-http-gw/resolver"
13 | "github.com/TrueCloudLab/frostfs-http-gw/response"
14 | "github.com/TrueCloudLab/frostfs-http-gw/tokens"
15 | "github.com/TrueCloudLab/frostfs-http-gw/utils"
16 | "github.com/TrueCloudLab/frostfs-sdk-go/bearer"
17 | "github.com/TrueCloudLab/frostfs-sdk-go/object"
18 | oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
19 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
20 | "github.com/TrueCloudLab/frostfs-sdk-go/user"
21 | "github.com/valyala/fasthttp"
22 | "go.uber.org/atomic"
23 | "go.uber.org/zap"
24 | )
25 |
26 | const (
27 | jsonHeader = "application/json; charset=UTF-8"
28 | drainBufSize = 4096
29 | )
30 |
31 | // Uploader is an upload request handler.
32 | type Uploader struct {
33 | appCtx context.Context
34 | log *zap.Logger
35 | pool *pool.Pool
36 | ownerID *user.ID
37 | settings *Settings
38 | containerResolver *resolver.ContainerResolver
39 | }
40 |
41 | type epochDurations struct {
42 | currentEpoch uint64
43 | msPerBlock int64
44 | blockPerEpoch uint64
45 | }
46 |
47 | // Settings stores reloading parameters, so it has to provide atomic getters and setters.
48 | type Settings struct {
49 | defaultTimestamp atomic.Bool
50 | }
51 |
52 | func (s *Settings) DefaultTimestamp() bool {
53 | return s.defaultTimestamp.Load()
54 | }
55 |
56 | func (s *Settings) SetDefaultTimestamp(val bool) {
57 | s.defaultTimestamp.Store(val)
58 | }
59 |
60 | // New creates a new Uploader using specified logger, connection pool and
61 | // other options.
62 | func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Uploader {
63 | return &Uploader{
64 | appCtx: ctx,
65 | log: params.Logger,
66 | pool: params.Pool,
67 | ownerID: params.Owner,
68 | settings: settings,
69 | containerResolver: params.Resolver,
70 | }
71 | }
72 |
73 | // Upload handles multipart upload request.
74 | func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
75 | var (
76 | file MultipartFile
77 | idObj oid.ID
78 | addr oid.Address
79 | scid, _ = c.UserValue("cid").(string)
80 | log = u.log.With(zap.String("cid", scid))
81 | bodyStream = c.RequestBodyStream()
82 | drainBuf = make([]byte, drainBufSize)
83 | )
84 |
85 | if err := tokens.StoreBearerToken(c); err != nil {
86 | log.Error("could not fetch bearer token", zap.Error(err))
87 | response.Error(c, "could not fetch bearer token", fasthttp.StatusBadRequest)
88 | return
89 | }
90 |
91 | idCnr, err := utils.GetContainerID(u.appCtx, scid, u.containerResolver)
92 | if err != nil {
93 | log.Error("wrong container id", zap.Error(err))
94 | response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
95 | return
96 | }
97 |
98 | defer func() {
99 | 		// If the temporary reader can be closed, close it.
100 | if file == nil {
101 | return
102 | }
103 | err := file.Close()
104 | log.Debug(
105 | "close temporary multipart/form file",
106 | zap.Stringer("address", addr),
107 | zap.String("filename", file.FileName()),
108 | zap.Error(err),
109 | )
110 | }()
111 | boundary := string(c.Request.Header.MultipartFormBoundary())
112 | if file, err = fetchMultipartFile(u.log, bodyStream, boundary); err != nil {
113 | log.Error("could not receive multipart/form", zap.Error(err))
114 | response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
115 | return
116 | }
117 | filtered, err := filterHeaders(u.log, &c.Request.Header)
118 | if err != nil {
119 | log.Error("could not process headers", zap.Error(err))
120 | response.Error(c, err.Error(), fasthttp.StatusBadRequest)
121 | return
122 | }
123 | if needParseExpiration(filtered) {
124 | epochDuration, err := getEpochDurations(c, u.pool)
125 | if err != nil {
126 | log.Error("could not get epoch durations from network info", zap.Error(err))
127 | response.Error(c, "could not get epoch durations from network info: "+err.Error(), fasthttp.StatusBadRequest)
128 | return
129 | }
130 |
131 | now := time.Now()
132 | if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
133 | if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
134 | log.Warn("could not parse client time", zap.String("Date header", string(rawHeader)), zap.Error(err))
135 | } else {
136 | now = parsed
137 | }
138 | }
139 |
140 | if err = prepareExpirationHeader(filtered, epochDuration, now); err != nil {
141 | log.Error("could not parse expiration header", zap.Error(err))
142 | response.Error(c, "could not parse expiration header: "+err.Error(), fasthttp.StatusBadRequest)
143 | return
144 | }
145 | }
146 |
147 | attributes := make([]object.Attribute, 0, len(filtered))
148 | // prepares attributes from filtered headers
149 | for key, val := range filtered {
150 | attribute := object.NewAttribute()
151 | attribute.SetKey(key)
152 | attribute.SetValue(val)
153 | attributes = append(attributes, *attribute)
154 | }
155 | 	// sets the FileName attribute if it wasn't set from the headers
156 | if _, ok := filtered[object.AttributeFileName]; !ok {
157 | filename := object.NewAttribute()
158 | filename.SetKey(object.AttributeFileName)
159 | filename.SetValue(file.FileName())
160 | attributes = append(attributes, *filename)
161 | }
162 | 	// sets the Timestamp attribute if it wasn't set from the headers and default timestamps are enabled in settings
163 | if _, ok := filtered[object.AttributeTimestamp]; !ok && u.settings.DefaultTimestamp() {
164 | timestamp := object.NewAttribute()
165 | timestamp.SetKey(object.AttributeTimestamp)
166 | timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
167 | attributes = append(attributes, *timestamp)
168 | }
169 | id, bt := u.fetchOwnerAndBearerToken(c)
170 |
171 | obj := object.New()
172 | obj.SetContainerID(*idCnr)
173 | obj.SetOwnerID(id)
174 | obj.SetAttributes(attributes...)
175 |
176 | var prm pool.PrmObjectPut
177 | prm.SetHeader(*obj)
178 | prm.SetPayload(file)
179 |
180 | if bt != nil {
181 | prm.UseBearer(*bt)
182 | }
183 |
184 | if idObj, err = u.pool.PutObject(u.appCtx, prm); err != nil {
185 | log.Error("could not store file in frostfs", zap.Error(err))
186 | response.Error(c, "could not store file in frostfs: "+err.Error(), fasthttp.StatusBadRequest)
187 | return
188 | }
189 |
190 | addr.SetObject(idObj)
191 | addr.SetContainer(*idCnr)
192 |
193 | // Try to return the response, otherwise, if something went wrong, throw an error.
194 | if err = newPutResponse(addr).encode(c); err != nil {
195 | log.Error("could not encode response", zap.Error(err))
196 | response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
197 |
198 | return
199 | }
200 | 	// A multipart body can contain more than one part, which we ignore at
201 | 	// the moment. Also, when dealing with chunked encoding
202 | // the last zero-length chunk might be left unread (because multipart
203 | // reader only cares about its boundary and doesn't look further) and
204 | // it will be (erroneously) interpreted as the start of the next
205 | // pipelined header. Thus we need to drain the body buffer.
206 | for {
207 | _, err = bodyStream.Read(drainBuf)
208 | if err == io.EOF || err == io.ErrUnexpectedEOF {
209 | break
210 | }
211 | }
212 | // Report status code and content type.
213 | c.Response.SetStatusCode(fasthttp.StatusOK)
214 | c.Response.Header.SetContentType(jsonHeader)
215 | }
216 |
217 | func (u *Uploader) fetchOwnerAndBearerToken(ctx context.Context) (*user.ID, *bearer.Token) {
218 | if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
219 | issuer := bearer.ResolveIssuer(*tkn)
220 | return &issuer, tkn
221 | }
222 | return u.ownerID, nil
223 | }
224 |
225 | type putResponse struct {
226 | ObjectID string `json:"object_id"`
227 | ContainerID string `json:"container_id"`
228 | }
229 |
230 | func newPutResponse(addr oid.Address) *putResponse {
231 | return &putResponse{
232 | ObjectID: addr.Object().EncodeToString(),
233 | ContainerID: addr.Container().EncodeToString(),
234 | }
235 | }
236 |
237 | func (pr *putResponse) encode(w io.Writer) error {
238 | enc := json.NewEncoder(w)
239 | enc.SetIndent("", "\t")
240 | return enc.Encode(pr)
241 | }
242 |
243 | func getEpochDurations(ctx context.Context, p *pool.Pool) (*epochDurations, error) {
244 | networkInfo, err := p.NetworkInfo(ctx)
245 | if err != nil {
246 | return nil, err
247 | }
248 |
249 | res := &epochDurations{
250 | currentEpoch: networkInfo.CurrentEpoch(),
251 | msPerBlock: networkInfo.MsPerBlock(),
252 | blockPerEpoch: networkInfo.EpochDuration(),
253 | }
254 |
255 | if res.blockPerEpoch == 0 {
256 | return nil, fmt.Errorf("EpochDuration is empty")
257 | }
258 | return res, nil
259 | }
260 |
261 | func needParseExpiration(headers map[string]string) bool {
262 | _, ok1 := headers[utils.ExpirationDurationAttr]
263 | _, ok2 := headers[utils.ExpirationRFC3339Attr]
264 | _, ok3 := headers[utils.ExpirationTimestampAttr]
265 | return ok1 || ok2 || ok3
266 | }
267 |
--------------------------------------------------------------------------------
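Editor's note: a minimal client sketch for the Upload handler above. The gateway address, port and the "/upload/<container>" route are assumptions (the route is registered elsewhere); the handler itself only needs a multipart body and optional X-Attribute-* headers, and replies with the JSON putResponse shown above.

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	// Hypothetical gateway address and container name.
	const gateway = "http://localhost:8082"
	const container = "mycontainer"

	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, err := w.CreateFormFile("file", "hello.txt")
	if err != nil {
		panic(err)
	}
	if _, err = part.Write([]byte("hello")); err != nil {
		panic(err)
	}
	w.Close()

	req, err := http.NewRequest(http.MethodPost, gateway+"/upload/"+container, &body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("X-Attribute-MyAttribute", "value")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success the response body is JSON with object_id and container_id.
	fmt.Println(resp.Status)
}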
/utils/attributes.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | const (
4 | UserAttributeHeaderPrefix = "X-Attribute-"
5 | SystemAttributePrefix = "__NEOFS__"
6 |
7 | ExpirationDurationAttr = SystemAttributePrefix + "EXPIRATION_DURATION"
8 | ExpirationTimestampAttr = SystemAttributePrefix + "EXPIRATION_TIMESTAMP"
9 | ExpirationRFC3339Attr = SystemAttributePrefix + "EXPIRATION_RFC3339"
10 | )
11 |
--------------------------------------------------------------------------------
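Editor's note: a small sketch printing the full attribute keys defined above. Clients set the expiration attributes through X-Attribute-Neofs-* request headers, which uploader/filter.go translates into this __NEOFS__* form before they are stored on the object.

package main

import (
	"fmt"

	"github.com/TrueCloudLab/frostfs-http-gw/utils"
)

func main() {
	fmt.Println(utils.ExpirationDurationAttr)  // __NEOFS__EXPIRATION_DURATION
	fmt.Println(utils.ExpirationTimestampAttr) // __NEOFS__EXPIRATION_TIMESTAMP
	fmt.Println(utils.ExpirationRFC3339Attr)   // __NEOFS__EXPIRATION_RFC3339
}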
/utils/params.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "github.com/TrueCloudLab/frostfs-http-gw/resolver"
5 | "github.com/TrueCloudLab/frostfs-sdk-go/pool"
6 | "github.com/TrueCloudLab/frostfs-sdk-go/user"
7 | "go.uber.org/zap"
8 | )
9 |
10 | type AppParams struct {
11 | Logger *zap.Logger
12 | Pool *pool.Pool
13 | Owner *user.ID
14 | Resolver *resolver.ContainerResolver
15 | }
16 |
--------------------------------------------------------------------------------
/utils/util.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/TrueCloudLab/frostfs-http-gw/resolver"
7 | cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
8 | )
9 |
10 | // GetContainerID decodes the container ID; if it is not a valid container ID,
11 | // it tries to resolve the name using the provided resolver.
12 | func GetContainerID(ctx context.Context, containerID string, resolver *resolver.ContainerResolver) (*cid.ID, error) {
13 | cnrID := new(cid.ID)
14 | err := cnrID.DecodeString(containerID)
15 | if err != nil {
16 | cnrID, err = resolver.Resolve(ctx, containerID)
17 | }
18 | return cnrID, err
19 | }
20 |
--------------------------------------------------------------------------------
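Editor's note: a minimal sketch of the fallback logic in GetContainerID. A syntactically valid CID is decoded directly; anything else (such as the hypothetical "mycontainer" name below) fails decoding and would be handed to the container resolver, which is omitted here because it requires a live FrostFS connection.

package main

import (
	"fmt"

	cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
)

func main() {
	id := new(cid.ID)
	if err := id.DecodeString("mycontainer"); err != nil {
		// GetContainerID would now call resolver.Resolve(ctx, "mycontainer"),
		// treating the string as a container name.
		fmt.Println("not a CID, falling back to name resolution:", err)
	}
}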