├── .github └── workflows │ └── action.yml ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── README.zh.md ├── TODO.md ├── azure-pipelines.yml ├── config.nims ├── doc ├── en │ └── articles │ │ └── client_mostly_single_buffer.md └── zh │ ├── articles │ ├── client_mostly_single_buffer.md │ └── http_design_strategy.md │ └── code │ ├── backup │ ├── client_msb_lab.nim │ └── headerfield.nim │ ├── netkit.nim │ └── netkit │ ├── buffer.nim │ ├── buffer │ ├── circular.nim │ ├── constants.nim │ └── vector.nim │ ├── http.nim │ ├── http │ ├── chunk.nim │ ├── connection.nim │ ├── cookies.nim │ ├── exception.nim │ ├── header.nim │ ├── headerfield.nim │ ├── httpmethod.nim │ ├── limits.nim │ ├── metadata.nim │ ├── parser.nim │ ├── reader.nim │ ├── server.nim │ ├── spec.nim │ ├── status.nim │ ├── uri.nim │ ├── version.nim │ └── writer.nim │ ├── locks.nim │ └── misc.nim ├── netkit.nim ├── netkit.nimble ├── netkit ├── buffer.nim ├── buffer │ ├── circular.nim │ ├── constants.nim │ └── vector.nim ├── http.nim ├── http │ ├── chunk.nim │ ├── connection.nim │ ├── cookies.nim │ ├── exception.nim │ ├── header.nim │ ├── headerfield.nim │ ├── httpmethod.nim │ ├── limits.nim │ ├── metadata.nim │ ├── parser.nim │ ├── reader.nim │ ├── server.nim │ ├── spec.nim │ ├── status.nim │ ├── uri.nim │ ├── version.nim │ └── writer.nim ├── locks.nim └── misc.nim ├── nim.cfg ├── tests ├── buffer │ └── tcircular_buffer.nim ├── http │ ├── tcookie.nim │ ├── thttpmethod.nim │ ├── tspec.nim │ ├── tstatus.nim │ └── tversion.nim ├── server │ ├── thttp_server.nim │ └── thttp_server.nim.cfg └── test.nim └── tools └── docplus ├── .gitignore ├── dochack.js ├── package.json └── polish.js /.github/workflows/action.yml: -------------------------------------------------------------------------------- 1 | name: Test Netkit 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | test_windows: 10 | runs-on: windows-latest 11 | strategy: 12 | matrix: 13 | version: 14 | - stable 15 | steps: 16 | - uses: actions/checkout@v1 17 | - uses: jiro4989/setup-nim-action@v1.0.1 18 | with: 19 | nim-version: ${{ matrix.version }} 20 | - name: Install Packages 21 | run: nimble install -y 22 | - name: Test command 23 | run: nimble test 24 | 25 | test_ubuntu: 26 | runs-on: ubuntu-latest 27 | strategy: 28 | matrix: 29 | version: 30 | - stable 31 | steps: 32 | - uses: actions/checkout@v1 33 | - uses: jiro4989/setup-nim-action@v1.0.1 34 | with: 35 | nim-version: ${{ matrix.version }} 36 | - name: Install Packages 37 | run: nimble install -y 38 | - name: Test command 39 | run: nimble test 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | build/ 3 | out/ 4 | .vscode 5 | testresults 6 | nimcache 7 | *.exe 8 | megatest.nim 9 | outputGotten.txt 10 | htmldocs -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: c 2 | 3 | cache: ccache 4 | cache: 5 | directories: 6 | - .cache 7 | 8 | matrix: 9 | include: 10 | # Build and test against the master (stable) and devel branches of Nim 11 | - os: linux 12 | env: CHANNEL=stable 13 | compiler: gcc 14 | 15 | # On OSX we only test against clang (gcc is mapped to clang by default) 16 | - os: osx 17 | env: CHANNEL=stable 18 | compiler: clang 19 | 20 | # blocked by https://github.com/dom96/choosenim/pull/201 21 | # - os: windows 22 | # env: 
CHANNEL=stable 23 | # compiler: gcc 24 | 25 | 26 | allow_failures: 27 | # Ignore failures when building against the devel Nim branch 28 | # Also ignore OSX, due to very long build queue 29 | - os: osx 30 | fast_finish: true 31 | 32 | install: 33 | - export CHOOSENIM_NO_ANALYTICS=1 34 | - curl https://nim-lang.org/choosenim/init.sh -sSf > init.sh 35 | - sh init.sh -y 36 | - export PATH=~/.nimble/bin:$PATH 37 | - echo "export PATH=~/.nimble/bin:$PATH" >> ~/.profile 38 | - choosenim $CHANNEL 39 | 40 | script: 41 | - nimble test 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Wang Tong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Netkit 2 | ========== 3 | 4 | [![Build Status](https://travis-ci.org/iocrate/netkit.svg?branch=master)](https://travis-ci.org/iocrate/netkit) 5 | [![Build Status](https://dev.azure.com/iocrate/netkit/_apis/build/status/iocrate.netkit?branchName=master)](https://dev.azure.com/iocrate/netkit/_build/latest?definitionId=1&branchName=master) 6 | 7 | Netkit hopes to serve as a versatile network development kit, providing tools commonly used in network programming. Netkit should work out of the box, and be stable and secure. It contains a number of commonly used network programming tools, such as TCP, UDP, TLS, HTTP, HTTPS, WebSocket and related utilities. 8 | 9 | Netkit is not intended to be a high-level productivity development tool, but rather a reliable and efficient network infrastructure. Netkit consists of several submodules, each of which provides some network tools. 10 | 11 | **Now, Netkit is under active development.** 12 | 13 | - [Documentation - BTW: temporary, requires a more friendly homepage](https://iocrate.github.io/netkit.html) 14 | - [Documentation zh - BTW: temporary, requires a more friendly homepage](https://iocrate.github.io/zh/netkit.html) 15 | 16 | A new IO engine, inspired by Netty, with a (selector) loop pool running in multi-threaded non-blocking mode, is being developed in the **devel** branch. Indeed, we are no longer satisfied with the IO engine in the standard library. 17 | 18 | Run Test 19 | --------- 20 | 21 | There is a script that automatically runs tests. 
Check config.nims for details. ``$ nim test -d:modules=`` tests the specified file; for example, ``$ nim test -d:modules=tbuffer`` tests the file **tests/tbuffer.nim**. ``$ nimble test`` tests all test files in the **tests** directory. 22 | 23 | Make Documentation 24 | ------------------- 25 | 26 | There is a script that automatically generates documentation. Check config.nims for details. ``$ nim docs -d:lang=en`` generates the English documentation from the source code. ``$ nim docs -d:lang=zh`` generates the Chinese version of the documentation. ``$ nim docs`` generates both the English and Chinese versions of the documentation. 27 | 28 | The code comments are written in English. The Chinese version of these comments is placed in ``${projectDir}/doc/zh/code``. 29 | 30 | TODO List 31 | ----------------------- 32 | 33 | - [ ] IO Engine - Event Loop Pool; Multi-thread mode; Non-blocking socket, pipe; Blocking regular file; 34 | - [x] buffer 35 | - [x] circular 36 | - [x] vector 37 | - [ ] tcp 38 | - [ ] udp 39 | - [ ] http 40 | - [x] limits 41 | - [x] exception 42 | - [x] spec 43 | - [x] httpmethod 44 | - [x] version 45 | - [x] status 46 | - [x] headerfield 47 | - [x] header 48 | - [x] chunk 49 | - [x] metadata 50 | - [x] cookie 51 | - [x] parser 52 | - [x] connection 53 | - [x] reader 54 | - [x] writer 55 | - [x] server 56 | - [ ] client 57 | - [ ] clientpool 58 | - [ ] websocket 59 | - [ ] Write a documentation home page and provide friendlier documentation management. 60 | - [ ] Enhance docpolisher: add a GitHub link to each document page, plus links back to the previous page and the home page. 61 | 62 | Contributing to Netkit 63 | ----------------------- 64 | 65 | - Write and polish more Chinese and English documentation 66 | - Add stricter unit tests 67 | - Add benchmarks or stress tests 68 | - Add code to support new features 69 | - Fix bugs 70 | - Fix errors in the documentation 71 | 72 | A little demonstration 73 | ----------------------- 74 | 75 | Streaming all your IO!
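Before the full test-style demo below, here is a condensed sketch of the same server API. Treat it as illustrative only: every call it uses (``newAsyncHttpServer``, ``onRequest``, ``read``, ``write``, ``writeEnd``, ``serve``) appears in the demo that follows, but the imports are assumptions, since the demo omits them.

```nim
# Condensed, hypothetical sketch of the demo below: read one buffer-sized
# piece of the request body and stream it straight back to the client.
import asyncdispatch          # async/await, asyncCheck, runForever, Port
import netkit/http            # assumed umbrella import for the HTTP server API

proc main() {.async.} =
  var server = newAsyncHttpServer()
  server.onRequest = proc (req: ServerRequest, res: ServerResponse) {.async.} =
    let chunk = await req.read()                               # one piece of the body
    await res.write(Http200, {"Content-Length": $chunk.len})   # status line + headers
    await res.write(chunk)                                     # body
    res.writeEnd()                                             # signal end of response
  await server.serve(Port(8001), "127.0.0.1")

asyncCheck main()
runForever()
```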
76 | 77 | ```nim 78 | var server: AsyncHttpServer 79 | 80 | proc serve() {.async.} = 81 | server = newAsyncHttpServer() 82 | 83 | server.onRequest = proc (req: ServerRequest, res: ServerResponse) {.async.} = 84 | try: 85 | var data = "" 86 | 87 | let r1 = req.read() 88 | let r2 = req.read() 89 | let r3 = req.read() 90 | let r4 = req.read() 91 | 92 | let s4 = await r4 93 | let s3 = await r3 94 | let s1 = await r1 95 | let s2 = await r2 96 | 97 | check: 98 | # thttp_server.nim.cfg should include: 99 | # 100 | # --define:BufferSize=16 101 | s1.len == 16 102 | s2.len == 16 103 | s3.len == 16 104 | s4.len == 16 105 | 106 | s1 == "foobar01foobar02" 107 | s2 == "foobar03foobar04" 108 | s3 == "foobar05foobar06" 109 | s4 == "foobar07foobar08" 110 | 111 | data.add(s1) 112 | data.add(s2) 113 | data.add(s3) 114 | data.add(s4) 115 | 116 | await res.write(Http200, { 117 | "Content-Length": $data.len 118 | }) 119 | var i = 0 120 | while i < data.len: 121 | await res.write(data[i..min(i+7, data.len-1)]) 122 | i.inc(8) 123 | res.writeEnd() 124 | except ReadAbortedError: 125 | echo "Got ReadAbortedError: ", getCurrentExceptionMsg() 126 | except WriteAbortedError: 127 | echo "Got WriteAbortedError: ", getCurrentExceptionMsg() 128 | except Exception: 129 | echo "Got Exception: ", getCurrentExceptionMsg() 130 | 131 | await server.serve(Port(8001), "127.0.0.1") 132 | 133 | 134 | proc request() {.async.} = 135 | let client = await asyncnet.dial("127.0.0.1", Port(8001)) 136 | await client.send(""" 137 | GET /iocrate/netkit HTTP/1.1 138 | Host: iocrate.com 139 | Content-Length: 64 140 | 141 | foobar01foobar02foobar03foobar04foobar05foobar06foobar07foobar08""") 142 | let statusLine = await client.recvLine() 143 | let contentLenLine = await client.recvLine() 144 | let crlfLine = await client.recvLine() 145 | let body = await client.recv(64) 146 | check: 147 | statusLine == "HTTP/1.1 200 OK" 148 | contentLenLine == "content-length: 64" 149 | crlfLine == "\r\L" 150 | body == "foobar01foobar02foobar03foobar04foobar05foobar06foobar07foobar08" 151 | client.close() 152 | 153 | 154 | asyncCheck serve() 155 | waitFor sleepAsync(10) 156 | waitFor request() 157 | server.close() 158 | ``` 159 | -------------------------------------------------------------------------------- /README.zh.md: -------------------------------------------------------------------------------- 1 | Netkit 2 | ========== 3 | 4 | [![Build Status](https://travis-ci.org/iocrate/netkit.svg?branch=master)](https://travis-ci.org/iocrate/netkit) 5 | [![Build Status](https://dev.azure.com/iocrate/netkit/_apis/build/status/iocrate.netkit?branchName=master)](https://dev.azure.com/iocrate/netkit/_build/latest?definitionId=1&branchName=master) 6 | 7 | Netkit 希望作为一个多才多艺的网络开发基础套件,提供网络编程常用的工具。Netkit 应该是开箱即用并且稳定、安全的。Netkit 包含了大多数常用的网络编程工具,比如 TCP、UDP、TLS、HTTP、HTTPS、WebSocket 以及相关的一些实用工具。 8 | 9 | Netkit 不打算成为高阶生产力开发工具,而是作为一个可靠效率的基础网络设施。Netkit 由多个子模块组成,每个子模块提供了一些网络工具。 10 | 11 | **这个软件包正在积极开发中。** 12 | 13 | - [文档 (英文) - PS:临时的,需要更友好的主页](https://iocrate.github.io/netkit.html) 14 | - [文档 (中文) - PS:临时的,需要更友好的主页](https://iocrate.github.io/zh/netkit.html) 15 | 16 | 运行测试 17 | --------- 18 | 19 | 软件包提供了一个自动测试的脚本,查看 config.nims 了解详情。``$ nim test -d:modules=<测试文件名>`` 可以测试指定的文件,比如 ``$ nim test -d:modules=tbuffer`` 将测试 tests/tbuffer.nim 文件。``$ nimble test`` 将会测试所有 tests 目录内的测试文件。 20 | 21 | 制作文档 22 | --------- 23 | 24 | 软件包提供了一个自动制作文档的脚本,查看 config.nims 了解详情。``$ nim docs -d:lang=en`` 制作源代码的文档,即英文文档; ``$ nim docs -d:lang=zh`` 制作源代码的中文文档;``$ nim docs`` 制作源代码的中文文档和英文文档。 25 | 26 | 
源代码的文档以英文书写。源代码的中文版文档放置在 ``${projectDir}/doc/zh/code`` 目录内。 27 | 28 | 开发列表 29 | --------- 30 | 31 | - [x] buffer 32 | - [x] circular 33 | - [x] vector 34 | - [ ] tcp 35 | - [ ] udp 36 | - [ ] http 37 | - [x] limits 38 | - [x] exception 39 | - [x] spec 40 | - [x] httpmethod 41 | - [x] version 42 | - [x] status 43 | - [x] headerfield 44 | - [x] header 45 | - [x] chunk 46 | - [x] metadata 47 | - [x] cookie 48 | - [x] parser 49 | - [x] connection 50 | - [x] reader 51 | - [x] writer 52 | - [x] server 53 | - [ ] client 54 | - [ ] clientpool 55 | - [ ] websocket 56 | - [ ] 编写文档主页,提供更加友好的文档管理 57 | - [ ] 增强 docpolisher 的功能,为文档添加 github 链接和返回上一页、返回主页的功能 58 | 59 | 贡献项目 60 | ----------- 61 | 62 | - 编写和制作更多的中文、英文文档 63 | - 添加更严格的单元测试 64 | - 添加基准测试或者压力测试 65 | - 添加新的代码以支持新的功能 66 | - 修复 bugs 67 | - 修复文档错误 68 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | 2 | 2020-04-09 3 | 4 | - [x] 添加中文文档目录 doc/source_cn,中文注释写在该目录内。该目录内的文件对应源代码目录 netkit/ 5 | 内的文件,翻译后的注释追加到源代码文件 6 | - [x] 修订 netkit/buffer/circular 模块,使得 CircularBuffer API 更加完善和稳定 7 | - [x] 修订 netkit/buffer/circular 模块,使得 MarkableCircularBuffer API 更加完善和稳定 8 | - [x] 移动各源码文件的中文注释到中文文档目录 doc/source_cn 9 | - [x] 添加异步锁模块 locks 10 | - [x] 使用异步锁重写 Request 11 | - [x] 优化 HTTP Server Request 的读操作 12 | - [x] 优化 HTTP Server Request 的写操作 13 | - [x] 考虑统一抽象编码解码相关的内容,比如 chunked 解码、编码;HTTP version、method HTTP header 14 | 编码解码;等等 15 | - [x] 考虑 socket recv/write 异常如何处理,是否关闭连接 16 | - [x] 整理 HTTP Server 源码文件 17 | - [x] 添加 chunk Trailer 支持 18 | - [x] 添加 chunk Extensions 支持 19 | - [x] 优化 HTTP chunked 解码和编码 20 | - [x] 添加 HTTP 服务器单元测试,包含多种规则和不规则请求的模拟测试 21 | - [ ] 添加 HTTP server benchmark tests 22 | - [ ] 优化 write(statusCode, header) 和 write(data),在 benchmark 中影响性能达到 6 倍 --> 23 | 考虑将 statusCode, header 和第一块数据合并到一个缓冲区发送 24 | - [ ] 修复 parseSingleRule, parseMultiRule 25 | - [ ] 4 个线程,1000 个连接,30 秒持续请求的吞吐量测试,以优化 asyncdispatcher 的 io 26 | - [ ] response.writeEnd 支持 Connection: keepalive 控制 27 | - [ ] 添加 HTTP server 多线程支持 28 | - [ ] 添加 HTTP 客户端和 HTTP 客户端连接池 29 | - [ ] 修订各源码文件留下的 TODOs 30 | - [ ] 考虑使用 {.noInit.} 优化已经写的 procs iterators vars lets 31 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | strategy: 2 | maxParallel: 10 3 | matrix: 4 | Windows_stable_64bit: 5 | VM: 'windows-latest' 6 | UCPU: amd64 7 | CHANNEL: stable 8 | TEST_LANG: c 9 | Windows_devel_64bit: 10 | VM: 'windows-latest' 11 | UCPU: amd64 12 | CHANNEL: devel 13 | TEST_LANG: c 14 | Windows_cpp_devel_64bit: 15 | VM: 'windows-latest' 16 | UCPU: amd64 17 | CHANNEL: devel 18 | TEST_LANG: cpp 19 | Linux_stable_64bit: 20 | VM: 'ubuntu-latest' 21 | UCPU: amd64 22 | CHANNEL: stable 23 | TEST_LANG: c 24 | Linux_devel_64bit: 25 | VM: 'ubuntu-latest' 26 | UCPU: amd64 27 | CHANNEL: devel 28 | TEST_LANG: c 29 | Linux_cpp_devel_64bit: 30 | VM: 'ubuntu-latest' 31 | UCPU: amd64 32 | CHANNEL: devel 33 | TEST_LANG: cpp 34 | # MacOS_stable_64bit: 35 | # VM: 'macOS-latest' 36 | # UCPU: amd64 37 | # CHANNEL: stable 38 | # TEST_LANG: c 39 | # MacOS_devel_64bit: 40 | # VM: 'macOS-latest' 41 | # UCPU: amd64 42 | # CHANNEL: devel 43 | # TEST_LANG: c 44 | pool: 45 | vmImage: $(VM) 46 | 47 | steps: 48 | - task: CacheBeta@1 49 | displayName: 'cache Nim binaries' 50 | inputs: 51 | key: NimBinaries | $(Agent.OS) | $(CHANNEL) | $(UCPU) 52 | path: NimBinaries 53 | 54 | - task: 
CacheBeta@1 55 | displayName: 'cache MinGW-w64' 56 | inputs: 57 | key: mingwCache | 8_1_0 | $(UCPU) 58 | path: mingwCache 59 | condition: eq(variables['Agent.OS'], 'Windows_NT') 60 | 61 | - powershell: | 62 | Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem' -Name 'LongPathsEnabled' -Value 1 63 | displayName: 'long path support' 64 | condition: eq(variables['Agent.OS'], 'Windows_NT') 65 | - bash: | 66 | echo "PATH=${PATH}" 67 | set -e 68 | echo "Installing MinGW-w64" 69 | if [[ $UCPU == "i686" ]]; then 70 | MINGW_FILE="i686-8.1.0-release-posix-dwarf-rt_v6-rev0.7z" 71 | MINGW_URL="https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/8.1.0/threads-posix/dwarf/${MINGW_FILE}" 72 | MINGW_DIR="mingw32" 73 | else 74 | MINGW_FILE="x86_64-8.1.0-release-posix-seh-rt_v6-rev0.7z" 75 | MINGW_URL="https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/8.1.0/threads-posix/seh/${MINGW_FILE}" 76 | MINGW_DIR="mingw64" 77 | fi 78 | mkdir -p mingwCache 79 | pushd mingwCache 80 | if [[ ! -e "$MINGW_FILE" ]]; then 81 | rm -f *.7z 82 | curl -OLsS "$MINGW_URL" 83 | fi 84 | 7z x -y -bd "$MINGW_FILE" >/dev/null 85 | mkdir -p /c/custom 86 | mv "$MINGW_DIR" /c/custom/ 87 | popd 88 | # Workaround https://developercommunity.visualstudio.com/content/problem/891929/windows-2019-cygheap-base-mismatch-detected-git-ba.html 89 | echo "##vso[task.prependpath]/usr/bin" 90 | echo "##vso[task.prependpath]/mingw64/bin" 91 | echo "##vso[task.setvariable variable=MINGW_DIR;]$MINGW_DIR" 92 | displayName: 'Install dependencies (Windows)' 93 | condition: eq(variables['Agent.OS'], 'Windows_NT') 94 | - powershell: | 95 | # export custom mingw PATH to other tasks 96 | echo "##vso[task.prependpath]c:\custom\$(MINGW_DIR)\bin" 97 | displayName: 'Mingw PATH (Windows)' 98 | condition: eq(variables['Agent.OS'], 'Windows_NT') 99 | - bash: | 100 | echo "PATH=${PATH}" 101 | export ncpu= 102 | case '$(Agent.OS)' in 103 | 'Linux') 104 | ncpu=$(nproc) 105 | ;; 106 | 'Darwin') 107 | ncpu=$(sysctl -n hw.ncpu) 108 | ;; 109 | 'Windows_NT') 110 | ncpu=$NUMBER_OF_PROCESSORS 111 | ;; 112 | esac 113 | [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1 114 | echo "Found ${ncpu} cores" 115 | echo "##vso[task.setvariable variable=ncpu;]$ncpu" 116 | displayName: 'Detecting number of cores' 117 | - bash: | 118 | echo "PATH=${PATH}" 119 | gcc -v 120 | export ucpu=${UCPU} 121 | if [ "${CHANNEL}" = stable ]; then 122 | BRANCH="v$(curl https://nim-lang.org/channels/stable)" 123 | else 124 | BRANCH="${CHANNEL}" 125 | fi 126 | mkdir -p NimBinaries 127 | pushd NimBinaries 128 | if [ ! 
-x "nim-${CHANNEL}/bin/nim" ]; then 129 | git clone -b "${BRANCH}" https://github.com/nim-lang/nim "nim-${CHANNEL}/" 130 | pushd "nim-${CHANNEL}" 131 | git clone --depth 1 https://github.com/nim-lang/csources csources/ 132 | pushd csources 133 | make -j $ncpu ucpu=${UCPU} CC=gcc 134 | popd 135 | rm -rf csources 136 | bin/nim c koch 137 | ./koch boot -d:release 138 | ./koch tools 139 | else 140 | pushd "nim-${CHANNEL}" 141 | git fetch origin "${BRANCH}" 142 | if [[ $(git merge FETCH_HEAD | grep -c "Already up to date.") -ne 1 ]]; then 143 | bin/nim c koch 144 | ./koch boot -d:release 145 | ./koch tools 146 | fi 147 | fi 148 | popd # exit nim-${CHANNEL} 149 | popd # exit NimBinaries 150 | displayName: 'Building Nim' 151 | - powershell: | 152 | echo "##vso[task.prependpath]$pwd\NimBinaries\nim-$(CHANNEL)\bin" 153 | displayName: 'Set env variable (Windows)' 154 | condition: eq(variables['Agent.OS'], 'Windows_NT') 155 | - bash: | 156 | echo "##vso[task.prependpath]$PWD/NimBinaries/nim-${CHANNEL}/bin" 157 | displayName: 'Set env variable (Posix)' 158 | condition: ne(variables['Agent.OS'], 'Windows_NT') 159 | - bash: | 160 | echo "PATH=${PATH}" 161 | nimble refresh 162 | nimble install -y 163 | displayName: 'Building the package dependencies' 164 | - bash: | 165 | echo "PATH=${PATH}" 166 | export ucpu=${UCPU} 167 | nimble test 168 | displayName: 'Testing the package' 169 | -------------------------------------------------------------------------------- /config.nims: -------------------------------------------------------------------------------- 1 | import strutils 2 | import strformat 3 | import os 4 | 5 | const ProjectDir = projectDir() 6 | const TestDir = ProjectDir / "tests" 7 | const BuildDir = ProjectDir / "build" 8 | const TestBuildDir = BuildDir / "tests" 9 | const DocBuildEnDir = BuildDir / "doc/en" 10 | const DocBuildZhDir = BuildDir / "doc/zh" 11 | const DocCodeZhDir = ProjectDir / "doc/zh/code" 12 | const DocPolisher = ProjectDir / "tools/docplus/polish.js" 13 | 14 | task test, "Run my tests": 15 | # run the following command: 16 | # 17 | # nim test -d:modules=a,b/c,d/e/f 18 | # 19 | # equivalent to: 20 | # 21 | # test tests/a.nim 22 | # test tests/b/c.nim 23 | # test tests/d/e/f.nim 24 | # 25 | const modules {.strdefine.} = "" 26 | let targets = modules.split(",") 27 | for t in targets: 28 | if t.len > 0: 29 | withDir ProjectDir: 30 | var args: seq[string] = @["nim", "c"] 31 | args.add("--run") 32 | args.add("--verbosity:0") 33 | args.add("--hints:off") 34 | args.add(fmt"--out:{TestBuildDir / t}") 35 | args.add(fmt"--path:{ProjectDir}") 36 | args.add(TestDir / t) 37 | rmDir(BuildDir / t.parentDir()) 38 | mkDir(TestBuildDir / t.parentDir()) 39 | exec(args.join(" ")) 40 | 41 | task docs, "Gen docs": 42 | # **netkit.nim** is the entry file of this project. This task starts with **netkit.nim** to generate 43 | # the documentation of this project, and the output directory is **${projectDir}/build/doc**. 44 | # 45 | # run the following command: 46 | # 47 | # nim docs [-d:lang=zh|en] [-d:module=netkit/buffer/constants] 48 | # 49 | # Note: nodejs is required, and ``$ npm install`` should be done in **${projectDir}/tools/docplus**. 
50 | const lang {.strdefine.} = "" 51 | const module {.strdefine.} = "" 52 | var dirs: seq[tuple[build: string, source: string]] = @[] 53 | case lang 54 | of "": 55 | dirs.add((DocBuildEnDir, ProjectDir)) 56 | dirs.add((DocBuildZhDir, DocCodeZhDir)) 57 | of "en": 58 | dirs.add((DocBuildEnDir, ProjectDir)) 59 | of "zh": 60 | dirs.add((DocBuildZhDir, DocCodeZhDir)) 61 | else: 62 | discard 63 | for dir in dirs: 64 | withDir dir.source: 65 | rmDir(dir.build) 66 | mkDir(dir.build) 67 | var args: seq[string] = @["nim", "doc2"] 68 | args.add("--verbosity:0") 69 | args.add("""--docCmd:"--hints:off"""") 70 | args.add("--warnings:off") 71 | args.add("--hints:off") 72 | args.add(fmt"--path:.") 73 | if module.len == 0: 74 | args.add("--project") 75 | args.add("--index:on") 76 | args.add("--git.url:https://github.com/iocrate/netkit") 77 | args.add("--git.commit:master") 78 | args.add(fmt"--out:{dir.build}") 79 | args.add(dir.source / "netkit.nim") 80 | else: 81 | args.add(fmt"--out:{dir.build / module}.html") 82 | args.add(dir.source / module) 83 | exec(args.join(" ")) 84 | exec(fmt"DOC_PLUS_ROOT={dir.build} {DocPolisher}") 85 | -------------------------------------------------------------------------------- /doc/en/articles/client_mostly_single_buffer.md: -------------------------------------------------------------------------------- 1 | ### A new streaming mode for asynchronous non-blocking IO in Nim 2 | 3 | **Mostly Single Buffer**, more precisely **Mostly Single Buffer per connection**, means that most of the time a connection uses a single buffer. Occasionally, however, a connection may use two or more buffers. 4 | 5 | The goal of Mostly Single Buffer is to provide absolute IO consistency, to preserve IO performance and reduce memory footprint as much as possible, and to minimize the impact on the user's programming efficiency. 6 | 7 | When programming network IO, we usually have to deal with data encapsulated by various protocols. In the case of HTTP, the data is encapsulated in units of requests. For a single client connection, the stream of HTTP packets looks like this: 8 | 9 | |---request 1---|---request 2---|---request 3---| 10 | 11 | The figure above shows a client that has sent 3 requests. HTTP requests sent by the same client are always sequential. From the server's perspective, the server creates a "buffer" for each client, reads the request data into the buffer, processes it, and then responds. Generally, the server creates a separate buffer for each client connection. In other words, if 2000 clients are connected to the server at the same time, the server usually holds 2000 buffers, one per client. Because every client is handled with a completely independent buffer, IO consistency is guaranteed, that is, the processing of different clients never gets interleaved; at the same time, this also means more memory consumption, since every buffer occupies its own piece of memory. 12 | 13 | Now, turn the perspective back to the client. A client usually establishes one connection to the server and then keeps issuing requests on it. Let's talk about a MySQL connection, which is very representative: especially with asynchronous non-blocking IO, its internal operation can become heavily loaded and unstable. 
Look at the following pseudocode (1): 14 | 15 | ```nim 16 | var mysql = newMysqlClient() 17 | 18 | await mysql.query("select * from users") # first request 19 | await mysql.query("select * from blogs") # second request 20 | ``` 21 | 22 | This code initiates two query requests. Note that ``await`` waits for the first request to complete before the second request is initiated. Now see pseudocode (2) below: 23 | 24 | ```nim 25 | var mysql = newMysqlClient() 26 | 27 | var req1 = mysql.query("select * from users") # first request 28 | var req2 = mysql.query("select * from blogs") # second request 29 | 30 | await req1 31 | await req2 32 | ``` 33 | 34 | We still initiate two query requests. The difference is that the second request is issued before the first one has been processed. As before, we wait for the first response to complete, then wait for the second. Again, look at the following pseudocode (3): 35 | 36 | ```nim 37 | var mysql = newMysqlClient() 38 | 39 | var req1 = mysql.query("select * from users") # first request 40 | var req2 = mysql.query("select * from blogs") # second request 41 | 42 | await req2 43 | await req1 44 | ``` 45 | 46 | This code waits for the second response to complete first, and only then waits for the first response. 47 | 48 | The three pseudocode snippets above perform the same operations, but their implications are very different. As mentioned above, the server creates a buffer for each client connection, so what about the client? The usual approach is for the client to create one buffer per connection. Creating multiple buffers is unnecessary, because each connection can only handle one thing at a time, and the additional buffers are usually wasted. 49 | 50 | However, the three snippets above raise several problems. Assume that the MySQL client connection has only one buffer and that two query requests have been issued; the returned results then look like this: 51 | 52 | |---response 1---|---response 2---| 53 | 54 | That is, the two responses are laid out sequentially in the same buffer. 55 | 56 | For pseudocode (1) this causes no problem, because it always waits for the first response to complete before processing the second; pseudocode (2) is also fine, because it processes the responses in the same order as pseudocode (1). For pseudocode (3), however, a big problem arises, because it waits for the second response to complete before processing the first. That means response 1 will not be processed until response 2 has been processed. But look at the figure above: because response 1 and response 2 are stored sequentially in the same buffer, response 2 can only be handled after response 1 has been taken out of the buffer. A "deadlock" has occurred! 57 | 58 | This is a bit like the "deadlock" often encountered in multi-threaded programming: lock A is acquired before lock B, but the program handles lock B first, resulting in a deadlock. Asynchronous non-blocking IO has no concept of locks, yet a "deadlock" problem still appears here. The culprit is ``await``. ``await`` splits one logical operation into two steps, turning what used to be a single pass of processing into two, "breaking" the atomic operation. However, we cannot force users to always write ``await`` on the same line as the call, and when dealing with large data streams we must also use ``await`` multiple times to process "small chunks" of data. 
Take a look at this pseudocode: 59 | 60 | ```nim 61 | var stream = mysql.queryLargeResult(...) 62 | 63 | while stream.next(): 64 | await stream.readRow() 65 | ``` 66 | 67 | Still, the problem has to be solved. Recalling the server's approach to buffers, we may want to apply the same idea and create a separate buffer for each request. So the solution for client IO is: for each client connection, instead of creating one buffer per connection, create one buffer per request. Look at the following pseudocode: 68 | 69 | ```nim 70 | var mysql = newMysqlClient() 71 | 72 | var req1 = mysql.query("select * from users") # first request 73 | var req2 = mysql.query("select * from blogs") # second request 74 | var req3 = mysql.query("select * from blogs") # third request 75 | var req4 = mysql.query("select * from blogs") # fourth request 76 | 77 | await req2 78 | await req1 79 | await req4 80 | await req3 81 | ``` 82 | 83 | The above code creates 4 independent buffers, but they all belong to the same client connection. 84 | 85 | However, this also brings a problem: memory is heavily occupied and wasted, because at any given moment only one of the client's buffers is actually useful. Especially when you build a web server that performs MySQL queries for incoming HTTP requests, your server's memory usage starts to soar. That is probably not what you want. 86 | 87 | Mostly Single Buffer is meant to solve these problems. For each connection the client establishes, the scheme creates only one buffer whenever possible, and uses a tag to mark whether that buffer is "busy" or "free". If a new request arrives while the buffer is "busy", a new buffer is created automatically. When a "busy" buffer becomes "free" again, it is automatically reclaimed. 88 | 89 | Dump: if the user issues multiple requests on the same connection, then when processing the responses the `MSB` (Mostly Single Buffer) inspects the read operations provided by the user. For example, for requests [q1, q2, q3], after the `MSB` receives response data it checks the request queue, looks up q1's read operations first, and uses them to process the data. If all of q1's read operations have run but q1's response data has still not been completely "read", the `MSB` performs a "dump": it pours q1's remaining data into a new buffer for temporary storage, so that later q1-related read operations can still be processed. It then handles q2, q3, ... in turn. Naturally, the leftover data and references of q1, q2, q3 may stay in memory until memory overflows; ``.clear(q1)`` allows the q1-related response data to be cleared immediately. 90 | 91 | With **Mostly Single Buffer**, if you are an experienced programmer you can always arrange your program sensibly, that is, ``await`` every request at a reasonable time, and thereby minimize the buffer memory usage. For example, only one buffer is created here: 
92 | 93 | ```nim 94 | var req1 = mysql.query("select * from users") # first request 95 | await req1 96 | 97 | var req2 = mysql.query("select * from blogs") # second request 98 | await req2 99 | 100 | var req3 = mysql.query("select * from comments") # third request 101 | await req3 102 | ``` 103 | 104 | And if you are not skilled enough in IO programming, or the programming program is more casual, Mostly Single Buffer can always ensure that your program runs correctly, but it will consume some memory. For example (create three buffers): 105 | 106 | ```nim 107 | var req1 = mysql.query("select * from users") # first request 108 | var req2 = mysql.query("select * from blogs") # second request 109 | var req3 = mysql.query("select * from comments") # third request 110 | 111 | await req3 112 | await req1 113 | await req2 114 | ``` 115 | 116 | This IO buffer solution will be applied to [netkit](https://github.com/iocrate/netkit) Nim Network toolkit that is actively being developed, as well as some other network packages, such as MySQL connector. By the way, [asyncmysql](https://github.com/tulayang/asyncmysql) uses a callback function to deal with IO consistency issues, but it makes API calls more difficult to use, and future connectors will be changed. 117 | 118 | Enjoy yourself! :) 119 | -------------------------------------------------------------------------------- /doc/zh/articles/client_mostly_single_buffer.md: -------------------------------------------------------------------------------- 1 | 客户端异步非阻塞 IO 新的流模式 Mostly Single Buffer 2 | =============================================== 3 | 4 | > PS: 最新更新,最多增加一个缓冲区,并支持自动伸缩,直到某个极限然后抛出异常并关闭连接。 5 | 6 | > PS: 服务器的写操作将支持 MSB。 7 | 8 | > PS: 这篇文章描述的是比较底层的内容,主要设计传输层,而不是应用层。我不打算对传输层的知识做过多讨论,然而,如果你只是对应用层感兴趣,你仍然可以读读,并在文章最后了解这个模式对应用层的收益。 9 | 10 | Mostly Single Buffer 更精确点应该是 Mostly Single Buffer one connection,意思是:大多数时候,一个连接总是使用一个缓冲区。也就是说,会存在这样的情况,即一个连接使用两个甚至更多个缓冲区。 11 | 12 | Mostly Single Buffer 的目标是提供绝对的 IO 一致性,并尽可能保证 IO 性能和减少内存占用,而最小化对用户编程效率的影响。 13 | 14 | 在对网络 IO 进行编程时,我们通常要处理各种各样的协议封装数据。拿 HTTP 来说,这些数据是以 “请求” (Request) 为单元进行封装的。对于同一个客户端连接,其发送 HTTP 数据包的过程类似这样: 15 | 16 | |---request 1---|---request 2---|---request 3---| 17 | 18 | 上面图中表示,客户端发送了 3 个请求。同一个客户端发送的 HTTP 请求总是连续的。站在服务器的视角,服务器会为每一个客户端创建一个 “缓冲区”,将请求数据读入缓冲区,进行处理,然后作出响应。通常,服务器对于缓冲区的态度是,对每个客户端连接创建一个独立的缓冲区。也就是说,如果同时有 2000 个客户端连接到服务器,服务器通常会有 2000 个缓冲区,分别对应每一个客户端。对于每一个客户端,服务器都使用一个完全独立的缓冲区处理,这就保证了 IO 一致性,即各个客户端的处理不会交叉在一起;同时,也意味着更多的内存占用,每一个缓冲区都要占用一块内存。 19 | 20 | 现在,把视角转回客户端。作为客户端,通常向服务器建立一个连接,然后不断发起请求。让我们谈谈 MySQL 连接,这会非常有代表性,特别是涉及到异步非阻塞 IO 时,其内部的操作过程会变得非常负载和不稳定。看看下面这段伪代码 (1): 21 | 22 | ```nim 23 | var mysql = newMysqlClient() 24 | 25 | await mysql.query("select * from users") # 第一个请求 26 | await mysql.query("select * from blogs") # 第二个请求 27 | ``` 28 | 29 | 这段代码发起了两次查询请求,需要注意的是 ``await`` 适时地等待第一个请求完成,然后才发起第二个请求。请看下面伪代码(2): 30 | 31 | ```nim 32 | var mysql = newMysqlClient() 33 | 34 | var req1 = mysql.query("select * from users") # 第一个请求 35 | var req2 = mysql.query("select * from blogs") # 第二个请求 36 | 37 | await req1 38 | await req2 39 | ``` 40 | 41 | 仍是发起两次查询请求,不同之处在于,第二个请求还没等第一个请求处理完成,就开始发起。相同的是,先等待第一个响应完成,然后等待第二个响应完成。再请看下面伪代码(3): 42 | 43 | ```nim 44 | var mysql = newMysqlClient() 45 | 46 | var req1 = mysql.query("select * from users") # 第一个请求 47 | var req2 = mysql.query("select * from blogs") # 第二个请求 48 | 49 | await req2 50 | await req1 51 | ``` 52 | 53 | 这段代码先等待第二个响应完成,然后等待第一个响应完成。 54 | 55 | 
以上三段伪代码执行了相同的操作,但是其涉及的影响却远远不同。上面说过,服务器会为每一个客户端连接创建一个缓冲区,那么客户端呢?通常的方法是,客户端为每一个连接创建一个缓冲区。创建多个缓冲区是没有必要的,因为每个连接一次只能处理一个问题,额外的缓冲区通常都是浪费。 56 | 57 | 然而,上面三段伪代码会牵扯出很多问题。我们现在假设 mysql 的客户端连接只有一个缓冲区,发出两个查询请求,那么返回来的结果则是这样的: 58 | 59 | |---response 1---|---response 2---| 60 | ^ 61 | 62 | 结果是顺序排列在同一个缓冲区当中。 63 | 64 | 对于伪代码(1),这不会产生问题,因为总是先等待第一个响应完成,再处理第二个响应;伪代码(2),也构不成问题,因为其对响应的处理,类似伪代码(1)。然而,对于伪代码(3),却产生了大问题,因为其先等待第二个响应完成,然后再处理第一个响应。这就表示,响应 2 不处理完成,响应 1 就不会处理。看看上图,因为响应 1 和响应 2 被顺序存储到同一个缓冲区,这就导致只有响应 1 从缓冲区提取后,响应 2 才会获得操作。“死锁” 产生了! 65 | 66 | 这有点像多线程编程中常常提到的 “死锁”,锁 A 先于锁 B 锁住,然而程序却先处理锁 B,导致 “死锁”。异步非阻塞 IO 并没有锁的概念,然而在此处却也产生了 “死锁” 的问题。这是因为 ``await`` 的问题。``await`` 将一行程序操作拆成了两行,将原来本可以一次处理变成了两次处理,“破坏了” 原子操作。然而,我们不能强制要求用户总是将 ``await`` 写作一行,而且,当处理大数据流的时候,我们还必须使用多次 ``await`` 来处理 “小块” 数据。看看这段伪代码: 67 | 68 | ```nim 69 | var stream = mysql.queryLargeResult(...) 70 | 71 | while stream.next(): 72 | await stream.readRow() 73 | ``` 74 | 75 | 然而,问题总要解决。回想起服务器对于缓冲区的态度,我们可以想要使用同样的思路,为每一次请求创建一块单独的缓冲区。好了,客户端 IO 的解决方法是,对于每一个客户端连接,不再是创建一个单独的缓冲区,而是每一个请求创建一个单独的缓冲区。看看下面的伪代码: 76 | 77 | ```nim 78 | var mysql = newMysqlClient() 79 | 80 | var req1 = mysql.query("select * from users") # 第一个请求 81 | var req2 = mysql.query("select * from blogs") # 第二个请求 82 | var req3 = mysql.query("select * from blogs") # 第三个请求 83 | var req4 = mysql.query("select * from blogs") # 第四个请求 84 | 85 | await req2 86 | await req1 87 | await req4 88 | await req3 89 | ``` 90 | 91 | 上面的代码创建 4 块独立的缓冲区,但是它们都是位于同一个客户端连接。 92 | 93 | 不过,这也同时带来问题,即内存被大量的占用,而且被浪费,因为在每一时刻,客户端只有一块缓冲区是有用的。特别是当你建立一个 Web Server,然后对 HTTP 请求进行一些 Mysql 查询时,你的服务器内存开始飙升。这可能不是你所想要的。 94 | 95 | Mostly Single Buffer 期望解决这些问题。客户端建立网络 IO 时,对每一个连接,该方案尽可能只创建一块缓冲区,并使用一个标记,标记该缓冲区是处于 “忙” 状态,还是处于 “空闲” 状态。当处于 “忙”状态时,如果收到新的请求操作,则自动创建一块新的缓冲区。当一块 “忙” 缓冲区重新变为 “空闲” 的时候,自动将其回收。 96 | 97 | 倾倒:如果用户对同一连接申请了多个请求,当处理响应的时候,MSB 会查看用户提供的读操作。比如发起请求 [q1, q2, q3],MSB 收到响应数据后,查看请求队列,先查询 q1 的读操作,然后使用读操作处理数据,如果 q1 的所有读操作都工作完,q1 的响应数据仍然未完全 “读” 完,那么 MSB 就进行 “倾倒”,将 q1 剩余的数据倒入一块新的缓冲区暂存起来,以便于后续 q1 的相关读操作进行处理。然后依次处理 q2,q3,... 这自然会形成一种情况,即 q1、q2、q3 剩余的数据及其引用会一直暂存在内存,直到内存溢出。``.clear(q1)`` 允许立刻清除 q1 相关响应数据。 98 | 99 | 使用 Mostly Single Buffer,如果你是经验丰富的程序员,总能合理安排你的程序,即在合理时刻 ``await`` 每一个请求,那么你总能最小化缓冲区的内存占用。比如 (只创建一块缓冲区) : 100 | 101 | ```nim 102 | var req1 = mysql.query("select * from users") # 第一个请求 103 | await req1 104 | 105 | var req2 = mysql.query("select * from blogs") # 第二个请求 106 | await req2 107 | 108 | var req3 = mysql.query("select * from comments") # 第三个请求 109 | await req3 110 | ``` 111 | 112 | 而如果你对 IO 编程掌握还不够熟练,或者编程的程序比较随意,Mostly Single Buffer 总能保证你的程序正确运行,但是会消耗一些内存。比如 (创建三块缓冲区): 113 | 114 | ```nim 115 | var req1 = mysql.query("select * from users") # 第一个请求 116 | var req2 = mysql.query("select * from blogs") # 第二个请求 117 | var req3 = mysql.query("select * from comments") # 第三个请求 118 | 119 | await req3 120 | await req1 121 | await req2 122 | ``` 123 | 124 | 这个 IO 缓冲区方案将会应用在 [netkit](https://github.com/iocrate/netkit) --- 一个正在积极开发的 Nim Network 工具包,以及其他的一些网络包中,比如 mysql connector。顺便一提的是,[asyncmysql](https://github.com/tulayang/asyncmysql) 使用了回调函数来处理 IO 一致性问题,但是却导致 API 调用比较难以使用,未来的连接器将会获得改善。 125 | 126 | Enjoy yourself! :) 127 | 128 | 设计策略 129 | -------- 130 | 131 | 假设共有请求 [1, 2, 3, 4, 5, 6],正在处理请求 1,则可能的缓冲策略是这样的: 132 | 133 | ``` 134 | [1] jobing ... 
135 | [3] |...|...|...| 136 | [2] |...| 137 | [6] |...|...| 138 | 139 | [5] |...|...|...| 140 | ``` 141 | 142 | MostlgSingleBuffer = object [ 143 | { 144 | id: 2 145 | buffer: [,] 146 | }, 147 | { 148 | id: 3 149 | buffer: [,,] 150 | }, 151 | 152 | 153 | { 154 | id: 5 155 | buffer: [,,] 156 | }, 157 | { 158 | id: 6 159 | buffer: [,] 160 | } 161 | ] 162 | 163 | MostlgSingleBufferItem = object 164 | id, 165 | buffer: [] 166 | 167 | MSB.pop() 得到下一个 "序" 正确的处理目标,即依次得到 2,3,4,5,6 168 | MSB.pop().getBuffer() 得到其内部缓冲区 169 | 170 | MSB.push(req3, buffer3_1) 放入一个待处理目标 171 | MSB.push(req3, buffer3_2) 放入一个待处理目标 172 | MSB.push(req3, buffer3_3) 放入一个待处理目标 173 | 174 | MSB.push(req2, buffer2_1) 放入一个待处理目标 175 | 176 | 更新:将会添加 “异步排队” 的概念,用户层不会感觉到 “排队”,但是软件库底层通过 “排队” 的机制,保证读写顺序的正确。 -------------------------------------------------------------------------------- /doc/zh/articles/http_design_strategy.md: -------------------------------------------------------------------------------- 1 | HTTP 设计策略 2 | ===================== 3 | 4 | 关于请求解析 5 | --------------------- 6 | 7 | 1. 定义解析 HTTP Method 策略: 以 SP 结尾的字符序列(区分大小写),只支持 8 个常规请求方法;start line 长度不超过边界 8 | 9 | 2. 定义解析 HTTP URL 策略: 以 SP 结尾的字符序列,进行 URI 字符转义 (%符号);start line 长度不超过边界 10 | 11 | 3. 定义解析 HTTP Version 策略: 以 LF 或 CRLF 结尾的字符序列,只允许 HTTP/1.1 和 HTTP/1.0 两个字符串 (区分大小写),否则认为是请求错误;start line 长度不超过边界 12 | 13 | 4. 定义解析 Field Name 策略: 以 : 结尾的字符序列 (不区分大小写);前后不能有空白 (SP, HTAB);长度 >0;field line 长度不超过边界 14 | 15 | 5. 定义解析 Field Value 策略: 解析以 , 分割,解析以多个行分割,统一放置到 seq[string],每个 item 一定只表示一个值,即 "SET-COOKIE": @["...", "…"];前后允许有零到多个可选空白 (SP, HTAB);每个值的长度 >0;field line 长度不超过边界;以 LF 或 CRLF 结尾 16 | 17 | 6. 定义解析 HTTP Body 策略: 以 CRLF 作为边界;对 Field values 关键字段进行规范化 18 | 19 | 关于 incoming 请求读 20 | --------------------- 21 | 22 | 1. 定义 request header 自动解析,body 需要 read 手动读,直到 readEnded,以支持流式操作。当 contentLen=0 或者 chunked tail 时,readEnded <可以确定,read 总能保证正确的顺序,不会受到 API 调用顺序影响,因为 http1 的请求是串行的,软件库能控制 request 的读前进和安全> 23 | 24 | 2. 支持 chunked 读自动解析3. 自动处理特定 fields 25 | 26 | 关于 outgoing 响应写 27 | --------------------- 28 | 29 | 1. 定义写操作由 write writeEnd 两个 API 组成,write 写入 http 数据,writeEnd 发送写的结尾信号。以使写操作通用灵活,支持流操作;不猜测用户写的数据,包括写的 header,相反,由用户在 write 时明确指出写 flag,比如 chunked 30 | 31 | 2. 定义一条响应由 32 | 33 | 1 write([header]) 34 | * write([body]) 35 | 1 writeEnd() 36 | 37 | 组成。当第一次 writeEnd时,writeEnded,request 不再允许任何进一步写 <无法保证 write 顺序的正确性,为此,需要应用 MSB 策略,为错序的 write 提供缓存功能> 38 | 39 | 3. 定义 MSB write 缓存功能,确保错序的同一连接多个请求的响应 write 按照正确的顺序写到网络 40 | 41 | 4. 提供 chunked 工具函数 42 | 43 | 更新:下一条请求的触发条件能够精确控制 request handle 交给用户的时间,因此不再需要为 request write 提供 MSB 策略。request write 总能保证写的顺序。将会为其他客户端 API 提供 MSB 策略。 44 | 45 | 关于下一条请求的触发条件 46 | --------------------- 47 | 48 | 1. 定义 readedEnd 和 writeEnded,自动开始下一条 request 49 | 50 | 2. 查看 version 1.1 和 1.0 的支持情况,并根据请求 Connection 和写操作 flag 确定是关闭连接还是开始下一个请求 51 | 52 | 关于错误处理 53 | --------------------- 54 | 55 | 1. 解析的时候,会遇到各种错误问题 (比如,溢出边界,格式错误,其他),需要提供一套一致的 HttpError 表示 http 的各种错误问题 56 | 57 | 关于优化 58 | --------------------- 59 | 60 | 1. 优化 write / read call (async),尽可能减少 macro async 的层级,采用 future 封装,并且尽可能直接调用底层 socket api,减少消耗 -------------------------------------------------------------------------------- /doc/zh/code/backup/client_msb_lab.nim: -------------------------------------------------------------------------------- 1 | # 这个文件只是一个参考策略,请忽略 !!!!!!!!!!!!!!!!!!!!!!!!!!! 
2 | 3 | import asyncdispatch 4 | 5 | # proc do1Async() {.async.} = 6 | # await sleepAsync(1000) 7 | # echo 1000 8 | 9 | # proc do2Async() {.async.} = 10 | # await sleepAsync(2000) 11 | # echo 2000 12 | 13 | # proc main = 14 | # var f1 = do1Async() 15 | # var f2 = do2Async() 16 | 17 | # f2.callback = proc () = 18 | # echo "f2 end" 19 | # sleepAsync(2000).callback = proc() = 20 | # f1.callback = proc () = 21 | # echo "f1 end" 22 | 23 | var buffer = @[1, 2, 3, 4, 5, 6, 7, 8] 24 | 25 | type 26 | Reader = ref object 27 | buffer: seq[int] 28 | queryLen: int 29 | 30 | QueryResult = ref object 31 | data: seq[int] 32 | futures: seq[Future[int]] 33 | hasData: bool 34 | 35 | var reader: Reader = new(Reader) 36 | reader.buffer = @[] 37 | reader.queryLen = 0 38 | 39 | proc notify(r: QueryResult) = 40 | if r.hasData: # 判断是否进行数据处理,有必要 41 | for fut in r.futures: 42 | fut.complete(1) 43 | r.futures = @[] 44 | 45 | proc query(r: Reader): QueryResult = 46 | var a = new(QueryResult) 47 | a.futures = @[] 48 | a.hasData = false 49 | result = a 50 | var f = sleepAsync(1000) 51 | f.callback = proc () = 52 | r.buffer = @[1,2,3,4,5,6,7,8] 53 | r.queryLen = 1 54 | a.data = r.buffer 55 | a.hasData = true # 使用一个字段判断数据是否填充,以避免 notify 时重复操作 56 | a.notify() # 通知,开始处理数据 57 | 58 | proc read(r: QueryResult): Future[int] = 59 | var future = newFuture[int]() 60 | result = future 61 | r.futures.add(future) 62 | r.notify() # 每次都要 notify 一下,以防止 await 顺序混乱导致没有回调被处理 63 | 64 | proc main() {.async.} = 65 | var stream1 = reader.query() 66 | var stream2 = reader.query() 67 | 68 | var f2 = stream2.read() 69 | var f3 = stream2.read() 70 | 71 | var r2 = await f2 72 | var r3 = await f3 73 | 74 | var r1 = await stream1.read() 75 | 76 | echo "r1:", r1 77 | echo "r2:", r2 78 | echo "r3:", r3 79 | 80 | # proc read(r: QueryResult): Future[int] {.async.} = 81 | 82 | 83 | # proc query(buffer: seq[int]): Stream = 84 | # result = new(Stream) 85 | # result.buffer = buffer 86 | # result.startPos = startPos 87 | 88 | # proc read(s: Stream): Future[int] = 89 | # var future = newFuture[int]() 90 | 91 | # if buffer.len 92 | 93 | # sleepAsync(2000).callback = proc() = 94 | # result = s.buffer[s.startPos] 95 | # s.startPos.inc() 96 | 97 | # proc main() {.async.} = 98 | # var stream1 = buffer.query() 99 | # var stream2 = buffer.query() 100 | 101 | # var r1 = await stream2.read() # 5 102 | # var r2 = await stream2.read() # 6 103 | 104 | # echo r1 105 | # echo r2 106 | 107 | # var r3 = await stream1.read() # 1 108 | # var r4 = await stream1.read() # 2 109 | 110 | # echo r3 111 | # echo r4 112 | 113 | 114 | asyncCheck main() 115 | runForever() 116 | -------------------------------------------------------------------------------- /doc/zh/code/netkit.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | import netkit/buffer 8 | import netkit/http 9 | import netkit/locks 10 | import netkit/misc 11 | 12 | export buffer 13 | export http 14 | export locks 15 | export misc -------------------------------------------------------------------------------- /doc/zh/code/netkit/buffer.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 
6 | 7 | import netkit/buffer/circular 8 | import netkit/buffer/constants 9 | import netkit/buffer/vector 10 | 11 | export circular 12 | export constants 13 | export vector 14 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/buffer/circular.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块实现了循环缓冲区 ``CircularBuffer`` 和支持增量标记的循环缓冲区 ``MarkableCircularBuffer`` 。 8 | ## 9 | ## Overview 10 | ## ======================== 11 | ## 12 | ## ``CircularBuffer`` 内部使用一个固定长度的数组作为存储容器,数组的两端如同连接在一起。当一个数据元素被消费后, 13 | ## 其余数据元素不需要移动其存储位置。这使其非常适合缓存数据流。 14 | ## 15 | ## .. image::https://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Circular_buffer.svg/400px-Circular_buffer.svg.png 16 | ## :align: center 17 | ## 18 | ## 某一时刻,其存储状态类似: 19 | ## 20 | ## .. code-block::nim 21 | ## 22 | ## [ 空 |---data---| 空 ] 23 | ## 24 | ## - 参看 `Circular buffer Wiki `_ 25 | ## - 参看 `Circular buffer Wiki 中文 `_ 26 | ## 27 | ## ``MarkableCircularBuffer`` 继承自 ``CircularBuffer`` ,增加了增量标记的功能,允许对存储的数据进行标记。所谓增量, 28 | ## 是指下一次操作总是从上一次结束的位置继续。增量标记简化了诸如 “扫描” 、“分析” 等等一些繁琐的工作。 29 | ## 30 | ## .. container:: r-fragment 31 | ## 32 | ## 存储方法 33 | ## ----------- 34 | ## 35 | ## 循环缓冲区提供两种存储方法: 手动存储和自动存储。 36 | ## 37 | ## 手动存储是低阶操作,能获得更优的性能但是操作更复杂。您需要直接操作 ``next`` ``pack`` 涉及到的可存储空间的指针, 38 | ## 利用 ``copyMem`` 、 ``read(file, pointer, size)`` 或者其他类似的方式直接存储数据。这是一种不安全的方式, 39 | ## 但是由于减少了额外的复制,性能更高。 40 | ## 41 | ## 自动存储是高阶操作,通过 ``add`` 存储数据。这是较为安全并且简单的存储方式,但是有额外的复制开销。 42 | ## 43 | ## .. container:: r-fragment 44 | ## 45 | ## 标记 46 | ## ----------- 47 | ## 48 | ## ``MarkableCircularBuffer`` 支持标记数据。 ``marks()`` 以字符 (字节) 的形式逐个迭代已存储的数据,同时标记该字符 (字节)。 49 | ## 这在进行数据分析时特别有用,比如查找 CRLF。当找到期望的标记后,您可以使用 ``popMarks()`` 把已标记的数据提取出来。 50 | ## 51 | ## .. container:: r-fragment 52 | ## 53 | ## 增量 54 | ## ----------- 55 | ## 56 | ## 在大部分 IO 场景里,数据并非一次性读取或者写入完成的,通常要经过多次循环操作。 ``MarkableCircularBuffer`` 57 | ## 特意为这种反复操作的环境提供增量支持,增量存储数据并且增量标记数据,这样您不必担心在循环操作的过程中丢失数据状态。 58 | ## 59 | ## .. container:: r-fragment 60 | ## 61 | ## 线程安全 62 | ## ----------- 63 | ## 64 | ## ``CircularBuffer`` 和 ``MarkableCircularBuffer`` 都不保证线程安全。当您在多线程环境使用时,您应该负责控制线程竞争。 65 | ## 66 | ## Usage 67 | ## ======================== 68 | ## 69 | ## .. container:: r-fragment 70 | ## 71 | ## 手动存储 72 | ## ----------- 73 | ## 74 | ## 手动存储的过程分为三步: 75 | ## 76 | ## 1. 获取可存储空间的地址和长度: 77 | ## 78 | ## .. code-block::nim 79 | ## 80 | ## var buffer = initMarkableCircularBuffer() 81 | ## var (regionPtr, regionLen) = buffer.next() 82 | ## 83 | ## 2. 直接操作可存储空间以存储数据: 84 | ## 85 | ## .. code-block::nim 86 | ## 87 | ## var readLen = socket.read(regionPtr, regionLen) 88 | ## 89 | ## 3. 告诉缓冲区,存储数据的长度: 90 | ## 91 | ## .. code-block::nim 92 | ## 93 | ## var packLen = buffer.pack(n) 94 | ## 95 | ## .. container:: r-fragment 96 | ## 97 | ## 自动存储 98 | ## ----------- 99 | ## 100 | ## 存入一个字符: 101 | ## 102 | ## .. code-block::nim 103 | ## 104 | ## var n = buffer.add('A') 105 | ## 106 | ## 存入一个字符串: 107 | ## 108 | ## .. code-block::nim 109 | ## 110 | ## var str = "ABC" 111 | ## var n = buffer.add(str.cstring, 3) 112 | ## 113 | ## .. container:: r-fragment 114 | ## 115 | ## 标记 116 | ## ----------- 117 | ## 118 | ## 查找以换行符为结尾的字符串: 119 | ## 120 | ## .. 
code-block::nim 121 | ## 122 | ## var buffer = initMarkableCircularBuffer() 123 | ## var str = "foo\Lbar\L" 124 | ## assert buffer.add(str.cstring, str.len) == str.len 125 | ## 126 | ## var lineString = "" 127 | ## 128 | ## for c in buffer.marks(): 129 | ## if c == '\L': 130 | ## lineString = buffer.popMarks(1) 131 | ## break 132 | ## assert lineString == "foo" 133 | ## 134 | ## for c in buffer.marks(): 135 | ## if c == '\L': 136 | ## lineString = buffer.popMarks(1) 137 | ## break 138 | ## assert lineString == "bar" 139 | ## 140 | ## ``markUntil`` 让这个过程更加简单: 141 | ## 142 | ## .. code-block::nim 143 | ## 144 | ## var buffer = initMarkableCircularBuffer() 145 | ## var str = "foo\Lbar\L" 146 | ## assert buffer.add(str.cstring, str.len) == str.len 147 | ## 148 | ## var lineString = "" 149 | ## 150 | ## assert buffer.markUntil('\L') 151 | ## assert lineString == "foo" 152 | ## 153 | ## assert buffer.markUntil('\L') 154 | ## assert lineString == "bar" 155 | ## 156 | ## .. container:: r-fragment 157 | ## 158 | ## 读取数据 159 | ## ----------- 160 | ## 161 | ## 将存储的数据复制到一块指定的内存,并删除数据: 162 | ## 163 | ## .. code-block::nim 164 | ## 165 | ## var buffer = initMarkableCircularBuffer() 166 | ## var str = "foo\Lbar\L" 167 | ## assert buffer.add(str.cstring, str.len) == str.len 168 | ## assert buffer.len == str.len 169 | ## 170 | ## var dest = newString(64) 171 | ## var getLen = buffer.get(dest, destLen) 172 | ## var delLen = buffer.del(getLen) 173 | ## dest.setLen(getLen) 174 | ## 175 | ## assert dest == "foo\Lbar\L" 176 | ## assert buffer.len == 0 177 | ## 178 | ## 将存储的数据复制到一个字符串,并删除数据: 179 | ## 180 | ## .. code-block::nim 181 | ## 182 | ## var buffer = initMarkableCircularBuffer() 183 | ## var str = "foo\Lbar\L" 184 | ## assert buffer.add(str.cstring, str.len) == str.len 185 | ## assert buffer.len == str.len 186 | ## 187 | ## var foo = buffer.get(3) 188 | ## var delLen = buffer.del(3) 189 | ## assert foo == "foo" 190 | 191 | import netkit/misc 192 | import netkit/buffer/constants 193 | 194 | type 195 | CircularBuffer* = object of RootObj 196 | ## 一个循环缓冲区。注意,其存储空间的最大长度是 ``BufferSize`` 。 197 | data: array[0..BufferSize, byte] 198 | startPos: Natural 199 | endPos: Natural 200 | endMirrorPos: Natural 201 | 202 | MarkableCircularBuffer* = object of CircularBuffer 203 | ## 一个可标记的循环缓冲区。 204 | markedPos: Natural 205 | 206 | proc initCircularBuffer*(): CircularBuffer = discard 207 | ## 初始化一个 ``CircularBuffer`` 。 208 | 209 | proc initMarkableCircularBuffer*(): MarkableCircularBuffer = discard 210 | ## 初始化一个 ``MarkableCircularBuffer`` 。 211 | 212 | proc capacity*(b: CircularBuffer): Natural = discard 213 | ## 返回缓冲区的容量。 214 | 215 | proc len*(b: CircularBuffer): Natural = discard 216 | ## 返回缓冲区存储的数据长度。 217 | 218 | proc next*(b: var CircularBuffer): (pointer, Natural) = discard 219 | ## 返回下一个安全的可存储区域。返回值包括可存储区域的地址和长度。 220 | ## 221 | ## 例子: 222 | ## 223 | ## .. code-block::nim 224 | ## 225 | ## var source = "Hello World" 226 | ## var (regionPtr, regionLen) = buffer.next() 227 | ## var length = min(regionLen, s.len) 228 | ## copyMem(regionPtr, source.cstring, length) 229 | 230 | proc pack*(b: var CircularBuffer, size: Natural): Natural = discard 231 | ## 告诉缓冲区,由当前存储位置向后 ``size`` 长度的字节晋升为数据。返回实际晋升的长度。 232 | ## 233 | ## 当调用 ``next()`` 时,仅仅向缓冲区内部的存储空间写入数据,但是缓冲区无法得知写入了多少数据。 234 | ## ``pack()`` 告诉缓冲区写入的数据长度。 235 | ## 236 | ## 每当调用 ``next()`` 时,都应当立刻调用 ``pack()`` 。 237 | ## 238 | ## 例子: 239 | ## 240 | ## .. 
code-block::nim 241 | ## 242 | ## var source = "Hello World" 243 | ## var (regionPtr, regionLen) = buffer.next() 244 | ## var length = min(regionLen, s.len) 245 | ## copyMem(regionPtr, source.cstring, length) 246 | ## var n = buffer.pack(length) 247 | 248 | proc add*(b: var CircularBuffer, source: pointer, size: Natural): Natural = discard 249 | ## 从 ``source`` 复制最多 ``size`` 长度的数据,存储到缓冲区。返回实际存储的长度。这个函数是 ``next()`` 250 | ## ``pack()`` 组合调用的简化版本,区别是额外执行一次复制。 251 | ## 252 | ## 当您非常看重性能时,使用 ``next`` ``pack`` 组合调用;当您比较看重使用方便时,使用 ``add`` 。 253 | ## 254 | ## .. code-block::nim 255 | ## 256 | ## var source = "Hello World" 257 | ## var n = buffer.add(source.cstring, source.len) 258 | 259 | proc add*(b: var CircularBuffer, c: char): Natural = discard 260 | ## 存储一个字符 ``c`` ,返回实际存储的长度。如果存储空间已满,则会返回 ``0`` ,否则返回 ``1`` 。 261 | 262 | proc get*(b: var CircularBuffer, dest: pointer, size: Natural): Natural = discard 263 | ## 获取存储的数据,最多 ``size`` 个,将数据复制到目标空间 ``dest`` 。返回实际复制的数量。 264 | 265 | proc get*(b: var CircularBuffer, size: Natural): string = discard 266 | ## 获取存储的数据,最多 ``size`` 个,以一个字符串返回。 267 | 268 | proc get*(b: var CircularBuffer): string = discard 269 | ## 获取存储的所有数据,以一个字符串返回。 270 | 271 | proc del*(b: var CircularBuffer, size: Natural): Natural = discard 272 | ## 删除存储的数据,最多 ``size`` 个。返回实际删除的数量。删除总是从存储队列的最前方开始。 273 | 274 | iterator items*(b: CircularBuffer): char = discard 275 | ## 迭代存储的数据。 276 | 277 | proc del*(b: var MarkableCircularBuffer, size: Natural): Natural = discard 278 | ## 删除存储的数据,最多 ``size`` 个。返回实际删除的数量。删除总是从存储队列的最前方开始。 279 | 280 | iterator marks*(b: var MarkableCircularBuffer): char = discard 281 | ## 迭代存储的数据,并进行标记。 282 | ## 283 | ## 注意,标记是增量进行的,也就是说,下一次操作将从上一次结束的位置继续。 284 | ## 285 | ## 例子: 286 | ## 287 | ## .. code-block::nim 288 | ## 289 | ## var s = "Hello World\R\L" 290 | ## var n = buffer.add(s.cstring, s.len) 291 | ## 292 | ## for c in buffer.marks(): 293 | ## if c == '\L': 294 | ## break 295 | 296 | proc mark*(b: var MarkableCircularBuffer, size: Natural): Natural = discard 297 | ## 立刻标记存储的数据,直到 ``size`` 个或者到达数据尾部。返回实际标记的数量。 298 | ## 299 | ## 注意,标记是增量进行的,也就是说,下一次操作将从上一次结束的位置继续。 300 | ## 301 | ## 例子: 302 | ## 303 | ## .. code-block::nim 304 | ## 305 | ## var buffer = initMarkableCircularBuffer() 306 | ## var str = "foo\Lbar\L" 307 | ## assert buffer.add(str.cstring, str.len) == str.len 308 | ## 309 | ## assert buffer.mark(3) == 3 310 | ## assert buffer.popMarks() == "foo" 311 | 312 | proc markUntil*(b: var MarkableCircularBuffer, c: char): bool = discard 313 | ## 逐个标记存储的数据,直到遇到一个字节是 ``c`` ,并返回 ``true`` ; 如果没有字节是 ``c`` ,则返回 ``false`` 。 314 | ## 315 | ## 注意,标记是增量进行的,也就是说,下一次操作将从上一次结束的位置继续。 316 | ## 317 | ## 例子: 318 | ## 319 | ## .. code-block::nim 320 | ## 321 | ## var buffer = initMarkableCircularBuffer() 322 | ## var str = "foo\Lbar\L" 323 | ## assert buffer.add(str.cstring, str.len) == str.len 324 | ## 325 | ## assert buffer.markUntil('\L') 326 | ## assert buffer.popMarks() == "foo\L" 327 | 328 | proc markAll*(b: var MarkableCircularBuffer) = discard 329 | ## 立刻标记存储的所有数据。 330 | ## 331 | ## 注意,标记是增量进行的,也就是说,下一次操作将从上一次结束的位置继续。 332 | ## 333 | ## 例子: 334 | ## 335 | ## .. 
code-block::nim 336 | ## 337 | ## var buffer = initMarkableCircularBuffer() 338 | ## var str = "foo\Lbar\L" 339 | ## assert buffer.add(str.cstring, str.len) == str.len 340 | ## 341 | ## buffer.markAll() 342 | ## assert buffer.popMarks() == "foo\Lbar\L" 343 | 344 | proc lenMarks*(b: MarkableCircularBuffer): Natural = discard 345 | ## 返回标记的数据长度。 346 | 347 | proc popMarks*(b: var MarkableCircularBuffer, n: Natural = 0): string = discard 348 | ## 获取标记的数据,将数据在尾部向前跳过 ``n`` 个字节,以一个字符串返回。同时,删除这些数据。 349 | ## 350 | ## 351 | ## 352 | ## -------------------------------------------------------------------------------- /doc/zh/code/netkit/buffer/constants.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块包含了与缓冲区相关的常量。 8 | 9 | import netkit/misc 10 | 11 | const BufferSize* {.intdefine.}: Natural = 8*1024 12 | ## 描述一块缓冲区的字节数。 13 | ## 14 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 15 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 -------------------------------------------------------------------------------- /doc/zh/code/netkit/buffer/vector.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 本模块实现了一个可增长的缓冲区 ``VectorBuffer`` 。该缓冲区可以根据需要成倍增长,直到某个临界值。当到达 8 | ## 临界值时,继续增长将引起异常。 9 | 10 | import netkit/misc 11 | import netkit/buffer/constants 12 | 13 | type 14 | VectorBuffer* = object of RootObj ## 一个可增长的缓冲区。 15 | value: seq[byte] 16 | endPos: Natural 17 | capacity: Natural 18 | minCapacity: Natural 19 | maxCapacity: Natural 20 | 21 | proc initVectorBuffer*( 22 | minCapacity: Natural = BufferSize, 23 | maxCapacity: Natural = BufferSize * 8 24 | ): VectorBuffer = discard 25 | ## 初始化一个 ``VectorBuffer` 。 ``minCapacity`` 指定最小容量,``maxCapacity`` 指定最大容量。 26 | 27 | proc capacity*(b: VectorBuffer): Natural = discard 28 | ## 返回缓冲区的当前容量。 29 | 30 | proc minCapacity*(b: VectorBuffer): Natural = discard 31 | ## 返回缓冲区的最小容量。 32 | 33 | proc maxCapacity*(b: VectorBuffer): Natural = discard 34 | ## 返回缓冲区的最大容量。 35 | 36 | proc len*(b: VectorBuffer): Natural = discard 37 | ## 返回缓冲区存储的数据长度。 38 | 39 | proc reset*(b: var VectorBuffer): Natural = discard 40 | ## 重置缓冲区,恢复到初始容量,同时清空所有已存储的数据。 41 | 42 | proc expand*(b: var VectorBuffer) {.raises: [OverflowError].} = discard 43 | ## 扩展缓冲区的容量,使其增长一倍。如果超过了最大容量,则抛出 ``OverflowError`` 。 44 | 45 | proc next*(b: var VectorBuffer): (pointer, Natural) = discard 46 | ## 返回下一个安全的可存储区域。返回值包括可存储区域的地址和长度。 47 | ## 48 | ## 例子: 49 | ## 50 | ## .. code-block::nim 51 | ## 52 | ## var source = "Hello World" 53 | ## var (regionPtr, regionLen) = buffer.next() 54 | ## var length = min(regionLen, s.len) 55 | ## copyMem(regionPtr, source.cstring, length) 56 | 57 | proc pack*(b: var VectorBuffer, size: Natural): Natural = discard 58 | ## 告诉缓冲区,由当前存储位置向后 ``size`` 长度的字节晋升为数据。返回实际晋升的长度。 59 | ## 60 | ## 当调用 ``next()`` 时,仅仅向缓冲区内部的存储空间写入数据,但是缓冲区无法得知写入了多少数据。 61 | ## ``pack()`` 告诉缓冲区写入的数据长度。 62 | ## 63 | ## 每当调用 ``next()`` 时,都应当立刻调用 ``pack()`` 。 64 | ## 65 | ## 例子: 66 | ## 67 | ## .. 
code-block::nim 68 | ## 69 | ## var source = "Hello World" 70 | ## var (regionPtr, regionLen) = buffer.next() 71 | ## var length = min(regionLen, s.len) 72 | ## copyMem(regionPtr, source.cstring, length) 73 | ## var n = buffer.pack(length) 74 | 75 | proc add*(b: var VectorBuffer, source: pointer, size: Natural): Natural = discard 76 | ## 从 ``source`` 复制最多 ``size`` 长度的数据,存储到缓冲区。返回实际存储的长度。这个函数是 ``next()`` 77 | ## ``pack()`` 组合调用的简化版本,区别是额外执行一次复制。 78 | ## 79 | ## 当您非常看重性能时,使用 ``next`` ``pack`` 组合调用;当您比较看重使用方便时,使用 ``add`` 。 80 | ## 81 | ## .. code-block::nim 82 | ## 83 | ## var source = "Hello World" 84 | ## var n = buffer.add(source.cstring, source.len) 85 | 86 | proc get*(b: var VectorBuffer, dest: pointer, size: Natural, start: Natural): Natural = discard 87 | ## 从 ``start`` 开始,获取最多 ``size`` 长度的数据,将它们复制到目标空间 ``dest`` ,返回实际复制的数量。 88 | 89 | proc clear*(b: var VectorBuffer): Natural = discard 90 | ## 删除所有已存储的数据。 -------------------------------------------------------------------------------- /doc/zh/code/netkit/http.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | import netkit/http/limits 8 | import netkit/http/exception 9 | import netkit/http/spec 10 | import netkit/http/httpmethod 11 | import netkit/http/version 12 | import netkit/http/status 13 | import netkit/http/headerfield 14 | import netkit/http/header 15 | import netkit/http/chunk 16 | import netkit/http/metadata 17 | import netkit/http/cookies 18 | import netkit/http/parser 19 | import netkit/http/connection 20 | import netkit/http/reader 21 | import netkit/http/writer 22 | import netkit/http/server 23 | 24 | export limits 25 | export exception 26 | export spec 27 | export httpmethod 28 | export version 29 | export status 30 | export headerfield 31 | export header 32 | export chunk 33 | export metadata 34 | export cookies 35 | export parser 36 | export connection 37 | export reader 38 | export writer 39 | export server -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/chunk.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # destribution, for details about the copyright. 6 | 7 | ## HTTP 1.1 支持 chunked 编码,允许将 HTTP 消息拆分成多个块逐块地传输。通常,服务器最常使用 chunked 消息, 8 | ## 但是客户端也可以用来处理比较大的请求。 9 | ## 10 | ## 在消息头添加 ``Transfer-Encoding: chunked`` ,消息体就会进行 chunked 编码并且逐块地传输。 11 | ## 在传输过程中需要编码和解码,这个模块提供了针对这些编码解码的工具。 12 | ## 13 | ## 概述 14 | ## ======================== 15 | ## 16 | ## .. container:: r-fragment 17 | ## 18 | ## 块的格式 19 | ## ------------------------ 20 | ## 21 | ## 经过 chunked 编码的 HTTP 消息 (不管是由客户端发送还是服务器发送),其消息体都由零个到多个 chunks、一个 terminating chunk、trailers、 22 | ## 一个 final CRLF (即回车换行) 组成。 23 | ## 24 | ## 每个块 (chunk) 最开始是块大小和块扩展 (chunk extension) ,后面跟着块数据 (chunk data)。块大小是十六进制字符,表示块数据的实际尺寸。 25 | ## 块扩展是可选的,以分号 ``';'`` 作为分隔符,每一部分是一个名值对,名值对以 ``'='`` 作为分隔符。比如 ``"; a=1; b=2"`` 。 26 | ## 27 | ## 终止块 (terminating chunk) 是一个普通的块 (chunk),只不过其块大小总是 ``0`` ,表示没有数据。其后面跟着 trailers,trailers 也是可选的, 28 | ## 由常规的 HTTP 头字段组成,作为元数据挂载在消息尾部。 29 | ## 30 | ## HTTP 规范规定,只有在收到请求带有 ``TE`` 头字段时,才允许在响应中发送 trailers 。当然,这说明 trailers 只在服务器发出的响应消息中才有用。 31 | ## 32 | ## .. 33 | ## 34 | ## 看看 `Chunked transfer encoding `_ 了解更多。 35 | ## 36 | ## .. 
container:: r-fragment 37 | ## 38 | ## 例子 39 | ## ------------------------ 40 | ## 41 | ## 一个 chunked 消息体的例子: 42 | ## 43 | ## .. code-block::http 44 | ## 45 | ## 5;\r\n # chunk-size and chunk-extensions (empty) 46 | ## Hello\r\n # data 47 | ## 9; language=en; city=London\r\n # chunk-size and chunk-extensions 48 | ## Developer\r\n # data 49 | ## 0\r\n # terminating chunk --------------------- 50 | ## Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n # trailer 51 | ## \r\n # final CRLF----------------------------- 52 | ## 53 | ## .. container:: r-fragment 54 | ## 55 | ## 关于 \\n and \\L 56 | ## ------------------------ 57 | ## 58 | ## 由于在 Nim 语言中 \\n 不能表示为一个字符 (而是字符串),所以我们使用 `\\L` 表示换行符号。 59 | ## 60 | ## 用法 61 | ## ======================== 62 | ## 63 | ## .. container:: r-fragment 64 | ## 65 | ## 编码 66 | ## ------------------------ 67 | ## 68 | ## 实现上面例子的 chunked 消息体: 69 | ## 70 | ## .. code-block::nim 71 | ## 72 | ## import netkit/http/chunk 73 | ## import netkit/http/headerfield 74 | ## 75 | ## assert encodeChunk("Hello") == "5;\r\nHello\r\n" 76 | ## 77 | ## assert encodeChunk("Developer", { 78 | ## "language": "en", 79 | ## "city": "London" 80 | ## }) == "9; language=en; city=London\r\nDeveloper\r\n" 81 | ## 82 | ## assert encodeChunkEnd(initHeaderFields({ 83 | ## "Expires": "Wed, 21 Oct 2015 07:28:00 GMT" 84 | ## })) == "0\r\nExpires: Wed, 21 Oct 2015 07:28:00 GMT\r\n\r\n" 85 | ## 86 | ## 这个例子演示了编码函数的字符串版本。不过,netkit 也提供了更高效的方案,请参看下面。 87 | ## 88 | ## 使用指针缓冲区编码 89 | ## -------------------------------- 90 | ## 91 | ## 持续的从一个文件读数据,同时把数据编码: 92 | ## 93 | ## .. code-block::nim 94 | ## 95 | ## import netkit/http/chunk 96 | ## import netkit/http/headerfield 97 | ## 98 | ## var source: array[64, byte] 99 | ## var dest: array[128, byte] 100 | ## 101 | ## # open a large file 102 | ## var file = open("test.blob") 103 | ## 104 | ## while true: 105 | ## let readLen = file.readBuffer(source.addr, 64) 106 | ## 107 | ## if readLen > 0: 108 | ## let encodeLen = encodeChunk(source.addr, readLen, dest.addr, 128) 109 | ## # handle dest, encodeLen ... 110 | ## 111 | ## # read EOF 112 | ## if readLen < 64: 113 | ## echo encodeChunkEnd(initHeaderFields({ 114 | ## "Expires": "Wed, 21 Oct 2015 07:28:00 GMT" 115 | ## })) 116 | ## break 117 | ## 118 | ## .. 119 | ## 120 | ## 当您对性能非常关注或者正在处理大量数据时,考虑使用指针缓冲区方案。 121 | ## 122 | ## .. container:: r-fragment 123 | ## 124 | ## 解码 125 | ## ------------------------ 126 | ## 127 | ## 解析由块尺寸 (chunk size) 和块扩展 (chunk extensions) 组成的字符序列: 128 | ## 129 | ## .. code-block::nim 130 | ## 131 | ## import netkit/http/chunk 132 | ## 133 | ## let header = parseChunkHeader("1A; a1=v1; a2=v2") 134 | ## assert header.size == 26 135 | ## assert header.extensions == "; a1=v1; a2=v2" 136 | ## 137 | ## 解析块扩展 (chunk extensions) 相关的字符序列: 138 | ## 139 | ## .. code-block::nim 140 | ## 141 | ## import netkit/http/chunk 142 | ## 143 | ## let extensions = parseChunkExtensions("; a1=v1; a2=v2") 144 | ## assert extensions[0].name == "a1" 145 | ## assert extensions[0].value == "v1" 146 | ## assert extensions[1].name == "a2" 147 | ## assert extensions[1].value == "v2" 148 | ## 149 | ## 解析 trailers 相关的字符序列: 150 | ## 151 | ## .. 
code-block::nim 152 | ## 153 | ## import netkit/http/chunk 154 | ## 155 | ## let tailers = parseChunkTrailers(@["Expires: Wed, 21 Oct 2015 07:28:00 GMT"]) 156 | ## assert tailers["Expires"][0] == "Wed, 21 Oct 2015 07:28:00 GMT" 157 | 158 | import strutils 159 | import strtabs 160 | import netkit/misc 161 | import netkit/http/spec 162 | import netkit/http/limits 163 | import netkit/http/headerfield 164 | 165 | type 166 | ChunkHeader* = object ## 表示块 (chunk) 的头部。 167 | size*: Natural 168 | extensions*: string 169 | 170 | ChunkExtension* = tuple ## 表示块扩展 (chunk extensions)。 171 | name: string 172 | value: string 173 | 174 | proc parseChunkHeader*(s: string): ChunkHeader {.raises: [ValueError].} = discard 175 | ## 把字符串转换成一个 ``ChunkHeader`` 。 176 | ## 177 | ## 例子: 178 | ## 179 | ## .. code-block::nim 180 | ## 181 | ## parseChunkHeader("64") # => (100, "") 182 | ## parseChunkHeader("64; name=value") # => (100, "; name=value") 183 | 184 | proc parseChunkExtensions*(s: string): seq[ChunkExtension] = discard 185 | ## 把字符串转换成一组 ``(name, value)`` 对,该字符串表示块扩展。 186 | ## 187 | ## 例子: 188 | ## 189 | ## .. code-block::nim 190 | ## 191 | ## let extensions = parseChunkExtensions(";a1=v1;a2=v2") 192 | ## assert extensions[0].name == "a1" 193 | ## assert extensions[0].value == "v1" 194 | ## assert extensions[1].name == "a2" 195 | ## assert extensions[1].value == "v2" 196 | 197 | proc parseChunkTrailers*(ts: openArray[string]): HeaderFields = discard 198 | ## 把一组字符串转换为一个 ``HeaderFields`` ,该组字符串表示一些 trailers。 199 | ## 200 | ## 例子: 201 | ## 202 | ## .. code-block::nim 203 | ## 204 | ## let fields = parseChunkTrailers(@["Expires: Wed, 21 Oct 2015 07:28:00 GMT"]) 205 | ## # => ("Expires", "Wed, 21 Oct 2015 07:28:00 GMT") 206 | ## assert fields["Expires"][0] == "Wed, 21 Oct 2015 07:28:00 GMT" 207 | 208 | proc encodeChunk*( 209 | source: pointer, 210 | dest: pointer, 211 | size: Natural 212 | ): Natural = discard 213 | ## 编码一块数据, ``source`` 指定被编码的数据, ``size`` 指定数据的字节长度,编码后的结果存储到 ``dest`` 。 214 | ## 215 | ## 注意: ``dest`` 必须比 ``size`` 至少大 ``21`` 字节长度,否则,将没有足够的空间存储编码后的数据。 216 | ## 217 | ## 例子: 218 | ## 219 | ## .. code-block::nim 220 | ## 221 | ## let source = "Developer" 222 | ## let dest = newString(source.len + 21) 223 | ## encodeChunk(source.cstring, source.len, dest.cstring, dest.len) 224 | ## assert dest == "9\r\nDeveloper\r\n" 225 | 226 | proc encodeChunk*( 227 | source: pointer, 228 | dest: pointer, 229 | size: Natural, 230 | extensions = openArray[ChunkExtension] 231 | ): Natural = discard 232 | ## 编码一块数据, ``source`` 指定被编码的数据, ``size`` 指定数据的字节长度, ``extensions`` 指定块扩展。 233 | ## 编码后的结果存储到 ``dest`` 。 234 | ## 235 | ## 注意: ``dest`` 必须比 ``size`` 至少大 ``21 + extensions.len`` 字节长度,否则,将没有足够的空间存储编码后的数据。 236 | ## 237 | ## 例子: 238 | ## 239 | ## .. code-block::nim 240 | ## 241 | ## let source = "Developer" 242 | ## let extensions = "language=en; city=London" 243 | ## let dest = newString(source.len + 21 + extensions.len) 244 | ## encodeChunk(source.cstring, source.len, dest.cstring, dest.len, extensions) 245 | ## assert dest == "9; language=en; city=London\r\nDeveloper\r\n" 246 | 247 | proc encodeChunk*(source: string): string = discard 248 | ## 编码一块数据。 249 | ## 250 | ## 例子: 251 | ## 252 | ## .. code-block::nim 253 | ## 254 | ## let dest = encodeChunk("Developer") 255 | ## assert dest == "9\r\nDeveloper\r\n" 256 | 257 | proc encodeChunk*(source: string, extensions: openArray[ChunkExtension]): string = discard 258 | ## 编码一块数据。 ``extensions`` 指定块扩展。 259 | ## 260 | ## 例子: 261 | ## 262 | ## .. 
code-block::nim 263 | ## 264 | ## let dest = encodeChunk("Developer", { 265 | ## "language": "en", 266 | ## "city": "London" 267 | ## }) 268 | ## assert dest == "9; language=en; city=London\r\nDeveloper\r\n" 269 | 270 | proc encodeChunkEnd*(): string = discard 271 | ## 返回一个由 terminating chunk 和 final CRLF 组成的块,表示消息的尾部。 272 | ## 273 | ## 例子: 274 | ## 275 | ## .. code-block::nim 276 | ## 277 | ## let dest = encodeChunkEnd() 278 | ## assert dest == "0\r\n\r\n" 279 | 280 | proc encodeChunkEnd*(trailers: HeaderFields): string = discard 281 | ## 返回一个由 terminating chunk、trailers 和 final CRLF 组成的块,表示消息的尾部。 ``trailers`` 指定挂载的元数据。 282 | ## 283 | ## 例子: 284 | ## 285 | ## .. code-block::nim 286 | ## 287 | ## let dest = encodeChunkEnd(initHeaderFields({ 288 | ## "Expires": "Wed, 21 Oct 2015 07:28:00 GM" 289 | ## })) 290 | ## assert dest == "0\r\nExpires: Wed, 21 Oct 2015 07:28:00 GM\r\n\r\n" 291 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/connection.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块实现了一个介于客户端和服务器的 HTTP 连接。 ``HttpConnection`` 能够识别网络传输的 HTTP 消息。 8 | ## 9 | ## 使用 10 | ## ======================== 11 | ## 12 | ## .. container:: r-fragment 13 | ## 14 | ## 读消息头 15 | ## ---------------------- 16 | ## 17 | ## .. code-block::nim 18 | ## 19 | ## import netkit/http/connection 20 | ## import netkit/http/header 21 | ## 22 | ## type 23 | ## Packet = ref object 24 | ## header: HttpHeader 25 | ## 26 | ## var packet = new(Packet) 27 | ## packet.header = HttpHeader(kind: HttpHeaderKind.Request) 28 | ## 29 | ## var conn = newHttpConnection(socket, address) 30 | ## 31 | ## try: 32 | ## GC_ref(packet) 33 | ## await conn.readHttpHeader(packet.header.addr) 34 | ## finally: 35 | ## GC_unref(packet) 36 | ## 37 | ## .. container:: r-fragment 38 | ## 39 | ## 读消息体 40 | ## ------------------------ 41 | ## 42 | ## .. code-block::nim 43 | ## 44 | ## let readLen = await conn.readData(buf, 1024) 45 | ## 46 | ## .. container:: r-fragment 47 | ## 48 | ## 读 chunked 编码的消息体 49 | ## ------------------------------------------ 50 | ## 51 | ## .. code-block::nim 52 | ## 53 | ## type 54 | ## Packet = ref object 55 | ## header: ChunkHeader 56 | ## 57 | ## try: 58 | ## GC_ref(packet) 59 | ## await conn.readChunkHeader(packet.header.addr) 60 | ## finally: 61 | ## GC_unref(packet) 62 | ## 63 | ## if header.size == 0: # read tail 64 | ## var trailers: seq[string] 65 | ## await conn.readEnd(trailers) 66 | ## else: 67 | ## var chunkLen = header.size 68 | ## var buf = newString(header.size) 69 | ## let readLen = await conn.readData(buf, header.size) 70 | ## if readLen != header.size: 71 | ## echo "Connection closed prematurely" 72 | ## 73 | ## .. container:: r-fragment 74 | ## 75 | ## 写消息 76 | ## --------------- 77 | ## 78 | ## .. code-block::nim 79 | ## 80 | ## await conn.write(""" 81 | ## GET /iocrate/netkit HTTP/1.1 82 | ## Host: iocrate.com 83 | ## Content-Length: 12 84 | ## 85 | ## foobarfoobar 86 | ## """) 87 | 88 | # 关于 HTTP Server Request 的边界条件 89 | # ---------------------------------- 90 | # 91 | # 1. 不同连接的请求读,一定不存在竞争问题。 92 | # 93 | # 2. 
同一个连接,不同请求的读,一定不存在竞争问题。因为后一个请求总是在前一个请求 EOF 后才能引用。也就谁说,对于 94 | # [req1, req2],req1.read() 总是立刻返回 EOF 。 95 | # 96 | # r1 = req1.read() # 立即返回 EOF,保存在 Future.value 97 | # r2 = req2.read() 98 | # 99 | # await r2 100 | # await r1 101 | # 102 | # ------------------------------------------------------ 103 | # 104 | # r2 = req2.read() 105 | # r1 = req1.read() # 立即返回 EOF,保存在 Future.value 106 | # 107 | # await r2 108 | # await r1 109 | # 110 | # 3. 同一个连接,同一个请求,不同次序的读,存在竞争问题,特别是非 chunked 编码的时候,必须进行排队。 111 | # 112 | # r1_1 = req1.read() 113 | # r1_2 = req1.read() 114 | # 115 | # await r1_2 116 | # await r1_1 117 | # 118 | # 4. 不同连接的响应写,一定不存在竞争问题。 119 | # 120 | # 5. 同一个连接,不同响应的写,一定不存在竞争问题。因为后一个请求总是在前一个请求 EOF 后才能引用。也就谁说,对于 121 | # [req1, req2],req1.write() 总是立刻返回 EOF 。 122 | # 123 | # 6. 同一个连接,同一个响应,不同次序的写,一定不存在竞争问题。因为不对写数据进行内部处理,而是直接交给底层 socket。 124 | # 125 | # 关于 HTTP Server Request 读的结果 126 | # -------------------------------- 127 | # 128 | # 1. NativeSocket.recv() => >0 129 | # 130 | # 表示: 正常。 131 | # 方法: 处理数据。 132 | # 133 | # 2. NativeSocket.recv() => 0 134 | # 135 | # 表示: 对端关闭写, 但是不知道对端是否关闭读。 136 | # 方法: 根据 HTTP 协议规则, 可以知道, 收到 0 时, 只有两个情况: 本条请求未开始; 本条请求数据 137 | # 不完整。 因此, 应当立刻关闭本端 socket 。 138 | # 139 | # 3. NativeSocket.recv() => Error 140 | # 141 | # 表示: 本端出现错误。 142 | # 方法: 该连接不可以继续使用, 否则将出现未知的错误序列。 因此, 应当立刻关闭本端 socket 。 143 | # 144 | # 关于 HTTP Server Request 写的结果 145 | # -------------------------------- 146 | # 147 | # 1. NativeSocket.write() => Void 148 | # 149 | # 表示: 正常。 150 | # 方法: 处理数据。 151 | # 152 | # 2. NativeSocket.write() => Error 153 | # 154 | # 表示: 本端出现错误。 155 | # 方法: 该连接不可以继续使用, 否则将出现未知的错误序列。 因此, 应当立刻关闭本端 socket 。 156 | 157 | import strutils 158 | import asyncdispatch 159 | import nativesockets 160 | import netkit/misc 161 | import netkit/buffer/circular 162 | import netkit/http/header 163 | import netkit/http/exception 164 | import netkit/http/parser 165 | import netkit/http/chunk 166 | 167 | type 168 | HttpConnection* = ref object ## HTTP 连接. 
169 | buffer: MarkableCircularBuffer 170 | parser: HttpParser 171 | socket: AsyncFD 172 | address: string 173 | closed: bool 174 | readTimeout: Natural 175 | 176 | proc newHttpConnection*(socket: AsyncFD, address: string, readTimeout: Natural): HttpConnection = discard 177 | ## 创建一个新的 ``HttpConnection`` 。 ``socket`` 指定对端的套接字描述符, ``address`` 指定对端的网络地址, 178 | ## ``readTimeout`` 指定读操作的超时时间。 179 | ## 180 | ## 注意: ``readTimeout`` 也影响保持 keepalive 的超时时间。当发送完最后一条响应,超过 ``readTimeout`` 仍然没有新的请求时, 181 | ## 触发 ``ReadAbortedError`` 异常。 182 | 183 | proc close*(conn: HttpConnection) {.inline.} = discard 184 | ## 关闭连接以释放底层资源。 185 | 186 | proc closed*(conn: HttpConnection): bool {.inline.} = discard 187 | ## 判断连接是否已经关闭。 188 | 189 | proc readHttpHeader*(conn: HttpConnection, header: ptr HttpHeader): Future[void] = discard 190 | ## 读取一个消息头部。 191 | ## 192 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 193 | 194 | proc readChunkHeader*(conn: HttpConnection, header: ptr ChunkHeader): Future[void] = discard 195 | ## 读取一个 chunked 编码的块的头部。 196 | ## 197 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 198 | 199 | proc readChunkEnd*(conn: HttpConnection, trailer: ptr seq[string]): Future[void] = discard 200 | ## 读取一个 chunked 编码终止块 (terminating chunk)、trailers、和 final CRLF。 201 | ## 202 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 203 | 204 | proc readData*(conn: HttpConnection, buf: pointer, size: Natural): Future[Natural] = discard 205 | ## 读取数据直到 ``size`` 字节,读取的数据填充在 ``buf`` ,返回实际读取的字节数。如果返回值不等于 ``size`` ,说明 206 | ## 出现错误或者连接已经断开。如果出现错误或连接已经断开,则立刻返回;否则,将一直等待读取,直到 ``size`` 字节。 207 | ## 208 | ## 这个函数应该用来读取消息体。 209 | ## 210 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 211 | 212 | proc write*(conn: HttpConnection, buf: pointer, size: Natural): Future[void] {.inline.} = discard 213 | ## 写入数据。 ``buf`` 指定数据源, ``size`` 指定数据源的字节数。 214 | ## 215 | ## 如果写过程中出现系统错误,则会触发 ``OSError`` 异常。 216 | 217 | proc write*(conn: HttpConnection, data: string): Future[void] {.inline.} = discard 218 | ## 写入数据。 ``data`` 指定数据源。 219 | ## 220 | ## 如果写过程中出现系统错误,则会触发 ``OSError`` 异常。 -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/cookies.nim: -------------------------------------------------------------------------------- 1 | ## 该模块实现了 ``Cookie`` 类型,``Cookie`` 会直接映射为 Set-Cookie HTTP 响应头, 2 | ## 还实现了 ``CookieJar`` 类型,``CookieJar`` 用于存储多个 cookie 信息。 3 | ## 概述 4 | ## ======================== 5 | ## 6 | ## ``Cookie`` type is used to generate Set-Cookie HTTP response headers. 7 | ## Server sends Set-Cookie HTTP response headers to the user agent. 8 | ## So the user agent can send them back to the server later. 9 | ## 10 | ## ``CookieJar`` contains many cookies from the user agent. 11 | ## 12 | 13 | 14 | import options, times, strtabs, parseutils, strutils 15 | 16 | 17 | type 18 | SameSite* {.pure.} = enum ## Cookie 属性 SameSite 19 | None, Lax, Strict 20 | 21 | Cookie* = object ## Cookie type represents Set-Cookie HTTP response headers. 22 | name*, value*: string 23 | expires*: string 24 | maxAge*: Option[int] 25 | domain*: string 26 | path*: string 27 | secure*: bool 28 | httpOnly*: bool 29 | sameSite*: SameSite 30 | 31 | CookieJar* = object ## CookieJar type is a collection of cookies. 32 | data: StringTableRef 33 | 34 | MissingValueError* = object of ValueError ## Indicates an error associated with Cookie. 
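# Usage sketch (kept as comments so the module body is unchanged; it only uses
# procs declared further below in this file — initCookie, setCookie,
# initCookieJar and parse — and the expected strings simply restate the
# runnableExamples attached to those procs):
#
#   let cookie = initCookie("username", "netkit")
#   doAssert setCookie(cookie) == "username=netkit; SameSite=Lax"
#
#   var jar = initCookieJar()
#   jar.parse("username=netkit; message=ok")
#   doAssert jar["username"] == "netkit"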
35 | 36 | 37 | proc initCookie*(name, value: string, expires = "", maxAge: Option[int] = none(int), 38 | domain = "", path = "", 39 | secure = false, httpOnly = false, sameSite = Lax): Cookie {.inline.} = 40 | ## 初始化 Cookie 对象。 41 | ## .. code-block::nim 42 | ## let 43 | ## username = "admin" 44 | ## message = "ok" 45 | ## cookie = initCookie(username, message) 46 | ## 47 | ## doAssert cookie.name == username 48 | ## doAssert cookie.value == message 49 | 50 | result = Cookie(name: name, value: value, expires: expires, 51 | maxAge: maxAge, domain: domain, path: path, 52 | secure: secure, httpOnly: httpOnly, sameSite: sameSite) 53 | 54 | proc initCookie*(name, value: string, expires: DateTime|Time, 55 | maxAge: Option[int] = none(int), domain = "", path = "", secure = false, httpOnly = false, 56 | sameSite = Lax): Cookie {.inline.} = 57 | ## 初始化 Cookie 对象。 58 | ## .. code-block::nim 59 | ## import times 60 | ## 61 | ## 62 | ## let 63 | ## username = "admin" 64 | ## message = "ok" 65 | ## expires = now() 66 | ## cookie = initCookie(username, message, expires) 67 | ## 68 | ## doAssert cookie.name == username 69 | ## doAssert cookie.value == message 70 | 71 | result = initCookie(name, value, format(expires.utc, 72 | "ddd',' dd MMM yyyy HH:mm:ss 'GMT'"), maxAge, domain, path, secure, 73 | httpOnly, sameSite) 74 | 75 | proc parseParams(cookie: var Cookie, key: string, value: string) {.inline.} = 76 | ## 从键值对中解析 ``cookie`` 属性。 77 | case key.toLowerAscii 78 | of "expires": 79 | if value.len != 0: 80 | cookie.expires = value 81 | of "maxage": 82 | try: 83 | cookie.maxAge = some(parseInt(value)) 84 | except ValueError: 85 | cookie.maxAge = none(int) 86 | of "domain": 87 | if value.len != 0: 88 | cookie.domain = value 89 | of "path": 90 | if value.len != 0: 91 | cookie.path = value 92 | of "secure": 93 | cookie.secure = true 94 | of "httponly": 95 | cookie.httpOnly = true 96 | of "samesite": 97 | case value.toLowerAscii 98 | of "none": 99 | cookie.sameSite = None 100 | of "strict": 101 | cookie.sameSite = Strict 102 | else: 103 | cookie.sameSite = Lax 104 | else: 105 | discard 106 | 107 | proc initCookie*(text: string): Cookie {.inline.} = 108 | ## 从字符串中读取 ``Cookie`` 对象。 109 | runnableExamples: 110 | doAssert initCookie("foo=bar=baz").name == "foo" 111 | doAssert initCookie("foo=bar=baz").value == "bar=baz" 112 | doAssert initCookie("foo=bar; HttpOnly").httpOnly 113 | 114 | var 115 | pos = 0 116 | params: string 117 | name, value: string 118 | first = true 119 | 120 | while true: 121 | pos += skipWhile(text, {' ', '\t'}, pos) 122 | pos += parseUntil(text, params, ';', pos) 123 | 124 | var start = 0 125 | start += parseUntil(params, name, '=', start) 126 | inc(start) # skip '=' 127 | if start < params.len: 128 | value = params[start .. ^1] 129 | else: 130 | value = "" 131 | 132 | if first: 133 | if name.len == 0: 134 | raise newException(MissingValueError, "cookie name is missing!") 135 | if value.len == 0: 136 | raise newException(MissingValueError, "cookie valie is missing!") 137 | result.name = name 138 | result.value = value 139 | first = false 140 | else: 141 | parseParams(result, name, value) 142 | if pos >= text.len: 143 | break 144 | inc(pos) # skip '; 145 | 146 | proc setCookie*(cookie: Cookie): string = 147 | ## Stringifys Cookie object to get Set-Cookie HTTP response headers. 
148 | runnableExamples: 149 | import strformat 150 | 151 | 152 | let 153 | username = "admin" 154 | message = "ok" 155 | cookie = initCookie(username, message) 156 | 157 | doAssert setCookie(cookie) == fmt"{username}={message}; SameSite=Lax" 158 | 159 | result.add cookie.name & "=" & cookie.value 160 | if cookie.domain.strip.len != 0: 161 | result.add("; Domain=" & cookie.domain) 162 | if cookie.path.strip.len != 0: 163 | result.add("; Path=" & cookie.path) 164 | if cookie.maxAge.isSome: 165 | result.add("; Max-Age=" & $cookie.maxAge.get()) 166 | if cookie.expires.strip.len != 0: 167 | result.add("; Expires=" & cookie.expires) 168 | if cookie.secure: 169 | result.add("; Secure") 170 | if cookie.httpOnly: 171 | result.add("; HttpOnly") 172 | if cookie.sameSite != None: 173 | result.add("; SameSite=" & $cookie.sameSite) 174 | 175 | proc `$`*(cookie: Cookie): string {.inline.} = 176 | ## Stringifys Cookie object to get Set-Cookie HTTP response headers. 177 | runnableExamples: 178 | import strformat 179 | 180 | 181 | let 182 | username = "admin" 183 | message = "ok" 184 | cookie = initCookie(username, message) 185 | 186 | doAssert $cookie == fmt"{username}={message}; SameSite=Lax" 187 | 188 | setCookie(cookie) 189 | 190 | proc initCookieJar*(): CookieJar {.inline.} = 191 | ## Creates a new cookieJar that is empty. 192 | CookieJar(data: newStringTable(mode = modeCaseSensitive)) 193 | 194 | proc len*(cookieJar: CookieJar): int {.inline.} = 195 | ## Returns the number of names in ``cookieJar``. 196 | cookieJar.data.len 197 | 198 | proc `[]`*(cookieJar: CookieJar, name: string): string {.inline.} = 199 | ## Retrieves the value at ``cookieJar[name]``. 200 | ## 201 | ## If ``name`` is not in ``cookieJar``, the ``KeyError`` exception is raised. 202 | cookieJar.data[name] 203 | 204 | proc getOrDefault*(cookieJar: CookieJar, name: string, default = ""): string {.inline.} = 205 | ## Retrieves the value at ``cookieJar[name]`` if ``name`` is in ``cookieJar``. Otherwise, the 206 | ## default value is returned(default is ""). 207 | cookieJar.data.getOrDefault(name, default) 208 | 209 | proc hasKey*(cookieJar: CookieJar, name: string): bool {.inline.} = 210 | ## Returns true if ``name`` is in the ``cookieJar``. 211 | cookieJar.data.hasKey(name) 212 | 213 | proc contains*(cookieJar: CookieJar, name: string): bool {.inline.} = 214 | ## Returns true if ``name`` is in the ``cookieJar``. 215 | ## Alias of ``hasKey`` for use with the ``in`` operator. 216 | cookieJar.data.contains(name) 217 | 218 | proc `[]=`*(cookieJar: var CookieJar, name: string, value: string) {.inline.} = 219 | ## Inserts a ``(name, value)`` pair into ``cookieJar``. 220 | cookieJar.data[name] = value 221 | 222 | proc parse*(cookieJar: var CookieJar, text: string) {.inline.} = 223 | ## Parses CookieJar from strings. 
224 | runnableExamples: 225 | var cookieJar = initCookieJar() 226 | cookieJar.parse("username=netkit; message=ok") 227 | 228 | doAssert cookieJar["username"] == "netkit" 229 | doAssert cookieJar["message"] == "ok" 230 | 231 | var 232 | pos = 0 233 | name, value: string 234 | while true: 235 | pos += skipWhile(text, {' ', '\t'}, pos) 236 | pos += parseUntil(text, name, '=', pos) 237 | if pos >= text.len: 238 | break 239 | inc(pos) # skip '=' 240 | pos += parseUntil(text, value, ';', pos) 241 | cookieJar[name] = move(value) 242 | if pos >= text.len: 243 | break 244 | inc(pos) # skip ';' 245 | 246 | iterator pairs*(cookieJar: CookieJar): tuple[name, value: string] = 247 | ## Iterates over any ``(name, value)`` pair in the ``cookieJar``. 248 | for (name, value) in cookieJar.data.pairs: 249 | yield (name, value) 250 | 251 | iterator keys*(cookieJar: CookieJar): string = 252 | ## Iterates over any ``name`` in the ``cookieJar``. 253 | for name in cookieJar.data.keys: 254 | yield name 255 | 256 | iterator values*(cookieJar: CookieJar): string = 257 | ## Iterates over any ``value`` in the ``cookieJar``. 258 | for value in cookieJar.data.values: 259 | yield value 260 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/exception.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块定义了与 HTTP 操作相关的异常。 8 | 9 | import netkit/http/status 10 | 11 | type 12 | HttpError* = object of CatchableError ## 表示与 HTTP 协议相关的错误。 13 | code*: range[Http400..Http505] 14 | 15 | ReadAbortedError* = object of CatchableError ## 读操作在完成前被中断。 16 | timeout*: bool 17 | 18 | WriteAbortedError* = object of CatchableError ## 写操作在完成前被中断。 19 | 20 | proc newHttpError*( 21 | code: range[Http400..Http505], 22 | parentException: ref Exception = nil 23 | ): ref HttpError = discard 24 | ## 创建一个 ``ref HttpError``. 25 | 26 | proc newHttpError*( 27 | code: range[Http400..Http505], 28 | msg: string, 29 | parentException: ref Exception = nil 30 | ): ref HttpError = discard 31 | ## 创建一个 ``ref HttpError``. 32 | 33 | proc newReadAbortedError*( 34 | msg: string, 35 | timeout: bool = false, 36 | parentException: ref Exception = nil 37 | ): ref ReadAbortedError = discard 38 | ## 创建一个 ``ref ReadAbortedError``. 39 | 40 | proc newWriteAbortedError*( 41 | msg: string, 42 | parentException: ref Exception = nil 43 | ): ref WriteAbortedError = discard 44 | ## 创建一个 ``ref ReadAbortedError``. 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/header.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块包含了 HTTP 消息头的定义。 8 | ## 9 | ## 概述 10 | ## ======================== 11 | ## 12 | ## HTTP 消息由头部和体部组成。头部定义 HTTP 传输的操作参数;体部是传输的数据,紧跟在头部之后,有可能是空的。头部由起始行和头部字段组成。 13 | ## 14 | ## 客户端发出的消息称为请求消息,服务器发出的消息称为响应消息。 15 | ## 16 | ## 请求消息的起始行称为请求行,由请求方法、URL 和版本号组成;响应消息的起始行称为状态行,由状态码、原因和版本号组成。 17 | ## 18 | ## .. 19 | ## 20 | ## 看看 `Hypertext Transfer Protocol `_ 了解更多。 21 | ## 22 | ## 用法 23 | ## ======================== 24 | ## 25 | ## .. container::r-fragment 26 | ## 27 | ## 请求 28 | ## ------- 29 | ## 30 | ## 输出一个请求消息: 31 | ## 32 | ## .. 
code-block::nim 33 | ## 34 | ## import netkit/http/version 35 | ## import netkit/http/httpmethod 36 | ## import netkit/http/headerfields 37 | ## import netkit/http/header 38 | ## 39 | ## var header = HttpHeader( 40 | ## kind: HttpHeaderKind.Request, 41 | ## reqMethod: HttpGet, 42 | ## url: "/", 43 | ## version: HttpVer11, 44 | ## fields: initHeaderFields: { 45 | ## "Host": "www.iocrate.com" 46 | ## } 47 | ## ) 48 | ## assert toResponseStr(Http200) == "GET / HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 49 | ## 50 | ## .. container::r-fragment 51 | ## 52 | ## 响应 53 | ## -------- 54 | ## 55 | ## 输出一个响应消息: 56 | ## 57 | ## .. code-block::nim 58 | ## 59 | ## import netkit/http/version 60 | ## import netkit/http/status 61 | ## import netkit/http/headerfields 62 | ## import netkit/http/header 63 | ## 64 | ## var header = HttpHeader( 65 | ## kind: HttpHeaderKind.Response, 66 | ## statusCode: Http200, 67 | ## version: HttpVer11, 68 | ## fields: initHeaderFields: { 69 | ## "Host": "www.iocrate.com" 70 | ## } 71 | ## ) 72 | ## assert toResponseStr(Http200) == "200 OK HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 73 | ## 74 | ## 输出一个响应消息,但是不包含头字段: 75 | ## 76 | ## .. code-block::nim 77 | ## 78 | ## import netkit/http/status 79 | ## import netkit/http/header 80 | ## 81 | ## assert toResponseStr(Http200) == "200 OK HTTP/1.1\r\n\r\n" 82 | ## 83 | ## 84 | 85 | import netkit/http/uri 86 | import netkit/http/spec 87 | import netkit/http/httpmethod 88 | import netkit/http/version 89 | import netkit/http/status 90 | import netkit/http/headerfield 91 | 92 | type 93 | HttpHeaderKind* {.pure.} = enum ## HTTP 消息的类型。 94 | Request, Response 95 | 96 | HttpHeader* = object ## 表示 HTTP 消息头。每条消息只能有一个头部。 97 | case kind*: HttpHeaderKind 98 | of HttpHeaderKind.Request: 99 | reqMethod*: HttpMethod 100 | url*: string 101 | of HttpHeaderKind.Response: 102 | statusCode*: HttpCode 103 | version*: HttpVersion 104 | fields*: HeaderFields 105 | 106 | proc initRequestHeader*(reqMethod: HttpMethod, url: string, 107 | fields: HeaderFields): HttpHeader {.inline.} = discard 108 | ## 初始化一个 HTTP 请求头。 109 | 110 | proc initResponseHeader*(statusCode: HttpCode, fields: HeaderFields): HttpHeader {.inline.} = discard 111 | ## 初始化一个 HTTP 响应头。 112 | 113 | proc toResponseStr*(H: HttpHeader): string = discard 114 | ## 返回一个字符串,表示 HTTP 响应消息的头部, ``H`` 指定头部的内容。 115 | ## 116 | ## 例子: 117 | ## 118 | ## .. code-block::nim 119 | ## 120 | ## import netkit/http/version 121 | ## import netkit/http/status 122 | ## import netkit/http/headerfields 123 | ## import netkit/http/header 124 | ## 125 | ## var header = HttpHeader( 126 | ## kind: HttpHeaderKind.Response, 127 | ## statusCode: Http200, 128 | ## version: HttpVer11, 129 | ## fields: initHeaderFields: { 130 | ## "Host": "www.iocrate.com" 131 | ## } 132 | ## ) 133 | ## assert toResponseStr(Http200) == "200 OK HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 134 | 135 | proc toResponseStr*(code: HttpCode): string = discard 136 | ## 返回一个字符串,表示 HTTP 响应消息的头部, ``code`` 指定头部的状态码。注意,返回的头部不包含头字段。 137 | ## 138 | ## 例子: 139 | ## 140 | ## .. code-block::nim 141 | ## 142 | ## import netkit/http/status 143 | ## import netkit/http/header 144 | ## 145 | ## assert toResponseStr(Http200) == "200 OK HTTP/1.1\r\n\r\n" 146 | 147 | proc toRequestStr*(H: HttpHeader): string = discard 148 | ## 返回一个字符串,表示 HTTP 请求消息的头部, ``H`` 指定头部的内容。 149 | ## 150 | ## 例子: 151 | ## 152 | ## .. 
code-block::nim 153 | ## 154 | ## import netkit/http/version 155 | ## import netkit/http/httpmethod 156 | ## import netkit/http/headerfields 157 | ## import netkit/http/header 158 | ## 159 | ## var header = HttpHeader( 160 | ## kind: HttpHeaderKind.Request, 161 | ## reqMethod: HttpGet, 162 | ## url: "/", 163 | ## version: HttpVer11, 164 | ## fields: initHeaderFields: { 165 | ## "Host": "www.iocrate.com" 166 | ## } 167 | ## ) 168 | ## assert toResponseStr(Http200) == "GET / HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 169 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/httpmethod.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块包含 HTTP 请求方法的定义。 8 | ## 9 | ## 概述 10 | ## ======================== 11 | ## 12 | ## HTTP 定义了请求方法,以标识对资源执行的操作。资源代表什么,是预先存在的数据还是动态生成的数据,取决于服务器的实现。 13 | ## 通常,资源与服务器上驻留的文件或可执行文件的输出相对应。 14 | ## 15 | ## .. 16 | ## 17 | ## 看看 `Hypertext Transfer Protocol `_ 了解更多。 18 | 19 | type 20 | HttpMethod* = enum ## HTTP 请求方法。 21 | HttpHead = "HEAD", 22 | HttpGet = "GET", 23 | HttpPost = "POST", 24 | HttpPut = "PUT", 25 | HttpDelete = "DELETE", 26 | HttpTrace = "TRACE", 27 | HttpOptions = "OPTIONS", 28 | HttpConnect = "CONNECT", 29 | HttpPatch = "PATCH" 30 | 31 | proc parseHttpMethod*(s: string): HttpMethod {.raises: [ValueError].} = discard 32 | ## 将字符串转换为 HTTP 请求方法。如果 ``s`` 不是有效的请求方法,则会引发 ``ValueError`` 。 33 | ## 34 | ## 例子: 35 | ## 36 | ## .. code-block::nim 37 | ## 38 | ## doAssert parseHttpMethod("GET") == HttpGet 39 | ## doAssert parseHttpMethod("POST") == HttpPost -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/limits.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 该模块定义了一些与 HTTP 操作相关的常量。其中一些支持在编译时通过 ``--define`` 指令重定义。 8 | 9 | import netkit/misc 10 | 11 | const LimitStartLineLen* {.intdefine.}: Natural = 8*1024 12 | ## 指定 HTTP 起始行的最大字节数。此限制同时影响请求行和状态行。 13 | ## 14 | ## 由于请求行由 HTTP 请求方法、URL 和版本号组成,因此该指令对服务器端允许请求的 URL 长度进行了限制。 15 | ## 16 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 17 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 18 | 19 | const LimitHeaderFieldLen* {.intdefine.}: Natural = 8*1024 20 | ## 指定 HTTP 头字段的最大长度。此限制同时影响请求头字段和响应头字段。 21 | ## 22 | ## HTTP 头字段的大小在不同的实现中会有很大的不同,通常取决于用户对其浏览器配置支持内容协商的程度。 23 | ## 24 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 25 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 26 | 27 | const LimitHeaderFieldCount* {.intdefine.}: Natural = 100 28 | ## 指定 HTTP 头字段的最大数量。此限制同时影响请求头字段和响应头字段。 29 | ## 30 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 31 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 32 | 33 | const LimitChunkSizeLen*: Natural = 16 34 | ## 指定通过 chunked 编码的块数据其 size 部分的最大字节数。 35 | 36 | const LimitChunkHeaderLen* {.intdefine.}: Natural = 1*1024 37 | ## 指定通过 chunked 编码的块数据其 size 和扩展部分的最大字节数。 38 | ## 39 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 40 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 41 | ## 42 | ## 根据 HTTP 协议,数据的大小和扩展部分采用以下形式: 43 | ## 44 | ## .. 
code-block::http 45 | ## 46 | ## 7\r\n; foo=value1; bar=value2\r\n 47 | 48 | const LimitChunkDataLen* {.intdefine.}: Natural = 1*1024 49 | ## 指定通过 chunked 编码的块数据的数据部分的最大字节数。 50 | ## 51 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 52 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 53 | ## 54 | ## 根据 HTTP 协议,数据部分采用以下形式: 55 | ## 56 | ## .. code-block::http 57 | ## 58 | ## Hello World\r\n 59 | 60 | const LimitChunkTrailerLen* {.intdefine.}: Natural = 8*1024 61 | ## 指定通过 chunked 编码的消息其元数据部分的最大字节数。实际上,这些元数据是一些 ``trailers`` 。 62 | ## 63 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 64 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 65 | ## 66 | ## 例子: 67 | ## 68 | ## .. code-block::http 69 | ## 70 | ## HTTP/1.1 200 OK 71 | ## Transfer-Encoding: chunked 72 | ## Trailer: Expires 73 | ## 74 | ## 9\r\n 75 | ## Developer\r\n 76 | ## 0\r\n 77 | ## Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n 78 | ## \r\n 79 | 80 | const LimitChunkTrailerCount* {.intdefine.}: Natural = 100 81 | ## 指定通过 chunked 编码的消息其元数据部分的最大数量。实际上,这些元数据是一些 ``trailers`` 。 82 | ## 83 | ## 您可以在编译时通过开关选项 ``--define:BufferSize=`` 重写这个数值。 84 | ## 注意,值必须是自然数,即大于等于零的整数;否则,将会引起异常。 85 | ## 86 | ## -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/metadata.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块定义了一个通用对象 ``HttpMetadata`` ,该对象抽象了 HTTP 以简化对元数据的使用。 8 | ## 9 | ## 概述 10 | ## ======================== 11 | ## 12 | ## HTTP 消息支持挂载元数据。当前,有两种类型的元数据,都出现在 chunked 编码的消息。它们是: 13 | ## 14 | ## - Chunk Extensions 15 | ## - Trailers 16 | ## 17 | ## .. container:: r-fragment 18 | ## 19 | ## Chunk Extensions 20 | ## ----------------- 21 | ## 22 | ## 经过 chunked 编码的消息,每个数据块可以包含零个到多个块扩展。这些扩展紧跟在块大小之后,提供块的元数据(例如签名或哈希)。 23 | ## 24 | ## 每个扩展都是一个以 ``=`` 作为分隔符的名称/值对,例如 ``language = en``; 多个扩展名以 ``';'`` 作为分隔符组合在一起,例如 ``language=en; city=London`` 。 25 | ## 26 | ## 例子: 27 | ## 28 | ## .. code-block::http 29 | ## 30 | ## HTTP/1.1 200 OK 31 | ## Transfer-Encoding: chunked 32 | ## 33 | ## 9; language=en; city=London\r\n 34 | ## Developer\r\n 35 | ## 0\r\n 36 | ## \r\n 37 | ## 38 | ## .. container:: r-fragment 39 | ## 40 | ## Trailers 41 | ## -------- 42 | ## 43 | ## 经过 chunked 编码的消息,可以在尾部挂载元数据 trailers。trailers 实际上是一个或多个 HTTP 响应头字段,允许发送方在消息末尾添加其他元信息。 44 | ## 这些元信息可以随着消息正文的发送而动态生成,例如消息完整性检查,消息数字签名或处理后消息的最终状态等。 45 | ## 46 | ## 注意:仅当客户端在请求头包含 ``TE``( ``TE:trailers`` )时,服务器才能在响应中挂载 trailers。 47 | ## 48 | ## 例子: 49 | ## 50 | ## .. code-block::http 51 | ## 52 | ## HTTP/1.1 200 OK 53 | ## Transfer-Encoding: chunked 54 | ## Trailer: Expires 55 | ## 56 | ## 9\r\n 57 | ## Developer\r\n 58 | ## 0\r\n 59 | ## Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n 60 | ## \r\n 61 | ## 62 | ## 用法 63 | ## ======================== 64 | ## 65 | ## 出于性能方面的考虑, ``HttpMetadata`` 不会进一步解析 ``trailers`` 和 ``extensions`` 的内容。 66 | ## 您可以使用 ``parseChunkTrailers`` 和 ``parseChunkExtensions`` 分别提取它们的内容。 67 | ## 68 | ## .. container:: r-fragment 69 | ## 70 | ## Chunk Extensions 71 | ## ---------------- 72 | ## 73 | ## 提取内容: 74 | ## 75 | ## .. 
code-block::nim 76 | ## 77 | ## import netkit/http/metadata 78 | ## import netkit/http/chunk 79 | ## 80 | ## let metadata = HttpMetadata( 81 | ## kind: HttpMetadataKind.ChunkExtensions, 82 | ## extensions: "; a1=v1; a2=v2" 83 | ## ) 84 | ## let extensions = parseChunkExtensions(metadata.extensions) 85 | ## assert extensions[0].name == "a1" 86 | ## assert extensions[0].value == "v1" 87 | ## assert extensions[1].name == "a2" 88 | ## assert extensions[1].value == "v2" 89 | ## 90 | ## .. container:: r-fragment 91 | ## 92 | ## Trailers 93 | ## -------------- 94 | ## 95 | ## 提取内容: 96 | ## 97 | ## .. code-block::nim 98 | ## 99 | ## import netkit/http/metadata 100 | ## import netkit/http/chunk 101 | ## 102 | ## let metadata = HttpMetadata( 103 | ## kind: HttpMetadataKind.ChunkTrailers, 104 | ## trailers: @["Expires: Wed, 21 Oct 2015 07:28:00 GMT"] 105 | ## ) 106 | ## let tailers = parseChunkTrailers(metadata.trailers) 107 | ## assert tailers["Expires"][0] == "Wed, 21 Oct 2015 07:28:00 GMT" 108 | 109 | type 110 | HttpMetadataKind* {.pure.} = enum ## 元数据的类型。 111 | None, 112 | ChunkTrailers, 113 | ChunkExtensions 114 | 115 | HttpMetadata* = object ##元数据对象。 116 | case kind*: HttpMetadataKind 117 | of HttpMetadataKind.ChunkTrailers: 118 | trailers*: seq[string] 119 | of HttpMetadataKind.ChunkExtensions: 120 | extensions*: string 121 | of HttpMetadataKind.None: 122 | discard 123 | 124 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/parser.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块实现了一个增量的 HTTP 消息解析器。该解析器支持解析请求消息和响应消息。 8 | ## 9 | ## 解析器是增量的,这表示可以连续解析消息,而不管消息是一次交付还是分成多个部分。这使得该解析器特别适合复杂的 IO 传输环境。 10 | ## 11 | ## 用法 12 | ## ======================== 13 | ## 14 | ## .. container:: r-fragment 15 | ## 16 | ## 解析消息头 17 | ## ------------------------------ 18 | ## 19 | ## 例子: 20 | ## 21 | ## .. code-block::nim 22 | ## 23 | ## import netkit/http/parser 24 | ## import netkit/http/header 25 | ## import netkit/buffer/circular 26 | ## 27 | ## var parser = initHttpParser() 28 | ## var buffer = initMarkableCircularBuffer() 29 | ## var header = HttpHeader(kind: HttpHeaderKind.Request) 30 | ## var finished = false 31 | ## 32 | ## while not finished: 33 | ## put data to buffer ... 34 | ## finished = parser.parseHttpHeader(buffer, header) 35 | ## 36 | ## 另一个例子: 37 | ## 38 | ## .. 
code-block::nim 39 | ## 40 | ## import netkit/http/parser 41 | ## import netkit/http/header 42 | ## import netkit/buffer/circular 43 | ## 44 | ## var parser = initHttpParser() 45 | ## var buffer = initMarkableCircularBuffer() 46 | ## var header = HttpHeader(kind: HttpHeaderKind.Request) 47 | ## 48 | ## # 第一次解析 49 | ## let messageRequestLine = "GET / HTTP/1.1\r\n" 50 | ## buffer.add(messageRequestLine.cstring, messageRequestLine.len) 51 | ## assert parser.parseHttpHeader(buffer, header) == false 52 | ## buffer.del(messageRequestLine.len) 53 | ## 54 | ## # 第二次解析 55 | ## let messageHeaderFields = "Host: www.iocrate.com\r\n\r\n" 56 | ## buffer.add(messageHeaderFields.cstring, messageHeaderFields.len) 57 | ## assert parser.parseHttpHeader(buffer, header) == true 58 | ## buffer.del(messageHeaderFields.len) 59 | ## 60 | ## assert header.reqMethod == HttpGet 61 | ## assert header.url == "/" 62 | ## assert header.version.orig == "HTTP/1.1" 63 | ## assert header.fields["Host"][0] == "www.iocrate.com" 64 | ## 65 | ## .. container:: r-fragment 66 | ## 67 | ## 解析 chunked 编码消息的块的头部 68 | ## ------------------------------ 69 | ## 70 | ## 例子: 71 | ## 72 | ## .. code-block::nim 73 | ## 74 | ## import netkit/http/parser 75 | ## import netkit/http/chunk 76 | ## import netkit/buffer/circular 77 | ## 78 | ## var parser = initHttpParser() 79 | ## var buffer = initMarkableCircularBuffer() 80 | ## var header: ChunkHeader 81 | ## var finished = false 82 | ## 83 | ## while not finished: 84 | ## put data to buffer ... 85 | ## finished = parser.parseChunkHeader(buffer, header) 86 | ## 87 | ## 另一个例子: 88 | ## 89 | ## .. code-block::nim 90 | ## 91 | ## import netkit/http/parser 92 | ## import netkit/http/chunk 93 | ## import netkit/buffer/circular 94 | ## 95 | ## var parser = initHttpParser() 96 | ## var buffer = initMarkableCircularBuffer() 97 | ## var header: ChunkHeader 98 | ## 99 | ## let s = "9; language=en; city=London\r\n" 100 | ## buffer.add(s.cstring, s.len) 101 | ## assert parser.parseChunkHeader(buffer, header) == true 102 | ## buffer.del(s.len) 103 | ## 104 | ## assert header.size == 9 105 | ## assert header.extensions == "; language=en; city=London" 106 | ## 107 | ## See **chunk** module and **metadata** module for more information about chunked encoding. 108 | ## 109 | ## .. container:: r-fragment 110 | ## 111 | ## 解析 chunked 编码消息的尾部 112 | ## ------------------------------ 113 | ## 114 | ## 例子: 115 | ## 116 | ## .. code-block::nim 117 | ## 118 | ## import netkit/http/parser 119 | ## import netkit/http/chunk 120 | ## import netkit/buffer/circular 121 | ## 122 | ## var parser = initHttpParser() 123 | ## var buffer = initMarkableCircularBuffer() 124 | ## var trailers: seq[string] 125 | ## var finished = false 126 | ## 127 | ## while not finished: 128 | ## put data to buffer ... 129 | ## finished = parser.parseChunkEnd(buffer, trailers) 130 | ## 131 | ## 另一个例子: 132 | ## 133 | ## .. 
code-block::nim 134 | ## 135 | ## import netkit/http/parser 136 | ## import netkit/http/chunk 137 | ## import netkit/buffer/circular 138 | ## 139 | ## var parser = initHttpParser() 140 | ## var buffer = initMarkableCircularBuffer() 141 | ## var trailers: seq[string] 142 | ## 143 | ## let s = "\0\r\nExpires": "Wed, 21 Oct 2015 07:28:00 GMT\r\n\r\n" 144 | ## buffer.add(s.cstring, s.len) 145 | ## assert parser.parseChunkEnd(buffer, trailers) == true 146 | ## buffer.del(s.len) 147 | ## 148 | ## assert trailers[0] == "Expires": "Wed, 21 Oct 2015 07:28:00 GMT" 149 | ## 150 | ## 看看 **chunk** 模块和 **metadata** 模块了解更多关于 terminating chunk and trailers 的信息。 151 | 152 | 153 | import strutils 154 | import netkit/buffer/circular 155 | import netkit/http/limits 156 | import netkit/http/exception 157 | import netkit/http/spec 158 | import netkit/http/uri 159 | import netkit/http/httpmethod 160 | import netkit/http/version 161 | import netkit/http/status 162 | import netkit/http/headerfield 163 | import netkit/http/header 164 | import netkit/http/chunk 165 | 166 | type 167 | HttpParser* = object ## HTTP 消息解析器。 168 | secondaryBuffer: string 169 | currentLineLen: Natural 170 | currentFieldName: string 171 | currentFieldCount: Natural 172 | state: HttpParseState 173 | startLineState: StartLineState 174 | 175 | HttpParseState {.pure.} = enum 176 | StartLine, FieldName, FieldValue, Body 177 | 178 | StartLineState {.pure.} = enum 179 | Method, Url, Version, Code, Reason 180 | 181 | MarkProcessState {.pure.} = enum 182 | Unknown, Token, Crlf 183 | 184 | proc initHttpParser*(): HttpParser = 185 | ## 初始化一个 ``HttpParser`` 。 186 | discard 187 | 188 | proc clear*(p: var HttpParser) = discard 189 | ## 重置解析器以清除所有状态。 190 | ## 191 | ## 由于解析器是增量的,因此在解析过程中将保存许多状态。此函数将重置所有状态,以开始新的解析过程。 192 | 193 | proc parseHttpHeader*(p: var HttpParser, buf: var MarkableCircularBuffer, header: var HttpHeader): bool = discard 194 | ## 解析 HTTP 消息的头部。 ``buf`` 指定一个循环缓冲区,存储被解析的数据。 ``header`` 指定解析完成时输出的消息标头对象。 如果解析完成,则返回 ``true`` 。 195 | ## 196 | ## 根据 ``header`` 的 ``kind`` 属性值,采用不同的解析方案。当 ``kind = Request`` 时,消息被解析为请求。当 ``kind = Response`` 时,消息被解析为响应。 197 | ## 198 | ## 此过程是增量执行的,也就是说,下一次解析将从上一次结束的位置继续。 199 | 200 | proc parseChunkHeader*( 201 | p: var HttpParser, 202 | buf: var MarkableCircularBuffer, 203 | header: var ChunkHeader 204 | ): bool = discard 205 | ## 解析通过 chunked 编码消息的块的头部(大小和扩展名)。 206 | ## 207 | ## 此过程是增量执行的,也就是说,下一次解析将从上一次结束的位置继续。 208 | 209 | proc parseChunkEnd*( 210 | p: var HttpParser, 211 | buf: var MarkableCircularBuffer, 212 | trailers: var seq[string] 213 | ): bool = discard 214 | ## 解析通过 chunked 编码消息的尾部(终止块、trailers、final CRLF)。 215 | ## 216 | ## 此过程是增量执行的,也就是说,下一次解析将从上一次结束的位置继续。 217 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/reader.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 
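# Usage sketch (comments only, illustrative rather than normative): inside a
# request handler — the RequestHandler signature comes from netkit/http/server —
# the reader API declared below can drain an incoming request body like this:
#
#   proc handle(req: ServerRequest, res: ServerResponse) {.async.} =
#     echo req.reqMethod, " ", req.url   # request line of the incoming message
#     let body = await req.readAll()     # read the whole message body as a string
#     # once readAll() completes, req.ended should report true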
6 | 7 | ## 这个模块定义 HTTP 相关的读操作的抽象。 8 | ## 9 | ## 概述 10 | ## ======================== 11 | ## 12 | ## 服务器从客户端读取传入的请求,而客户端从服务器读取返回的响应。 13 | ## 14 | ## ``HttpReader`` 是读操作的基对象, ``ServerRequest`` 和 ``ClientResponse`` 继承自该对象。 ``ServerRequest`` 表示来自客户端的请求, 15 | ## ``ClientResponse`` 表示来自服务器的响应。 16 | 17 | import strutils 18 | import strtabs 19 | import asyncdispatch 20 | import nativesockets 21 | import netkit/locks 22 | import netkit/buffer/constants as buffer_constants 23 | import netkit/buffer/circular 24 | import netkit/http/limits 25 | import netkit/http/exception 26 | import netkit/http/spec 27 | import netkit/http/httpmethod 28 | import netkit/http/version 29 | import netkit/http/status 30 | import netkit/http/headerfield 31 | import netkit/http/header 32 | import netkit/http/connection 33 | import netkit/http/chunk 34 | import netkit/http/metadata 35 | 36 | type 37 | HttpReader* = ref object of RootObj ## 表示 HTTP 相关的读操作。 38 | conn: HttpConnection 39 | lock: AsyncLock 40 | header*: HttpHeader 41 | metadata: HttpMetadata 42 | onEnd: proc () {.gcsafe, closure.} 43 | contentLen: Natural 44 | chunked: bool 45 | readable: bool 46 | 47 | ServerRequest* = ref object of HttpReader ## 表示来自客户端的请求。 48 | ClientResponse* = ref object of HttpReader ## 表示来自服务器的响应。 49 | 50 | proc newServerRequest*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ServerRequest = discard 51 | ## 创建一个新的 ``ServerRequest`` 。 52 | 53 | proc newClientResponse*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ClientResponse = discard 54 | ## 创建一个新的 ``ClientResponse`` 。 55 | 56 | proc reqMethod*(req: ServerRequest): HttpMethod {.inline.} = discard 57 | ## 返回请求方法。 58 | 59 | proc url*(req: ServerRequest): string {.inline.} = discard 60 | ## 返回 url。 61 | 62 | proc status*(res: ClientResponse): HttpCode {.inline.} = discard 63 | ## 返回状态码。 64 | 65 | proc version*(reader: HttpReader): HttpVersion {.inline.} = discard 66 | ## 返回 HTTP 版本。 67 | 68 | proc fields*(reader: HttpReader): HeaderFields {.inline.} = discard 69 | ## 返回头字段集合。 70 | 71 | proc metadata*(reader: HttpReader): HttpMetadata {.inline.} = discard 72 | ## 返回元数据。 73 | 74 | proc ended*(reader: HttpReader): bool {.inline.} = discard 75 | ## 如果底部连接已断开或无法读取更多数据,则返回 ``true`` 。 76 | 77 | proc normalizeSpecificFields*(reader: HttpReader) = discard 78 | ## 规范化一些特殊的头字段。 79 | 80 | proc read*(reader: HttpReader, buf: pointer, size: range[int(LimitChunkDataLen)..high(int)]): Future[Natural] = discard 81 | ## 读取数据直到 ``size`` 字节,读取的数据填充在 ``buf`` 。 82 | ## 83 | ## 返回值是实际读取的字节数。这个值可能小于 ``size``。 ``0`` 值表示 ``EOF`` ,即无法读取更多数据。 84 | ## 85 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 86 | 87 | proc read*(reader: HttpReader): Future[string] = discard 88 | ## 读取数据直到 ``size`` 字节,读取的数据以字符串返回。 89 | ## 90 | ## 如果返回值是 ``""``, 表示 ``EOF`` ,即无法读取更多数据。 91 | ## 92 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 93 | 94 | proc readAll*(reader: HttpReader): Future[string] = discard 95 | ## 读取所有可读的数据,以字符串返回。 96 | ## 97 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 98 | 99 | proc readDiscard*(reader: HttpReader): Future[void] = discard 100 | ## 读取所有可读的数据,并丢掉这些数据。 101 | ## 102 | ## 如果读过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功读取之前连接断开,则会触发 ``ReadAbortedError`` 异常。 -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/server.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 
Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块实现了一个 HTTP 服务器。 8 | 9 | import asyncdispatch 10 | import nativesockets 11 | import os 12 | import netkit/http/exception 13 | import netkit/http/spec 14 | import netkit/http/status 15 | import netkit/http/connection 16 | import netkit/http/reader 17 | import netkit/http/writer 18 | 19 | when defined(posix): 20 | from posix import EBADF 21 | 22 | type 23 | AsyncHttpServer* = ref object ## 服务器。 24 | socket: AsyncFD 25 | domain: Domain 26 | onRequest: RequestHandler 27 | closed: bool 28 | 29 | RequestHandler* = proc (req: ServerRequest, res: ServerResponse): Future[void] {.closure, gcsafe.} 30 | 31 | proc newAsyncHttpServer*(): AsyncHttpServer = discard 32 | ## 创建一个新的 ``AsyncHttpServer`` 。 33 | 34 | proc `onRequest=`*(server: AsyncHttpServer, handler: RequestHandler) = discard 35 | ## 为服务器设置 hook 函数。每当有一个新的请求到来时,触发这个 hook 函数。 36 | 37 | proc close*(server: AsyncHttpServer) = discard 38 | ## 关闭服务器以释放底部资源。 39 | 40 | proc serve*( 41 | server: AsyncHttpServer, 42 | port: Port, 43 | address: string = "", 44 | domain = AF_INET, 45 | readTimeout = 0 46 | ): Future[void] = discard 47 | ## 启动服务器,侦听 ``address`` 和 ``port`` 传入的 HTTP 连接。 ``readTimeout`` 指定读操作和 keepalive 的 48 | ## 超时时间。 -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/spec.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块包含 HTTP 规范相关的一些信息。 8 | 9 | const 10 | # [RFC5234](https://tools.ietf.org/html/rfc5234#appendix-B.1) 11 | COLON* = ':' 12 | COMMA* = ',' 13 | SEMICOLON* = ';' 14 | CR* = '\x0D' 15 | LF* = '\x0A' 16 | CRLF* = "\x0D\x0A" 17 | SP* = '\x20' 18 | HTAB* = '\x09' 19 | WSP* = {SP, HTAB} 20 | 21 | proc checkFieldName*(s: string) {.raises: [ValueError].} = discard 22 | ## 检查 ``s`` 是否为有效的 HTTP 头字段名称。 23 | ## 24 | ## `HTTP RFC 5234 `_ 25 | ## 26 | ## .. code-block::nim 27 | ## 28 | ## DIGIT = %x30-39 ; 0-9 29 | ## ALPHA = %x41-5A / %x61-7A ; A-Z / a-z 30 | ## 31 | ## `HTTP RFC 7230 `_ 32 | ## 33 | ## .. code-block::nim 34 | ## 35 | ## field-name = token 36 | ## token = 1*tchar 37 | ## tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" 38 | ## / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" 39 | ## / DIGIT 40 | ## / ALPHA 41 | ## ; any VCHAR, except delimiters 42 | ## 43 | 44 | proc checkFieldValue*(s: string) {.raises: [ValueError].} = discard 45 | ## 检查 ``s`` 是否为有效的 HTTP 头字段值。 46 | ## 47 | ## `HTTP RFC 5234 `_ 48 | ## 49 | ## .. code-block::nim 50 | ## 51 | ## HTAB = %x09 ; horizontal tab 52 | ## SP = %x20 ; ' ' 53 | ## VCHAR = %x21-7E ; visible (printing) characters 54 | ## 55 | ## `HTTP RFC 7230 `_ 56 | ## 57 | ## .. 
code-block::nim 58 | ## 59 | ## field-value = \*( field-content / obs-fold ) 60 | ## field-content = field-vchar [ 1\*( SP / HTAB ) field-vchar ] 61 | ## field-vchar = VCHAR / obs-text 62 | ## obs-text = %x80-FF 63 | ## obs-fold = CRLF 1\*( SP / HTAB ) ; obsolete line folding 64 | ## 65 | 66 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/status.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块包含 HTTP 状态码。 8 | ## 9 | ## 概述 10 | ## ======================== 11 | ## 12 | ## 在 HTTP 1.0 及以后版本中,HTTP 响应的第一行称为状态行,包含数字状态代码(例如 ``404`` )和原因短语(例如 ``Not Found`` )。 13 | ## 14 | ## .. 15 | ## 16 | ## 看看 `Hypertext Transfer Protocol `_ 了解更多。 17 | 18 | type 19 | HttpCode* = enum ## HTTP 状态码。 20 | Http100 = "100 Continue" 21 | Http101 = "101 Switching Protocols" 22 | Http200 = "200 OK" 23 | Http201 = "201 Created" 24 | Http202 = "202 Accepted" 25 | Http203 = "203 Non-Authoritative Information" 26 | Http204 = "204 No Content" 27 | Http205 = "205 Reset Content" 28 | Http206 = "206 Partial Content" 29 | Http300 = "300 Multiple Choices" 30 | Http301 = "301 Moved Permanently" 31 | Http302 = "302 Found" 32 | Http303 = "303 See Other" 33 | Http304 = "304 Not Modified" 34 | Http305 = "305 Use Proxy" 35 | Http307 = "307 Temporary Redirect" 36 | Http400 = "400 Bad Request" 37 | Http401 = "401 Unauthorized" 38 | Http403 = "403 Forbidden" 39 | Http404 = "404 Not Found" 40 | Http405 = "405 Method Not Allowed" 41 | Http406 = "406 Not Acceptable" 42 | Http407 = "407 Proxy Authentication Required" 43 | Http408 = "408 Request Timeout" 44 | Http409 = "409 Conflict" 45 | Http410 = "410 Gone" 46 | Http411 = "411 Length Required" 47 | Http412 = "412 Precondition Failed" 48 | Http413 = "413 Request Entity Too Large" 49 | Http414 = "414 Request-URI Too Long" 50 | Http415 = "415 Unsupported Media Type" 51 | Http416 = "416 Requested Range Not Satisfiable" 52 | Http417 = "417 Expectation Failed" 53 | Http418 = "418 I'm a teapot" 54 | Http421 = "421 Misdirected Request" 55 | Http422 = "422 Unprocessable Entity" 56 | Http426 = "426 Upgrade Required" 57 | Http428 = "428 Precondition Required" 58 | Http429 = "429 Too Many Requests" 59 | Http431 = "431 Request Header Fields Too Large" 60 | Http451 = "451 Unavailable For Legal Reasons" 61 | Http500 = "500 Internal Server Error" 62 | Http501 = "501 Not Implemented" 63 | Http502 = "502 Bad Gateway" 64 | Http503 = "503 Service Unavailable" 65 | Http504 = "504 Gateway Timeout" 66 | Http505 = "505 HTTP Version Not Supported" 67 | 68 | proc parseHttpCode*(code: int): HttpCode {.raises: [ValueError].} = discard 69 | ## 将整数转换为状态码。当 ``code`` 不是有效的状态码时,引发 ``ValueError`` 。 70 | ## 71 | ## 例子: 72 | ## 73 | ## .. 
code-block::nim 74 | ## 75 | ## doAssert parseHttpCode(100) == Http100 76 | ## doAssert parseHttpCode(200) == Http200 -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/uri.nim: -------------------------------------------------------------------------------- 1 | import std / uri 2 | 3 | 4 | export uri 5 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/version.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块包含 HTTP 版本的定义。 8 | 9 | type 10 | HttpVersion* = enum ## HTTP 版本。 11 | HttpVer10 = "HTTP/1.0", 12 | HttpVer11 = "HTTP/1.1" 13 | HttpVer20 = "HTTP/2.0" 14 | 15 | proc parseHttpVersion*(s: string): HttpVersion {.raises: [ValueError].} = discard 16 | ## 将字符串转换为状态码。当 ``s`` 不是有效的 HTTP 版本时,引发 ``ValueError`` 。当前只有 `"HTTP/1.0"` 和 `"HTTP/1.1"` 17 | ## 是有效的。 18 | ## 19 | ## 例子: 20 | ## 21 | ## .. code-block::nim 22 | ## 23 | ## let ver = parseHttpVersion("HTTP/1.1") 24 | ## doAssert ver == HttpVer11 -------------------------------------------------------------------------------- /doc/zh/code/netkit/http/writer.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块定义 HTTP 相关的写操作的抽象。 8 | ## 9 | ## 概述 10 | ## ======================== 11 | ## 12 | ## 服务器将响应发给客户端,客户端将请求发给服务器。 13 | ## 14 | ## ``HttpWriter`` 是写操作的基对象, ``ServerResponse`` 和 ``ClientRequest`` 继承自该对象。 15 | ## ``ServerResponse`` 表示服务器发出的响应, ``ClientRequest`` 表示客户端发出的请求。 16 | 17 | import strutils 18 | import asyncdispatch 19 | import nativesockets 20 | import netkit/locks 21 | import netkit/http/exception 22 | import netkit/http/status 23 | import netkit/http/headerfield 24 | import netkit/http/header 25 | import netkit/http/connection 26 | 27 | type 28 | HttpWriter* = ref object of RootObj ## 表示 HTTP 相关的写操作。 29 | conn: HttpConnection 30 | lock: AsyncLock 31 | onEnd: proc () {.gcsafe, closure.} 32 | writable: bool 33 | 34 | ServerResponse* = ref object of HttpWriter ## 表示服务器发出的响应。 35 | ClientRequest* = ref object of HttpWriter ## 表示客户端发出的请求。 36 | 37 | proc newServerResponse*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ServerResponse = discard 38 | ## 创建一个新的 ``ServerResponse`` 。 39 | 40 | proc newClientRequest*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ClientRequest = discard 41 | ## 创建一个新的 ``ClientRequest`` 。 42 | 43 | proc ended*(writer: HttpWriter): bool {.inline.} = discard 44 | ## 如果底部连接已断开或写端已经关闭,则返回 ``true`` 。 45 | 46 | proc write*(writer: HttpWriter, buf: pointer, size: Natural): Future[void] = discard 47 | ## 从 ``buf`` 写入 ``size`` 字节的数据。 48 | ## 49 | ## 如果写过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功写之前连接断开或者写端已经关闭,则会触发 ``WriteAbortedError`` 异常。 50 | 51 | proc write*(writer: HttpWriter, data: string): Future[void] = discard 52 | ## 写入一个字符串。 53 | ## 54 | ## 如果写过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功写之前连接断开或者写端已经关闭,则会触发 ``WriteAbortedError`` 异常。 55 | 56 | proc write*( 57 | writer: HttpWriter, 58 | statusCode: HttpCode 59 | ): Future[void] = discard 60 | ## 写入一个消息头。 61 | ## 62 | ## 如果写过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功写之前连接断开或者写端已经关闭,则会触发 ``WriteAbortedError`` 异常。 63 | 64 | proc write*( 65 | writer: 
HttpWriter, 66 | statusCode: HttpCode, 67 | fields: openArray[tuple[name: string, value: string]] 68 | ): Future[void] = discard 69 | ## 写入一个消息头。 70 | ## 71 | ## 如果写过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功写之前连接断开或者写端已经关闭,则会触发 ``WriteAbortedError`` 异常。 72 | 73 | proc write*( 74 | writer: HttpWriter, 75 | statusCode: HttpCode, 76 | fields: openArray[tuple[name: string, value: seq[string]]] 77 | ): Future[void] = discard 78 | ## 写入一个消息头。 79 | ## 80 | ## 如果写过程中出现系统错误,则会触发 ``OSError`` 异常;如果在成功写之前连接断开或者写端已经关闭,则会触发 ``WriteAbortedError`` 异常。 81 | 82 | proc writeEnd*(writer: HttpWriter) = discard 83 | ## 关闭写端。之后,不能继续向 ``writer`` 写入数据,否则将引发 ``WriteAbortedError`` 异常。 84 | -------------------------------------------------------------------------------- /doc/zh/code/netkit/locks.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块实现了异步锁。IO 在涉及到 “流” 的时候,不可避免的引入 “序” 的问题。为了保证多个读写 “序” 8 | ## 的正确,需要异步锁进行同步或者说是排队。通常,您不会直接使用异步锁。异步锁作为 Netkit 的底层机制 9 | ## 控制 IO “序” 的一致性,并对外提供 “锁” 无关的开放 API 。 10 | ## 11 | ## 与同步风格的锁一样,您应该总是以 “窗口” 的方式操作锁,并尽可能将锁与某个特定对象绑定,以避免 “死锁”、 12 | ## “活锁” 等问题。 13 | 14 | import asyncdispatch 15 | 16 | type 17 | AsyncLock* = object ## 异步锁对象。 18 | locked: bool 19 | 20 | proc initAsyncLock*(): AsyncLock = discard 21 | ## 初始化一个 ``AsyncLock`` 。 22 | 23 | proc acquire*(L: AsyncLock): Future[void] = discard 24 | ## 尝试获取一把锁。 25 | 26 | proc release*(L: AsyncLock) = discard 27 | ## 释放已经获取的锁。 28 | 29 | proc isLocked*(L: AsyncLock): bool = discard 30 | ## 判断 ``L`` 是否处于锁住状态。 -------------------------------------------------------------------------------- /doc/zh/code/netkit/misc.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## 这个模块包含了一些其他功能,这些功能不属于任何其他模块。 8 | 9 | template offset*(p: pointer, n: int): pointer = discard 10 | ## Returns a new pointer, which is offset ``n`` bytes backwards from ``p``. 11 | 12 | template checkDefNatural*(value: static[Natural], name: static[string]): untyped = discard 13 | ## 检查 ``value`` 是否是自然数 (零和正整数) 。 如果不是,则停止编译。 ``name`` 指定其符号名字。 -------------------------------------------------------------------------------- /netkit.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | import netkit/buffer 8 | import netkit/http 9 | import netkit/locks 10 | import netkit/misc 11 | 12 | export buffer 13 | export http 14 | export locks 15 | export misc -------------------------------------------------------------------------------- /netkit.nimble: -------------------------------------------------------------------------------- 1 | # Package 2 | 3 | version = "0.1.0" 4 | author = "Wang Tong" 5 | description = "A versatile network development kit providing tools commonly used in network programming." 
6 | license = "MIT" 7 | 8 | 9 | # Dependencies 10 | 11 | requires "nim >= 1.0.6" 12 | 13 | task test, "Run all tests": 14 | exec "testament all" 15 | -------------------------------------------------------------------------------- /netkit/buffer.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | import netkit/buffer/circular 8 | import netkit/buffer/constants 9 | import netkit/buffer/vector 10 | 11 | export circular 12 | export constants 13 | export vector 14 | -------------------------------------------------------------------------------- /netkit/buffer/constants.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains constants related to the buffer. 8 | 9 | import netkit/misc 10 | 11 | const BufferSize* {.intdefine.}: Natural = 8*1024 12 | ## Describes the number of bytes for a buffer. 13 | ## 14 | ## You can override this value at compile time with the switch option ``--define:BufferSize=``. Note 15 | ## that the value must be a natural number, that is, an integer greater than or equal to zero. Otherwise, 16 | ## an exception will be raised. 17 | 18 | checkDefNatural BufferSize, "BufferSize" -------------------------------------------------------------------------------- /netkit/buffer/vector.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module implements a growable buffer ``VectorBuffer``. The buffer can grow exponentially as needed until 8 | ## it reaches a critical value. When the critical value is reached, continued growth will cause an exception. 9 | 10 | import netkit/misc 11 | import netkit/buffer/constants 12 | 13 | type 14 | VectorBuffer* = object of RootObj ## A growable buffer. 15 | value: seq[byte] 16 | endPos: Natural # 0..n-1 17 | capacity: Natural 18 | minCapacity: Natural 19 | maxCapacity: Natural 20 | 21 | proc initVectorBuffer*( 22 | minCapacity: Natural = BufferSize, 23 | maxCapacity: Natural = BufferSize * 8 24 | ): VectorBuffer = 25 | ## Initializes an ``VectorBuffer`` . 26 | result.capacity = minCapacity 27 | result.minCapacity = minCapacity 28 | result.maxCapacity = maxCapacity 29 | result.value = newSeqOfCap[byte](minCapacity) 30 | 31 | proc capacity*(b: VectorBuffer): Natural = 32 | ## Returns the capacity of this buffer. 33 | b.capacity 34 | 35 | proc minCapacity*(b: VectorBuffer): Natural = 36 | ## Returns the minimum capacity of this buffer. 37 | b.minCapacity 38 | 39 | proc maxCapacity*(b: VectorBuffer): Natural = 40 | ## Returns the maximum capacity of this buffer. 41 | b.maxCapacity 42 | 43 | proc len*(b: VectorBuffer): Natural = 44 | ## Returns the length of the data currently stored in this buffer. 45 | b.endPos 46 | 47 | proc reset*(b: var VectorBuffer): Natural = 48 | ## Resets the buffer to restore to the original capacity while clear all stored data. 
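  ##
  ## A minimal usage sketch (the "foo" payload is only illustrative):
  ##
  ## .. code-block::nim
  ##
  ##   var buffer = initVectorBuffer()
  ##   var source = "foo"
  ##   discard buffer.add(source.cstring, source.len)
  ##   discard buffer.reset()
  ##   assert buffer.len == 0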
49 | b.capacity = b.minCapacity 50 | b.endPos = 0 51 | b.value = newSeqOfCap[byte](b.capacity) 52 | 53 | proc expand*(b: var VectorBuffer) {.raises: [OverflowError].} = 54 | ## Expands the capacity of the buffer. If it exceeds the maximum capacity, an exception is raised. 55 | let newCapacity = b.capacity * 2 56 | if newCapacity > b.maxCapacity: 57 | raise newException(OverflowError, "capacity overflow") 58 | var newValue = newSeqOfCap[byte](newCapacity) 59 | copyMem(newValue.addr, b.value.addr, b.endPos) 60 | b.capacity = newCapacity 61 | b.value = move newValue 62 | 63 | proc next*(b: var VectorBuffer): (pointer, Natural) = 64 | ## Gets the next safe storage space. The return value includes the address and length of the storable 65 | ## space. 66 | ## 67 | ## Examples: 68 | ## 69 | ## .. code-block::nim 70 | ## 71 | ## var source = "Hello World" 72 | ## var (regionPtr, regionLen) = buffer.next() 73 | ## var length = min(regionLen, s.len) 74 | ## copyMem(regionPtr, source.cstring, length) 75 | result[0] = b.value.addr.offset(b.endPos) 76 | result[1] = b.capacity - b.endPos 77 | 78 | proc pack*(b: var VectorBuffer, size: Natural): Natural = 79 | ## Tells the buffer that ``size`` bytes from the current storage location are promoted to data. Returns the actual 80 | ## length promoted. 81 | ## 82 | ## When ``next()`` is called, data is written to the storage space inside the buffer, but the buffer cannot know 83 | ## how much data was written. ``pack ()`` tells the buffer the length of the data written. 84 | ## 85 | ## Whenever ``next()`` is called, ``pack()`` should be called immediately. 86 | ## 87 | ## Examples: 88 | ## 89 | ## .. code-block::nim 90 | ## 91 | ## var source = "Hello World" 92 | ## var (regionPtr, regionLen) = buffer.next() 93 | ## var length = min(regionLen, s.len) 94 | ## copyMem(regionPtr, source.cstring, length) 95 | ## var n = buffer.pack(length) 96 | result = min(size, b.capacity - b.endPos) 97 | b.endPos = b.endPos + result 98 | 99 | proc add*(b: var VectorBuffer, source: pointer, size: Natural): Natural = 100 | ## Copies up to ``size`` lengths of data from ``source`` and store the data in the buffer. Returns the actual length 101 | ## copied. This is a simplified version of the ``next`` ``pack`` combination call. The difference is that an 102 | ## additional copy operation is made instead of writing directly to the buffer. 103 | ## 104 | ## Examples: 105 | ## 106 | ## .. code-block::nim 107 | ## 108 | ## var source = "Hello World" 109 | ## var n = buffer.add(source.cstring, source.len) 110 | result = min(size, b.capacity - b.endPos) 111 | copyMem(b.value.addr.offset(b.endPos), source, result) 112 | b.endPos = b.endPos + result 113 | 114 | proc get*(b: var VectorBuffer, dest: pointer, size: Natural, start: Natural): Natural = 115 | ## Gets up to ``size`` of the stored data from ``start`` position, copy the data to the space ``dest``. Returns the 116 | ## actual number copied. 117 | if start >= b.endPos or size == 0: 118 | return 0 119 | result = min(size, b.endPos - start) 120 | copyMem(dest, b.value.addr.offset(start), result) 121 | 122 | proc clear*(b: var VectorBuffer): Natural = 123 | ## Deletes all the stored data. 124 | b.endPos = 0 -------------------------------------------------------------------------------- /netkit/http.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 
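## This is a convenience module: it imports and re-exports every HTTP submodule of Netkit, so a
## single ``import netkit/http`` brings the whole HTTP toolkit into scope.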
6 | 7 | import netkit/http/limits 8 | import netkit/http/exception 9 | import netkit/http/spec 10 | import netkit/http/httpmethod 11 | import netkit/http/version 12 | import netkit/http/status 13 | import netkit/http/headerfield 14 | import netkit/http/header 15 | import netkit/http/chunk 16 | import netkit/http/metadata 17 | import netkit/http/cookies 18 | import netkit/http/parser 19 | import netkit/http/connection 20 | import netkit/http/reader 21 | import netkit/http/writer 22 | import netkit/http/server 23 | 24 | export limits 25 | export exception 26 | export spec 27 | export httpmethod 28 | export version 29 | export status 30 | export headerfield 31 | export header 32 | export chunk 33 | export metadata 34 | export cookies 35 | export parser 36 | export connection 37 | export reader 38 | export writer 39 | export server -------------------------------------------------------------------------------- /netkit/http/connection.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module implements an HTTP connection between a client and a server. ``HttpConnection`` provides 8 | ## several routines that can recognize the structure of HTTP messages transmitted over the network. 9 | ## 10 | ## Usage 11 | ## ======================== 12 | ## 13 | ## .. container:: r-fragment 14 | ## 15 | ## Reads a message header 16 | ## ---------------------- 17 | ## 18 | ## .. code-block::nim 19 | ## 20 | ## import netkit/http/connection 21 | ## import netkit/http/header 22 | ## 23 | ## type 24 | ## Packet = ref object 25 | ## header: HttpHeader 26 | ## 27 | ## var packet = new(Packet) 28 | ## packet.header = HttpHeader(kind: HttpHeaderKind.Request) 29 | ## 30 | ## var conn = newHttpConnection(socket, address) 31 | ## 32 | ## try: 33 | ## GC_ref(packet) 34 | ## await conn.readHttpHeader(packet.header.addr) 35 | ## finally: 36 | ## GC_unref(packet) 37 | ## 38 | ## .. container:: r-fragment 39 | ## 40 | ## Reads a message body 41 | ## ------------------------ 42 | ## 43 | ## .. code-block::nim 44 | ## 45 | ## let readLen = await conn.readData(buf, 1024) 46 | ## 47 | ## .. container:: r-fragment 48 | ## 49 | ## Reads a message body that chunked 50 | ## ------------------------------------------ 51 | ## 52 | ## .. code-block::nim 53 | ## 54 | ## type 55 | ## Packet = ref object 56 | ## header: ChunkHeader 57 | ## 58 | ## try: 59 | ## GC_ref(packet) 60 | ## await conn.readChunkHeader(packet.header.addr) 61 | ## finally: 62 | ## GC_unref(packet) 63 | ## 64 | ## if header.size == 0: # read tail 65 | ## var trailers: seq[string] 66 | ## await conn.readEnd(trailers) 67 | ## else: 68 | ## var chunkLen = header.size 69 | ## var buf = newString(header.size) 70 | ## let readLen = await conn.readData(buf, header.size) 71 | ## if readLen != header.size: 72 | ## echo "Connection closed prematurely" 73 | ## 74 | ## .. container:: r-fragment 75 | ## 76 | ## Sends a message 77 | ## --------------- 78 | ## 79 | ## .. 
code-block::nim 80 | ## 81 | ## await conn.write(""" 82 | ## GET /iocrate/netkit HTTP/1.1 83 | ## Host: iocrate.com 84 | ## Content-Length: 12 85 | ## 86 | ## foobarfoobar 87 | ## """) 88 | 89 | import strutils 90 | import asyncdispatch 91 | import nativesockets 92 | import netkit/misc 93 | import netkit/buffer/circular 94 | import netkit/http/header 95 | import netkit/http/exception 96 | import netkit/http/parser 97 | import netkit/http/chunk 98 | 99 | type 100 | HttpConnection* = ref object ## HTTP connection object. 101 | buffer: MarkableCircularBuffer 102 | parser: HttpParser 103 | socket: AsyncFD 104 | address: string 105 | closed: bool 106 | readTimeout: Natural 107 | 108 | proc newHttpConnection*(socket: AsyncFD, address: string, readTimeout: Natural): HttpConnection = 109 | ## Creates a new ``HttpConnection``. ``socket`` specifies the peer's socket descriptor, ``address`` specifies 110 | ## the peer's network address, ``readTimeout`` specifies the timeout period of the read operation. 111 | ## 112 | ## Note that ``readTimeout`` also affects the keepalive timeout. When the last response is sent and there 113 | ## is no further request for more than ``readTimeout`` milliseconds, a ``ReadAbortedError`` will be raised. 114 | new(result) 115 | result.buffer = initMarkableCircularBuffer() 116 | result.parser = initHttpParser() 117 | result.socket = socket 118 | result.address = address 119 | result.closed = false 120 | result.readTimeout = readTimeout 121 | 122 | proc close*(conn: HttpConnection) {.inline.} = 123 | ## Closes this connection to release the resources. 124 | conn.socket.closeSocket() 125 | conn.closed = true 126 | 127 | proc closed*(conn: HttpConnection): bool {.inline.} = 128 | ## Returns ``true`` if this connection is closed. 129 | conn.closed 130 | 131 | proc read(conn: HttpConnection): Future[Natural] = 132 | ## If a system error occurs during reading, an ``OsError`` will be raised. 133 | let retFuture = newFuture[Natural]("read") 134 | result = retFuture 135 | 136 | let region = conn.buffer.next() 137 | let recvFuture = conn.socket.recvInto(region[0], region[1]) 138 | 139 | proc updateDate(fd: AsyncFD): bool = 140 | result = true 141 | if not recvFuture.finished: 142 | recvFuture.clearCallbacks() 143 | retFuture.fail(newReadAbortedError("Read timeout", true)) 144 | 145 | if conn.readTimeout > 0: 146 | addTimer(conn.readTimeout, false, updateDate) 147 | 148 | recvFuture.callback = proc (fut: Future[int]) = 149 | if fut.failed: 150 | retFuture.fail(fut.readError()) 151 | else: 152 | let readLen = fut.read() 153 | if readLen == 0: 154 | retFuture.fail(newReadAbortedError("Connection closed prematurely")) 155 | else: 156 | discard conn.buffer.pack(readLen) 157 | retFuture.complete(readLen) 158 | 159 | proc read(conn: HttpConnection, buf: pointer, size: Natural): Future[Natural] = 160 | ## If a system error occurs during reading, an ``OsError`` will be raised. 
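  ##
  ## Note that the pending ``recvInto`` is guarded by a timer: when ``readTimeout > 0`` and the
  ## receive has not completed within ``readTimeout`` milliseconds, the returned future fails
  ## with a ``ReadAbortedError`` whose ``timeout`` flag is set.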
161 | let retFuture = newFuture[Natural]("read") 162 | result = retFuture 163 | 164 | let recvFuture = conn.socket.recvInto(buf, size) 165 | 166 | proc updateDate(fd: AsyncFD): bool = 167 | result = true 168 | if not recvFuture.finished: 169 | recvFuture.clearCallbacks() 170 | retFuture.fail(newReadAbortedError("Read timeout", true)) 171 | 172 | if conn.readTimeout > 0: 173 | addTimer(conn.readTimeout, false, updateDate) 174 | 175 | recvFuture.callback = proc (fut: Future[int]) = 176 | if fut.failed: 177 | retFuture.fail(fut.readError()) 178 | else: 179 | let readLen = recvFuture.read() 180 | if readLen == 0: 181 | retFuture.fail(newReadAbortedError("Connection closed prematurely")) 182 | else: 183 | retFuture.complete(readLen) 184 | 185 | proc readHttpHeader*(conn: HttpConnection, header: ptr HttpHeader): Future[void] {.async.} = 186 | ## Reads the header of a HTTP message. 187 | ## 188 | ## If a system error occurs during reading, an ``OsError`` will be raised. If the connection is 189 | ## disconnected before successful reading, a ``ReadAbortedError`` will be raised. 190 | var succ = false 191 | conn.parser.clear() 192 | if conn.buffer.len > 0: 193 | succ = conn.parser.parseHttpHeader(conn.buffer, header[]) 194 | while not succ: 195 | discard await conn.read() 196 | succ = conn.parser.parseHttpHeader(conn.buffer, header[]) 197 | 198 | proc readChunkHeader*(conn: HttpConnection, header: ptr ChunkHeader): Future[void] {.async.} = 199 | ## Reads the size and the extensions parts of a chunked data. 200 | ## 201 | ## If a system error occurs during reading, an ``OsError`` will be raised. If the connection is 202 | ## disconnected before successful reading, a ``ReadAbortedError`` will be raised. 203 | var succ = false 204 | if conn.buffer.len > 0: 205 | succ = conn.parser.parseChunkHeader(conn.buffer, header[]) 206 | while not succ: 207 | discard await conn.read() 208 | succ = conn.parser.parseChunkHeader(conn.buffer, header[]) 209 | 210 | proc readChunkEnd*(conn: HttpConnection, trailer: ptr seq[string]): Future[void] {.async.} = 211 | ## Reads the terminating chunk, trailer, and the final CRLF sequence of a chunked message. 212 | ## 213 | ## If a system error occurs during reading, an ``OsError`` will be raised. If the connection is 214 | ## disconnected before successful reading, a ``ReadAbortedError`` will be raised. 215 | var succ = false 216 | if conn.buffer.len > 0: 217 | succ = conn.parser.parseChunkEnd(conn.buffer, trailer[]) 218 | while not succ: 219 | discard await conn.read() 220 | succ = conn.parser.parseChunkEnd(conn.buffer, trailer[]) 221 | 222 | proc readData*(conn: HttpConnection, buf: pointer, size: Natural): Future[Natural] {.async.} = 223 | ## Reads up to ``size`` bytes from this connection, storing the results in the ``buf``. 224 | ## 225 | ## The return value is the number of bytes actually read. This might be less than ``size`` 226 | ## that indicates the connection is at EOF. 227 | ## 228 | ## This proc should only be used to read the message body. 229 | ## 230 | ## If a system error occurs during reading, an ``OsError`` will be raised. If the connection is 231 | ## disconnected before successful reading, a ``ReadAbortedError`` will be raised. 
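  ##
  ## A short usage sketch (the 1024-byte buffer is only illustrative):
  ##
  ## .. code-block::nim
  ##
  ##   var buf = newString(1024)
  ##   let readLen = await conn.readData(buf.cstring, buf.len)
  ##   if readLen < buf.len:
  ##     echo "connection is at EOF, no more data"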
232 | result = conn.buffer.len 233 | if result >= size: 234 | discard conn.buffer.get(buf, size) 235 | discard conn.buffer.del(size) 236 | result = size 237 | else: 238 | if result > 0: 239 | discard conn.buffer.get(buf, result) 240 | discard conn.buffer.del(result) 241 | var remainingLen = size - result 242 | while remainingLen > 0: 243 | let n = await conn.read(buf.offset(result), remainingLen) 244 | discard conn.buffer.get(buf.offset(result), n) 245 | discard conn.buffer.del(n) 246 | result.inc(n) 247 | remainingLen.dec(n) 248 | 249 | proc write*(conn: HttpConnection, buf: pointer, size: Natural): Future[void] {.inline.} = 250 | ## Writes ``size`` bytes from ``buf`` to the connection. 251 | ## 252 | ## If a system error occurs during writing, an ``OsError`` will be raised. 253 | result = conn.socket.send(buf, size) 254 | 255 | proc write*(conn: HttpConnection, data: string): Future[void] {.inline.} = 256 | ## Writes a string to the connection. 257 | ## 258 | ## If a system error occurs during writing, an ``OsError`` will be raised. 259 | result = conn.socket.send(data) -------------------------------------------------------------------------------- /netkit/http/cookies.nim: -------------------------------------------------------------------------------- 1 | ## This module provides the ``Cookie`` type, which directly maps to Set-Cookie HTTP response headers, 2 | ## and the ``CookieJar`` type which contains many cookies. 3 | ## 4 | ## Overview 5 | ## ======================== 6 | ## 7 | ## ``Cookie`` type is used to generate Set-Cookie HTTP response headers. 8 | ## Server sends Set-Cookie HTTP response headers to the user agent. 9 | ## So the user agent can send them back to the server later. 10 | ## 11 | ## ``CookieJar`` contains many cookies from the user agent. 12 | ## 13 | 14 | 15 | import options, times, strtabs, parseutils, strutils 16 | 17 | 18 | type 19 | SameSite* {.pure.} = enum ## The SameSite cookie attribute. 20 | None, Lax, Strict 21 | 22 | Cookie* = object ## Cookie type represents Set-Cookie HTTP response headers. 23 | name*, value*: string 24 | expires*: string 25 | maxAge*: Option[int] 26 | domain*: string 27 | path*: string 28 | secure*: bool 29 | httpOnly*: bool 30 | sameSite*: SameSite 31 | 32 | CookieJar* = object ## CookieJar type is a collection of cookies. 33 | data: StringTableRef 34 | 35 | MissingValueError* = object of ValueError ## Indicates an error associated with Cookie. 36 | 37 | 38 | proc initCookie*(name, value: string, expires = "", maxAge: Option[int] = none(int), 39 | domain = "", path = "", 40 | secure = false, httpOnly = false, sameSite = Lax): Cookie {.inline.} = 41 | ## Initiates Cookie object. 42 | runnableExamples: 43 | let 44 | username = "admin" 45 | message = "ok" 46 | cookie = initCookie(username, message) 47 | 48 | doAssert cookie.name == username 49 | doAssert cookie.value == message 50 | 51 | result = Cookie(name: name, value: value, expires: expires, 52 | maxAge: maxAge, domain: domain, path: path, 53 | secure: secure, httpOnly: httpOnly, sameSite: sameSite) 54 | 55 | proc initCookie*(name, value: string, expires: DateTime|Time, 56 | maxAge: Option[int] = none(int), domain = "", path = "", secure = false, httpOnly = false, 57 | sameSite = Lax): Cookie {.inline.} = 58 | ## Initiates Cookie object. 
59 | runnableExamples: 60 | import times 61 | 62 | 63 | let 64 | username = "admin" 65 | message = "ok" 66 | expires = now() 67 | cookie = initCookie(username, message, expires) 68 | 69 | doAssert cookie.name == username 70 | doAssert cookie.value == message 71 | 72 | result = initCookie(name, value, format(expires.utc, 73 | "ddd',' dd MMM yyyy HH:mm:ss 'GMT'"), maxAge, domain, path, secure, 74 | httpOnly, sameSite) 75 | 76 | proc parseParams(cookie: var Cookie, key: string, value: string) {.inline.} = 77 | ## Parse Cookie attributes from key-value pairs. 78 | case key.toLowerAscii 79 | of "expires": 80 | if value.len != 0: 81 | cookie.expires = value 82 | of "maxage": 83 | try: 84 | cookie.maxAge = some(parseInt(value)) 85 | except ValueError: 86 | cookie.maxAge = none(int) 87 | of "domain": 88 | if value.len != 0: 89 | cookie.domain = value 90 | of "path": 91 | if value.len != 0: 92 | cookie.path = value 93 | of "secure": 94 | cookie.secure = true 95 | of "httponly": 96 | cookie.httpOnly = true 97 | of "samesite": 98 | case value.toLowerAscii 99 | of "none": 100 | cookie.sameSite = None 101 | of "strict": 102 | cookie.sameSite = Strict 103 | else: 104 | cookie.sameSite = Lax 105 | else: 106 | discard 107 | 108 | proc initCookie*(text: string): Cookie {.inline.} = 109 | ## Initiates Cookie object from strings. 110 | runnableExamples: 111 | doAssert initCookie("foo=bar=baz").name == "foo" 112 | doAssert initCookie("foo=bar=baz").value == "bar=baz" 113 | doAssert initCookie("foo=bar; HttpOnly").httpOnly 114 | 115 | var 116 | pos = 0 117 | params: string 118 | name, value: string 119 | first = true 120 | 121 | while true: 122 | pos += skipWhile(text, {' ', '\t'}, pos) 123 | pos += parseUntil(text, params, ';', pos) 124 | 125 | var start = 0 126 | start += parseUntil(params, name, '=', start) 127 | inc(start) # skip '=' 128 | if start < params.len: 129 | value = params[start .. ^1] 130 | else: 131 | value = "" 132 | 133 | if first: 134 | if name.len == 0: 135 | raise newException(MissingValueError, "cookie name is missing!") 136 | if value.len == 0: 137 | raise newException(MissingValueError, "cookie valie is missing!") 138 | result.name = name 139 | result.value = value 140 | first = false 141 | else: 142 | parseParams(result, name, value) 143 | if pos >= text.len: 144 | break 145 | inc(pos) # skip '; 146 | 147 | proc setCookie*(cookie: Cookie): string = 148 | ## Stringifys Cookie object to get Set-Cookie HTTP response headers. 149 | runnableExamples: 150 | import strformat 151 | 152 | 153 | let 154 | username = "admin" 155 | message = "ok" 156 | cookie = initCookie(username, message) 157 | 158 | doAssert setCookie(cookie) == fmt"{username}={message}; SameSite=Lax" 159 | 160 | result.add cookie.name & "=" & cookie.value 161 | if cookie.domain.strip.len != 0: 162 | result.add("; Domain=" & cookie.domain) 163 | if cookie.path.strip.len != 0: 164 | result.add("; Path=" & cookie.path) 165 | if cookie.maxAge.isSome: 166 | result.add("; Max-Age=" & $cookie.maxAge.get()) 167 | if cookie.expires.strip.len != 0: 168 | result.add("; Expires=" & cookie.expires) 169 | if cookie.secure: 170 | result.add("; Secure") 171 | if cookie.httpOnly: 172 | result.add("; HttpOnly") 173 | if cookie.sameSite != None: 174 | result.add("; SameSite=" & $cookie.sameSite) 175 | 176 | proc `$`*(cookie: Cookie): string {.inline.} = 177 | ## Stringifys Cookie object to get Set-Cookie HTTP response headers. 
178 | runnableExamples: 179 | import strformat 180 | 181 | 182 | let 183 | username = "admin" 184 | message = "ok" 185 | cookie = initCookie(username, message) 186 | 187 | doAssert $cookie == fmt"{username}={message}; SameSite=Lax" 188 | 189 | setCookie(cookie) 190 | 191 | proc initCookieJar*(): CookieJar {.inline.} = 192 | ## Creates a new cookieJar that is empty. 193 | CookieJar(data: newStringTable(mode = modeCaseSensitive)) 194 | 195 | proc len*(cookieJar: CookieJar): int {.inline.} = 196 | ## Returns the number of names in ``cookieJar``. 197 | cookieJar.data.len 198 | 199 | proc `[]`*(cookieJar: CookieJar, name: string): string {.inline.} = 200 | ## Retrieves the value at ``cookieJar[name]``. 201 | ## 202 | ## If ``name`` is not in ``cookieJar``, the ``KeyError`` exception is raised. 203 | cookieJar.data[name] 204 | 205 | proc getOrDefault*(cookieJar: CookieJar, name: string, default = ""): string {.inline.} = 206 | ## Retrieves the value at ``cookieJar[name]`` if ``name`` is in ``cookieJar``. Otherwise, the 207 | ## default value is returned(default is ""). 208 | cookieJar.data.getOrDefault(name, default) 209 | 210 | proc hasKey*(cookieJar: CookieJar, name: string): bool {.inline.} = 211 | ## Returns true if ``name`` is in the ``cookieJar``. 212 | cookieJar.data.hasKey(name) 213 | 214 | proc contains*(cookieJar: CookieJar, name: string): bool {.inline.} = 215 | ## Returns true if ``name`` is in the ``cookieJar``. 216 | ## Alias of ``hasKey`` for use with the ``in`` operator. 217 | cookieJar.data.contains(name) 218 | 219 | proc `[]=`*(cookieJar: var CookieJar, name: string, value: string) {.inline.} = 220 | ## Inserts a ``(name, value)`` pair into ``cookieJar``. 221 | cookieJar.data[name] = value 222 | 223 | proc parse*(cookieJar: var CookieJar, text: string) {.inline.} = 224 | ## Parses CookieJar from strings. 225 | runnableExamples: 226 | var cookieJar = initCookieJar() 227 | cookieJar.parse("username=netkit; message=ok") 228 | 229 | doAssert cookieJar["username"] == "netkit" 230 | doAssert cookieJar["message"] == "ok" 231 | 232 | var 233 | pos = 0 234 | name, value: string 235 | while true: 236 | pos += skipWhile(text, {' ', '\t'}, pos) 237 | pos += parseUntil(text, name, '=', pos) 238 | if pos >= text.len: 239 | break 240 | inc(pos) # skip '=' 241 | pos += parseUntil(text, value, ';', pos) 242 | cookieJar[name] = move(value) 243 | if pos >= text.len: 244 | break 245 | inc(pos) # skip ';' 246 | 247 | iterator pairs*(cookieJar: CookieJar): tuple[name, value: string] = 248 | ## Iterates over any ``(name, value)`` pair in the ``cookieJar``. 249 | for (name, value) in cookieJar.data.pairs: 250 | yield (name, value) 251 | 252 | iterator keys*(cookieJar: CookieJar): string = 253 | ## Iterates over any ``name`` in the ``cookieJar``. 254 | for name in cookieJar.data.keys: 255 | yield name 256 | 257 | iterator values*(cookieJar: CookieJar): string = 258 | ## Iterates over any ``value`` in the ``cookieJar``. 259 | for value in cookieJar.data.values: 260 | yield value 261 | -------------------------------------------------------------------------------- /netkit/http/exception.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains a few possible errors associated with HTTP operations. 
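##
## A small sketch of how these exceptions are meant to be used (the message text is illustrative):
##
## .. code-block::nim
##
##   import netkit/http/status
##   import netkit/http/exception
##
##   try:
##     raise newHttpError(Http400, "Bad content length")
##   except HttpError as e:
##     assert e.code == Http400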
8 | 9 | import netkit/http/status 10 | 11 | type 12 | HttpError* = object of CatchableError ## Indicates an error associated with a HTTP operation. 13 | code*: range[Http400..Http505] 14 | 15 | ReadAbortedError* = object of CatchableError ## Indicates that the read operation is aborted before completion. 16 | timeout*: bool 17 | 18 | WriteAbortedError* = object of CatchableError ## Indicates that the write operation is aborted before completion. 19 | 20 | proc newHttpError*( 21 | code: range[Http400..Http505], 22 | parentException: ref Exception = nil 23 | ): ref HttpError = 24 | ## Creates a new ``ref HttpError``. 25 | result = (ref HttpError)(msg: $code, code: code, parent: parentException) 26 | 27 | proc newHttpError*( 28 | code: range[Http400..Http505], 29 | msg: string, 30 | parentException: ref Exception = nil 31 | ): ref HttpError = 32 | ## Creates a new ``ref HttpError``. 33 | result = (ref HttpError)(msg: msg, code: code, parent: parentException) 34 | 35 | proc newReadAbortedError*( 36 | msg: string, 37 | timeout: bool = false, 38 | parentException: ref Exception = nil 39 | ): ref ReadAbortedError = 40 | ## Creates a new ``ref ReadAbortedError``. 41 | result = (ref ReadAbortedError)(msg: msg, timeout: timeout, parent: parentException) 42 | 43 | proc newWriteAbortedError*( 44 | msg: string, 45 | parentException: ref Exception = nil 46 | ): ref WriteAbortedError = 47 | ## Creates a new ``ref ReadAbortedError``. 48 | result = (ref WriteAbortedError)(msg: msg, parent: parentException) -------------------------------------------------------------------------------- /netkit/http/header.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains a defination of the header of a HTTP message. 8 | ## 9 | ## Overview 10 | ## ======================== 11 | ## 12 | ## A HTTP message consists of a header and a body. The header defines the operating parameters of an HTTP 13 | ## transaction, and the body is the data bytes transmitted in an HTTP transaction message immediately following 14 | ## the header. The header consists of a start line and zero or more header fields. 15 | ## 16 | ## A message sent by a client is called a request, and a message sent by a server is called a response. 17 | ## 18 | ## The start line of a request is called request line, which consists of a request method, a url and a version. 19 | ## The start line of a response is called status line, which consists of a status code, a reason and a version. 20 | ## 21 | ## .. 22 | ## 23 | ## See `Hypertext Transfer Protocol `_ for more information. 24 | ## 25 | ## Usage 26 | ## ======================== 27 | ## 28 | ## .. container::r-fragment 29 | ## 30 | ## Request 31 | ## ------- 32 | ## 33 | ## To output a request message: 34 | ## 35 | ## .. code-block::nim 36 | ## 37 | ## import netkit/http/version 38 | ## import netkit/http/httpmethod 39 | ## import netkit/http/headerfields 40 | ## import netkit/http/header 41 | ## 42 | ## var header = HttpHeader( 43 | ## kind: HttpHeaderKind.Request, 44 | ## reqMethod: HttpGet, 45 | ## url: "/", 46 | ## version: HttpVer11, 47 | ## fields: initHeaderFields: { 48 | ## "Host": "www.iocrate.com" 49 | ## } 50 | ## ) 51 | ## assert toResponseStr(header) == "GET / HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 52 | ## 53 | ## .. 
container::r-fragment 54 | ## 55 | ## Response 56 | ## -------- 57 | ## 58 | ## To output a response message: 59 | ## 60 | ## .. code-block::nim 61 | ## 62 | ## import netkit/http/version 63 | ## import netkit/http/status 64 | ## import netkit/http/headerfields 65 | ## import netkit/http/header 66 | ## 67 | ## var header = HttpHeader( 68 | ## kind: HttpHeaderKind.Response, 69 | ## statusCode: Http200, 70 | ## version: HttpVer11, 71 | ## fields: initHeaderFields: { 72 | ## "Host": "www.iocrate.com" 73 | ## } 74 | ## ) 75 | ## assert toResponseStr(header) == "200 OK HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 76 | ## 77 | ## To output a response message without fields: 78 | ## 79 | ## .. code-block::nim 80 | ## 81 | ## import netkit/http/status 82 | ## import netkit/http/header 83 | ## 84 | ## assert toResponseStr(Http200) == "200 OK HTTP/1.1\r\n\r\n" 85 | ## 86 | ## 87 | 88 | 89 | import netkit/http/spec 90 | import netkit/http/uri 91 | import netkit/http/httpmethod 92 | import netkit/http/version 93 | import netkit/http/status 94 | import netkit/http/headerfield 95 | 96 | type 97 | HttpHeaderKind* {.pure.} = enum ## Kind of HTTP message. 98 | Request, Response 99 | 100 | HttpHeader* = object ## Represents the header of a HTTP message. Each message must contain only one header. 101 | case kind*: HttpHeaderKind 102 | of HttpHeaderKind.Request: 103 | reqMethod*: HttpMethod 104 | url*: string 105 | of HttpHeaderKind.Response: 106 | statusCode*: HttpCode 107 | version*: HttpVersion 108 | fields*: HeaderFields 109 | 110 | proc initRequestHeader*(reqMethod: HttpMethod, url: string, fields: HeaderFields): HttpHeader {.inline.} = 111 | ## Initiates HTTP request header. 112 | HttpHeader(kind: HttpHeaderKind.Request, reqMethod: reqMethod, url: url, version: HttpVer11) 113 | 114 | proc initResponseHeader*(statusCode: HttpCode, fields: HeaderFields): HttpHeader {.inline.} = 115 | ## Initates HTTP response headers. 116 | HttpHeader(kind: HttpHeaderKind.Response, statusCode: statusCode, version: HttpVer11, fields: fields) 117 | 118 | proc toResponseStr*(H: HttpHeader): string = 119 | ## Returns a header of a response message. 120 | ## 121 | ## Examples: 122 | ## 123 | ## .. code-block::nim 124 | ## 125 | ## import netkit/http/version 126 | ## import netkit/http/status 127 | ## import netkit/http/headerfields 128 | ## import netkit/http/header 129 | ## 130 | ## var header = HttpHeader( 131 | ## kind: HttpHeaderKind.Response, 132 | ## statusCode: Http200, 133 | ## version: HttpVer11, 134 | ## fields: initHeaderFields: { 135 | ## "Host": "www.iocrate.com" 136 | ## } 137 | ## ) 138 | ## assert toResponseStr(header) == "200 OK HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 139 | assert H.kind == HttpHeaderKind.Response 140 | result.add($HttpVer11) 141 | result.add(SP) 142 | result.add($H.statusCode) 143 | result.add(CRLF) 144 | for key, value in H.fields.pairs(): 145 | result.add(key) 146 | result.add(": ") 147 | result.add(value) 148 | result.add(CRLF) 149 | result.add(CRLF) 150 | 151 | proc toResponseStr*(code: HttpCode): string = 152 | ## Returns a header of a response message, ``code`` specifies the status code. The header fields is empty. 153 | ## 154 | ## Examples: 155 | ## 156 | ## .. 
code-block::nim 157 | ## 158 | ## import netkit/http/status 159 | ## import netkit/http/header 160 | ## 161 | ## assert toResponseStr(Http200) == "200 OK HTTP/1.1\r\n\r\n" 162 | result.add($HttpVer11) 163 | result.add(SP) 164 | result.add($code) 165 | result.add(CRLF) 166 | result.add(CRLF) 167 | 168 | proc toRequestStr*(H: HttpHeader): string = 169 | ## Returns a header of a request message. 170 | ## 171 | ## Examples: 172 | ## 173 | ## .. code-block::nim 174 | ## 175 | ## import netkit/http/version 176 | ## import netkit/http/httpmethod 177 | ## import netkit/http/headerfields 178 | ## import netkit/http/header 179 | ## 180 | ## var header = HttpHeader( 181 | ## kind: HttpHeaderKind.Request, 182 | ## reqMethod: HttpGet, 183 | ## url: "/", 184 | ## version: HttpVer11, 185 | ## fields: initHeaderFields: { 186 | ## "Host": "www.iocrate.com" 187 | ## } 188 | ## ) 189 | ## assert toResponseStr(HttpHeader) == "GET / HTTP/1.1\r\nHost: www.iocrate.com\r\n\r\n" 190 | assert H.kind == HttpHeaderKind.Request 191 | result.add($H.reqMethod) 192 | result.add(SP) 193 | result.add($H.url.encodeUrl()) 194 | result.add(SP) 195 | result.add($HttpVer11) 196 | result.add(CRLF) 197 | for key, value in H.fields.pairs(): 198 | result.add(key) 199 | result.add(": ") 200 | result.add(value) 201 | result.add(CRLF) 202 | result.add(CRLF) 203 | -------------------------------------------------------------------------------- /netkit/http/httpmethod.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains a definition of HTTP request method. 8 | ## 9 | ## Overview 10 | ## ======================== 11 | ## 12 | ## HTTP defines methods to indicate the desired action to be performed on the identified resource. What this 13 | ## resource represents, whether pre-existing data or data that is generated dynamically, depends on the 14 | ## implementation of the server. Often, the resource corresponds to a file or the output of an executable 15 | ## residing on the server. 16 | ## 17 | ## .. 18 | ## 19 | ## See `Hypertext Transfer Protocol `_ for more information. 20 | 21 | type 22 | HttpMethod* = enum ## HTTP request method. 23 | HttpHead = "HEAD", 24 | HttpGet = "GET", 25 | HttpPost = "POST", 26 | HttpPut = "PUT", 27 | HttpDelete = "DELETE", 28 | HttpTrace = "TRACE", 29 | HttpOptions = "OPTIONS", 30 | HttpConnect = "CONNECT", 31 | HttpPatch = "PATCH" 32 | 33 | proc parseHttpMethod*(s: string): HttpMethod {.raises: [ValueError].} = 34 | ## Converts a string to an HTTP request method. A ``ValueError`` is raised when ``s`` is not a valid method. 
35 | runnableExamples: 36 | doAssert parseHttpMethod("GET") == HttpGet 37 | doAssert parseHttpMethod("POST") == HttpPost 38 | 39 | result = 40 | case s 41 | of "GET": HttpGet 42 | of "POST": HttpPost 43 | of "HEAD": HttpHead 44 | of "PUT": HttpPut 45 | of "DELETE": HttpDelete 46 | of "PATCH": HttpPatch 47 | of "OPTIONS": HttpOptions 48 | of "CONNECT": HttpConnect 49 | of "TRACE": HttpTrace 50 | else: raise newException(ValueError, "Not Implemented") -------------------------------------------------------------------------------- /netkit/http/limits.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module defines some constants associated with HTTP operations. Some of them support redefinition 8 | ## through the ``--define`` instruction during compilation. 9 | 10 | import netkit/misc 11 | 12 | const LimitStartLineLen* {.intdefine.}: Natural = 8*1024 13 | ## Specifies the maximum number of bytes that will be allowed on the HTTP start-line. This limitation 14 | ## affects both request-line and status-line. 15 | ## 16 | ## Since the request-line consists of the HTTP method, URI, and protocol version, this directive places 17 | ## a restriction on the length of a request-URI allowed for a request on the server. 18 | 19 | const LimitHeaderFieldLen* {.intdefine.}: Natural = 8*1024 20 | ## Specifies the maximum number of bytes that will be allowed on an HTTP header field. This limitation 21 | ## affects both request and response header fields. 22 | ## 23 | ## The size of a normal HTTP header field will vary greatly among different implementations, often 24 | ## depending upon the extent to which a user has configured their browser to support detailed content 25 | ## negotiation. 26 | 27 | const LimitHeaderFieldCount* {.intdefine.}: Natural = 100 28 | ## Specifies the maximum number of HTTP header fields that will be allowed. This limitation affects both 29 | ## request and response header fields. 30 | 31 | const LimitChunkSizeLen*: Natural = 16 32 | ## Specifies the maximum number of bytes that will be allowed on the size part of an chunk data that is 33 | ## encoded by ``Transfer-Encoding: chunked``. 34 | 35 | const LimitChunkHeaderLen* {.intdefine.}: Natural = 1*1024 36 | ## Specifies the maximum number of bytes that will be allowed on the size and extensions parts of an chunk 37 | ## data that is encoded by ``Transfer-Encoding: chunked``. 38 | ## 39 | ## According to the HTTP protocol, the size and extensions parts of this kind of data are in this form: 40 | ## 41 | ## .. code-block::http 42 | ## 43 | ## 7\r\n; foo=value1; bar=value2\r\n 44 | 45 | const LimitChunkDataLen* {.intdefine.}: Natural = 1*1024 46 | ## Specifies the maximum number of bytes that will be allowed on the data part of an chunk data that is encoded 47 | ## by ``Transfer-Encoding: chunked``. 48 | ## 49 | ## According to the HTTP protocol, the data part of this kind of data are in this form: 50 | ## 51 | ## .. code-block::http 52 | ## 53 | ## Hello World\r\n 54 | 55 | const LimitChunkTrailerLen* {.intdefine.}: Natural = 8*1024 56 | ## Specifies the maximum number of bytes that will be allowed on the medatada part of a message that is encoded 57 | ## by ``Transfer-Encoding: chunked``. In fact, these metadata are some ``Trailer``. 58 | ## 59 | ## Examples: 60 | ## 61 | ## .. 
code-block::http 62 | ## 63 | ## HTTP/1.1 200 OK 64 | ## Transfer-Encoding: chunked 65 | ## Trailer: Expires 66 | ## 67 | ## 9\r\n 68 | ## Developer\r\n 69 | ## 0\r\n 70 | ## Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n 71 | ## \r\n 72 | 73 | const LimitChunkTrailerCount* {.intdefine.}: Natural = 100 74 | ## Specifies the maximum number of the medatada ``Trailer`` that will be allowed. 75 | 76 | checkDefNatural LimitStartLineLen, "LimitStartLineLen" 77 | checkDefNatural LimitHeaderFieldLen, "LimitHeaderFieldLen" 78 | checkDefNatural LimitHeaderFieldCount, "LimitHeaderFieldCount" 79 | checkDefNatural LimitChunkHeaderLen, "LimitChunkHeaderLen" 80 | checkDefNatural LimitChunkDataLen, "LimitChunkDataLen" 81 | checkDefNatural LimitChunkTrailerLen, "LimitChunkTrailerLen" 82 | checkDefNatural LimitChunkTrailerCount, "LimitChunkTrailerCount" -------------------------------------------------------------------------------- /netkit/http/metadata.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module defines a general object ``HttpMetadata`` to abstract HTTP metadata in order to simplify 8 | ## the use of metadata. 9 | ## 10 | ## Overview 11 | ## ======================== 12 | ## 13 | ## HTTP messages support carrying metadata. Currently, there are two kinds of metadata. Both of them appear 14 | ## in messages that encoded with ``Transfer-Encoding: chunked``. They are: 15 | ## 16 | ## - Chunk Extensions 17 | ## - Trailers 18 | ## 19 | ## .. container:: r-fragment 20 | ## 21 | ## Chunk Extensions 22 | ## ----------------- 23 | ## 24 | ## For a message encoded by ``Transfer-Encoding: chunked``, each data chunk is allowed to contain zero or more 25 | ## chunk-extensions. These extensions immediately follow the chunk-size, for the sake of supplying per-chunk 26 | ## metadata (such as a signature or hash), mid-message control information, or randomization of message body 27 | ## size. 28 | ## 29 | ## Each extension is a name-value pair with ``=`` as a separator, such as ``language = en``; multiple extensions 30 | ## are combined with ``;`` as a separator, such as ``language=en; city=London``. 31 | ## 32 | ## An example of carring chunk extensions: 33 | ## 34 | ## .. code-block::http 35 | ## 36 | ## HTTP/1.1 200 OK 37 | ## Transfer-Encoding: chunked 38 | ## 39 | ## 9; language=en; city=London\r\n 40 | ## Developer\r\n 41 | ## 0\r\n 42 | ## \r\n 43 | ## 44 | ## .. container:: r-fragment 45 | ## 46 | ## Trailers 47 | ## -------- 48 | ## 49 | ## Messages encoded with ``Transfer-Encoding: chunked`` are allowed to carry trailers at the end. Trailer is 50 | ## actually one or more HTTP response header fields, allowing the sender to add additional meta-information 51 | ## at the end of a message. These meta-information may be dynamically generated with the sending of the message 52 | ## body, such as message integrity check, message Digital signature, or the final state of the message after 53 | ## processing, etc. 54 | ## 55 | ## Note: Only when the client sets trailers in the request header ``TE`` (``TE: trailers``), the server can 56 | ## carry Trailer in the response. 57 | ## 58 | ## An example of carring trailers: 59 | ## 60 | ## .. 
code-block::http 61 | ## 62 | ## HTTP/1.1 200 OK 63 | ## Transfer-Encoding: chunked 64 | ## Trailer: Expires 65 | ## 66 | ## 9\r\n 67 | ## Developer\r\n 68 | ## 0\r\n 69 | ## Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n 70 | ## \r\n 71 | ## 72 | ## Usage 73 | ## ======================== 74 | ## 75 | ## For performance reasons, ``HttpMetadata`` does not further parse the content of ``trailers`` and ``extensions``. 76 | ## You can use ``parseChunkTrailers`` and ``parseChunkExtensions`` to extract the contents of them respectively. 77 | ## 78 | ## .. container:: r-fragment 79 | ## 80 | ## Chunk Extensions 81 | ## ---------------- 82 | ## 83 | ## To extract chunk extensions: 84 | ## 85 | ## .. code-block::nim 86 | ## 87 | ## import netkit/http/metadata 88 | ## import netkit/http/chunk 89 | ## 90 | ## let metadata = HttpMetadata( 91 | ## kind: HttpMetadataKind.ChunkExtensions, 92 | ## extensions: "; a1=v1; a2=v2" 93 | ## ) 94 | ## let extensions = parseChunkExtensions(metadata.extensions) 95 | ## assert extensions[0].name == "a1" 96 | ## assert extensions[0].value == "v1" 97 | ## assert extensions[1].name == "a2" 98 | ## assert extensions[1].value == "v2" 99 | ## 100 | ## .. container:: r-fragment 101 | ## 102 | ## Trailers 103 | ## -------------- 104 | ## 105 | ## To extract trailers: 106 | ## 107 | ## .. code-block::nim 108 | ## 109 | ## import netkit/http/metadata 110 | ## import netkit/http/chunk 111 | ## 112 | ## let metadata = HttpMetadata( 113 | ## kind: HttpMetadataKind.ChunkTrailers, 114 | ## trailers: @["Expires: Wed, 21 Oct 2015 07:28:00 GMT"] 115 | ## ) 116 | ## let tailers = parseChunkTrailers(metadata.trailers) 117 | ## assert tailers["Expires"][0] == "Wed, 21 Oct 2015 07:28:00 GMT" 118 | 119 | type 120 | HttpMetadataKind* {.pure.} = enum ## Kinds of metadata. 121 | None, 122 | ChunkTrailers, 123 | ChunkExtensions 124 | 125 | HttpMetadata* = object ## Metadata object. 126 | case kind*: HttpMetadataKind 127 | of HttpMetadataKind.ChunkTrailers: 128 | trailers*: seq[string] 129 | of HttpMetadataKind.ChunkExtensions: 130 | extensions*: string 131 | of HttpMetadataKind.None: 132 | discard 133 | 134 | -------------------------------------------------------------------------------- /netkit/http/reader.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module provides an abstraction of read operations related to HTTP. 8 | ## 9 | ## Overview 10 | ## ======================== 11 | ## 12 | ## A server reads the incoming request from a client, and a client reads the returned response from a 13 | ## server. 14 | ## 15 | ## ``HttpReader`` is a base object for read operations, ``ServerRequest`` and ``ClientResponse`` 16 | ## inherit from it. ``ServerRequest`` represents a incoming request from a client, and ``ClientResponse`` 17 | ## represents a returned response from a server. 
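##
## Usage
## ========================
##
## A minimal sketch of reading a request body inside a server handler (the handler signature
## follows ``RequestHandler`` from ``netkit/http/server``; the body is illustrative):
##
## .. code-block::nim
##
##   proc handle(req: ServerRequest, res: ServerResponse) {.async.} =
##     let body = await req.readAll()
##     echo "read ", body.len, " bytes"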
18 | 19 | import strutils 20 | import strtabs 21 | import asyncdispatch 22 | import nativesockets 23 | import netkit/locks 24 | import netkit/buffer/constants as buffer_constants 25 | import netkit/buffer/circular 26 | import netkit/http/limits 27 | import netkit/http/exception 28 | import netkit/http/spec 29 | import netkit/http/httpmethod 30 | import netkit/http/version 31 | import netkit/http/status 32 | import netkit/http/headerfield 33 | import netkit/http/header 34 | import netkit/http/connection 35 | import netkit/http/chunk 36 | import netkit/http/metadata 37 | 38 | type 39 | HttpReader* = ref object of RootObj ## An abstraction of read operations related to HTTP. 40 | conn: HttpConnection 41 | lock: AsyncLock 42 | header*: HttpHeader 43 | metadata: HttpMetadata 44 | onEnd: proc () {.gcsafe, closure.} 45 | contentLen: Natural 46 | chunked: bool 47 | readable: bool 48 | 49 | ServerRequest* = ref object of HttpReader ## Represents a incoming request from a client. 50 | ClientResponse* = ref object of HttpReader ## Represents a returned response from a server. 51 | 52 | proc init(reader: HttpReader, conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}) = 53 | reader.conn = conn 54 | reader.lock = initAsyncLock() 55 | reader.metadata = HttpMetadata(kind: HttpMetadataKind.None) 56 | reader.onEnd = onEnd 57 | reader.contentLen = 0 58 | reader.chunked = false 59 | reader.readable = true 60 | 61 | proc newServerRequest*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ServerRequest = 62 | ## Creates a new ``ServerRequest``. 63 | new(result) 64 | result.init(conn, onEnd) 65 | result.header = HttpHeader(kind: HttpHeaderKind.Request) 66 | 67 | proc newClientResponse*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ClientResponse = 68 | ## Creates a new ``ClientResponse``. 69 | new(result) 70 | result.init(conn, onEnd) 71 | result.header = HttpHeader(kind: HttpHeaderKind.Response) 72 | 73 | proc reqMethod*(req: ServerRequest): HttpMethod {.inline.} = 74 | ## Returns the request method. 75 | req.header.reqMethod 76 | 77 | proc url*(req: ServerRequest): string {.inline.} = 78 | ## Returns the url. 79 | req.header.url 80 | 81 | proc status*(res: ClientResponse): HttpCode {.inline.} = 82 | ## Returns the status code. 83 | res.header.statusCode 84 | 85 | proc version*(reader: HttpReader): HttpVersion {.inline.} = 86 | ## Returns the HTTP version. 87 | reader.header.version 88 | 89 | proc fields*(reader: HttpReader): HeaderFields {.inline.} = 90 | ## Returns the header fields. 91 | reader.header.fields 92 | 93 | proc metadata*(reader: HttpReader): HttpMetadata {.inline.} = 94 | ## Returns the metadata. 95 | reader.metadata 96 | 97 | proc ended*(reader: HttpReader): bool {.inline.} = 98 | ## Returns ``true`` if the underlying connection has been disconnected or no more data can be read. 
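  ##
  ## A typical consumption loop (sketch only):
  ##
  ## .. code-block::nim
  ##
  ##   while not reader.ended:
  ##     let data = await reader.read()
  ##     # ... process data ...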
99 | reader.conn.closed or not reader.readable 100 | 101 | proc normalizeContentLength(reader: HttpReader) = 102 | if reader.fields.contains("Content-Length"): 103 | if reader.fields["Content-Length"].len > 1: 104 | raise newHttpError(Http400, "Bad content length") 105 | reader.contentLen = reader.fields["Content-Length"][0].parseInt() 106 | if reader.contentLen < 0: 107 | raise newHttpError(Http400, "Bad content length") 108 | if reader.contentLen == 0: 109 | reader.readable = false 110 | 111 | proc normalizeTransforEncoding(reader: HttpReader) = 112 | if reader.fields.contains("Transfer-Encoding"): 113 | var encodings: seq[string] 114 | let items = reader.fields["Transfer-Encoding"] 115 | if items.len == 1: 116 | encodings = items[0].split(COMMA) 117 | elif items.len > 1: 118 | encodings.shallowCopy(items) 119 | else: 120 | return 121 | 122 | var i = 0 123 | let n = encodings.len - 1 124 | for encoding in encodings.items(): 125 | var vencoding = encoding 126 | vencoding.removePrefix(SP) 127 | vencoding.removePrefix(SP) 128 | if vencoding.toLowerAscii() == "chunked": 129 | if i != n: 130 | raise newHttpError(Http400, "Bad transfer encoding") 131 | reader.chunked = true 132 | reader.readable = true 133 | reader.contentLen = 0 134 | return 135 | i.inc() 136 | 137 | proc normalizeSpecificFields*(reader: HttpReader) = 138 | # TODO: more normalized header fields 139 | ## Normalizes a few special header fields. 140 | reader.normalizeContentLength() 141 | reader.normalizeTransforEncoding() 142 | 143 | template readByGuard(reader: HttpReader, buf: pointer, size: Natural) = 144 | let readFuture = reader.conn.readData(buf, size) 145 | yield readFuture 146 | if readFuture.failed: 147 | reader.conn.close() 148 | raise readFuture.readError() 149 | 150 | template readContent(reader: HttpReader, buf: pointer, size: Natural): Natural = 151 | assert not reader.conn.closed 152 | assert reader.readable 153 | assert reader.contentLen > 0 154 | let n = min(reader.contentLen, size) 155 | reader.readByGuard(buf, n) 156 | reader.contentLen.dec(n) 157 | if reader.contentLen == 0: 158 | reader.readable = false 159 | reader.onEnd() 160 | n 161 | 162 | template readContent(reader: HttpReader): string = 163 | assert not reader.conn.closed 164 | assert reader.readable 165 | let n = min(reader.contentLen, BufferSize) 166 | var buffer = newString(n) 167 | reader.readByGuard(buffer.cstring, n) # should need Gc_ref(result) ? 
168 | buffer.shallow() # still ref result 169 | reader.contentLen.dec(n) 170 | if reader.contentLen == 0: 171 | reader.readable = false 172 | reader.onEnd() 173 | # if reader.writer.writable == false: 174 | # case reader.header.kind 175 | # of HttpHeaderKind.Request: 176 | # asyncCheck reader.conn.handleNextRequest() 177 | # of HttpHeaderKind.Response: 178 | # raise newException(Exception, "Not Implemented yet") 179 | buffer 180 | 181 | template readChunkHeaderByGuard(reader: HttpReader, header: ChunkHeader) = 182 | # TODO: 考虑内存泄漏 183 | let readFuture = reader.conn.readChunkHeader(header.addr) 184 | yield readFuture 185 | if readFuture.failed: 186 | reader.conn.close() 187 | raise readFuture.readError() 188 | if header.extensions.len > 0: 189 | header.extensions.shallow() 190 | reader.metadata = HttpMetadata(kind: HttpMetadataKind.ChunkExtensions, extensions: header.extensions) 191 | 192 | template readChunkEndByGuard(reader: HttpReader) = 193 | var trailersVar: seq[string] 194 | let readFuture = reader.conn.readChunkEnd(trailersVar.addr) 195 | yield readFuture 196 | if readFuture.failed: 197 | reader.conn.close() 198 | raise readFuture.readError() 199 | if trailersVar.len > 0: 200 | trailersVar.shallow() 201 | reader.metadata = HttpMetadata(kind: HttpMetadataKind.ChunkTrailers, trailers: trailersVar) 202 | 203 | template readChunk(reader: HttpReader, buf: pointer, n: int): Natural = 204 | assert reader.conn.closed 205 | assert reader.readable 206 | assert reader.chunked 207 | var header: ChunkHeader 208 | # TODO: 考虑内存泄漏 GC_ref GC_unref 209 | reader.readChunkHeaderByGuard(header) 210 | if header.size == 0: 211 | reader.readChunkEndByGuard() 212 | reader.readable = false 213 | reader.onEnd() 214 | else: 215 | assert header.size <= n 216 | reader.readByGuard(buf, header.size) 217 | header.size 218 | 219 | template readChunk(reader: HttpReader): string = 220 | assert reader.conn.closed 221 | assert reader.readable 222 | assert reader.chunked 223 | var data = "" 224 | var header: ChunkHeader 225 | reader.readChunkHeaderByGuard(header) 226 | if header.size == 0: 227 | reader.readChunkEndByGuard() 228 | reader.readable = false 229 | reader.onEnd() 230 | else: 231 | data = newString(header.size) 232 | reader.readByGuard(data.cstring, header.size) 233 | data.shallow() 234 | data 235 | 236 | proc read*(reader: HttpReader, buf: pointer, size: range[int(LimitChunkDataLen)..high(int)]): Future[Natural] {.async.} = 237 | ## Reads up to ``size`` bytes, storing the results in the ``buf``. 238 | ## 239 | ## The return value is the number of bytes actually read. This might be less than ``size``. 240 | ## A value of zero indicates ``EOF``, i.e. no more data can be read. 241 | ## 242 | ## If a system error occurs during reading, an ``OsError`` will be raised. If the connection is 243 | ## disconnected before successful reading, a ``ReadAbortedError`` will be raised. 244 | if not reader.ended: 245 | await reader.lock.acquire() 246 | try: 247 | if reader.chunked: 248 | result = reader.readChunk(buf, size) 249 | else: 250 | result = reader.readContent(buf, size) 251 | finally: 252 | reader.lock.release() 253 | 254 | proc read*(reader: HttpReader): Future[string] {.async.} = 255 | ## Reads up to ``size`` bytes, storing the results as a string. 256 | ## 257 | ## If the return value is ``""``, that indicates ``eof``, i.e. at the end of the request. 258 | ## 259 | ## If a system error occurs during reading, an ``OsError`` will be raised. 
If the connection is 260 | ## disconnected before successful reading, a ``ReadAbortedError`` will be raised. 261 | if not reader.ended: 262 | await reader.lock.acquire() 263 | try: 264 | if reader.chunked: 265 | result = reader.readChunk() 266 | else: 267 | result = reader.readContent() 268 | finally: 269 | reader.lock.release() 270 | 271 | proc readAll*(reader: HttpReader): Future[string] {.async.} = 272 | ## Reads all bytes, storing the results as a string. 273 | ## 274 | ## If a system error occurs during reading, an ``OsError`` will be raised. If the connection is 275 | ## disconnected before successful reading, a ``ReadAbortedError`` will be raised. 276 | if not reader.ended: 277 | await reader.lock.acquire() 278 | try: 279 | if reader.chunked: 280 | while not reader.ended: 281 | result.add(reader.readChunk()) 282 | else: 283 | result = newStringOfCap(reader.contentLen) 284 | while not reader.ended: 285 | result.add(reader.readContent()) 286 | finally: 287 | reader.lock.release() 288 | 289 | proc readDiscard*(reader: HttpReader): Future[void] {.async.} = 290 | ## Reads all bytes, discarding the results. 291 | ## 292 | ## If the return future is failed, ``OsError`` or ``ReadAbortedError`` may be raised. 293 | if not reader.ended: 294 | await reader.lock.acquire() 295 | let buffer = newString(LimitChunkDataLen) 296 | GC_ref(buffer) 297 | try: 298 | if reader.chunked: 299 | while not reader.ended: 300 | discard reader.readChunk(buffer.cstring, LimitChunkDataLen) 301 | else: 302 | while not reader.ended: 303 | discard reader.readContent(buffer.cstring, LimitChunkDataLen) 304 | finally: 305 | GC_unref(buffer) 306 | reader.lock.release() 307 | -------------------------------------------------------------------------------- /netkit/http/server.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module implements an HTTP server. 8 | 9 | import asyncdispatch 10 | import nativesockets 11 | import os 12 | import netkit/http/exception 13 | import netkit/http/connection 14 | import netkit/http/reader 15 | import netkit/http/writer 16 | 17 | when defined(posix): 18 | from posix import EBADF 19 | 20 | type 21 | AsyncHttpServer* = ref object ## Server object. 22 | socket: AsyncFD 23 | domain: Domain 24 | onRequest: RequestHandler 25 | closed: bool 26 | readTimeout: Natural 27 | 28 | RequestHandler* = proc (req: ServerRequest, res: ServerResponse): Future[void] {.closure, gcsafe.} 29 | 30 | proc bindAddr(fd: SocketHandle, port: Port, address = "", domain = AF_INET) {.tags: [ReadIOEffect].} = 31 | ## Binds ``address``:``port`` to the socket. 32 | ## 33 | ## If ``address`` is "" then ``ADDR_ANY`` will be bound. 34 | var realaddr = address 35 | if realaddr == "": 36 | case domain 37 | of AF_INET6: realaddr = "::" 38 | of AF_INET: realaddr = "0.0.0.0" 39 | else: 40 | raise newException(ValueError, "Unknown socket address family and no address specified to bindAddr") 41 | var aiList = getAddrInfo(realaddr, port, domain) 42 | if bindAddr(fd, aiList.ai_addr, aiList.ai_addrlen.SockLen) < 0'i32: 43 | aiList.freeAddrInfo() 44 | raiseOSError(osLastError()) 45 | aiList.freeAddrInfo() 46 | 47 | proc listen(fd: SocketHandle, backlog = SOMAXCONN) {.tags: [ReadIOEffect].} = 48 | ## Marks ``fd`` as accepting connections. ``Backlog`` specifies the maximum length of the 49 | ## queue of pending connections. 
50 | ## 51 | ## Raises an ``OSError`` upon failure. 52 | if nativesockets.listen(fd, backlog) < 0'i32: 53 | raiseOSError(osLastError()) 54 | 55 | proc newAsyncHttpServer*(): AsyncHttpServer = 56 | ## Creates a new ``AsyncHttpServer``. 57 | new(result) 58 | result.closed = false 59 | 60 | proc `onRequest=`*(server: AsyncHttpServer, handler: RequestHandler) = 61 | ## Sets a hook proc for the server. Whenever a new request arrives, this hook proc is triggered. 62 | server.onRequest = handler 63 | 64 | proc close*(server: AsyncHttpServer) = 65 | ## Closes the server to release the underlying resources. 66 | server.socket.closeSocket() 67 | server.closed = true 68 | 69 | proc handleNextRequest(server: AsyncHttpServer, conn: HttpConnection) {.async.} = 70 | var req: ServerRequest 71 | var res: ServerResponse 72 | 73 | proc onReadEnd() = 74 | assert not conn.closed 75 | if res.ended: 76 | req = nil 77 | res = nil 78 | asyncCheck server.handleNextRequest(conn) 79 | 80 | proc onWriteEnd() = 81 | assert not conn.closed 82 | if req.ended: 83 | req = nil 84 | res = nil 85 | asyncCheck server.handleNextRequest(conn) 86 | 87 | req = newServerRequest(conn, onReadEnd) 88 | 89 | try: 90 | await conn.readHttpHeader(req.header.addr) 91 | req.normalizeSpecificFields() 92 | except HttpError as e: 93 | yield conn.write("HTTP/1.1 " & $e.code & "\r\nConnection: close\r\n\r\n") 94 | conn.close() 95 | return 96 | except ValueError: 97 | yield conn.write("HTTP/1.1 400 Bad Request\r\nConnection: close\r\n\r\n") 98 | conn.close() 99 | return 100 | except ReadAbortedError as e: 101 | if e.timeout: 102 | yield conn.write("HTTP/1.1 408 Request Timeout\r\nConnection: close\r\n\r\n") 103 | conn.close() 104 | return 105 | except: 106 | conn.close() 107 | return 108 | 109 | res = newServerResponse(conn, onWriteEnd) 110 | await server.onRequest(req, res) 111 | 112 | proc serve*( 113 | server: AsyncHttpServer, 114 | port: Port, 115 | address: string = "", 116 | domain = AF_INET, 117 | readTimeout = 0 118 | ) {.async.} = 119 | ## Starts the process of listening for incoming HTTP connections on the 120 | ## specified ``address`` and ``port``. ``readTimeout`` specifies the timeout 121 | ## for read operations and keep-alive connections. 
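##
## A minimal end-to-end sketch, for illustration only (the handler, the ``app`` name, the
## echo behaviour and the port are choices made for this example, not prescribed by the API):
##
## .. code-block::nim
##
##   import asyncdispatch, nativesockets
##   import netkit/http/server, netkit/http/reader, netkit/http/writer, netkit/http/status
##
##   proc handler(req: ServerRequest, res: ServerResponse) {.async.} =
##     let body = await req.readAll()                          # drain the request body
##     await res.write(Http200, {"Content-Length": $body.len}) # status line and header fields first
##     await res.write(body)                                   # then the message body (echoed back)
##     res.writeEnd()                                          # finally mark the response as ended
##
##   var app = newAsyncHttpServer()
##   app.onRequest = handler
##   waitFor app.serve(Port(8080))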
122 | let fd = createNativeSocket(domain, SOCK_STREAM, IPPROTO_TCP) 123 | if fd == osInvalidSocket: 124 | raiseOSError(osLastError()) 125 | fd.setSockOptInt(SOL_SOCKET, SO_REUSEADDR, 1) 126 | fd.setSockOptInt(SOL_SOCKET, SO_REUSEPORT, 1) 127 | when defined(macosx) and not defined(nimdoc): 128 | fd.setSockOptInt(SOL_SOCKET, SO_NOSIGPIPE, 1) 129 | fd.bindAddr(port, address, domain) 130 | fd.listen() 131 | fd.setBlocking(false) 132 | AsyncFD(fd).register() 133 | server.socket = AsyncFD(fd) 134 | server.domain = domain 135 | 136 | while not server.closed: 137 | var peer: tuple[address: string, client: AsyncFD] 138 | try: 139 | peer = await server.socket.acceptAddr() 140 | except: 141 | if server.closed: 142 | when defined(posix): 143 | if osLastError() == OSErrorCode(EBADF): 144 | break 145 | else: 146 | break 147 | raise getCurrentException() 148 | SocketHandle(peer.client).setBlocking(false) 149 | asyncCheck server.handleNextRequest(newHttpConnection(peer.client, peer.address, readTimeout)) 150 | -------------------------------------------------------------------------------- /netkit/http/spec.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains some basic information about the HTTP specification. 8 | 9 | # Tip 10 | # --- 11 | # 12 | # The set type is implemented as a high-performance bit vector: 13 | # 14 | # .. code-block::nim 15 | # 16 | # Chars: set[char] = {...} # => Chars: array[0..255, bit] = [0,0,0,0,1,1,0,1,0,0,0,1,...] 17 | # 'A' in {...} # => Chars[65] == 1 18 | 19 | const 20 | # [RFC5234](https://tools.ietf.org/html/rfc5234#appendix-B.1) 21 | COLON* = ':' 22 | COMMA* = ',' 23 | SEMICOLON* = ';' 24 | CR* = '\x0D' 25 | LF* = '\x0A' 26 | CRLF* = "\x0D\x0A" 27 | SP* = '\x20' 28 | HTAB* = '\x09' 29 | WSP* = {SP, HTAB} 30 | 31 | proc checkFieldName*(s: string) {.raises: [ValueError].} = 32 | ## Checks if ``s`` is a valid name of a HTTP header field. 33 | ## 34 | ## `HTTP RFC 5234 `_ 35 | ## 36 | ## .. code-block::nim 37 | ## 38 | ## DIGIT = %x30-39 ; 0-9 39 | ## ALPHA = %x41-5A / %x61-7A ; A-Z / a-z 40 | ## 41 | ## `HTTP RFC 7230 `_ 42 | ## 43 | ## .. code-block::nim 44 | ## 45 | ## field-name = token 46 | ## token = 1*tchar 47 | ## tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" 48 | ## / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" 49 | ## / DIGIT 50 | ## / ALPHA 51 | ## ; any VCHAR, except delimiters 52 | ## 53 | const TokenChars = { 54 | '!', '#', '$', '%', '&', '\'', '*', 55 | '+', '-', '.', '^', '_', '`', '|', '~', 56 | '0'..'9', 57 | 'a'..'z', 58 | 'A'..'Z' 59 | } 60 | if s.len == 0: 61 | raise newException(ValueError, "Invalid field name") 62 | for c in s: 63 | if c notin TokenChars: 64 | raise newException(ValueError, "Invalid field name") 65 | 66 | proc checkFieldValue*(s: string) {.raises: [ValueError].} = 67 | ## Checks if ``s`` is a valid value of a HTTP header field. 68 | ## 69 | ## `HTTP RFC 5234 `_ 70 | ## 71 | ## .. code-block::nim 72 | ## 73 | ## HTAB = %x09 ; horizontal tab 74 | ## SP = %x20 ; ' ' 75 | ## VCHAR = %x21-7E ; visible (printing) characters 76 | ## 77 | ## `HTTP RFC 7230 `_ 78 | ## 79 | ## .. 
code-block::nim 80 | ## 81 | ## field-value = \*( field-content / obs-fold ) 82 | ## field-content = field-vchar [ 1\*( SP / HTAB ) field-vchar ] 83 | ## field-vchar = VCHAR / obs-text 84 | ## obs-text = %x80-FF 85 | ## obs-fold = CRLF 1\*( SP / HTAB ) ; obsolete line folding 86 | ## 87 | const ValueChars = { HTAB, SP, '\x21'..'\x7E', '\x80'..'\xFF' } 88 | for c in s: 89 | if c notin ValueChars: 90 | raise newException(ValueError, "Invalid field value") -------------------------------------------------------------------------------- /netkit/http/status.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains HTTP status code. 8 | ## 9 | ## Overview 10 | ## ======================== 11 | ## 12 | ## In HTTP/1.0 and since, the first line of the HTTP response is called the status line and includes a numeric 13 | ## status code (such as "404") and a textual reason phrase (such as "Not Found"). The way the user agent handles 14 | ## the response depends primarily on the code, and secondarily on the other response header fields. Custom 15 | ## status codes can be used, for if the user agent encounters a code it does not recognize, it can use the first 16 | ## digit of the code to determine the general class of the response. 17 | ## 18 | ## .. 19 | ## 20 | ## See `Hypertext Transfer Protocol `_ for more information. 21 | 22 | type 23 | HttpCode* = enum ## HTTP status code. 24 | Http100 = "100 Continue" 25 | Http101 = "101 Switching Protocols" 26 | Http200 = "200 OK" 27 | Http201 = "201 Created" 28 | Http202 = "202 Accepted" 29 | Http203 = "203 Non-Authoritative Information" 30 | Http204 = "204 No Content" 31 | Http205 = "205 Reset Content" 32 | Http206 = "206 Partial Content" 33 | Http300 = "300 Multiple Choices" 34 | Http301 = "301 Moved Permanently" 35 | Http302 = "302 Found" 36 | Http303 = "303 See Other" 37 | Http304 = "304 Not Modified" 38 | Http305 = "305 Use Proxy" 39 | Http307 = "307 Temporary Redirect" 40 | Http400 = "400 Bad Request" 41 | Http401 = "401 Unauthorized" 42 | Http403 = "403 Forbidden" 43 | Http404 = "404 Not Found" 44 | Http405 = "405 Method Not Allowed" 45 | Http406 = "406 Not Acceptable" 46 | Http407 = "407 Proxy Authentication Required" 47 | Http408 = "408 Request Timeout" 48 | Http409 = "409 Conflict" 49 | Http410 = "410 Gone" 50 | Http411 = "411 Length Required" 51 | Http412 = "412 Precondition Failed" 52 | Http413 = "413 Request Entity Too Large" 53 | Http414 = "414 Request-URI Too Long" 54 | Http415 = "415 Unsupported Media Type" 55 | Http416 = "416 Requested Range Not Satisfiable" 56 | Http417 = "417 Expectation Failed" 57 | Http418 = "418 I'm a teapot" 58 | Http421 = "421 Misdirected Request" 59 | Http422 = "422 Unprocessable Entity" 60 | Http426 = "426 Upgrade Required" 61 | Http428 = "428 Precondition Required" 62 | Http429 = "429 Too Many Requests" 63 | Http431 = "431 Request Header Fields Too Large" 64 | Http451 = "451 Unavailable For Legal Reasons" 65 | Http500 = "500 Internal Server Error" 66 | Http501 = "501 Not Implemented" 67 | Http502 = "502 Bad Gateway" 68 | Http503 = "503 Service Unavailable" 69 | Http504 = "504 Gateway Timeout" 70 | Http505 = "505 HTTP Version Not Supported" 71 | 72 | proc parseHttpCode*(code: int): HttpCode {.raises: [ValueError].} = 73 | ## Converts an integer to a status code. 
A ``ValueError`` is raised when ``code`` is not a valid code. 74 | runnableExamples: 75 | doAssert parseHttpCode(100) == Http100 76 | doAssert parseHttpCode(200) == Http200 77 | case code 78 | of 100: Http100 79 | of 101: Http101 80 | of 200: Http200 81 | of 201: Http201 82 | of 202: Http202 83 | of 203: Http203 84 | of 204: Http204 85 | of 205: Http205 86 | of 206: Http206 87 | of 300: Http300 88 | of 301: Http301 89 | of 302: Http302 90 | of 303: Http303 91 | of 304: Http304 92 | of 305: Http305 93 | of 307: Http307 94 | of 400: Http400 95 | of 401: Http401 96 | of 403: Http403 97 | of 404: Http404 98 | of 405: Http405 99 | of 406: Http406 100 | of 407: Http407 101 | of 408: Http408 102 | of 409: Http409 103 | of 410: Http410 104 | of 411: Http411 105 | of 412: Http412 106 | of 413: Http413 107 | of 414: Http414 108 | of 415: Http415 109 | of 416: Http416 110 | of 417: Http417 111 | of 418: Http418 112 | of 421: Http421 113 | of 422: Http422 114 | of 426: Http426 115 | of 428: Http428 116 | of 429: Http429 117 | of 431: Http431 118 | of 451: Http451 119 | of 500: Http500 120 | of 501: Http501 121 | of 502: Http502 122 | of 503: Http503 123 | of 504: Http504 124 | of 505: Http505 125 | else: raise newException(ValueError, "Not Implemented") -------------------------------------------------------------------------------- /netkit/http/uri.nim: -------------------------------------------------------------------------------- 1 | import std / uri 2 | 3 | 4 | export uri 5 | -------------------------------------------------------------------------------- /netkit/http/version.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains a definition of HTTP version. 8 | 9 | type 10 | HttpVersion* = enum ## HTTP version number. 11 | HttpVer10 = "HTTP/1.0", 12 | HttpVer11 = "HTTP/1.1" 13 | HttpVer20 = "HTTP/2.0" 14 | 15 | proc parseHttpVersion*(s: string): HttpVersion {.raises: [ValueError].} = 16 | ## Converts a string to HTTP version. A ``ValueError`` is raised when ``s`` is not a valid version. Currently 17 | ## only `"HTTP/1.0"` and `"HTTP/1.1"` are valid versions. 18 | runnableExamples: 19 | let ver = parseHttpVersion("HTTP/1.1") 20 | doAssert ver == HttpVer11 21 | 22 | if s.len != 8 or s[6] != '.': 23 | raise newException(ValueError, "Invalid Http Version") 24 | let major = s[5].ord - 48 25 | let minor = s[7].ord - 48 26 | if major != 1: 27 | raise newException(ValueError, "Invalid Http Version") 28 | case minor 29 | of 0: 30 | result = HttpVer10 31 | of 1: 32 | result = HttpVer11 33 | else: 34 | raise newException(ValueError, "Invalid Http Version") 35 | const name = "HTTP/" 36 | var i = 0 37 | while i < 5: 38 | if name[i] != s[i]: 39 | raise newException(ValueError, "Invalid Http Version") 40 | i.inc() 41 | 42 | -------------------------------------------------------------------------------- /netkit/http/writer.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module provides an abstraction of write operations related to HTTP. 8 | ## 9 | ## Overview 10 | ## ======================== 11 | ## 12 | ## A server writes a response to a client, and a client writes a request to a server. 
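##
## As a hedged sketch, a typical server-side write sequence looks like the following
## (the ``reply`` proc and the literal header values are example choices; ``ServerResponse``,
## ``write`` and ``writeEnd`` are the types and procs defined below, ``Http200`` comes from
## ``netkit/http/status``):
##
## .. code-block::nim
##
##   proc reply(res: ServerResponse) {.async.} =
##     await res.write(Http200, {"Content-Length": "12"})  # header first
##     await res.write("Hello Netkit")                      # then the 12-byte body
##     res.writeEnd()                                       # no further writes are allowed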
13 | ## 14 | ## ``HttpWriter`` is a base object for write operations, ``ServerResponse`` and ``ClientRequest`` 15 | ## inherit from it. ``ServerResponse`` represents a response from a server, and ``ClientRequest`` 16 | ## represents a request from a client. 17 | 18 | import strutils 19 | import asyncdispatch 20 | import nativesockets 21 | import netkit/locks 22 | import netkit/http/exception 23 | import netkit/http/status 24 | import netkit/http/headerfield 25 | import netkit/http/header 26 | import netkit/http/connection 27 | 28 | type 29 | HttpWriter* = ref object of RootObj ## An abstraction of write operations related to HTTP. 30 | conn: HttpConnection 31 | lock: AsyncLock 32 | onEnd: proc () {.gcsafe, closure.} 33 | writable: bool 34 | 35 | ServerResponse* = ref object of HttpWriter ## Represents a response from a server. 36 | ClientRequest* = ref object of HttpWriter ## Represents a request from a client. 37 | 38 | proc init(writer: HttpWriter, conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}) = 39 | writer.conn = conn 40 | writer.lock = initAsyncLock() 41 | writer.onEnd = onEnd 42 | writer.writable = true 43 | 44 | proc newServerResponse*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ServerResponse = 45 | ## Creates a new ``ServerResponse``. 46 | new(result) 47 | result.init(conn, onEnd) 48 | 49 | proc newClientRequest*(conn: HttpConnection, onEnd: proc () {.gcsafe, closure.}): ClientRequest = 50 | ## Creates a new ``ClientRequest``. 51 | new(result) 52 | result.init(conn, onEnd) 53 | 54 | proc ended*(writer: HttpWriter): bool {.inline.} = 55 | ## Returns ``true`` if the underlying connection has been closed or writer has been shut down. 56 | writer.conn.closed or not writer.writable 57 | 58 | template writeByGuard(writer: HttpWriter, buf: pointer, size: Natural) = 59 | if writer.conn.closed: 60 | raise newException(WriteAbortedError, "Connection has been closed") 61 | if not writer.writable: 62 | raise newException(WriteAbortedError, "Write after ended") 63 | let writeFuture = writer.conn.write(buf, size) 64 | yield writeFuture 65 | if writeFuture.failed: 66 | writer.conn.close() 67 | raise writeFuture.readError() 68 | 69 | proc write*(writer: HttpWriter, buf: pointer, size: Natural): Future[void] {.async.} = 70 | ## Writes ``size`` bytes from ``buf`` to the writer. 71 | ## 72 | ## If a system error occurs during writing, an ``OsError`` will be raised. If the connection is 73 | ## disconnected before successful writing or the writer has been shut down, a ``WriteAbortedError`` will be raised. 74 | await writer.lock.acquire() 75 | try: 76 | writer.writeByGuard(buf, size) 77 | finally: 78 | writer.lock.release() 79 | 80 | proc write*(writer: HttpWriter, data: string): Future[void] {.async.} = 81 | ## Writes a string to the writer. 82 | ## 83 | ## If a system error occurs during writing, an ``OsError`` will be raised. If the connection is 84 | ## disconnected before successful writing or the writer has been shut down, a ``WriteAbortedError`` will be raised. 85 | await writer.lock.acquire() 86 | GC_ref(data) 87 | try: 88 | writer.writeByGuard(data.cstring, data.len) 89 | finally: 90 | GC_unref(data) 91 | writer.lock.release() 92 | 93 | proc write*( 94 | writer: HttpWriter, 95 | statusCode: HttpCode 96 | ): Future[void] = 97 | ## Writes a message header to the writer. 98 | ## 99 | ## If a system error occurs during writing, an ``OsError`` will be raised. 
If the connection is 100 | ## disconnected before successful writing or the writer has been shut down, a ``WriteAbortedError`` will be raised. 101 | return writer.write(statusCode.toResponseStr()) 102 | 103 | proc write*( 104 | writer: HttpWriter, 105 | statusCode: HttpCode, 106 | fields: openArray[tuple[name: string, value: string]] 107 | ): Future[void] = 108 | ## Writes a message header to the writer. 109 | ## 110 | ## If a system error occurs during writing, an ``OsError`` will be raised. If the connection is 111 | ## disconnected before successful writing or the writer has been shut down, a ``WriteAbortedError`` will be raised. 112 | return writer.write( 113 | HttpHeader( 114 | kind: HttpHeaderKind.Response, 115 | statusCode: statusCode, 116 | fields: initHeaderFields(fields)).toResponseStr()) 117 | 118 | proc write*( 119 | writer: HttpWriter, 120 | statusCode: HttpCode, 121 | fields: openArray[tuple[name: string, value: seq[string]]] 122 | ): Future[void] = 123 | ## Writes a message header to the writer. 124 | ## 125 | ## If a system error occurs during writing, an ``OsError`` will be raised. If the connection is 126 | ## disconnected before successful writing or the writer has been shut down, a ``WriteAbortedError`` will be raised. 127 | return writer.write( 128 | HttpHeader( 129 | kind: HttpHeaderKind.Response, 130 | statusCode: statusCode, 131 | fields: initHeaderFields(fields)).toResponseStr()) 132 | 133 | proc writeEnd*(writer: HttpWriter) = 134 | ## Shuts down writer. Data is no longer allowed to be written, otherwise an ``WriteAbortedError`` will be raised. 135 | if writer.writable: 136 | writer.writable = false 137 | if not writer.conn.closed: 138 | writer.onEnd() -------------------------------------------------------------------------------- /netkit/locks.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module implements an asynchronous lock. When performing asynchronous input and output, the order of 8 | ## reading and writing is very important. In order to ensure the correct order of reading and writing, locks 9 | ## are required for synchronization or queuing. 10 | ## 11 | ## Asynchronous locks are Netkit's underlying mechanism to provide IO consistency. Netkit provides open APIs 12 | ## independent of "locks". 13 | ## 14 | ## As with synchronous style locks, you should always operate a lock in window mode and bind a lock to a 15 | ## specific object as much as possible to avoid problems such as deadlock and livelock. 16 | 17 | import deques 18 | import asyncdispatch 19 | 20 | type 21 | AsyncLock* = object ## An asynchronous lock. 22 | locked: bool 23 | waiters: Deque[Future[void]] 24 | 25 | proc initAsyncLock*(): AsyncLock = 26 | ## Initializes an ``AsyncLock``. 27 | result.locked = false 28 | result.waiters = initDeque[Future[void]]() 29 | 30 | proc acquire*(L: var AsyncLock): Future[void] = 31 | ## Tries to acquire a lock. When this future is completed, it indicates that the lock is acquired. 32 | result = newFuture[void]("acquire") 33 | if L.locked: 34 | L.waiters.addLast(result) 35 | else: 36 | L.locked = true 37 | result.complete() 38 | 39 | proc release*(L: var AsyncLock) = 40 | ## Releases the lock that has been acquired. 
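##
## A minimal usage sketch (the ``task`` proc and the critical-section placeholder are example
## choices; the lock is assumed to guard some asynchronous read/write sequence):
##
## .. code-block::nim
##
##   import asyncdispatch
##   import netkit/locks
##
##   var lock = initAsyncLock()
##
##   proc task() {.async.} =
##     await lock.acquire()   # wait until the lock is free
##     try:
##       discard              # critical section: ordered reads/writes go here
##     finally:
##       lock.release()       # wake up the next waiter, if any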
41 | if L.locked: 42 | if L.waiters.len > 0: 43 | L.waiters.popFirst().complete() 44 | else: 45 | L.locked = false 46 | 47 | proc isLocked*(L: AsyncLock): bool = 48 | ## Returns ``true`` if ``L`` is locked. 49 | L.locked 50 | -------------------------------------------------------------------------------- /netkit/misc.nim: -------------------------------------------------------------------------------- 1 | # netkit 2 | # (c) Copyright 2020 Wang Tong 3 | # 4 | # See the file "LICENSE", included in this 5 | # distribution, for details about the copyright. 6 | 7 | ## This module contains miscellaneous functions that don’t really belong in any other module. 8 | 9 | template offset*(p: pointer, n: int): pointer = 10 | ## Returns a new pointer, which is offset ``n`` bytes from ``p``. 11 | cast[pointer](cast[ByteAddress](p) + n) 12 | 13 | template checkDefNatural*(value: static[Natural], name: static[string]): untyped = 14 | ## Checks whether ``value`` is a natural number (zero or a positive integer). If not, then stop compiling. ``name`` 15 | ## specifies its symbolic name. 16 | when value < 0: 17 | {.fatal: "The value of '" & name & "' by ``--define`` must be greater than or equal to 0!".} -------------------------------------------------------------------------------- /nim.cfg: -------------------------------------------------------------------------------- 1 | --path:"." -------------------------------------------------------------------------------- /tests/buffer/tcircular_buffer.nim: -------------------------------------------------------------------------------- 1 | discard """ 2 | cmd: "nim c -r --styleCheck:hint --panics:on $options $file" 3 | matrix: "--gc:arc" 4 | targets: "c" 5 | nimout: "" 6 | action: "run" 7 | exitcode: 0 8 | timeout: 60.0 9 | """ 10 | 11 | # netkit 12 | # (c) Copyright 2020 Wang Tong 13 | # 14 | # See the file "LICENSE", included in this 15 | # distribution, for details about the copyright. 
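# The cases below exercise MarkableCircularBuffer from netkit/buffer/circular:
# data is written into the region returned by next() and committed with pack(),
# then consumed either via marks()/popMarks() or via get()/del(). BufferSize is
# taken from netkit/buffer/constants.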
16 | 17 | import unittest 18 | import netkit/buffer/constants 19 | import netkit/buffer/circular 20 | 21 | suite "MarkableCircularBuffer": 22 | setup: 23 | var buffer = MarkableCircularBuffer() 24 | var str = "abcdefgh" 25 | 26 | var (regionPtr, regionLen) = buffer.next() 27 | check regionLen == BufferSize 28 | 29 | copyMem(regionPtr, str.cstring, min(str.len, regionLen.int)) 30 | discard buffer.pack(8) 31 | 32 | test "marks": 33 | for c in buffer.marks(): 34 | if c == 'c': 35 | break 36 | 37 | (regionPtr, regionLen) = buffer.next() 38 | check regionLen < BufferSize 39 | 40 | check buffer.lenMarks() == 3 41 | check buffer.popMarks() == "abc" 42 | check buffer.len == 5 43 | 44 | (regionPtr, regionLen) = buffer.next() 45 | check regionLen == BufferSize - 8 46 | 47 | check buffer.markUntil('f') 48 | 49 | check buffer.lenMarks() == 3 50 | check buffer.popMarks(2) == "d" 51 | check buffer.len == 2 52 | 53 | (regionPtr, regionLen) = buffer.next() 54 | check regionLen == BufferSize - 8 55 | 56 | check buffer.mark(100) == 2 57 | 58 | check buffer.lenMarks() == 2 59 | check buffer.popMarks(1) == "g" 60 | check buffer.len == 0 61 | 62 | (regionPtr, regionLen) = buffer.next() 63 | check regionLen == BufferSize 64 | 65 | test "get and del": 66 | var dest = newString(8) 67 | 68 | check buffer.get(dest.cstring, 3) == 3 69 | check buffer.del(3) == 3 70 | dest.setLen(3) 71 | check dest == "abc" 72 | check buffer.len == 5 73 | 74 | check buffer.get(dest.cstring, 3) == 3 75 | check buffer.del(3) == 3 76 | dest.setLen(3) 77 | check dest == "def" 78 | check buffer.len == 2 79 | 80 | check buffer.get(dest.cstring, 3) == 2 81 | check buffer.del(3) == 2 82 | dest.setLen(2) 83 | check dest == "gh" 84 | check buffer.len == 0 85 | 86 | check buffer.get(dest.cstring, 3) == 0 87 | check buffer.del(3) == 0 88 | 89 | test "get and del with marks": 90 | for c in buffer.marks(): 91 | if c == 'c': 92 | break 93 | 94 | (regionPtr, regionLen) = buffer.next() 95 | check regionLen < BufferSize 96 | 97 | check buffer.lenMarks() == 3 98 | check buffer.get(3) == "abc" 99 | check buffer.del(3) == 3 100 | check buffer.len == 5 101 | 102 | (regionPtr, regionLen) = buffer.next() 103 | check regionLen == BufferSize - 8 104 | 105 | check buffer.markUntil('f') 106 | 107 | check buffer.lenMarks() == 3 108 | check buffer.popMarks(2) == "d" 109 | check buffer.len == 2 110 | 111 | (regionPtr, regionLen) = buffer.next() 112 | check regionLen == BufferSize - 8 113 | 114 | check buffer.mark(100) == 2 115 | 116 | check buffer.lenMarks() == 2 117 | check buffer.get(1) == "g" 118 | check buffer.del(2) == 2 119 | check buffer.len == 0 120 | 121 | (regionPtr, regionLen) = buffer.next() 122 | check regionLen == BufferSize 123 | -------------------------------------------------------------------------------- /tests/http/tcookie.nim: -------------------------------------------------------------------------------- 1 | discard """ 2 | cmd: "nim c -r --styleCheck:hint --panics:on $options $file" 3 | matrix: "--gc:arc" 4 | targets: "c" 5 | nimout: "" 6 | action: "run" 7 | exitcode: 0 8 | timeout: 60.0 9 | """ 10 | import options 11 | import strformat 12 | import times 13 | import netkit/http/cookies 14 | 15 | # SetCookie 16 | block: 17 | let 18 | username = "admin" 19 | password = "root" 20 | 21 | # name-value 22 | block: 23 | let 24 | cookie = initCookie(username, password) 25 | 26 | doAssert cookie.name == username 27 | doAssert cookie.value == password 28 | doAssert cookie.expires.len == 0 29 | doAssert cookie.maxAge.isNone 30 | doAssert 
cookie.domain.len == 0 31 | doAssert cookie.path.len == 0 32 | doAssert not cookie.secure 33 | doAssert not cookie.httpOnly 34 | doAssert cookie.samesite == Lax 35 | doAssert setCookie(cookie) == fmt"{username}={password}; SameSite=Lax" 36 | doAssert $cookie == setCookie(cookie) 37 | 38 | # domain 39 | block: 40 | let 41 | domain = "www.netkit.com" 42 | cookie = initCookie(username, password, domain = domain) 43 | 44 | doAssert cookie.name == username 45 | doAssert cookie.value == password 46 | doAssert cookie.expires.len == 0 47 | doAssert cookie.maxAge.isNone 48 | doAssert cookie.domain == domain 49 | doAssert cookie.path.len == 0 50 | doAssert not cookie.secure 51 | doAssert not cookie.httpOnly 52 | doAssert cookie.samesite == Lax 53 | doAssert setCookie(cookie) == fmt"{username}={password}; Domain={cookie.domain}; SameSite=Lax" 54 | doAssert $cookie == setCookie(cookie) 55 | 56 | # path 57 | block: 58 | let 59 | path = "/index" 60 | cookie = initCookie(username, password, path = path) 61 | 62 | doAssert cookie.name == username 63 | doAssert cookie.value == password 64 | doAssert cookie.expires.len == 0 65 | doAssert cookie.maxAge.isNone 66 | doAssert cookie.domain.len == 0 67 | doAssert cookie.path == path 68 | doAssert not cookie.secure 69 | doAssert not cookie.httpOnly 70 | doAssert cookie.samesite == Lax 71 | doAssert setCookie(cookie) == fmt"{username}={password}; Path={cookie.path}; SameSite=Lax" 72 | doAssert $cookie == setCookie(cookie) 73 | 74 | # maxAge 75 | block: 76 | let 77 | maxAge = 10 78 | cookie = initCookie(username, password, maxAge = some(maxAge)) 79 | 80 | doAssert cookie.name == username 81 | doAssert cookie.value == password 82 | doAssert cookie.expires.len == 0 83 | doAssert cookie.maxAge.isSome 84 | doAssert cookie.domain.len == 0 85 | doAssert cookie.path.len == 0 86 | doAssert not cookie.secure 87 | doAssert not cookie.httpOnly 88 | doAssert cookie.samesite == Lax 89 | doAssert setCookie(cookie) == fmt"{username}={password}; Max-Age={maxAge}; SameSite=Lax" 90 | doAssert $cookie == setCookie(cookie) 91 | 92 | # expires string 93 | block: 94 | let 95 | expires = "Mon, 6 Apr 2020 12:55:00 GMT" 96 | cookie = initCookie(username, password, expires) 97 | 98 | doAssert cookie.name == username 99 | doAssert cookie.value == password 100 | doAssert cookie.expires == expires 101 | doAssert cookie.maxAge.isNone 102 | doAssert cookie.domain.len == 0 103 | doAssert cookie.path.len == 0 104 | doAssert not cookie.secure 105 | doAssert not cookie.httpOnly 106 | doAssert cookie.samesite == Lax 107 | doAssert setCookie(cookie) == fmt"{username}={password}; Expires={expires}; SameSite=Lax" 108 | doAssert $cookie == setCookie(cookie) 109 | 110 | # expires DateTime 111 | block: 112 | let 113 | dt = initDateTime(6, mApr, 2020, 13, 3, 0, 0, utc()) 114 | expires = format(dt, "ddd',' dd MMM yyyy HH:mm:ss 'GMT'") 115 | cookie = initCookie(username, password, expires) 116 | 117 | doAssert cookie.name == username 118 | doAssert cookie.value == password 119 | doAssert cookie.expires == expires 120 | doAssert cookie.maxAge.isNone 121 | doAssert cookie.domain.len == 0 122 | doAssert cookie.path.len == 0 123 | doAssert not cookie.secure 124 | doAssert not cookie.httpOnly 125 | doAssert cookie.samesite == Lax 126 | doAssert setCookie(cookie) == fmt"{username}={password}; Expires={expires}; SameSite=Lax" 127 | doAssert $cookie == setCookie(cookie) 128 | 129 | # secure 130 | block: 131 | let 132 | secure = true 133 | cookie = initCookie(username, password, secure = secure) 134 | 135 | doAssert 
cookie.name == username 136 | doAssert cookie.value == password 137 | doAssert cookie.expires.len == 0 138 | doAssert cookie.maxAge.isNone 139 | doAssert cookie.domain.len == 0 140 | doAssert cookie.path.len == 0 141 | doAssert cookie.secure 142 | doAssert not cookie.httpOnly 143 | doAssert cookie.samesite == Lax 144 | doAssert setCookie(cookie) == fmt"{username}={password}; Secure; SameSite=Lax" 145 | doAssert $cookie == setCookie(cookie) 146 | 147 | # http-only 148 | block: 149 | let 150 | httpOnly = true 151 | cookie = initCookie(username, password, httpOnly = httpOnly) 152 | 153 | doAssert cookie.name == username 154 | doAssert cookie.value == password 155 | doAssert cookie.expires.len == 0 156 | doAssert cookie.maxAge.isNone 157 | doAssert cookie.domain.len == 0 158 | doAssert cookie.path.len == 0 159 | doAssert not cookie.secure 160 | doAssert cookie.httpOnly 161 | doAssert cookie.samesite == Lax 162 | doAssert setCookie(cookie) == fmt"{username}={password}; HttpOnly; SameSite=Lax" 163 | doAssert $cookie == setCookie(cookie) 164 | 165 | # sameSite 166 | block: 167 | let 168 | sameSite = Strict 169 | cookie = initCookie(username, password, sameSite = sameSite) 170 | 171 | doAssert cookie.name == username 172 | doAssert cookie.value == password 173 | doAssert cookie.expires.len == 0 174 | doAssert cookie.maxAge.isNone 175 | doAssert cookie.domain.len == 0 176 | doAssert cookie.path.len == 0 177 | doAssert not cookie.secure 178 | doAssert not cookie.httpOnly 179 | doAssert cookie.samesite == sameSite 180 | doAssert setCookie(cookie) == fmt"{username}={password}; SameSite={sameSite}" 181 | doAssert $cookie == setCookie(cookie) 182 | 183 | 184 | # Parse 185 | block: 186 | # parse cookie from string 187 | block: 188 | let 189 | text = "admin=root; Domain=www.netkit.com; Secure; HttpOnly" 190 | cookie = initCookie(text) 191 | 192 | doAssert cookie.name == "admin" 193 | doAssert cookie.value == "root" 194 | doAssert cookie.domain == "www.netkit.com" 195 | doAssert cookie.secure 196 | doAssert cookie.httpOnly 197 | doAssert cookie.sameSite == None 198 | doAssert setCookie(cookie) == text 199 | doAssert $cookie == setCookie(cookie) 200 | 201 | # parse samesite 202 | block: 203 | let 204 | expectedLax = "foo=bar; SameSite=Lax" 205 | expectedStrict = "foo=bar; SameSite=Strict" 206 | expectedNone = "foo=bar" 207 | 208 | 209 | doAssert $initCookie("foo=bar; SameSite=Lax") == expectedLax 210 | doAssert $initCookie("foo=bar; SameSite=LAX") == expectedLax 211 | doAssert $initCookie("foo=bar; SameSite=lax") == expectedLax 212 | doAssert $initCookie("foo=bar; SAMESITE=Lax") == expectedLax 213 | doAssert $initCookie("foo=bar; samesite=Lax") == expectedLax 214 | 215 | doAssert $initCookie("foo=bar; SameSite=Strict") == expectedStrict 216 | doAssert $initCookie("foo=bar; SameSite=STRICT") == expectedStrict 217 | doAssert $initCookie("foo=bar; SameSite=strict") == expectedStrict 218 | doAssert $initCookie("foo=bar; SAMESITE=Strict") == expectedStrict 219 | doAssert $initCookie("foo=bar; samesite=Strict") == expectedStrict 220 | 221 | doAssert $initCookie("foo=bar; SameSite=None") == expectedNone 222 | doAssert $initCookie("foo=bar; SameSite=NONE") == expectedNone 223 | doAssert $initCookie("foo=bar; SameSite=none") == expectedNone 224 | doAssert $initCookie("foo=bar; SAMESITE=None") == expectedNone 225 | doAssert $initCookie("foo=bar; samesite=None") == expectedNone 226 | 227 | # parse error 228 | block: 229 | doAssertRaises(MissingValueError): 230 | discard initCookie("bar") 231 | 232 | 
doAssertRaises(MissingValueError): 233 | discard initCookie("=bar") 234 | 235 | doAssertRaises(MissingValueError): 236 | discard initCookie(" =bar") 237 | 238 | doAssertRaises(MissingValueError): 239 | discard initCookie("foo=") 240 | 241 | # parse pair 242 | block: 243 | doAssert $initCookie("foo", "bar=baz") == "foo=bar=baz; SameSite=Lax" 244 | 245 | 246 | doAssert initCookie("foo=bar=baz").name == "foo" 247 | doAssert initCookie("foo=bar=baz").value == "bar=baz" 248 | 249 | 250 | doAssert $initCookie("foo=bar") == "foo=bar" 251 | doAssert $initCookie(" foo = bar ") == "foo = bar " 252 | doAssert $initCookie(" foo=bar ;Path=") == "foo=bar " 253 | doAssert $initCookie(" foo=bar ; Path= ") == "foo=bar " 254 | doAssert $initCookie(" foo=bar ; Ignored ") == "foo=bar " 255 | 256 | 257 | doAssert $initCookie("foo=bar; HttpOnly") != "foo=bar" 258 | doAssert $initCookie("foo=bar;httpOnly") != "foo=bar" 259 | 260 | 261 | doAssert $initCookie("foo=bar; secure") == "foo=bar; Secure" 262 | doAssert $initCookie(" foo=bar;Secure") == "foo=bar; Secure" 263 | doAssert $initCookie(" foo=bar;SEcUrE=anything") == "foo=bar; Secure" 264 | doAssert $initCookie(" foo=bar;httponyl;SEcUrE") == "foo=bar; Secure" 265 | 266 | 267 | # CookieJar 268 | block: 269 | # parse 270 | block: 271 | var cookieJar = initCookieJar() 272 | cookieJar.parse("username=netkit; password=root") 273 | 274 | doAssert cookieJar["username"] == "netkit" 275 | doAssert cookieJar["password"] == "root" 276 | -------------------------------------------------------------------------------- /tests/http/thttpmethod.nim: -------------------------------------------------------------------------------- 1 | discard """ 2 | cmd: "nim c -r --styleCheck:hint --panics:on $options $file" 3 | matrix: "--gc:arc" 4 | targets: "c" 5 | nimout: "" 6 | action: "run" 7 | exitcode: 0 8 | timeout: 60.0 9 | """ 10 | import netkit/http/httpmethod 11 | 12 | doAssert parseHttpMethod("GET") == HttpGet 13 | doAssert parseHttpMethod("POST") == HttpPost 14 | doAssert parseHttpMethod("TRACE") == HttpTrace 15 | doAssertRaises(ValueError): 16 | discard parseHttpMethod("get") 17 | -------------------------------------------------------------------------------- /tests/http/tspec.nim: -------------------------------------------------------------------------------- 1 | discard """ 2 | cmd: "nim c -r --styleCheck:hint --panics:on $options $file" 3 | matrix: "--gc:arc" 4 | targets: "c" 5 | nimout: "" 6 | action: "run" 7 | exitcode: 0 8 | timeout: 60.0 9 | """ 10 | import netkit/http/spec 11 | 12 | 13 | block test_header_name: 14 | checkFieldName("hello!") 15 | checkFieldName("+._~`a|&*+-#") 16 | checkFieldName("flywind123456") 17 | checkFieldName("FLYWIND_-'+!") 18 | 19 | block test_header_value: 20 | # const ValueChars = { HTAB, SP, '\x21'..'\x7E', '\x80'..'\xFF' } 21 | checkFieldValue("hello!") 22 | checkFieldValue("+._~`a|&*+-#") 23 | checkFieldValue("flywind123456") 24 | checkFieldValue("FLYWIND_-'+!") 25 | checkFieldValue("fly\twind") 26 | checkFieldValue("fly?©wind®") 27 | -------------------------------------------------------------------------------- /tests/http/tstatus.nim: -------------------------------------------------------------------------------- 1 | discard """ 2 | cmd: "nim c -r --styleCheck:hint --panics:on $options $file" 3 | matrix: "--gc:arc" 4 | targets: "c" 5 | nimout: "" 6 | action: "run" 7 | exitcode: 0 8 | timeout: 60.0 9 | """ 10 | import netkit/http/status 11 | 12 | 13 | doAssert parseHttpCode(100) == Http100 14 | doAssert parseHttpCode(200) == Http200 
15 | 16 | 17 | doAssertRaises(ValueError): 18 | discard parseHttpCode(377) 19 | -------------------------------------------------------------------------------- /tests/http/tversion.nim: -------------------------------------------------------------------------------- 1 | discard """ 2 | cmd: "nim c -r --styleCheck:hint --panics:on $options $file" 3 | matrix: "--gc:arc" 4 | targets: "c" 5 | nimout: "" 6 | action: "run" 7 | exitcode: 0 8 | timeout: 60.0 9 | """ 10 | import netkit/http/version 11 | 12 | 13 | doAssert parseHttpVersion("HTTP/1.1") == HttpVer11 14 | doAssert parseHttpVersion("HTTP/1.0") == HttpVer10 15 | 16 | doAssertRaises(ValueError): 17 | discard parseHttpVersion("HTTP/2.0") 18 | 19 | doAssertRaises(ValueError): 20 | discard parseHttpVersion("HTTP/1.2") 21 | 22 | doAssertRaises(ValueError): 23 | discard parseHttpVersion("HTTP/1.1.1") 24 | 25 | doAssertRaises(ValueError): 26 | discard parseHttpVersion("1.0") 27 | 28 | -------------------------------------------------------------------------------- /tests/server/thttp_server.nim.cfg: -------------------------------------------------------------------------------- 1 | --define:BufferSize=16 2 | --define:LimitChunkDataLen=16 -------------------------------------------------------------------------------- /tests/test.nim: -------------------------------------------------------------------------------- 1 | discard """ 2 | cmd: "nim c -r --styleCheck:hint --panics:on $options $file" 3 | matrix: "--gc:arc" 4 | targets: "c" 5 | nimout: "" 6 | action: "run" 7 | exitcode: 0 8 | timeout: 60.0 9 | """ 10 | 11 | # netkit 12 | # (c) Copyright 2020 Wang Tong 13 | # 14 | # See the file "LICENSE", included in this 15 | # distribution, for details about the copyright. 16 | 17 | import unittest 18 | import asyncdispatch 19 | 20 | # import netkit/http/base 21 | 22 | # template f(t: untyped) = 23 | # proc cb() = 24 | # let fut = t 25 | # fut.callback = proc () = 26 | # discard 27 | 28 | # echo "f()" 29 | # cb() 30 | 31 | # proc test(): char = 32 | # echo "..." 33 | # return 'a' 34 | 35 | # proc futDemo(): Future[int] = 36 | # echo "futDemo()" 37 | # result = newFuture[int]() 38 | 39 | # f: futDemo() 40 | 41 | type 42 | Opt = object 43 | x: string 44 | 45 | Test = ref object 46 | opt: Opt 47 | 48 | proc `=destroy`(a: var Opt) = 49 | echo "=destroy" 50 | 51 | # proc g() = 52 | # var t: Test = Test() 53 | 54 | # g() 55 | 56 | # GC_fullCollect() -------------------------------------------------------------------------------- /tools/docplus/.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | node_modules/ 3 | package-lock.json -------------------------------------------------------------------------------- /tools/docplus/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kanban-redis", 3 | "version": "0.1.0", 4 | "author": { 5 | "name": "Wang Tong", 6 | "email": "itulayangi@outlook.com" 7 | }, 8 | "main": "./polish.js", 9 | "engines": { 10 | "node": ">=12.16.2" 11 | }, 12 | "dependencies": { 13 | "jsdom": "^16.2.2" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /tools/docplus/polish.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // This program is a fixer for nim documentation. The HTML emitted by nim doc is not satisfactory, 4 | // so this fixer makes some adjustments to match the HTML document format we expect. 5 | // 6 | // Note: this program depends on NodeJS and a few libraries, mainly JSDOM 7 | // We repair it! 
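// Usage sketch, based on the entry point at the bottom of this file (the "htmldocs"
// directory name is only an example value):
//
//   DOC_PLUS_ROOT=htmldocs node tools/docplus/polish.js
//
// DOC_PLUS_ROOT may point at a single .html/.htm file or at a directory, which is
// scanned recursively for .html/.htm files.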
8 | 9 | const Fs = require('fs') 10 | const Path = require('path') 11 | const JsDom = require('jsdom') 12 | 13 | const DOCK_HACK_JS = Path.resolve(__dirname, 'dochack.js') 14 | 15 | class DocPolisher { 16 | constructor(document) { 17 | this.document = document 18 | } 19 | 20 | polishCSS() { 21 | this.document.head.insertAdjacentHTML("beforeend", ``) 94 | } 95 | 96 | polishJS() { 97 | const scripts = this.document.querySelectorAll("script") 98 | for (let i = 0, len = scripts.length; i < len; i++) { 99 | if (scripts[i].src === "dochack.js") { 100 | scripts[i].src = "/dochack.js" 101 | } 102 | } 103 | } 104 | 105 | removeChildTexts(elem) { 106 | var childNodes = elem.childNodes 107 | var texts = [] 108 | for (let j = 0, len = childNodes.length; j < len; j++) { 109 | if (childNodes[j].nodeType == 3 /*Text*/) { 110 | texts.push(childNodes[j]) 111 | } 112 | } 113 | for (let text of texts) { 114 | elem.removeChild(text) 115 | } 116 | } 117 | 118 | polishEnums() { 119 | const keywordElems = this.document.querySelectorAll("pre > .Keyword") 120 | for (let i = 0, len = keywordElems.length; i < len; i++) { 121 | const elem = keywordElems[i] 122 | if (elem.innerHTML === 'enum') { 123 | this.removeChildTexts(elem.parentElement) 124 | 125 | const children = elem.parentElement.children 126 | for (let i = 0, len = children.length; i < len; i++) { 127 | if (children[i].classList.contains('Other')) { 128 | if (children[i].innerHTML == '=') { 129 | children[i].innerHTML = ' ' + children[i].innerHTML + ' ' 130 | } 131 | if (children[i].innerHTML == ',') { 132 | let j = i + 1 133 | if (j < len) { 134 | if (children[j].classList.contains('Comment')) { 135 | children[j].innerHTML = ' ' + children[j].innerHTML + '\n ' 136 | } 137 | } 138 | children[i].innerHTML = children[i].innerHTML + '\n ' 139 | } 140 | } else { 141 | let j = i + 1 142 | if (j < len) { 143 | if (children[j].classList.contains('Comment')) { 144 | children[j].innerHTML = '\n ' + children[j].innerHTML + '\n ' 145 | } 146 | } 147 | } 148 | } 149 | 150 | elem.innerHTML = elem.innerHTML + '\n ' 151 | } 152 | } 153 | } 154 | } 155 | 156 | class DocManager { 157 | constructor(rootDir) { 158 | this.rootDir = rootDir 159 | } 160 | 161 | run() { 162 | for (let file of this.files()) { 163 | console.log('Polishing:', file) 164 | const content = Fs.readFileSync(file, 'utf8') 165 | const dom = new JsDom.JSDOM(content) 166 | const polisher = new DocPolisher(dom.window.document) 167 | polisher.polishCSS() 168 | polisher.polishJS() 169 | polisher.polishEnums() 170 | Fs.writeFileSync(file, dom.serialize(), 'utf8') 171 | } 172 | Fs.copyFileSync(DOCK_HACK_JS, Path.join(this.rootDir, 'dochack.js')) 173 | } 174 | 175 | * files() { 176 | const stat = Fs.statSync(this.rootDir) 177 | if (stat.isDirectory()) { 178 | } else if (stat.isFile && (Path.extname(this.rootDir) === '.html' || Path.extname(this.rootDir) === '.htm')) { 179 | yield this.rootDir 180 | return 181 | } else { 182 | return 183 | } 184 | const dirs = [this.rootDir] 185 | for (let dir of dirs) { 186 | const names = Fs.readdirSync(dir) 187 | for (let name of names) { 188 | const filename = Path.join(dir, name) 189 | const stat = Fs.statSync(filename) 190 | if (stat.isDirectory()) { 191 | dirs.push(filename) 192 | } else if (stat.isFile && (Path.extname(filename) === '.html' || Path.extname(filename) === '.htm')) { 193 | yield filename 194 | } 195 | } 196 | } 197 | } 198 | } 199 | 200 | if (typeof process.env.DOC_PLUS_ROOT === 'string') { 201 | new DocManager(process.env.DOC_PLUS_ROOT).run() 202 | } 
--------------------------------------------------------------------------------