├── .eslintignore ├── .eslintrc.json ├── .github │ ├── dependabot.yml │ └── workflows │ ├── ci.yml │ └── codeql-analysis.yml ├── .gitignore ├── .npmrc ├── .prettierignore ├── .prettierrc.json ├── CHANGELOG.md ├── LICENSE ├── README.md ├── SECURITY.md ├── main.sublime-project ├── package.json ├── src │ ├── index.test.ts │ └── index.ts └── tsconfig.json /.eslintignore: -------------------------------------------------------------------------------- 1 | /dist 2 | /node_modules 3 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "parser": "@typescript-eslint/parser", 3 | "extends": ["plugin:@typescript-eslint/recommended", "prettier"], 4 | "rules": { 5 | "@typescript-eslint/no-explicit-any": [0], 6 | "@typescript-eslint/no-use-before-define": [0], 7 | "@typescript-eslint/no-inferrable-types": [0], 8 | "@typescript-eslint/array-type": [0], 9 | "@typescript-eslint/explicit-function-return-type": [ 10 | 1, 11 | { 12 | "allowExpressions": true, 13 | "allowTypedFunctionExpressions": true 14 | } 15 | ] 16 | }, 17 | "plugins": ["@typescript-eslint", "prettier"] 18 | } 19 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: npm 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "13:00" 8 | open-pull-requests-limit: 10 9 | ignore: 10 | - dependency-name: "@types/node" 11 | versions: 12 | - 15.0.0 13 | - dependency-name: eslint-config-prettier 14 | versions: 15 | - 7.2.0 16 | - 8.0.0 17 | - 8.1.0 18 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | test: 11 | name: Test with Node.js v${{ matrix.node }} and ${{ matrix.os }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | matrix: 15 | os: [ubuntu-latest, macos-latest] 16 | node: ["14", "16", "18"] 17 | steps: 18 | - uses: actions/checkout@v2 19 | - uses: actions/setup-node@v1 20 | with: 21 | node-version: ${{ matrix.node }} 22 | - name: Install dependencies 23 | run: yarn install --frozen-lockfile 24 | - name: Lint the source 25 | run: yarn lint 26 | - name: Transpile into dist 27 | run: yarn build 28 | - name: Run tests 29 | run: yarn test 30 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | schedule: 9 | - cron: "43 22 * * 6" 10 | 11 | jobs: 12 | analyze: 13 | name: Analyze 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | language: ["javascript"] 19 | steps: 20 | - name: Checkout repository 21 | uses: actions/checkout@v2 22 | - name: Initialize CodeQL 23 | uses: github/codeql-action/init@v1 24 | with: 25 | languages: ${{ matrix.language }} 26 | - name: Autobuild 27 | uses: github/codeql-action/autobuild@v1 28 | - name: Perform CodeQL Analysis 29 | uses: github/codeql-action/analyze@v1 30 |
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.sublime-workspace 2 | .DS_Store 3 | /dist 4 | /node_modules 5 | /npm-debug.log 6 | /package-lock.json 7 | /yarn-error.log 8 | /yarn.lock -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | package-lock=false 2 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | /dist 2 | /node_modules 3 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "overrides": [ 3 | { 4 | "files": ".ts", 5 | "options": { "parser": "typescript" } 6 | } 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # fs-capacitor changelog 2 | 3 | ## 1.0.0 4 | 5 | - Initial release. 6 | 7 | ### 1.0.1 8 | 9 | - Use default fs flags and mode. 10 | 11 | ## 2.0.0 12 | 13 | - Update dependencies. 14 | - Add tests for special stream scenarios. 15 | - **BREAKING:** Remove special handling of terminating events, see [jaydenseric/graphql-upload#131](https://github.com/jaydenseric/graphql-upload/issues/131). 16 | 17 | ### 2.0.1 18 | 19 | - Update dependencies. 20 | - Move configs out of package.json. 21 | - Use `wx` file flag instead of default `w` (thanks to @mattbretl via #8). 22 | 23 | ### 2.0.2 24 | 25 | - Update dev dependencies. 26 | - Fix mjs structure to work with node v12. 27 | - Fix a bug that would pause consumption of read streams until completion (thanks to @Nikosmonaut's investigation in #9). 28 | 29 | ### 2.0.3 30 | 31 | - Emit write event _after_ bytes have been written to the filesystem. 32 | 33 | ### 2.0.4 34 | 35 | - Revert support for Node.js v12 `--experimental-modules` mode that was published in [v2.0.2](https://github.com/mike-marcacci/fs-capacitor/releases/tag/v2.0.2) that broke compatibility with earlier Node.js versions and test both ESM and CJS builds (skipping `--experimental-modules` tests for Node.js v12), via [#11](https://github.com/mike-marcacci/fs-capacitor/pull/11). 36 | - Use package `browserslist` field instead of configuring `@babel/preset-env` directly. 37 | - Configure `@babel/preset-env` to use shipped proposals and loose mode. 38 | - Give dev tool config files `.json` extensions so they can be Prettier linted. 39 | - Don't Prettier ignore the `lib` directory; it's meant to be pretty. 40 | - Prettier ignore `package.json` and `package-lock.json` so npm can own the formatting. 41 | - Configure [`eslint-plugin-node`](https://npm.im/eslint-plugin-node) to resolve `.mjs` before `.js` and other extensions, for compatibility with the pre-Node.js v12 `--experimental-modules` behavior. 42 | - Don't ESLint ignore `node_modules`, as it's already ignored by default. 43 | - Use the `classic` TAP reporter for tests as it has more compact output. 44 | 45 | ## 3.0.0 46 | 47 | - Update dev dependencies. 48 | - Add support for Node.js v13 by no longer extending `ReadStream` and `WriteStream` from node's `fs` library.
49 | - Specify `0o600` for buffer file permissions instead of node's default `0o666`. 50 | - **BREAKING:** Remove several undocumented properties that existed for consistency with the extended classes. 51 | - **BREAKING:** No longer listen for `SIGINT`, and instead warn the application to add handlers for terminating signals. 52 | 53 | ## 4.0.0 54 | 55 | - Update source to typescript. 56 | - Add `WriteStream.release` replacing the functionality of an error-free `WriteStream.destroy()`. 57 | - **BREAKING:** Change `WriteStream.destroy()` to immediately destroy attached `ReadStream`s even without an error. 58 | - **BREAKING:** Reluctantly remove exported `.mjs` files now that we have an external commonjs dependency and are still missing clear interop guidance from node. 59 | 60 | ### 4.0.1 61 | 62 | - Add cleanup example to README.md. 63 | - Remove warnings about signal listeners. 64 | 65 | ## 5.0.0 66 | 67 | - Update dev dependencies. 68 | - Remove dependency on `readable-stream` to expose new (but internally unused) features of streams in node v13. 69 | - **BREAKING:** Remove support for node v8. 70 | 71 | ### 5.0.1 72 | 73 | - Add cleanup example to README.md. 74 | - Remove warnings about signal listeners. 75 | 76 | ## 6.0.0 77 | 78 | - Update dev dependencies. 79 | - Add the ability to specify encoding and highWaterMark in `createReadStream()`. 80 | - **BREAKING:** Remove "name" argument from `createReadStream()`. 81 | 82 | ### 6.1.0 83 | 84 | - Add the ability to specify defaultEncoding and highWaterMark in `new WriteStream()`. 85 | - Export `ReadStreamOptions` and `WriteStreamOptions` interfaces. 86 | - Add tests for highWaterMark option in `createReadStream`. 87 | - Add documentation of configuration options to README. 88 | 89 | ### 6.2.0 90 | 91 | - Upgrade dependencies. 92 | - Update option types to match changes in @types/node. 93 | 94 | ## 7.0.0 95 | 96 | - Upgrade dependencies. 97 | - Prevent Node.js max listeners exceeded warnings if many `fs-capacitor` `ReadStream` instances are created at the same time, fixing [#30](https://github.com/mike-marcacci/fs-capacitor/issues/30) via [#42](https://github.com/mike-marcacci/fs-capacitor/pull/42). 98 | - Ensure initialization failures are reported, fixing [#45](https://github.com/mike-marcacci/fs-capacitor/issues/45) via [#46](https://github.com/mike-marcacci/fs-capacitor/pull/46/files). 99 | - **BREAKING:** Drop support for node 13. 100 | - **BREAKING:** Drop support for node 10. 101 | - **BREAKING:** Change module type to ES module. 102 | 103 | ### 7.0.1 104 | 105 | - Publish TS definitions and map via [#54](https://github.com/mike-marcacci/fs-capacitor/pull/54). 106 | 107 | ## 8.0.0 108 | 109 | - Upgrade development dependencies. 110 | - **BREAKING:** Drop support for node 12.
111 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Mike Marcacci 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Continuous Integration](https://github.com/mike-marcacci/fs-capacitor/workflows/Continuous%20Integration/badge.svg) [![Current Version](https://badgen.net/npm/v/fs-capacitor)](https://npm.im/fs-capacitor) ![Supported Node.js Versions](https://badgen.net/npm/node/fs-capacitor) 2 | 3 | # FS Capacitor 4 | 5 | FS Capacitor is a filesystem buffer for finite node streams. It supports simultaneous read/write, and can be used to create multiple independent readable streams, each starting at the beginning of the buffer. 6 | 7 | This is useful for file uploads and other situations where you want to avoid delays to the source stream, but have slow downstream transformations to apply: 8 | 9 | ```js 10 | import fs from "fs"; 11 | import http from "http"; 12 | import { WriteStream } from "fs-capacitor"; 13 | 14 | http.createServer((req, res) => { 15 | const capacitor = new WriteStream(); 16 | const destination = fs.createWriteStream("destination.txt"); 17 | 18 | // pipe data to the capacitor 19 | req.pipe(capacitor); 20 | 21 | // read data from the capacitor 22 | capacitor 23 | .createReadStream() 24 | .pipe(/* some slow Transform streams here */) 25 | .pipe(destination); 26 | 27 | // read data from the very beginning 28 | setTimeout(() => { 29 | capacitor.createReadStream().pipe(/* elsewhere */); 30 | 31 | // you can destroy a capacitor as soon as no more read streams are needed 32 | // without worrying if existing streams are fully consumed 33 | capacitor.destroy(); 34 | }, 100); 35 | }); 36 | ``` 37 | 38 | It is especially useful for use cases like [`graphql-upload`](https://github.com/jaydenseric/graphql-upload), where server code may need to stash earlier parts of a stream until later parts have been processed, and to attach multiple consumers at different times.
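For example, a request handler can stash an incoming stream immediately and decide later how many consumers to attach. Below is a minimal sketch of that pattern; the `stashStream` wrapper and its method names are illustrative, not part of the fs-capacitor API:

```js
import { WriteStream } from "fs-capacitor";

// Buffer a source stream right away and hand out independent readers later.
// (Illustrative helper; not part of the fs-capacitor API.)
function stashStream(source) {
  const capacitor = new WriteStream();
  source.pipe(capacitor);

  return {
    // Each call returns a fresh ReadStream that starts at byte 0.
    createReadStream: () => capacitor.createReadStream(),
    // Call once no further readers will be created; the buffer file is
    // removed as soon as all existing readers finish or are destroyed.
    release: () => capacitor.release(),
  };
}
```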
39 | 40 | FS Capacitor creates its temporary files in the directory identified by `os.tmpdir()` and attempts to remove them: 41 | 42 | - after `writeStream.destroy()` has been called and all read streams are fully consumed or destroyed 43 | - before the process exits 44 | 45 | Please note that FS Capacitor does NOT release disk space _as data is consumed_, and is therefore not suitable for use with infinite streams or streams larger than the available disk space. 46 | 47 | ### Ensuring cleanup on termination by process signal 48 | 49 | FS Capacitor cleans up all of its temporary files before the process exits, by listening to the [node process's `exit` event](https://nodejs.org/api/process.html#process_event_exit). This event, however, is only emitted when the process is about to exit as a result of either: 50 | 51 | - The process.exit() method being called explicitly; 52 | - The Node.js event loop no longer having any additional work to perform. 53 | 54 | When the node process receives a `SIGINT`, `SIGTERM`, or `SIGHUP` signal and there is no handler, it will exit without emitting the `exit` event. 55 | 56 | Beginning in version 3, fs-capacitor will NOT listen for these signals. Instead, the application should handle these signals according to its own logic and call `process.exit()` when it is ready to exit. This allows the application to implement its own graceful shutdown procedures, such as waiting for a stream to finish. 57 | 58 | The following can be added to the application to ensure resources are cleaned up before a signal-induced exit: 59 | 60 | ```js 61 | function shutdown() { 62 | // Any sync or async graceful shutdown procedures can be run before exiting… 63 | process.exit(0); 64 | } 65 | 66 | process.on("SIGINT", shutdown); 67 | process.on("SIGTERM", shutdown); 68 | process.on("SIGHUP", shutdown); 69 | ``` 70 | 71 | ## API 72 | 73 | ### WriteStream 74 | 75 | `WriteStream` extends [`stream.Writable`](https://nodejs.org/api/stream.html#stream_implementing_a_writable_stream). 76 | 77 | #### `new WriteStream(options: WriteStreamOptions)` 78 | 79 | Create a new `WriteStream` instance. 80 | 81 | #### `.createReadStream(options?: ReadStreamOptions): ReadStream` 82 | 83 | Create a new `ReadStream` instance attached to the `WriteStream` instance. 84 | 85 | Calling `.createReadStream()` on a released `WriteStream` will throw a `ReadAfterReleasedError` error. 86 | 87 | Calling `.createReadStream()` on a destroyed `WriteStream` will throw a `ReadAfterDestroyedError` error. 88 | 89 | As soon as a `ReadStream` ends or is closed (such as by calling `readStream.destroy()`), it is detached from its `WriteStream`. 90 | 91 | #### `.release(): void` 92 | 93 | Release the `WriteStream`'s claim on the underlying resources. Once called, destruction of underlying resources is performed as soon as all attached `ReadStream`s are removed. 94 | 95 | #### `.destroy(error?: Error): void` 96 | 97 | Destroy the `WriteStream` and all attached `ReadStream`s. If `error` is present, attached `ReadStream`s are destroyed with the same error. 98 | 99 | ### WriteStreamOptions 100 | 101 | #### `.highWaterMark?: number` 102 | 103 | Uses node's default of `16384` (16 KiB). Optional buffer size at which the writable stream will begin returning `false`. See [node's docs for `stream.Writable`](https://nodejs.org/api/stream.html#stream_constructor_new_stream_writable_options). For the curious, node has [a guide on backpressure in streams](https://nodejs.org/es/docs/guides/backpressuring-in-streams/).
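For example, a producer can observe this backpressure by checking the return value of `write()`; a small `highWaterMark` makes the threshold easy to reach. A minimal sketch (the byte counts here are arbitrary, chosen only to trip the threshold):

```js
import { WriteStream } from "fs-capacitor";

// A 1 KiB highWaterMark makes backpressure visible after a single 2 KiB chunk.
const capacitor = new WriteStream({ highWaterMark: 1024 });

// `write()` returns false once buffered bytes reach the highWaterMark.
if (!capacitor.write(Buffer.alloc(2048))) {
  // Standard Writable backpressure handling: wait for "drain" before
  // writing more (or, as here, ending the stream).
  capacitor.once("drain", () => capacitor.end());
} else {
  capacitor.end();
}
```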
104 | 105 | #### `.defaultEncoding` 106 | 107 | Uses node's default of `utf8`. Optional default encoding to use when no encoding is specified as an argument to `stream.write()`. See [node's docs for `stream.Writable`](https://nodejs.org/api/stream.html#stream_constructor_new_stream_writable_options). Possible values depend on the version of node, and are [defined in node's buffer implementation](https://github.com/nodejs/node/blob/master/lib/buffer.js). 108 | 109 | #### `.tmpdir` 110 | 111 | Uses node's [`os.tmpdir`](https://nodejs.org/api/os.html#os_os_tmpdir) by default. This function returns the directory used by fs-capacitor to store file buffers, and is intended primarily for testing and debugging. 112 | 113 | ### ReadStream 114 | 115 | `ReadStream` extends [`stream.Readable`](https://nodejs.org/api/stream.html#stream_new_stream_readable_options). 116 | 117 | ### ReadStreamOptions 118 | 119 | #### `.highWaterMark` 120 | 121 | Uses node's default of `16384` (16 KiB). Optional value to use as the readable stream's highWaterMark, specifying the number of bytes (for binary data) or characters (for strings) that will be buffered into memory. See [node's docs for `stream.Readable`](https://nodejs.org/api/stream.html#stream_new_stream_readable_options). For the curious, node has [a guide on backpressure in streams](https://nodejs.org/es/docs/guides/backpressuring-in-streams/). 122 | 123 | #### `.encoding` 124 | 125 | Uses node's default of `utf8`. Optional encoding to use when the stream's output is desired as a string. See [node's docs for `stream.Readable`](https://nodejs.org/api/stream.html#stream_new_stream_readable_options). Possible values depend on the version of node, and are [defined in node's buffer implementation](https://github.com/nodejs/node/blob/master/lib/buffer.js). 126 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | | Version | Supported | 6 | | ------- | ------------------ | 7 | | 7 | :white_check_mark: | 8 | | 6 | :white_check_mark: | 9 | | < 6 | :x: | 10 | 11 | ## Reporting a Vulnerability 12 | 13 | If you have discovered a security vulnerability with fs-capacitor, please reach out to me directly at [mike.marcacci@gmail.com](mailto:mike.marcacci@gmail.com) and I will create a [security advisory](https://docs.github.com/en/code-security/security-advisories/about-github-security-advisories) to track, patch, and disclose the issue.
14 | -------------------------------------------------------------------------------- /main.sublime-project: -------------------------------------------------------------------------------- 1 | { 2 | "folders": 3 | [ 4 | { 5 | "path": ".", 6 | "folder_exclude_patterns": ["**/node_modules", "**/dist"], 7 | "name": "fs-capacitor" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "fs-capacitor", 3 | "version": "8.0.0", 4 | "description": "Filesystem-buffered, passthrough stream that buffers indefinitely rather than propagate backpressure from downstream consumers.", 5 | "license": "MIT", 6 | "author": { 7 | "name": "Mike Marcacci", 8 | "email": "mike.marcacci@gmail.com" 9 | }, 10 | "repository": "github:mike-marcacci/fs-capacitor", 11 | "homepage": "https://github.com/mike-marcacci/fs-capacitor#readme", 12 | "bugs": "https://github.com/mike-marcacci/fs-capacitor/issues", 13 | "keywords": [ 14 | "stream", 15 | "buffer", 16 | "file", 17 | "split", 18 | "clone" 19 | ], 20 | "files": [ 21 | "dist/index.d.ts", 22 | "dist/index.js", 23 | "dist/index.js.map" 24 | ], 25 | "main": "dist/index.js", 26 | "engines": { 27 | "node": "^14.17.0 || >=16.0.0" 28 | }, 29 | "browserslist": { 30 | "production": [ 31 | "node >= 14" 32 | ], 33 | "development": [ 34 | "node ^14.17.0", 35 | "node >=16.0.0" 36 | ] 37 | }, 38 | "ava": { 39 | "nodeArguments": [ 40 | "--experimental-specifier-resolution=node" 41 | ] 42 | }, 43 | "devDependencies": { 44 | "@types/node": "^18.0.6", 45 | "@typescript-eslint/eslint-plugin": "^5.30.7", 46 | "@typescript-eslint/parser": "^5.30.7", 47 | "ava": "^4.3.1", 48 | "eslint": "^8.20.0", 49 | "eslint-config-prettier": "^8.5.0", 50 | "eslint-plugin-prettier": "^4.2.1", 51 | "nodemon": "^2.0.19", 52 | "prettier": "^2.7.1", 53 | "typescript": "^4.7.4" 54 | }, 55 | "scripts": { 56 | "format": "prettier --list-different --write '**/*.{json,yml,md,ts}'", 57 | "lint": "prettier -c '**/*.{json,yml,md,ts}' && eslint src --ext ts", 58 | "build": "rm -rf dist && tsc", 59 | "build:development": "rm -rf dist && tsc --watch", 60 | "test": "ava --verbose dist/index.test.js", 61 | "test:development": "ava --verbose --watch dist/index.test.js", 62 | "prepare": "yarn build", 63 | "prepublishOnly": "yarn install && yarn lint && yarn build && yarn test" 64 | }, 65 | "dependencies": {}, 66 | "type": "module", 67 | "exports": "./dist/index.js" 68 | } 69 | -------------------------------------------------------------------------------- /src/index.test.ts: -------------------------------------------------------------------------------- 1 | import { existsSync } from "fs"; 2 | import { Readable } from "stream"; 3 | import { ReadAfterDestroyedError, WriteStream } from "./index"; 4 | import test from "ava"; 5 | 6 | process.on("SIGINT", () => process.exit(0)); 7 | process.on("SIGTERM", () => process.exit(0)); 8 | process.on("SIGHUP", () => process.exit(0)); 9 | 10 | function streamToString(stream: Readable): Promise<string> { 11 | return new Promise((resolve, reject) => { 12 | let ended = false; 13 | let data = ""; 14 | stream 15 | .on("error", reject) 16 | .on("data", (chunk) => { 17 | if (ended) throw new Error("`data` emitted after `end`"); 18 | data += chunk; 19 | }) 20 | .on("end", () => { 21 | ended = true; 22 | resolve(data); 23 | }); 24 | }); 25 | } 26 | 27 | function waitForBytesWritten( 28 | stream: WriteStream, 29 | bytes: number, 30 | resolve:
() => void 31 | ): void { 32 | if (stream["_pos"] >= bytes) { 33 | setImmediate(resolve); 34 | return; 35 | } 36 | 37 | setImmediate(() => waitForBytesWritten(stream, bytes, resolve)); 38 | } 39 | 40 | test("Data from a complete stream.", async (t) => { 41 | let data = ""; 42 | const source = new Readable({ 43 | read() { 44 | // Intentionally not implementing anything here. 45 | }, 46 | }); 47 | 48 | // Add the first chunk of data (without any consumer) 49 | const chunk1 = "1".repeat(10); 50 | source.push(chunk1); 51 | source.push(null); 52 | data += chunk1; 53 | 54 | // Create a new capacitor 55 | const capacitor1 = new WriteStream(); 56 | t.is(capacitor1["_readStreams"].size, 0, "should start with 0 read streams"); 57 | 58 | // Pipe data to the capacitor 59 | source.pipe(capacitor1); 60 | 61 | // Attach a read stream 62 | const capacitor1Stream1 = capacitor1.createReadStream(); 63 | t.is( 64 | capacitor1["_readStreams"].size, 65 | 1, 66 | "should attach a new read stream before receiving data" 67 | ); 68 | 69 | // Wait until capacitor is finished writing all data 70 | const result = await streamToString(capacitor1Stream1); 71 | t.is(result, data, "should stream all data"); 72 | t.is( 73 | capacitor1["_readStreams"].size, 74 | 0, 75 | "should no longer have any attached read streams" 76 | ); 77 | }); 78 | 79 | test("Error while initializing.", async (t) => { 80 | // Create a new capacitor 81 | const capacitor1 = new WriteStream({ tmpdir: () => "/tmp/does-not-exist" }); 82 | 83 | let resolve: () => void, reject: (error: Error) => void; 84 | const promise = new Promise((_resolve, _reject) => { 85 | resolve = _resolve; 86 | reject = _reject; 87 | }); 88 | 89 | // Synchronously attach an error listener. 90 | capacitor1.on("error", (error) => { 91 | try { 92 | t.is((error as any).code, "ENOENT"); 93 | resolve(); 94 | } catch (error) { 95 | reject(error as Error); 96 | } 97 | }); 98 | 99 | await promise; 100 | }); 101 | 102 | test("Allows specification of encoding in createReadStream.", async (t) => { 103 | const data = Buffer.from("1".repeat(10), "utf8"); 104 | const source = new Readable({ 105 | read() { 106 | // Intentionally not implementing anything here. 107 | }, 108 | }); 109 | 110 | // Add the first chunk of data (without any consumer) 111 | source.push(data); 112 | source.push(null); 113 | 114 | // Create a new capacitor 115 | const capacitor1 = new WriteStream(); 116 | 117 | // Pipe data to the capacitor 118 | source.pipe(capacitor1); 119 | 120 | // Attach a read stream 121 | const capacitor1Stream1 = capacitor1.createReadStream({ 122 | encoding: "base64", 123 | }); 124 | 125 | // Wait until capacitor is finished writing all data 126 | const result = await streamToString(capacitor1Stream1); 127 | t.is(result, data.toString("base64"), "should stream all data"); 128 | t.is( 129 | capacitor1["_readStreams"].size, 130 | 0, 131 | "should no longer have any attached read streams" 132 | ); 133 | }); 134 | 135 | test("Allows specification of defaultEncoding in new WriteStream.", async (t) => { 136 | const data = Buffer.from("1".repeat(10), "utf8"); 137 | const source = new Readable({ 138 | encoding: "base64", 139 | read() { 140 | // Intentionally not implementing anything here.
141 | }, 142 | }); 143 | 144 | // Add the first chunk of data (without any consumer) 145 | source.push(data); 146 | source.push(null); 147 | 148 | // Create a new capacitor 149 | const capacitor1 = new WriteStream({ defaultEncoding: "base64" }); 150 | 151 | // Pipe data to the capacitor 152 | source.pipe(capacitor1); 153 | 154 | // Attach a read stream 155 | const capacitor1Stream1 = capacitor1.createReadStream({}); 156 | 157 | // Wait until capacitor is finished writing all data 158 | const result = await streamToString(capacitor1Stream1); 159 | t.is(result, data.toString("utf8"), "should stream all data"); 160 | t.is( 161 | capacitor1["_readStreams"].size, 162 | 0, 163 | "should no longer have any attached read streams" 164 | ); 165 | }); 166 | 167 | test("Allows specification of highWaterMark.", async (t) => { 168 | // Create a new capacitor 169 | const capacitor1 = new WriteStream({ highWaterMark: 10001 }); 170 | t.is( 171 | capacitor1.writableHighWaterMark, 172 | 10001, 173 | "allow specification of highWaterMark in new WriteStream" 174 | ); 175 | 176 | // Attach a read stream 177 | const capacitor1Stream1 = capacitor1.createReadStream({ 178 | highWaterMark: 10002, 179 | }); 180 | t.is( 181 | capacitor1Stream1.readableHighWaterMark, 182 | 10002, 183 | "allow specification of highWaterMark in createReadStream" 184 | ); 185 | 186 | capacitor1Stream1.destroy(); 187 | capacitor1.destroy(); 188 | }); 189 | 190 | test("Data from an open stream, 1 chunk, no read streams.", async (t) => { 191 | let data = ""; 192 | const source = new Readable({ 193 | read() { 194 | // Intentionally not implementing anything here. 195 | }, 196 | }); 197 | 198 | // Create a new capacitor 199 | const capacitor1 = new WriteStream(); 200 | t.is(capacitor1["_readStreams"].size, 0, "should start with 0 read streams"); 201 | 202 | // Pipe data to the capacitor 203 | source.pipe(capacitor1); 204 | 205 | // Add the first chunk of data (without any read streams) 206 | const chunk1 = "1".repeat(10); 207 | source.push(chunk1); 208 | source.push(null); 209 | data += chunk1; 210 | 211 | // Attach a read stream 212 | const capacitor1Stream1 = capacitor1.createReadStream(); 213 | t.is( 214 | capacitor1["_readStreams"].size, 215 | 1, 216 | "should attach a new read stream before receiving data" 217 | ); 218 | 219 | // Wait until capacitor is finished writing all data 220 | const result = await streamToString(capacitor1Stream1); 221 | t.is(result, data, "should stream all data"); 222 | t.is( 223 | capacitor1["_readStreams"].size, 224 | 0, 225 | "should no longer have any attached read streams" 226 | ); 227 | }); 228 | 229 | test("Data from an open stream, 1 chunk, 1 read stream.", async (t) => { 230 | let data = ""; 231 | const source = new Readable({ 232 | read() { 233 | // Intentionally not implementing anything here.
234 | }, 235 | }); 236 | 237 | // Create a new capacitor 238 | const capacitor1 = new WriteStream(); 239 | t.is(capacitor1["_readStreams"].size, 0, "should start with 0 read streams"); 240 | 241 | // Pipe data to the capacitor 242 | source.pipe(capacitor1); 243 | 244 | // Attach a read stream 245 | const capacitor1Stream1 = capacitor1.createReadStream(); 246 | t.is( 247 | capacitor1["_readStreams"].size, 248 | 1, 249 | "should attach a new read stream before receiving data" 250 | ); 251 | 252 | // Add the first chunk of data (with 1 read stream) 253 | const chunk1 = "1".repeat(10); 254 | source.push(chunk1); 255 | source.push(null); 256 | data += chunk1; 257 | 258 | // Wait until capacitor is finished writing all data 259 | const result = await streamToString(capacitor1Stream1); 260 | t.is(result, data, "should stream all data"); 261 | t.is( 262 | capacitor1["_readStreams"].size, 263 | 0, 264 | "should no longer have any attached read streams" 265 | ); 266 | }); 267 | 268 | test("Destroy with error.", async (t) => { 269 | const capacitor2 = new WriteStream(); 270 | const capacitor2Stream1 = capacitor2.createReadStream(); 271 | const capacitor2Stream2 = capacitor2.createReadStream(); 272 | 273 | const capacitor2ReadStream1Destroyed = new Promise((resolve) => 274 | capacitor2Stream1.on("close", resolve) 275 | ); 276 | const capacitor2Destroyed = new Promise((resolve) => 277 | capacitor2.on("close", resolve) 278 | ); 279 | 280 | capacitor2Stream1.destroy(); 281 | await capacitor2ReadStream1Destroyed; 282 | 283 | const error = new Error("test"); 284 | let capacitor2Stream2Error; 285 | capacitor2Stream2.on("error", (error: Error) => { 286 | capacitor2Stream2Error = error; 287 | }); 288 | let capacitor2Error; 289 | capacitor2.on("error", (error: Error) => { 290 | capacitor2Error = error; 291 | }); 292 | capacitor2.destroy(error); 293 | await capacitor2Destroyed; 294 | 295 | t.is(capacitor2.destroyed, true, "should mark capacitor as destroyed"); 296 | t.is( 297 | capacitor2Stream2.destroyed, 298 | true, 299 | "should mark attached read streams as destroyed" 300 | ); 301 | t.is( 302 | capacitor2Stream2Error, 303 | error, 304 | "should emit the original error on read stream" 305 | ); 306 | t.is( 307 | capacitor2Error, 308 | error, 309 | "should emit the original error on write stream" 310 | ); 311 | }); 312 | 313 | test("Destroy without error.", async (t) => { 314 | const capacitor3 = new WriteStream(); 315 | const capacitor3Stream1 = capacitor3.createReadStream(); 316 | const capacitor3Stream2 = capacitor3.createReadStream(); 317 | 318 | const capacitor3ReadStream1Destroyed = new Promise((resolve) => 319 | capacitor3Stream1.on("close", resolve) 320 | ); 321 | const capacitor3Destroyed = new Promise((resolve) => 322 | capacitor3.on("close", resolve) 323 | ); 324 | 325 | capacitor3Stream1.destroy(); 326 | await capacitor3ReadStream1Destroyed; 327 | 328 | let capacitor3Stream2Error; 329 | capacitor3Stream2.on("error", (error: Error) => { 330 | capacitor3Stream2Error = error; 331 | }); 332 | let capacitor3Error; 333 | capacitor3.on("error", (error: Error) => { 334 | capacitor3Error = error; 335 | }); 336 | capacitor3.destroy(); 337 | await capacitor3Destroyed; 338 | 339 | t.is(capacitor3.destroyed, true, "should mark capacitor as destroyed"); 340 | t.is( 341 | capacitor3Stream2.destroyed, 342 | true, 343 | "should mark attached read streams as destroyed" 344 | ); 345 | t.is( 346 | capacitor3Stream2Error, 347 | undefined, 348 | "should not emit an error on read stream" 349 | ); 350 |
t.is(capacitor3Error, undefined, "should not emit an error on write stream"); 351 | }); 352 | 353 | function withChunkSize(size: number): void { 354 | test(`End-to-end with chunk size: ${size}`, async (t) => { 355 | let data = ""; 356 | const source = new Readable({ 357 | read() { 358 | // Intentionally not implementing anything here. 359 | }, 360 | }); 361 | 362 | // Create a new capacitor and read stream before any data has been written. 363 | let capacitor1Closed = false; 364 | const capacitor1 = new WriteStream(); 365 | capacitor1.on("close", () => (capacitor1Closed = true)); 366 | t.is( 367 | capacitor1["_readStreams"].size, 368 | 0, 369 | "should start with 0 read streams" 370 | ); 371 | const capacitor1Stream1 = capacitor1.createReadStream(); 372 | t.is( 373 | capacitor1["_readStreams"].size, 374 | 1, 375 | "should attach a new read stream before receiving data" 376 | ); 377 | 378 | // Make sure a temporary file was created. 379 | await new Promise((resolve) => capacitor1.on("ready", resolve)); 380 | const path = capacitor1["_path"] as string; 381 | const fd = capacitor1["_fd"] as number; 382 | t.is( 383 | typeof capacitor1["_path"], 384 | "string", 385 | "capacitor1._path should be a string" 386 | ); 387 | t.is(typeof fd, "number", "capacitor1._fd should be a number"); 388 | t.is(existsSync(path), true, "creates a temp file"); 389 | 390 | // Pipe data to the capacitor. 391 | source.pipe(capacitor1); 392 | 393 | // Add the first chunk of data (without any read streams). 394 | const chunk1 = "1".repeat(size); 395 | source.push(chunk1); 396 | data += chunk1; 397 | 398 | // Wait until this chunk has been written to the buffer 399 | await new Promise((resolve) => 400 | waitForBytesWritten(capacitor1, size, resolve) 401 | ); 402 | 403 | // Create a new stream after some data has been written. 404 | const capacitor1Stream2 = capacitor1.createReadStream(); 405 | t.is( 406 | capacitor1["_readStreams"].size, 407 | 2, 408 | "should attach a new read stream after first write" 409 | ); 410 | 411 | const writeEventBytesWritten = new Promise((resolve) => { 412 | capacitor1.once("write", () => { 413 | resolve(capacitor1["_pos"]); 414 | }); 415 | }); 416 | 417 | // Add a second chunk of data 418 | const chunk2 = "2".repeat(size); 419 | source.push(chunk2); 420 | data += chunk2; 421 | 422 | // Wait until this chunk has been written to the buffer 423 | await new Promise((resolve) => 424 | waitForBytesWritten(capacitor1, 2 * size, resolve) 425 | ); 426 | 427 | // Make sure write event is called after bytes are written to the filesystem 428 | t.is( 429 | await writeEventBytesWritten, 430 | 2 * size, 431 | "bytesWritten should include new chunk" 432 | ); 433 | 434 | // End the source & wait until capacitor is finished. 435 | const finished = new Promise((resolve) => 436 | capacitor1.once("finish", resolve) 437 | ); 438 | source.push(null); 439 | await finished; 440 | 441 | // Create a new stream after the source has ended. 442 | const capacitor1Stream3 = capacitor1.createReadStream(); 443 | const capacitor1Stream4 = capacitor1.createReadStream(); 444 | t.is( 445 | capacitor1["_readStreams"].size, 446 | 4, 447 | "should attach new read streams after end" 448 | ); 449 | 450 | // Make sure complete data is sent to a read stream.
451 | const result2 = await streamToString(capacitor1Stream2); 452 | t.is( 453 | (capacitor1Stream2 as any)._readableState.ended, 454 | true, 455 | "should mark read stream as ended" 456 | ); 457 | t.is(result2, data, "should stream complete data"); 458 | 459 | const result4 = await streamToString(capacitor1Stream4); 460 | t.is( 461 | (capacitor1Stream4 as any)._readableState.ended, 462 | true, 463 | "should mark read stream as ended" 464 | ); 465 | t.is(result4, data, "should stream complete data"); 466 | 467 | t.is( 468 | capacitor1["_readStreams"].size, 469 | 2, 470 | "should detach an ended read stream" 471 | ); 472 | 473 | // Make sure a read stream can be destroyed. 474 | await new Promise((resolve) => { 475 | capacitor1Stream1.once("error", resolve); 476 | capacitor1Stream1.destroy(new Error("test")); 477 | }); 478 | t.is( 479 | capacitor1Stream1.destroyed, 480 | true, 481 | "should mark read stream as destroyed" 482 | ); 483 | t.is( 484 | capacitor1["_readStreams"].size, 485 | 1, 486 | "should detach a destroyed read stream" 487 | ); 488 | 489 | // Release the capacitor. 490 | capacitor1.release(); 491 | 492 | t.is( 493 | capacitor1Closed, 494 | false, 495 | "should not destroy while read streams exist" 496 | ); 497 | t.true(capacitor1["_released"], "should mark for future destruction"); 498 | 499 | // Make sure the capacitor is destroyed once no read streams exist 500 | const readStreamDestroyed = new Promise((resolve) => 501 | capacitor1Stream3.on("close", resolve) 502 | ); 503 | const capacitorDestroyed = new Promise((resolve) => 504 | capacitor1.on("close", resolve) 505 | ); 506 | capacitor1Stream3.destroy(); 507 | await readStreamDestroyed; 508 | t.is( 509 | capacitor1Stream3.destroyed, 510 | true, 511 | "should mark read stream as destroyed" 512 | ); 513 | t.is( 514 | capacitor1["_readStreams"].size, 515 | 0, 516 | "should detach a destroyed read stream" 517 | ); 518 | await capacitorDestroyed; 519 | t.is(capacitor1Closed, true, "should mark capacitor as closed"); 520 | t.is(capacitor1["_fd"], null, "should set fd to null"); 521 | t.is(capacitor1.destroyed, true, "should mark capacitor as destroyed"); 522 | t.is( 523 | existsSync(capacitor1["_path"] as string), 524 | false, 525 | "removes its temp file" 526 | ); 527 | 528 | // Make sure a new read stream cannot be created after destruction 529 | try { 530 | capacitor1.createReadStream(); 531 | throw new Error(); 532 | } catch (error) { 533 | t.is( 534 | error instanceof ReadAfterDestroyedError, 535 | true, 536 | "should not create a read stream once destroyed" 537 | ); 538 | } 539 | }); 540 | } 541 | 542 | // Test with small (sub-highWaterMark, 16384) chunks 543 | withChunkSize(10); 544 | 545 | // Test with large (above-highWaterMark, 16384) chunks 546 | withChunkSize(100000); 547 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import { randomBytes } from "crypto"; 2 | import { read, open, closeSync, unlinkSync, write, close, unlink } from "fs"; 3 | import { tmpdir } from "os"; 4 | import { join } from "path"; 5 | import { Readable, ReadableOptions, Writable, WritableOptions } from "stream"; 6 | import { EventEmitter } from "events"; 7 | 8 | export class ReadAfterDestroyedError extends Error {} 9 | export class ReadAfterReleasedError extends Error {} 10 | 11 | export interface ReadStreamOptions { 12 | highWaterMark?: ReadableOptions["highWaterMark"]; 13 | encoding?: 
ReadableOptions["encoding"]; 14 | } 15 | 16 | // Use a “proxy” event emitter configured to have an infinite maximum number of 17 | // listeners to prevent Node.js max listeners exceeded warnings if many 18 | // `fs-capacitor` `ReadStream` instances are created at the same time. See: 19 | // https://github.com/mike-marcacci/fs-capacitor/issues/30 20 | const processExitProxy = new EventEmitter(); 21 | processExitProxy.setMaxListeners(Infinity); 22 | process.once("exit", () => processExitProxy.emit("exit")); 23 | 24 | export class ReadStream extends Readable { 25 | private _pos: number = 0; 26 | private _writeStream: WriteStream; 27 | 28 | constructor(writeStream: WriteStream, options?: ReadStreamOptions) { 29 | super({ 30 | highWaterMark: options?.highWaterMark, 31 | encoding: options?.encoding, 32 | autoDestroy: true, 33 | }); 34 | this._writeStream = writeStream; 35 | } 36 | 37 | _read(n: number): void { 38 | if (this.destroyed) return; 39 | 40 | if (typeof this._writeStream["_fd"] !== "number") { 41 | this._writeStream.once("ready", () => this._read(n)); 42 | return; 43 | } 44 | 45 | // Using `allocUnsafe` here is OK because we return a slice the length of 46 | // `bytesRead`, and discard the rest. This prevents node from having to zero 47 | // out the entire allocation first. 48 | const buf = Buffer.allocUnsafe(n); 49 | read(this._writeStream["_fd"], buf, 0, n, this._pos, (error, bytesRead) => { 50 | if (error) this.destroy(error); 51 | 52 | // Push any read bytes into the local stream buffer. 53 | if (bytesRead) { 54 | this._pos += bytesRead; 55 | this.push(buf.slice(0, bytesRead)); 56 | return; 57 | } 58 | 59 | // If there were no more bytes to read and the write stream is finished, 60 | // then this stream has reached the end. 61 | if ( 62 | ( 63 | this._writeStream as any as { 64 | _writableState: { finished: boolean }; 65 | } 66 | )._writableState.finished 67 | ) { 68 | // Check if we have consumed the whole file up to where 69 | // the write stream has written before ending the stream 70 | if (this._pos < (this._writeStream as any as { _pos: number })._pos) 71 | this._read(n); 72 | else this.push(null); 73 | return; 74 | } 75 | 76 | // Otherwise, wait for the write stream to add more data or finish. 77 | const retry = (): void => { 78 | this._writeStream.off("finish", retry); 79 | this._writeStream.off("write", retry); 80 | this._read(n); 81 | }; 82 | 83 | this._writeStream.on("finish", retry); 84 | this._writeStream.on("write", retry); 85 | }); 86 | } 87 | } 88 | 89 | export interface WriteStreamOptions { 90 | highWaterMark?: WritableOptions["highWaterMark"]; 91 | defaultEncoding?: WritableOptions["defaultEncoding"]; 92 | tmpdir?: () => string; 93 | } 94 | 95 | export class WriteStream extends Writable { 96 | private _fd: null | number = null; 97 | private _path: null | string = null; 98 | private _pos: number = 0; 99 | private _readStreams: Set<ReadStream> = new Set(); 100 | private _released: boolean = false; 101 | 102 | constructor(options?: WriteStreamOptions) { 103 | super({ 104 | highWaterMark: options?.highWaterMark, 105 | defaultEncoding: options?.defaultEncoding, 106 | autoDestroy: false, 107 | }); 108 | 109 | // Generate a random filename. 110 | randomBytes(16, (error, buffer) => { 111 | if (error) { 112 | this.destroy(error); 113 | return; 114 | } 115 | 116 | this._path = join( 117 | (options?.tmpdir ?? tmpdir)(), 118 | `capacitor-${buffer.toString("hex")}.tmp` 119 | ); 120 | 121 | // Create a file in the OS's temporary files directory.
122 | open(this._path, "wx+", 0o600, (error, fd) => { 123 | if (error) { 124 | this.destroy(error); 125 | return; 126 | } 127 | 128 | // Cleanup when the process exits or is killed. 129 | processExitProxy.once("exit", this._cleanupSync); 130 | 131 | this._fd = fd; 132 | this.emit("ready"); 133 | }); 134 | }); 135 | } 136 | 137 | _cleanup = (callback: (error: null | Error) => void): void => { 138 | const fd = this._fd; 139 | const path = this._path; 140 | 141 | if (typeof fd !== "number" || typeof path !== "string") { 142 | callback(null); 143 | return; 144 | } 145 | 146 | // Close the file descriptor. 147 | close(fd, (closeError) => { 148 | // An error here probably means the fd was already closed, but we can 149 | // still try to unlink the file. 150 | unlink(path, (unlinkError) => { 151 | // If we are unable to unlink the file, the operating system will 152 | // clean up on next restart, since we store these in `os.tmpdir()`. 153 | this._fd = null; 154 | 155 | // We avoid removing this until now in case an exit occurs while 156 | // asynchronously cleaning up. 157 | processExitProxy.off("exit", this._cleanupSync); 158 | callback(unlinkError ?? closeError); 159 | }); 160 | }); 161 | }; 162 | 163 | _cleanupSync = (): void => { 164 | processExitProxy.off("exit", this._cleanupSync); 165 | 166 | if (typeof this._fd === "number") 167 | try { 168 | closeSync(this._fd); 169 | } catch (error) { 170 | // An error here probably means the fd was already closed, but we can 171 | // still try to unlink the file. 172 | } 173 | 174 | try { 175 | if (this._path !== null) { 176 | unlinkSync(this._path); 177 | } 178 | } catch (error) { 179 | // If we are unable to unlink the file, the operating system will clean 180 | // up on next restart, since we store these in `os.tmpdir()`. 181 | } 182 | }; 183 | 184 | _final(callback: (error?: null | Error) => any): void { 185 | if (typeof this._fd !== "number") { 186 | this.once("ready", () => this._final(callback)); 187 | return; 188 | } 189 | callback(); 190 | } 191 | 192 | _write( 193 | chunk: Buffer, 194 | encoding: string, 195 | callback: (error?: null | Error) => any 196 | ): void { 197 | if (typeof this._fd !== "number") { 198 | this.once("ready", () => this._write(chunk, encoding, callback)); 199 | return; 200 | } 201 | 202 | write(this._fd, chunk, 0, chunk.length, this._pos, (error) => { 203 | if (error) { 204 | callback(error); 205 | return; 206 | } 207 | 208 | // It's safe to increment `this._pos` after flushing to the filesystem 209 | // because node streams ensure that only one `_write()` is active at a 210 | // time. If this assumption is broken, the behavior of this library is 211 | // undefined, regardless of where this is incremented. Relocating this 212 | // to increment synchronously would result in correct file contents, but 213 | // the out-of-order writes would still open the potential for read streams 214 | // to scan positions that have not yet been written. 215 | this._pos += chunk.length; 216 | this.emit("write"); 217 | callback(); 218 | }); 219 | } 220 | 221 | release(): void { 222 | this._released = true; 223 | if (this._readStreams.size === 0) this.destroy(); 224 | } 225 | 226 | _destroy( 227 | error: undefined | null | Error, 228 | callback: (error?: null | Error) => any 229 | ): void { 230 | // Destroy all attached read streams. 231 | for (const readStream of this._readStreams) { 232 | readStream.destroy(error || undefined); 233 | } 234 | 235 | // This capacitor is fully initialized.
236 | if (typeof this._fd === "number" && typeof this._path === "string") { 237 | this._cleanup((cleanupError) => callback(cleanupError ?? error)); 238 | return; 239 | } 240 | 241 | // This capacitor has not yet finished initialization; if initialization 242 | // does complete, immediately clean up after. 243 | this.once("ready", () => { 244 | this._cleanup((cleanupError) => { 245 | if (cleanupError) { 246 | this.emit("error", cleanupError); 247 | } 248 | }); 249 | }); 250 | 251 | callback(error); 252 | } 253 | 254 | createReadStream(options?: ReadStreamOptions): ReadStream { 255 | if (this.destroyed) 256 | throw new ReadAfterDestroyedError( 257 | "A ReadStream cannot be created from a destroyed WriteStream." 258 | ); 259 | 260 | if (this._released) 261 | throw new ReadAfterReleasedError( 262 | "A ReadStream cannot be created from a released WriteStream." 263 | ); 264 | 265 | const readStream = new ReadStream(this, options); 266 | this._readStreams.add(readStream); 267 | 268 | readStream.once("close", (): void => { 269 | this._readStreams.delete(readStream); 270 | 271 | if (this._released && this._readStreams.size === 0) { 272 | this.destroy(); 273 | } 274 | }); 275 | 276 | return readStream; 277 | } 278 | } 279 | 280 | export default { 281 | WriteStream, 282 | ReadStream, 283 | ReadAfterDestroyedError, 284 | ReadAfterReleasedError, 285 | }; 286 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": ["es2018"], 4 | "target": "es2018", 5 | "module": "es2020", 6 | "outDir": "./dist", 7 | "declaration": true, 8 | "declarationDir": "./dist", 9 | "moduleResolution": "node", 10 | "rootDir": "./src", 11 | "strict": true, 12 | "esModuleInterop": true, 13 | "skipLibCheck": false, 14 | "sourceMap": true 15 | }, 16 | "include": ["./src/**/*"] 17 | } 18 | --------------------------------------------------------------------------------