├── .github ├── FUNDING.yml └── workflows │ └── publish.yml ├── .gitignore ├── LICENSE ├── README.md ├── brotli └── mod.ts ├── changelog.md ├── deflate ├── deflate_raw.ts ├── inflate_raw.ts └── mod.ts ├── deno.json ├── deno.lock ├── deps.ts ├── gzip ├── gzip.ts ├── gzip_file.ts ├── gzip_stream.ts ├── mod.ts ├── writer_gunzip.ts └── writer_gzip.ts ├── interface.ts ├── mod.ts ├── tar └── mod.ts ├── tar_archive └── mod.ts ├── test ├── brotli.test.ts ├── deflate.test.ts ├── deno.tar ├── dir │ ├── root.txt │ └── subdir │ │ └── subfile.txt ├── tar.test.ts └── zip.test.ts ├── tgz └── mod.ts ├── tgz_archive └── mod.ts ├── utils └── uint8.ts ├── zip └── mod.ts ├── zip_archive └── archive.ts └── zlib ├── deflate.ts ├── inflate.ts ├── mod.ts └── zlib ├── adler32.ts ├── crc32.ts ├── deflate.ts ├── gzheader.ts ├── inffast.ts ├── inflate.ts ├── inftrees.ts ├── messages.ts ├── status.ts ├── trees.ts └── zstream.ts /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | 2 | # These are supported funding model platforms 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: deno-library # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., 
['link1', 'link2'] 16 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | on: 3 | push: 4 | branches: 5 | - master 6 | 7 | jobs: 8 | publish: 9 | runs-on: ubuntu-latest 10 | 11 | permissions: 12 | contents: read 13 | id-token: write 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - name: Publish package 19 | run: npx jsr publish 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Deno Library 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # compress 2 | 3 | [![JSR Version](https://jsr.io/badges/@deno-library/compress)](https://jsr.io/@deno-library/compress) 4 | [![deno.land/x/compress](https://deno.land/badge/compress/version)](https://deno.land/x/compress) 5 | [![LICENSE](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/deno-library/compress/blob/main/LICENSE) 6 | 7 | Utilities to compress and uncompress for Deno! 8 | 9 | - [x] `tar` 10 | - [x] `deflate` 11 | - [x] `gzip` 12 | - [x] `tgz` 13 | - [x] `zip` 14 | - [x] `brotli` 15 | 16 | ## Changelog 17 | 18 | [changelog](./changelog.md) 19 | 20 | # Usage 21 | 22 | If you want to read and write files, you need the following [permissions](https://deno.land/manual/getting_started/permissions): 23 | 24 | > --allow-read --allow-write 25 | 26 | ## `tar` 27 | 28 | v0.5.5 29 | Use `@std/tar` instead of `@std/archive` due to deprecation 30 | If you want to use the old API, you can do it 31 | 32 | > import { tar } from "jsr:@deno-library/compress/tar_archive/mod.ts"; 33 | 34 | ### Definition 35 | 36 | ```ts 37 | // JSR 38 | import { tar } from "jsr:@deno-library/compress"; 39 | // or 40 | // import { tar } from "jsr:@deno-library/compress@0.5.5"; 41 | // or 42 | // import { tar } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 43 | // or only import tar 44 | // import { tar } from "https://deno.land/x/compress@v0.5.5/tar/mod.ts"; 45 | export interface compressInterface { 46 | excludeSrc?: boolean; // does not contain the src directory 47 | debug?: boolean; // list the files and 
folders 48 | } 49 | export interface uncompressInterface { 50 | debug?: boolean; // list the files and folders 51 | } 52 | tar.compress(src, dest, options?: compressInterface): Promise; 53 | tar.uncompress(src, dest, options?: uncompressInterface): Promise; 54 | ``` 55 | 56 | ### Example 57 | 58 | ```ts 59 | // JSR 60 | import { tar } from "jsr:@deno-library/compress"; 61 | // or 62 | // import { tar } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 63 | // compress folder 64 | await tar.compress("./test", "./test.tar"); 65 | // compress folder, exclude src directory 66 | await tar.compress("./test", "./test.tar", { excludeSrc: true }); 67 | // compress file 68 | await tar.compress("./test.txt", "./test.tar"); 69 | // uncompress 70 | await tar.uncompress("./test.tar", "./dest"); 71 | ``` 72 | 73 | ## `deflate` 74 | 75 | This library contains a pure TypeScript implementation of 76 | [deflate](https://en.wikipedia.org/wiki/Deflate), and you can 77 | use deflate on its own: 78 | 79 | ```ts 80 | // JSR 81 | import { 82 | deflate, 83 | /** Compress data using deflate, and do not append a zlib header. 
*/ 84 | deflateRaw, 85 | inflate, 86 | inflateRaw, 87 | } from "jsr:@deno-library/compress"; 88 | // or 89 | // import { deflate, inflate, deflateRaw, inflateRaw } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 90 | // or only import deflate, inflate, deflateRaw, inflateRaw 91 | // import { deflate, inflate, deflateRaw, inflateRaw } from "https://deno.land/x/compress@v0.5.5/zlib/mod.ts"; 92 | const str = "hello world!"; 93 | const bytes = new TextEncoder().encode(str); 94 | // with zlib header 95 | const compressed = deflate(bytes); 96 | const decompressed = inflate(compressed); 97 | // no zlib header 98 | const compressed = deflateRaw(bytes); 99 | const decompressed = inflateRaw(compressed); 100 | ``` 101 | 102 | ## `gzip` 103 | 104 | ### Definition 105 | 106 | ```ts 107 | interface GzipOptions { 108 | level: number; 109 | timestamp?: number; 110 | name?: string; 111 | } 112 | gzip(bytes: Uint8Array, options?:GzipOptions): Uint8Array; 113 | gunzip(bytes: Uint8Array): Uint8Array; 114 | gzipFile(src: string, dest: string): Promise; 115 | gunzipFile(src: string, dest: string): Promise; 116 | class GzipStream { 117 | compress(src: string, dest: string): Promise; 118 | uncompress(src: string, dest: string): Promise; 119 | on(event: "progress", listener: (percent: string) => void): this; 120 | } 121 | ``` 122 | 123 | ### Example 124 | 125 | Let's compress and uncompress a file. (`gzip` only supports compressing 126 | and decompressing a single file.) 127 | 128 | **stream mode**\ 129 | Useful for reading and writing large files. 
130 | 131 | ```ts 132 | import { GzipStream } from "jsr:@deno-library/compress"; 133 | // or 134 | // import { GzipStream } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 135 | // or only import GzipStream 136 | // import { GzipStream } from "https://deno.land/x/compress@v0.5.5/gzip/mod.ts"; 137 | const gzip = new GzipStream(); 138 | gzip.on("progress", (progress: string) => { 139 | console.log(progress); // 0.00% => 100.00% 140 | }); 141 | await gzip.compress("./big.mkv", "./big.mkv.gz"); 142 | await gzip.uncompress("./big.mkv.gz", "./big.mkv"); 143 | ``` 144 | 145 | **no stream mode**\ 146 | (This is loading all data into memory, so we can't get a `progress` event.) 147 | 148 | ```ts 149 | import { gunzipFile, gzipFile } from "jsr:@deno-library/compress"; 150 | // or 151 | // import { gunzipFile, gzipFile } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 152 | // or only import gzipFile, gunzipFile 153 | // import { gzipFile, gunzipFile } from "https://deno.land/x/compress@v0.5.5/gzip/mod.ts"; 154 | await gzipFile("./deno.txt", "./deno.txt.gz"); 155 | await gunzipFile("./deno.txt.gz", "./deno.txt"); 156 | ``` 157 | 158 | **`gzip` a string or a byte array** 159 | 160 | > This is a pure TypeScript implementation, almost as fast as a Rust 161 | > implementation. 
162 | 163 | ```ts 164 | import { gunzip, gzip } from "jsr:@deno-library/compress"; 165 | // or 166 | // import { gunzip, gzip } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 167 | // or only import gzip, gunzip 168 | // import { gzip, gunzip } from "https://deno.land/x/compress@v0.5.5/zlib/mod.ts"; 169 | // gzip 170 | const bytes = new TextEncoder().encode("hello"); 171 | const compressed = gzip(bytes); 172 | // gunzip 173 | const decompressed = gunzip(compressed); 174 | ``` 175 | 176 | ## `tgz` 177 | 178 | v0.5.5 179 | Use `@std/tar` instead of `@std/archive` due to deprecation 180 | If you want to use the old API, you can do it 181 | 182 | > import { tgz } from "jsr:@deno-library/compress/tgz_archive/mod.ts"; 183 | 184 | ### Definition 185 | 186 | ```ts 187 | import { tgz } from "jsr:@deno-library/compress"; 188 | // or 189 | // import { tgz } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 190 | // or only import tgz 191 | // import { tgz } from "https://deno.land/x/compress@v0.5.5/tgz/mod.ts"; 192 | export interface compressInterface { 193 | excludeSrc?: boolean; // does not contain the src directory 194 | debug?: boolean; // list the files and folders 195 | } 196 | export interface uncompressInterface { 197 | debug?: boolean; // list the files and folders 198 | } 199 | tgz.compress(src, dest, options?: compressInterface): Promise; 200 | tgz.uncompress(src, dest, options?: uncompressInterface): Promise; 201 | ``` 202 | 203 | ### Example 204 | 205 | ```ts 206 | import { tgz } from "jsr:@deno-library/compress"; 207 | // or 208 | // import { tgz } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 209 | // compress folder 210 | await tgz.compress("./test", "./test.tar.gz"); 211 | // compress folder, exclude src directory 212 | await tgz.compress("./test", "./test.tar.gz", { excludeSrc: true }); 213 | // compress file 214 | await tgz.compress("./test.txt", "./test.tar.gz"); 215 | // uncompress 216 | await tgz.uncompress("./test.tar.gz", "./dest"); 217 | ``` 
218 | 219 | ## `zip` 220 | 221 | ### Definition 222 | 223 | ```ts 224 | import { zip } from "jsr:@deno-library/compress"; 225 | // or 226 | // import { zip } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 227 | // or only import zip 228 | // import { zip } from "https://deno.land/x/compress@v0.5.5/zip/mod.ts"; 229 | 230 | export interface compressInterface { 231 | excludeSrc?: boolean; // does not contain the src directory 232 | debug?: boolean; // list the files and folders 233 | } 234 | export interface uncompressInterface { 235 | debug?: boolean; // list the files and folders 236 | } 237 | zip.compress(src, dest, options?: compressInterface): Promise; 238 | zip.uncompress(src, dest, options?: uncompressInterface): Promise; 239 | ``` 240 | 241 | ### Example 242 | 243 | ```ts 244 | import { zip } from "jsr:@deno-library/compress"; 245 | // or 246 | // import { zip } from "https://deno.land/x/compress@v0.5.5/mod.ts"; 247 | // compress folder 248 | await zip.compress("./test", "./test.zip"); 249 | // compress folder, exclude src directory 250 | await zip.compress("./test", "./test.zip", { excludeSrc: true }); 251 | // compress file 252 | await zip.compress("./test.txt", "./test.zip"); 253 | // uncompress 254 | await zip.uncompress("./test.zip", "./dest"); 255 | ``` 256 | 257 | ## `brotli` 258 | 259 | ### Example 260 | 261 | ```ts 262 | import { brotli } from "jsr:@deno-library/compress"; 263 | const compressedBuffer = await brotli.compress(inputString); 264 | //const compressedBuffer = brotli.compressSync(inputString); 265 | 266 | // deno version >= v2.1.8 / 2025.01.30 267 | // https://github.com/denoland/deno/pull/27815 268 | const uncompressedBuffer = await brotli.uncompress(compressedBuffer); 269 | // const uncompressedBuffer = brotli.uncompressSync(compressedBuffer); 270 | ``` 271 | 272 | [brotli example file](test/brotli.test.ts) 273 | 274 | # test 275 | 276 | ```ts 277 | deno test --allow-read --allow-write 278 | ``` 279 | 
-------------------------------------------------------------------------------- /brotli/mod.ts: -------------------------------------------------------------------------------- 1 | import zlib from "node:zlib"; 2 | import { promisify } from "node:util"; 3 | import type { Buffer } from "node:buffer"; 4 | /* Thin wrappers around Node's built-in Brotli codecs from node:zlib: promisified async variants plus the sync originals. NOTE(review): generic type arguments (e.g. Promise<Buffer>) appear to have been stripped by the dump extraction - confirm against the repository. */ 5 | /** Compress data with Brotli asynchronously (promisified zlib.brotliCompress). */ const compress: ( 6 | buffer: zlib.InputType, 7 | options?: zlib.BrotliOptions, 8 | ) => Promise = promisify(zlib.brotliCompress); 9 | /** Compress data with Brotli synchronously (zlib.brotliCompressSync). */ const compressSync: ( 10 | buf: zlib.InputType, 11 | options?: zlib.BrotliOptions, 12 | ) => Buffer = zlib.brotliCompressSync; 13 | /** Decompress Brotli data asynchronously (promisified zlib.brotliDecompress). */ const uncompress: ( 14 | buffer: zlib.InputType, 15 | options?: zlib.BrotliOptions, 16 | ) => Promise = promisify(zlib.brotliDecompress); 17 | /** Decompress Brotli data synchronously (zlib.brotliDecompressSync). */ const uncompressSync: ( 18 | buf: zlib.InputType, 19 | options?: zlib.BrotliOptions, 20 | ) => Buffer = zlib.brotliDecompressSync; 21 | export { compress, compressSync, uncompress, uncompressSync }; 22 | -------------------------------------------------------------------------------- /changelog.md: -------------------------------------------------------------------------------- 1 | ## Changelog 2 | 3 | ### v0.5.6 - 2025.03.26 4 | 5 | - feat: add Brotli support(#21) 6 | 7 | ### v0.5.5 - 2024.11.20 8 | 9 | - fix: fix exists usage error 10 | 11 | ### v0.5.4 - 2024.11.20 12 | 13 | - fix: zip.compress does not close the dest file if src does not exist[#19] 14 | 15 | ### v0.5.3 - 2024.11.20 16 | 17 | - docs: update readme 18 | 19 | ### v0.5.2 - 2024.11.6 20 | 21 | - docs: Add symbol documentation to existing interfaces 22 | 23 | ### v0.5.1 - 2024.11.6 24 | 25 | - refactor(zip): Use `zip-js` instead of using a terminal 26 | - refactor(gzip): Optimize code with the keyword `using` 27 | 28 | ### v0.5.0 - 2024.11.5 29 | 30 | - refactor: Use `@std/tar` instead of `@std/archive` due to deprecation 31 | - refactor: use Deno API for gzip file handling 32 | 33 | ### v0.4.9 - 2024.11.5 34 | 35 | - Add symbol documentation 36 | 37 | ### v0.4.8 - 2024.11.5 38 | 39 | - Added 
support for zip[#14] 40 | 41 | ### v0.4.7 - 2024.11.5 42 | 43 | - Added support for JSR 44 | - Replace 'any' types with specific types 45 | - Migrate from Deno 1.x to 2.x 46 | - fix[#18] 47 | -------------------------------------------------------------------------------- /deflate/inflate_raw.ts: -------------------------------------------------------------------------------- 1 | /* constant parameters */ 2 | const WSIZE = 32768; // Sliding Window size 3 | const STORED_BLOCK = 0; 4 | const STATIC_TREES = 1; 5 | const DYN_TREES = 2; 6 | /* for inflate */ 7 | const lbits = 9; // bits in base literal/length lookup table 8 | const dbits = 6; // bits in base distance lookup table 9 | /* variables (inflate) */ 10 | let slide: number[]; 11 | 12 | let wp: number; // current position in slide 13 | let fixed_tl: null | HuftList = null; // inflate static 14 | let fixed_td: null | HuftList; // inflate static 15 | let fixed_bl: number; // inflate static 16 | let fixed_bd: number; // inflate static 17 | let bit_buf: number; // bit buffer 18 | let bit_len: number; // bits in bit buffer 19 | let method: number; 20 | let eof: boolean; 21 | let copy_leng: number; 22 | let copy_dist: number; 23 | let tl: HuftList | null; // literal length decoder table 24 | let td: HuftList | null; // literal distance decoder table 25 | let bl: number; // number of bits decoded by tl 26 | let bd: number; // number of bits decoded by td 27 | let inflate_data: Uint8Array; 28 | let inflate_pos: number; 29 | /* constant tables (inflate) */ 30 | const MASK_BITS = [ 31 | 0x0000, 32 | 0x0001, 33 | 0x0003, 34 | 0x0007, 35 | 0x000f, 36 | 0x001f, 37 | 0x003f, 38 | 0x007f, 39 | 0x00ff, 40 | 0x01ff, 41 | 0x03ff, 42 | 0x07ff, 43 | 0x0fff, 44 | 0x1fff, 45 | 0x3fff, 46 | 0x7fff, 47 | 0xffff, 48 | ]; 49 | // Tables for deflate from PKZIP's appnote.txt. 
50 | // Copy lengths for literal codes 257..285 51 | const cplens = [ 52 | 3, 53 | 4, 54 | 5, 55 | 6, 56 | 7, 57 | 8, 58 | 9, 59 | 10, 60 | 11, 61 | 13, 62 | 15, 63 | 17, 64 | 19, 65 | 23, 66 | 27, 67 | 31, 68 | 35, 69 | 43, 70 | 51, 71 | 59, 72 | 67, 73 | 83, 74 | 99, 75 | 115, 76 | 131, 77 | 163, 78 | 195, 79 | 227, 80 | 258, 81 | 0, 82 | 0, 83 | ]; 84 | /* note: see note #13 above about the 258 in this list. */ 85 | // Extra bits for literal codes 257..285 86 | const cplext = [ 87 | 0, 88 | 0, 89 | 0, 90 | 0, 91 | 0, 92 | 0, 93 | 0, 94 | 0, 95 | 1, 96 | 1, 97 | 1, 98 | 1, 99 | 2, 100 | 2, 101 | 2, 102 | 2, 103 | 3, 104 | 3, 105 | 3, 106 | 3, 107 | 4, 108 | 4, 109 | 4, 110 | 4, 111 | 5, 112 | 5, 113 | 5, 114 | 5, 115 | 0, 116 | 99, 117 | 99, // 99==invalid 118 | ]; 119 | // Copy offsets for distance codes 0..29 120 | const cpdist = [ 121 | 1, 122 | 2, 123 | 3, 124 | 4, 125 | 5, 126 | 7, 127 | 9, 128 | 13, 129 | 17, 130 | 25, 131 | 33, 132 | 49, 133 | 65, 134 | 97, 135 | 129, 136 | 193, 137 | 257, 138 | 385, 139 | 513, 140 | 769, 141 | 1025, 142 | 1537, 143 | 2049, 144 | 3073, 145 | 4097, 146 | 6145, 147 | 8193, 148 | 12289, 149 | 16385, 150 | 24577, 151 | ]; 152 | // Extra bits for distance codes 153 | const cpdext = [ 154 | 0, 155 | 0, 156 | 0, 157 | 0, 158 | 1, 159 | 1, 160 | 2, 161 | 2, 162 | 3, 163 | 3, 164 | 4, 165 | 4, 166 | 5, 167 | 5, 168 | 6, 169 | 6, 170 | 7, 171 | 7, 172 | 8, 173 | 8, 174 | 9, 175 | 9, 176 | 10, 177 | 10, 178 | 11, 179 | 11, 180 | 12, 181 | 12, 182 | 13, 183 | 13, 184 | ]; 185 | // Order of the bit length code lengths 186 | const border = [ 187 | 16, 188 | 17, 189 | 18, 190 | 0, 191 | 8, 192 | 7, 193 | 9, 194 | 6, 195 | 10, 196 | 5, 197 | 11, 198 | 4, 199 | 12, 200 | 3, 201 | 13, 202 | 2, 203 | 14, 204 | 1, 205 | 15, 206 | ]; 207 | /* objects (inflate) */ 208 | class HuftList { 209 | next: HuftList | null = null; 210 | list: HuftNode[] | null = null; 211 | } 212 | 213 | class HuftNode { 214 | e: number = 0; // number of extra bits or 
operation 215 | b: number = 0; // number of bits in this code or subcode 216 | 217 | // union 218 | n: number = 0; // literal, length base, or distance base 219 | t: HuftNode[] | null = null; // (HuftNode) pointer to next level of table 220 | } 221 | 222 | class HuftBuild { 223 | BMAX = 16; // maximum bit length of any code 224 | N_MAX = 288; // maximum number of codes in any set 225 | status = 0; // 0: success, 1: incomplete table, 2: bad input 226 | root: HuftList | null = null; // (HuftList) starting table 227 | m = 0; // maximum lookup bits, returns actual 228 | /* 229 | * @param b- code lengths in bits (all assumed <= BMAX) 230 | * @param n- number of codes (assumed <= N_MAX) 231 | * @param s- number of simple-valued codes (0..s-1) 232 | * @param d- list of base values for non-simple codes 233 | * @param e- list of extra bits for non-simple codes 234 | * @param mm- maximum lookup bits 235 | */ 236 | constructor( 237 | b: number[], 238 | n: number, 239 | s: number, 240 | d: number[] | null, 241 | e: number[] | null, 242 | mm: number, 243 | ) { 244 | /* Given a list of code lengths and a maximum table size, make a set of 245 | tables to decode that set of codes. Return zero on success, one if 246 | the given code set is incomplete (the tables are still built in this 247 | case), two if the input is invalid (all zero length codes or an 248 | oversubscribed set of lengths), and three if not enough memory. 249 | The code with value 256 is special, and the tables are constructed 250 | so that no bits beyond that code are fetched when that code is 251 | decoded. 
*/ 252 | let a; // counter for codes of length k 253 | const c = []; 254 | let f; // i repeats in table every f entries 255 | let h; // table level 256 | let i; // counter, current code 257 | let j; // counter 258 | let k; // number of bits in current code 259 | const lx = []; 260 | let p; // pointer into c[], b[], or v[] 261 | let pidx; // index of p 262 | let q; // (HuftNode) points to current table 263 | const r = new HuftNode(); // table entry for structure assignment 264 | const u: null[] | HuftNode[][] = []; 265 | const v = []; 266 | let w; 267 | const x = []; 268 | let xp; // pointer into x or c 269 | let y; // number of dummy codes added 270 | let z; // number of entries in current table 271 | let o; 272 | let tail: HuftList | null; // (HuftList) 273 | 274 | tail = this.root = null; 275 | 276 | // bit length count table 277 | for (i = 0; i < this.BMAX + 1; i++) { 278 | c[i] = 0; 279 | } 280 | // stack of bits per table 281 | for (i = 0; i < this.BMAX + 1; i++) { 282 | lx[i] = 0; 283 | } 284 | // HuftNode[BMAX][] table stack 285 | for (i = 0; i < this.BMAX; i++) { 286 | u[i] = null; 287 | } 288 | // values in order of bit length 289 | for (i = 0; i < this.N_MAX; i++) { 290 | v[i] = 0; 291 | } 292 | // bit offsets, then code stack 293 | for (i = 0; i < this.BMAX + 1; i++) { 294 | x[i] = 0; 295 | } 296 | 297 | // Generate counts for each bit length 298 | // length of EOB code (value 256) 299 | const el = n > 256 ? 
b[256] : this.BMAX; // set length of EOB code, if any 300 | p = b; 301 | pidx = 0; 302 | i = n; 303 | do { 304 | c[p[pidx]]++; // assume all entries <= BMAX 305 | pidx++; 306 | } while (--i > 0); 307 | if (c[0] === n) { // null input--all zero length codes 308 | this.root = null; 309 | this.m = 0; 310 | this.status = 0; 311 | return; 312 | } 313 | 314 | // Find minimum and maximum length, bound *m by those 315 | for (j = 1; j <= this.BMAX; j++) { 316 | if (c[j] !== 0) { 317 | break; 318 | } 319 | } 320 | k = j; // minimum code length 321 | if (mm < j) { 322 | mm = j; 323 | } 324 | for (i = this.BMAX; i !== 0; i--) { 325 | if (c[i] !== 0) { 326 | break; 327 | } 328 | } 329 | const g = i; // maximum code length 330 | if (mm > i) { 331 | mm = i; 332 | } 333 | 334 | // Adjust last length count to fill out codes, if needed 335 | for (y = 1 << j; j < i; j++, y <<= 1) { 336 | if ((y -= c[j]) < 0) { 337 | this.status = 2; // bad input: more codes than bits 338 | this.m = mm; 339 | return; 340 | } 341 | } 342 | if ((y -= c[i]) < 0) { 343 | this.status = 2; 344 | this.m = mm; 345 | return; 346 | } 347 | c[i] += y; 348 | 349 | // Generate starting offsets into the value table for each length 350 | x[1] = j = 0; 351 | p = c; 352 | pidx = 1; 353 | xp = 2; 354 | while (--i > 0) { // note that i == g from above 355 | x[xp++] = (j += p[pidx++]); 356 | } 357 | 358 | // Make a table of values in order of bit lengths 359 | p = b; 360 | pidx = 0; 361 | i = 0; 362 | do { 363 | if ((j = p[pidx++]) !== 0) { 364 | v[x[j]++] = i; 365 | } 366 | } while (++i < n); 367 | n = x[g]; // set n to length of v 368 | 369 | // Generate the Huffman codes and for each, make the table entries 370 | x[0] = i = 0; // first Huffman code is zero 371 | p = v; 372 | pidx = 0; // grab values in bit order 373 | h = -1; // no tables yet--level -1 374 | w = lx[0] = 0; // no bits decoded yet 375 | q = null; // ditto 376 | z = 0; // ditto 377 | 378 | // go through the bit lengths (k already is bits in shortest 
code) 379 | for (null; k <= g; k++) { 380 | a = c[k]; 381 | while (a-- > 0) { 382 | // here i is the Huffman code of length k bits for value p[pidx] 383 | // make tables up to required level 384 | while (k > w + lx[1 + h]) { 385 | w += lx[1 + h]; // add bits already decoded 386 | h++; 387 | 388 | // compute minimum size table less than or equal to *m bits 389 | z = (z = g - w) > mm ? mm : z; // upper limit 390 | if ((f = 1 << (j = k - w)) > a + 1) { // try a k-w bit table 391 | // too few codes for k-w bit table 392 | f -= a + 1; // deduct codes from patterns left 393 | xp = k; 394 | while (++j < z) { // try smaller tables up to z bits 395 | if ((f <<= 1) <= c[++xp]) { 396 | break; // enough codes to use up j bits 397 | } 398 | f -= c[xp]; // else deduct codes from patterns 399 | } 400 | } 401 | if (w + j > el && w < el) { 402 | j = el - w; // make EOB code end at table 403 | } 404 | z = 1 << j; // table entries for j-bit table 405 | lx[1 + h] = j; // set table size in stack 406 | 407 | // allocate and link in new table 408 | q = []; 409 | for (o = 0; o < z; o++) { 410 | q[o] = new HuftNode(); 411 | } 412 | 413 | if (!tail) { 414 | tail = this.root = new HuftList(); 415 | } else { 416 | tail = new HuftList(); 417 | tail.next = new HuftList(); 418 | } 419 | tail.next = null; 420 | tail.list = q; 421 | u[h] = q; // table starts after link 422 | 423 | /* connect to last table, if there is one */ 424 | if (h > 0) { 425 | x[h] = i; // save pattern for backing up 426 | r.b = lx[h]; // bits to dump before this table 427 | r.e = 16 + j; // bits in this table 428 | r.t = q; // pointer to this table 429 | j = (i & ((1 << w) - 1)) >> (w - lx[h]); 430 | const tmp = u[h - 1]; 431 | if (tmp) { 432 | tmp[j].e = r.e; 433 | tmp[j].b = r.b; 434 | tmp[j].n = r.n; 435 | tmp[j].t = r.t; 436 | } 437 | } 438 | } 439 | 440 | // set up table entry in r 441 | r.b = k - w; 442 | if (pidx >= n) { 443 | r.e = 99; // out of values--invalid code 444 | } else if (p[pidx] < s) { 445 | r.e = 
(p[pidx] < 256 ? 16 : 15); // 256 is end-of-block code 446 | r.n = p[pidx++]; // simple code is just the value 447 | } else { 448 | if (e) r.e = e[p[pidx] - s]; // non-simple--look up in lists 449 | if (d) r.n = d[p[pidx++] - s]; 450 | } 451 | 452 | // fill code-like entries with r // 453 | f = 1 << (k - w); 454 | for (j = i >> w; j < z; j += f) { 455 | if (q) { 456 | q[j].e = r.e; 457 | q[j].b = r.b; 458 | q[j].n = r.n; 459 | q[j].t = r.t; 460 | } 461 | } 462 | 463 | // backwards increment the k-bit code i 464 | for (j = 1 << (k - 1); (i & j) !== 0; j >>= 1) { 465 | i ^= j; 466 | } 467 | i ^= j; 468 | 469 | // backup over finished tables 470 | while ((i & ((1 << w) - 1)) !== x[h]) { 471 | w -= lx[h]; // don't need to update q 472 | h--; 473 | } 474 | } 475 | } 476 | 477 | /* return actual size of base table */ 478 | this.m = lx[1]; 479 | 480 | /* Return true (1) if we were given an incomplete table */ 481 | this.status = ((y !== 0 && g !== 1) ? 1 : 0); 482 | } 483 | } 484 | /* routines (inflate) */ 485 | 486 | function GET_BYTE() { 487 | if (inflate_data.length === inflate_pos) { 488 | return -1; 489 | } 490 | return inflate_data[inflate_pos++] & 0xff; 491 | } 492 | 493 | function NEEDBITS(n: number) { 494 | while (bit_len < n) { 495 | bit_buf |= GET_BYTE() << bit_len; 496 | bit_len += 8; 497 | } 498 | } 499 | 500 | function GETBITS(n: number) { 501 | return bit_buf & MASK_BITS[n]; 502 | } 503 | 504 | function DUMPBITS(n: number) { 505 | bit_buf >>= n; 506 | bit_len -= n; 507 | } 508 | 509 | function inflate_codes(buff: number[], off: number, size: number) { 510 | // inflate (decompress) the codes in a deflated (compressed) block. 511 | // Return an error code or zero if it all goes ok. 
// NOTE(review): the functions below are a port of the classic public-domain
// DEFLATE decoder. They operate on module-level state declared earlier in
// this file: bit_buf/bit_len (bit accumulator), wp + slide (sliding window
// of WSIZE bytes), tl/td/bl/bd (current literal/distance Huffman tables),
// copy_leng/copy_dist (pending back-reference), method (current block type),
// eof, and inflate_data/inflate_pos (input cursor). Because that state is
// module-global, this decoder is not re-entrant.

function inflate_stored(buff: number[], off: number, size: number) {
  /* "decompress" an inflated type 0 (stored) block.
   */
  let n; // scratch: skip count, then LEN, then output byte count

  // go to byte boundary
  n = bit_len & 7;
  DUMPBITS(n);

  // get the length and its complement (LEN / NLEN per RFC 1951 §3.2.4)
  NEEDBITS(16);
  n = GETBITS(16);
  DUMPBITS(16);
  NEEDBITS(16);
  if (n !== ((~bit_buf) & 0xffff)) {
    return -1; // error in compressed data
  }
  DUMPBITS(16);

  // read and output the compressed data
  copy_leng = n;

  n = 0;
  while (copy_leng > 0 && n < size) {
    copy_leng--;
    wp &= WSIZE - 1; // wrap the window write pointer
    NEEDBITS(8);
    buff[off + n++] = slide[wp++] = GETBITS(8);
    DUMPBITS(8);
  }

  if (copy_leng === 0) {
    method = -1; // done; leftover copy_leng resumes on the next call otherwise
  }
  return n;
}

function inflate_fixed(buff: number[], off: number, size: number) {
  // decompress an inflated type 1 (fixed Huffman codes) block. We should
  // either replace this with a custom decoder, or at least precompute the
  // Huffman tables.

  // if first time, set up tables for fixed blocks
  if (!fixed_tl) {
    let i; // temporary variable
    const l = []; // 288 length list for huft_build (initialized below)
    let h; // HuftBuild

    // literal table: the fixed code lengths prescribed by RFC 1951 §3.2.6
    for (i = 0; i < 144; i++) {
      l[i] = 8;
    }
    for (null; i < 256; i++) {
      l[i] = 9;
    }
    for (null; i < 280; i++) {
      l[i] = 7;
    }
    for (null; i < 288; i++) { // make a complete, but wrong code set
      l[i] = 8;
    }
    fixed_bl = 7;

    h = new HuftBuild(l, 288, 257, cplens, cplext, fixed_bl);
    if (h.status !== 0) {
      console.error("HufBuild error: " + h.status);
      return -1;
    }
    fixed_tl = h.root;
    fixed_bl = h.m;

    // distance table: all 5-bit codes
    for (i = 0; i < 30; i++) { // make an incomplete code set
      l[i] = 5;
    }
    fixed_bd = 5;

    h = new HuftBuild(l, 30, 0, cpdist, cpdext, fixed_bd);
    // status 1 (incomplete set) is expected here, hence `> 1` not `!== 0`
    if (h.status > 1) {
      fixed_tl = null;
      console.error("HufBuild error: " + h.status);
      return -1;
    }
    fixed_td = h.root;
    fixed_bd = h.m;
  }

  // install the (cached) fixed tables as the current tables and decode
  tl = fixed_tl;
  td = fixed_td;
  bl = fixed_bl;
  bd = fixed_bd;
  return inflate_codes(buff, off, size);
}

function inflate_dynamic(buff: number[], off: number, size: number) {
  // decompress an inflated type 2 (dynamic Huffman codes) block.
  let i; // temporary variables
  let j;
  let l; // last length
  let t; // (HuftNode) literal/length code table
  const ll = []; // code lengths: first literal/length, then distance
  let h; // (HuftBuild)

  // literal/length and distance code lengths
  for (i = 0; i < 286 + 30; i++) {
    ll[i] = 0;
  }

  // read in table lengths (HLIT, HDIST, HCLEN per RFC 1951 §3.2.7)
  NEEDBITS(5);
  const nl = 257 + GETBITS(5); // number of literal/length codes
  DUMPBITS(5);
  NEEDBITS(5);
  const nd = 1 + GETBITS(5); // number of distance codes
  DUMPBITS(5);
  NEEDBITS(4);
  const nb = 4 + GETBITS(4); // number of bit length codes
  DUMPBITS(4);
  if (nl > 286 || nd > 30) {
    return -1; // bad lengths
  }

  // read in bit-length-code lengths, in the spec's permuted `border` order
  for (j = 0; j < nb; j++) {
    NEEDBITS(3);
    ll[border[j]] = GETBITS(3);
    DUMPBITS(3);
  }
  for (null; j < 19; j++) {
    ll[border[j]] = 0;
  }

  // build decoding table for trees--single level, 7 bit lookup
  bl = 7;
  h = new HuftBuild(ll, 19, 19, null, null, bl);
  if (h.status !== 0) {
    return -1; // incomplete code set
  }

  tl = h.root;
  bl = h.m;

  // read in literal and distance code lengths
  const n = nl + nd; // number of lengths to get
  i = l = 0;
  while (i < n) {
    NEEDBITS(bl);
    t = tl!.list![GETBITS(bl)];
    j = t.b;
    DUMPBITS(j);
    j = t.n;
    if (j < 16) { // length of code in bits (0..15)
      ll[i++] = l = j; // save last length in l
    } else if (j === 16) { // repeat last length 3 to 6 times
      NEEDBITS(2);
      j = 3 + GETBITS(2);
      DUMPBITS(2);
      if (i + j > n) {
        return -1;
      }
      while (j-- > 0) {
        ll[i++] = l;
      }
    } else if (j === 17) { // 3 to 10 zero length codes
      NEEDBITS(3);
      j = 3 + GETBITS(3);
      DUMPBITS(3);
      if (i + j > n) {
        return -1;
      }
      while (j-- > 0) {
        ll[i++] = 0;
      }
      l = 0;
    } else { // j === 18: 11 to 138 zero length codes
      NEEDBITS(7);
      j = 11 + GETBITS(7);
      DUMPBITS(7);
      if (i + j > n) {
        return -1;
      }
      while (j-- > 0) {
        ll[i++] = 0;
      }
      l = 0;
    }
  }

  // build the decoding tables for literal/length and distance codes
  bl = lbits;
  h = new HuftBuild(ll, nl, 257, cplens, cplext, bl);
  if (bl === 0) { // no literals or lengths
    h.status = 1;
  }
  if (h.status !== 0) {
    if (h.status !== 1) {
      return -1; // incomplete code set
    }
    // **incomplete literal tree**
  }
  tl = h.root;
  bl = h.m;

  // distance lengths were stored after the nl literal lengths; shift down
  for (i = 0; i < nd; i++) {
    ll[i] = ll[i + nl];
  }
  bd = dbits;
  h = new HuftBuild(ll, nd, 0, cpdist, cpdext, bd);
  td = h.root;
  bd = h.m;

  if (bd === 0 && nl > 257) { // lengths but no distances
    // **incomplete distance tree**
    return -1;
  }
  /*
  if (h.status === 1) {
    // **incomplete distance tree**
  }
  */
  if (h.status !== 0) {
    return -1;
  }

  // decompress until an end-of-block code
  return inflate_codes(buff, off, size);
}

// Resets all decoder state for a fresh stream.
function inflate_start() {
  if (!slide) {
    slide = []; // new Array(2 * WSIZE); // slide.length is never called
  }
  wp = 0;
  bit_buf = 0;
  bit_len = 0;
  method = -1; // -1 = "between blocks": read a new block header next
  eof = false;
  copy_leng = copy_dist = 0;
  tl = null;
}

// Decompresses up to `size` bytes into buff[off..], dispatching on the
// current block type. Returns the number of bytes produced, or -1 on error.
function inflate_internal(buff: number[], off: number, size: number) {
  // decompress an inflated entry
  let n, i;

  n = 0;
  while (n < size) {
    if (eof && method === -1) {
      return n;
    }

    // finish any back-reference / stored run left over from the last call
    if (copy_leng > 0) {
      if (method !== STORED_BLOCK) {
        // STATIC_TREES or DYN_TREES
        while (copy_leng > 0 && n < size) {
          copy_leng--;
          copy_dist &= WSIZE - 1;
          wp &= WSIZE - 1;
          buff[off + n++] = slide[wp++] = slide[copy_dist++];
        }
      } else {
        while (copy_leng > 0 && n < size) {
          copy_leng--;
          wp &= WSIZE - 1;
          NEEDBITS(8);
          buff[off + n++] = slide[wp++] = GETBITS(8);
          DUMPBITS(8);
        }
        if (copy_leng === 0) {
          method = -1; // done
        }
      }
      if (n === size) {
        return n;
      }
    }

    if (method === -1) {
      if (eof) {
        break;
      }

      // read in last block bit (BFINAL)
      NEEDBITS(1);
      if (GETBITS(1) !== 0) {
        eof = true;
      }
      DUMPBITS(1);

      // read in block type (BTYPE)
      NEEDBITS(2);
      method = GETBITS(2);
      DUMPBITS(2);
      tl = null; // force table (re)build for the new block
      copy_leng = 0;
    }

    switch (method) {
      case STORED_BLOCK:
        i = inflate_stored(buff, off + n, size - n);
        break;

      case STATIC_TREES:
        if (tl) {
          i = inflate_codes(buff, off + n, size - n);
        } else {
          i = inflate_fixed(buff, off + n, size - n);
        }
        break;

      case DYN_TREES:
        if (tl) {
          i = inflate_codes(buff, off + n, size - n);
        } else {
          i = inflate_dynamic(buff, off + n, size - n);
        }
        break;

      default: // error
        i = -1;
        break;
    }

    if (i === -1) {
      if (eof) {
        return 0;
      }
      return -1;
    }
    n += i;
  }
  return n;
}

/**
 * Decompresses a raw Uint8Array data.
 * @param arr - The Uint8Array data to decompress.
 * @returns The decompressed Uint8Array data.
 */
export function inflateRaw(arr: Uint8Array): Uint8Array {
  let i;
  const buff: number[] = [];

  inflate_start();
  inflate_data = arr; // module-global input cursor — not re-entrant
  inflate_pos = 0;

  // pull 1024-byte chunks until the decoder stops producing output
  do {
    i = inflate_internal(buff, buff.length, 1024);
  } while (i > 0);

  return new Uint8Array(buff);
}
13 | */ 14 | export { inflateRaw } from "./inflate_raw.ts"; 15 | -------------------------------------------------------------------------------- /deno.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@deno-library/compress", 3 | "version": "0.5.6", 4 | "exports": "./mod.ts" 5 | } -------------------------------------------------------------------------------- /deno.lock: -------------------------------------------------------------------------------- 1 | { 2 | "version": "4", 3 | "specifiers": { 4 | "jsr:@deno-library/crc32@1.0.2": "1.0.2", 5 | "jsr:@std/archive@0.225.4": "0.225.4", 6 | "jsr:@std/assert@*": "1.0.6", 7 | "jsr:@std/bytes@^1.0.2": "1.0.3", 8 | "jsr:@std/fs@1.0.5": "1.0.5", 9 | "jsr:@std/internal@^1.0.4": "1.0.5", 10 | "jsr:@std/io@0.225.0": "0.225.0", 11 | "jsr:@std/io@~0.224.9": "0.224.9", 12 | "jsr:@std/path@1.0.8": "1.0.8", 13 | "jsr:@std/path@^1.0.7": "1.0.8", 14 | "jsr:@std/streams@^1.0.7": "1.0.8", 15 | "jsr:@std/tar@0.1.3": "0.1.3", 16 | "jsr:@std/testing@*": "1.0.4", 17 | "jsr:@zip-js/zip-js@*": "2.7.53", 18 | "jsr:@zip-js/zip-js@2.7.53": "2.7.53", 19 | "npm:@types/node@*": "22.5.4" 20 | }, 21 | "jsr": { 22 | "@deno-library/crc32@1.0.2": { 23 | "integrity": "d2061bfee30c87c97f285dfca0fdc4458e632dc072a33ecfc73ca5177a5a39a0" 24 | }, 25 | "@std/archive@0.225.4": { 26 | "integrity": "59fe5d1834cbb6a2a7913b102d41c11d51475328d5b843bea75b94a40b44a115", 27 | "dependencies": [ 28 | "jsr:@std/io@~0.224.9" 29 | ] 30 | }, 31 | "@std/assert@1.0.6": { 32 | "integrity": "1904c05806a25d94fe791d6d883b685c9e2dcd60e4f9fc30f4fc5cf010c72207", 33 | "dependencies": [ 34 | "jsr:@std/internal" 35 | ] 36 | }, 37 | "@std/bytes@1.0.3": { 38 | "integrity": "e5d5b9e685966314e4edb4be60dfc4bd7624a075bfd4ec8109252b4320f76452" 39 | }, 40 | "@std/fs@1.0.5": { 41 | "integrity": "41806ad6823d0b5f275f9849a2640d87e4ef67c51ee1b8fb02426f55e02fd44e", 42 | "dependencies": [ 43 | "jsr:@std/path@^1.0.7" 44 | ] 45 | }, 46 | 
"@std/internal@1.0.5": { 47 | "integrity": "54a546004f769c1ac9e025abd15a76b6671ddc9687e2313b67376125650dc7ba" 48 | }, 49 | "@std/io@0.224.9": { 50 | "integrity": "4414664b6926f665102e73c969cfda06d2c4c59bd5d0c603fd4f1b1c840d6ee3", 51 | "dependencies": [ 52 | "jsr:@std/bytes" 53 | ] 54 | }, 55 | "@std/io@0.225.0": { 56 | "integrity": "c1db7c5e5a231629b32d64b9a53139445b2ca640d828c26bf23e1c55f8c079b3", 57 | "dependencies": [ 58 | "jsr:@std/bytes" 59 | ] 60 | }, 61 | "@std/path@1.0.8": { 62 | "integrity": "548fa456bb6a04d3c1a1e7477986b6cffbce95102d0bb447c67c4ee70e0364be" 63 | }, 64 | "@std/streams@1.0.8": { 65 | "integrity": "b41332d93d2cf6a82fe4ac2153b930adf1a859392931e2a19d9fabfb6f154fb3" 66 | }, 67 | "@std/tar@0.1.3": { 68 | "integrity": "531270fc707b37ab9b5f051aa4943e7b16b86905e0398a4ebe062983b0c93115", 69 | "dependencies": [ 70 | "jsr:@std/streams" 71 | ] 72 | }, 73 | "@std/testing@1.0.4": { 74 | "integrity": "ca1368d720b183f572d40c469bb9faf09643ddd77b54f8b44d36ae6b94940576" 75 | }, 76 | "@zip-js/zip-js@2.7.53": { 77 | "integrity": "acea5bd8e01feb3fe4c242cfbde7d33dd5e006549a4eb1d15283bc0c778ed672" 78 | } 79 | }, 80 | "npm": { 81 | "@types/node@22.5.4": { 82 | "integrity": "sha512-FDuKUJQm/ju9fT/SeX/6+gBzoPzlVCzfzmGkwKvRHQVxi4BntVbyIwf6a4Xn62mrvndLiml6z/UBXIdEVjQLXg==", 83 | "dependencies": [ 84 | "undici-types" 85 | ] 86 | }, 87 | "undici-types@6.19.8": { 88 | "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==" 89 | } 90 | }, 91 | "redirects": { 92 | "https://deno.land/std/testing/asserts.ts": "https://deno.land/std@0.224.0/testing/asserts.ts" 93 | }, 94 | "remote": { 95 | "https://deno.land/std@0.129.0/fmt/colors.ts": "30455035d6d728394781c10755351742dd731e3db6771b1843f9b9e490104d37", 96 | "https://deno.land/std@0.129.0/testing/_diff.ts": "9d849cd6877694152e01775b2d93f9d6b7aef7e24bfe3bfafc4d7a1ac8e9f392", 97 | "https://deno.land/std@0.129.0/testing/asserts.ts": 
"0a95d9e8076dd3e7f0eeb605a67c148078b4b11f4abcd5eef115b0361b0736a2", 98 | "https://deno.land/std@0.147.0/_util/assert.ts": "e94f2eb37cebd7f199952e242c77654e43333c1ac4c5c700e929ea3aa5489f74", 99 | "https://deno.land/std@0.147.0/_util/os.ts": "3b4c6e27febd119d36a416d7a97bd3b0251b77c88942c8f16ee5953ea13e2e49", 100 | "https://deno.land/std@0.147.0/flags/mod.ts": "594472736e24b2f2afd3451cf7ccd58a21706ce91006478a544fdfa056c69697", 101 | "https://deno.land/std@0.147.0/path/_constants.ts": "df1db3ffa6dd6d1252cc9617e5d72165cd2483df90e93833e13580687b6083c3", 102 | "https://deno.land/std@0.147.0/path/_interface.ts": "ee3b431a336b80cf445441109d089b70d87d5e248f4f90ff906820889ecf8d09", 103 | "https://deno.land/std@0.147.0/path/_util.ts": "c1e9686d0164e29f7d880b2158971d805b6e0efc3110d0b3e24e4b8af2190d2b", 104 | "https://deno.land/std@0.147.0/path/common.ts": "bee563630abd2d97f99d83c96c2fa0cca7cee103e8cb4e7699ec4d5db7bd2633", 105 | "https://deno.land/std@0.147.0/path/glob.ts": "cb5255638de1048973c3e69e420c77dc04f75755524cb3b2e160fe9277d939ee", 106 | "https://deno.land/std@0.147.0/path/mod.ts": "4945b430b759b0b3d98f2a278542cbcf95e0ad2bd8eaaed3c67322b306b2b346", 107 | "https://deno.land/std@0.147.0/path/posix.ts": "c1f7afe274290ea0b51da07ee205653b2964bd74909a82deb07b69a6cc383aaa", 108 | "https://deno.land/std@0.147.0/path/separator.ts": "fe1816cb765a8068afb3e8f13ad272351c85cbc739af56dacfc7d93d710fe0f9", 109 | "https://deno.land/std@0.147.0/path/win32.ts": "bd7549042e37879c68ff2f8576a25950abbfca1d696d41d82c7bca0b7e6f452c", 110 | "https://deno.land/std@0.224.0/assert/_constants.ts": "a271e8ef5a573f1df8e822a6eb9d09df064ad66a4390f21b3e31f820a38e0975", 111 | "https://deno.land/std@0.224.0/assert/assert.ts": "09d30564c09de846855b7b071e62b5974b001bb72a4b797958fe0660e7849834", 112 | "https://deno.land/std@0.224.0/assert/assert_almost_equals.ts": "9e416114322012c9a21fa68e187637ce2d7df25bcbdbfd957cd639e65d3cf293", 113 | "https://deno.land/std@0.224.0/assert/assert_array_includes.ts": 
"14c5094471bc8e4a7895fc6aa5a184300d8a1879606574cb1cd715ef36a4a3c7", 114 | "https://deno.land/std@0.224.0/assert/assert_equals.ts": "3bbca947d85b9d374a108687b1a8ba3785a7850436b5a8930d81f34a32cb8c74", 115 | "https://deno.land/std@0.224.0/assert/assert_exists.ts": "43420cf7f956748ae6ed1230646567b3593cb7a36c5a5327269279c870c5ddfd", 116 | "https://deno.land/std@0.224.0/assert/assert_false.ts": "3e9be8e33275db00d952e9acb0cd29481a44fa0a4af6d37239ff58d79e8edeff", 117 | "https://deno.land/std@0.224.0/assert/assert_greater.ts": "5e57b201fd51b64ced36c828e3dfd773412c1a6120c1a5a99066c9b261974e46", 118 | "https://deno.land/std@0.224.0/assert/assert_greater_or_equal.ts": "9870030f997a08361b6f63400273c2fb1856f5db86c0c3852aab2a002e425c5b", 119 | "https://deno.land/std@0.224.0/assert/assert_instance_of.ts": "e22343c1fdcacfaea8f37784ad782683ec1cf599ae9b1b618954e9c22f376f2c", 120 | "https://deno.land/std@0.224.0/assert/assert_is_error.ts": "f856b3bc978a7aa6a601f3fec6603491ab6255118afa6baa84b04426dd3cc491", 121 | "https://deno.land/std@0.224.0/assert/assert_less.ts": "60b61e13a1982865a72726a5fa86c24fad7eb27c3c08b13883fb68882b307f68", 122 | "https://deno.land/std@0.224.0/assert/assert_less_or_equal.ts": "d2c84e17faba4afe085e6c9123a63395accf4f9e00150db899c46e67420e0ec3", 123 | "https://deno.land/std@0.224.0/assert/assert_match.ts": "ace1710dd3b2811c391946954234b5da910c5665aed817943d086d4d4871a8b7", 124 | "https://deno.land/std@0.224.0/assert/assert_not_equals.ts": "78d45dd46133d76ce624b2c6c09392f6110f0df9b73f911d20208a68dee2ef29", 125 | "https://deno.land/std@0.224.0/assert/assert_not_instance_of.ts": "3434a669b4d20cdcc5359779301a0588f941ffdc2ad68803c31eabdb4890cf7a", 126 | "https://deno.land/std@0.224.0/assert/assert_not_match.ts": "df30417240aa2d35b1ea44df7e541991348a063d9ee823430e0b58079a72242a", 127 | "https://deno.land/std@0.224.0/assert/assert_not_strict_equals.ts": "37f73880bd672709373d6dc2c5f148691119bed161f3020fff3548a0496f71b8", 128 | 
"https://deno.land/std@0.224.0/assert/assert_object_match.ts": "411450fd194fdaabc0089ae68f916b545a49d7b7e6d0026e84a54c9e7eed2693", 129 | "https://deno.land/std@0.224.0/assert/assert_rejects.ts": "4bee1d6d565a5b623146a14668da8f9eb1f026a4f338bbf92b37e43e0aa53c31", 130 | "https://deno.land/std@0.224.0/assert/assert_strict_equals.ts": "b4f45f0fd2e54d9029171876bd0b42dd9ed0efd8f853ab92a3f50127acfa54f5", 131 | "https://deno.land/std@0.224.0/assert/assert_string_includes.ts": "496b9ecad84deab72c8718735373feb6cdaa071eb91a98206f6f3cb4285e71b8", 132 | "https://deno.land/std@0.224.0/assert/assert_throws.ts": "c6508b2879d465898dab2798009299867e67c570d7d34c90a2d235e4553906eb", 133 | "https://deno.land/std@0.224.0/assert/assertion_error.ts": "ba8752bd27ebc51f723702fac2f54d3e94447598f54264a6653d6413738a8917", 134 | "https://deno.land/std@0.224.0/assert/equal.ts": "bddf07bb5fc718e10bb72d5dc2c36c1ce5a8bdd3b647069b6319e07af181ac47", 135 | "https://deno.land/std@0.224.0/assert/fail.ts": "0eba674ffb47dff083f02ced76d5130460bff1a9a68c6514ebe0cdea4abadb68", 136 | "https://deno.land/std@0.224.0/assert/mod.ts": "48b8cb8a619ea0b7958ad7ee9376500fe902284bb36f0e32c598c3dc34cbd6f3", 137 | "https://deno.land/std@0.224.0/assert/unimplemented.ts": "8c55a5793e9147b4f1ef68cd66496b7d5ba7a9e7ca30c6da070c1a58da723d73", 138 | "https://deno.land/std@0.224.0/assert/unreachable.ts": "5ae3dbf63ef988615b93eb08d395dda771c96546565f9e521ed86f6510c29e19", 139 | "https://deno.land/std@0.224.0/fmt/colors.ts": "508563c0659dd7198ba4bbf87e97f654af3c34eb56ba790260f252ad8012e1c5", 140 | "https://deno.land/std@0.224.0/internal/diff.ts": "6234a4b493ebe65dc67a18a0eb97ef683626a1166a1906232ce186ae9f65f4e6", 141 | "https://deno.land/std@0.224.0/internal/format.ts": "0a98ee226fd3d43450245b1844b47003419d34d210fa989900861c79820d21c2", 142 | "https://deno.land/std@0.224.0/internal/mod.ts": "534125398c8e7426183e12dc255bb635d94e06d0f93c60a297723abe69d3b22e", 143 | "https://deno.land/std@0.224.0/testing/asserts.ts": 
"d0cdbabadc49cc4247a50732ee0df1403fdcd0f95360294ad448ae8c240f3f5c" 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /deps.ts: -------------------------------------------------------------------------------- 1 | export { EventEmitter } from "node:events"; 2 | export { ensureDir } from "jsr:@std/fs@1.0.5"; 3 | export * as path from "jsr:@std/path@1.0.8"; 4 | export { Buffer, copy, readAll, writeAll } from "jsr:@std/io@0.225.0"; 5 | export { crc32, Crc32Stream } from "jsr:@deno-library/crc32@1.0.2"; 6 | export type { Reader, Writer } from "jsr:@std/io@0.225.0/types"; 7 | export { 8 | TarStream, 9 | type TarStreamInput, 10 | UntarStream, 11 | } from "jsr:@std/tar@0.1.3"; 12 | export { 13 | type EntryMetaData, 14 | terminateWorkers, 15 | ZipReader, 16 | ZipReaderStream, 17 | ZipWriter, 18 | ZipWriterStream, 19 | } from "jsr:@zip-js/zip-js@2.7.53"; 20 | -------------------------------------------------------------------------------- /gzip/gzip.ts: -------------------------------------------------------------------------------- 1 | import { crc32 } from "../deps.ts"; 2 | /** very fast */ 3 | import { deflateRaw, inflateRaw } from "../zlib/mod.ts"; 4 | /** slow */ 5 | // import { deflateRaw, inflateRaw } from "../deflate/mod.ts"; 6 | 7 | // magic numbers marking this file as GZIP 8 | const ID1 = 0x1F; 9 | const ID2 = 0x8B; 10 | 11 | const compressionMethods = { 12 | "deflate": 8, 13 | }; 14 | const possibleFlags = { 15 | "FTEXT": 0x01, 16 | "FHCRC": 0x02, 17 | "FEXTRA": 0x04, 18 | "FNAME": 0x08, 19 | "FCOMMENT": 0x10, 20 | }; 21 | // const osMap = { 22 | // "fat": 0, // FAT file system (DOS, OS/2, NT) + PKZIPW 2.50 VFAT, NTFS 23 | // "amiga": 1, // Amiga 24 | // "vmz": 2, // VMS (VAX or Alpha AXP) 25 | // "unix": 3, // Unix 26 | // "vm/cms": 4, // VM/CMS 27 | // "atari": 5, // Atari 28 | // "hpfs": 6, // HPFS file system (OS/2, NT 3.x) 29 | // "macintosh": 7, // Macintosh 30 | // "z-system": 8, // Z-System 31 | // 
"cplm": 9, // CP/M 32 | // "tops-20": 10, // TOPS-20 33 | // "ntfs": 11, // NTFS file system (NT) 34 | // "qdos": 12, // SMS/QDOS 35 | // "acorn": 13, // Acorn RISC OS 36 | // "vfat": 14, // VFAT file system (Win95, NT) 37 | // "vms": 15, // MVS (code also taken for PRIMOS) 38 | // "beos": 16, // BeOS (BeBox or PowerMac) 39 | // "tandem": 17, // Tandem/NSK 40 | // "theos": 18, // THEOS 41 | // }; 42 | const os = { 43 | "darwin": 3, 44 | "linux": 3, 45 | "windows": 0, 46 | }; 47 | 48 | const osCode = os[Deno.build.os as keyof typeof os] ?? 255; 49 | export const DEFAULT_LEVEL = 6; 50 | 51 | function putByte(n: number, arr: number[]) { 52 | arr.push(n & 0xFF); 53 | } 54 | 55 | // LSB first 56 | function putShort(n: number, arr: number[]) { 57 | arr.push(n & 0xFF); 58 | arr.push(n >>> 8); 59 | } 60 | 61 | // LSB first 62 | export function putLong(n: number, arr: number[]) { 63 | putShort(n & 0xffff, arr); 64 | putShort(n >>> 16, arr); 65 | } 66 | 67 | function putString(s: string, arr: number[]) { 68 | for (let i = 0, len = s.length; i < len; i += 1) { 69 | putByte(s.charCodeAt(i), arr); 70 | } 71 | } 72 | 73 | function readByte(arr: number[]): number { 74 | return arr.shift()!; 75 | } 76 | 77 | function readShort(arr: number[]) { 78 | return arr.shift()! | (arr.shift()! 
<< 8); 79 | } 80 | 81 | function readLong(arr: number[]) { 82 | const n1 = readShort(arr); 83 | let n2 = readShort(arr); 84 | 85 | // JavaScript can't handle bits in the position 32 86 | // we'll emulate this by removing the left-most bit (if it exists) 87 | // and add it back in via multiplication, which does work 88 | if (n2 > 32768) { 89 | n2 -= 32768; 90 | 91 | return ((n2 << 16) | n1) + 32768 * Math.pow(2, 16); 92 | } 93 | 94 | return (n2 << 16) | n1; 95 | } 96 | 97 | function readString(arr: number[]) { 98 | const charArr = []; 99 | 100 | // turn all bytes into chars until the terminating null 101 | while (arr[0] !== 0) { 102 | charArr.push(String.fromCharCode(arr.shift()!)); 103 | } 104 | 105 | // throw away terminating null 106 | arr.shift(); 107 | 108 | // join all characters into a cohesive string 109 | return charArr.join(""); 110 | } 111 | 112 | function readBytes(arr: number[], n: number) { 113 | const ret = []; 114 | for (let i = 0; i < n; i += 1) { 115 | ret.push(arr.shift()); 116 | } 117 | return ret; 118 | } 119 | 120 | interface Options { 121 | level?: number; 122 | timestamp?: number; 123 | name?: string; 124 | } 125 | 126 | /** 127 | * Generates the header for a GZIP file. 128 | * @param {Options} [options={}] - Optional parameters. 129 | * @returns {Uint8Array} - The byte array of the GZIP file header. 130 | */ 131 | export function getHeader( 132 | options: Options = {}, 133 | ): Uint8Array { 134 | let flags = 0; 135 | const level: number = options.level ?? DEFAULT_LEVEL; 136 | const out: number[] = []; 137 | 138 | putByte(ID1, out); 139 | putByte(ID2, out); 140 | 141 | putByte(compressionMethods["deflate"], out); 142 | 143 | if (options.name) { 144 | flags |= possibleFlags["FNAME"]; 145 | } 146 | 147 | putByte(flags, out); 148 | putLong(options.timestamp ?? 
Math.floor(Date.now() / 1000), out); 149 | 150 | // put deflate args (extra flags) 151 | if (level === 1) { 152 | // fastest algorithm 153 | putByte(4, out); 154 | } else if (level === 9) { 155 | // maximum compression (fastest algorithm) 156 | putByte(2, out); 157 | } else { 158 | putByte(0, out); 159 | } 160 | 161 | // OS identifier 162 | putByte(osCode, out); 163 | 164 | if (options.name) { 165 | // ignore the directory part 166 | putString(options.name.substring(options.name.lastIndexOf("/") + 1), out); 167 | 168 | // terminating null 169 | putByte(0, out); 170 | } 171 | 172 | return new Uint8Array(out); 173 | } 174 | 175 | /** 176 | * @module gzip 177 | * @description This module provides functions to compress and decompress data using the GZIP format. 178 | * @example 179 | * const compressed = gzip(data); 180 | * const decompressed = gunzip(compressed); 181 | * @typedef {Object} Options 182 | * @property {number} [level] - Compression level (default is 6). 183 | * @property {number} [timestamp] - Timestamp for the header. 184 | * @property {string} [name] - Original name of the file. 185 | */ 186 | export function gzip( 187 | bytes: Uint8Array, 188 | options: Options = {}, 189 | ): Uint8Array { 190 | let flags = 0; 191 | const level: number = options.level ?? DEFAULT_LEVEL; 192 | const out: number[] = []; 193 | 194 | putByte(ID1, out); 195 | putByte(ID2, out); 196 | 197 | putByte(compressionMethods["deflate"], out); 198 | 199 | if (options.name) { 200 | flags |= possibleFlags["FNAME"]; 201 | } 202 | 203 | putByte(flags, out); 204 | putLong(options.timestamp ?? 
Math.floor(Date.now() / 1000), out); 205 | 206 | // put deflate args (extra flags) 207 | if (level === 1) { 208 | // fastest algorithm 209 | putByte(4, out); 210 | } else if (level === 9) { 211 | // maximum compression (fastest algorithm) 212 | putByte(2, out); 213 | } else { 214 | putByte(0, out); 215 | } 216 | 217 | // OS identifier 218 | putByte(osCode, out); 219 | 220 | if (options.name) { 221 | // ignore the directory part 222 | putString(options.name.substring(options.name.lastIndexOf("/") + 1), out); 223 | 224 | // terminating null 225 | putByte(0, out); 226 | } 227 | 228 | deflateRaw(bytes).forEach(function (byte) { 229 | putByte(byte, out); 230 | }); 231 | // import { deflateRaw, inflateRaw } from "../deflate/mod.ts"; 232 | // deflateRaw(bytes, level).forEach(function (byte) { 233 | // putByte(byte, out); 234 | // }); 235 | 236 | putLong(parseInt(crc32(bytes), 16), out); 237 | putLong(bytes.length, out); 238 | 239 | return new Uint8Array(out); 240 | } 241 | 242 | /** 243 | * Decompresses a GZIP formatted byte array. 244 | * @param {Uint8Array} bytes - The byte array to decompress. 245 | * @returns {Uint8Array} - The decompressed byte array. 
246 | */ 247 | export function gunzip(bytes: Uint8Array): Uint8Array { 248 | const arr = Array.from(bytes); 249 | 250 | checkHeader(arr); 251 | 252 | // give deflate everything but the last 8 bytes 253 | // the last 8 bytes are for the CRC32 checksum and filesize 254 | const res: Uint8Array = inflateRaw( 255 | new Uint8Array(arr.splice(0, arr.length - 8)), 256 | ); 257 | 258 | // if (flags & possibleFlags["FTEXT"]) { 259 | // res = Array.prototype.map.call(res, function (byte) { 260 | // return String.fromCharCode(byte); 261 | // }).join(""); 262 | // } 263 | 264 | const crc: number = readLong(arr) >>> 0; 265 | if (crc !== parseInt(crc32(res), 16)) { 266 | throw "Checksum does not match"; 267 | } 268 | 269 | const size: number = readLong(arr); 270 | if (size !== res.length) { 271 | throw "Size of decompressed file not correct"; 272 | } 273 | 274 | return res; 275 | } 276 | 277 | /** 278 | * Checks the validity of the GZIP file header. 279 | * @param {number[]} arr - The array containing GZIP data. 280 | * @throws {string} - Throws an error if the header is invalid. 
281 | */ 282 | export function checkHeader(arr: number[]) { 283 | // check the first two bytes for the magic numbers 284 | if (readByte(arr) !== ID1 || readByte(arr) !== ID2) { 285 | throw "Not a GZIP file"; 286 | } 287 | if (readByte(arr) !== 8) { 288 | throw "Unsupported compression method"; 289 | } 290 | 291 | const flags: number = readByte(arr); 292 | readLong(arr); // mtime 293 | readByte(arr); // xFlags 294 | readByte(arr); // os, throw away 295 | 296 | // just throw away the bytes for now 297 | if (flags & possibleFlags["FEXTRA"]) { 298 | const t: number = readShort(arr); 299 | readBytes(arr, t); 300 | } 301 | 302 | // just throw away for now 303 | if (flags & possibleFlags["FNAME"]) { 304 | readString(arr); 305 | } 306 | 307 | // just throw away for now 308 | if (flags & possibleFlags["FCOMMENT"]) { 309 | readString(arr); 310 | } 311 | 312 | // just throw away for now 313 | if (flags & possibleFlags["FHCRC"]) { 314 | readShort(arr); 315 | } 316 | } 317 | 318 | /** 319 | * Checks the GZIP file's tail for CRC32 checksum and file size. 320 | * @param {number[]} arr - The array containing GZIP data. 321 | * @returns {{ crc32: number, size: number }} - An object containing CRC32 and file size. 
322 | */ 323 | export function checkTail(arr: number[]) { 324 | const tail = arr.splice(arr.length - 8); 325 | 326 | const crc32: number = readLong(tail) >>> 0; 327 | const size: number = readLong(tail); 328 | 329 | return { 330 | crc32, 331 | size, 332 | }; 333 | } 334 | -------------------------------------------------------------------------------- /gzip/gzip_file.ts: -------------------------------------------------------------------------------- 1 | // import { readAll, writeAll } from "../deps.ts"; 2 | /** very fast */ 3 | // import { gunzip, gzip } from "../zlib/mod.ts"; 4 | /** slow */ 5 | // import { gzip, gunzip } from "./gzip.ts"; 6 | 7 | /** 8 | * Compress a file 9 | * @param src Source file path 10 | * @param dest Destination file path 11 | */ 12 | export async function gzipFile(src: string, dest: string): Promise { 13 | // v0.0.1 - v0.4.9 14 | // const reader = await Deno.open(src, { 15 | // read: true, 16 | // }); 17 | // const writer = await Deno.open(dest, { 18 | // write: true, 19 | // create: true, 20 | // truncate: true, 21 | // }); 22 | // await writeAll(writer, gzip(await readAll(reader), undefined)); 23 | // writer.close(); 24 | // reader.close(); 25 | 26 | // >= v0.5.0 27 | using input = await Deno.open(src); 28 | using output = await Deno.create(dest); 29 | 30 | await input.readable 31 | .pipeThrough(new DecompressionStream("gzip")) 32 | .pipeTo(output.writable); 33 | } 34 | 35 | /** 36 | * Decompress a file 37 | * @param src Source file path 38 | * @param dest Destination file path 39 | */ 40 | export async function gunzipFile(src: string, dest: string): Promise { 41 | // v0.0.1 - v0.4.9 42 | // const reader = await Deno.open(src, { 43 | // read: true, 44 | // }); 45 | // const writer = await Deno.open(dest, { 46 | // write: true, 47 | // create: true, 48 | // truncate: true, 49 | // }); 50 | // await writeAll(writer, gunzip(await readAll(reader))); 51 | 52 | // >= v0.5.0 53 | using input = await Deno.open(src); 54 | using output = await 
Deno.create(dest); 55 | 56 | await input.readable 57 | .pipeThrough(new CompressionStream("gzip")) 58 | .pipeTo(output.writable); 59 | } -------------------------------------------------------------------------------- /gzip/gzip_stream.ts: -------------------------------------------------------------------------------- 1 | import { copy, EventEmitter } from "../deps.ts"; 2 | import GzipWriter from "./writer_gzip.ts"; 3 | import GunzipWriter from "./writer_gunzip.ts"; 4 | 5 | /** 6 | * @symbol GzipStream 7 | * @description A class for compressing and uncompressing files using Gzip. 8 | */ 9 | export class GzipStream extends EventEmitter { 10 | constructor() { 11 | super(); 12 | } 13 | 14 | async compress(src: string, dest: string): Promise { 15 | // reader 16 | const stat = await Deno.stat(src); 17 | const size = stat.size; 18 | using reader = await Deno.open(src, { 19 | read: true, 20 | }); 21 | // writer 22 | using writer = new GzipWriter(dest, { 23 | onceSize: size > 50 * 1024 * 1024 ? 1024 * 1024 : 512 * 1024, 24 | }); 25 | await writer.setup( 26 | src, 27 | stat.mtime ? Math.round(stat.mtime.getTime() / 1000) : 0, 28 | ); 29 | writer.on("bytesWritten", (bytesWritten: number) => { 30 | const progress = (100 * bytesWritten / size).toFixed(2) + "%"; 31 | this.emit("progress", progress); 32 | }); 33 | 34 | /** 1: use Deno.copy */ 35 | await copy(reader, writer, { 36 | bufSize: 1024 * 1024, 37 | }); 38 | } 39 | 40 | async uncompress(src: string, dest: string): Promise { 41 | // reader 42 | const size = (await Deno.stat(src)).size; 43 | using reader = await Deno.open(src, { 44 | read: true, 45 | }); 46 | // writer 47 | using writer = new GunzipWriter(dest, { 48 | onceSize: size > 50 * 1024 * 1024 ? 
import { Crc32Stream, EventEmitter, writeAll } from "../deps.ts";
import { concatUint8Array } from "../utils/uint8.ts";
import { checkHeader, checkTail } from "./gzip.ts";
import { Inflate } from "../zlib/mod.ts";
import type { Writer as StdWriter } from "../deps.ts";

type File = Deno.FsFile;

interface Options {
  onceSize?: number; // flush threshold in bytes: decompress once this much compressed input has accumulated
}

/**
 * @module writer_gunzip
 * @description This module provides a Writer class for handling GZIP decompression and writing to a file.
 * @example
 * const writer = new Writer('output.txt', { onceSize: 2048 });
 * await writer.setup();
 * await writer.write(data);
 * writer.close();
 */
export default class Writer extends EventEmitter implements StdWriter {
  protected writer!: File; // destination file; opened in setup()
  protected bytesWritten = 0; // compressed bytes received from the source so far
  private path: string; // destination file path
  private chuncks: Uint8Array[] = []; // buffered compressed input awaiting a flush
  private onceSize: number; // see Options.onceSize
  private chuncksBytes = 0; // total bytes currently buffered in `chuncks`
  private isCheckHeader = false; // header is validated once, on the first write
  private writtenSize = 0; // decompressed bytes written to the destination
  private crc32Stream = new Crc32Stream(); // running CRC32 of the decompressed output
  private inflate: Inflate = new Inflate({ raw: true }); // raw DEFLATE (header/trailer handled here)

  constructor(
    path: string,
    options?: Options,
  ) {
    super();
    this.path = path;
    this.onceSize = options?.onceSize ?? 1024 * 1024;
  }

  // Opens (and truncates) the destination file.
  async setup(): Promise<void> {
    this.writer = await Deno.open(this.path, {
      write: true,
      create: true,
      truncate: true,
    });
  }

  // Receives one chunk of *compressed* input; returns the number of bytes
  // consumed (always the full chunk). Emits "bytesWritten" after each flush.
  async write(p: Uint8Array): Promise<number> {
    const readed = p.byteLength;
    this.chuncksBytes += readed;
    this.bytesWritten += readed;
    const arr = Array.from(p);
    if (!this.isCheckHeader) {
      this.isCheckHeader = true;
      // consumes and validates the GZIP header from the first chunk
      checkHeader(arr);
    }
    // NOTE(review): a chunk smaller than 16 KiB is treated as the *final*
    // chunk of the stream (callers copy with large buffers, so only the last
    // read is short) — fragile heuristic; TODO confirm against callers.
    if (readed < 16384) {
      // strips the 8-byte trailer (expected CRC32 + size) off this chunk
      const { size, crc32 } = checkTail(arr);
      this.chuncks.push(new Uint8Array(arr));
      const buf = concatUint8Array(this.chuncks);
      // final push: `true` tells the inflater the stream is complete
      const decompressed = this.inflate.push(buf, true);
      this.writtenSize += decompressed.byteLength;
      await writeAll(this.writer, decompressed);
      this.crc32Stream.append(decompressed);
      // verify trailer against what was actually produced
      if (crc32 !== parseInt(this.crc32Stream.crc32, 16)) {
        throw "Checksum does not match";
      }
      if (size !== this.writtenSize) {
        throw "Size of decompressed file not correct";
      }
      return readed;
    }
    this.chuncks.push(new Uint8Array(arr));
    // flush once enough compressed input has accumulated
    if (this.chuncksBytes >= this.onceSize) {
      const buf = concatUint8Array(this.chuncks);
      const decompressed = this.inflate.push(buf, false);
      this.writtenSize += decompressed.byteLength;
      await writeAll(this.writer, decompressed);
      this.crc32Stream.append(decompressed);
      this.chuncks.length = 0;
      this.chuncksBytes = 0;
      this.emit("bytesWritten", this.bytesWritten);
    }
    return readed;
  }

  // Emits a final progress event and closes the destination file.
  close(): void {
    this.emit("bytesWritten", this.bytesWritten);
    this.writer.close();
  }

  // Supports `using writer = new Writer(...)` (explicit resource management).
  [Symbol.dispose]() {
    this.close();
  }
}
24 | */ 25 | export default class Writer extends EventEmitter implements StdWriter { 26 | private writer!: File; 27 | private bytesWritten = 0; 28 | private path: string; 29 | private chuncks: Uint8Array[] = []; 30 | private onceSize: number; 31 | private chuncksBytes = 0; 32 | private crc32Stream = new Crc32Stream(); 33 | private deflate: Deflate = new Deflate({ raw: true }); 34 | 35 | constructor( 36 | path: string, 37 | options?: Options, 38 | ) { 39 | super(); 40 | this.path = path; 41 | this.onceSize = options?.onceSize ?? 1024 * 1024; 42 | } 43 | 44 | async setup(name?: string, timestamp?: number): Promise { 45 | this.writer = await Deno.open(this.path, { 46 | write: true, 47 | create: true, 48 | truncate: true, 49 | }); 50 | const headers = getHeader({ 51 | timestamp, 52 | name, 53 | }); 54 | await this.writer.write(headers); 55 | } 56 | 57 | async write(p: Uint8Array): Promise { 58 | const readed = p.byteLength; 59 | const copy = new Uint8Array(p); 60 | this.chuncks.push(copy); 61 | this.chuncksBytes += readed; 62 | this.bytesWritten += readed; 63 | this.crc32Stream.append(copy); 64 | if (readed < 16384) { 65 | const buf = concatUint8Array(this.chuncks); 66 | const compressed = this.deflate.push(buf, true); 67 | await writeAll(this.writer, compressed); 68 | const tail = this.getTail(); 69 | await this.writer.write(tail); 70 | } else if (this.chuncksBytes >= this.onceSize) { 71 | const buf = concatUint8Array(this.chuncks); 72 | const compressed = this.deflate.push(buf, false); 73 | await writeAll(this.writer, compressed); 74 | this.chuncks.length = 0; 75 | this.chuncksBytes = 0; 76 | this.emit("bytesWritten", this.bytesWritten); 77 | } 78 | return readed; 79 | } 80 | 81 | private getTail() { 82 | const arr: number[] = []; 83 | putLong(parseInt(this.crc32Stream.crc32, 16), arr); 84 | putLong(this.bytesWritten, arr); 85 | return new Uint8Array(arr); 86 | } 87 | 88 | close(): void { 89 | this.emit("bytesWritten", this.bytesWritten); 90 | this.writer.close(); 91 
| } 92 | 93 | [Symbol.dispose]() { 94 | this.close(); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /interface.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Configuration interface for compression operations. 3 | * 4 | * @interface compressInterface 5 | */ 6 | export interface compressInterface { 7 | /** 8 | * Specifies whether to exclude the source directory from the compression. 9 | * If set to true, the `src` directory will not be included in the compressed output. 10 | * By default, the `src` directory is included. 11 | * 12 | * @type {boolean} 13 | * @memberof compressInterface 14 | */ 15 | excludeSrc?: boolean; 16 | 17 | /** 18 | * Specifies whether to enable debug mode. 19 | * In debug mode, all files and folders being processed will be listed. 20 | * By default, no such information is displayed. 21 | * 22 | * @type {boolean} 23 | * @memberof compressInterface 24 | */ 25 | debug?: boolean; 26 | } 27 | 28 | /** 29 | * Configuration interface for uncompression (decompression) operations. 30 | * 31 | * @interface uncompressInterface 32 | */ 33 | export interface uncompressInterface { 34 | /** 35 | * Specifies whether to enable debug mode. 36 | * In debug mode, all files and folders being processed will be listed. 37 | * By default, no such information is displayed. 
38 | * 39 | * @type {boolean} 40 | * @memberof uncompressInterface 41 | */ 42 | debug?: boolean; 43 | } -------------------------------------------------------------------------------- /mod.ts: -------------------------------------------------------------------------------- 1 | export * as tar from "./tar/mod.ts"; 2 | export * as tgz from "./tgz/mod.ts"; 3 | export * as zip from "./zip/mod.ts"; 4 | export * as brotli from "./brotli/mod.ts"; 5 | export { 6 | gunzipFile, 7 | gzipFile, 8 | GzipStream, 9 | /** slow */ 10 | // gzip, 11 | // gunzip, 12 | } from "./gzip/mod.ts"; 13 | /** slow */ 14 | // export { deflateRaw, inflateRaw } from "./deflate/mod.ts"; 15 | /** fast */ 16 | export { 17 | deflate, 18 | deflateRaw, 19 | gunzip, 20 | gzip, 21 | inflate, 22 | inflateRaw, 23 | } from "./zlib/mod.ts"; 24 | -------------------------------------------------------------------------------- /tar/mod.ts: -------------------------------------------------------------------------------- 1 | import type { compressInterface, uncompressInterface } from "../interface.ts"; 2 | import { path } from "../deps.ts"; 3 | import { UntarStream } from "../deps.ts"; 4 | import { TarStream, type TarStreamInput } from "../deps.ts"; 5 | 6 | /** 7 | * Uncompresses a file. 8 | * @param {string} src - Source file path. 9 | * @param {string} dest - Destination file path. 10 | * @param {uncompressInterface} [options] - Optional parameters. 
11 | */ 12 | export async function uncompress( 13 | src: string, 14 | dest: string, 15 | options?: uncompressInterface, 16 | ): Promise { 17 | const stat = await Deno.stat(src); 18 | if(stat.isDirectory) { 19 | throw new Error("The source path is a directory, not a file: ${src}") 20 | } 21 | using srcFile = await Deno.open(src); 22 | for await ( 23 | const entry of srcFile.readable.pipeThrough(new UntarStream()) 24 | ) { 25 | const filePath = path.resolve(dest, entry.path); 26 | if (options?.debug) console.log(filePath); 27 | await Deno.mkdir(path.dirname(filePath), { recursive: true }); 28 | await entry.readable?.pipeTo((await Deno.create(filePath)).writable); 29 | } 30 | } 31 | 32 | /** 33 | * Compresses a file. 34 | * @param {string} src - Source file path. 35 | * @param {string} dest - Destination file path. 36 | * @param {compressInterface} [options] - Optional parameters. 37 | */ 38 | export async function compress( 39 | src: string, 40 | dest: string, 41 | options?: compressInterface, 42 | ): Promise { 43 | const stat = await Deno.stat(src); 44 | const inputs: TarStreamInput[] = []; 45 | if (stat.isFile) { 46 | inputs.push({ 47 | type: "file", 48 | path: path.basename(src), 49 | size: stat.size, 50 | readable: (await Deno.open(src)).readable, 51 | options: { 52 | mtime: (stat?.mtime ?? new Date()).valueOf() / 1000 | 0, 53 | }, 54 | }); 55 | if (options?.debug) console.log(path.resolve(src)); 56 | } else { 57 | const appendFolder = async (folder: string, prefix?: string) => { 58 | let nowLoopList: string[][] = [[folder, prefix || ""]]; 59 | let nextLoopList: string[][] = []; 60 | 61 | while (nowLoopList.length > 0) { 62 | for (const [folder, prefix] of nowLoopList) { 63 | for await (const entry of Deno.readDir(folder)) { 64 | const { isDirectory, name } = entry; 65 | const fileName = prefix ? 
`${prefix}/${name}` : name; 66 | const filePath = path.resolve(folder, name); 67 | if (options?.debug) console.log(path.resolve(filePath)); 68 | const stat = await Deno.stat(filePath); 69 | if (isDirectory) { 70 | inputs.push({ 71 | type: "directory", 72 | path: `${fileName}/`, 73 | options: { 74 | mtime: ~~((stat?.mtime ?? new Date()) 75 | .valueOf() / 76 | 1000), 77 | }, 78 | }); 79 | nextLoopList.push([filePath, fileName]); 80 | } else { 81 | inputs.push({ 82 | type: "file", 83 | path: fileName, 84 | size: stat.size, 85 | readable: (await Deno.open(filePath)).readable, 86 | options: { 87 | mtime: (stat?.mtime ?? new Date()).valueOf() / 88 | 1000 | 0, 89 | }, 90 | }); 91 | } 92 | } 93 | } 94 | nowLoopList = nextLoopList; 95 | nextLoopList = []; 96 | } 97 | }; 98 | if (options?.excludeSrc) { 99 | await appendFolder(src); 100 | } else { 101 | const folderName = path.basename(src); 102 | inputs.push({ 103 | type: "directory", 104 | path: path.basename(`${folderName}/`), 105 | }); 106 | if (options?.debug) console.log(path.resolve(src)); 107 | await appendFolder(src, folderName); 108 | } 109 | } 110 | 111 | await ReadableStream.from(inputs) 112 | .pipeThrough(new TarStream()) 113 | .pipeTo((await Deno.create(dest)).writable); 114 | } 115 | -------------------------------------------------------------------------------- /tar_archive/mod.ts: -------------------------------------------------------------------------------- 1 | import { Buffer, copy, ensureDir, path } from "../deps.ts"; 2 | import type { compressInterface, uncompressInterface } from "../interface.ts"; 3 | import { Tar } from "jsr:@std/archive@0.225.4/tar"; 4 | import { Untar } from "jsr:@std/archive@0.225.4/untar"; 5 | 6 | /** 7 | * Uncompresses a file. 8 | * @param {string} src - Source file path. 9 | * @param {string} dest - Destination file path. 10 | * @param {uncompressInterface} [options] - Optional parameters. 
11 | */ 12 | export async function uncompress( 13 | src: string, 14 | dest: string, 15 | options?: uncompressInterface, 16 | ): Promise { 17 | const stat = await Deno.stat(src); 18 | if(stat.isDirectory) { 19 | throw new Error("The source path is a directory, not a file: ${src}") 20 | } 21 | using reader = await Deno.open(src, { read: true }); 22 | const untar = new Untar(reader); 23 | for await (const entry of untar) { 24 | const filePath = path.resolve(dest, entry.fileName); 25 | if (options?.debug) console.log(filePath); 26 | if (entry.type === "directory") { 27 | await ensureDir(filePath); 28 | continue; 29 | } 30 | await ensureDir(path.dirname(filePath)); 31 | using file = await Deno.open(filePath, { write: true, create: true }); 32 | await copy(entry, file); 33 | } 34 | } 35 | 36 | // iteratively 37 | // fix for issue: https://github.com/deno-library/compress/issues/8 38 | /** 39 | * Compresses a file. 40 | * @param {string} src - Source file path. 41 | * @param {string} dest - Destination file path. 42 | * @param {compressInterface} [options] - Optional parameters. 43 | */ 44 | export async function compress( 45 | src: string, 46 | dest: string, 47 | options?: compressInterface, 48 | ): Promise { 49 | const tar = new Tar(); 50 | const stat = await Deno.stat(src); 51 | if (stat.isFile) { 52 | await tar.append(path.basename(src), { 53 | filePath: src, 54 | contentSize: stat.size, 55 | mtime: (stat?.mtime ?? new Date()).valueOf() / 1000, 56 | }); 57 | if (options?.debug) console.log(path.resolve(src)); 58 | } else { 59 | const appendFolder = async (folder: string, prefix?: string) => { 60 | let nowLoopList: string[][] = [[folder, prefix || ""]]; 61 | let nextLoopList: string[][] = []; 62 | 63 | while (nowLoopList.length > 0) { 64 | for (const [folder, prefix] of nowLoopList) { 65 | for await (const entry of Deno.readDir(folder)) { 66 | const { isDirectory, name } = entry; 67 | const fileName = prefix ? 
`${prefix}/${name}` : name; 68 | const filePath = path.resolve(folder, name); 69 | if (options?.debug) console.log(path.resolve(filePath)); 70 | const stat = await Deno.stat(filePath); 71 | if (isDirectory) { 72 | await tar.append( 73 | `${fileName}/`, 74 | { 75 | reader: new Buffer(), 76 | contentSize: 0, 77 | type: "directory", 78 | mtime: (stat?.mtime ?? new Date()).valueOf() / 1000, 79 | }, 80 | ); 81 | nextLoopList.push([filePath, fileName]); 82 | } else { 83 | await tar.append(fileName, { 84 | filePath, 85 | mtime: (stat?.mtime ?? new Date()).valueOf() / 1000, 86 | contentSize: stat.size, 87 | }); 88 | } 89 | } 90 | } 91 | nowLoopList = nextLoopList; 92 | nextLoopList = []; 93 | } 94 | }; 95 | if (options?.excludeSrc) { 96 | await appendFolder(src); 97 | } else { 98 | const folderName = path.basename(src); 99 | await tar.append( 100 | `${folderName}/`, 101 | { 102 | filePath: src, 103 | // type: "directory", 104 | // mtime: (stat?.mtime ?? new Date()).valueOf() / 1000, 105 | // contentSize: 0, 106 | // reader: new Deno.Buffer(), 107 | }, 108 | ); 109 | if (options?.debug) console.log(path.resolve(src)); 110 | await appendFolder(src, folderName); 111 | } 112 | } 113 | using writer = await Deno.open(dest, { write: true, create: true }); 114 | await copy(tar.getReader(), writer); 115 | } 116 | 117 | // Recursive way 118 | // export async function compress( 119 | // src: string, 120 | // dest: string, 121 | // options?: compressInterface, 122 | // ): Promise { 123 | // const tar = new Tar(); 124 | // const stat = await Deno.stat(src); 125 | // if (stat.isFile) { 126 | // await tar.append(path.basename(src), { 127 | // filePath: src, 128 | // contentSize: stat.size, 129 | // mtime: (stat?.mtime ?? 
new Date()).valueOf() / 1000, 130 | // }); 131 | // } else { 132 | // const appendFolder = async (folder: string, prefix?: string) => { 133 | // for await (const entry of Deno.readDir(folder)) { 134 | // const { isDirectory, name } = entry; 135 | // const fileName = prefix ? `${prefix}/${name}` : name; 136 | // const filePath = path.resolve(folder, name); 137 | // const stat = await Deno.stat(filePath); 138 | // if (isDirectory) { 139 | // await tar.append( 140 | // `${fileName}/`, 141 | // { 142 | // reader: new Buffer(), 143 | // contentSize: 0, 144 | // type: "directory", 145 | // mtime: (stat?.mtime ?? new Date()).valueOf() / 1000, 146 | // }, 147 | // ); 148 | // await appendFolder(filePath, fileName); 149 | // } else { 150 | // await tar.append(fileName, { 151 | // filePath, 152 | // mtime: (stat?.mtime ?? new Date()).valueOf() / 1000, 153 | // contentSize: stat.size, 154 | // }); 155 | // } 156 | // } 157 | // }; 158 | // if (options?.excludeSrc) { 159 | // await appendFolder(src); 160 | // } else { 161 | // const folderName = path.basename(src); 162 | // await tar.append( 163 | // `${folderName}/`, 164 | // { 165 | // filePath: src, 166 | // // type: "directory", 167 | // // mtime: (stat?.mtime ?? new Date()).valueOf() / 1000, 168 | // // contentSize: 0, 169 | // // reader: new Deno.Buffer(), 170 | // }, 171 | // ); 172 | // await appendFolder(src, folderName); 173 | // } 174 | // } 175 | // const writer = await Deno.open(dest, { write: true, create: true }); 176 | // await copy(tar.getReader(), writer); 177 | // writer.close(); 178 | // } 179 | -------------------------------------------------------------------------------- /test/brotli.test.ts: -------------------------------------------------------------------------------- 1 | import { assert } from "jsr:@std/assert"; 2 | import { brotli } from "../mod.ts"; 3 | import { Buffer } from "node:buffer"; 4 | 5 | const inputString = 'ΩΩLorem ipsum dolor sit amet, consectetur adipiscing eli' + 6 | 't. 
Morbi faucibus, purus at gravida dictum, libero arcu ' + 7 | 'convallis lacus, in commodo libero metus eu nisi. Nullam' + 8 | ' commodo, neque nec porta placerat, nisi est fermentum a' + 9 | 'ugue, vitae gravida tellus sapien sit amet tellus. Aenea' + 10 | 'n non diam orci. Proin quis elit turpis. Suspendisse non' + 11 | ' diam ipsum. Suspendisse nec ullamcorper odio. Vestibulu' + 12 | 'm arcu mi, sodales non suscipit id, ultrices ut massa. S' + 13 | 'ed ac sem sit amet arcu malesuada fermentum. Nunc sed. '; 14 | const compressedString = 'G/gBQBwHdky2aHV5KK9Snf05//1pPdmNw/7232fnIm1IB' + 15 | 'K1AA8RsN8OB8Nb7Lpgk3UWWUlzQXZyHQeBBbXMTQXC1j7' + 16 | 'wg3LJs9LqOGHRH2bj/a2iCTLLx8hBOyTqgoVuD1e+Qqdn' + 17 | 'f1rkUNyrWq6LtOhWgxP3QUwdhKGdZm3rJWaDDBV7+pDk1' + 18 | 'MIkrmjp4ma2xVi5MsgJScA3tP1I7mXeby6MELozrwoBQD' + 19 | 'mVTnEAicZNj4lkGqntJe2qSnGyeMmcFgraK94vCg/4iLu' + 20 | 'Tw5RhKhnVY++dZ6niUBmRqIutsjf5TzwF5iAg8a9UkjF5' + 21 | '2eZ0tB2vo6v8SqVfNMkBmmhxr0NT9LkYF69aEjlYzj7IE' + 22 | 'KmEUQf1HBogRYhFIt4ymRNEgHAIzOyNEsQM='; 23 | 24 | Deno.test("brotli", async () => { 25 | // async 26 | const compressedBuffer = await brotli.compress(inputString); 27 | assert(compressedBuffer.toString('base64') === compressedString); 28 | 29 | // deno version >= v2.1.8 / 2025.01.30 30 | // https://github.com/denoland/deno/pull/27815 31 | const uncompressedBuffer = await brotli.uncompress(compressedBuffer); 32 | assert(uncompressedBuffer.toString() === inputString); 33 | 34 | // sync 35 | const buffer = Buffer.from(compressedString, 'base64'); 36 | const d = brotli.uncompressSync(buffer); 37 | assert(d.toString() === inputString); 38 | }); 39 | -------------------------------------------------------------------------------- /test/deflate.test.ts: -------------------------------------------------------------------------------- 1 | import { assert } from "jsr:@std/assert"; 2 | import { deflate, inflate } from "../mod.ts"; 3 | 4 | Deno.test("deflate", () => { 5 | const str = "hello world!"; 6 | const 
bytes = new TextEncoder().encode(str); 7 | const compressed = deflate(bytes); 8 | const decompressed = inflate(compressed); 9 | assert(str === new TextDecoder().decode(decompressed)); 10 | }); -------------------------------------------------------------------------------- /test/deno.tar: -------------------------------------------------------------------------------- 1 | archive/0000775000175100017510000000000013670171677011156 5ustar denodenoarchive/deno/0000775000175100017510000000000013670167523012076 5ustar denodenoarchive/deno/land/0000775000175100017510000000000013670167554013020 5ustar denodenoarchive/deno/land/land.txt0000664000175100017510000000000513670167554014472 0ustar denodenoland 2 | archive/file.txt0000664000175100017510000000000513670167512012622 0ustar denodenofile 3 | archive/deno.txt0000664000175100017510000000000513670167532012632 0ustar denodenodeno 4 | -------------------------------------------------------------------------------- /test/dir/root.txt: -------------------------------------------------------------------------------- 1 | tar -------------------------------------------------------------------------------- /test/dir/subdir/subfile.txt: -------------------------------------------------------------------------------- 1 | sub file -------------------------------------------------------------------------------- /test/tar.test.ts: -------------------------------------------------------------------------------- 1 | import { assert, assertEquals } from "jsr:@std/assert"; 2 | import { tar } from "../mod.ts"; 3 | 4 | Deno.test("tar.compress file", async () => { 5 | const src = "./test/dir/root.txt"; 6 | const dest = "./test.tar"; 7 | try { 8 | await tar.compress(src, dest, { debug: true }); 9 | const stat = await Deno.stat(dest); 10 | /** 11 | * 2048 = 512 (header) + 512 (content) + 1024 (footer) 12 | */ 13 | assertEquals(stat.size, 2048); 14 | await Deno.remove(dest); 15 | } catch (error) { 16 | console.error(error); 17 | assert(false); 
18 | } 19 | }); 20 | 21 | Deno.test("tar.compress folder", async () => { 22 | const src = "./test/dir"; 23 | const dest = "./test.tar"; 24 | try { 25 | await tar.compress(src, dest, { debug: true }); 26 | const stat = await Deno.stat(dest); 27 | /** 28 | * 4096 = 512 (header) + 0 (content) + // tar folder 29 | * 512 (header) + 512 (content) + // tar.txt 30 | * 512 (header) + 0 (content) + // subdir folder 31 | * 512 (header) + 512 (content) + // subfile.txt 32 | * 1024 (footer) // footer 33 | */ 34 | assertEquals(stat.size, 4096); 35 | await Deno.remove(dest); 36 | } catch (error) { 37 | console.error(error); 38 | assert(false); 39 | } 40 | }); 41 | 42 | Deno.test("tar.uncompress", async () => { 43 | const src = "./test/deno.tar"; 44 | const dest = "./tar-test"; 45 | const landTxtPath = "./tar-test/archive/deno/land/land.txt"; 46 | const landTxtSize = 5; 47 | const landTxtContent = "land\n"; 48 | try { 49 | await tar.uncompress(src, dest, { debug: true }); 50 | const stat = await Deno.stat(landTxtPath); 51 | assertEquals(stat.size, landTxtSize); 52 | const buf = await Deno.readFile(landTxtPath); 53 | const content = new TextDecoder().decode(buf); 54 | assertEquals(content, landTxtContent); 55 | await Deno.remove(dest, { recursive: true }); 56 | } catch (error) { 57 | console.error(error); 58 | assert(false); 59 | } 60 | }); 61 | -------------------------------------------------------------------------------- /test/zip.test.ts: -------------------------------------------------------------------------------- 1 | import { assert, assertEquals } from "jsr:@std/assert"; 2 | import { zip } from "../mod.ts"; 3 | 4 | Deno.test("zip.compress file", async () => { 5 | const src = "./test/dir/root.txt"; 6 | const dest = "./test.zip"; 7 | try { 8 | await zip.compress(src, dest, { debug: true }); 9 | const stat = await Deno.stat(dest); 10 | assertEquals(stat.size, 255); 11 | await Deno.remove(dest); 12 | } catch (error) { 13 | console.error(error); 14 | assert(false); 15 | } 16 
| }); 17 | 18 | Deno.test("zip.compress folder", async () => { 19 | const src = "./test/dir"; 20 | const dest = "./deno.zip"; 21 | try { 22 | await zip.compress(src, dest, { debug: true }); 23 | const stat = await Deno.stat(dest); 24 | assertEquals(stat.size, 943); 25 | await Deno.remove(dest); 26 | } catch (error) { 27 | console.error(error); 28 | assert(false); 29 | } 30 | }); 31 | -------------------------------------------------------------------------------- /tgz/mod.ts: -------------------------------------------------------------------------------- 1 | import type { compressInterface, uncompressInterface } from "../interface.ts"; 2 | import { path } from "../deps.ts"; 3 | import { UntarStream } from "../deps.ts"; 4 | import { TarStream, type TarStreamInput } from "../deps.ts"; 5 | 6 | /** 7 | * @module 8 | * @description This module provides functions to compress and uncompress files using tar and gzip formats. 9 | * @exports uncompress 10 | * @exports compress 11 | */ 12 | 13 | /** 14 | * Uncompresses a .tgz or .gz file to a specified destination. 15 | * @param {string} src - The source file path. 16 | * @param {string} dest - The destination directory path. 17 | * @param {uncompressInterface} [options] - Optional parameters for uncompression. 18 | * @returns {Promise} - A promise that resolves when the operation is complete. 
19 | */ 20 | export async function uncompress( 21 | src: string, 22 | dest: string, 23 | options?: uncompressInterface, 24 | ): Promise { 25 | const stat = await Deno.stat(src); 26 | if(stat.isDirectory) { 27 | throw new Error("The source path is a directory, not a file: ${src}") 28 | } 29 | using srcFile = await Deno.open(src); 30 | for await ( 31 | const entry of srcFile.readable.pipeThrough( 32 | new DecompressionStream("gzip"), 33 | ).pipeThrough(new UntarStream()) 34 | ) { 35 | const filePath = path.resolve(dest, entry.path); 36 | if (options?.debug) console.log(filePath); 37 | await Deno.mkdir(path.dirname(filePath), { recursive: true }); 38 | await entry.readable?.pipeTo((await Deno.create(filePath)).writable); 39 | } 40 | } 41 | 42 | /** 43 | * Compresses a file to a .tgz format. 44 | * @param {string} src - The source file path. 45 | * @param {string} dest - The destination file path. 46 | * @param {compressInterface} [options] - Optional parameters for compression. 47 | * @returns {Promise} - A promise that resolves when the operation is complete. 48 | */ 49 | export async function compress( 50 | src: string, 51 | dest: string, 52 | options?: compressInterface, 53 | ): Promise { 54 | const stat = await Deno.stat(src); 55 | const inputs: TarStreamInput[] = []; 56 | if (stat.isFile) { 57 | inputs.push({ 58 | type: "file", 59 | path: path.basename(src), 60 | size: stat.size, 61 | readable: (await Deno.open(src)).readable, 62 | options: { 63 | mtime: (stat?.mtime ?? 
new Date()).valueOf() / 1000 | 0, 64 | }, 65 | }); 66 | if (options?.debug) console.log(path.resolve(src)); 67 | } else { 68 | const appendFolder = async (folder: string, prefix?: string) => { 69 | let nowLoopList: string[][] = [[folder, prefix || ""]]; 70 | let nextLoopList: string[][] = []; 71 | 72 | while (nowLoopList.length > 0) { 73 | for (const [folder, prefix] of nowLoopList) { 74 | for await (const entry of Deno.readDir(folder)) { 75 | const { isDirectory, name } = entry; 76 | const fileName = prefix ? `${prefix}/${name}` : name; 77 | const filePath = path.resolve(folder, name); 78 | if (options?.debug) console.log(path.resolve(filePath)); 79 | const stat = await Deno.stat(filePath); 80 | if (isDirectory) { 81 | inputs.push({ 82 | type: "directory", 83 | path: `${fileName}/`, 84 | options: { 85 | mtime: ~~((stat?.mtime ?? new Date()) 86 | .valueOf() / 87 | 1000), 88 | }, 89 | }); 90 | nextLoopList.push([filePath, fileName]); 91 | } else { 92 | inputs.push({ 93 | type: "file", 94 | path: fileName, 95 | size: stat.size, 96 | readable: (await Deno.open(filePath)).readable, 97 | options: { 98 | mtime: (stat?.mtime ?? 
new Date()).valueOf() / 99 | 1000 | 0, 100 | }, 101 | }); 102 | } 103 | } 104 | } 105 | nowLoopList = nextLoopList; 106 | nextLoopList = []; 107 | } 108 | }; 109 | if (options?.excludeSrc) { 110 | await appendFolder(src); 111 | } else { 112 | const folderName = path.basename(src); 113 | inputs.push({ 114 | type: "directory", 115 | path: path.basename(`${folderName}/`), 116 | }); 117 | if (options?.debug) console.log(path.resolve(src)); 118 | await appendFolder(src, folderName); 119 | } 120 | } 121 | 122 | await ReadableStream.from(inputs) 123 | .pipeThrough(new TarStream()) 124 | .pipeThrough(new CompressionStream("gzip")) 125 | .pipeTo((await Deno.create(dest)).writable); 126 | } 127 | -------------------------------------------------------------------------------- /tgz_archive/mod.ts: -------------------------------------------------------------------------------- 1 | import * as tar from "../tar_archive/mod.ts"; 2 | import { gunzipFile, gzipFile } from "../gzip/gzip_file.ts"; 3 | import type { compressInterface, uncompressInterface } from "../interface.ts"; 4 | import { path } from "../deps.ts"; 5 | 6 | /** 7 | * @module 8 | * @description This module provides functions to compress and uncompress files using tar and gzip formats. 9 | * @exports uncompress 10 | * @exports compress 11 | */ 12 | 13 | /** 14 | * Uncompresses a .tgz or .gz file to a specified destination. 15 | * @param {string} src - The source file path. 16 | * @param {string} dest - The destination directory path. 17 | * @param {uncompressInterface} [options] - Optional parameters for uncompression. 18 | * @returns {Promise} - A promise that resolves when the operation is complete. 19 | */ 20 | export async function uncompress(src: string, dest: string, options?: uncompressInterface): Promise { 21 | const filename = path.basename(src); 22 | const extname = path.extname(filename); 23 | const tarFilename = extname === ".tgz" 24 | ? filename.slice(0, -3) + "tar" 25 | : (extname === ".gz" ? 
filename.slice(0, -3) : filename); 26 | const tmpDir = await Deno.makeTempDir(); 27 | const tmpPath = path.join(tmpDir, tarFilename); 28 | await gunzipFile(src, tmpPath); 29 | await tar.uncompress(tmpPath, dest, options); 30 | await Deno.remove(tmpDir, { recursive: true }); 31 | } 32 | 33 | /** 34 | * Compresses a file to a .tgz format. 35 | * @param {string} src - The source file path. 36 | * @param {string} dest - The destination file path. 37 | * @param {compressInterface} [options] - Optional parameters for compression. 38 | * @returns {Promise} - A promise that resolves when the operation is complete. 39 | */ 40 | export async function compress( 41 | src: string, 42 | dest: string, 43 | options?: compressInterface, 44 | ): Promise { 45 | const filename = path.basename(src); 46 | const tmpDir = await Deno.makeTempDir(); 47 | const tmpPath = path.join(tmpDir, filename); 48 | await tar.compress(src, tmpPath, options); 49 | await gzipFile(tmpPath, dest); 50 | await Deno.remove(tmpDir, { recursive: true }); 51 | } 52 | -------------------------------------------------------------------------------- /utils/uint8.ts: -------------------------------------------------------------------------------- 1 | import type { Reader } from "../deps.ts"; 2 | 3 | /** 4 | * Reads a message from the provided Deno.Reader instance. 5 | * @param reader - The Deno.Reader instance to read from. 6 | * @returns A Promise that resolves to a Uint8Array or null if no more data is available. 
7 | */ 8 | export async function readMsg(reader: Reader): Promise { 9 | const arr: Uint8Array[] = []; 10 | const n = 100; 11 | let readed: number | null; 12 | while (true) { 13 | const p: Uint8Array = new Uint8Array(n); 14 | readed = await reader.read(p); 15 | if (readed === null) break; 16 | if (readed < n) { 17 | arr.push(p.subarray(0, readed)); 18 | break; 19 | } else { 20 | arr.push(p); 21 | } 22 | } 23 | if (readed === null) return null; 24 | const result = concatUint8Array(arr); 25 | return result; 26 | } 27 | 28 | /** 29 | * Concatenates multiple Uint8Array instances into a single Uint8Array. 30 | * @param arr - An array of Uint8Array instances to concatenate. 31 | * @returns A single Uint8Array containing all the concatenated data. 32 | */ 33 | export function concatUint8Array(arr: Uint8Array[]): Uint8Array { 34 | const length = arr.reduce((pre, next) => pre + next.length, 0); 35 | const result = new Uint8Array(length); 36 | let offset = 0; 37 | for (const v of arr) { 38 | result.set(v, offset); 39 | offset += v.length; 40 | } 41 | return result; 42 | } 43 | -------------------------------------------------------------------------------- /zip/mod.ts: -------------------------------------------------------------------------------- 1 | import type { compressInterface, uncompressInterface } from "../interface.ts"; 2 | import { path } from "../deps.ts"; 3 | import { 4 | type EntryMetaData, 5 | terminateWorkers, 6 | ZipReaderStream, 7 | ZipWriter, 8 | } from "../deps.ts"; 9 | 10 | /** 11 | * Uncompresses a file. 12 | * @param {string} src - Source file path. 13 | * @param {string} dest - Destination file path. 14 | * @param {uncompressInterface} [options] - Optional parameters. 
15 | */ 16 | export async function uncompress( 17 | src: string, 18 | dest: string, 19 | options?: uncompressInterface, 20 | ): Promise { 21 | const stat = await Deno.stat(src); 22 | if(stat.isDirectory) { 23 | throw new Error("The source path is a directory, not a file: ${src}") 24 | } 25 | using srcFile = await Deno.open(src); 26 | for await ( 27 | const entry of srcFile.readable.pipeThrough(new ZipReaderStream()) 28 | ) { 29 | const filePath = path.resolve(dest, entry.filename); 30 | if (options?.debug) console.log(filePath); 31 | await Deno.mkdir(path.dirname(filePath), { recursive: true }); 32 | if (entry.directory) continue; 33 | await entry.readable?.pipeTo((await Deno.create(filePath)).writable); 34 | } 35 | await terminateWorkers(); 36 | } 37 | 38 | /** 39 | * Compresses a file. 40 | * @param {string} src - Source file path. 41 | * @param {string} dest - Destination file path. 42 | * @param {compressInterface} [options] - Optional parameters. 43 | */ 44 | export async function compress( 45 | src: string, 46 | dest: string, 47 | options?: compressInterface, 48 | ): Promise { 49 | const stat = await Deno.stat(src); 50 | const zipWriter = new ZipWriter((await Deno.create(dest)).writable); 51 | const inputs: Promise[] = []; 52 | 53 | if (stat.isFile) { 54 | inputs.push( 55 | zipWriter.add(path.basename(src), (await Deno.open(src)).readable, { 56 | directory: false, 57 | uncompressedSize: stat.size, 58 | }), 59 | ); 60 | if (options?.debug) console.log(path.resolve(src)); 61 | } else { 62 | const appendFolder = async (folder: string, prefix?: string) => { 63 | let nowLoopList: string[][] = [[folder, prefix || ""]]; 64 | let nextLoopList: string[][] = []; 65 | 66 | while (nowLoopList.length > 0) { 67 | for (const [folder, prefix] of nowLoopList) { 68 | for await (const entry of Deno.readDir(folder)) { 69 | const { isDirectory, name } = entry; 70 | const fileName = prefix ? 
`${prefix}/${name}` : name; 71 | const filePath = path.resolve(folder, name); 72 | if (options?.debug) console.log(path.resolve(filePath)); 73 | const stat = await Deno.stat(filePath); 74 | if (isDirectory) { 75 | inputs.push(zipWriter.add(`${fileName}/`, undefined, { 76 | directory: true, 77 | })); 78 | nextLoopList.push([filePath, fileName]); 79 | } else { 80 | inputs.push( 81 | zipWriter.add(fileName, (await Deno.open(filePath)).readable, { 82 | directory: false, 83 | uncompressedSize: stat.size, 84 | }), 85 | ); 86 | } 87 | } 88 | } 89 | nowLoopList = nextLoopList; 90 | nextLoopList = []; 91 | } 92 | }; 93 | if (options?.excludeSrc) { 94 | await appendFolder(src); 95 | } else { 96 | const folderName = path.basename(src); 97 | inputs.push(zipWriter.add(path.basename(`${folderName}/`), undefined, { 98 | directory: true, 99 | })); 100 | if (options?.debug) console.log(path.resolve(src)); 101 | await appendFolder(src, folderName); 102 | } 103 | } 104 | 105 | try { 106 | await Promise.all(inputs); 107 | await zipWriter.close(); 108 | // print progress 109 | // await zipWriter.close(undefined, { 110 | // onprogress: (progress, total, entry) => { 111 | // console.log(progress, total, entry) 112 | // return undefined 113 | // } 114 | // }); 115 | } finally { 116 | await terminateWorkers(); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /zip_archive/archive.ts: -------------------------------------------------------------------------------- 1 | import type { compressInterface, uncompressInterface } from "../interface.ts"; 2 | import { path } from "../deps.ts"; 3 | import { 4 | terminateWorkers, 5 | ZipReader, 6 | ZipWriterStream, 7 | } from "../deps.ts"; 8 | 9 | /** 10 | * Uncompresses a file. 11 | * @param {string} src - Source file path. 12 | * @param {string} dest - Destination file path. 13 | * @param {uncompressInterface} [options] - Optional parameters. 
14 | */ 15 | export async function uncompress( 16 | src: string, 17 | dest: string, 18 | options?: uncompressInterface, 19 | ): Promise { 20 | const stat = await Deno.stat(src); 21 | if(stat.isDirectory) { 22 | throw new Error("The source path is a directory, not a file: ${src}") 23 | } 24 | using srcFile = await Deno.open(src); 25 | const zipReader = new ZipReader(srcFile); 26 | try { 27 | const entries = await zipReader.getEntries(); 28 | for (const entry of entries) { 29 | const filePath = path.resolve(dest, entry.filename); 30 | if (options?.debug) console.log(filePath); 31 | await Deno.mkdir(path.dirname(filePath), { recursive: true }); 32 | if (entry.directory || !entry.getData) continue; 33 | await entry.getData((await Deno.create(filePath)).writable); 34 | } 35 | } finally { 36 | await zipReader.close(); 37 | await terminateWorkers(); 38 | } 39 | } 40 | 41 | /** 42 | * Compresses a file. 43 | * @param {string} src - Source file path. 44 | * @param {string} dest - Destination file path. 45 | * @param {compressInterface} [options] - Optional parameters. 46 | */ 47 | export async function compress( 48 | src: string, 49 | dest: string, 50 | options?: compressInterface, 51 | ): Promise { 52 | const stat = await Deno.stat(src); 53 | const zipper = new ZipWriterStream(); 54 | zipper.readable.pipeTo((await Deno.create(dest)).writable); 55 | 56 | if (stat.isFile) { 57 | (await Deno.open(src)).readable.pipeTo( 58 | zipper.writable(path.basename(src)), 59 | ); 60 | if (options?.debug) console.log(path.resolve(src)); 61 | } else { 62 | const appendFolder = async (folder: string, prefix?: string) => { 63 | let nowLoopList: string[][] = [[folder, prefix || ""]]; 64 | let nextLoopList: string[][] = []; 65 | 66 | while (nowLoopList.length > 0) { 67 | for (const [folder, prefix] of nowLoopList) { 68 | for await (const entry of Deno.readDir(folder)) { 69 | const { isDirectory, name } = entry; 70 | const fileName = prefix ? 
`${prefix}/${name}` : name; 71 | const filePath = path.resolve(folder, name); 72 | if (options?.debug) console.log(path.resolve(filePath)); 73 | if (isDirectory) { 74 | emptyReadableStream().pipeTo( 75 | zipper.writable(`${fileName}/`), 76 | ); 77 | nextLoopList.push([filePath, fileName]); 78 | } else { 79 | (await Deno.open(filePath)).readable.pipeTo( 80 | zipper.writable(fileName), 81 | ); 82 | } 83 | } 84 | } 85 | nowLoopList = nextLoopList; 86 | nextLoopList = []; 87 | } 88 | }; 89 | if (options?.excludeSrc) { 90 | await appendFolder(src); 91 | } else { 92 | const folderName = path.basename(src); 93 | emptyReadableStream().pipeTo(zipper.writable(`${folderName}/`)); 94 | if (options?.debug) console.log(path.resolve(src)); 95 | await appendFolder(src, folderName); 96 | } 97 | } 98 | await zipper.close(); 99 | await terminateWorkers(); 100 | } 101 | 102 | const emptyReadableStream = () => { 103 | return new ReadableStream({ 104 | start(controller) { 105 | controller.close(); 106 | }, 107 | }); 108 | }; 109 | -------------------------------------------------------------------------------- /zlib/deflate.ts: -------------------------------------------------------------------------------- 1 | // from https://github.com/nodeca/pako 2 | import * as zlibDeflate from "./zlib/deflate.ts"; 3 | import { concatUint8Array } from "../utils/uint8.ts"; 4 | import { type CODE, message as msg } from "./zlib/messages.ts"; 5 | import ZStream from "./zlib/zstream.ts"; 6 | import STATUS from "./zlib/status.ts"; 7 | 8 | /** 9 | * Options for the Deflate class. 
10 | */ 11 | export interface DeflateOptions { 12 | level?: number; // Compression level (0-9) 13 | method?: number; // Compression method 14 | chunkSize?: number; // Size of each output chunk 15 | windowBits?: number; // Size of the history buffer 16 | memLevel?: number; // Memory level 17 | strategy?: number; // Compression strategy 18 | to?: string; // Output type 19 | raw?: boolean; // Raw deflate (no header) 20 | gzip?: boolean; // Gzip format 21 | dictionary?: Uint8Array; // Dictionary for compression 22 | header?: zlibDeflate.Header; // Custom header 23 | } 24 | 25 | /** 26 | * Required Options for the Deflate class. 27 | */ 28 | interface DeflateOptionsRequired { 29 | level: number; 30 | method: number; 31 | chunkSize: number; 32 | windowBits: number; 33 | memLevel: number; 34 | strategy: number; 35 | to: string; 36 | raw?: boolean; 37 | gzip?: boolean; 38 | dictionary?: Uint8Array; 39 | header?: zlibDeflate.Header; 40 | } 41 | 42 | /** 43 | * Class for handling deflate compression. 
44 | */ 45 | export class Deflate { 46 | err: STATUS = 0; // error code, if happens (0 = Z_OK) 47 | msg = ""; // error message 48 | ended = false; // used to avoid multiple onEnd() calls 49 | strm: ZStream; 50 | _dict_set = false; 51 | options: DeflateOptionsRequired; 52 | 53 | constructor(options: DeflateOptions = {}) { 54 | this.options = { 55 | level: STATUS.Z_DEFAULT_COMPRESSION, 56 | method: STATUS.Z_DEFLATED, 57 | chunkSize: 16384, 58 | windowBits: 15, 59 | memLevel: 8, 60 | strategy: STATUS.Z_DEFAULT_STRATEGY, 61 | to: "", 62 | ...options, 63 | }; 64 | 65 | const opt = this.options; 66 | 67 | if (opt.raw && (opt.windowBits > 0)) { 68 | opt.windowBits = -opt.windowBits; 69 | } else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) { 70 | opt.windowBits += 16; 71 | } 72 | 73 | this.strm = new ZStream(); 74 | this.strm.avail_out = 0; 75 | 76 | let status = zlibDeflate.deflateInit2( 77 | this.strm, 78 | opt.level, 79 | opt.method, 80 | opt.windowBits, 81 | opt.memLevel, 82 | opt.strategy, 83 | ); 84 | 85 | if (status !== STATUS.Z_OK) { 86 | throw new Error(msg[status]); 87 | } 88 | 89 | if (opt.header) { 90 | zlibDeflate.deflateSetHeader(this.strm, opt.header); 91 | } 92 | 93 | if (opt.dictionary) { 94 | status = zlibDeflate.deflateSetDictionary(this.strm, opt.dictionary); 95 | 96 | if (status !== STATUS.Z_OK) { 97 | throw new Error(msg[status]); 98 | } 99 | 100 | this._dict_set = true; 101 | } 102 | } 103 | 104 | /** 105 | * Pushes data to be compressed. 106 | * @param data - The data to compress. 107 | * @param mode - The compression mode. 108 | * @returns The compressed data as a Uint8Array. 109 | */ 110 | push(data: Uint8Array, mode: boolean | number): Uint8Array { 111 | const strm = this.strm; 112 | const chunkSize = this.options.chunkSize; 113 | const chunks: Uint8Array[] = []; 114 | let status; 115 | 116 | if (this.ended) { 117 | throw new Error("can not call after ended"); 118 | } 119 | 120 | const _mode = mode === ~~mode 121 | ? 
mode 122 | : (mode === true ? STATUS.Z_FINISH : STATUS.Z_NO_FLUSH); 123 | 124 | strm.input = data; 125 | strm.next_in = 0; 126 | strm.avail_in = strm.input.length; 127 | 128 | do { 129 | if (strm.avail_out === 0) { 130 | strm.output = new Uint8Array(chunkSize); 131 | strm.next_out = 0; 132 | strm.avail_out = chunkSize; 133 | } 134 | status = zlibDeflate.deflate(strm, _mode); /* no bad return value */ 135 | 136 | if (status !== STATUS.Z_STREAM_END && status !== STATUS.Z_OK) { 137 | this.ended = true; 138 | throw new Error(this.strm.msg); 139 | } 140 | if ( 141 | strm.avail_out === 0 || 142 | (strm.avail_in === 0 && 143 | (_mode === STATUS.Z_FINISH || _mode === STATUS.Z_SYNC_FLUSH)) 144 | ) { 145 | chunks.push(strm.output!.subarray(0, strm.next_out)); 146 | } 147 | } while ( 148 | (strm.avail_in > 0 || strm.avail_out === 0) && 149 | status !== STATUS.Z_STREAM_END 150 | ); 151 | 152 | // Finalize on the last chunk. 153 | if (_mode === STATUS.Z_FINISH) { 154 | status = zlibDeflate.deflateEnd(this.strm); 155 | this.ended = true; 156 | if (status !== STATUS.Z_OK) throw new Error(this.strm.msg); 157 | } 158 | 159 | // callback interim results if Z_SYNC_FLUSH. 160 | if (_mode === STATUS.Z_SYNC_FLUSH) { 161 | strm.avail_out = 0; 162 | } 163 | 164 | return concatUint8Array(chunks); 165 | } 166 | } 167 | 168 | /** 169 | * Compresses the input data using deflate algorithm. 170 | * @param input - The data to compress. 171 | * @param options - Options for the deflate operation. 172 | * @returns The compressed data as a Uint8Array. 
173 | */ 174 | export function deflate( 175 | input: Uint8Array, 176 | options: DeflateOptions = {}, 177 | ): Uint8Array { 178 | const deflator = new Deflate(options); 179 | const result = deflator.push(input, true); 180 | // That will never happens, if you don't cheat with options :) 181 | if (deflator.err) throw deflator.msg || msg[deflator.err as CODE]; 182 | return result; 183 | } 184 | 185 | /** 186 | * Compresses the input data using raw deflate algorithm. 187 | * @param input - The data to compress. 188 | * @param options - Options for the deflate operation. 189 | * @returns The compressed data as a Uint8Array. 190 | */ 191 | export function deflateRaw( 192 | input: Uint8Array, 193 | options: DeflateOptions = {}, 194 | ): Uint8Array { 195 | options.raw = true; 196 | return deflate(input, options); 197 | } 198 | 199 | /** 200 | * Compresses the input data using gzip format. 201 | * @param input - The data to compress. 202 | * @param options - Options for the deflate operation. 203 | * @returns The compressed data as a Uint8Array. 204 | */ 205 | export function gzip( 206 | input: Uint8Array, 207 | options: DeflateOptions = {}, 208 | ): Uint8Array { 209 | options.gzip = true; 210 | return deflate(input, options); 211 | } 212 | -------------------------------------------------------------------------------- /zlib/inflate.ts: -------------------------------------------------------------------------------- 1 | // from https://github.com/nodeca/pako 2 | import { concatUint8Array } from "../utils/uint8.ts"; 3 | import * as zlibInflate from "./zlib/inflate.ts"; 4 | import STATUS from "./zlib/status.ts"; 5 | import { type CODE, message as msg } from "./zlib/messages.ts"; 6 | import ZStream from "./zlib/zstream.ts"; 7 | import GZheader from "./zlib/gzheader.ts"; 8 | 9 | /** 10 | * Options for the Inflate class. 
11 | */ 12 | export interface InflateOptions { 13 | windowBits?: number; // Size of the window for compression 14 | dictionary?: Uint8Array; // Dictionary for decompression 15 | chunkSize?: number; // Size of chunks to process 16 | to?: string; // Output format 17 | raw?: boolean; // Indicates if raw inflation is required 18 | } 19 | 20 | /** 21 | * Required options for the Inflate class. 22 | */ 23 | interface InflateOptionsRequired { 24 | windowBits: number; // Required window size 25 | dictionary?: Uint8Array; // Optional dictionary 26 | chunkSize: number; // Required chunk size 27 | to: string; // Required output format 28 | raw?: boolean; // Indicates if raw inflation is required 29 | } 30 | 31 | /** 32 | * Class for inflating compressed data. 33 | */ 34 | export class Inflate { 35 | err: STATUS = 0; // error code, if happens (0 = Z_OK) 36 | msg = ""; // error message 37 | ended = false; // used to avoid multiple onEnd() calls 38 | strm: ZStream; 39 | options: InflateOptionsRequired; 40 | header: GZheader; 41 | 42 | /** 43 | * Creates an instance of Inflate. 44 | * @param options - Options for the inflation process. 45 | */ 46 | constructor(options: InflateOptions) { 47 | this.options = { 48 | chunkSize: 16384, 49 | windowBits: 0, 50 | to: "", 51 | ...options, 52 | }; 53 | 54 | const opt = this.options; 55 | 56 | // Force window size for `raw` data, if not set directly, 57 | // because we have no header for autodetect. 58 | if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) { 59 | opt.windowBits = -opt.windowBits; 60 | if (opt.windowBits === 0) opt.windowBits = -15; 61 | } 62 | 63 | // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate 64 | if ( 65 | (opt.windowBits >= 0) && (opt.windowBits < 16) && 66 | !(options && options.windowBits) 67 | ) { 68 | opt.windowBits += 32; 69 | } 70 | 71 | // Gzip header has no info about windows size, we can do autodetect only 72 | // for deflate. 
So, if window size not set, force it to max when gzip possible 73 | if ((opt.windowBits > 15) && (opt.windowBits < 48)) { 74 | // bit 3 (16) -> gzipped data 75 | // bit 4 (32) -> autodetect gzip/deflate 76 | if ((opt.windowBits & 15) === 0) { 77 | opt.windowBits |= 15; 78 | } 79 | } 80 | 81 | this.strm = new ZStream(); 82 | this.strm.avail_out = 0; 83 | 84 | let status = zlibInflate.inflateInit2( 85 | this.strm, 86 | opt.windowBits, 87 | ); 88 | 89 | if (status !== STATUS.Z_OK) { 90 | throw new Error(msg[status as CODE]); 91 | } 92 | 93 | this.header = new GZheader(); 94 | zlibInflate.inflateGetHeader(this.strm, this.header); 95 | 96 | // Setup dictionary 97 | if (opt.dictionary) { 98 | if (opt.raw) { //In raw mode we need to set the dictionary early 99 | status = zlibInflate.inflateSetDictionary(this.strm, opt.dictionary); 100 | if (status !== STATUS.Z_OK) { 101 | throw new Error(msg[status as CODE]); 102 | } 103 | } 104 | } 105 | } 106 | 107 | /** 108 | * Pushes data to be inflated. 109 | * @param data - The compressed data to inflate. 110 | * @param mode - The mode for inflation (finish or no flush). 111 | * @returns The inflated data as a Uint8Array. 112 | */ 113 | push(data: Uint8Array, mode: boolean | number): Uint8Array { 114 | const strm = this.strm; 115 | const chunkSize = this.options.chunkSize; 116 | const dictionary = this.options.dictionary; 117 | const chunks: Uint8Array[] = []; 118 | let status; 119 | 120 | // Flag to properly process Z_BUF_ERROR on testing inflate call 121 | // when we check that all output data was flushed. 122 | let allowBufError = false; 123 | 124 | if (this.ended) { 125 | throw new Error("can not call after ended"); 126 | } 127 | 128 | let _mode = (mode === ~~mode) 129 | ? mode 130 | : ((mode === true) ? 
STATUS.Z_FINISH : STATUS.Z_NO_FLUSH); 131 | 132 | strm.input = data; 133 | strm.next_in = 0; 134 | strm.avail_in = strm.input.length; 135 | 136 | do { 137 | if (strm.avail_out === 0) { 138 | strm.output = new Uint8Array(chunkSize); 139 | strm.next_out = 0; 140 | strm.avail_out = chunkSize; 141 | } 142 | 143 | status = zlibInflate.inflate( 144 | strm, 145 | STATUS.Z_NO_FLUSH, 146 | ); /* no bad return value */ 147 | 148 | if (status === STATUS.Z_NEED_DICT && dictionary) { 149 | status = zlibInflate.inflateSetDictionary(this.strm, dictionary); 150 | } 151 | 152 | if (status === STATUS.Z_BUF_ERROR && allowBufError === true) { 153 | status = STATUS.Z_OK; 154 | allowBufError = false; 155 | } 156 | 157 | if (status !== STATUS.Z_STREAM_END && status !== STATUS.Z_OK) { 158 | this.ended = true; 159 | throw new Error(this.strm.msg); 160 | } 161 | 162 | if (strm.next_out) { 163 | if ( 164 | strm.avail_out === 0 || status === STATUS.Z_STREAM_END || 165 | (strm.avail_in === 0 && 166 | (_mode === STATUS.Z_FINISH || _mode === STATUS.Z_SYNC_FLUSH)) 167 | ) { 168 | chunks.push(strm.output!.subarray(0, strm.next_out)); 169 | } 170 | } 171 | 172 | // When no more input data, we should check that internal inflate buffers 173 | // are flushed. The only way to do it when avail_out = 0 - run one more 174 | // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR. 175 | // Here we set flag to process this error properly. 176 | // 177 | // NOTE. Deflate does not return error in this case and does not needs such 178 | // logic. 179 | if (strm.avail_in === 0 && strm.avail_out === 0) { 180 | allowBufError = true; 181 | } 182 | } while ( 183 | (strm.avail_in > 0 || strm.avail_out === 0) && 184 | status !== STATUS.Z_STREAM_END 185 | ); 186 | 187 | if (status === STATUS.Z_STREAM_END) { 188 | _mode = STATUS.Z_FINISH; 189 | } 190 | 191 | // Finalize on the last chunk. 
192 | if (_mode === STATUS.Z_FINISH) { 193 | status = zlibInflate.inflateEnd(this.strm); 194 | this.ended = true; 195 | if (status !== STATUS.Z_OK) throw new Error(this.strm.msg); 196 | } 197 | 198 | // callback interim results if Z_SYNC_FLUSH. 199 | if (_mode === STATUS.Z_SYNC_FLUSH) { 200 | strm.avail_out = 0; 201 | } 202 | 203 | return concatUint8Array(chunks); 204 | } 205 | } 206 | 207 | /** 208 | * Inflates the input data with the given options. 209 | * @param input - The compressed data to inflate. 210 | * @param options - Options for the inflation process. 211 | * @returns The inflated data as a Uint8Array. 212 | */ 213 | export function inflate( 214 | input: Uint8Array, 215 | options: InflateOptions = {}, 216 | ): Uint8Array { 217 | const inflator = new Inflate(options); 218 | const result = inflator.push(input, true); 219 | // That will never happens, if you don't cheat with options :) 220 | if (inflator.err) throw inflator.msg || msg[inflator.err as CODE]; 221 | return result; 222 | } 223 | 224 | /** 225 | * Inflates raw compressed data with the given options. 226 | * @param input - The raw compressed data to inflate. 227 | * @param options - Options for the inflation process. 228 | * @returns The inflated data as a Uint8Array. 229 | */ 230 | export function inflateRaw( 231 | input: Uint8Array, 232 | options: InflateOptions = {}, 233 | ): Uint8Array { 234 | options.raw = true; 235 | return inflate(input, options); 236 | } 237 | 238 | /** 239 | * Alias for the inflate function, specifically for gzip. 
240 | */ 241 | export const gunzip = inflate; 242 | -------------------------------------------------------------------------------- /zlib/mod.ts: -------------------------------------------------------------------------------- 1 | export * from "./deflate.ts"; 2 | export * from "./inflate.ts"; 3 | -------------------------------------------------------------------------------- /zlib/zlib/adler32.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Computes the Adler-32 checksum. 3 | * 4 | * The Adler-32 is a checksum algorithm which may be used to validate data integrity. 5 | * This algorithm divides the input data into two parts for calculation, resulting in a final 32-bit checksum. 6 | * 7 | * @param {number} adler - The initial Adler-32 value, usually set to 1. 8 | * @param {Uint8Array} buf - The buffer containing the data to be processed. 9 | * @param {number} len - The number of bytes from the buffer to process. 10 | * @param {number} pos - The starting position in the buffer. 11 | * @returns {number} The computed Adler-32 checksum. 12 | */ 13 | export default function adler32(adler: number, buf: Uint8Array, len: number, pos: number) { 14 | let s1 = (adler & 0xffff) | 0; 15 | let s2 = ((adler >>> 16) & 0xffff) | 0; 16 | let n = 0; 17 | 18 | while (len !== 0) { 19 | // Set limit ~ twice less than 5552, to keep 20 | // s2 in 31-bits, because we force signed ints. 21 | // in other case %= will fail. 22 | n = len > 2000 ? 2000 : len; 23 | len -= n; 24 | 25 | do { 26 | s1 = (s1 + buf[pos++]) | 0; 27 | s2 = (s2 + s1) | 0; 28 | } while (--n); 29 | 30 | s1 %= 65521; 31 | s2 %= 65521; 32 | } 33 | 34 | return (s1 | (s2 << 16)) | 0; 35 | } 36 | -------------------------------------------------------------------------------- /zlib/zlib/crc32.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Generates a lookup table for the CRC-32 algorithm. 
3 | * 4 | * This function creates a table that is used to speed up the computation of CRC-32 checksums. 5 | * The table is precomputed using the polynomial `0xEDB88320`, which is a common choice for CRC-32. 6 | * 7 | * @returns {number[]} An array of 256 32-bit integers representing the lookup table. 8 | */ 9 | export function makeTable() { 10 | let c: number; 11 | const table = []; 12 | const m = 0xEDB88320; 13 | 14 | for (let n = 0; n < 256; n++) { 15 | c = n; 16 | for (let k = 0; k < 8; k++) { 17 | c = ((c & 1) ? (m ^ (c >>> 1)) : (c >>> 1)); 18 | } 19 | table[n] = c; 20 | } 21 | 22 | return table; 23 | } 24 | 25 | // Create table on load. Just 255 signed longs. Not a problem. 26 | const crcTable = makeTable(); 27 | 28 | /** 29 | * Computes the CRC-32 checksum for a given buffer. 30 | * 31 | * This function calculates the CRC-32 checksum using a precomputed lookup table. 32 | * The initial CRC value is XORed with -1, and the result is XORed again with -1 before returning. 33 | * This is a common practice to ensure the checksum is consistent with standard CRC-32 implementations. 34 | * 35 | * @param {number} crc - The initial CRC-32 value, usually set to 0. 36 | * @param {Uint8Array} buf - The buffer containing the data to be processed. 37 | * @param {number} len - The number of bytes from the buffer to process. 38 | * @param {number} pos - The starting position in the buffer. 39 | * @returns {number} The computed CRC-32 checksum. 
40 | */ 41 | export function crc32(crc: number, buf: Uint8Array, len: number, pos: number) { 42 | const t = crcTable; 43 | const end = pos + len; 44 | const f = 0xFF; 45 | 46 | crc ^= -1; 47 | 48 | for (let i = pos; i < end; i++) { 49 | crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & f]; 50 | } 51 | 52 | return (crc ^ (-1)); // >>> 0; 53 | } 54 | -------------------------------------------------------------------------------- /zlib/zlib/gzheader.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Represents the gzip header structure. 3 | * 4 | * This class encapsulates the fields and properties of a gzip header, which are used during the gzip compression and decompression processes. 5 | * It includes various metadata fields such as text flag, modification time, extra flags, operating system, extra field, file name, comment, header CRC, and completion status. 6 | */ 7 | export default class GZheader { 8 | /* true if compressed data believed to be text */ 9 | text: number = 0; 10 | /* modification time */ 11 | time: number = 0; 12 | /* extra flags (not used when writing a gzip file) */ 13 | xflags: number = 0; 14 | /* operating system */ 15 | os: number = 0; 16 | /* pointer to extra field or Z_NULL if none */ 17 | extra: Uint8Array | null = null; 18 | /* extra field length (valid if extra != Z_NULL) */ 19 | extra_len: number = 0; // Actually, we don't need it in JS, 20 | // but leave for few code modifications 21 | 22 | // 23 | // Setup limits is not necessary because in js we should not preallocate memory 24 | // for inflate use constant limit in 65536 bytes 25 | // 26 | 27 | /* space at extra (only when reading header) */ 28 | // extra_max = 0; 29 | /* pointer to zero-terminated file name or Z_NULL */ 30 | name: string | null = ""; 31 | /* space at name (only when reading header) */ 32 | // name_max = 0; 33 | /* pointer to zero-terminated comment or Z_NULL */ 34 | comment: string | null = ""; 35 | /* space at comment (only when 
reading header) */ 36 | // comm_max = 0; 37 | /* true if there was or will be a header crc */ 38 | hcrc: number = 0; 39 | /* true when done reading gzip header (not used when writing a gzip file) */ 40 | done: boolean = false; 41 | } 42 | -------------------------------------------------------------------------------- /zlib/zlib/inffast.ts: -------------------------------------------------------------------------------- 1 | import type { InflateState } from "./inflate.ts"; 2 | import type ZStream from "./zstream.ts"; 3 | 4 | // See state defs from inflate.js 5 | const BAD = 30; /* got a data error -- remain here until reset */ 6 | const TYPE = 12; /* i: waiting for type bits, including last-flag bit */ 7 | 8 | /* 9 | Decode literal, length, and distance codes and write out the resulting 10 | literal and match bytes until either not enough input or output is 11 | available, an end-of-block is encountered, or a data error is encountered. 12 | When large enough input and output buffers are supplied to inflate(), for 13 | example, a 16K input buffer and a 64K output buffer, more than 95% of the 14 | inflate execution time is spent in this routine. 15 | 16 | Entry assumptions: 17 | 18 | state.mode === LEN 19 | strm.avail_in >= 6 20 | strm.avail_out >= 258 21 | start >= strm.avail_out 22 | state.bits < 8 23 | 24 | On return, state.mode is one of: 25 | 26 | LEN -- ran out of enough output space or enough available input 27 | TYPE -- reached end of block code, inflate() to interpret next block 28 | BAD -- error in block data 29 | 30 | Notes: 31 | 32 | - The maximum input bits used by a length/distance pair is 15 bits for the 33 | length code, 5 bits for the length extra, 15 bits for the distance code, 34 | and 13 bits for the distance extra. This totals 48 bits, or six bytes. 35 | Therefore if strm.avail_in >= 6, then there is enough input to avoid 36 | checking for available input while decoding. 
37 | 38 | - The maximum bytes that a single length/distance pair can output is 258 39 | bytes, which is the maximum length that can be coded. inflate_fast() 40 | requires strm.avail_out >= 258 for each loop to avoid checking for 41 | output space. 42 | */ 43 | export default function inflate_fast(strm: ZStream, start: number) { 44 | let _in; /* local strm.input */ 45 | let _out; /* local strm.output */ 46 | // Use `s_window` instead `window`, avoid conflict with instrumentation tools 47 | let hold; /* local strm.hold */ 48 | let bits; /* local strm.bits */ 49 | let here; /* retrieved table entry */ 50 | let op; /* code bits, operation, extra bits, or */ 51 | /* window position, window bytes to copy */ 52 | let len; /* match length, unused bytes */ 53 | let dist; /* match distance */ 54 | let from; /* where to copy match from */ 55 | let from_source; 56 | 57 | /* copy state to local variables */ 58 | const state = strm.state as InflateState; 59 | //here = state.here; 60 | _in = strm.next_in; 61 | const input = strm.input!; // JS specific, because we have no pointers 62 | const last = _in + (strm.avail_in - 5); /* have enough input while in < last */ 63 | _out = strm.next_out; 64 | const output = strm.output!; // JS specific, because we have no pointers 65 | const beg = _out - (start - strm.avail_out); /* inflate()'s initial strm.output */ 66 | const end = _out + (strm.avail_out - 257); /* while out < end, enough space available */ 67 | //#ifdef INFLATE_STRICT 68 | const dmax = state.dmax; /* maximum distance from zlib header */ 69 | //#endif 70 | const wsize = state.wsize; /* window size or zero if not using window */ 71 | const whave = state.whave; /* valid bytes in the window */ 72 | const wnext = state.wnext; /* window write index */ 73 | const s_window = state.window!; /* allocated sliding window, if wsize != 0 */ 74 | hold = state.hold; 75 | bits = state.bits; 76 | const lcode = state.lencode!; /* local strm.lencode */ 77 | const dcode = state.distcode!; /* 
local strm.distcode */ 78 | const lmask = (1 << state.lenbits) - 1; /* mask for first level of length codes */ 79 | const dmask = (1 << state.distbits) - 1; /* mask for first level of distance codes */ 80 | 81 | /* decode literals and length/distances until end-of-block or not enough 82 | input data or output space */ 83 | 84 | top: 85 | do { 86 | if (bits < 15) { 87 | hold += input[_in++] << bits; 88 | bits += 8; 89 | hold += input[_in++] << bits; 90 | bits += 8; 91 | } 92 | 93 | here = lcode[hold & lmask]; 94 | 95 | dolen: 96 | for (;;) { // Goto emulation 97 | op = here >>> 24 /*here.bits*/; 98 | hold >>>= op; 99 | bits -= op; 100 | op = (here >>> 16) & 0xff /*here.op*/; 101 | if (op === 0) { 102 | /* literal */ 103 | //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 104 | // "inflate: literal '%c'\n" : 105 | // "inflate: literal 0x%02x\n", here.val)); 106 | output[_out++] = here & 0xffff /*here.val*/; 107 | } else if (op & 16) { 108 | /* length base */ 109 | len = here & 0xffff /*here.val*/; 110 | op &= 15; /* number of extra bits */ 111 | if (op) { 112 | if (bits < op) { 113 | hold += input[_in++] << bits; 114 | bits += 8; 115 | } 116 | len += hold & ((1 << op) - 1); 117 | hold >>>= op; 118 | bits -= op; 119 | } 120 | //Tracevv((stderr, "inflate: length %u\n", len)); 121 | if (bits < 15) { 122 | hold += input[_in++] << bits; 123 | bits += 8; 124 | hold += input[_in++] << bits; 125 | bits += 8; 126 | } 127 | here = dcode[hold & dmask]; 128 | 129 | dodist: 130 | for (;;) { // goto emulation 131 | op = here >>> 24 /*here.bits*/; 132 | hold >>>= op; 133 | bits -= op; 134 | op = (here >>> 16) & 0xff /*here.op*/; 135 | 136 | if (op & 16) { 137 | /* distance base */ 138 | dist = here & 0xffff /*here.val*/; 139 | op &= 15; /* number of extra bits */ 140 | if (bits < op) { 141 | hold += input[_in++] << bits; 142 | bits += 8; 143 | if (bits < op) { 144 | hold += input[_in++] << bits; 145 | bits += 8; 146 | } 147 | } 148 | dist += hold & ((1 << op) - 1); 149 | 
//#ifdef INFLATE_STRICT 150 | if (dist > dmax) { 151 | strm.msg = "invalid distance too far back"; 152 | state.mode = BAD; 153 | break top; 154 | } 155 | //#endif 156 | hold >>>= op; 157 | bits -= op; 158 | //Tracevv((stderr, "inflate: distance %u\n", dist)); 159 | op = _out - beg; /* max distance in output */ 160 | if (dist > op) { 161 | /* see if copy from window */ 162 | op = dist - op; /* distance back in window */ 163 | if (op > whave) { 164 | if (state.sane) { 165 | strm.msg = "invalid distance too far back"; 166 | state.mode = BAD; 167 | break top; 168 | } 169 | 170 | // (!) This block is disabled in zlib defaults, 171 | // don't enable it for binary compatibility 172 | //#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR 173 | // if (len <= op - whave) { 174 | // do { 175 | // output[_out++] = 0; 176 | // } while (--len); 177 | // continue top; 178 | // } 179 | // len -= op - whave; 180 | // do { 181 | // output[_out++] = 0; 182 | // } while (--op > whave); 183 | // if (op === 0) { 184 | // from = _out - dist; 185 | // do { 186 | // output[_out++] = output[from++]; 187 | // } while (--len); 188 | // continue top; 189 | // } 190 | //#endif 191 | } 192 | from = 0; // window index 193 | from_source = s_window; 194 | if (wnext === 0) { 195 | /* very common case */ 196 | from += wsize - op; 197 | if (op < len) { 198 | /* some from window */ 199 | len -= op; 200 | do { 201 | output[_out++] = s_window[from++]; 202 | } while (--op); 203 | from = _out - dist; /* rest from output */ 204 | from_source = output; 205 | } 206 | } else if (wnext < op) { 207 | /* wrap around window */ 208 | from += wsize + wnext - op; 209 | op -= wnext; 210 | if (op < len) { 211 | /* some from end of window */ 212 | len -= op; 213 | do { 214 | output[_out++] = s_window[from++]; 215 | } while (--op); 216 | from = 0; 217 | if (wnext < len) { 218 | /* some from start of window */ 219 | op = wnext; 220 | len -= op; 221 | do { 222 | output[_out++] = s_window[from++]; 223 | } while (--op); 224 | 
from = _out - dist; /* rest from output */ 225 | from_source = output; 226 | } 227 | } 228 | } else { 229 | /* contiguous in window */ 230 | from += wnext - op; 231 | if (op < len) { 232 | /* some from window */ 233 | len -= op; 234 | do { 235 | output[_out++] = s_window[from++]; 236 | } while (--op); 237 | from = _out - dist; /* rest from output */ 238 | from_source = output; 239 | } 240 | } 241 | while (len > 2) { 242 | output[_out++] = from_source[from++]; 243 | output[_out++] = from_source[from++]; 244 | output[_out++] = from_source[from++]; 245 | len -= 3; 246 | } 247 | if (len) { 248 | output[_out++] = from_source[from++]; 249 | if (len > 1) { 250 | output[_out++] = from_source[from++]; 251 | } 252 | } 253 | } else { 254 | from = _out - dist; /* copy direct from output */ 255 | do { 256 | /* minimum length is three */ 257 | output[_out++] = output[from++]; 258 | output[_out++] = output[from++]; 259 | output[_out++] = output[from++]; 260 | len -= 3; 261 | } while (len > 2); 262 | if (len) { 263 | output[_out++] = output[from++]; 264 | if (len > 1) { 265 | output[_out++] = output[from++]; 266 | } 267 | } 268 | } 269 | } else if ((op & 64) === 0) { 270 | /* 2nd level distance code */ 271 | here = 272 | dcode[(here & 0xffff) /*here.val*/ + (hold & ((1 << op) - 1))]; 273 | continue dodist; 274 | } else { 275 | strm.msg = "invalid distance code"; 276 | state.mode = BAD; 277 | break top; 278 | } 279 | 280 | break; // need to emulate goto via "continue" 281 | } 282 | } else if ((op & 64) === 0) { 283 | /* 2nd level length code */ 284 | here = lcode[(here & 0xffff) /*here.val*/ + (hold & ((1 << op) - 1))]; 285 | continue dolen; 286 | } else if (op & 32) { 287 | /* end-of-block */ 288 | //Tracevv((stderr, "inflate: end of block\n")); 289 | state.mode = TYPE; 290 | break top; 291 | } else { 292 | strm.msg = "invalid literal/length code"; 293 | state.mode = BAD; 294 | break top; 295 | } 296 | 297 | break; // need to emulate goto via "continue" 298 | } 299 | } while (_in 
< last && _out < end); 300 | 301 | /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ 302 | len = bits >> 3; 303 | _in -= len; 304 | bits -= len << 3; 305 | hold &= (1 << bits) - 1; 306 | 307 | /* update state and return */ 308 | strm.next_in = _in; 309 | strm.next_out = _out; 310 | strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last)); 311 | strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end)); 312 | state.hold = hold; 313 | state.bits = bits; 314 | return; 315 | } 316 | -------------------------------------------------------------------------------- /zlib/zlib/inftrees.ts: -------------------------------------------------------------------------------- 1 | const MAXBITS = 15; 2 | const ENOUGH_LENS = 852; 3 | const ENOUGH_DISTS = 592; 4 | //const ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); 5 | 6 | const CODES = 0; 7 | const LENS = 1; 8 | const DISTS = 2; 9 | 10 | const lbase = [ 11 | /* Length codes 257..285 base */ 12 | 3, 13 | 4, 14 | 5, 15 | 6, 16 | 7, 17 | 8, 18 | 9, 19 | 10, 20 | 11, 21 | 13, 22 | 15, 23 | 17, 24 | 19, 25 | 23, 26 | 27, 27 | 31, 28 | 35, 29 | 43, 30 | 51, 31 | 59, 32 | 67, 33 | 83, 34 | 99, 35 | 115, 36 | 131, 37 | 163, 38 | 195, 39 | 227, 40 | 258, 41 | 0, 42 | 0, 43 | ]; 44 | 45 | const lext = [ 46 | /* Length codes 257..285 extra */ 47 | 16, 48 | 16, 49 | 16, 50 | 16, 51 | 16, 52 | 16, 53 | 16, 54 | 16, 55 | 17, 56 | 17, 57 | 17, 58 | 17, 59 | 18, 60 | 18, 61 | 18, 62 | 18, 63 | 19, 64 | 19, 65 | 19, 66 | 19, 67 | 20, 68 | 20, 69 | 20, 70 | 20, 71 | 21, 72 | 21, 73 | 21, 74 | 21, 75 | 16, 76 | 72, 77 | 78, 78 | ]; 79 | 80 | const dbase = [ 81 | /* Distance codes 0..29 base */ 82 | 1, 83 | 2, 84 | 3, 85 | 4, 86 | 5, 87 | 7, 88 | 9, 89 | 13, 90 | 17, 91 | 25, 92 | 33, 93 | 49, 94 | 65, 95 | 97, 96 | 129, 97 | 193, 98 | 257, 99 | 385, 100 | 513, 101 | 769, 102 | 1025, 103 | 1537, 104 | 2049, 105 | 3073, 106 | 4097, 107 | 6145, 108 | 8193, 109 | 12289, 110 | 16385, 111 | 24577, 112 | 0, 113 | 
0, 114 | ]; 115 | 116 | const dext = [ 117 | /* Distance codes 0..29 extra */ 118 | 16, 119 | 16, 120 | 16, 121 | 16, 122 | 17, 123 | 17, 124 | 18, 125 | 18, 126 | 19, 127 | 19, 128 | 20, 129 | 20, 130 | 21, 131 | 21, 132 | 22, 133 | 22, 134 | 23, 135 | 23, 136 | 24, 137 | 24, 138 | 25, 139 | 25, 140 | 26, 141 | 26, 142 | 27, 143 | 27, 144 | 28, 145 | 28, 146 | 29, 147 | 29, 148 | 64, 149 | 64, 150 | ]; 151 | 152 | interface inflateTableOption { 153 | bits: number; 154 | //table_index: number; 155 | } 156 | 157 | /** 158 | * Generates a Huffman table for the inflate process. 159 | * 160 | * This function constructs a Huffman table based on the given code lengths and other parameters. It is used during the inflation (decompression) process to decode the compressed data. 161 | * 162 | * @param {number} type - The type of table to generate (e.g., CODES, LENS, DISTS). 163 | * @param {Uint16Array} lens - An array of code lengths. 164 | * @param {number} lens_index - The starting index in the `lens` array. 165 | * @param {number} codes - The number of codes. 166 | * @param {Uint32Array} table - The table to be filled with Huffman codes. 167 | * @param {number} table_index - The starting index in the `table` array. 168 | * @param {Uint16Array} work - A work array used for sorting. 169 | * @param {inflateTableOption} opts - Options for the table generation. 170 | * @returns {number} - Returns `0` if the operation is successful, or an error code if there are issues with the input parameters or the generated table. 
171 | */ 172 | export default function inflate_table( 173 | type: number, 174 | lens: Uint16Array, 175 | lens_index: number, 176 | codes: number, 177 | table: Uint32Array, 178 | table_index: number, 179 | work: Uint16Array, 180 | opts: inflateTableOption, 181 | ): number { 182 | const bits = opts.bits; 183 | //here = opts.here; /* table entry for duplication */ 184 | 185 | let len = 0; /* a code's length in bits */ 186 | let sym = 0; /* index of code symbols */ 187 | let min = 0, max = 0; /* minimum and maximum code lengths */ 188 | let root = 0; /* number of index bits for root table */ 189 | let curr = 0; /* number of index bits for current table */ 190 | let drop = 0; /* code bits to drop for sub-table */ 191 | let left = 0; /* number of prefix codes available */ 192 | let used = 0; /* code entries in table used */ 193 | let huff = 0; /* Huffman code */ 194 | let incr; /* for incrementing code, index */ 195 | let fill; /* index for replicating entries */ 196 | let low; /* low bits for current root entry */ 197 | let next; /* next available space in table */ 198 | let base = null; /* base value table to use */ 199 | let base_index = 0; 200 | // let shoextra; /* extra bits table to use */ 201 | let end; /* use base and extra for symbol > end */ 202 | const count = new Uint16Array(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */ 203 | const offs = new Uint16Array(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */ 204 | let extra = null; 205 | let extra_index = 0; 206 | 207 | let here_bits, here_op, here_val; 208 | 209 | /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ 210 | for (len = 0; len <= MAXBITS; len++) { 211 | count[len] = 0; 212 | } 213 | for (sym = 0; sym < codes; sym++) { 214 | count[lens[lens_index + sym]]++; 215 | } 216 | 217 | /* bound code lengths, force root to be within code lengths */ 218 | root = bits; 219 | for (max = MAXBITS; max >= 1; max--) { 220 | if (count[max] !== 0) break; 221 | } 222 
| if (root > max) { 223 | root = max; 224 | } 225 | if (max === 0) { 226 | /* no symbols to code at all */ 227 | //table.op[opts.table_index] = 64; //here.op = (let char)64; /* invalid code marker */ 228 | //table.bits[opts.table_index] = 1; //here.bits = (let char)1; 229 | //table.val[opts.table_index++] = 0; //here.val = (let short)0; 230 | table[table_index++] = (1 << 24) | (64 << 16) | 0; 231 | 232 | //table.op[opts.table_index] = 64; 233 | //table.bits[opts.table_index] = 1; 234 | //table.val[opts.table_index++] = 0; 235 | table[table_index++] = (1 << 24) | (64 << 16) | 0; 236 | 237 | opts.bits = 1; 238 | return 0; /* no symbols, but wait for decoding to report error */ 239 | } 240 | for (min = 1; min < max; min++) { 241 | if (count[min] !== 0) break; 242 | } 243 | if (root < min) { 244 | root = min; 245 | } 246 | 247 | /* check for an over-subscribed or incomplete set of lengths */ 248 | left = 1; 249 | for (len = 1; len <= MAXBITS; len++) { 250 | left <<= 1; 251 | left -= count[len]; 252 | if (left < 0) { 253 | return -1; 254 | } /* over-subscribed */ 255 | } 256 | if (left > 0 && (type === CODES || max !== 1)) { 257 | return -1; /* incomplete set */ 258 | } 259 | 260 | /* generate offsets into symbol table for each length for sorting */ 261 | offs[1] = 0; 262 | for (len = 1; len < MAXBITS; len++) { 263 | offs[len + 1] = offs[len] + count[len]; 264 | } 265 | 266 | /* sort symbols by length, by symbol order within each length */ 267 | for (sym = 0; sym < codes; sym++) { 268 | if (lens[lens_index + sym] !== 0) { 269 | work[offs[lens[lens_index + sym]]++] = sym; 270 | } 271 | } 272 | 273 | /* set up for code type */ 274 | // poor man optimization - use if-else instead of switch, 275 | // to avoid deopts in old v8 276 | if (type === CODES) { 277 | base = extra = work; /* dummy value--not used */ 278 | end = 19; 279 | } else if (type === LENS) { 280 | base = lbase; 281 | base_index -= 257; 282 | extra = lext; 283 | extra_index -= 257; 284 | end = 256; 285 | } 
else { 286 | /* DISTS */ 287 | base = dbase; 288 | extra = dext; 289 | end = -1; 290 | } 291 | 292 | /* initialize opts for loop */ 293 | huff = 0; /* starting code */ 294 | sym = 0; /* starting code symbol */ 295 | len = min; /* starting code length */ 296 | next = table_index; /* current table to fill in */ 297 | curr = root; /* current table index bits */ 298 | drop = 0; /* current bits to drop from code for index */ 299 | low = -1; /* trigger new sub-table when len > root */ 300 | used = 1 << root; /* use root table entries */ 301 | const mask = used - 1; /* mask for comparing low */ 302 | 303 | /* check available table space */ 304 | if ( 305 | (type === LENS && used > ENOUGH_LENS) || 306 | (type === DISTS && used > ENOUGH_DISTS) 307 | ) { 308 | return 1; 309 | } 310 | 311 | /* process all codes and make table entries */ 312 | for (;;) { 313 | /* create table entry */ 314 | here_bits = len - drop; 315 | if (work[sym] < end) { 316 | here_op = 0; 317 | here_val = work[sym]; 318 | } else if (work[sym] > end) { 319 | here_op = extra[extra_index + work[sym]]; 320 | here_val = base[base_index + work[sym]]; 321 | } else { 322 | here_op = 32 + 64; /* end of block */ 323 | here_val = 0; 324 | } 325 | 326 | /* replicate for those indices with low len bits equal to huff */ 327 | incr = 1 << (len - drop); 328 | fill = 1 << curr; 329 | min = fill; /* save offset to next table */ 330 | do { 331 | fill -= incr; 332 | table[next + (huff >> drop) + fill] = (here_bits << 24) | 333 | (here_op << 16) | here_val | 0; 334 | } while (fill !== 0); 335 | 336 | /* backwards increment the len-bit code huff */ 337 | incr = 1 << (len - 1); 338 | while (huff & incr) { 339 | incr >>= 1; 340 | } 341 | if (incr !== 0) { 342 | huff &= incr - 1; 343 | huff += incr; 344 | } else { 345 | huff = 0; 346 | } 347 | 348 | /* go to next symbol, update count, len */ 349 | sym++; 350 | if (--count[len] === 0) { 351 | if (len === max) break; 352 | len = lens[lens_index + work[sym]]; 353 | } 354 | 355 | /* 
create new sub-table if needed */ 356 | if (len > root && (huff & mask) !== low) { 357 | /* if first time, transition to sub-tables */ 358 | if (drop === 0) { 359 | drop = root; 360 | } 361 | 362 | /* increment past last table */ 363 | next += min; /* here min is 1 << curr */ 364 | 365 | /* determine length of next table */ 366 | curr = len - drop; 367 | left = 1 << curr; 368 | while (curr + drop < max) { 369 | left -= count[curr + drop]; 370 | if (left <= 0) break; 371 | curr++; 372 | left <<= 1; 373 | } 374 | 375 | /* check for enough space */ 376 | used += 1 << curr; 377 | if ( 378 | (type === LENS && used > ENOUGH_LENS) || 379 | (type === DISTS && used > ENOUGH_DISTS) 380 | ) { 381 | return 1; 382 | } 383 | 384 | /* point entry in root table to sub-table */ 385 | low = huff & mask; 386 | /*table.op[low] = curr; 387 | table.bits[low] = root; 388 | table.val[low] = next - opts.table_index;*/ 389 | table[low] = (root << 24) | (curr << 16) | (next - table_index) | 0; 390 | } 391 | } 392 | 393 | /* fill in remaining table entry if code is incomplete (guaranteed to have 394 | at most one remaining entry, since if the code is incomplete, the 395 | maximum code length that was allowed to get this far is one bit) */ 396 | if (huff !== 0) { 397 | //table.op[next + huff] = 64; /* invalid code marker */ 398 | //table.bits[next + huff] = len - drop; 399 | //table.val[next + huff] = 0; 400 | table[next + huff] = ((len - drop) << 24) | (64 << 16) | 0; 401 | } 402 | 403 | /* set return parameters */ 404 | //opts.table_index += used; 405 | opts.bits = root; 406 | return 0; 407 | } 408 | -------------------------------------------------------------------------------- /zlib/zlib/messages.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Maps error codes to their corresponding error messages. 3 | * 4 | * This object provides a mapping from integer error codes to human-readable error messages. 
5 | * These messages can be used to provide more detailed feedback about the status of operations. 6 | */ 7 | export const message = { 8 | 2: "need dictionary", /* Z_NEED_DICT 2 */ 9 | 1: "stream end", /* Z_STREAM_END 1 */ 10 | 0: "", /* Z_OK 0 */ 11 | "-1": "file error", /* Z_ERRNO (-1) */ 12 | "-2": "stream error", /* Z_STREAM_ERROR (-2) */ 13 | "-3": "data error", /* Z_DATA_ERROR (-3) */ 14 | "-4": "insufficient memory", /* Z_MEM_ERROR (-4) */ 15 | "-5": "buffer error", /* Z_BUF_ERROR (-5) */ 16 | "-6": "incompatible version", /* Z_VERSION_ERROR (-6) */ 17 | }; 18 | 19 | /** 20 | * Type representing the possible error codes. 21 | * 22 | * This type defines the set of valid error codes that can be returned by various functions in the compression/decompression process. 23 | */ 24 | export type CODE = 2 | 1 | 0 | "-1" | "-2" | "-3" | "-4" | "-5" | "-6"; 25 | -------------------------------------------------------------------------------- /zlib/zlib/status.ts: -------------------------------------------------------------------------------- 1 | enum STATUS { 2 | /* Allowed flush values; see deflate() and inflate() below for details */ 3 | Z_NO_FLUSH = 0, 4 | Z_PARTIAL_FLUSH = 1, 5 | Z_SYNC_FLUSH = 2, 6 | Z_FULL_FLUSH = 3, 7 | Z_FINISH = 4, 8 | Z_BLOCK = 5, 9 | Z_TREES = 6, 10 | 11 | /* Return codes for the compression/decompression functions. Negative values 12 | * are errors, positive values are used for special but normal events. 
13 | */ 14 | Z_OK = 0, 15 | Z_STREAM_END = 1, 16 | Z_NEED_DICT = 2, 17 | Z_ERRNO = -1, 18 | Z_STREAM_ERROR = -2, 19 | Z_DATA_ERROR = -3, 20 | //Z_MEM_ERROR= -4, 21 | Z_BUF_ERROR = -5, 22 | //Z_VERSION_ERROR= -6, 23 | 24 | /* compression levels */ 25 | Z_NO_COMPRESSION = 0, 26 | Z_BEST_SPEED = 1, 27 | Z_BEST_COMPRESSION = 9, 28 | Z_DEFAULT_COMPRESSION = -1, 29 | 30 | Z_FILTERED = 1, 31 | Z_HUFFMAN_ONLY = 2, 32 | Z_RLE = 3, 33 | Z_FIXED = 4, 34 | Z_DEFAULT_STRATEGY = 0, 35 | 36 | /* Possible values of the data_type field (though see inflate()) */ 37 | Z_BINARY = 0, 38 | Z_TEXT = 1, 39 | //Z_ASCII= 1, // = Z_TEXT (deprecated) 40 | Z_UNKNOWN = 2, 41 | 42 | /* The deflate compression method */ 43 | Z_DEFLATED = 8, 44 | //Z_NULL= null // Use -1 or null inline, depending on var type 45 | } 46 | 47 | /** 48 | * Enum representing various statuses and constants used in the compression and decompression process. 49 | * 50 | * This enum defines the allowed flush values, return codes, compression levels, strategies, data types, and the compression method. 
51 | */ 52 | export default STATUS; 53 | -------------------------------------------------------------------------------- /zlib/zlib/trees.ts: -------------------------------------------------------------------------------- 1 | import type { DeflateState } from "./deflate.ts"; 2 | 3 | //const Z_FILTERED = 1; 4 | //const Z_HUFFMAN_ONLY = 2; 5 | //const Z_RLE = 3; 6 | const Z_FIXED = 4; 7 | //const Z_DEFAULT_STRATEGY = 0; 8 | 9 | /* Possible values of the data_type field (though see inflate()) */ 10 | const Z_BINARY = 0; 11 | const Z_TEXT = 1; 12 | //const Z_ASCII = 1; // = Z_TEXT 13 | const Z_UNKNOWN = 2; 14 | 15 | function zero(buf: Uint16Array) { 16 | buf.fill(0, 0, buf.length); 17 | } 18 | 19 | // From zutil.h 20 | 21 | const STORED_BLOCK = 0; 22 | const STATIC_TREES = 1; 23 | const DYN_TREES = 2; 24 | /* The three kinds of block type */ 25 | 26 | const MIN_MATCH = 3; 27 | const MAX_MATCH = 258; 28 | /* The minimum and maximum match lengths */ 29 | 30 | // From deflate.h 31 | /* =========================================================================== 32 | * Internal compression state. 
33 | */ 34 | 35 | const LENGTH_CODES = 29; 36 | /* number of length codes, not counting the special END_BLOCK code */ 37 | 38 | const LITERALS = 256; 39 | /* number of literal bytes 0..255 */ 40 | 41 | const L_CODES = LITERALS + 1 + LENGTH_CODES; 42 | /* number of Literal or Length codes, including the END_BLOCK code */ 43 | 44 | const D_CODES = 30; 45 | /* number of distance codes */ 46 | 47 | const BL_CODES = 19; 48 | /* number of codes used to transfer the bit lengths */ 49 | 50 | const HEAP_SIZE = 2 * L_CODES + 1; 51 | /* maximum heap size */ 52 | 53 | const MAX_BITS = 15; 54 | /* All codes must not exceed MAX_BITS bits */ 55 | 56 | const Buf_size = 16; 57 | /* size of bit buffer in bi_buf */ 58 | 59 | /* =========================================================================== 60 | * Constants 61 | */ 62 | 63 | const MAX_BL_BITS = 7; 64 | /* Bit length codes must not exceed MAX_BL_BITS bits */ 65 | 66 | const END_BLOCK = 256; 67 | /* end of block literal code */ 68 | 69 | const REP_3_6 = 16; 70 | /* repeat previous bit length 3-6 times (2 bits of repeat count) */ 71 | 72 | const REPZ_3_10 = 17; 73 | /* repeat a zero length 3-10 times (3 bits of repeat count) */ 74 | 75 | const REPZ_11_138 = 18; 76 | /* repeat a zero length 11-138 times (7 bits of repeat count) */ 77 | 78 | /* eslint-disable comma-spacing,array-bracket-spacing */ 79 | const extra_lbits = /* extra bits for each length code */ 80 | [ 81 | 0, 82 | 0, 83 | 0, 84 | 0, 85 | 0, 86 | 0, 87 | 0, 88 | 0, 89 | 1, 90 | 1, 91 | 1, 92 | 1, 93 | 2, 94 | 2, 95 | 2, 96 | 2, 97 | 3, 98 | 3, 99 | 3, 100 | 3, 101 | 4, 102 | 4, 103 | 4, 104 | 4, 105 | 5, 106 | 5, 107 | 5, 108 | 5, 109 | 0, 110 | ]; 111 | 112 | const extra_dbits = /* extra bits for each distance code */ 113 | [ 114 | 0, 115 | 0, 116 | 0, 117 | 0, 118 | 1, 119 | 1, 120 | 2, 121 | 2, 122 | 3, 123 | 3, 124 | 4, 125 | 4, 126 | 5, 127 | 5, 128 | 6, 129 | 6, 130 | 7, 131 | 7, 132 | 8, 133 | 8, 134 | 9, 135 | 9, 136 | 10, 137 | 10, 138 | 11, 139 | 11, 
140 | 12, 141 | 12, 142 | 13, 143 | 13, 144 | ]; 145 | 146 | const extra_blbits = /* extra bits for each bit length code */ 147 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7]; 148 | 149 | const bl_order = [ 150 | 16, 151 | 17, 152 | 18, 153 | 0, 154 | 8, 155 | 7, 156 | 9, 157 | 6, 158 | 10, 159 | 5, 160 | 11, 161 | 4, 162 | 12, 163 | 3, 164 | 13, 165 | 2, 166 | 14, 167 | 1, 168 | 15, 169 | ]; 170 | /* eslint-enable comma-spacing,array-bracket-spacing */ 171 | 172 | /* The lengths of the bit length codes are sent in order of decreasing 173 | * probability, to avoid transmitting the lengths for unused bit length codes. 174 | */ 175 | 176 | /* =========================================================================== 177 | * Local data. These are initialized only once. 178 | */ 179 | 180 | // We pre-fill arrays with 0 to avoid uninitialized gaps 181 | 182 | const DIST_CODE_LEN = 512; /* see definition of array dist_code below */ 183 | 184 | // !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1 185 | const static_ltree = new Uint16Array((L_CODES + 2) * 2); 186 | zero(static_ltree); 187 | /* The static literal tree. Since the bit lengths are imposed, there is no 188 | * need for the L_CODES extra codes used during heap construction. However 189 | * The codes 286 and 287 are needed to build a canonical tree (see _tr_init 190 | * below). 191 | */ 192 | 193 | const static_dtree = new Uint16Array(D_CODES * 2); 194 | zero(static_dtree); 195 | /* The static distance tree. (Actually a trivial tree since all codes use 196 | * 5 bits.) 197 | */ 198 | 199 | const _dist_code = new Uint16Array(DIST_CODE_LEN); 200 | zero(_dist_code); 201 | /* Distance codes. The first 256 values correspond to the distances 202 | * 3 .. 258, the last 256 values correspond to the top 8 bits of 203 | * the 15 bit distances. 
204 | */ 205 | 206 | const _length_code = new Uint16Array(MAX_MATCH - MIN_MATCH + 1); 207 | zero(_length_code); 208 | /* length code for each normalized match length (0 == MIN_MATCH) */ 209 | 210 | const base_length = new Uint16Array(LENGTH_CODES); 211 | zero(base_length); 212 | /* First normalized length for each code (0 = MIN_MATCH) */ 213 | 214 | const base_dist = new Uint16Array(D_CODES); 215 | zero(base_dist); 216 | /* First normalized distance for each code (0 = distance of 1) */ 217 | 218 | class StaticTreeDesc { 219 | static_tree: Uint16Array | null; /* static tree or NULL */ 220 | extra_bits: number[] | null; /* extra bits for each code or NULL */ 221 | extra_base: number; /* base index for extra_bits */ 222 | elems: number; /* max number of elements in the tree */ 223 | max_length: number; /* max bit length for the codes */ 224 | 225 | // show if `static_tree` has data or dummy - needed for monomorphic objects 226 | has_stree: boolean; 227 | 228 | constructor( 229 | static_tree: Uint16Array | null, 230 | extra_bits: number[] | null, 231 | extra_base: number, 232 | elems: number, 233 | max_length: number, 234 | ) { 235 | this.static_tree = static_tree; /* static tree or NULL */ 236 | this.extra_bits = extra_bits; /* extra bits for each code or NULL */ 237 | this.extra_base = extra_base; /* base index for extra_bits */ 238 | this.elems = elems; /* max number of elements in the tree */ 239 | this.max_length = max_length; /* max bit length for the codes */ 240 | 241 | // show if `static_tree` has data or dummy - needed for monomorphic objects 242 | this.has_stree = static_tree != null && static_tree.length > 0; 243 | } 244 | } 245 | 246 | let static_l_desc: StaticTreeDesc; 247 | let static_d_desc: StaticTreeDesc; 248 | let static_bl_desc: StaticTreeDesc; 249 | 250 | /** 251 | * Represents a tree descriptor used in the compression process. 
252 | * 253 | * This class encapsulates the dynamic tree, the maximum code with a non-zero frequency, and the corresponding static tree. 254 | * It is used to manage and manipulate the trees during the compression and decompression processes. 255 | */ 256 | export class TreeDesc { 257 | /** 258 | * The dynamic tree used in the compression process. 259 | * @type {Uint16Array} 260 | */ 261 | dyn_tree: Uint16Array; 262 | 263 | /** 264 | * The largest code with a non-zero frequency in the dynamic tree. 265 | * @type {number} 266 | */ 267 | max_code: number; 268 | 269 | /** 270 | * The corresponding static tree used in the compression process. 271 | * @type {StaticTreeDesc} 272 | */ 273 | stat_desc: StaticTreeDesc; 274 | 275 | /** 276 | * Constructs a new TreeDesc instance. 277 | * 278 | * @param {Uint16Array} dyn_tree - The dynamic tree. 279 | * @param {StaticTreeDesc} stat_desc - The corresponding static tree. 280 | */ 281 | constructor(dyn_tree: Uint16Array, stat_desc: StaticTreeDesc) { 282 | this.dyn_tree = dyn_tree; /* the dynamic tree */ 283 | this.max_code = 0; /* largest code with non zero frequency */ 284 | this.stat_desc = stat_desc; /* the corresponding static tree */ 285 | } 286 | } 287 | 288 | function d_code(dist: number) { 289 | return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)]; 290 | } 291 | 292 | /* =========================================================================== 293 | * Output a short LSB first on the stream. 294 | * IN assertion: there is enough room in pendingBuf. 295 | */ 296 | function put_short(s: DeflateState, w: number) { 297 | // put_byte(s, (uch)((w) & 0xff)); 298 | // put_byte(s, (uch)((ush)(w) >> 8)); 299 | s.pending_buf![s.pending++] = (w) & 0xff; 300 | s.pending_buf![s.pending++] = (w >>> 8) & 0xff; 301 | } 302 | 303 | /* =========================================================================== 304 | * Send a value on a given number of bits. 305 | * IN assertion: length <= 16 and value fits in length bits. 
306 | */ 307 | function send_bits(s: DeflateState, value: number, length: number) { 308 | if (s.bi_valid > (Buf_size - length)) { 309 | s.bi_buf |= (value << s.bi_valid) & 0xffff; 310 | put_short(s, s.bi_buf); 311 | s.bi_buf = value >> (Buf_size - s.bi_valid); 312 | s.bi_valid += length - Buf_size; 313 | } else { 314 | s.bi_buf |= (value << s.bi_valid) & 0xffff; 315 | s.bi_valid += length; 316 | } 317 | } 318 | 319 | function send_code(s: DeflateState, c: number, tree: Uint16Array) { 320 | send_bits(s, tree[c * 2], /*.Code*/ tree[c * 2 + 1] /*.Len*/); 321 | } 322 | 323 | /* =========================================================================== 324 | * Reverse the first len bits of a code, using straightforward code (a faster 325 | * method would use a table) 326 | * IN assertion: 1 <= len <= 15 327 | */ 328 | function bi_reverse(code: number, len: number) { 329 | let res = 0; 330 | do { 331 | res |= code & 1; 332 | code >>>= 1; 333 | res <<= 1; 334 | } while (--len > 0); 335 | return res >>> 1; 336 | } 337 | 338 | /* =========================================================================== 339 | * Flush the bit buffer, keeping at most 7 bits in it. 340 | */ 341 | function bi_flush(s: DeflateState) { 342 | if (s.bi_valid === 16) { 343 | put_short(s, s.bi_buf); 344 | s.bi_buf = 0; 345 | s.bi_valid = 0; 346 | } else if (s.bi_valid >= 8) { 347 | s.pending_buf![s.pending++] = s.bi_buf & 0xff; 348 | s.bi_buf >>= 8; 349 | s.bi_valid -= 8; 350 | } 351 | } 352 | 353 | /* =========================================================================== 354 | * Compute the optimal bit lengths for a tree and update the total bit length 355 | * for the current block. 356 | * IN assertion: the fields freq and dad are set, heap[heap_max] and 357 | * above are the tree nodes sorted by increasing frequency. 358 | * OUT assertions: the field len is set to the optimal bit length, the 359 | * array bl_count contains the frequencies for each bit length. 
360 | * The length opt_len is updated; static_len is also updated if stree is 361 | * not null. 362 | */ 363 | function gen_bitlen(s: DeflateState, desc: TreeDesc) { 364 | const tree = desc.dyn_tree; 365 | const max_code = desc.max_code; 366 | const stree = desc.stat_desc.static_tree!; 367 | const has_stree = desc.stat_desc.has_stree; 368 | const extra = desc.stat_desc.extra_bits!; 369 | const base = desc.stat_desc.extra_base; 370 | const max_length = desc.stat_desc.max_length; 371 | let h; /* heap index */ 372 | let n, m; /* iterate over the tree elements */ 373 | let bits; /* bit length */ 374 | let xbits; /* extra bits */ 375 | let f; /* frequency */ 376 | let overflow = 0; /* number of elements with bit length too large */ 377 | 378 | for (bits = 0; bits <= MAX_BITS; bits++) { 379 | s.bl_count[bits] = 0; 380 | } 381 | 382 | /* In a first pass, compute the optimal bit lengths (which may 383 | * overflow in the case of the bit length tree). 384 | */ 385 | tree[s.heap[s.heap_max] * 2 + 1] /*.Len*/ = 0; /* root of the heap */ 386 | 387 | for (h = s.heap_max + 1; h < HEAP_SIZE; h++) { 388 | n = s.heap[h]; 389 | bits = tree[tree[n * 2 + 1] /*.Dad*/ * 2 + 1] /*.Len*/ + 1; 390 | if (bits > max_length) { 391 | bits = max_length; 392 | overflow++; 393 | } 394 | tree[n * 2 + 1] /*.Len*/ = bits; 395 | /* We overwrite tree[n].Dad which is no longer needed */ 396 | 397 | if (n > max_code) continue; /* not a leaf node */ 398 | 399 | s.bl_count[bits]++; 400 | xbits = 0; 401 | if (n >= base) { 402 | xbits = extra[n - base]; 403 | } 404 | f = tree[n * 2] /*.Freq*/; 405 | s.opt_len += f * (bits + xbits); 406 | if (has_stree) { 407 | s.static_len += f * (stree[n * 2 + 1] /*.Len*/ + xbits); 408 | } 409 | } 410 | if (overflow === 0) return; 411 | 412 | // Trace((stderr,"\nbit length overflow\n")); 413 | /* This happens for example on obj2 and pic of the Calgary corpus */ 414 | 415 | /* Find the first bit length which could increase: */ 416 | do { 417 | bits = max_length - 1; 418 | 
while (s.bl_count[bits] === 0) bits--; 419 | s.bl_count[bits]--; /* move one leaf down the tree */ 420 | s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */ 421 | s.bl_count[max_length]--; 422 | /* The brother of the overflow item also moves one step up, 423 | * but this does not affect bl_count[max_length] 424 | */ 425 | overflow -= 2; 426 | } while (overflow > 0); 427 | 428 | /* Now recompute all bit lengths, scanning in increasing frequency. 429 | * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all 430 | * lengths instead of fixing only the wrong ones. This idea is taken 431 | * from 'ar' written by Haruhiko Okumura.) 432 | */ 433 | for (bits = max_length; bits !== 0; bits--) { 434 | n = s.bl_count[bits]; 435 | while (n !== 0) { 436 | m = s.heap[--h]; 437 | if (m > max_code) continue; 438 | if (tree[m * 2 + 1] /*.Len*/ !== bits) { 439 | // Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); 440 | s.opt_len += (bits - tree[m * 2 + 1] /*.Len*/) * tree[m * 2] /*.Freq*/; 441 | tree[m * 2 + 1] /*.Len*/ = bits; 442 | } 443 | n--; 444 | } 445 | } 446 | } 447 | 448 | /* =========================================================================== 449 | * Generate the codes for a given tree and bit counts (which need not be 450 | * optimal). 451 | * IN assertion: the array bl_count contains the bit length statistics for 452 | * the given tree and the field len is set for all tree elements. 453 | * OUT assertion: the field code is set for all tree elements of non 454 | * zero code length. 455 | */ 456 | function gen_codes(tree: Uint16Array, max_code: number, bl_count: Uint16Array) { 457 | const next_code = new Array( 458 | MAX_BITS + 1, 459 | ); /* next code value for each bit length */ 460 | let code = 0; /* running code value */ 461 | let bits; /* bit index */ 462 | let n; /* code index */ 463 | 464 | /* The distribution counts are first used to generate the code values 465 | * without bit reversal. 
466 | */ 467 | for (bits = 1; bits <= MAX_BITS; bits++) { 468 | next_code[bits] = code = (code + bl_count[bits - 1]) << 1; 469 | } 470 | /* Check that the bit counts in bl_count are consistent. The last code 471 | * must be all ones. 472 | */ 473 | //Assert (code + bl_count[MAX_BITS]-1 == (1< length code (0..28) */ 513 | length = 0; 514 | for (code = 0; code < LENGTH_CODES - 1; code++) { 515 | base_length[code] = length; 516 | for (n = 0; n < (1 << extra_lbits[code]); n++) { 517 | _length_code[length++] = code; 518 | } 519 | } 520 | //Assert (length == 256, "tr_static_init: length != 256"); 521 | /* Note that the length 255 (match length 258) can be represented 522 | * in two different ways: code 284 + 5 bits or code 285, so we 523 | * overwrite length_code[255] to use the best encoding: 524 | */ 525 | _length_code[length - 1] = code; 526 | 527 | /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ 528 | dist = 0; 529 | for (code = 0; code < 16; code++) { 530 | base_dist[code] = dist; 531 | for (n = 0; n < (1 << extra_dbits[code]); n++) { 532 | _dist_code[dist++] = code; 533 | } 534 | } 535 | //Assert (dist == 256, "tr_static_init: dist != 256"); 536 | dist >>= 7; /* from now on, all distances are divided by 128 */ 537 | for (; code < D_CODES; code++) { 538 | base_dist[code] = dist << 7; 539 | for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { 540 | _dist_code[256 + dist++] = code; 541 | } 542 | } 543 | //Assert (dist == 256, "tr_static_init: 256+dist != 512"); 544 | 545 | /* Construct the codes of the static literal tree */ 546 | for (bits = 0; bits <= MAX_BITS; bits++) { 547 | bl_count[bits] = 0; 548 | } 549 | 550 | n = 0; 551 | while (n <= 143) { 552 | static_ltree[n * 2 + 1] /*.Len*/ = 8; 553 | n++; 554 | bl_count[8]++; 555 | } 556 | while (n <= 255) { 557 | static_ltree[n * 2 + 1] /*.Len*/ = 9; 558 | n++; 559 | bl_count[9]++; 560 | } 561 | while (n <= 279) { 562 | static_ltree[n * 2 + 1] /*.Len*/ = 7; 563 | n++; 564 | bl_count[7]++; 565 | } 566 
| while (n <= 287) { 567 | static_ltree[n * 2 + 1] /*.Len*/ = 8; 568 | n++; 569 | bl_count[8]++; 570 | } 571 | /* Codes 286 and 287 do not exist, but we must include them in the 572 | * tree construction to get a canonical Huffman tree (longest code 573 | * all ones) 574 | */ 575 | gen_codes(static_ltree, L_CODES + 1, bl_count); 576 | 577 | /* The static distance tree is trivial: */ 578 | for (n = 0; n < D_CODES; n++) { 579 | static_dtree[n * 2 + 1] /*.Len*/ = 5; 580 | static_dtree[n * 2] /*.Code*/ = bi_reverse(n, 5); 581 | } 582 | 583 | // Now data ready and we can init static trees 584 | static_l_desc = new StaticTreeDesc( 585 | static_ltree, 586 | extra_lbits, 587 | LITERALS + 1, 588 | L_CODES, 589 | MAX_BITS, 590 | ); 591 | static_d_desc = new StaticTreeDesc( 592 | static_dtree, 593 | extra_dbits, 594 | 0, 595 | D_CODES, 596 | MAX_BITS, 597 | ); 598 | static_bl_desc = new StaticTreeDesc( 599 | new Uint16Array(0), 600 | extra_blbits, 601 | 0, 602 | BL_CODES, 603 | MAX_BL_BITS, 604 | ); 605 | 606 | //static_init_done = true; 607 | } 608 | 609 | /* =========================================================================== 610 | * Initialize a new block. 611 | */ 612 | function init_block(s: DeflateState) { 613 | let n; /* iterates over tree elements */ 614 | 615 | /* Initialize the trees. 
*/ 616 | for (n = 0; n < L_CODES; n++) s.dyn_ltree[n * 2] /*.Freq*/ = 0; 617 | for (n = 0; n < D_CODES; n++) s.dyn_dtree[n * 2] /*.Freq*/ = 0; 618 | for (n = 0; n < BL_CODES; n++) s.bl_tree[n * 2] /*.Freq*/ = 0; 619 | 620 | s.dyn_ltree[END_BLOCK * 2] /*.Freq*/ = 1; 621 | s.opt_len = s.static_len = 0; 622 | s.last_lit = s.matches = 0; 623 | } 624 | 625 | /* =========================================================================== 626 | * Flush the bit buffer and align the output on a byte boundary 627 | */ 628 | function bi_windup(s: DeflateState) { 629 | if (s.bi_valid > 8) { 630 | put_short(s, s.bi_buf); 631 | } else if (s.bi_valid > 0) { 632 | //put_byte(s, (Byte)s->bi_buf); 633 | s.pending_buf![s.pending++] = s.bi_buf; 634 | } 635 | s.bi_buf = 0; 636 | s.bi_valid = 0; 637 | } 638 | 639 | /* =========================================================================== 640 | * Copy a stored block, storing first the length and its 641 | * one's complement if requested. 642 | */ 643 | function copy_block(s: DeflateState, buf: number, len: number, header: boolean) { 644 | bi_windup(s); /* align on byte boundary */ 645 | 646 | if (header) { 647 | put_short(s, len); 648 | put_short(s, ~len); 649 | } 650 | // while (len--) { 651 | // put_byte(s, *buf++); 652 | // } 653 | s.pending_buf!.set(s.window!.subarray(buf, buf + len), s.pending); 654 | s.pending += len; 655 | } 656 | 657 | /* =========================================================================== 658 | * Compares to subtrees, using the tree depth as tie breaker when 659 | * the subtrees have equal frequency. This minimizes the worst case length. 
 */
function smaller(tree: Uint16Array, n: number, m: number, depth: Uint16Array) {
  const _n2 = n * 2;
  const _m2 = m * 2;
  /* Lower frequency wins; on a tie the shallower subtree (smaller depth) wins. */
  return (tree[_n2] /*.Freq*/ < tree[_m2] /*.Freq*/ ||
    (tree[_n2] /*.Freq*/ === tree[_m2] /*.Freq*/ && depth[n] <= depth[m]));
}

/* ===========================================================================
 * Restore the heap property by moving down the tree starting at node k,
 * exchanging a node with the smallest of its two sons if necessary, stopping
 * when the heap property is re-established (each father smaller than its
 * two sons).
 */
function pqdownheap(s: DeflateState, tree: Uint16Array, k: number) // deflate_state *s;
// ct_data *tree; /* the tree to restore */
// int k; /* node to move down */
{
  const v = s.heap[k];
  let j = k << 1; /* left son of k */
  while (j <= s.heap_len) {
    /* Set j to the smallest of the two sons: */
    if (
      j < s.heap_len &&
      smaller(tree, s.heap[j + 1], s.heap[j], s.depth)
    ) {
      j++;
    }
    /* Exit if v is smaller than both sons */
    if (smaller(tree, v, s.heap[j], s.depth)) break;

    /* Exchange v with the smallest son */
    s.heap[k] = s.heap[j];
    k = j;

    /* And continue down the tree, setting j to the left son of k */
    j <<= 1;
  }
  s.heap[k] = v;
}

// inlined manually
// let SMALLEST = 1;

/* ===========================================================================
 * Send the block data compressed using the given Huffman trees
 */
function compress_block(s: DeflateState, ltree: Uint16Array, dtree: Uint16Array) {
  let dist; /* distance of matched string */
  let lc; /* match length or unmatched char (if dist == 0) */
  let lx = 0; /* running index in l_buf */
  let code; /* the code to send */
  let extra; /* number of extra bits to send */

  if (s.last_lit !== 0) {
    do {
      /* d_buf stores each distance as two big-endian bytes; l_buf stores
       * the matching literal byte or (match length - MIN_MATCH).
       */
      dist = (s.pending_buf![s.d_buf + lx * 2] << 8) |
        (s.pending_buf![s.d_buf + lx * 2 + 1]);
      lc = s.pending_buf![s.l_buf + lx];
      lx++;

      if (dist === 0) {
        send_code(s, lc, ltree); /* send a literal byte */
        //Tracecv(isgraph(lc), (stderr," '%c' ", lc));
      } else {
        /* Here, lc is the match length - MIN_MATCH */
        code = _length_code[lc];
        send_code(s, code + LITERALS + 1, ltree); /* send the length code */
        extra = extra_lbits[code];
        if (extra !== 0) {
          lc -= base_length[code];
          send_bits(s, lc, extra); /* send the extra length bits */
        }
        dist--; /* dist is now the match distance - 1 */
        code = d_code(dist);
        //Assert (code < D_CODES, "bad d_code");

        send_code(s, code, dtree); /* send the distance code */
        extra = extra_dbits[code];
        if (extra !== 0) {
          dist -= base_dist[code];
          send_bits(s, dist, extra); /* send the extra distance bits */
        }
      } /* literal or match pair ? */

      /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
      //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
      //       "pendingBuf overflow");
    } while (lx < s.last_lit);
  }

  send_code(s, END_BLOCK, ltree);
}

/* ===========================================================================
 * Construct one Huffman tree and assigns the code bit strings and lengths.
 * Update the total bit length for the current block.
 * IN assertion: the field freq is set for all tree elements.
 * OUT assertions: the fields len and code are set to the optimal bit length
 *     and corresponding code. The length opt_len is updated; static_len is
 *     also updated if stree is not null. The field max_code is set.
 */
function build_tree(s: DeflateState, desc: TreeDesc) {
  const tree = desc.dyn_tree;
  const stree = desc.stat_desc.static_tree;
  const has_stree = desc.stat_desc.has_stree;
  const elems = desc.stat_desc.elems;
  let n, m; /* iterate over heap elements */
  let max_code = -1; /* largest code with non zero frequency */
  let node; /* new node being created */

  /* Construct the initial heap, with least frequent element in
   * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
   * heap[0] is not used.
   */
  s.heap_len = 0;
  s.heap_max = HEAP_SIZE;

  for (n = 0; n < elems; n++) {
    if (tree[n * 2] /*.Freq*/ !== 0) {
      s.heap[++s.heap_len] = max_code = n;
      s.depth[n] = 0;
    } else {
      tree[n * 2 + 1] /*.Len*/ = 0; /* unused symbol: zero code length */
    }
  }

  /* The pkzip format requires that at least one distance code exists,
   * and that at least one bit should be sent even if there is only one
   * possible code. So to avoid special checks later on we force at least
   * two codes of non zero frequency.
   */
  while (s.heap_len < 2) {
    node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);
    tree[node * 2] /*.Freq*/ = 1;
    s.depth[node] = 0;
    s.opt_len--;

    if (has_stree) {
      s.static_len -= stree![node * 2 + 1] /*.Len*/;
    }
    /* node is 0 or 1 so it does not have extra bits */
  }
  desc.max_code = max_code;

  /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
   * establish sub-heaps of increasing lengths:
   */
  for (n = (s.heap_len >> 1 /*int /2*/); n >= 1; n--) pqdownheap(s, tree, n);

  /* Construct the Huffman tree by repeatedly combining the least two
   * frequent nodes.
   */
  node = elems; /* next internal node of the tree */
  do {
    //pqremove(s, tree, n); /* n = node of least frequency */
    /*** pqremove ***/
    n = s.heap[1 /*SMALLEST*/];
    s.heap[1 /*SMALLEST*/] = s.heap[s.heap_len--];
    pqdownheap(s, tree, 1 /*SMALLEST*/);
    /***/

    m = s.heap[1 /*SMALLEST*/]; /* m = node of next least frequency */

    s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */
    s.heap[--s.heap_max] = m;

    /* Create a new node father of n and m */
    tree[node * 2] /*.Freq*/ = tree[n * 2] /*.Freq*/ + tree[m * 2] /*.Freq*/;
    s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;
    tree[n * 2 + 1] /*.Dad*/ = tree[m * 2 + 1] /*.Dad*/ = node;

    /* and insert the new node in the heap */
    s.heap[1 /*SMALLEST*/] = node++;
    pqdownheap(s, tree, 1 /*SMALLEST*/);
  } while (s.heap_len >= 2);

  s.heap[--s.heap_max] = s.heap[1 /*SMALLEST*/];

  /* At this point, the fields freq and dad are set. We can now
   * generate the bit lengths.
   */
  gen_bitlen(s, desc);

  /* The field len is now set, we can generate the bit codes */
  gen_codes(tree, max_code, s.bl_count);
}

/* ===========================================================================
 * Scan a literal or distance tree to determine the frequencies of the codes
 * in the bit length tree.
 */
function scan_tree(s: DeflateState, tree: Uint16Array, max_code: number) {
  let n; /* iterates over all tree elements */
  let prevlen = -1; /* last emitted length */
  let curlen; /* length of current code */

  let nextlen = tree[0 * 2 + 1] /*.Len*/; /* length of next code */

  let count = 0; /* repeat count of the current code */
  let max_count = 7; /* max repeat count */
  let min_count = 4; /* min repeat count */

  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }
  /* Sentinel ensures the final run is always flushed inside the loop. */
  tree[(max_code + 1) * 2 + 1] /*.Len*/ = 0xffff; /* guard */

  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1] /*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue; /* keep extending the current run */
    } else if (count < min_count) {
      /* run too short for a repeat code: count each length individually */
      s.bl_tree[curlen * 2] /*.Freq*/ += count;
    } else if (curlen !== 0) {
      if (curlen !== prevlen) s.bl_tree[curlen * 2] /*.Freq*/++;
      s.bl_tree[REP_3_6 * 2] /*.Freq*/++;
    } else if (count <= 10) {
      s.bl_tree[REPZ_3_10 * 2] /*.Freq*/++;
    } else {
      s.bl_tree[REPZ_11_138 * 2] /*.Freq*/++;
    }

    count = 0;
    prevlen = curlen;

    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;
    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;
    } else {
      max_count = 7;
      min_count = 4;
    }
  }
}

/* ===========================================================================
 * Send a literal or distance tree in compressed form, using the codes in
 * bl_tree.
 */
function send_tree(s: DeflateState, tree: Uint16Array, max_code: number) {
  let n; /* iterates over all tree elements */
  let prevlen = -1; /* last emitted length */
  let curlen; /* length of current code */

  let nextlen = tree[0 * 2 + 1] /*.Len*/; /* length of next code */

  let count = 0; /* repeat count of the current code */
  let max_count = 7; /* max repeat count */
  let min_count = 4; /* min repeat count */

  /* tree[max_code+1].Len = -1; */
  /* guard already set */
  if (nextlen === 0) {
    max_count = 138;
    min_count = 3;
  }

  /* Mirrors scan_tree, but emits the codes instead of counting them. */
  for (n = 0; n <= max_code; n++) {
    curlen = nextlen;
    nextlen = tree[(n + 1) * 2 + 1] /*.Len*/;

    if (++count < max_count && curlen === nextlen) {
      continue;
    } else if (count < min_count) {
      /* run too short for a repeat code: send each length literally */
      do {
        send_code(s, curlen, s.bl_tree);
      } while (--count !== 0);
    } else if (curlen !== 0) {
      if (curlen !== prevlen) {
        send_code(s, curlen, s.bl_tree);
        count--;
      }
      //Assert(count >= 3 && count <= 6, " 3_6?");
      send_code(s, REP_3_6, s.bl_tree);
      send_bits(s, count - 3, 2);
    } else if (count <= 10) {
      send_code(s, REPZ_3_10, s.bl_tree);
      send_bits(s, count - 3, 3);
    } else {
      send_code(s, REPZ_11_138, s.bl_tree);
      send_bits(s, count - 11, 7);
    }

    count = 0;
    prevlen = curlen;
    if (nextlen === 0) {
      max_count = 138;
      min_count = 3;
    } else if (curlen === nextlen) {
      max_count = 6;
      min_count = 3;
    } else {
      max_count = 7;
      min_count = 4;
    }
  }
}

/* ===========================================================================
 * Construct the Huffman tree for the bit lengths and return the index in
 * bl_order of the last bit length code to send.
968 | */ 969 | function build_bl_tree(s: DeflateState) { 970 | let max_blindex; /* index of last bit length code of non zero freq */ 971 | 972 | /* Determine the bit length frequencies for literal and distance trees */ 973 | scan_tree(s, s.dyn_ltree, s.l_desc!.max_code); 974 | scan_tree(s, s.dyn_dtree, s.d_desc!.max_code); 975 | 976 | /* Build the bit length tree: */ 977 | build_tree(s, s.bl_desc!); 978 | /* opt_len now includes the length of the tree representations, except 979 | * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. 980 | */ 981 | 982 | /* Determine the number of bit length codes to send. The pkzip format 983 | * requires that at least 4 bit length codes be sent. (appnote.txt says 984 | * 3 but the actual value used is 4.) 985 | */ 986 | for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) { 987 | if (s.bl_tree[bl_order[max_blindex] * 2 + 1] /*.Len*/ !== 0) { 988 | break; 989 | } 990 | } 991 | /* Update opt_len to include the bit length tree and counts */ 992 | s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; 993 | //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", 994 | // s->opt_len, s->static_len)); 995 | 996 | return max_blindex; 997 | } 998 | 999 | /* =========================================================================== 1000 | * Send the header for a block using dynamic Huffman trees: the counts, the 1001 | * lengths of the bit length codes, the literal tree and the distance tree. 1002 | * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
1003 | */ 1004 | function send_all_trees(s: DeflateState, lcodes: number, dcodes: number, blcodes: number) { 1005 | let rank; /* index in bl_order */ 1006 | 1007 | //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); 1008 | //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, 1009 | // "too many codes"); 1010 | //Tracev((stderr, "\nbl counts: ")); 1011 | send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */ 1012 | send_bits(s, dcodes - 1, 5); 1013 | send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */ 1014 | for (rank = 0; rank < blcodes; rank++) { 1015 | //Tracev((stderr, "\nbl code %2d ", bl_order[rank])); 1016 | send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1], /*.Len*/ 3); 1017 | } 1018 | //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); 1019 | 1020 | send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */ 1021 | //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); 1022 | 1023 | send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */ 1024 | //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); 1025 | } 1026 | 1027 | /* =========================================================================== 1028 | * Check if the data type is TEXT or BINARY, using the following algorithm: 1029 | * - TEXT if the two conditions below are satisfied: 1030 | * a) There are no non-portable control characters belonging to the 1031 | * "black list" (0..6, 14..25, 28..31). 1032 | * b) There is at least one printable character belonging to the 1033 | * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). 1034 | * - BINARY otherwise. 1035 | * - The following partially-portable control characters form a 1036 | * "gray list" that is ignored in this detection algorithm: 1037 | * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). 1038 | * IN assertion: the fields Freq of dyn_ltree are set. 
1039 | */ 1040 | function detect_data_type(s: DeflateState) { 1041 | /* black_mask is the bit mask of black-listed bytes 1042 | * set bits 0..6, 14..25, and 28..31 1043 | * 0xf3ffc07f = binary 11110011111111111100000001111111 1044 | */ 1045 | let black_mask = 0xf3ffc07f; 1046 | let n; 1047 | 1048 | /* Check for non-textual ("black-listed") bytes. */ 1049 | for (n = 0; n <= 31; n++, black_mask >>>= 1) { 1050 | if ((black_mask & 1) && (s.dyn_ltree[n * 2] /*.Freq*/ !== 0)) { 1051 | return Z_BINARY; 1052 | } 1053 | } 1054 | 1055 | /* Check for textual ("white-listed") bytes. */ 1056 | if ( 1057 | s.dyn_ltree[9 * 2] /*.Freq*/ !== 0 || 1058 | s.dyn_ltree[10 * 2] /*.Freq*/ !== 0 || 1059 | s.dyn_ltree[13 * 2] /*.Freq*/ !== 0 1060 | ) { 1061 | return Z_TEXT; 1062 | } 1063 | for (n = 32; n < LITERALS; n++) { 1064 | if (s.dyn_ltree[n * 2] /*.Freq*/ !== 0) { 1065 | return Z_TEXT; 1066 | } 1067 | } 1068 | 1069 | /* There are no "black-listed" or "white-listed" bytes: 1070 | * this stream either is empty or has tolerated ("gray-listed") bytes only. 1071 | */ 1072 | return Z_BINARY; 1073 | } 1074 | 1075 | let static_init_done = false; 1076 | 1077 | /* =========================================================================== 1078 | * Initialize the tree data structures for a new zlib stream. 
 */
export function _tr_init(s: DeflateState) {
  /* Build the static tables exactly once per module lifetime. */
  if (!static_init_done) {
    tr_static_init();
    static_init_done = true;
  }

  s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);
  s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);
  s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);

  /* Empty the bit buffer. */
  s.bi_buf = 0;
  s.bi_valid = 0;

  /* Initialize the first block of the first file: */
  init_block(s);
}

/* ===========================================================================
 * Send a stored block
 */
export function _tr_stored_block(s: DeflateState, buf: number, stored_len: number, last: boolean) //DeflateState *s;
//charf *buf; /* input block */
//ulg stored_len; /* length of input block */
//int last; /* one if this is the last block for a file */
{
  send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */
  copy_block(s, buf, stored_len, true); /* with header */
}

/* ===========================================================================
 * Send one empty static block to give enough lookahead for inflate.
 * This takes 10 bits, of which 7 may remain in the bit buffer.
 */
export function _tr_align(s: DeflateState) {
  send_bits(s, STATIC_TREES << 1, 3);
  send_code(s, END_BLOCK, static_ltree);
  bi_flush(s);
}

/* ===========================================================================
 * Determine the best encoding for the current block: dynamic trees, static
 * trees or store, and output the encoded block to the zip file.
 */
export function _tr_flush_block(s: DeflateState, buf: number, stored_len: number, last: boolean) {
  let opt_lenb, static_lenb; /* opt_len and static_len in bytes */
  let max_blindex = 0; /* index of last bit length code of non zero freq */

  /* Build the Huffman trees unless a stored block is forced */
  if (s.level > 0) {
    /* Check if the file is binary or text */
    if (s.strm!.data_type === Z_UNKNOWN) {
      s.strm!.data_type = detect_data_type(s);
    }

    /* Construct the literal and distance trees */
    build_tree(s, s.l_desc!);
    // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
    //        s->static_len));

    build_tree(s, s.d_desc!);
    // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
    //        s->static_len));
    /* At this point, opt_len and static_len are the total bit lengths of
     * the compressed block data, excluding the tree representations.
     */

    /* Build the bit length tree for the above two trees, and get the index
     * in bl_order of the last bit length code to send.
     */
    max_blindex = build_bl_tree(s);

    /* Determine the best encoding. Compute the block lengths in bytes. */
    /* +3 for the block-type header bits, +7 to round up to whole bytes. */
    opt_lenb = (s.opt_len + 3 + 7) >>> 3;
    static_lenb = (s.static_len + 3 + 7) >>> 3;

    // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
    //        opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
    //        s->last_lit));

    if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
  } else {
    // Assert(buf != (char*)0, "lost buf");
    opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
  }

  if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {
    /* 4: two words for the lengths */

    /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
     * Otherwise we can't have processed more than WSIZE input bytes since
     * the last block flush, because compression would have been
     * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
     * transform a block into a stored block.
     */
    _tr_stored_block(s, buf, stored_len, last);
  } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {
    /* Static trees win (or are forced by strategy). */
    send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);
    compress_block(s, static_ltree, static_dtree);
  } else {
    /* Dynamic trees win: send the tree descriptions first. */
    send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);
    send_all_trees(
      s,
      s.l_desc!.max_code + 1,
      s.d_desc!.max_code + 1,
      max_blindex + 1,
    );
    compress_block(s, s.dyn_ltree, s.dyn_dtree);
  }
  // Assert (s->compressed_len == s->bits_sent, "bad compressed size");
  /* The above check is made mod 2^32, for files larger than 512 MB
   * and uLong implemented on 32 bits.
   */
  init_block(s);

  if (last) {
    bi_windup(s);
  }
  // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
  //        s->compressed_len-7*last));
}

/* ===========================================================================
 * Save the match info and tally the frequency counts. Return true if
 * the current block must be flushed.
 */
export function _tr_tally(s: DeflateState, dist: number, lc: number) {
  //let out_length, in_length, dcode;

  /* Record the pair in the pending buffers: distance as two big-endian
   * bytes in d_buf, literal/length byte in l_buf.
   */
  s.pending_buf![s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;
  s.pending_buf![s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;

  s.pending_buf![s.l_buf + s.last_lit] = lc & 0xff;
  s.last_lit++;

  if (dist === 0) {
    /* lc is the unmatched char */
    s.dyn_ltree[lc * 2] /*.Freq*/++;
  } else {
    s.matches++;
    /* Here, lc is the match length - MIN_MATCH */
    dist--; /* dist = match distance - 1 */
    //Assert((ush)dist < (ush)MAX_DIST(s) &&
    //       (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
    //       (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");

    s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2] /*.Freq*/++;
    s.dyn_dtree[d_code(dist) * 2] /*.Freq*/++;
  }

  // (!) This block is disabled in zlib defaults,
  // don't enable it for binary compatibility

  //#ifdef TRUNCATE_BLOCK
  //  /* Try to guess if it is profitable to stop the current block here */
  //  if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {
  //    /* Compute an upper bound for the compressed length */
  //    out_length = s.last_lit*8;
  //    in_length = s.strstart - s.block_start;
  //
  //    for (dcode = 0; dcode < D_CODES; dcode++) {
  //      out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);
  //    }
  //    out_length >>>= 3;
  //    //Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
  //    //       s->last_lit, in_length, out_length,
  //    //       100L - out_length*100L/in_length));
  //    if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {
  //      return true;
  //    }
  //  }
  //#endif

  return (s.last_lit === s.lit_bufsize - 1);
  /* We avoid equality with lit_bufsize because of wraparound at 64K
   * on 16 bit machines and because stored blocks are restricted to
   * 64K-1 bytes.
   */
}
--------------------------------------------------------------------------------
/zlib/zlib/zstream.ts:
--------------------------------------------------------------------------------
import type { DeflateState } from "./deflate.ts";
import type { InflateState } from "./inflate.ts";

/**
 * Represents a zlib stream used for compression and decompression.
 *
 * This class encapsulates the input and output buffers, state information,
 * and other metadata required for the compression and decompression
 * processes. It mirrors zlib's `z_stream` struct, with buffer indexes in
 * place of C pointers.
 */
export default class ZStream {
  /**
   * The input buffer holding bytes to be processed.
   * @type {Uint8Array | null}
   */
  input: Uint8Array | null = null; // JS specific, because we have no pointers

  /**
   * The index of the next input byte to be processed.
   * @type {number}
   */
  next_in = 0;

  /**
   * The number of bytes available at the input buffer.
   * @type {number}
   */
  avail_in = 0;

  /**
   * The total number of input bytes read so far.
   * @type {number}
   */
  total_in = 0;

  /**
   * The output buffer where processed bytes are placed.
   * @type {Uint8Array | null}
   */
  output: Uint8Array | null = null; // JS specific, because we have no pointers

  /**
   * The index of the next output byte to be placed.
   * @type {number}
   */
  next_out = 0;

  /**
   * The remaining free space at the output buffer.
   * @type {number}
   */
  avail_out = 0;

  /**
   * The total number of bytes output so far.
   * @type {number}
   */
  total_out = 0;

  /**
   * The last error message, or an empty string if no error.
   * @type {string}
   */
  msg = ""; /*Z_NULL*/

  /**
   * The internal state of the stream, not visible to applications.
   * @type {InflateState | DeflateState | null}
   */
  state: InflateState | DeflateState | null = null;

  /**
   * The best guess about the data type: binary or text.
   * @type {number}
   */
  data_type = 2 /*Z_UNKNOWN*/;

  /**
   * The Adler-32 value of the uncompressed data.
   * @type {number}
   */
  adler = 0;
}
--------------------------------------------------------------------------------