├── .gitignore
├── public
├── images
│ └── venetian_crossroads_1k.hdr
├── index.html
├── z-worker.js
└── deflate.js
├── src
├── shaders
│ ├── vertex.glsl
│ └── fragment.glsl
├── materials
│ └── sphereMaterial.js
├── textures
│ └── hdrTexture.js
├── hdrConverterEmissive.js
├── app.js
├── workers
│ └── hdrEmissiveWorker.js
└── externalLibs
│ └── RGBELoader.js
├── package.json
├── webpack.config.js
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | /package-lock.json
3 | public/bundle.js
--------------------------------------------------------------------------------
/public/images/venetian_crossroads_1k.hdr:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/matheowis/threejs-canvas-save-as-hdr/HEAD/public/images/venetian_crossroads_1k.hdr
--------------------------------------------------------------------------------
/src/shaders/vertex.glsl:
--------------------------------------------------------------------------------
// Minimal pass-through vertex shader: forwards the mesh UVs to the fragment
// stage and projects the vertex with the standard three.js matrices.
varying vec2 vUv;

void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
--------------------------------------------------------------------------------
/src/shaders/fragment.glsl:
--------------------------------------------------------------------------------
// Pass-through fragment shader: samples tDiffuse at the interpolated UV and
// writes the texel unchanged, so all four components reach the framebuffer.
// NOTE(review): when the HDR texture from hdrTexture.js is bound, alpha
// presumably carries the RGBE exponent ("emissive") channel — confirm.
uniform sampler2D tDiffuse;
varying vec2 vUv;

void main() {
vec4 texelColor = texture2D( tDiffuse, vUv );
gl_FragColor = vec4(texelColor);
}
--------------------------------------------------------------------------------
/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | THREE setup
7 |
8 |
9 |
10 | Download
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/src/materials/sphereMaterial.js:
--------------------------------------------------------------------------------
import { ShaderMaterial } from 'three';
import vertexShader from '../shaders/vertex.glsl';
import fragmentShader from '../shaders/fragment.glsl';
import { HDRTexture } from '../textures/hdrTexture';

/**
 * Material for the HDR sphere: renders the shared HDRTexture through the
 * pass-through shader pair in ../shaders. `transparent` is enabled so the
 * alpha channel written by the fragment shader is preserved.
 */
export const sphereMaterial = new ShaderMaterial({
  uniforms: {
    tDiffuse: { value: HDRTexture },
  },
  vertexShader,
  fragmentShader,
  transparent: true,
});
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "webpack-three-setup",
3 | "version": "1.0.0",
4 | "description": "Webpack with three",
5 | "main": "index.js",
6 | "scripts": {
7 | "start": "webpack-dev-server --mode development",
8 | "build": "webpack --mode production"
9 | },
10 | "author": "Mateusz Wisniowski",
11 | "license": "ISC",
12 | "devDependencies": {
13 | "raw-loader": "^0.5.1",
14 | "three": "^0.94.0",
15 | "webpack": "^4.17.1",
16 | "webpack-cli": "^3.1.0",
17 | "webpack-dev-server": "^3.1.6"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/webpack.config.js:
--------------------------------------------------------------------------------
const path = require('path');

// Webpack 4 configuration: bundles src/app.js into public/bundle.js and
// serves the `public` directory with webpack-dev-server (see package.json
// "start"/"build" scripts).
module.exports = {
  entry: './src/app.js',
  module:{
    rules:[
      {
        // Import GLSL shader files as raw strings (used by src/shaders/*).
        test: /(\.glsl|\.vs|\.fs)$/,
        exclude: /node_modules/,
        use: 'raw-loader'
      }
    ]
  },
  resolve: {
    extensions: ['*', '.js', '.jsx']
  },
  output: {
    path: path.join(__dirname, 'public'),
    filename: 'bundle.js'
  },
  devServer: {
    contentBase: path.join(__dirname, 'public'),
    historyApiFallback: true,
  }
}
--------------------------------------------------------------------------------
/src/textures/hdrTexture.js:
--------------------------------------------------------------------------------
import { RGBEEncoding, NearestFilter, DataTexture } from 'three';
import { RGBELoader } from '../externalLibs/RGBELoader';

/**
 * Shared HDR texture. It is exported immediately as an empty DataTexture so
 * materials can reference it synchronously; the actual image data is copied
 * in once the .hdr file finishes loading below.
 */
export const HDRTexture = new DataTexture();

const loader = new RGBELoader();
loader.load(
  'images/venetian_crossroads_1k.hdr',
  tex => {
    tex.encoding = RGBEEncoding;
    // Nearest filtering — presumably to avoid interpolating the RGBE
    // exponent stored in the fourth channel; confirm before changing.
    tex.minFilter = NearestFilter;
    tex.magFilter = NearestFilter;
    tex.flipY = true;
    HDRTexture.copy(tex);
    HDRTexture.needsUpdate = true;
  },
  undefined,
  // Previously a failed load was silently ignored, leaving a blank texture
  // with no diagnostic at all.
  err => {
    console.error('Failed to load HDR texture:', err);
  }
);
19 |
--------------------------------------------------------------------------------
/src/hdrConverterEmissive.js:
--------------------------------------------------------------------------------
import { hadrEmmisiveWorker } from './workers/hdrEmissiveWorker';

/**
 * Runs the RLE-compression worker over raw RGBA pixel data.
 *
 * @param {number} width - image width in pixels
 * @param {number} height - image height in pixels
 * @param {Uint8Array} rgbeBuffer - raw RGBA bytes read from the canvas
 * @returns {Promise<Uint8Array>} resolves with the encoded HDR pixel bytes
 *   (header/resolution lines are NOT included — the caller prepends them)
 */
export const hdrConverterEmmisive = (
  width,
  height,
  rgbeBuffer = new Uint8Array(),
) => {
  return new Promise((resolve, reject) => {
    // Inline the worker source via a blob URL so no separate worker file
    // needs to be served.
    const blobURL = URL.createObjectURL(new Blob(['(', hadrEmmisiveWorker.toString(), ')()'], { type: 'application/javascript' }));
    const worker = new Worker(blobURL);
    // The URL is only needed for construction; release it to avoid leaking.
    URL.revokeObjectURL(blobURL);
    worker.postMessage({ rgbeBuffer, width, height });

    worker.addEventListener('message', event => {
      if (event.data.progress) {
        // possible implementation for bigger images
        console.log('dataProgress=', event.data.progress);
      } else {
        worker.terminate();
        resolve(event.data.binary); // hdr binary
      }
    });
    // Previously worker errors were dropped and the promise stayed pending
    // forever; surface them to the caller instead.
    worker.addEventListener('error', err => {
      worker.terminate();
      reject(err);
    });
  });
};
--------------------------------------------------------------------------------
/src/app.js:
--------------------------------------------------------------------------------
import * as THREE from 'three';
import { hdrConverterEmmisive } from './hdrConverterEmissive';
import { sphereMaterial } from './materials/sphereMaterial';

// Fixed canvas / render-target size; also written into the HDR resolution line.
const cWidth = 1280, cHeight = 720;

const renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true });
const renderTarget = new THREE.WebGLRenderTarget(cWidth, cHeight);
const scene = new THREE.Scene();
const camera = new THREE.OrthographicCamera(cWidth / -2, cWidth / 2, cHeight / 2, cHeight / -2, 1, 1000);
renderer.setSize(cWidth, cHeight);

document.body.appendChild(renderer.domElement);
const sphereGeo = new THREE.SphereGeometry(300, 100, 100);
const sphereMesh = new THREE.Mesh(sphereGeo, sphereMaterial);
sphereMesh.position.z = -400;

scene.add(sphereMesh);

render();
function render() {
  requestAnimationFrame(render);
  renderer.render(scene, camera);
}

const a = document.getElementById('download');
a.addEventListener('click', e => {
  // The HDR blob is produced asynchronously, so cancel the default
  // navigation. Previously `a.href` was only assigned after the promise
  // resolved, which is why the link used to require two clicks.
  e.preventDefault();
  // Render once into the off-screen target and grab the raw RGBA bytes.
  renderer.render(scene, camera, renderTarget);
  const pixelData = new Uint8Array(cWidth * cHeight * 4);
  renderer.readRenderTargetPixels(renderTarget, 0, 0, cWidth, cHeight, pixelData);
  hdrConverterEmmisive(cWidth, cHeight, pixelData).then(binary => {
    // NOTE(review): the Radiance spec expects a leading "#?RADIANCE\n" magic
    // line before the FORMAT line; many viewers tolerate its absence —
    // confirm against target software before adding it.
    const header = 'FORMAT=32-bit_rle_rgbe\n';
    const blankSpace = '\n';
    const Resolution = `-Y ${cHeight} +X ${cWidth}\n`;
    const text = header + blankSpace + Resolution;

    const blob = new Blob([text, binary], { type: 'octet/stream' });
    const url = URL.createObjectURL(blob);
    // Trigger the download on a temporary anchor so a single click suffices,
    // then release the object URL instead of leaking one per download.
    const tmp = document.createElement('a');
    tmp.href = url;
    tmp.download = 'shouldWork.hdr';
    tmp.click();
    URL.revokeObjectURL(url);
  });
});
44 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Saving canvas to hdr file format
2 | ## Project overview
3 |
4 | In this project I render a sphere with a custom shader on it that takes an HDR texture as a parameter;
5 | the shader sets opacity equal to the emissive (exponent) channel.
6 |
7 | Then I take the pixel data of the canvas and transform it in the same way I describe in the **HDR format tutorial**.
8 |
9 | ### Demo
10 |
11 | You can check out Demo on https://matheowis.github.io/threejs-canvas-save-as-hdr
12 |
13 | You have to click download text twice to download the image, then open it in a software that supports hdr
14 |
15 | ## HDR format tutorial
16 | 
17 |
18 | Lets say we want to save the image above to hdr format
19 | ```
20 | Color(red, green, blue, emmisive)
21 | leftColor = Color(20,70,150,128)
22 | rightColor = Color(150,130,40,128)
23 | ```
24 | Each file contains header, resolution and pixelData
25 | ```
26 | var header = "FORMAT=32-bit_rle_rgbe\n"
27 | var blankSpace = "\n";
28 | var Resolution = "-Y 180 +X 320\n";
29 | ```
30 | The header and resolution have to be separated by an empty line.
31 |
32 | Now lets take care of pixelData
33 |
34 | We have to write each line of pixels starting from upper left image corner.
35 | First two bytes of each line have to be (2, 2) - that defines the format, which is **adaptive run length encoding**,
36 | another two bytes defines the length of pixel row in this situation its 320 so in bytes (1,64).
37 |
38 | Now we can start writing pixel values.
39 |
40 | We start with the amount of repetition, where 128 means 0. I couldn't fully work out this part, but based on Photoshop-produced files I found that only positive values are used, so the maximum repetition byte is 255, which means 127 repetitions.
41 |
42 | ### Channels
43 | We define each channel separately, start with all reds in the row,then greens and so on.
44 | reds:
45 | ```
46 | 255, 20, 161, 20, 255, 150, 161, 150,
47 | ```
48 | greens:
49 | ```
50 | 255, 70, 161, 70, 255, 130, 161, 130,
51 | ```
52 | blues:
53 | ```
54 | 255, 150, 161, 150, 255, 40, 161, 40,
55 | ```
56 | emmisives:
57 | ```
58 | 255, 128, 255, 128, 194, 128
59 | ```
60 | ### Full line would look like
61 | ```
62 | 2, 2, 1, 64, 255, 20, 161, 20, 255, 150, 161, 150, 255, 70, 161, 70, 255, 130, 161, 130, 255, 150, 161, 150, 255, 40, 161, 40, 255, 128, 255, 128, 194, 128
63 | ```
64 | ### Binary Data
65 |
66 | You can save binary with new Uint8Array()
67 | ```
68 | var text = header + blankSpace + Resolution;
69 | var binary = new Uint8Array([pixelData]) // bytes that we created above
70 | var blob = new Blob([text, binary], { type: "octet/stream" });
71 | ```
72 | That blob contains image in hdr format
73 |
74 | more info about hdr format at http://radsite.lbl.gov/radiance/refer/filefmts.pdf page 28
75 |
76 |
77 |
78 |
79 |
--------------------------------------------------------------------------------
/src/workers/hdrEmissiveWorker.js:
--------------------------------------------------------------------------------
/**
 * Worker body that run-length-encodes raw RGBA canvas pixels into HDR
 * (RGBE adaptive-RLE) scanline bytes. NOTE: this function is stringified
 * and executed inside a Worker, so it must stay fully self-contained.
 */
export const hadrEmmisiveWorker = () => {
  // Sequential writer for a fixed-size Uint8Array (Uint8Array has no push()).
  class ByteData {
    constructor(size) {
      this.binaryData = new Uint8Array(size);
      this._cIndex = 0;
      this.push = this.push.bind(this);
    }
    // Append the given bytes at the current write position.
    push(...bytes) {
      // Use the declared rest parameter (the original ignored it and read
      // `arguments` instead).
      for (let i = 0; i < bytes.length; i++) {
        this.binaryData[this._cIndex] = bytes[i];
        this._cIndex++;
      }
    }
  }

  self.addEventListener('message', event => {
    const width = event.data.width;
    const height = event.data.height;
    const rgbeBuffer = event.data.rgbeBuffer;

    // Canvas pixel data starts at the lower-left corner, but HDR scanlines
    // are written from the upper-left one; this maps y (0 = top row) to the
    // byte offset of that row in rgbeBuffer.
    const topIndex = y => (width * height * 4) - (width * 4) - (width * y * 4);

    // Run-length encode one channel (0=R, 1=G, 2=B, 3=exponent) of row y.
    // Each entry is {value, length} with length biased by 128: bytes
    // 129..255 denote an encoded run of 1..127 identical values.
    const getLine = (y = 0, channel = 0) => {
      const array = [];
      let localVal = 0, localLength = 0;
      const lengthConstant = 128;
      for (let i = 0; i < width * 4; i += 4) {
        if (localLength === 0) {
          localVal = rgbeBuffer[topIndex(y) + i + channel];
          localLength++;
        } else if (localVal === rgbeBuffer[topIndex(y) + i + channel] && localLength < 127) {
          localLength++;
        } else {
          array.push({ value: localVal, length: localLength + lengthConstant });
          localVal = rgbeBuffer[topIndex(y) + i + channel];
          localLength = 1;
        }
      }
      array.push({ value: localVal, length: localLength + lengthConstant });
      return array;
    };

    // First pass: compress every row and total up the output size.
    const compressed = [];
    let fileSize = 0;
    for (let i = 0; i < height; i++) {
      const lineReds = getLine(i, 0);
      const lineGreens = getLine(i, 1);
      const lineBlues = getLine(i, 2);
      const lineEmissive = getLine(i, 3);
      const lineInitiator = 4; // 2, 2, then the 16-bit scanline width
      // channels are multiplied by 2 because each run stores a length byte
      // and a value byte
      fileSize += lineInitiator + lineReds.length * 2 + lineGreens.length * 2 + lineBlues.length * 2 + lineEmissive.length * 2;
      compressed.push([lineReds, lineGreens, lineBlues, lineEmissive]);
    }
    console.log(`Worker, hdr file size = ${(fileSize / 1024).toFixed(2)}kb`);

    // The scanline width is stored high-byte-first; Uint16Array yields the
    // platform byte order (little-endian on typical hardware), which is why
    // the bytes are written "flipped" below.
    const lineSize = new Uint8Array(new Uint16Array([width]).buffer);
    const byteData = new ByteData(fileSize);

    // Second pass: emit the scanlines.
    for (let i = 0; i < height; i++) {
      // Every adaptive-RLE scanline starts with 2, 2, widthHigh, widthLow.
      byteData.push(2, 2, lineSize[1], lineSize[0]);
      for (let k = 0; k < 4; k++) {
        compressed[i][k].forEach(channel => { byteData.push(channel.length, channel.value); });
      }
    }
    self.postMessage({ binary: byteData.binaryData });
  });
};
--------------------------------------------------------------------------------
/public/z-worker.js:
--------------------------------------------------------------------------------
/* jshint worker:true */
// Vendored worker script from zip.js (Gildas Lormeau): runs deflate/inflate
// codec tasks inside a Web Worker and posts results/CRCs back to the caller.
(function main(global) {
	"use strict";

	// Guard against the script being evaluated twice in the same worker scope.
	if (global.zWorkerInitialized)
		throw new Error('z-worker.js should be run only once');
	global.zWorkerInitialized = true;

	// Dispatch incoming messages to the registered handler; any exception is
	// reported back via onError instead of killing the worker.
	addEventListener("message", function(event) {
		var message = event.data, type = message.type, sn = message.sn;
		var handler = handlers[type];
		if (handler) {
			try {
				handler(message);
			} catch (e) {
				onError(type, sn, e);
			}
		}
		//for debug
		//postMessage({type: 'echo', originalType: type, sn: sn});
	});

	var handlers = {
		importScripts: doImportScripts,
		newTask: newTask,
		append: processData,
		flush: processData,
	};

	// deflater/inflater tasks indexed by serial numbers
	var tasks = {};

	// Load additional codec scripts into the worker scope, then acknowledge.
	function doImportScripts(msg) {
		if (msg.scripts && msg.scripts.length > 0)
			importScripts.apply(undefined, msg.scripts);
		postMessage({type: 'importScripts'});
	}

	// Create a codec task for serial number msg.sn; the codec class is looked
	// up by name on the worker's global scope.
	function newTask(msg) {
		var CodecClass = global[msg.codecClass];
		var sn = msg.sn;
		if (tasks[sn])
			throw Error('duplicated sn');
		tasks[sn] = {
			codec: new CodecClass(msg.options),
			crcInput: msg.crcType === 'input',
			crcOutput: msg.crcType === 'output',
			crc: new Crc32(),
		};
		postMessage({type: 'newTask', sn: sn});
	}

	// performance may not be supported
	var now = global.performance ? global.performance.now.bind(global.performance) : Date.now;

	// Feed a chunk to ('append') or finalize ('flush') the codec for msg.sn,
	// maintain the running CRC, and post the output with timing info.
	function processData(msg) {
		var sn = msg.sn, type = msg.type, input = msg.data;
		var task = tasks[sn];
		// allow creating codec on first append
		if (!task && msg.codecClass) {
			newTask(msg);
			task = tasks[sn];
		}
		var isAppend = type === 'append';
		var start = now();
		var output;
		if (isAppend) {
			try {
				output = task.codec.append(input, function onprogress(loaded) {
					postMessage({type: 'progress', sn: sn, loaded: loaded});
				});
			} catch (e) {
				delete tasks[sn];
				throw e;
			}
		} else {
			// flush ends the task, so drop it from the table first.
			delete tasks[sn];
			output = task.codec.flush();
		}
		var codecTime = now() - start;

		start = now();
		if (input && task.crcInput)
			task.crc.append(input);
		if (output && task.crcOutput)
			task.crc.append(output);
		var crcTime = now() - start;

		var rmsg = {type: type, sn: sn, codecTime: codecTime, crcTime: crcTime};
		var transferables = [];
		if (output) {
			rmsg.data = output;
			transferables.push(output.buffer);
		}
		if (!isAppend && (task.crcInput || task.crcOutput))
			rmsg.crc = task.crc.get();

		// posting a message with transferables will fail on IE10
		try {
			postMessage(rmsg, transferables);
		} catch(ex) {
			postMessage(rmsg); // retry without transferables
		}
	}

	// Report a handler failure back to the main thread for the given task.
	function onError(type, sn, e) {
		var msg = {
			type: type,
			sn: sn,
			error: formatError(e)
		};
		postMessage(msg);
	}

	// Error objects can't be posted directly; send message + stack only.
	function formatError(e) {
		return { message: e.message, stack: e.stack };
	}

	// Crc32 code copied from file zip.js
	function Crc32() {
		this.crc = -1;
	}
	Crc32.prototype.append = function append(data) {
		var crc = this.crc | 0, table = this.table;
		for (var offset = 0, len = data.length | 0; offset < len; offset++)
			crc = (crc >>> 8) ^ table[(crc ^ data[offset]) & 0xFF];
		this.crc = crc;
	};
	Crc32.prototype.get = function get() {
		return ~this.crc;
	};
	// Precomputed CRC-32 lookup table (standard 0xEDB88320 polynomial).
	Crc32.prototype.table = (function() {
		var i, j, t, table = []; // Uint32Array is actually slower than []
		for (i = 0; i < 256; i++) {
			t = i;
			for (j = 0; j < 8; j++)
				if (t & 1)
					t = (t >>> 1) ^ 0xEDB88320;
				else
					t = t >>> 1;
			table[i] = t;
		}
		return table;
	})();

	// "no-op" codec
	function NOOP() {}
	global.NOOP = NOOP;
	NOOP.prototype.append = function append(bytes, onprogress) {
		return bytes;
	};
	NOOP.prototype.flush = function flush() {};
})(this);
154 |
--------------------------------------------------------------------------------
/src/externalLibs/RGBELoader.js:
--------------------------------------------------------------------------------
import * as THREE from 'three';
/**
 * @author Nikos M. / https://github.com/foo123/
 */

// https://github.com/mrdoob/three.js/issues/5552
// http://en.wikipedia.org/wiki/RGBE_image_format

// Vendored copy of the three.js RGBE (.hdr) loader example.
const RGBELoader = function (manager) {

	this.manager = (manager !== undefined) ? manager : THREE.DefaultLoadingManager;

};

// extend THREE.DataTextureLoader
RGBELoader.prototype = Object.create(THREE.DataTextureLoader.prototype);

// adapted from http://www.graphics.cornell.edu/~bjw/rgbe.html
// Parses an ArrayBuffer holding a Radiance .hdr file. Returns
// { width, height, data, header, gamma, exposure, format, type } on success,
// or null when the header or pixel data is malformed.
RGBELoader.prototype._parser = function (buffer) {

	var
		/* return codes for rgbe routines */
		RGBE_RETURN_SUCCESS = 0,
		RGBE_RETURN_FAILURE = - 1,

		/* default error routine. change this to change error handling */
		rgbe_read_error = 1,
		rgbe_write_error = 2,
		rgbe_format_error = 3,
		rgbe_memory_error = 4,
		// Logs a message for the given error code and returns FAILURE.
		// Note: `default:` deliberately falls through to the generic message.
		rgbe_error = function (rgbe_error_code, msg) {

			switch (rgbe_error_code) {

				case rgbe_read_error: console.error("THREE.RGBELoader Read Error: " + (msg || ''));
					break;
				case rgbe_write_error: console.error("THREE.RGBELoader Write Error: " + (msg || ''));
					break;
				case rgbe_format_error: console.error("THREE.RGBELoader Bad File Format: " + (msg || ''));
					break;
				default:
				case rgbe_memory_error: console.error("THREE.RGBELoader: Error: " + (msg || ''));

			}
			return RGBE_RETURN_FAILURE;

		},

		/* offsets to red, green, and blue components in a data (float) pixel */
		RGBE_DATA_RED = 0,
		RGBE_DATA_GREEN = 1,
		RGBE_DATA_BLUE = 2,

		/* number of floats per pixel, use 4 since stored in rgba image format */
		RGBE_DATA_SIZE = 4,

		/* flags indicating which fields in an rgbe_header_info are valid */
		RGBE_VALID_PROGRAMTYPE = 1,
		RGBE_VALID_FORMAT = 2,
		RGBE_VALID_DIMENSIONS = 4,

		NEWLINE = "\n",

		// Reads one newline-terminated text line starting at buffer.pos and
		// (unless consume === false) advances buffer.pos past it.
		// NOTE(review): in the multi-chunk path `chunk +=` re-appends text
		// already added to `s`, so header lines longer than chunkSize (128)
		// chars would come back duplicated — harmless for typical headers,
		// but confirm before reusing this for long lines.
		fgets = function (buffer, lineLimit, consume) {

			lineLimit = !lineLimit ? 1024 : lineLimit;
			var p = buffer.pos,
				i = - 1, len = 0, s = '', chunkSize = 128,
				chunk = String.fromCharCode.apply(null, new Uint16Array(buffer.subarray(p, p + chunkSize)))
			;
			while ((0 > (i = chunk.indexOf(NEWLINE))) && (len < lineLimit) && (p < buffer.byteLength)) {

				s += chunk; len += chunk.length;
				p += chunkSize;
				chunk += String.fromCharCode.apply(null, new Uint16Array(buffer.subarray(p, p + chunkSize)));

			}

			if (- 1 < i) {

				/*for (i=l-1; i>=0; i--) {
					byteCode = m.charCodeAt(i);
					if (byteCode > 0x7f && byteCode <= 0x7ff) byteLen++;
					else if (byteCode > 0x7ff && byteCode <= 0xffff) byteLen += 2;
					if (byteCode >= 0xDC00 && byteCode <= 0xDFFF) i--; //trail surrogate
				}*/
				if (false !== consume) buffer.pos += len + i + 1;
				return s + chunk.slice(0, i);

			}
			return false;

		},

		/* minimal header reading. modify if you want to parse more information */
		RGBE_ReadHeader = function (buffer) {

			var line, match,

				// regexes to parse header info fields
				magic_token_re = /^#\?(\S+)$/,
				gamma_re = /^\s*GAMMA\s*=\s*(\d+(\.\d+)?)\s*$/,
				exposure_re = /^\s*EXPOSURE\s*=\s*(\d+(\.\d+)?)\s*$/,
				format_re = /^\s*FORMAT=(\S+)\s*$/,
				dimensions_re = /^\s*\-Y\s+(\d+)\s+\+X\s+(\d+)\s*$/,

				// RGBE format header struct
				header = {

					valid: 0, /* indicate which fields are valid */

					string: '', /* the actual header string */

					comments: '', /* comments found in header */

					programtype: 'RGBE', /* listed at beginning of file to identify it after "#?". defaults to "RGBE" */

					format: '', /* RGBE format, default 32-bit_rle_rgbe */

					gamma: 1.0, /* image has already been gamma corrected with given gamma. defaults to 1.0 (no correction) */

					exposure: 1.0, /* a value of 1.0 in an image corresponds to watts/steradian/m^2. defaults to 1.0 */

					width: 0, height: 0 /* image dimensions, width/height */

				};

			if (buffer.pos >= buffer.byteLength || !(line = fgets(buffer))) {

				return rgbe_error(rgbe_read_error, "no header found");

			}
			/* if you want to require the magic token then uncomment the next line */
			if (!(match = line.match(magic_token_re))) {

				return rgbe_error(rgbe_format_error, "bad initial token");

			}
			header.valid |= RGBE_VALID_PROGRAMTYPE;
			header.programtype = match[1];
			header.string += line + "\n";

			// Read header lines until both FORMAT and the dimensions line have
			// been seen (or the buffer runs out).
			while (true) {

				line = fgets(buffer);
				if (false === line) break;
				header.string += line + "\n";

				if ('#' === line.charAt(0)) {

					header.comments += line + "\n";
					continue; // comment line

				}

				if (match = line.match(gamma_re)) {

					// (parseFloat ignores the second argument)
					header.gamma = parseFloat(match[1], 10);

				}
				if (match = line.match(exposure_re)) {

					header.exposure = parseFloat(match[1], 10);

				}
				if (match = line.match(format_re)) {

					header.valid |= RGBE_VALID_FORMAT;
					header.format = match[1];//'32-bit_rle_rgbe';

				}
				if (match = line.match(dimensions_re)) {

					header.valid |= RGBE_VALID_DIMENSIONS;
					header.height = parseInt(match[1], 10);
					header.width = parseInt(match[2], 10);

				}

				if ((header.valid & RGBE_VALID_FORMAT) && (header.valid & RGBE_VALID_DIMENSIONS)) break;

			}

			if (!(header.valid & RGBE_VALID_FORMAT)) {

				return rgbe_error(rgbe_format_error, "missing format specifier");

			}
			if (!(header.valid & RGBE_VALID_DIMENSIONS)) {

				return rgbe_error(rgbe_format_error, "missing image size specifier");

			}

			return header;

		},

		// Decodes the pixel section (adaptive RLE, or flat when the file is
		// not run-length encoded) into an RGBA Uint8Array of size 4*w*h.
		RGBE_ReadPixels_RLE = function (buffer, w, h) {

			var data_rgba, offset, pos, count, byteValue,
				scanline_buffer, ptr, ptr_end, i, l, off, isEncodedRun,
				scanline_width = w, num_scanlines = h, rgbeStart
			;

			if (
				// run length encoding is not allowed so read flat
				((scanline_width < 8) || (scanline_width > 0x7fff)) ||
				// this file is not run length encoded
				((2 !== buffer[0]) || (2 !== buffer[1]) || (buffer[2] & 0x80))
			) {

				// return the flat buffer
				return new Uint8Array(buffer);

			}

			if (scanline_width !== ((buffer[2] << 8) | buffer[3])) {

				return rgbe_error(rgbe_format_error, "wrong scanline width");

			}

			data_rgba = new Uint8Array(4 * w * h);

			if (!data_rgba || !data_rgba.length) {

				return rgbe_error(rgbe_memory_error, "unable to allocate buffer space");

			}

			offset = 0; pos = 0; ptr_end = 4 * scanline_width;
			rgbeStart = new Uint8Array(4);
			scanline_buffer = new Uint8Array(ptr_end);

			// read in each successive scanline
			while ((num_scanlines > 0) && (pos < buffer.byteLength)) {

				if (pos + 4 > buffer.byteLength) {

					return rgbe_error(rgbe_read_error);

				}

				rgbeStart[0] = buffer[pos++];
				rgbeStart[1] = buffer[pos++];
				rgbeStart[2] = buffer[pos++];
				rgbeStart[3] = buffer[pos++];

				if ((2 != rgbeStart[0]) || (2 != rgbeStart[1]) || (((rgbeStart[2] << 8) | rgbeStart[3]) != scanline_width)) {

					return rgbe_error(rgbe_format_error, "bad rgbe scanline format");

				}

				// read each of the four channels for the scanline into the buffer
				// first red, then green, then blue, then exponent
				ptr = 0;
				while ((ptr < ptr_end) && (pos < buffer.byteLength)) {

					count = buffer[pos++];
					// count bytes > 128 denote an encoded run of (count - 128)
					// identical values; <= 128 denotes a literal run.
					isEncodedRun = count > 128;
					if (isEncodedRun) count -= 128;

					if ((0 === count) || (ptr + count > ptr_end)) {

						return rgbe_error(rgbe_format_error, "bad scanline data");

					}

					if (isEncodedRun) {

						// a (encoded) run of the same value
						byteValue = buffer[pos++];
						for (i = 0; i < count; i++) {

							scanline_buffer[ptr++] = byteValue;

						}
						//ptr += count;

					} else {

						// a literal-run
						scanline_buffer.set(buffer.subarray(pos, pos + count), ptr);
						ptr += count; pos += count;

					}

				}


				// now convert data from buffer into rgba
				// first red, then green, then blue, then exponent (alpha)
				l = scanline_width; //scanline_buffer.byteLength;
				for (i = 0; i < l; i++) {

					off = 0;
					data_rgba[offset] = scanline_buffer[i + off];
					off += scanline_width; //1;
					data_rgba[offset + 1] = scanline_buffer[i + off];
					off += scanline_width; //1;
					data_rgba[offset + 2] = scanline_buffer[i + off];
					off += scanline_width; //1;
					data_rgba[offset + 3] = scanline_buffer[i + off];
					offset += 4;

				}

				num_scanlines--;

			}

			return data_rgba;

		}
	;

	var byteArray = new Uint8Array(buffer),
		byteLength = byteArray.byteLength;
	// buffer.pos is a cursor fgets/ReadHeader advance through the byte array.
	byteArray.pos = 0;
	var rgbe_header_info = RGBE_ReadHeader(byteArray);

	if (RGBE_RETURN_FAILURE !== rgbe_header_info) {

		var w = rgbe_header_info.width,
			h = rgbe_header_info.height,
			image_rgba_data = RGBE_ReadPixels_RLE(byteArray.subarray(byteArray.pos), w, h)
		;
		if (RGBE_RETURN_FAILURE !== image_rgba_data) {

			return {
				width: w, height: h,
				data: image_rgba_data,
				header: rgbe_header_info.string,
				gamma: rgbe_header_info.gamma,
				exposure: rgbe_header_info.exposure,
				format: THREE.RGBEFormat, // handled as THREE.RGBAFormat in shaders
				type: THREE.UnsignedByteType
			};

		}

	}
	return null;

};
export { RGBELoader }
--------------------------------------------------------------------------------
/public/deflate.js:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright (c) 2013 Gildas Lormeau. All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | 1. Redistributions of source code must retain the above copyright notice,
8 | this list of conditions and the following disclaimer.
9 |
10 | 2. Redistributions in binary form must reproduce the above copyright
11 | notice, this list of conditions and the following disclaimer in
12 | the documentation and/or other materials provided with the distribution.
13 |
14 | 3. The names of the authors may not be used to endorse or promote products
15 | derived from this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
18 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
19 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
20 | INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
21 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
23 | OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
26 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | */
28 |
29 | /*
30 | * This program is based on JZlib 1.0.2 ymnk, JCraft,Inc.
31 | * JZlib is based on zlib-1.1.3, so all credit should go authors
32 | * Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
33 | * and contributors of zlib.
34 | */
35 |
36 | (function(global) {
37 | "use strict";
38 |
39 | // Global
40 |
41 | var MAX_BITS = 15;
42 | var D_CODES = 30;
43 | var BL_CODES = 19;
44 |
45 | var LENGTH_CODES = 29;
46 | var LITERALS = 256;
47 | var L_CODES = (LITERALS + 1 + LENGTH_CODES);
48 | var HEAP_SIZE = (2 * L_CODES + 1);
49 |
50 | var END_BLOCK = 256;
51 |
52 | // Bit length codes must not exceed MAX_BL_BITS bits
53 | var MAX_BL_BITS = 7;
54 |
55 | // repeat previous bit length 3-6 times (2 bits of repeat count)
56 | var REP_3_6 = 16;
57 |
58 | // repeat a zero length 3-10 times (3 bits of repeat count)
59 | var REPZ_3_10 = 17;
60 |
61 | // repeat a zero length 11-138 times (7 bits of repeat count)
62 | var REPZ_11_138 = 18;
63 |
64 | // The lengths of the bit length codes are sent in order of decreasing
65 | // probability, to avoid transmitting the lengths for unused bit
66 | // length codes.
67 |
68 | var Buf_size = 8 * 2;
69 |
70 | // JZlib version : "1.0.2"
71 | var Z_DEFAULT_COMPRESSION = -1;
72 |
73 | // compression strategy
74 | var Z_FILTERED = 1;
75 | var Z_HUFFMAN_ONLY = 2;
76 | var Z_DEFAULT_STRATEGY = 0;
77 |
78 | var Z_NO_FLUSH = 0;
79 | var Z_PARTIAL_FLUSH = 1;
80 | var Z_FULL_FLUSH = 3;
81 | var Z_FINISH = 4;
82 |
83 | var Z_OK = 0;
84 | var Z_STREAM_END = 1;
85 | var Z_NEED_DICT = 2;
86 | var Z_STREAM_ERROR = -2;
87 | var Z_DATA_ERROR = -3;
88 | var Z_BUF_ERROR = -5;
89 |
90 | // Tree
91 |
92 | // see definition of array dist_code below
93 | var _dist_code = [ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
94 | 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
95 | 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
96 | 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
97 | 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
98 | 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
99 | 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, 18, 18, 19, 19,
100 | 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
101 | 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
102 | 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
103 | 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
104 | 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29,
105 | 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
106 | 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 ];
107 |
108 | function Tree() {
109 | var that = this;
110 |
111 | // dyn_tree; // the dynamic tree
112 | // max_code; // largest code with non zero frequency
113 | // stat_desc; // the corresponding static tree
114 |
115 | // Compute the optimal bit lengths for a tree and update the total bit
116 | // length
117 | // for the current block.
118 | // IN assertion: the fields freq and dad are set, heap[heap_max] and
119 | // above are the tree nodes sorted by increasing frequency.
120 | // OUT assertions: the field len is set to the optimal bit length, the
121 | // array bl_count contains the frequencies for each bit length.
122 | // The length opt_len is updated; static_len is also updated if stree is
123 | // not null.
	function gen_bitlen(s) {
		var tree = that.dyn_tree;
		var stree = that.stat_desc.static_tree; // null for the bit length tree (see static_bl_desc)
		var extra = that.stat_desc.extra_bits;
		var base = that.stat_desc.extra_base;
		var max_length = that.stat_desc.max_length;
		var h; // heap index
		var n, m; // iterate over the tree elements
		var bits; // bit length
		var xbits; // extra bits
		var f; // frequency
		var overflow = 0; // number of elements with bit length too large

		for (bits = 0; bits <= MAX_BITS; bits++)
			s.bl_count[bits] = 0;

		// In a first pass, compute the optimal bit lengths (which may
		// overflow in the case of the bit length tree).
		tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap

		for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {
			n = s.heap[h];
			// A node's length is one more than its father's (tree[n*2+1]
			// holds the father index at this point).
			bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
			if (bits > max_length) {
				bits = max_length;
				overflow++;
			}
			tree[n * 2 + 1] = bits;
			// We overwrite tree[n*2+1] which is no longer needed

			if (n > that.max_code)
				continue; // not a leaf node

			s.bl_count[bits]++;
			xbits = 0;
			if (n >= base)
				xbits = extra[n - base];
			f = tree[n * 2];
			s.opt_len += f * (bits + xbits);
			if (stree)
				s.static_len += f * (stree[n * 2 + 1] + xbits);
		}
		if (overflow === 0)
			return;

		// This happens for example on obj2 and pic of the Calgary corpus
		// Find the first bit length which could increase:
		do {
			bits = max_length - 1;
			while (s.bl_count[bits] === 0)
				bits--;
			s.bl_count[bits]--; // move one leaf down the tree
			s.bl_count[bits + 1] += 2; // move one overflow item as its brother
			s.bl_count[max_length]--;
			// The brother of the overflow item also moves one step up,
			// but this does not affect bl_count[max_length]
			overflow -= 2;
		} while (overflow > 0);

		// Now recompute all bit lengths, scanning in increasing frequency.
		// h still indexes one past the last heap entry from the first pass,
		// so --h walks the heap from most to least frequent.
		for (bits = max_length; bits !== 0; bits--) {
			n = s.bl_count[bits];
			while (n !== 0) {
				m = s.heap[--h];
				if (m > that.max_code)
					continue;
				if (tree[m * 2 + 1] != bits) {
					s.opt_len += (bits - tree[m * 2 + 1]) * tree[m * 2];
					tree[m * 2 + 1] = bits;
				}
				n--;
			}
		}
	}
197 |
198 | // Reverse the first len bits of a code, using straightforward code (a
199 | // faster
200 | // method would use a table)
201 | // IN assertion: 1 <= len <= 15
202 | function bi_reverse(code, // the value to invert
203 | len // its bit length
204 | ) {
205 | var res = 0;
206 | do {
207 | res |= code & 1;
208 | code >>>= 1;
209 | res <<= 1;
210 | } while (--len > 0);
211 | return res >>> 1;
212 | }
213 |
214 | // Generate the codes for a given tree and bit counts (which need not be
215 | // optimal).
216 | // IN assertion: the array bl_count contains the bit length statistics for
217 | // the given tree and the field len is set for all tree elements.
218 | // OUT assertion: the field code is set for all tree elements of non
219 | // zero code length.
220 | function gen_codes(tree, // the tree to decorate
221 | max_code, // largest code with non zero frequency
222 | bl_count // number of codes at each bit length
223 | ) {
224 | var next_code = []; // next code value for each
225 | // bit length
226 | var code = 0; // running code value
227 | var bits; // bit index
228 | var n; // code index
229 | var len;
230 |
231 | // The distribution counts are first used to generate the code values
232 | // without bit reversal.
233 | for (bits = 1; bits <= MAX_BITS; bits++) {
234 | next_code[bits] = code = ((code + bl_count[bits - 1]) << 1);
235 | }
236 |
237 | // Check that the bit counts in bl_count are consistent. The last code
238 | // must be all ones.
239 | // Assert (code + bl_count[MAX_BITS]-1 == (1<= 1; n--)
300 | s.pqdownheap(tree, n);
301 |
302 | // Construct the Huffman tree by repeatedly combining the least two
303 | // frequent nodes.
304 |
305 | node = elems; // next internal node of the tree
306 | do {
307 | // n = node of least frequency
308 | n = s.heap[1];
309 | s.heap[1] = s.heap[s.heap_len--];
310 | s.pqdownheap(tree, 1);
311 | m = s.heap[1]; // m = node of next least frequency
312 |
313 | s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
314 | s.heap[--s.heap_max] = m;
315 |
316 | // Create a new node father of n and m
317 | tree[node * 2] = (tree[n * 2] + tree[m * 2]);
318 | s.depth[node] = Math.max(s.depth[n], s.depth[m]) + 1;
319 | tree[n * 2 + 1] = tree[m * 2 + 1] = node;
320 |
321 | // and insert the new node in the heap
322 | s.heap[1] = node++;
323 | s.pqdownheap(tree, 1);
324 | } while (s.heap_len >= 2);
325 |
326 | s.heap[--s.heap_max] = s.heap[1];
327 |
328 | // At this point, the fields freq and dad are set. We can now
329 | // generate the bit lengths.
330 |
331 | gen_bitlen(s);
332 |
333 | // The field len is now set, we can generate the bit codes
334 | gen_codes(tree, that.max_code, s.bl_count);
335 | };
336 |
337 | }
338 |
339 | Tree._length_code = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
340 | 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20,
341 | 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
342 | 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
343 | 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
344 | 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
345 | 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 ];
346 |
347 | Tree.base_length = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0 ];
348 |
349 | Tree.base_dist = [ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384,
350 | 24576 ];
351 |
352 | // Mapping from a distance to a distance code. dist is the distance - 1 and
353 | // must not have side effects. _dist_code[256] and _dist_code[257] are never
354 | // used.
Tree.d_code = function(dist) {
	// Distances below 256 index _dist_code directly; larger distances use
	// the second half of the table, indexed by the distance divided by 128.
	if (dist < 256) {
		return _dist_code[dist];
	}
	return _dist_code[256 + (dist >>> 7)];
};
358 |
// extra bits for each length code
Tree.extra_lbits = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 ];

// extra bits for each distance code
Tree.extra_dbits = [ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ];

// extra bits for each bit length code
Tree.extra_blbits = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7 ];

// The order in which the bit length code lengths are transmitted
// (used by send_all_trees / build_bl_tree).
Tree.bl_order = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];
369 |
370 | // StaticTree
371 |
// Bundles a (possibly null) static Huffman tree together with the
// parameters needed to build and encode with it.
function StaticTree(static_tree, extra_bits, extra_base, elems, max_length) {
	this.static_tree = static_tree; // static tree as [code, bits] pairs, or null
	this.extra_bits = extra_bits; // extra bits for each code, or null
	this.extra_base = extra_base; // first code that carries extra bits
	this.elems = elems; // number of elements in the tree
	this.max_length = max_length; // maximum bit length for the codes
}
380 |
381 | StaticTree.static_ltree = [ 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8, 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8, 2, 8,
382 | 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8, 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8, 10, 8, 138, 8, 74, 8, 202, 8, 42,
383 | 8, 170, 8, 106, 8, 234, 8, 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8, 6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8,
384 | 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8, 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8, 30, 8, 158, 8, 94, 8,
385 | 222, 8, 62, 8, 190, 8, 126, 8, 254, 8, 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8, 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113,
386 | 8, 241, 8, 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8, 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8, 5, 8, 133, 8,
387 | 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8, 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8, 13, 8, 141, 8, 77, 8, 205, 8, 45, 8,
388 | 173, 8, 109, 8, 237, 8, 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8, 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9,
389 | 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9, 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9, 43, 9, 299, 9, 171, 9,
390 | 427, 9, 107, 9, 363, 9, 235, 9, 491, 9, 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9, 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379,
391 | 9, 251, 9, 507, 9, 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9, 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9, 23,
392 | 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9, 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9, 15, 9, 271, 9, 143, 9,
393 | 399, 9, 79, 9, 335, 9, 207, 9, 463, 9, 47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9, 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9,
394 | 223, 9, 479, 9, 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9, 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7, 8, 7, 72, 7,
395 | 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7, 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7, 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8,
396 | 99, 8, 227, 8 ];
397 |
398 | StaticTree.static_dtree = [ 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5, 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5, 1, 5, 17, 5, 9, 5,
399 | 25, 5, 5, 5, 21, 5, 13, 5, 29, 5, 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 ];
400 |
// Descriptor for the static literal/length tree (L_CODES elements, max 15 bits).
StaticTree.static_l_desc = new StaticTree(StaticTree.static_ltree, Tree.extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);

// Descriptor for the static distance tree (D_CODES elements, max 15 bits).
StaticTree.static_d_desc = new StaticTree(StaticTree.static_dtree, Tree.extra_dbits, 0, D_CODES, MAX_BITS);

// Descriptor for the bit length tree: no static tree, codes limited to 7 bits.
StaticTree.static_bl_desc = new StaticTree(null, Tree.extra_blbits, 0, BL_CODES, MAX_BL_BITS);
406 |
// Deflate

var MAX_MEM_LEVEL = 9; // maximum memory level
var DEF_MEM_LEVEL = 8; // default memory level
411 |
// Per-compression-level tuning parameters; see config_table for the
// values used at levels 0..9.
function Config(good_length, max_lazy, nice_length, max_chain, func) {
	this.good_length = good_length; // use a faster search when the previous match is longer than this
	this.max_lazy = max_lazy; // attempt a better match only below this match length
	this.nice_length = nice_length; // stop searching when current match exceeds this
	this.max_chain = max_chain; // never search hash chains beyond this length
	this.func = func; // compression function to use (STORED/FAST/SLOW)
}
420 |
// Compression function selectors used by config_table:
var STORED = 0; // store only, no compression
var FAST = 1;
var SLOW = 2;
// Tuning parameters (good_length, max_lazy, nice_length, max_chain, func)
// for compression levels 0..9; higher levels search harder.
var config_table = [ new Config(0, 0, 0, 0, STORED), new Config(4, 4, 8, 4, FAST), new Config(4, 5, 16, 8, FAST), new Config(4, 6, 32, 32, FAST),
		new Config(4, 4, 16, 16, SLOW), new Config(8, 16, 32, 32, SLOW), new Config(8, 16, 128, 128, SLOW), new Config(8, 32, 128, 256, SLOW),
		new Config(32, 128, 258, 1024, SLOW), new Config(32, 258, 258, 4096, SLOW) ];
427 |
// Error messages, ordered from Z_NEED_DICT (2) down to Z_VERSION_ERROR (-6).
var z_errmsg = [ "need dictionary", // Z_NEED_DICT
		// 2
		"stream end", // Z_STREAM_END 1
		"", // Z_OK 0
		"", // Z_ERRNO (-1)
		"stream error", // Z_STREAM_ERROR (-2)
		"data error", // Z_DATA_ERROR (-3)
		"", // Z_MEM_ERROR (-4)
		"buffer error", // Z_BUF_ERROR (-5)
		"",// Z_VERSION_ERROR (-6)
		"" ];
439 |
// block not completed, need more input or more output
var NeedMore = 0;

// block flush performed
var BlockDone = 1;

// finish started, need only more output at next deflate
var FinishStarted = 2;

// finish done, accept no more input or output
var FinishDone = 3;

// preset dictionary flag in zlib header
var PRESET_DICT = 0x20;

// deflate stream status values
var INIT_STATE = 42;
var BUSY_STATE = 113;
var FINISH_STATE = 666;

// The deflate compression method
var Z_DEFLATED = 8;

// block types written in the 3-bit block header
var STORED_BLOCK = 0;
var STATIC_TREES = 1;
var DYN_TREES = 2;

// LZ77 match length bounds
var MIN_MATCH = 3;
var MAX_MATCH = 258;
var MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
469 |
// Heap ordering for tree nodes n and m: compare primarily by frequency
// (tree[x*2]), breaking ties by subtree depth.
function smaller(tree, n, m, depth) {
	var freqN = tree[n * 2];
	var freqM = tree[m * 2];
	if (freqN < freqM) {
		return true;
	}
	return freqN == freqM && depth[n] <= depth[m];
}
475 |
476 | function Deflate() {
477 |
478 | var that = this;
479 | var strm; // pointer back to this zlib stream
480 | var status; // as the name implies
481 | // pending_buf; // output still pending
482 | var pending_buf_size; // size of pending_buf
483 | // pending_out; // next pending byte to output to the stream
484 | // pending; // nb of bytes in the pending buffer
485 | var method; // STORED (for zip only) or DEFLATED
486 | var last_flush; // value of flush param for previous deflate call
487 |
488 | var w_size; // LZ77 window size (32K by default)
489 | var w_bits; // log2(w_size) (8..16)
490 | var w_mask; // w_size - 1
491 |
492 | var window;
493 | // Sliding window. Input bytes are read into the second half of the window,
494 | // and move to the first half later to keep a dictionary of at least wSize
495 | // bytes. With this organization, matches are limited to a distance of
496 | // wSize-MAX_MATCH bytes, but this ensures that IO is always
497 | // performed with a length multiple of the block size. Also, it limits
498 | // the window size to 64K, which is quite useful on MSDOS.
499 | // To do: use the user input buffer as sliding window.
500 |
501 | var window_size;
502 | // Actual size of window: 2*wSize, except when the user input buffer
503 | // is directly used as sliding window.
504 |
505 | var prev;
506 | // Link to older string with same hash index. To limit the size of this
507 | // array to 64K, this link is maintained only for the last 32K strings.
508 | // An index in this array is thus a window index modulo 32K.
509 |
510 | var head; // Heads of the hash chains or NIL.
511 |
512 | var ins_h; // hash index of string to be inserted
513 | var hash_size; // number of elements in hash table
514 | var hash_bits; // log2(hash_size)
515 | var hash_mask; // hash_size-1
516 |
517 | // Number of bits by which ins_h must be shifted at each input
518 | // step. It must be such that after MIN_MATCH steps, the oldest
519 | // byte no longer takes part in the hash key, that is:
520 | // hash_shift * MIN_MATCH >= hash_bits
521 | var hash_shift;
522 |
523 | // Window position at the beginning of the current output block. Gets
524 | // negative when the window is moved backwards.
525 |
526 | var block_start;
527 |
528 | var match_length; // length of best match
529 | var prev_match; // previous match
530 | var match_available; // set if previous match exists
531 | var strstart; // start of string to insert
532 | var match_start; // start of matching string
533 | var lookahead; // number of valid bytes ahead in window
534 |
535 | // Length of the best match at previous step. Matches not greater than this
536 | // are discarded. This is used in the lazy match evaluation.
537 | var prev_length;
538 |
539 | // To speed up deflation, hash chains are never searched beyond this
540 | // length. A higher limit improves compression ratio but degrades the speed.
541 | var max_chain_length;
542 |
543 | // Attempt to find a better match only when the current match is strictly
544 | // smaller than this value. This mechanism is used only for compression
545 | // levels >= 4.
546 | var max_lazy_match;
547 |
548 | // Insert new strings in the hash table only if the match length is not
549 | // greater than this length. This saves time but degrades compression.
550 | // max_insert_length is used only for compression levels <= 3.
551 |
552 | var level; // compression level (1..9)
553 | var strategy; // favor or force Huffman coding
554 |
555 | // Use a faster search when the previous match is longer than this
556 | var good_match;
557 |
558 | // Stop searching when current match exceeds this
559 | var nice_match;
560 |
561 | var dyn_ltree; // literal and length tree
562 | var dyn_dtree; // distance tree
563 | var bl_tree; // Huffman tree for bit lengths
564 |
565 | var l_desc = new Tree(); // desc for literal tree
566 | var d_desc = new Tree(); // desc for distance tree
567 | var bl_desc = new Tree(); // desc for bit length tree
568 |
569 | // that.heap_len; // number of elements in the heap
570 | // that.heap_max; // element of largest frequency
571 | // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
572 | // The same heap array is used to build all trees.
573 |
574 | // Depth of each subtree used as tie breaker for trees of equal frequency
575 | that.depth = [];
576 |
	var l_buf; // index for literals or lengths
578 |
579 | // Size of match buffer for literals/lengths. There are 4 reasons for
580 | // limiting lit_bufsize to 64K:
581 | // - frequencies can be kept in 16 bit counters
582 | // - if compression is not successful for the first block, all input
583 | // data is still in the window so we can still emit a stored block even
584 | // when input comes from standard input. (This can also be done for
585 | // all blocks if lit_bufsize is not greater than 32K.)
586 | // - if compression is not successful for a file smaller than 64K, we can
587 | // even emit a stored file instead of a stored block (saving 5 bytes).
588 | // This is applicable only for zip (not gzip or zlib).
589 | // - creating new Huffman trees less frequently may not provide fast
590 | // adaptation to changes in the input data statistics. (Take for
591 | // example a binary file with poorly compressible code followed by
592 | // a highly compressible string table.) Smaller buffer sizes give
593 | // fast adaptation but have of course the overhead of transmitting
594 | // trees more frequently.
595 | // - I can't count above 4
596 | var lit_bufsize;
597 |
598 | var last_lit; // running index in l_buf
599 |
600 | // Buffer for distances. To simplify the code, d_buf and l_buf have
601 | // the same number of elements. To use different lengths, an extra flag
602 | // array would be necessary.
603 |
	var d_buf; // index into pending_buf
605 |
606 | // that.opt_len; // bit length of current block with optimal trees
607 | // that.static_len; // bit length of current block with static trees
608 | var matches; // number of string matches in current block
609 | var last_eob_len; // bit length of EOB code for last block
610 |
611 | // Output buffer. bits are inserted starting at the bottom (least
612 | // significant bits).
613 | var bi_buf;
614 |
615 | // Number of valid bits in bi_buf. All bits above the last valid bit
616 | // are always zero.
617 | var bi_valid;
618 |
619 | // number of codes at each bit length for an optimal tree
620 | that.bl_count = [];
621 |
622 | // heap used to build the Huffman trees
623 | that.heap = [];
624 |
625 | dyn_ltree = [];
626 | dyn_dtree = [];
627 | bl_tree = [];
628 |
629 | function lm_init() {
630 | var i;
631 | window_size = 2 * w_size;
632 |
633 | head[hash_size - 1] = 0;
634 | for (i = 0; i < hash_size - 1; i++) {
635 | head[i] = 0;
636 | }
637 |
638 | // Set the default configuration parameters:
639 | max_lazy_match = config_table[level].max_lazy;
640 | good_match = config_table[level].good_length;
641 | nice_match = config_table[level].nice_length;
642 | max_chain_length = config_table[level].max_chain;
643 |
644 | strstart = 0;
645 | block_start = 0;
646 | lookahead = 0;
647 | match_length = prev_length = MIN_MATCH - 1;
648 | match_available = 0;
649 | ins_h = 0;
650 | }
651 |
652 | function init_block() {
653 | var i;
654 | // Initialize the trees.
655 | for (i = 0; i < L_CODES; i++)
656 | dyn_ltree[i * 2] = 0;
657 | for (i = 0; i < D_CODES; i++)
658 | dyn_dtree[i * 2] = 0;
659 | for (i = 0; i < BL_CODES; i++)
660 | bl_tree[i * 2] = 0;
661 |
662 | dyn_ltree[END_BLOCK * 2] = 1;
663 | that.opt_len = that.static_len = 0;
664 | last_lit = matches = 0;
665 | }
666 |
667 | // Initialize the tree data structures for a new zlib stream.
668 | function tr_init() {
669 |
670 | l_desc.dyn_tree = dyn_ltree;
671 | l_desc.stat_desc = StaticTree.static_l_desc;
672 |
673 | d_desc.dyn_tree = dyn_dtree;
674 | d_desc.stat_desc = StaticTree.static_d_desc;
675 |
676 | bl_desc.dyn_tree = bl_tree;
677 | bl_desc.stat_desc = StaticTree.static_bl_desc;
678 |
679 | bi_buf = 0;
680 | bi_valid = 0;
681 | last_eob_len = 8; // enough lookahead for inflate
682 |
683 | // Initialize the first block of the first file:
684 | init_block();
685 | }
686 |
687 | // Restore the heap property by moving down the tree starting at node k,
688 | // exchanging a node with the smallest of its two sons if necessary,
689 | // stopping
690 | // when the heap property is re-established (each father smaller than its
691 | // two sons).
	that.pqdownheap = function(tree, // the tree to restore
	k // node to move down
	) {
		var heap = that.heap;
		var v = heap[k]; // the value being sifted down
		var j = k << 1; // left son of k
		while (j <= that.heap_len) {
			// Set j to the smallest of the two sons:
			if (j < that.heap_len && smaller(tree, heap[j + 1], heap[j], that.depth)) {
				j++;
			}
			// Exit if v is smaller than both sons
			if (smaller(tree, v, heap[j], that.depth))
				break;

			// Exchange v with the smallest son
			heap[k] = heap[j];
			k = j;
			// And continue down the tree, setting j to the left son of k
			j <<= 1;
		}
		heap[k] = v; // final position of the sifted value
	};
715 |
716 | // Scan a literal or distance tree to determine the frequencies of the codes
717 | // in the bit length tree.
	function scan_tree(tree,// the tree to be scanned
	max_code // and its largest code of non zero frequency
	) {
		var n; // iterates over all tree elements
		var prevlen = -1; // last emitted length
		var curlen; // length of current code
		var nextlen = tree[0 * 2 + 1]; // length of next code
		var count = 0; // repeat count of the current code
		var max_count = 7; // max repeat count
		var min_count = 4; // min repeat count

		if (nextlen === 0) {
			max_count = 138;
			min_count = 3;
		}
		tree[(max_code + 1) * 2 + 1] = 0xffff; // guard

		for (n = 0; n <= max_code; n++) {
			curlen = nextlen;
			nextlen = tree[(n + 1) * 2 + 1];
			if (++count < max_count && curlen == nextlen) {
				continue; // still inside a run of equal lengths
			} else if (count < min_count) {
				// Run too short for a repeat code: count each length directly.
				bl_tree[curlen * 2] += count;
			} else if (curlen !== 0) {
				// Run of a non-zero length: one occurrence plus a REP_3_6.
				if (curlen != prevlen)
					bl_tree[curlen * 2]++;
				bl_tree[REP_3_6 * 2]++;
			} else if (count <= 10) {
				bl_tree[REPZ_3_10 * 2]++;
			} else {
				bl_tree[REPZ_11_138 * 2]++;
			}
			count = 0;
			prevlen = curlen;
			// Choose the repeat limits for the next run (runs of zero
			// lengths may be repeated much longer).
			if (nextlen === 0) {
				max_count = 138;
				min_count = 3;
			} else if (curlen == nextlen) {
				max_count = 6;
				min_count = 3;
			} else {
				max_count = 7;
				min_count = 4;
			}
		}
	}
765 |
766 | // Construct the Huffman tree for the bit lengths and return the index in
767 | // bl_order of the last bit length code to send.
768 | function build_bl_tree() {
769 | var max_blindex; // index of last bit length code of non zero freq
770 |
771 | // Determine the bit length frequencies for literal and distance trees
772 | scan_tree(dyn_ltree, l_desc.max_code);
773 | scan_tree(dyn_dtree, d_desc.max_code);
774 |
775 | // Build the bit length tree:
776 | bl_desc.build_tree(that);
777 | // opt_len now includes the length of the tree representations, except
778 | // the lengths of the bit lengths codes and the 5+5+4 bits for the
779 | // counts.
780 |
781 | // Determine the number of bit length codes to send. The pkzip format
782 | // requires that at least 4 bit length codes be sent. (appnote.txt says
783 | // 3 but the actual value used is 4.)
784 | for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {
785 | if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] !== 0)
786 | break;
787 | }
788 | // Update opt_len to include the bit length tree and counts
789 | that.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
790 |
791 | return max_blindex;
792 | }
793 |
794 | // Output a byte on the stream.
795 | // IN assertion: there is enough room in pending_buf.
796 | function put_byte(p) {
797 | that.pending_buf[that.pending++] = p;
798 | }
799 |
800 | function put_short(w) {
801 | put_byte(w & 0xff);
802 | put_byte((w >>> 8) & 0xff);
803 | }
804 |
805 | function putShortMSB(b) {
806 | put_byte((b >> 8) & 0xff);
807 | put_byte((b & 0xff) & 0xff);
808 | }
809 |
	// Append the low `length` bits of `value` to the bit buffer bi_buf,
	// flushing 16 bits at a time; bits enter at the bottom (LSB first).
	function send_bits(value, length) {
		var val, len = length;
		if (bi_valid > Buf_size - len) {
			// Not enough room: fill bi_buf, flush it, and keep the leftover
			// high bits of value for the next word.
			val = value;
			// bi_buf |= (val << bi_valid);
			bi_buf |= ((val << bi_valid) & 0xffff);
			put_short(bi_buf);
			bi_buf = val >>> (Buf_size - bi_valid);
			bi_valid += len - Buf_size;
		} else {
			// bi_buf |= (value) << bi_valid;
			bi_buf |= (((value) << bi_valid) & 0xffff);
			bi_valid += len;
		}
	}
825 |
826 | function send_code(c, tree) {
827 | var c2 = c * 2;
828 | send_bits(tree[c2] & 0xffff, tree[c2 + 1] & 0xffff);
829 | }
830 |
831 | // Send a literal or distance tree in compressed form, using the codes in
832 | // bl_tree.
	function send_tree(tree,// the tree to be sent
	max_code // and its largest code of non zero frequency
	) {
		var n; // iterates over all tree elements
		var prevlen = -1; // last emitted length
		var curlen; // length of current code
		var nextlen = tree[0 * 2 + 1]; // length of next code
		var count = 0; // repeat count of the current code
		var max_count = 7; // max repeat count
		var min_count = 4; // min repeat count

		if (nextlen === 0) {
			max_count = 138;
			min_count = 3;
		}

		// Same run-length scan as scan_tree, but emitting codes instead of
		// counting frequencies.
		for (n = 0; n <= max_code; n++) {
			curlen = nextlen;
			nextlen = tree[(n + 1) * 2 + 1];
			if (++count < max_count && curlen == nextlen) {
				continue;
			} else if (count < min_count) {
				// Run too short for a repeat code: emit each length directly.
				do {
					send_code(curlen, bl_tree);
				} while (--count !== 0);
			} else if (curlen !== 0) {
				// Non-zero length: emit it once if it changed, then a
				// REP_3_6 code with a 2-bit repeat count.
				if (curlen != prevlen) {
					send_code(curlen, bl_tree);
					count--;
				}
				send_code(REP_3_6, bl_tree);
				send_bits(count - 3, 2);
			} else if (count <= 10) {
				// Short run of zero lengths: REPZ_3_10 with a 3-bit count.
				send_code(REPZ_3_10, bl_tree);
				send_bits(count - 3, 3);
			} else {
				// Long run of zero lengths: REPZ_11_138 with a 7-bit count.
				send_code(REPZ_11_138, bl_tree);
				send_bits(count - 11, 7);
			}
			count = 0;
			prevlen = curlen;
			// Choose the repeat limits for the next run.
			if (nextlen === 0) {
				max_count = 138;
				min_count = 3;
			} else if (curlen == nextlen) {
				max_count = 6;
				min_count = 3;
			} else {
				max_count = 7;
				min_count = 4;
			}
		}
	}
886 |
887 | // Send the header for a block using dynamic Huffman trees: the counts, the
888 | // lengths of the bit length codes, the literal tree and the distance tree.
889 | // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
890 | function send_all_trees(lcodes, dcodes, blcodes) {
891 | var rank; // index in bl_order
892 |
893 | send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
894 | send_bits(dcodes - 1, 5);
895 | send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
896 | for (rank = 0; rank < blcodes; rank++) {
897 | send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
898 | }
899 | send_tree(dyn_ltree, lcodes - 1); // literal tree
900 | send_tree(dyn_dtree, dcodes - 1); // distance tree
901 | }
902 |
903 | // Flush the bit buffer, keeping at most 7 bits in it.
904 | function bi_flush() {
905 | if (bi_valid == 16) {
906 | put_short(bi_buf);
907 | bi_buf = 0;
908 | bi_valid = 0;
909 | } else if (bi_valid >= 8) {
910 | put_byte(bi_buf & 0xff);
911 | bi_buf >>>= 8;
912 | bi_valid -= 8;
913 | }
914 | }
915 |
916 | // Send one empty static block to give enough lookahead for inflate.
917 | // This takes 10 bits, of which 7 may remain in the bit buffer.
918 | // The current inflate code requires 9 bits of lookahead. If the
919 | // last two codes for the previous block (real code plus EOB) were coded
920 | // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
921 | // the last real code. In this case we send two empty static blocks instead
922 | // of one. (There are no problems if the previous block is stored or fixed.)
923 | // To simplify the code, we assume the worst case of last real code encoded
924 | // on one bit only.
	function _tr_align() {
		// Emit an empty static block: 3-bit block header then END_BLOCK.
		send_bits(STATIC_TREES << 1, 3);
		send_code(END_BLOCK, StaticTree.static_ltree);

		bi_flush();

		// Of the 10 bits for the empty block, we have already sent
		// (10 - bi_valid) bits. The lookahead for the last real code (before
		// the EOB of the previous block) was thus at least one plus the length
		// of the EOB plus what we have just sent of the empty static block.
		if (1 + last_eob_len + 10 - bi_valid < 9) {
			send_bits(STATIC_TREES << 1, 3);
			send_code(END_BLOCK, StaticTree.static_ltree);
			bi_flush();
		}
		last_eob_len = 7; // END_BLOCK is coded on 7 bits in the static tree
	}
942 |
943 | // Save the match info and tally the frequency counts. Return true if
944 | // the current block must be flushed.
	function _tr_tally(dist, // distance of matched string
	lc // match length-MIN_MATCH or unmatched char (if dist==0)
	) {
		var out_length, in_length, dcode;
		// Record the distance (2 bytes, high byte first) and the
		// literal/length (1 byte) in the pending buffer.
		that.pending_buf[d_buf + last_lit * 2] = (dist >>> 8) & 0xff;
		that.pending_buf[d_buf + last_lit * 2 + 1] = dist & 0xff;

		that.pending_buf[l_buf + last_lit] = lc & 0xff;
		last_lit++;

		if (dist === 0) {
			// lc is the unmatched char
			dyn_ltree[lc * 2]++;
		} else {
			matches++;
			// Here, lc is the match length - MIN_MATCH
			dist--; // dist = match distance - 1
			dyn_ltree[(Tree._length_code[lc] + LITERALS + 1) * 2]++;
			dyn_dtree[Tree.d_code(dist) * 2]++;
		}

		// Every 8K symbols (levels > 2 only), estimate the output size and
		// suggest an early flush when matching is unproductive.
		if ((last_lit & 0x1fff) === 0 && level > 2) {
			// Compute an upper bound for the compressed length
			out_length = last_lit * 8;
			in_length = strstart - block_start;
			for (dcode = 0; dcode < D_CODES; dcode++) {
				out_length += dyn_dtree[dcode * 2] * (5 + Tree.extra_dbits[dcode]);
			}
			out_length >>>= 3;
			if ((matches < Math.floor(last_lit / 2)) && out_length < Math.floor(in_length / 2))
				return true;
		}

		return (last_lit == lit_bufsize - 1);
		// We avoid equality with lit_bufsize because of wraparound at 64K
		// on 16 bit machines and because stored blocks are restricted to
		// 64K-1 bytes.
	}
983 |
// Send the block data compressed using the given Huffman trees:
// replay the (distance, length/literal) pairs recorded by _tr_tally
// in the pending_buf overlay and emit the corresponding codes.
function compress_block(ltree, dtree) {
	var dist; // distance of matched string
	var lc; // match length or unmatched char (if dist === 0)
	var lx = 0; // running index in l_buf
	var code; // the code to send
	var extra; // number of extra bits to send

	if (last_lit !== 0) {
		do {
			// The distance was stored big-endian in two bytes at d_buf.
			dist = ((that.pending_buf[d_buf + lx * 2] << 8) & 0xff00) | (that.pending_buf[d_buf + lx * 2 + 1] & 0xff);
			lc = (that.pending_buf[l_buf + lx]) & 0xff;
			lx++;

			if (dist === 0) {
				send_code(lc, ltree); // send a literal byte
			} else {
				// Here, lc is the match length - MIN_MATCH
				code = Tree._length_code[lc];

				send_code(code + LITERALS + 1, ltree); // send the length
				// code
				extra = Tree.extra_lbits[code];
				if (extra !== 0) {
					lc -= Tree.base_length[code];
					send_bits(lc, extra); // send the extra length bits
				}
				dist--; // dist is now the match distance - 1
				code = Tree.d_code(dist);

				send_code(code, dtree); // send the distance code
				extra = Tree.extra_dbits[code];
				if (extra !== 0) {
					dist -= Tree.base_dist[code];
					send_bits(dist, extra); // send the extra distance bits
				}
			} // literal or match pair ?

			// Check that the overlay between pending_buf and d_buf+l_buf is
			// ok:
		} while (lx < last_lit);
	}

	send_code(END_BLOCK, ltree);
	// Remember the bit length of the EOB code just sent; _tr_align
	// uses it to decide whether a second empty block is needed.
	last_eob_len = ltree[END_BLOCK * 2 + 1];
}
1030 |
// Write out all bits still pending in the bit buffer, padding with
// zeros, so that output resumes on a byte boundary.
function bi_windup() {
	if (bi_valid > 8) {
		// More than one byte pending: flush both bytes.
		put_short(bi_buf);
	} else if (bi_valid > 0) {
		// A partial byte remains: emit it (high bits are zero).
		put_byte(bi_buf & 0xff);
	}
	bi_valid = 0;
	bi_buf = 0;
}
1041 |
// Append a run of raw bytes from the sliding window to the pending
// output, optionally preceded by the stored-block LEN/NLEN header.
// buf: start offset in the window; len: byte count; header: whether
// to emit the length and its one's complement first.
function copy_block(buf, len, header) {
	// Stored data must begin on a byte boundary.
	bi_windup();
	last_eob_len = 8; // enough lookahead for inflate

	if (header) {
		// LEN followed by its one's complement, per the stored-block
		// format.
		put_short(len);
		put_short(~len);
	}

	that.pending_buf.set(window.subarray(buf, buf + len), that.pending);
	that.pending += len;
}
1059 |
// Emit a stored (uncompressed) block: the 3-bit block header, then
// the raw input bytes with their LEN/NLEN prefix.
// buf: input block offset; stored_len: its length; eof: true when
// this is the last block of the stream.
function _tr_stored_block(buf, stored_len, eof) {
	var header_bits = (STORED_BLOCK << 1) + (eof ? 1 : 0);
	send_bits(header_bits, 3); // block type + final-block flag
	copy_block(buf, stored_len, true); // with LEN/NLEN header
}
1068 |
// Determine the best encoding for the current block: dynamic trees, static
// trees or store, and output the encoded block to the zip file.
function _tr_flush_block(buf, // input block, or NULL if too old
stored_len, // length of input block
eof // true if this is the last block for a file
) {
	var opt_lenb, static_lenb;// opt_len and static_len in bytes
	var max_blindex = 0; // index of last bit length code of non zero freq

	// Build the Huffman trees unless a stored block is forced
	if (level > 0) {
		// Construct the literal and distance trees
		l_desc.build_tree(that);

		d_desc.build_tree(that);

		// At this point, opt_len and static_len are the total bit lengths
		// of
		// the compressed block data, excluding the tree representations.

		// Build the bit length tree for the above two trees, and get the
		// index
		// in bl_order of the last bit length code to send.
		max_blindex = build_bl_tree();

		// Determine the best encoding. Compute first the block length in
		// bytes
		// (+3 for the block header bits, +7 to round up to a byte).
		opt_lenb = (that.opt_len + 3 + 7) >>> 3;
		static_lenb = (that.static_len + 3 + 7) >>> 3;

		if (static_lenb <= opt_lenb)
			opt_lenb = static_lenb;
	} else {
		opt_lenb = static_lenb = stored_len + 5; // force a stored block
	}

	if ((stored_len + 4 <= opt_lenb) && buf != -1) {
		// 4: two words for the lengths
		// The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
		// Otherwise we can't have processed more than WSIZE input bytes
		// since
		// the last block flush, because compression would have been
		// successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
		// transform a block into a stored block.
		_tr_stored_block(buf, stored_len, eof);
	} else if (static_lenb == opt_lenb) {
		// Static trees win (or tie): no tree representation to send.
		send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
		compress_block(StaticTree.static_ltree, StaticTree.static_dtree);
	} else {
		// Dynamic trees: send the tree descriptions, then the data.
		send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
		send_all_trees(l_desc.max_code + 1, d_desc.max_code + 1, max_blindex + 1);
		compress_block(dyn_ltree, dyn_dtree);
	}

	// The above check is made mod 2^32, for files larger than 512 MB
	// and uLong implemented on 32 bits.

	// Reset symbol/frequency state for the next block.
	init_block();

	if (eof) {
		bi_windup();
	}
}
1132 |
// Flush the current block (window range [block_start, strstart)) to
// the pending buffer and start a new block there; eof marks the
// final block of the stream.
function flush_block_only(eof) {
	var block_buf = block_start >= 0 ? block_start : -1;
	_tr_flush_block(block_buf, strstart - block_start, eof);
	block_start = strstart;
	strm.flush_pending();
}
1138 |
// Fill the window when the lookahead becomes insufficient.
// Updates strstart and lookahead.
//
// IN assertion: lookahead < MIN_LOOKAHEAD
// OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
// At least one byte has been read, or avail_in === 0; reads are
// performed for at least two bytes (required for the zip translate_eol
// option -- not supported here).
function fill_window() {
	var n, m;
	var p;
	var more; // Amount of free space at the end of the window.

	do {
		more = (window_size - lookahead - strstart);

		// Deal with !@#$% 64K limit:
		if (more === 0 && strstart === 0 && lookahead === 0) {
			more = w_size;
		} else if (more == -1) {
			// Very unlikely, but possible on 16 bit machine if strstart ==
			// 0
			// and lookahead == 1 (input done one byte at time)
			more--;

			// If the window is almost full and there is insufficient
			// lookahead,
			// move the upper half to the lower one to make room in the
			// upper half.
		} else if (strstart >= w_size + w_size - MIN_LOOKAHEAD) {
			// Slide the upper half of the window down by w_size bytes.
			window.set(window.subarray(w_size, w_size + w_size), 0);

			match_start -= w_size;
			strstart -= w_size; // we now have strstart >= MAX_DIST
			block_start -= w_size;

			// Slide the hash table (could be avoided with 32 bit values
			// at the expense of memory usage). We slide even when level ==
			// 0
			// to keep the hash table consistent if we switch back to level
			// > 0
			// later. (Using level 0 permanently is not an optimal usage of
			// zlib, so we don't care about this pathological case.)

			// Rebase every head[] entry by -w_size; entries that pointed
			// into the discarded half are cleared to 0.
			n = hash_size;
			p = n;
			do {
				m = (head[--p] & 0xffff);
				head[p] = (m >= w_size ? m - w_size : 0);
			} while (--n !== 0);

			// Same rebase for the prev[] chain links.
			n = w_size;
			p = n;
			do {
				m = (prev[--p] & 0xffff);
				prev[p] = (m >= w_size ? m - w_size : 0);
				// If n is not on any hash chain, prev[n] is garbage but
				// its value will never be used.
			} while (--n !== 0);
			more += w_size;
		}

		if (strm.avail_in === 0)
			return;

		// If there was no sliding:
		// strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
		// more == window_size - lookahead - strstart
		// => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
		// => more >= window_size - 2*WSIZE + 2
		// In the BIG_MEM or MMAP case (not yet supported),
		// window_size == input_size + MIN_LOOKAHEAD &&
		// strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
		// Otherwise, window_size == 2*WSIZE so more >= 2.
		// If there was sliding, more >= WSIZE. So in all cases, more >= 2.

		// Pull up to `more` bytes from the input stream into the window.
		n = strm.read_buf(window, strstart + lookahead, more);
		lookahead += n;

		// Initialize the hash value now that we have some input:
		if (lookahead >= MIN_MATCH) {
			ins_h = window[strstart] & 0xff;
			ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
		}
		// If the whole input has less than MIN_MATCH bytes, ins_h is
		// garbage,
		// but this is not important since only literal bytes will be
		// emitted.
	} while (lookahead < MIN_LOOKAHEAD && strm.avail_in !== 0);
}
1229 |
// Copy without compression as much as possible from the input stream,
// return
// the current block state.
// This function does not insert new strings in the dictionary since
// uncompressible data is probably not useful. This function is used
// only for the level=0 compression option.
// NOTE: this function should be optimized to avoid extra copying from
// window to pending_buf.
function deflate_stored(flush) {
	// Stored blocks are limited to 0xffff bytes, pending_buf is limited
	// to pending_buf_size, and each stored block has a 5 byte header:

	var max_block_size = 0xffff;
	var max_start;

	if (max_block_size > pending_buf_size - 5) {
		max_block_size = pending_buf_size - 5;
	}

	// Copy as much as possible from input to output:
	while (true) {
		// Fill the window as much as possible:
		if (lookahead <= 1) {
			fill_window();
			if (lookahead === 0 && flush == Z_NO_FLUSH)
				return NeedMore;
			if (lookahead === 0)
				break; // flush the current block
		}

		// Claim all buffered input as part of the current block.
		strstart += lookahead;
		lookahead = 0;

		// Emit a stored block if pending_buf will be full:
		max_start = block_start + max_block_size;
		if (strstart === 0 || strstart >= max_start) {
			// strstart === 0 is possible when wraparound on 16-bit machine
			lookahead = (strstart - max_start);
			strstart = max_start;

			flush_block_only(false);
			if (strm.avail_out === 0)
				return NeedMore;

		}

		// Flush if we may have to slide, otherwise block_start may become
		// negative and the data will be gone:
		if (strstart - block_start >= w_size - MIN_LOOKAHEAD) {
			flush_block_only(false);
			if (strm.avail_out === 0)
				return NeedMore;
		}
	}

	// Final flush; the block is terminal only on Z_FINISH.
	flush_block_only(flush == Z_FINISH);
	if (strm.avail_out === 0)
		return (flush == Z_FINISH) ? FinishStarted : NeedMore;

	return flush == Z_FINISH ? FinishDone : BlockDone;
}
1291 |
// Find the longest match for the string starting at strstart, walking
// the hash chain that begins at cur_match. Returns the match length,
// capped at the remaining lookahead; sets match_start to the position
// of the best match found.
function longest_match(cur_match) {
	var chain_length = max_chain_length; // max hash chain length
	var scan = strstart; // current string
	var match; // matched string
	var len; // length of current match
	var best_len = prev_length; // best match length so far
	var limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;
	var _nice_match = nice_match;

	// Stop when cur_match becomes <= limit. To simplify the code,
	// we prevent matches with the string of window index 0.

	var wmask = w_mask;

	var strend = strstart + MAX_MATCH;
	// Bytes at the end of the current best match, used to reject
	// candidates quickly before the full comparison loop.
	var scan_end1 = window[scan + best_len - 1];
	var scan_end = window[scan + best_len];

	// The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of
	// 16.
	// It is easy to get rid of this optimization if necessary.

	// Do not waste too much time if we already have a good match:
	if (prev_length >= good_match) {
		chain_length >>= 2;
	}

	// Do not look for matches beyond the end of the input. This is
	// necessary
	// to make deflate deterministic.
	if (_nice_match > lookahead)
		_nice_match = lookahead;

	do {
		match = cur_match;

		// Skip to next match if the match length cannot increase
		// or if the match length is less than 2:
		if (window[match + best_len] != scan_end || window[match + best_len - 1] != scan_end1 || window[match] != window[scan]
				|| window[++match] != window[scan + 1])
			continue;

		// The check at best_len-1 can be removed because it will be made
		// again later. (This heuristic is not always a win.)
		// It is not necessary to compare scan[2] and match[2] since they
		// are always equal when the other bytes match, given that
		// the hash keys are equal and that HASH_BITS >= 8.
		scan += 2;
		match++;

		// We check for insufficient lookahead only every 8th comparison;
		// the 256th check will be made at strstart+258.
		do {
		} while (window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match]
				&& window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match]
				&& window[++scan] == window[++match] && window[++scan] == window[++match] && scan < strend);

		len = MAX_MATCH - (strend - scan);
		scan = strend - MAX_MATCH;

		if (len > best_len) {
			match_start = cur_match;
			best_len = len;
			if (len >= _nice_match)
				break;
			// Refresh the fast-rejection bytes for the new best length.
			scan_end1 = window[scan + best_len - 1];
			scan_end = window[scan + best_len];
		}

	} while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length !== 0);

	// Never report a match longer than the data we actually have.
	if (best_len <= lookahead)
		return best_len;
	return lookahead;
}
1367 |
// Compress as much as possible from the input stream, return the current
// block state.
// This function does not perform lazy evaluation of matches and inserts
// new strings in the dictionary only for unmatched strings or for short
// matches. It is used only for the fast compression options.
function deflate_fast(flush) {
	// short hash_head = 0; // head of the hash chain
	var hash_head = 0; // head of the hash chain
	var bflush; // set if current block must be flushed

	while (true) {
		// Make sure that we always have enough lookahead, except
		// at the end of the input file. We need MAX_MATCH bytes
		// for the next match, plus MIN_MATCH bytes to insert the
		// string following the next match.
		if (lookahead < MIN_LOOKAHEAD) {
			fill_window();
			if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
				return NeedMore;
			}
			if (lookahead === 0)
				break; // flush the current block
		}

		// Insert the string window[strstart .. strstart+2] in the
		// dictionary, and set hash_head to the head of the hash chain:
		if (lookahead >= MIN_MATCH) {
			ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;

			// prev[strstart&w_mask]=hash_head=head[ins_h];
			hash_head = (head[ins_h] & 0xffff);
			prev[strstart & w_mask] = head[ins_h];
			head[ins_h] = strstart;
		}

		// Find the longest match, discarding those <= prev_length.
		// At this point we have always match_length < MIN_MATCH

		if (hash_head !== 0 && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
			// To simplify the code, we prevent matches with the string
			// of window index 0 (in particular we have to avoid a match
			// of the string with itself at the start of the input file).
			if (strategy != Z_HUFFMAN_ONLY) {
				match_length = longest_match(hash_head);
			}
			// longest_match() sets match_start
		}
		if (match_length >= MIN_MATCH) {
			// check_match(strstart, match_start, match_length);

			// Record the (distance, length) pair for the current block.
			bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);

			lookahead -= match_length;

			// Insert new strings in the hash table only if the match length
			// is not too large. This saves time but degrades compression.
			if (match_length <= max_lazy_match && lookahead >= MIN_MATCH) {
				match_length--; // string at strstart already in hash table
				do {
					strstart++;

					ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
					// prev[strstart&w_mask]=hash_head=head[ins_h];
					hash_head = (head[ins_h] & 0xffff);
					prev[strstart & w_mask] = head[ins_h];
					head[ins_h] = strstart;

					// strstart never exceeds WSIZE-MAX_MATCH, so there are
					// always MIN_MATCH bytes ahead.
				} while (--match_length !== 0);
				strstart++;
			} else {
				// Long match: skip over it without hashing and reseed
				// the rolling hash at the new position.
				strstart += match_length;
				match_length = 0;
				ins_h = window[strstart] & 0xff;

				ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
				// If lookahead < MIN_MATCH, ins_h is garbage, but it does
				// not
				// matter since it will be recomputed at next deflate call.
			}
		} else {
			// No match, output a literal byte

			bflush = _tr_tally(0, window[strstart] & 0xff);
			lookahead--;
			strstart++;
		}
		if (bflush) {

			flush_block_only(false);
			if (strm.avail_out === 0)
				return NeedMore;
		}
	}

	flush_block_only(flush == Z_FINISH);
	if (strm.avail_out === 0) {
		if (flush == Z_FINISH)
			return FinishStarted;
		else
			return NeedMore;
	}
	return flush == Z_FINISH ? FinishDone : BlockDone;
}
1473 |
// Same as above, but achieves better compression. We use a lazy
// evaluation for matches: a match is finally adopted only if there is
// no better match at the next window position.
function deflate_slow(flush) {
	// short hash_head = 0; // head of hash chain
	var hash_head = 0; // head of hash chain
	var bflush; // set if current block must be flushed
	var max_insert;

	// Process the input block.
	while (true) {
		// Make sure that we always have enough lookahead, except
		// at the end of the input file. We need MAX_MATCH bytes
		// for the next match, plus MIN_MATCH bytes to insert the
		// string following the next match.

		if (lookahead < MIN_LOOKAHEAD) {
			fill_window();
			if (lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
				return NeedMore;
			}
			if (lookahead === 0)
				break; // flush the current block
		}

		// Insert the string window[strstart .. strstart+2] in the
		// dictionary, and set hash_head to the head of the hash chain:

		if (lookahead >= MIN_MATCH) {
			ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
			// prev[strstart&w_mask]=hash_head=head[ins_h];
			hash_head = (head[ins_h] & 0xffff);
			prev[strstart & w_mask] = head[ins_h];
			head[ins_h] = strstart;
		}

		// Find the longest match, discarding those <= prev_length.
		// Remember the previous step's match before searching again.
		prev_length = match_length;
		prev_match = match_start;
		match_length = MIN_MATCH - 1;

		if (hash_head !== 0 && prev_length < max_lazy_match && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD) {
			// To simplify the code, we prevent matches with the string
			// of window index 0 (in particular we have to avoid a match
			// of the string with itself at the start of the input file).

			if (strategy != Z_HUFFMAN_ONLY) {
				match_length = longest_match(hash_head);
			}
			// longest_match() sets match_start

			// Discard short or too-distant matches for filtered data.
			if (match_length <= 5 && (strategy == Z_FILTERED || (match_length == MIN_MATCH && strstart - match_start > 4096))) {

				// If prev_match is also MIN_MATCH, match_start is garbage
				// but we will ignore the current match anyway.
				match_length = MIN_MATCH - 1;
			}
		}

		// If there was a match at the previous step and the current
		// match is not better, output the previous match:
		if (prev_length >= MIN_MATCH && match_length <= prev_length) {
			max_insert = strstart + lookahead - MIN_MATCH;
			// Do not insert strings in hash table beyond this.

			// check_match(strstart-1, prev_match, prev_length);

			bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);

			// Insert in hash table all strings up to the end of the match.
			// strstart-1 and strstart are already inserted. If there is not
			// enough lookahead, the last two strings are not inserted in
			// the hash table.
			lookahead -= prev_length - 1;
			prev_length -= 2;
			do {
				if (++strstart <= max_insert) {
					ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
					// prev[strstart&w_mask]=hash_head=head[ins_h];
					hash_head = (head[ins_h] & 0xffff);
					prev[strstart & w_mask] = head[ins_h];
					head[ins_h] = strstart;
				}
			} while (--prev_length !== 0);
			match_available = 0;
			match_length = MIN_MATCH - 1;
			strstart++;

			if (bflush) {
				flush_block_only(false);
				if (strm.avail_out === 0)
					return NeedMore;
			}
		} else if (match_available !== 0) {

			// If there was no match at the previous position, output a
			// single literal. If there was a match but the current match
			// is longer, truncate the previous match to a single literal.

			bflush = _tr_tally(0, window[strstart - 1] & 0xff);

			if (bflush) {
				flush_block_only(false);
			}
			strstart++;
			lookahead--;
			if (strm.avail_out === 0)
				return NeedMore;
		} else {
			// There is no previous match to compare with, wait for
			// the next step to decide.

			match_available = 1;
			strstart++;
			lookahead--;
		}
	}

	// Emit the literal still pending from the last lazy comparison.
	if (match_available !== 0) {
		bflush = _tr_tally(0, window[strstart - 1] & 0xff);
		match_available = 0;
	}
	flush_block_only(flush == Z_FINISH);

	if (strm.avail_out === 0) {
		if (flush == Z_FINISH)
			return FinishStarted;
		else
			return NeedMore;
	}

	return flush == Z_FINISH ? FinishDone : BlockDone;
}
1607 |
// Reset the stream counters and internal compressor state so the
// same Deflate instance can compress a fresh stream.
function deflateReset(strm) {
	strm.total_in = 0;
	strm.total_out = 0;
	strm.msg = null; //

	that.pending = 0;
	that.pending_out = 0;

	status = BUSY_STATE;
	last_flush = Z_NO_FLUSH;

	// Re-initialize the Huffman tree state and the matcher state.
	tr_init();
	lm_init();
	return Z_OK;
}
1623 |
// Initialize the compressor state for the given stream.
// strm: the ZStream to bind; _level: 0-9 or Z_DEFAULT_COMPRESSION;
// bits: window bits (9-15); _method: must be Z_DEFLATED;
// memLevel: 1..MAX_MEM_LEVEL; _strategy: Z_DEFAULT_STRATEGY..Z_HUFFMAN_ONLY.
// Returns Z_OK, or Z_STREAM_ERROR on invalid parameters.
that.deflateInit = function(strm, _level, bits, _method, memLevel, _strategy) {
	// Falsy arguments fall back to the library defaults.
	if (!_method)
		_method = Z_DEFLATED;
	if (!memLevel)
		memLevel = DEF_MEM_LEVEL;
	if (!_strategy)
		_strategy = Z_DEFAULT_STRATEGY;

	strm.msg = null;

	if (_level == Z_DEFAULT_COMPRESSION)
		_level = 6;

	if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || _method != Z_DEFLATED || bits < 9 || bits > 15 || _level < 0 || _level > 9 || _strategy < 0
			|| _strategy > Z_HUFFMAN_ONLY) {
		return Z_STREAM_ERROR;
	}

	strm.dstate = that;

	w_bits = bits;
	w_size = 1 << w_bits;
	w_mask = w_size - 1;

	hash_bits = memLevel + 7;
	hash_size = 1 << hash_bits;
	hash_mask = hash_size - 1;
	hash_shift = Math.floor((hash_bits + MIN_MATCH - 1) / MIN_MATCH);

	window = new Uint8Array(w_size * 2);
	// Hash-chain tables hold window positions, which are always below
	// window_size (= 2*w_size <= 65536) and are read back with & 0xffff,
	// so 16-bit elements suffice (zlib stores them as unsigned short
	// "Pos" arrays). Typed arrays are zero-initialized and dense,
	// unlike the previous plain [] arrays.
	prev = new Uint16Array(w_size);
	head = new Uint16Array(hash_size);

	lit_bufsize = 1 << (memLevel + 6); // 16K elements by default

	// We overlay pending_buf and d_buf+l_buf. This works since the average
	// output size for (length,distance) codes is <= 24 bits.
	that.pending_buf = new Uint8Array(lit_bufsize * 4);
	pending_buf_size = lit_bufsize * 4;

	// Byte offsets of the distance and literal overlays within pending_buf.
	d_buf = Math.floor(lit_bufsize / 2);
	l_buf = (1 + 2) * lit_bufsize;

	level = _level;

	strategy = _strategy;
	method = _method & 0xff;

	return deflateReset(strm);
};
1682 |
// Tear down the compressor, releasing all buffers. Returns
// Z_STREAM_ERROR if the state is unknown, Z_DATA_ERROR if the stream
// was ended mid-compression, Z_OK otherwise.
that.deflateEnd = function() {
	var wasBusy = status == BUSY_STATE;
	if (status != INIT_STATE && !wasBusy && status != FINISH_STATE) {
		return Z_STREAM_ERROR;
	}
	// Deallocate in reverse order of allocations:
	that.pending_buf = null;
	head = null;
	prev = null;
	window = null;
	// free
	that.dstate = null;
	return wasBusy ? Z_DATA_ERROR : Z_OK;
};
1696 |
// Change compression level and strategy mid-stream. If the new level
// uses a different deflate function and data has already been
// consumed, the buffered input is flushed with the old parameters
// first. Returns Z_OK (or the result of that flush), or
// Z_STREAM_ERROR on invalid arguments.
that.deflateParams = function(strm, _level, _strategy) {
	var err = Z_OK;

	if (_level == Z_DEFAULT_COMPRESSION) {
		_level = 6;
	}
	if (_level < 0 || _level > 9 || _strategy < 0 || _strategy > Z_HUFFMAN_ONLY) {
		return Z_STREAM_ERROR;
	}

	var funcChanged = config_table[level].func != config_table[_level].func;
	if (funcChanged && strm.total_in !== 0) {
		// Flush the last buffer:
		err = strm.deflate(Z_PARTIAL_FLUSH);
	}

	if (level != _level) {
		level = _level;
		var cfg = config_table[level];
		max_lazy_match = cfg.max_lazy;
		good_match = cfg.good_length;
		nice_match = cfg.nice_length;
		max_chain_length = cfg.max_chain;
	}
	strategy = _strategy;
	return err;
};
1722 |
// Preset the sliding window with a dictionary before any data is
// compressed, and seed the hash chains from it. Only valid in
// INIT_STATE. Over-long dictionaries keep only their tail.
that.deflateSetDictionary = function(strm, dictionary, dictLength) {
	var n;
	var index = 0;
	var length = dictLength;

	if (!dictionary || status != INIT_STATE)
		return Z_STREAM_ERROR;

	if (length < MIN_MATCH)
		return Z_OK; // too short to seed any hash entries
	if (length > w_size - MIN_LOOKAHEAD) {
		// use the tail of the dictionary
		length = w_size - MIN_LOOKAHEAD;
		index = dictLength - length;
	}
	window.set(dictionary.subarray(index, index + length), 0);

	strstart = length;
	block_start = length;

	// Insert all strings in the hash table (except for the last two bytes).
	// s->lookahead stays null, so s->ins_h will be recomputed at the next
	// call of fill_window.

	// Prime the rolling hash with the first two window bytes.
	ins_h = window[0] & 0xff;
	ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;

	for (n = 0; n <= length - MIN_MATCH; n++) {
		ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
		prev[n & w_mask] = head[ins_h];
		head[ins_h] = n;
	}
	return Z_OK;
};
1755 |
// Compress from _strm.next_in to _strm.next_out as governed by flush.
// Writes the 2-byte zlib header on the first call, then runs the
// configured deflate function (stored/fast/slow). Returns Z_OK while
// more work remains, Z_STREAM_END when the stream is finished, or
// Z_STREAM_ERROR / Z_BUF_ERROR on misuse.
that.deflate = function(_strm, flush) {
	var i, header, level_flags, old_flush, bstate;

	if (flush > Z_FINISH || flush < 0) {
		return Z_STREAM_ERROR;
	}

	if (!_strm.next_out || (!_strm.next_in && _strm.avail_in !== 0) || (status == FINISH_STATE && flush != Z_FINISH)) {
		_strm.msg = z_errmsg[Z_NEED_DICT - (Z_STREAM_ERROR)];
		return Z_STREAM_ERROR;
	}
	if (_strm.avail_out === 0) {
		_strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
		return Z_BUF_ERROR;
	}

	strm = _strm; // just in case
	old_flush = last_flush;
	last_flush = flush;

	// Write the zlib header
	if (status == INIT_STATE) {
		// CMF byte (method + window size) in the high byte, FLG below.
		header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
		level_flags = ((level - 1) & 0xff) >> 1;

		if (level_flags > 3)
			level_flags = 3;
		header |= (level_flags << 6);
		if (strstart !== 0)
			header |= PRESET_DICT;
		// FCHECK: make the 16-bit header a multiple of 31.
		header += 31 - (header % 31);

		status = BUSY_STATE;
		putShortMSB(header);
	}

	// Flush as much pending output as possible
	if (that.pending !== 0) {
		strm.flush_pending();
		if (strm.avail_out === 0) {
			// console.log(" avail_out==0");
			// Since avail_out is 0, deflate will be called again with
			// more output space, but possibly with both pending and
			// avail_in equal to zero. There won't be anything to do,
			// but this is not an error situation so make sure we
			// return OK instead of BUF_ERROR at next call of deflate:
			last_flush = -1;
			return Z_OK;
		}

		// Make sure there is something to do and avoid duplicate
		// consecutive
		// flushes. For repeated and useless calls with Z_FINISH, we keep
		// returning Z_STREAM_END instead of Z_BUFF_ERROR.
	} else if (strm.avail_in === 0 && flush <= old_flush && flush != Z_FINISH) {
		strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
		return Z_BUF_ERROR;
	}

	// User must not provide more input after the first FINISH:
	if (status == FINISH_STATE && strm.avail_in !== 0) {
		_strm.msg = z_errmsg[Z_NEED_DICT - (Z_BUF_ERROR)];
		return Z_BUF_ERROR;
	}

	// Start a new block or continue the current one.
	if (strm.avail_in !== 0 || lookahead !== 0 || (flush != Z_NO_FLUSH && status != FINISH_STATE)) {
		bstate = -1;
		// Dispatch to the deflate function configured for this level.
		switch (config_table[level].func) {
		case STORED:
			bstate = deflate_stored(flush);
			break;
		case FAST:
			bstate = deflate_fast(flush);
			break;
		case SLOW:
			bstate = deflate_slow(flush);
			break;
		default:
		}

		if (bstate == FinishStarted || bstate == FinishDone) {
			status = FINISH_STATE;
		}
		if (bstate == NeedMore || bstate == FinishStarted) {
			if (strm.avail_out === 0) {
				last_flush = -1; // avoid BUF_ERROR next call, see above
			}
			return Z_OK;
			// If flush != Z_NO_FLUSH && avail_out === 0, the next call
			// of deflate should use the same flush parameter to make sure
			// that the flush is complete. So we don't have to output an
			// empty block here, this will be done at next call. This also
			// ensures that for a very small output buffer, we emit at most
			// one empty block.
		}

		if (bstate == BlockDone) {
			if (flush == Z_PARTIAL_FLUSH) {
				_tr_align();
			} else { // FULL_FLUSH or SYNC_FLUSH
				_tr_stored_block(0, 0, false);
				// For a full flush, this empty block will be recognized
				// as a special marker by inflate_sync().
				if (flush == Z_FULL_FLUSH) {
					// state.head[s.hash_size-1]=0;
					for (i = 0; i < hash_size/*-1*/; i++)
						// forget history
						head[i] = 0;
				}
			}
			strm.flush_pending();
			if (strm.avail_out === 0) {
				last_flush = -1; // avoid BUF_ERROR at next call, see above
				return Z_OK;
			}
		}
	}

	if (flush != Z_FINISH)
		return Z_OK;
	return Z_STREAM_END;
};
1879 | }
1880 |
1881 | // ZStream
1882 |
// ZStream: holds the shared input/output buffer state exchanged between
// the caller and the compressor (the JS analogue of zlib's z_stream).
function ZStream() {
	// cursors into next_in / next_out
	this.next_in_index = 0;
	this.next_out_index = 0;
	// that.next_in; // next input byte
	this.avail_in = 0; // bytes still readable at next_in
	this.total_in = 0; // running total of input bytes consumed
	// that.next_out; // next output byte should be put there
	this.avail_out = 0; // free space remaining at next_out
	this.total_out = 0; // running total of output bytes produced
	// that.msg;
	// that.dstate;
}
1896 |
ZStream.prototype = {
	// Create and initialise the deflate state for this stream.
	// "bits" (window size) falls back to MAX_BITS when not supplied.
	deflateInit : function(level, bits) {
		this.dstate = new Deflate();
		if (!bits) {
			bits = MAX_BITS;
		}
		return this.dstate.deflateInit(this, level, bits);
	},

	// Compress with the given flush mode; Z_STREAM_ERROR when the stream
	// was never initialised (or already ended).
	deflate : function(flush) {
		if (!this.dstate) {
			return Z_STREAM_ERROR;
		}
		return this.dstate.deflate(this, flush);
	},

	// Tear down the deflate state and release it for GC.
	deflateEnd : function() {
		if (!this.dstate) {
			return Z_STREAM_ERROR;
		}
		var result = this.dstate.deflateEnd();
		this.dstate = null;
		return result;
	},

	deflateParams : function(level, strategy) {
		if (!this.dstate) {
			return Z_STREAM_ERROR;
		}
		return this.dstate.deflateParams(this, level, strategy);
	},

	deflateSetDictionary : function(dictionary, dictLength) {
		if (!this.dstate) {
			return Z_STREAM_ERROR;
		}
		return this.dstate.deflateSetDictionary(this, dictionary, dictLength);
	},

	// Read a new buffer from the current input stream, update the
	// total number of bytes read. All deflate() input goes through
	// this function so some applications may wish to modify it to avoid
	// allocating a large strm->next_in buffer and copying from it.
	// (See also flush_pending()).
	// Copies at most "size" bytes into buf starting at "start";
	// returns the number of bytes actually transferred (0 if none left).
	read_buf : function(buf, start, size) {
		var count = this.avail_in > size ? size : this.avail_in;
		if (count === 0) {
			return 0;
		}
		this.avail_in -= count;
		buf.set(this.next_in.subarray(this.next_in_index, this.next_in_index + count), start);
		this.next_in_index += count;
		this.total_in += count;
		return count;
	},

	// Flush as much pending output as possible. All deflate() output goes
	// through this function so some applications may wish to modify it
	// to avoid allocating a large strm->next_out buffer and copying into it.
	// (See also read_buf()).
	flush_pending : function() {
		var count = this.dstate.pending;
		if (count > this.avail_out) {
			count = this.avail_out;
		}
		if (count === 0) {
			return;
		}

		this.next_out.set(this.dstate.pending_buf.subarray(this.dstate.pending_out, this.dstate.pending_out + count), this.next_out_index);

		this.next_out_index += count;
		this.dstate.pending_out += count;
		this.total_out += count;
		this.avail_out -= count;
		this.dstate.pending -= count;
		// Once the pending buffer drains completely, rewind its read cursor.
		if (this.dstate.pending === 0) {
			this.dstate.pending_out = 0;
		}
	}
};
1989 |
1990 | // Deflater
1991 |
// Deflater: streaming facade over ZStream/Deflate. Feed input chunks
// through append() and finalise the stream with flush(); each call
// returns the compressed bytes produced so far as a Uint8Array.
// options.level selects the compression level (Z_DEFAULT_COMPRESSION
// when absent).
function Deflater(options) {
	var that = this;
	var bufferLength = 512;
	var stream = new ZStream();
	var output = new Uint8Array(bufferLength);
	var compressionLevel = options ? options.level : Z_DEFAULT_COMPRESSION;
	if (typeof compressionLevel == "undefined") {
		compressionLevel = Z_DEFAULT_COMPRESSION;
	}
	stream.deflateInit(compressionLevel);
	stream.next_out = output;

	// Concatenate collected output chunks into one array of totalLength bytes.
	function concat(chunks, totalLength) {
		var result = new Uint8Array(totalLength);
		var offset = 0;
		chunks.forEach(function(chunk) {
			result.set(chunk, offset);
			offset += chunk.length;
		});
		return result;
	}

	// Compress one input chunk; optionally reports consumed-input progress
	// via onprogress(bytesConsumed).
	that.append = function(data, onprogress) {
		if (!data.length) {
			return;
		}
		var chunks = [];
		var totalLength = 0;
		var previousIndex = 0;
		stream.next_in_index = 0;
		stream.next_in = data;
		stream.avail_in = data.length;
		do {
			stream.next_out_index = 0;
			stream.avail_out = bufferLength;
			var code = stream.deflate(Z_NO_FLUSH);
			if (code != Z_OK) {
				throw new Error("deflating: " + stream.msg);
			}
			if (stream.next_out_index) {
				// Snapshot exactly the bytes produced this round.
				if (stream.next_out_index == bufferLength) {
					chunks.push(new Uint8Array(output));
				} else {
					chunks.push(new Uint8Array(output.subarray(0, stream.next_out_index)));
				}
			}
			totalLength += stream.next_out_index;
			if (onprogress && stream.next_in_index > 0 && stream.next_in_index != previousIndex) {
				onprogress(stream.next_in_index);
				previousIndex = stream.next_in_index;
			}
		} while (stream.avail_in > 0 || stream.avail_out === 0);
		return concat(chunks, totalLength);
	};

	// Finish the stream (Z_FINISH), release the deflate state, and return
	// any remaining compressed bytes.
	that.flush = function() {
		var chunks = [];
		var totalLength = 0;
		do {
			stream.next_out_index = 0;
			stream.avail_out = bufferLength;
			var code = stream.deflate(Z_FINISH);
			if (code != Z_STREAM_END && code != Z_OK) {
				throw new Error("deflating: " + stream.msg);
			}
			if (bufferLength - stream.avail_out > 0) {
				chunks.push(new Uint8Array(output.subarray(0, stream.next_out_index)));
			}
			totalLength += stream.next_out_index;
		} while (stream.avail_in > 0 || stream.avail_out === 0);
		stream.deflateEnd();
		return concat(chunks, totalLength);
	};
}
2056 |
2057 | // 'zip' may not be defined in z-worker and some tests
2058 | var env = global.zip || global;
2059 | env.Deflater = env._jzlib_Deflater = Deflater;
2060 | })(this);
2061 |
--------------------------------------------------------------------------------