├── .gitignore
├── .npmignore
├── .travis.yml
├── CHANGELOG.md
├── Gruntfile.coffee
├── LICENSE.md
├── README.md
├── TODO.txt
├── bin
│   ├── find-tags.js
│   └── find-uids.js
├── doc
│   ├── dicom-decoder.md
│   ├── dicom-json-encoder.md
│   ├── dicom-json-sink.md
│   ├── dicom-json-source.md
│   └── dicom-json.md
├── extra
│   ├── commandelements.xml
│   ├── dataelements.xml
│   ├── make_tagdict.coffee
│   ├── make_uiddict.coffee
│   └── uids.xml
├── index.js
├── package.json
├── src
│   ├── decoder.coffee
│   ├── encoder.coffee
│   ├── json.coffee
│   ├── json
│   │   ├── encoder.coffee
│   │   ├── sink.coffee
│   │   └── source.coffee
│   ├── logger.coffee
│   ├── pdu.coffee
│   ├── readbuffer.coffee
│   ├── tags.coffee
│   ├── uids.coffee
│   └── vrs.coffee
└── test
    ├── README.md
    ├── charsettests
    │   ├── DICOMDIR.gz
    │   ├── SCSARAB.gz
    │   ├── SCSFREN.gz
    │   ├── SCSGERM.gz
    │   ├── SCSGREEK.gz
    │   ├── SCSH31.gz
    │   ├── SCSH32.gz
    │   ├── SCSHBRW.gz
    │   ├── SCSI2.gz
    │   ├── SCSRUSS.gz
    │   ├── SCSX1.gz
    │   ├── SCSX2.gz
    │   └── dcmdump.sh
    ├── deflate_tests
    │   ├── image.gz
    │   ├── image_dfl.gz
    │   ├── report.gz
    │   ├── report_dfl.gz
    │   ├── wave.gz
    │   └── wave_dfl.gz
    ├── hebrew_ivrle.gz
    ├── metainfo_tests
    │   └── dodgy_metainfo_length.dcm.gz
    ├── private_report.gz
    ├── quotes_jpls.dcm.gz
    ├── report_default_ts.gz
    ├── report_undef_len.gz
    ├── scsarab_be.gz
    ├── test_decoder.coffee
    ├── test_dicom2json.coffee
    ├── test_json2json.coffee
    ├── test_metainfo.coffee
    ├── test_pdu.coffee
    ├── test_readbuffer.coffee
    ├── test_tags.coffee
    ├── test_uids.coffee
    └── test_vrs.coffee
/.gitignore:
--------------------------------------------------------------------------------
1 | /node_modules/
2 | /.idea/
3 | /lib/
4 | *~
5 | *.x
6 | x.*
7 | *.swp
8 | npm-debug.log
9 | /package-lock.json
10 | tags
11 |
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | src
2 | Cakefile
3 | TODO.txt
4 | extra
5 | node_modules
6 | tags
7 | test
8 | *.x
9 | x.*
10 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | node_js:
3 | - "7"
4 | - "6"
5 | before_install:
6 | - npm install coffee-script
7 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | All notable changes to this project will be documented in this file.
3 |
4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
6 |
7 | ## [Unreleased]
8 |
9 | No unreleased changes.
10 |
11 | ## [0.4.4] - 2019-03-01
12 | ### Fixed
13 | - Decoding File Metainfo with the Transfer Syntax UID not in the first chunk produced an error. By [@mch](https://github.com/mch)
14 |
15 | ### Added
16 | - CHANGELOG.md, more prominent documentation
17 |
18 | ## [0.4.3] - 2017-07-27
19 | ### Fixed
20 | - Postinstall did not work on Windows.
21 |
22 | ## [0.4.2] - 2016-07-31
23 | ### Fixed
24 | - Problems with metadata content group length and fixed length sequences from a SIEMENS modality.
25 |
26 | ## [0.4.1] - 2015-05-21
27 | ### Added
28 | - Support for VR US, by [@soichih](https://github.com/soichih).
29 |
30 | ## [0.4.0] - 2014-10-30
31 | ### Fixed
32 | - Handling of empty elements at end of buffer
33 |
34 | ### Added
35 | - Documentation in doc directory
36 | - JSON Source emits DICOM Events, like DICOM Decoder
37 | - Incomplete PDU Encoder/Decoder. Stopped working on this because of QIDO-RS - I did not need this anymore.
38 | - Possibly incomplete DICOM encoder. Was needed for PDU Encoder, status questionable but maybe useful to some.
39 |
40 | [Unreleased]: https://github.com/grmble/node-dicom/compare/v0.4.4...HEAD
41 | [0.4.4]: https://github.com/grmble/node-dicom/compare/v0.4.3...v0.4.4
42 | [0.4.3]: https://github.com/grmble/node-dicom/compare/v0.4.2...v0.4.3
43 | [0.4.2]: https://github.com/grmble/node-dicom/compare/v0.4.1...v0.4.2
44 | [0.4.1]: https://github.com/grmble/node-dicom/compare/v0.4.0...v0.4.1
45 | [0.4.0]: https://github.com/grmble/node-dicom/compare/v0.3.0...v0.4.0
46 |
--------------------------------------------------------------------------------
/Gruntfile.coffee:
--------------------------------------------------------------------------------
1 | path = require 'path'
2 |
3 | srcDir = 'src'
4 | dstDir = 'lib'
5 | tstDir = 'test'
6 |
7 | sourceMap = false
8 |
9 | srcRe = new RegExp "^#{srcDir}/"
10 |
11 | module.exports = (grunt) ->
12 | grunt.loadNpmTasks 'grunt-contrib-coffee'
13 | grunt.loadNpmTasks 'grunt-contrib-watch'
14 | grunt.loadNpmTasks 'grunt-contrib-nodeunit'
15 |
16 |
17 | grunt.initConfig
18 | watch:
19 | coffee:
20 | files: ["#{srcDir}/**/*.coffee", "#{tstDir}/test_*.coffee"]
21 | tasks: ['coffee:watched', 'nodeunit']
22 | options:
23 | spawn: false
24 |
25 | coffee:
26 | compile:
27 | expand: true,
28 | cwd: "#{srcDir}/",
29 | src: ['**/*.coffee'],
30 | dest: "#{dstDir}/",
31 | ext: '.js'
32 | options:
33 | sourceMap: sourceMap
34 | watched:
35 | expand: true,
36 | cwd: "#{srcDir}/",
37 | src: ['**/*.coffee'],
38 | dest: "#{dstDir}/",
39 | ext: '.js'
40 | options:
41 | sourceMap: sourceMap
42 |
43 | nodeunit:
44 | all: ["#{tstDir}/test_*.coffee"]
45 |
46 | grunt.event.on 'watch', (action, filepath) ->
47 | if srcRe.test filepath
48 | coffeeConfig = grunt.config "coffee"
49 | coffeeConfig.watched.src = path.relative(srcDir, filepath)
50 | grunt.config "coffee", coffeeConfig
51 | else
52 | coffeeConfig = grunt.config "coffee"
53 | coffeeConfig.watched.src = []
54 | grunt.config "coffee", coffeeConfig
55 |
56 |
57 | grunt.registerTask 'default', ['coffee', 'nodeunit', 'watch']
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright (c) 2012-2014 Juergen Gmeiner
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
9 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Node.js DICOM
2 | =============
3 |
4 | [![Build Status](https://travis-ci.org/grmble/node-dicom.svg)](https://travis-ci.org/grmble/node-dicom)
5 |
6 | The package provides the following:
7 |
8 | * Data dictionary according to the 2014a standard.
9 | * Streaming DICOM Decoder that reads a DICOM stream and
10 | emits DicomEvent instances.
11 | * Streaming JSON Encoder that turns a DicomEvent stream
12 |   into a DICOM JSON Model.
13 | * JSON Sink that consumes the JSON Model stream and
14 | produces an in-memory JSON Object.
15 |
16 | Limitations:
17 | ------------
18 |
19 | * ISO 2022 character sets are not in iconv-lite,
20 | this means the decoder does not currently
21 | support ISO 2022 encodings,
22 | multi-valued (0008,0005) Specific Character Set
23 | and DICOM characterset extensions.
24 | * Dicom Elements with a value length above a
25 |   configurable threshold are not constructed
26 |   in memory, but emitted as a `start_element` event,
27 |   a sequence of raw events carrying the encoded value,
28 |   and an `end_element` event. The JSON Encoder
29 |   emits these as bulkdata URLs, but there is currently
30 |   no built-in way to resolve those URLs: you have to
31 |   parse the URL yourself and extract the bulkdata
32 |   using its offset and length (see the sketch below).
33 | * `Other` DICOM VRs (`OB`, `OW`, `OF`, `OD`, `UN`)
34 | do not provide a way to interpret the data,
35 | i.e. it's just passed on as a byte array, unchanged.
36 |
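A minimal sketch of such bulkdata extraction, assuming the
`?offset=N&length=M` form shown in the examples in `doc/dicom-json.md`
and a plain local file behind the URL (the helper `read_bulkdata` is
hypothetical, not part of the package):

```javascript
"use strict";

var fs = require("fs");

// Hypothetical helper: read the bytes referenced by a bulkdata URL.
// Assumes the URL ends in "?offset=N&length=M" and that everything
// before the "?" is a local file path (possibly file://-prefixed).
function read_bulkdata(url, callback) {
    var match = /^(.*)\?offset=(\d+)&length=(\d+)$/.exec(url);
    if (!match) {
        return callback(new Error("no offset/length in: " + url));
    }
    var path = match[1].replace(/^file:\/\//, "");
    var offset = parseInt(match[2], 10);
    var length = parseInt(match[3], 10);
    fs.open(path, "r", function (err, fd) {
        if (err) return callback(err);
        var buffer = new Buffer(length); // Buffer.alloc on modern Node
        fs.read(fd, buffer, 0, length, offset, function (err, bytesRead) {
            fs.close(fd, function () {});
            callback(err, buffer);
        });
    });
}
```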
37 | Documentation:
38 | --------------
39 |
40 | There is documentation for the various pieces in the doc directory.
41 | Most notably:
42 |
43 | * [DICOM Decoder](https://github.com/grmble/node-dicom/blob/master/doc/dicom-decoder.md)
44 | * [DICOM JSON Model Utilities](https://github.com/grmble/node-dicom/blob/master/doc/dicom-json.md)
45 |
46 | Examples:
47 | ---------
48 |
49 | Read a DICOM file, produce JSON Model, and print some data:
50 |
51 | ```coffeescript
52 | dicom = require "dicom"
53 |
54 | decoder = dicom.decoder {guess_header: true}
55 | encoder = new dicom.json.JsonEncoder()
56 | sink = new dicom.json.JsonSink (err, json) ->
57 | if err
58 | console.log "Error:", err
59 | process.exit 10
60 | print_element json, dicom.tags.PatientID
61 | print_element json, dicom.tags.IssuerOfPatientID
62 | print_element json, dicom.tags.StudyInstanceUID
63 | print_element json, dicom.tags.AccessionNumber
64 |
65 | print_element = (json, path...) ->
66 | console.log dicom.json.get_value(json, path...)
67 |
68 | require("fs").createReadStream(process.argv[2]).pipe decoder
69 | .pipe encoder
70 | .pipe sink
71 | ```
72 |
73 |
74 | And the same thing in JavaScript:
75 |
76 | ```javascript
77 | "use strict";
78 |
79 | var dicom = require("dicom");
80 |
81 | var decoder = dicom.decoder({
82 | guess_header: true
83 | });
84 |
85 | var encoder = new dicom.json.JsonEncoder();
86 |
87 | var print_element = function(json, elem) {
88 | console.log(dicom.json.get_value(json, elem));
89 | };
90 |
91 | var sink = new dicom.json.JsonSink(function(err, json) {
92 | if (err) {
93 | console.log("Error:", err);
94 | process.exit(10);
95 | }
96 | print_element(json, dicom.tags.PatientID);
97 | print_element(json, dicom.tags.IssuerOfPatientID);
98 | print_element(json, dicom.tags.StudyInstanceUID);
99 | print_element(json, dicom.tags.AccessionNumber);
100 | });
101 |
102 | require("fs").createReadStream(process.argv[2]).pipe(decoder).pipe(encoder).pipe(sink);
103 | ```
104 |
--------------------------------------------------------------------------------
/TODO.txt:
--------------------------------------------------------------------------------
1 | (W) empty element decoding for non-stringlike VRs
2 | (X) Documentation (ronn?), installation +Docs
3 | (X) Implement Encoder +Encoder
4 | (X) empty element encoding
5 | (Y) JsonSource: handle InlineBinary
6 | (Y) Encoder: use encoder context, not vr passed in from whatever source
7 | (Y) Test BigEndian explicit length sequence/items
8 | (Z) Implement Dataset +Dataset +DoWeNeedThis
9 | (Z) Support multi-valued 0008,0005 + charset extension
10 | (Z) undefined instead of 'undefined' for undefined VR elements +Decoder
11 | Endianess changes for OW/OF +DoWeNeedThis +BigEndianRetired
12 |
13 | x (A) do not trust metadata content group length
14 | x (A) end_sequence triggered too early for defined-length sequence with one empty item
15 |
--------------------------------------------------------------------------------
/bin/find-tags.js:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env node
2 | // Generated by CoffeeScript 1.8.0
3 | (function() {
4 | var regex, tags, what, _i, _len, _ref;
5 |
6 | tags = require("../lib/tags");
7 |
8 | _ref = process.argv.slice(2);
9 | for (_i = 0, _len = _ref.length; _i < _len; _i++) {
10 | what = _ref[_i];
11 | regex = new RegExp(what, "i");
12 | tags.find(regex);
13 | }
14 |
15 | }).call(this);
16 |
--------------------------------------------------------------------------------
/bin/find-uids.js:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env node
2 | // Generated by CoffeeScript 1.8.0
3 | (function() {
4 | var regex, uids, what, _i, _len, _ref;
5 |
6 | uids = require("../lib/uids");
7 |
8 | _ref = process.argv.slice(2);
9 | for (_i = 0, _len = _ref.length; _i < _len; _i++) {
10 | what = _ref[_i];
11 | regex = new RegExp(what, "i");
12 | uids.find(regex);
13 | }
14 |
15 | }).call(this);
16 |
--------------------------------------------------------------------------------
/doc/dicom-decoder.md:
--------------------------------------------------------------------------------
1 | dicom.decoder(3) - Dicom Decoder
2 | ================================
3 |
4 | ##SYNOPSIS
5 |
6 | var decoder = dicom.decoder(options);
7 |
8 | ##DESCRIPTION
9 |
10 | `dicom.decoder` is a transform stream that takes a DICOM file
11 | and outputs `DicomEvent`s.
12 |
13 | Valid options are:
14 | * `streaming_value_length_minimum`: minimum value length above which values are
15 |   streamed out. That is, the element is not constructed in memory and then emitted;
16 |   instead a `start_element` event is emitted, then a number of raw events with
17 |   the DICOM encoded content, and finally an `end_element` event.
18 |   This reduces the memory footprint, assuming we are only really
19 |   interested in the shorter elements (e.g. for putting them into a database),
20 |   while still allowing us to stream the dicom contents on (e.g. to a file).
21 |
22 | * `read_header`: read preamble / dicom header, defaults to false.
23 |   This also implies reading the metainfo with a transfer syntax switch.
24 | * `transfer_syntax`: transfer syntax, defaults to `ExplicitVRLittleEndian`.
25 | * `guess_header`: if true, the decoder will try to guess the encoding.
26 | First it will try to find a DICOM header (`DICM` at position 128),
27 | if that fails it will try to recognize a `SpecificCharacterSet` tag
28 | at the start of the file.
29 |
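A usage sketch (the file name is illustrative): guess the header and
stream out any value longer than 1024 bytes:

    var dicom = require("dicom");

    var decoder = dicom.decoder({
        guess_header: true,
        streaming_value_length_minimum: 1024
    });

    decoder.on("data", function(event) {
        // DicomEvent instances; values above the threshold arrive
        // as a start_element / raw / end_element run
        console.log(event.command);
    });

    require("fs").createReadStream("example.dcm").pipe(decoder);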
30 | ##SEE ALSO
31 | * dicom.json.encoder
32 | * dicom.encoder
33 |
--------------------------------------------------------------------------------
/doc/dicom-json-encoder.md:
--------------------------------------------------------------------------------
1 | dicom.json.encoder(3) - Dicom JSON Model Encoder
2 | ================================================
3 |
4 | ##SYNOPSIS
5 |
6 | var encoder = dicom.json.encoder({bulkdata_uri: "file:///tmp/x.dcm"});
7 |
8 | ##DESCRIPTION
9 |
10 | `JsonEncoder` is a transform stream that takes a stream of DicomEvent instances
11 | and produces chunks of JSON.
12 |
13 | `start_element`/`end_element` blocks produced by `dicom.decoder`
14 | are emitted as bulkdata uri elements, so you can't get at that data
15 | anymore except by parsing the uri and going to the original input.
16 |
17 | If no `bulkdata_uri` is given, bulkdata elements will not be emitted at all.
18 |
19 |
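For illustration (offset and length elided), an element streamed by
`dicom.decoder` then shows up in the JSON output along these lines,
with the URI built from the configured `bulkdata_uri`. The tag shown
is PixelData; the offset/length form follows the bulkdata example in
`doc/dicom-json.md`:

    "7FE00010": {
        "vr": "OW",
        "BulkDataURI": "file:///tmp/x.dcm?offset=...&length=..."
    }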
20 | ##DETAILS
21 |
22 | The Dicom JSON Model is defined at
23 | http://medical.nema.org/dicom/2013/output/chtml/part18/sect_F.2.html
24 |
25 | ##SEE ALSO
26 | * dicom.decoder
27 | * dicom.json.sink
28 | * dicom.json
29 |
--------------------------------------------------------------------------------
/doc/dicom-json-sink.md:
--------------------------------------------------------------------------------
1 | dicom.json.sink(3) - Dicom JSON Sink
2 | ====================================
3 |
4 | ##SYNOPSIS
5 |
6 | var sink = dicom.json.sink(function(err, data) {
7 | // data contains the parsed JSON model
8 | });
9 |
10 | ##DESCRIPTION
11 |
12 | `JsonSink` is a transform stream that collects the JSON chunks emitted
13 | by `dicom.json.encoder` and parses the result.
14 |
15 | The supplied callback will be called with an error or
16 | the parsed JSON as the second argument.
17 |
18 |
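A minimal end-to-end sketch, assuming a DICOM file on disk:

    var fs = require("fs");
    var dicom = require("dicom");

    fs.createReadStream("example.dcm")
        .pipe(dicom.decoder({guess_header: true}))
        .pipe(new dicom.json.JsonEncoder())
        .pipe(dicom.json.sink(function(err, json) {
            if (err) throw err;
            console.log(dicom.json.get_value(json, dicom.tags.PatientID));
        }));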
19 | ##SEE ALSO
20 | * dicom.json
21 | * dicom.json.encoder
22 |
--------------------------------------------------------------------------------
/doc/dicom-json-source.md:
--------------------------------------------------------------------------------
1 | dicom.json.source(3) - Dicom JSON Source
2 | ========================================
3 |
4 | ##SYNOPSIS
5 |
6 | data = {
7 | "00100020": {vr: "LO", Value: ["007"]},
8 | "00100021": {vr: "LO", Value: ["MI6"]},
9 | "00101002": {vr: "SQ", Value: [{
10 | "00100020": {vr: "LO", Value: ["0815"]},
11 | "00100021": {vr: "LO", Value: ["BND"]}}
12 | ]}}
13 | var source = dicom.json.source(data, {transfer_syntax: "ExplicitVRLittleEndian"});
14 | source.pipe(...);
15 |
16 | ##DESCRIPTION
17 |
18 | `JsonSource` is a readable stream that emits `DicomEvent`s from
19 | a Dicom JSON Model.
20 |
21 | For details on the Dicom JSON Model, see
22 | http://medical.nema.org/dicom/2013/output/chtml/part18/sect_F.2.html
23 |
24 | Additionally, the JSON model is normalized before data is emitted,
25 | so it may be given in a simplified form.
26 |
27 | This means:
28 | * Dicom tags are processed by `tags.for_tag`, so you can
29 |   specify them as `tags.PatientName` (this might give you tab completion),
30 |   by their name (`"PatientName"`) or by their 8-digit hex representation
31 |   (`"00100010"`).
32 | * `vr` may be omitted; the vr from the element dictionary will be used.
33 | * Instead of an object with `vr` and `Value`, you can give just the
34 |   `Value` array.
35 | * A single string or number will be interpreted as a single-valued value.
36 |
37 | As an example, the short JSON model from above can also be given as:
38 |
39 | {"PatientID": "007",
40 | "IssuerOfPatientID": "MI6",
41 | "OtherPatientIDsSequence": [{
42 | "PatientID": "0815",
43 | "IssuerOfPatientID": "BND"}]}
44 |
45 |
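As a sketch, such a simplified model can be round-tripped through the
JSON encoder and sink to obtain the normalized model:

    var dicom = require("dicom");

    var data = {"PatientID": "007", "IssuerOfPatientID": "MI6"};
    dicom.json.source(data, {transfer_syntax: "ExplicitVRLittleEndian"})
        .pipe(new dicom.json.JsonEncoder())
        .pipe(dicom.json.sink(function(err, json) {
            if (err) throw err;
            console.log(dicom.json.get_value(json, dicom.tags.PatientID)); // "007"
        }));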
46 | ##Limitations
47 | `JsonSource` cannot handle a JSON model with BulkDataURI data.
48 |
49 | Using a simplified JSON model is only possible in JavaScript source code,
50 | as it is not legal JSON.
51 |
52 |
53 | ##SEE ALSO
54 | * dicom.json
55 | * dicom.json.encoder
56 |
--------------------------------------------------------------------------------
/doc/dicom-json.md:
--------------------------------------------------------------------------------
1 | dicom.json(3) - Dicom JSON Model utilities
2 | ===========================================
3 |
4 | ##SYNOPSIS
5 |
6 | file2json("/tmp/example.dcm", err_data_cb);
7 | gunzip2json("/tmp/example.dcm.gz", err_data_cb);
8 | file2jsonstream("/tmp/example.dcm", err_data_cb).pipe(dicom.json.sink(err_data_cb));
9 | gunzip2jsonstream("/tmp/example.dcm.gz", err_data_cb).pipe(dicom.json.sink(err_data_cb));
10 |
11 | // in a callback with parsed json
12 | var el = dicom.json.get_element(data, dicom.tags.StudyInstanceUID);
13 | var val = dicom.json.get_value(data, dicom.tags.StudyInstanceUID);
14 | var vr = dicom.json.get_vr(data, dicom.tags.StudyInstanceUID);
15 |
16 | ##DESCRIPTION
17 |
18 | ###Pipe helpers
19 | `file2json` and `gunzip2json` set up the whole pipeline from a
20 | (gzipped) file to parsed Dicom JSON Model. They also take care
21 | to pass errors along to the supplied callback.
22 |
23 | `file2jsonstream` and `gunzip2jsonstream` set up the pipeline
24 | only up to `dicom.json.JsonEncoder`, e.g. for writing the JSON to a
25 | file or a network socket.
26 |
27 | These 4 functions take the following arguments:
28 |
29 | * `filename`: a filename (string) or a filename specifier,
30 | an object with the properties `.filename` and `.bulkdata_uri`.
31 | A string filename will be re-used as `bulkdata_uri`.
32 | * `callback`: standard node callback, error or parsed json data.
33 |   The two streaming calls invoke the callback only on error.
34 |
35 | ###JSON Model helpers
36 |
37 | Helper functions are provided to access the data in the JSON Model.
38 |
39 | A short example (of a much, much larger file) to show why this might be needed:
40 |
41 | {"20010010": {"vr":"LO","Value":["Philips Imaging DD 001"]},
42 | "20010090": {"vr":"LO","Value":["Philips Imaging DD 129"]},
43 | "20011063": {"vr":"UN","InlineBinary":["UklTIA=="]},
44 | "2001106E": {"vr":"UN","BulkDataURI":"xxx?offset=6290&length=666"},
45 | "20019000": {"vr":"SQ", "Value": [{
46 | "00080000": {"vr":"UL","Value":[350]},
47 | "00080016": {"vr":"UI","Value":["1.2.840.10008.5.1.4.1.1.11.1"]},
48 | "00080018": {"vr":"UI","Value":["1.3.46.670589.30.1.6.1.963334011378.1349417319250.1"]},
49 | "00081115": {"vr":"SQ", "Value": [{
50 | "00080000": {"vr":"UL","Value":[138]},
51 | "00081140": {"vr":"SQ", "Value": [{
52 | "00080000": {"vr":"UL","Value":[94]},
53 | "00081150": {"vr":"UI","Value":["1.2.840.10008.5.1.4.1.1.1"]},
54 | "00081155": {"vr":"UI","Value":["1.3.46.670589.30.1.6.1.963334011378.1349417318484.2"]}}]},
55 | "00200000": {"vr":"UL","Value":[60]},
56 | "0020000E": {"vr":"UI","Value":["1.3.46.670589.30.1.6.1.963334011378.1349417318546.1"]}}]}}]}}
57 |
58 | `get_element(data, list_of_tags_or_item_idx)` gives you access
59 | to an element in a potentially deeply nested structure. Numbers
60 | are taken as item indices into the `Value` arrays of nested `SQ` elements,
61 | while anything else is taken to be a tag (see `dicom.tags.for_tag`).
62 |
63 | `get_values(data, list_of_tags_or_item_idx)` does the same,
64 | but gives you the final `Value` property.
65 |
66 | `get_value(data, list_of_tags_or_item_idx)` gives you the first element of
67 | the `Value` property.
68 |
69 | `get_vr(data, ....)` gives you the `vr`.
70 |
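For example, with the model above in `data`, the deeply nested
referenced SOP class UID can be fetched in one call (numbers select
sequence items, everything else is a tag):

    // => "1.2.840.10008.5.1.4.1.1.1"
    var uid = dicom.json.get_value(data,
        "20019000", 0, "00081115", 0, "00081140", 0, "00081150");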
71 | ##DETAILS
72 |
73 | ###Dicom JSON Model
74 |
75 | The Dicom JSON Model is defined at
76 | http://medical.nema.org/dicom/2013/output/chtml/part18/sect_F.2.html
77 |
78 | ##SEE ALSO
79 | * dicom.decoder
80 | * dicom.json.encoder
81 | * dicom.json.sink
82 |
--------------------------------------------------------------------------------
/extra/commandelements.xml:
--------------------------------------------------------------------------------
1 |
2 | Command Group Length
3 | Command Length to End
4 | Affected SOP Class UID
5 | Requested SOP Class UID
6 | Command Recognition Code
7 | Command Field
8 | Message ID
9 | Message ID Being Responded To
10 | Initiator
11 | Receiver
12 | Find Location
13 | Move Destination
14 | Priority
15 | Command Data Set Type
16 | Number of Matches
17 | Response Sequence Number
18 | Status
19 | Offending Element
20 | Error Comment
21 | Error ID
22 | Affected SOP Instance UID
23 | Requested SOP Instance UID
24 | Event Type ID
25 | Attribute Identifier List
26 | Action Type ID
27 | Number of Remaining Sub-operations
28 | Number of Completed Sub-operations
29 | Number of Failed Sub-operations
30 | Number of Warning Sub-operations
31 | Move Originator Application Entity Title
32 | Move Originator Message ID
33 | Dialog Receiver
34 | Terminal Type
35 | Message Set ID
36 | End Message ID
37 | Display Format
38 | Page Position ID
39 | Text Format ID
40 | Normal/Reverse
41 | Add Gray Scale
42 | Borders
43 | Copies
44 | Command Magnification Type
45 | Erase
46 | Print
47 | Overlays
48 |
49 |
--------------------------------------------------------------------------------
/extra/make_tagdict.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | fs = require("fs")
4 | printf = require("printf")
5 | xml2js = require("xml2js")
6 | tags = require("../lib/tags.js")
7 |
8 | parse_file = (fn, cb) ->
9 | fs.readFile fn, "utf8", (err, content) ->
10 | if err
11 | return cb err
12 | xml2js.parseString content, (err, x) ->
13 | if err
14 | return cb err
15 | cb x
16 |
17 | _exports = []
18 | _TAG_DICT = []
19 | _masks = []
20 | _TAG_MASKS = []
21 |
22 | err_cb = (good_cb) ->
23 | (err, args...) ->
24 | if err
25 | console.error("Error:", err)
26 | process.exit 20
27 | good_cb args...
28 |
29 | collect_tag_dict = (root, cb) ->
30 | (data) ->
31 | console.log "# collect tag_dict #{root}"
32 | for x in data[root].el
33 | mask = x.$.tag
34 | name = x.$.keyword
35 | if name
36 | tag = parseInt(mask.replace(/[xX]/g, '0'), 16)
37 | tag_str = printf "%08x", tag
38 | _exports.push "exports.#{x.$.keyword} = new Element(#{tag}, '#{x.$.keyword}', '#{x.$.vr}', '#{x.$.vm}', '#{mask}', #{x.$.retired})"
39 | _TAG_DICT.push " '#{tag_str}': exports.#{x.$.keyword},"
40 | if 'x' in mask
41 | _masks.push mask
42 | cb()
43 |
44 | calc_masks = () ->
45 | for [cnt, and_mask, base_tag] in tags.calc_bitmasks(_masks)
46 | _TAG_MASKS.push printf(" [%d, 0x%08x, 0x%08x],", cnt, and_mask, base_tag)
47 |
48 | dump_dicts = () ->
49 | for x in _exports
50 | console.log x
51 | console.log "_TAG_DICT ="
52 | for x in _TAG_DICT
53 | console.log x
54 | console.log "_TAG_MASKS = ["
55 | for x in _TAG_MASKS
56 | console.log x
57 | console.log "]"
58 |
59 | parse_commandelements = (cb) ->
60 | parse_file "commandelements.xml", collect_tag_dict("commandelements", cb)
61 | parse_dataelements = (cb) ->
62 | parse_file "dataelements.xml", collect_tag_dict("dataelements", cb)
63 |
64 | postprocess = () ->
65 | console.log("# postprocessing")
66 | calc_masks()
67 | dump_dicts()
68 |
69 | parse_commandelements err_cb () ->
70 | parse_dataelements err_cb () ->
71 | postprocess()
72 |
--------------------------------------------------------------------------------
/extra/make_uiddict.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | fs = require("fs")
4 | printf = require("printf")
5 | xml2js = require("xml2js")
6 |
7 | parse_file = (fn, cb) ->
8 | fs.readFile fn, "utf8", (err, content) ->
9 | if err
10 | return cb err
11 | xml2js.parseString content, (err, x) ->
12 | if err
13 | return cb err
14 | cb x
15 |
16 | _exports = []
17 | _UID_DICT = []
18 |
19 | err_cb = (good_cb) ->
20 | (err, args...) ->
21 | if err
22 | console.error("Error:", err)
23 | process.exit 20
24 | good_cb args...
25 |
26 | collect_uid_dict = (cb) ->
27 | (data) ->
28 | for x in data.uids.uid
29 | uid = x.$.value
30 | name = x.$.keyword
31 | typ = x.$.type
32 | _exports.push "exports.#{name} = _make_uid('#{uid}', '#{name}', '#{typ}')"
33 | _UID_DICT.push " '#{uid}': exports.#{name},"
34 | cb()
35 |
36 | dump_dicts = () ->
37 | for x in _exports
38 | console.log x
39 | console.log "_UID_DICT ="
40 | for x in _UID_DICT
41 | console.log x
42 |
43 | parse_uids = (cb) ->
44 | parse_file "uids.xml", collect_uid_dict(cb)
45 |
46 | postprocess = () ->
47 | dump_dicts()
48 |
49 | parse_uids err_cb () ->
50 | postprocess()
51 |
--------------------------------------------------------------------------------
/extra/uids.xml:
--------------------------------------------------------------------------------
1 |
2 | Verification SOP Class
3 | Implicit VR Little Endian
4 | Explicit VR Little Endian
5 | Deflated Explicit VR Little Endian
6 | Explicit VR Big Endian (Retired)
7 | JPEG Baseline (Process 1)
8 | JPEG Extended (Process 2 & 4)
9 | JPEG Extended (Process 3 & 5) (Retired)
10 | JPEG Spectral Selection, Non-Hierarchical (Process 6 & 8) (Retired)
11 | JPEG Spectral Selection, Non-Hierarchical (Process 7 & 9) (Retired)
12 | JPEG Full Progression, Non-Hierarchical (Process 10 & 12) (Retired)
13 | JPEG Full Progression, Non-Hierarchical (Process 11 & 13) (Retired)
14 | JPEG Lossless, Non-Hierarchical (Process 14)
15 | JPEG Lossless, Non-Hierarchical (Process 15) (Retired)
16 | JPEG Extended, Hierarchical (Process 16 & 18) (Retired)
17 | JPEG Extended, Hierarchical (Process 17 & 19) (Retired)
18 | JPEG Spectral Selection, Hierarchical (Process 20 & 22) (Retired)
19 | JPEG Spectral Selection, Hierarchical (Process 21 & 23) (Retired)
20 | JPEG Full Progression, Hierarchical (Process 24 & 26) (Retired)
21 | JPEG Full Progression, Hierarchical (Process 25 & 27) (Retired)
22 | JPEG Lossless, Hierarchical (Process 28) (Retired)
23 | JPEG Lossless, Hierarchical (Process 29) (Retired)
24 | JPEG Lossless, Non-Hierarchical, First-Order Prediction (Process 14 [Selection Value 1])
25 | JPEG-LS Lossless Image Compression
26 | JPEG-LS Lossy (Near-Lossless) Image Compression
27 | JPEG 2000 Image Compression (Lossless Only)
28 | JPEG 2000 Image Compression
29 | JPEG 2000 Part 2 Multi-component Image Compression (Lossless Only)
30 | JPEG 2000 Part 2 Multi-component Image Compression
31 | JPIP Referenced
32 | JPIP Referenced Deflate
33 | MPEG2 Main Profile @ Main Level
34 | MPEG2 Main Profile @ High Level
35 | MPEG-4 AVC/H.264 High Profile / Level 4.1
36 | MPEG-4 AVC/H.264 BD-compatible High Profile / Level 4.1
37 | RLE Lossless
38 | RFC 2557 MIME encapsulation
39 | XML Encoding
40 | Media Storage Directory Storage
41 | Talairach Brain Atlas Frame of Reference
42 | SPM2 T1 Frame of Reference
43 | SPM2 T2 Frame of Reference
44 | SPM2 PD Frame of Reference
45 | SPM2 EPI Frame of Reference
46 | SPM2 FIL T1 Frame of Reference
47 | SPM2 PET Frame of Reference
48 | SPM2 TRANSM Frame of Reference
49 | SPM2 SPECT Frame of Reference
50 | SPM2 GRAY Frame of Reference
51 | SPM2 WHITE Frame of Reference
52 | SPM2 CSF Frame of Reference
53 | SPM2 BRAINMASK Frame of Reference
54 | SPM2 AVG305T1 Frame of Reference
55 | SPM2 AVG152T1 Frame of Reference
56 | SPM2 AVG152T2 Frame of Reference
57 | SPM2 AVG152PD Frame of Reference
58 | SPM2 SINGLESUBJT1 Frame of Reference
59 | ICBM 452 T1 Frame of Reference
60 | ICBM Single Subject MRI Frame of Reference
61 | Hot Iron Color Palette SOP Instance
62 | PET Color Palette SOP Instance
63 | Hot Metal Blue Color Palette SOP Instance
64 | PET 20 Step Color Palette SOP Instance
65 | Basic Study Content Notification SOP Class (Retired)
66 | Storage Commitment Push Model SOP Class
67 | Storage Commitment Push Model SOP Instance
68 | Storage Commitment Pull Model SOP Class (Retired)
69 | Storage Commitment Pull Model SOP Instance (Retired)
70 | Procedural Event Logging SOP Class
71 | Procedural Event Logging SOP Instance
72 | Substance Administration Logging SOP Class
73 | Substance Administration Logging SOP Instance
74 | DICOM UID Registry
75 | DICOM Controlled Terminology
76 | DICOM Application Context Name
77 | Detached Patient Management SOP Class (Retired)
78 | Detached Patient Management Meta SOP Class (Retired)
79 | Detached Visit Management SOP Class (Retired)
80 | Detached Study Management SOP Class (Retired)
81 | Study Component Management SOP Class (Retired)
82 | Modality Performed Procedure Step SOP Class
83 | Modality Performed Procedure Step Retrieve SOP Class
84 | Modality Performed Procedure Step Notification SOP Class
85 | Detached Results Management SOP Class (Retired)
86 | Detached Results Management Meta SOP Class (Retired)
87 | Detached Study Management Meta SOP Class (Retired)
88 | Detached Interpretation Management SOP Class (Retired)
89 | Storage Service Class
90 | Basic Film Session SOP Class
91 | Basic Film Box SOP Class
92 | Basic Grayscale Image Box SOP Class
93 | Basic Color Image Box SOP Class
94 | Referenced Image Box SOP Class (Retired)
95 | Basic Grayscale Print Management Meta SOP Class
96 | Referenced Grayscale Print Management Meta SOP Class (Retired)
97 | Print Job SOP Class
98 | Basic Annotation Box SOP Class
99 | Printer SOP Class
100 | Printer Configuration Retrieval SOP Class
101 | Printer SOP Instance
102 | Printer Configuration Retrieval SOP Instance
103 | Basic Color Print Management Meta SOP Class
104 | Referenced Color Print Management Meta SOP Class (Retired)
105 | VOI LUT Box SOP Class
106 | Presentation LUT SOP Class
107 | Image Overlay Box SOP Class (Retired)
108 | Basic Print Image Overlay Box SOP Class (Retired)
109 | Print Queue SOP Instance (Retired)
110 | Print Queue Management SOP Class (Retired)
111 | Stored Print Storage SOP Class (Retired)
112 | Hardcopy Grayscale Image Storage SOP Class (Retired)
113 | Hardcopy Color Image Storage SOP Class (Retired)
114 | Pull Print Request SOP Class (Retired)
115 | Pull Stored Print Management Meta SOP Class (Retired)
116 | Media Creation Management SOP Class UID
117 | Display System SOP Class
118 | Display System SOP Instance
119 | Computed Radiography Image Storage
120 | Digital X-Ray Image Storage - For Presentation
121 | Digital X-Ray Image Storage - For Processing
122 | Digital Mammography X-Ray Image Storage - For Presentation
123 | Digital Mammography X-Ray Image Storage - For Processing
124 | Digital Intra-Oral X-Ray Image Storage - For Presentation
125 | Digital Intra-Oral X-Ray Image Storage - For Processing
126 | CT Image Storage
127 | Enhanced CT Image Storage
128 | Legacy Converted Enhanced CT Image Storage
129 | Ultrasound Multi-frame Image Storage (Retired)
130 | Ultrasound Multi-frame Image Storage
131 | MR Image Storage
132 | Enhanced MR Image Storage
133 | MR Spectroscopy Storage
134 | Enhanced MR Color Image Storage
135 | Legacy Converted Enhanced MR Image Storage
136 | Nuclear Medicine Image Storage (Retired)
137 | Ultrasound Image Storage (Retired)
138 | Ultrasound Image Storage
139 | Enhanced US Volume Storage
140 | Secondary Capture Image Storage
141 | Multi-frame Single Bit Secondary Capture Image Storage
142 | Multi-frame Grayscale Byte Secondary Capture Image Storage
143 | Multi-frame Grayscale Word Secondary Capture Image Storage
144 | Multi-frame True Color Secondary Capture Image Storage
145 | Standalone Overlay Storage (Retired)
146 | Standalone Curve Storage (Retired)
147 | Waveform Storage - Trial (Retired)
148 | 12-lead ECG Waveform Storage
149 | General ECG Waveform Storage
150 | Ambulatory ECG Waveform Storage
151 | Hemodynamic Waveform Storage
152 | Cardiac Electrophysiology Waveform Storage
153 | Basic Voice Audio Waveform Storage
154 | General Audio Waveform Storage
155 | Arterial Pulse Waveform Storage
156 | Respiratory Waveform Storage
157 | Standalone Modality LUT Storage (Retired)
158 | Standalone VOI LUT Storage (Retired)
159 | Grayscale Softcopy Presentation State Storage SOP Class
160 | Color Softcopy Presentation State Storage SOP Class
161 | Pseudo-Color Softcopy Presentation State Storage SOP Class
162 | Blending Softcopy Presentation State Storage SOP Class
163 | XA/XRF Grayscale Softcopy Presentation State Storage
164 | X-Ray Angiographic Image Storage
165 | Enhanced XA Image Storage
166 | X-Ray Radiofluoroscopic Image Storage
167 | Enhanced XRF Image Storage
168 | X-Ray Angiographic Bi-Plane Image Storage (Retired)
169 | X-Ray 3D Angiographic Image Storage
170 | X-Ray 3D Craniofacial Image Storage
171 | Breast Tomosynthesis Image Storage
172 | Breast Projection X-Ray Image Storage - For Presentation
173 | Breast Projection X-Ray Image Storage - For Processing
174 | Intravascular Optical Coherence Tomography Image Storage - For Presentation
175 | Intravascular Optical Coherence Tomography Image Storage - For Processing
176 | Nuclear Medicine Image Storage
177 | Raw Data Storage
178 | Spatial Registration Storage
179 | Spatial Fiducials Storage
180 | Deformable Spatial Registration Storage
181 | Segmentation Storage
182 | Surface Segmentation Storage
183 | Real World Value Mapping Storage
184 | Surface Scan Mesh Storage
185 | Surface Scan Point Cloud Storage
186 | VL Image Storage - Trial (Retired)
187 | VL Multi-frame Image Storage - Trial (Retired)
188 | VL Endoscopic Image Storage
189 | Video Endoscopic Image Storage
190 | VL Microscopic Image Storage
191 | Video Microscopic Image Storage
192 | VL Slide-Coordinates Microscopic Image Storage
193 | VL Photographic Image Storage
194 | Video Photographic Image Storage
195 | Ophthalmic Photography 8 Bit Image Storage
196 | Ophthalmic Photography 16 Bit Image Storage
197 | Stereometric Relationship Storage
198 | Ophthalmic Tomography Image Storage
199 | VL Whole Slide Microscopy Image Storage
200 | Lensometry Measurements Storage
201 | Autorefraction Measurements Storage
202 | Keratometry Measurements Storage
203 | Subjective Refraction Measurements Storage
204 | Visual Acuity Measurements Storage
205 | Spectacle Prescription Report Storage
206 | Ophthalmic Axial Measurements Storage
207 | Intraocular Lens Calculations Storage
208 | Macular Grid Thickness and Volume Report Storage
209 | Ophthalmic Visual Field Static Perimetry Measurements Storage
210 | Ophthalmic Thickness Map Storage
211 | Corneal Topography Map Storage
212 | Text SR Storage - Trial (Retired)
213 | Audio SR Storage - Trial (Retired)
214 | Detail SR Storage - Trial (Retired)
215 | Comprehensive SR Storage - Trial (Retired)
216 | Basic Text SR Storage
217 | Enhanced SR Storage
218 | Comprehensive SR Storage
219 | Comprehensive 3D SR Storage
220 | Procedure Log Storage
221 | Mammography CAD SR Storage
222 | Key Object Selection Document Storage
223 | Chest CAD SR Storage
224 | X-Ray Radiation Dose SR Storage
225 | Radiopharmaceutical Radiation Dose SR Storage
226 | Colon CAD SR Storage
227 | Implantation Plan SR Storage
228 | Encapsulated PDF Storage
229 | Encapsulated CDA Storage
230 | Positron Emission Tomography Image Storage
231 | Legacy Converted Enhanced PET Image Storage
232 | Standalone PET Curve Storage (Retired)
233 | Enhanced PET Image Storage
234 | Basic Structured Display Storage
235 | RT Image Storage
236 | RT Dose Storage
237 | RT Structure Set Storage
238 | RT Beams Treatment Record Storage
239 | RT Plan Storage
240 | RT Brachy Treatment Record Storage
241 | RT Treatment Summary Record Storage
242 | RT Ion Plan Storage
243 | RT Ion Beams Treatment Record Storage
244 | DICOS CT Image Storage
245 | DICOS Digital X-Ray Image Storage - For Presentation
246 | DICOS Digital X-Ray Image Storage - For Processing
247 | DICOS Threat Detection Report Storage
248 | DICOS 2D AIT Storage
249 | DICOS 3D AIT Storage
250 | DICOS Quadrupole Resonance (QR) Storage
251 | Eddy Current Image Storage
252 | Eddy Current Multi-frame Image Storage
253 | Patient Root Query/Retrieve Information Model - FIND
254 | Patient Root Query/Retrieve Information Model - MOVE
255 | Patient Root Query/Retrieve Information Model - GET
256 | Study Root Query/Retrieve Information Model - FIND
257 | Study Root Query/Retrieve Information Model - MOVE
258 | Study Root Query/Retrieve Information Model - GET
259 | Patient/Study Only Query/Retrieve Information Model - FIND (Retired)
260 | Patient/Study Only Query/Retrieve Information Model - MOVE (Retired)
261 | Patient/Study Only Query/Retrieve Information Model - GET (Retired)
262 | Composite Instance Root Retrieve - MOVE
263 | Composite Instance Root Retrieve - GET
264 | Composite Instance Retrieve Without Bulk Data - GET
265 | Modality Worklist Information Model - FIND
266 | General Purpose Worklist Management Meta SOP Class (Retired)
267 | General Purpose Worklist Information Model - FIND (Retired)
268 | General Purpose Scheduled Procedure Step SOP Class (Retired)
269 | General Purpose Performed Procedure Step SOP Class (Retired)
270 | Instance Availability Notification SOP Class
271 | RT Beams Delivery Instruction Storage - Trial (Retired)
272 | RT Conventional Machine Verification - Trial (Retired)
273 | RT Ion Machine Verification - Trial (Retired)
274 | Unified Worklist and Procedure Step Service Class - Trial (Retired)
275 | Unified Procedure Step - Push SOP Class - Trial (Retired)
276 | Unified Procedure Step - Watch SOP Class - Trial (Retired)
277 | Unified Procedure Step - Pull SOP Class - Trial (Retired)
278 | Unified Procedure Step - Event SOP Class - Trial (Retired)
279 | Unified Worklist and Procedure Step SOP Instance
280 | Unified Worklist and Procedure Step Service Class
281 | Unified Procedure Step - Push SOP Class
282 | Unified Procedure Step - Watch SOP Class
283 | Unified Procedure Step - Pull SOP Class
284 | Unified Procedure Step - Event SOP Class
285 | RT Beams Delivery Instruction Storage
286 | RT Conventional Machine Verification
287 | RT Ion Machine Verification
288 | General Relevant Patient Information Query
289 | Breast Imaging Relevant Patient Information Query
290 | Cardiac Relevant Patient Information Query
291 | Hanging Protocol Storage
292 | Hanging Protocol Information Model - FIND
293 | Hanging Protocol Information Model - MOVE
294 | Hanging Protocol Information Model - GET
295 | Color Palette Storage
296 | Color Palette Information Model - FIND
297 | Color Palette Information Model - MOVE
298 | Color Palette Information Model - GET
299 | Product Characteristics Query SOP Class
300 | Substance Approval Query SOP Class
301 | Generic Implant Template Storage
302 | Generic Implant Template Information Model - FIND
303 | Generic Implant Template Information Model - MOVE
304 | Generic Implant Template Information Model - GET
305 | Implant Assembly Template Storage
306 | Implant Assembly Template Information Model - FIND
307 | Implant Assembly Template Information Model - MOVE
308 | Implant Assembly Template Information Model - GET
309 | Implant Template Group Storage
310 | Implant Template Group Information Model - FIND
311 | Implant Template Group Information Model - MOVE
312 | Implant Template Group Information Model - GET
313 | Native DICOM Model
314 | Abstract Multi-Dimensional Image Model
315 | dicomDeviceName
316 | dicomDescription
317 | dicomManufacturer
318 | dicomManufacturerModelName
319 | dicomSoftwareVersion
320 | dicomVendorData
321 | dicomAETitle
322 | dicomNetworkConnectionReference
323 | dicomApplicationCluster
324 | dicomAssociationInitiator
325 | dicomAssociationAcceptor
326 | dicomHostname
327 | dicomPort
328 | dicomSOPClass
329 | dicomTransferRole
330 | dicomTransferSyntax
331 | dicomPrimaryDeviceType
332 | dicomRelatedDeviceReference
333 | dicomPreferredCalledAETitle
334 | dicomTLSCyphersuite
335 | dicomAuthorizedNodeCertificateReference
336 | dicomThisNodeCertificateReference
337 | dicomInstalled
338 | dicomStationName
339 | dicomDeviceSerialNumber
340 | dicomInstitutionName
341 | dicomInstitutionAddress
342 | dicomInstitutionDepartmentName
343 | dicomIssuerOfPatientID
344 | dicomPreferredCallingAETitle
345 | dicomSupportedCharacterSet
346 | dicomConfigurationRoot
347 | dicomDevicesRoot
348 | dicomUniqueAETitlesRegistryRoot
349 | dicomDevice
350 | dicomNetworkAE
351 | dicomNetworkConnection
352 | dicomUniqueAETitle
353 | dicomTransferCapability
354 | Universal Coordinated Time
355 |
356 |
--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 |
3 | exports.uids = require('./lib/uids');
4 | exports.tags = require('./lib/tags');
5 | exports.vrs = require('./lib/vrs');
6 | exports.decoder = require('./lib/decoder');
7 | exports.json = require('./lib/json');
8 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "author": "Juergen Gmeiner",
3 | "name": "dicom",
4 | "description": "DICOM for node-js",
5 | "keywords": [
6 | "DICOM"
7 | ],
8 | "license": "MIT",
9 | "version": "0.4.4",
10 | "repository": {
11 | "type": "git",
12 | "url": "git@github.com:grmble/node-dicom.git"
13 | },
14 | "bin": {
15 | "find-tags": "./bin/find-tags.js",
16 | "find-uids": "./bin/find-uids.js"
17 | },
18 | "scripts": {
19 | "build": "coffee --compile --output lib/ src/",
20 | "prepublish": "coffee --compile --output lib/ src/",
21 | "test": "nodeunit test"
22 | },
23 | "engines": {
24 | "//": "node v0.10.31 is bad, seg faults. use 0.10.30 or 0.11",
25 | "node": ">0.10.0"
26 | },
27 | "dependencies": {
28 | "bunyan": "^1.0.1",
29 | "concat-stream": "^1.4.6",
30 | "iconv-lite": "^0.4.4",
31 | "minimist": "^1.1.0",
32 | "printf": "0.2.x"
33 | },
34 | "devDependencies": {
35 | "coffee-script": "^1.8.0",
36 | "grunt": "^0.4.5",
37 | "grunt-contrib-coffee": "^0.12.0",
38 | "grunt-contrib-nodeunit": "^0.4.1",
39 | "grunt-contrib-watch": "^0.6.1",
40 | "nodeunit": "0.9.x",
41 | "xml2js": "0.4.x"
42 | },
43 | "directories": {
44 | "lib": "./lib"
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/src/decoder.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 |
4 | vrs = require("./vrs")
5 | readbuffer = require("./readbuffer")
6 | uids = require("../lib/uids")
7 | tags = require("../lib/tags")
8 |
9 | fs = require("fs")
10 | stream = require("stream")
11 | printf = require("printf")
12 |
13 | log = require("./logger")('decoder')
14 |
15 | ##
16 | #
17 | # Dicom Decoder
18 | #
19 | # Transforms IO events into DICOM parse events
20 | #
21 | # stream.Readable / stream.Writeable / Stream.Transform options are accepted
22 | #
23 | # also:
24 | # * streaming_value_length_minimum: minimum value length, longer values will be
25 | # streamed
26 | # * read_header: read preamble / dicom header, defaults to false.
27 | # Also implies reading metainfo.
28 | # * transfer_syntax: transfer syntax, defaults to ExplicitVRLittleEndian
29 | # * guess_header: will try to guess if preamble/dicom header are present
30 | ##
31 | class Decoder extends stream.Transform
32 | constructor: (options)->
33 | if not (this instanceof Decoder)
34 | return new Decoder(options)
35 | super(options)
36 | @streaming_value_length_minimum = options?.streaming_value_length_minimum
37 | @_writableState.objectMode = false
38 | @_readableState.objectMode = true
39 | @context = new vrs.ContextStack()
40 | ts_name = options?.transfer_syntax
41 | ts_name = 'ExplicitVRLittleEndian' if not ts_name
42 | ts = uids.for_uid(ts_name)
43 | @context.push(ts.make_context())
44 | @buffer = readbuffer()
45 | if options?.read_header
46 | log.debug "initial state: read_header"
47 | @state = @_decode_datafile
48 | else if options?.guess_header
49 | log.debug "initial state: guess_header"
50 | @state = @_guess_header
51 | else
52 | log.debug "initial state: decode_dataset"
53 | @state = @_decode_dataset
54 | log.debug({decoder: @log_summary()}, "decoder initialized")
55 |
56 | # log summary for bunyan
57 | log_summary: () ->
58 | summary =
59 | buffer: @buffer.log_summary()
60 | context: @context.log_summary()
61 |
62 | _transform: (chunk, encoding, cb) ->
63 | @buffer.push chunk
64 | log.debug({buffer: @buffer.log_summary()}, "_transform") if log.debug()
65 | @_action_wrapper(@state)
66 | log.debug({buffer: @buffer.log_summary()}, "_transform done, calling cb") if log.debug()
67 | cb()
68 |
69 | _flush: (cb) ->
70 | @_action_wrapper(@state)
71 | if @buffer.length == 0 and @context.stack_depth() == 1 and @saved.stream_position == @buffer.stream_position
72 | log.debug "_flush successful, all is well with our decode"
73 | cb()
74 | else
75 | log.debug({buffer: @buffer.length, context: @context.stack_depth(), saved: @saved.stream_position, position: @buffer.stream_position},
76 | "_flush: can not flush (length should be 0, stack depth 1)")
77 | @emit('error', new vrs.UnexpectedEofOfFile())
78 |
79 | _switch_state: (state, msg) ->
80 | if not state
81 | state = @_decode_dataset
82 | log.debug {state: state}, "switching state: #{msg} ==> #{state}"
83 | @state = state
84 |
85 | _metainfo_done_cb: () =>
86 | log.debug "metainfo callback, setting metainfo_done"
87 | @metainfo_done = true
88 | @_switch_state @_decode_dataset, "metainfo done, decoding dataset"
89 | if @metainfo_listener
90 | @removeListener 'data', @metainfo_listener
91 | @metainfo_listener = undefined
92 | log.debug "ts=#{@metainfo_ts}"
93 | ts = uids.for_uid(@metainfo_ts)
94 | log.debug {ts: ts}, "_decode_metainfo: switching transfer syntax"
95 | @context.replace_root(ts.make_context())
96 |
97 | _decode_metainfo: () =>
98 | if not @metainfo_ts
99 | @saved = @buffer.copy()
100 | start_pos = @buffer.stream_position
101 | @metainfo_done = false
102 |
103 | if not @metainfo_listener
104 | @metainfo_listener = (event) =>
105 | if event.element.tag == 0x00020010
106 | @metainfo_ts = event.vr.value()
107 | log.debug {ts: @metainfo_ts}, "metainfo transfer syntax found"
108 | @on 'data', @metainfo_listener
109 |
110 | while not @metainfo_done
111 | @_decode_metaelement()
112 |
113 | @_decode_dataset()
114 |
115 | _decode_dataset: () =>
116 | while true
117 | @_decode_dataelement()
118 | return undefined
119 |
120 | # decode metaelement - only for group 0002
121 | _decode_metaelement: () =>
122 | @saved = @buffer.copy()
123 | log.trace({buffer: @saved.log_summary()}, "_decode_metaelement: saved buffer state") if log.trace()
124 | element_position = @buffer.stream_position
125 | @context.handle_autopops(element_position)
126 | tag = (new vrs.AT(@context.top())).consume_value(@buffer)
127 | log.debug({tag: printf("%08x", tag)}, "decoded tag") if log.debug()
128 | tag_str = printf("%08X", tag)
129 | # end of metadata is indicated by tag not in group 0002
130 | group_str = tag_str.substr(0, 4)
131 | if group_str != "0002"
132 | log.debug({tag: tag_str}, "end of metainfo")
133 | @buffer = @saved
134 | return @_metainfo_done_cb()
135 | # comparing tags somehow does not work ...
136 | switch tag_str
137 | when tags.Item.mask
138 | @_handle_item(tag, element_position)
139 | when tags.ItemDelimitationItem.mask
140 | @_handle_itemdelimitation(tag, element_position)
141 | when tags.SequenceDelimitationItem.mask
142 | @_handle_sequencedelimitation(tag, element_position)
143 | else
144 | @_handle_element(tag, element_position)
145 |
146 | _decode_dataelement: () =>
147 | @saved = @buffer.copy()
148 | log.trace({buffer: @saved.log_summary()}, "_decode_dataelement: saved buffer state") if log.trace()
149 | element_position = @buffer.stream_position
150 | @context.handle_autopops(element_position)
151 | tag = (new vrs.AT(@context.top())).consume_value(@buffer)
152 | log.debug({tag: printf("%08x", tag)}, "decoded tag") if log.debug()
153 | tag_str = printf("%08X", tag)
154 | # comparing tags somehow does not work ...
155 | switch tag_str
156 | when tags.Item.mask
157 | @_handle_item(tag, element_position)
158 | when tags.ItemDelimitationItem.mask
159 | @_handle_itemdelimitation(tag, element_position)
160 | when tags.SequenceDelimitationItem.mask
161 | @_handle_sequencedelimitation(tag, element_position)
162 | else
163 | @_handle_element(tag, element_position)
164 |
165 | # wrap the action
166 | # this does the housekeeping like exception handling
167 | _action_wrapper: (func) ->
168 | try
169 | func()
170 | catch err
171 | if err?.doNotRestore
172 | log.debug "_action_wrapper: streaming NeedMoreInput - no need to restore"
173 | # @saved = @buffer ???
174 | else if err?.needMoreInput
175 | log.debug({buffer: @buffer.log_summary()}, "_action_wrapper: need to restore")
176 | @buffer = @saved
177 | log.debug({needMoreInput: err.needMoreInput, buffer: @buffer.log_summary(), error: err},
178 | "_action_wrapper: restored buffer after NeedMoreInput")
179 | else
180 | log.error {error: err}, "_action_wrapper: emitting error"
181 | @emit 'error', err
182 |
183 | ##
184 | # try to guess the format
185 | # DICM at offset 128+ => header present
186 | # file starts with: 0800 0500 4353 ==> SpecificCharacterSet, ExplicitVRLittleEndian
187 | # file starts with: 0800 0500 ==> SpecificCharacterSet, ImplicitVRLittleEndian
188 | _guess_header: () =>
189 | @saved = @buffer.copy()
190 | header = @buffer.easy_consume(132)
191 | if header.length == 132 and header.toString("binary", 128, 132) == 'DICM'
192 | log.debug "_guess_header: dicom header present, reading dicom datafile"
193 | @buffer = @saved
194 | return @_decode_datafile()
195 | if header.length >= 6 and header.slice(0, 6).equals(new Buffer([0x08, 0x00, 0x05, 0x00, 0x43, 0x53]))
196 | log.debug "_guess_header: start with specific character set, ExplicitVRLittleEndian"
197 | @buffer = @saved
198 | return @_decode_dataset()
199 | if header.length >= 4 and header.slice(0, 4).equals(new Buffer([0x08, 0x00, 0x05, 0x00]))
200 | log.debug "_guess_header: start with specific character set, ImplicitVRLittleEndian"
201 | ts = uids.for_uid('ImplicitVRLittleEndian')
202 | @context.replace_root(ts.make_context())
203 | @buffer = @saved
204 | return @_decode_dataset()
205 | throw new vrs.DicomError("Unable to guess DICOM encoding")
206 |
207 | _decode_datafile: () =>
208 | @_switch_state @_decode_datafile, "decoding preamble/header"
209 | @saved = @buffer.copy()
210 | header = @buffer.consume(132)
211 | if header.toString("binary", 128, 132) != 'DICM'
212 | throw new vrs.DicomError("No DICOM header found")
213 | @_switch_state @_decode_metainfo, "header decoded, decoding metainfo now"
214 | @_decode_metainfo()
215 |
216 | _consume_std_value_length: () =>
217 | length_element = new vrs.UL(@context.top_little_endian())
218 | return length_element.consume_value(@buffer)
219 |
220 | _handle_element: (tag, start_position) ->
221 | is_explicit = @context.top().explicit
222 | tagdef = tags.for_tag(tag)
223 | if not is_explicit
224 | vrstr = tagdef.vr
225 | else
226 | vrstr = @buffer.consume(2).toString('binary')
227 | log.debug({vr: vrstr}, "_handle_element") if log.debug()
228 | vr = vrs.for_name(vrstr, @context.top())
229 | vr.consume_and_emit(tagdef, @buffer, this, start_position)
230 |
231 | _handle_item: (tag, start_pos) ->
232 | # item is always in standard ts
233 | value_length = @_consume_std_value_length()
234 | element = tags.for_tag(tag)
235 | if @context.top().encapsulated
236 | # we are in encapsulated OB ... just stream the content out
237 | bd_offset = @buffer.stream_position
238 | bd_length = value_length
239 | obj = new vrs.DicomEvent(element, null, start_pos, "start_item", null, bd_offset, bd_length)
240 | @log_and_push obj
241 | _obj = new vrs.DicomEvent(element, null, start_pos, "end_item", null, bd_offset, bd_length)
242 | @_stream_bytes(value_length, _obj)
243 | return undefined # no emit by main loop, thank you
244 | else
245 | end_position = undefined
246 | if value_length != vrs.UNDEFINED_LENGTH
247 | end_position = @buffer.stream_position + value_length
248 | end_cb = () =>
249 | _obj = new vrs.DicomEvent(element, null, start_pos, "end_item")
250 | @log_and_push _obj
251 | @context.push({}, end_position, end_cb)
252 | obj = new vrs.DicomEvent(element, null, start_pos, "start_item")
253 | @log_and_push obj
254 |
255 | _handle_itemdelimitation: (tag, start_position) ->
256 | # always standard ts
257 | value_length = @_consume_std_value_length()
258 | obj = new vrs.DicomEvent(tags.for_tag(tag), null, start_position, 'end_item')
259 | @context.pop()
260 | @log_and_push obj
261 |
262 | _handle_sequencedelimitation: (tag, start_position) ->
263 | # always standard ts
264 | value_length = @_consume_std_value_length()
265 | command = 'end_sequence'
266 | popped = @context.pop()
267 | if popped?.encapsulated and not @context.top().encapsulated
268 | # we were inside encapsulated pixeldata - SequenceDelimitationItem
269 | # ends the pixeldata element, not some sequence
270 | command = 'end_element'
271 | obj = new vrs.DicomEvent(tags.for_tag(tag), null, start_position, command)
272 | @log_and_push obj
273 |
274 | # stream x bytes out
275 | # this switches states to itself in case the buffer
276 | # runs short (very likely with streaming).
277 | # once all bytes have been consumed/emitted,
278 | # emitObj will be emitted (if any).
279 | # Finally the state will be switched to nextState.
280 | # nextState defaults to _decode_dataset
281 | _stream_bytes: (bytes, emitObj, nextState) ->
282 | log.debug "_stream_bytes: arranging to stream #{bytes}"
283 | streamer = new ByteStreamer({bytes: bytes, emitObj:emitObj, nextState: nextState, buffer: @buffer, decoder: this})
284 | @_switch_state streamer.stream_bytes, "byte_streamer_state"
285 | streamer.stream_bytes()
286 |
287 | log_and_push: (obj) ->
288 | log.debug({event: obj.log_summary?()}, "Decoder: emitting dicom event") if log.debug()
289 | @push obj
290 |
291 | class ByteStreamer
292 | constructor: (options) ->
293 | {@bytes,@emitObj,@nextState,@buffer,@decoder} = options
294 | stream_bytes: () =>
295 | while @bytes > 0
296 | buff = @buffer.easy_consume(@bytes)
297 | @bytes -= buff.length
298 | obj = new vrs.DicomEvent(undefined, undefined, undefined, undefined, buff)
299 | @decoder.log_and_push obj
300 | if @emitObj?
301 | @decoder.log_and_push @emitObj
302 | if not @nextState?
303 | @nextState = @decoder._decode_dataset
304 | @decoder._switch_state(@nextState, "stream_bytes nextState")
305 |
306 | module.exports = Decoder
307 |
308 | if require.main is module
309 | fs.createReadStream process.argv[2] #, {highWaterMark: 32}
310 | .pipe new Decoder {guess_header: true}
311 |
--------------------------------------------------------------------------------
/src/encoder.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 |
4 | vrs = require("../lib/vrs")
5 | uids = require("../lib/uids")
6 | tags = require("../lib/tags")
7 | stream = require("stream")
8 | printf = require("printf")
9 |
10 | log = require("./logger")('encoder')
11 |
12 | ##
13 | #
14 | # Dicom Encoder
15 | #
16 | # Transforms DICOM Events into IO events
17 | #
18 | # stream.Readable / stream.Writeable / Stream.Transform options are accepted
19 | #
20 | # also:
21 | # * transfer_syntax: transfer syntax, defaults to ExplicitVRLittleEndian
22 | ##
23 | class Encoder extends stream.Transform
24 | constructor: (options)->
25 | if not (this instanceof Encoder)
26 | return new Encoder(options)
27 | super(options)
28 | @_writableState.objectMode = true
29 | @_readableState.objectMode = false
30 | @context = new vrs.ContextStack()
31 | ts_name = options?.transfer_syntax
32 | ts_name = 'ExplicitVRLittleEndian' if not ts_name
33 | ts = uids.for_uid(ts_name)
34 | @context.push(ts.make_context())
35 | log.debug({encoder: @log_summary()}, "encoder initialized")
36 |
37 | _transform: (obj, encoding, cb) ->
38 | try
39 | log.trace(obj?.log_summary?(), "Encoder _transform") if log.trace()
40 | switch obj.command
41 | when 'element'
42 | obj.vr._encode_and_emit(obj.element, this)
43 | when 'start_sequence'
44 | obj.vr._encode_and_emit_seq(obj.element, this)
45 | when 'end_sequence'
46 | @_emit_std_tag_and_value_length(tags.SequenceDelimitationItem.tag, 0)
47 | when 'start_item'
48 | @_emit_std_tag_and_value_length(tags.Item.tag, vrs.UNDEFINED_LENGTH)
49 | when 'end_item'
50 | @_emit_std_tag_and_value_length(tags.ItemDelimitationItem.tag, 0)
51 | when 'start_element'
52 | @_handle_start_element(obj)
53 | when 'end_element'
54 | @_handle_end_element(obj)
55 | else
56 | @_handle_raw(obj)
57 | return cb()
58 | catch err
59 | @emit 'error', err
60 |
61 | # emit item, item delimitation and sequence delimitation tags
62 | # these are always encoded in implicit VR little endian
63 | _emit_std_tag_and_value_length: (tag, value_length) ->
64 | tag = new vrs.AT(@context.top(), null, [tag])
65 | log.trace({tag: tag, value_length: value_length}, "_emit_std_tag_and_value_length")
66 | @push(tag.buffer)
67 | ul = new vrs.UL(@context.top_little_endian(), null, [value_length])
68 | @push(ul.buffer)
69 |
70 |
71 |
72 | # log summary for bunyan
73 | log_summary: () ->
74 | summary =
75 | context: @context.log_summary()
76 |
77 | module.exports = Encoder
78 |
79 | err_cb = (err) ->
80 | console.error "Error:", err
81 | console.error "Stack trace:", err.stack
82 | process.exit 10
83 |
84 | if require.main is module
85 | fs = require "fs"
86 | sink = require "./json/sink"
87 | source = require "./json/source"
88 | fs.createReadStream process.argv[2]
89 | .pipe sink (err, data) ->
90 | return err_cb(err) if err
91 | log.trace {json: data}, "Processing JSON:"
92 | source data
93 | .on 'error', err_cb
94 | .pipe new Encoder()
95 | .on 'error', err_cb
96 | .pipe fs.createWriteStream process.argv[3] || "/tmp/x.x"
97 | .on 'error', err_cb
98 |
99 |
100 |
101 |
--------------------------------------------------------------------------------
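A reduced sketch of the JSON-to-DICOM round trip that the `require.main` block above performs; the require paths and file names are placeholders:

```coffee
fs = require "fs"
sink = require "./src/json/sink"       # hypothetical require paths
source = require "./src/json/source"
Encoder = require "./src/encoder"

# parse a DICOM JSON model file, re-emit it as DicomEvents,
# and encode those into a DICOM file (ExplicitVRLittleEndian by default)
fs.createReadStream "model.json"
  .pipe sink (err, data) ->
    throw err if err
    source data
      .pipe new Encoder()
      .pipe fs.createWriteStream "out.dcm"
```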
/src/json.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | ##
4 | #
5 | # DICOM Json Model
6 | #
7 | # http://medical.nema.org/dicom/2013/output/chtml/part18/sect_F.2.html
8 | #
9 | ##
10 |
11 | fs = require("fs")
12 | stream = require("stream")
13 | zlib = require("zlib")
14 |
15 | printf = require("printf")
16 | ConcatStream = require("concat-stream")
17 | minimist = require("minimist")
18 |
19 | tags = require("../lib/tags")
20 | decoder = require("../lib/decoder")
21 | log = require("./logger")("json")
22 |
23 | JsonEncoder = require("./json/encoder")
24 | JsonSink = require("./json/sink")
25 | JsonSource = require("./json/source")
26 |
27 |
28 | # remain compatible with old, all-in-one json.coffee
29 | _COMPATIBILITY = true
30 | if _COMPATIBILITY
31 | exports.JsonEncoder = JsonEncoder
32 | exports.JsonSink = JsonSink
33 | exports.JsonSource = JsonSource
34 |
35 | # helper functions
36 | # path elements may be anything that can be
37 | # tags.for_tag-ed except NUMBERS - they
38 | # represent sequence item access
39 | get_element = (json, path...) ->
40 | lookup = []
41 | must_pop = false
42 | for p in path
43 | if (typeof p) == 'number'
44 | lookup.push p
45 | must_pop = false
46 | else
47 | lookup.push tags.for_tag(p).tag_str
48 | lookup.push "Value"
49 | must_pop = true
50 | if must_pop
51 | lookup.pop()
52 | result = json
53 | for x in lookup
54 | result = result?[x]
55 | return result
56 |
57 | get_values = (json, path...) ->
58 | return get_element(json, path...)?.Value
59 |
60 | get_value = (json, path...) ->
61 | return get_values(json, path...)?[0]
62 |
63 | get_vr = (json, path...) ->
64 | return get_element(json, path...)?.vr
65 |
66 | _get_filename = (obj_or_fn) ->
67 | if typeof(obj_or_fn) == 'string'
68 | obj_or_fn
69 | else
70 | obj_or_fn.filename
71 |
72 | _get_bulkdata_uri = (obj_or_fn) ->
73 | if typeof(obj_or_fn) == 'string'
74 | obj_or_fn
75 | else
76 | obj_or_fn.bulkdata_uri ? obj_or_fn.filename
77 |
78 | file2jsonstream = (fn, cb) ->
79 | fs.createReadStream _get_filename(fn)
80 | .on 'error', cb
81 | .pipe decoder {guess_header: true}
82 | .on 'error', cb
83 | .pipe new JsonEncoder({bulkdata_uri: _get_bulkdata_uri(fn)})
84 | .on 'error', cb
85 |
86 | file2json = (fn, cb) ->
87 | file2jsonstream(fn, cb)
88 | .pipe new JsonSink(cb)
89 | .on 'error', cb
90 |
91 | # cb is called for errors
92 | gunzip2jsonstream = (fn, cb) ->
93 | fs.createReadStream _get_filename(fn)
94 | .on 'error', cb
95 | .pipe zlib.createGunzip()
96 | .on 'error', cb
97 | .pipe decoder {guess_header: true}
98 | .on 'error', cb
99 | .pipe new JsonEncoder({bulkdata_uri: _get_bulkdata_uri(fn)})
100 | .on 'error', cb
101 |
102 | gunzip2json = (fn, cb) ->
103 | gunzip2jsonstream(fn, cb)
104 | .pipe new JsonSink(cb)
105 | .on 'error', cb
106 |
107 |
108 | # make a decoder piping into json sink
109 | # errors are correctly chained,
110 | # returns the DECODER
111 | # options: transfer_syntax (for decoder), bulkdata_uri for encoder
112 | decoder2json = (opts, cb) ->
113 | _dec = new decoder(opts)
114 | _dec.on 'error', cb
115 | .pipe new JsonEncoder(opts)
116 | .on 'error', cb
117 | .pipe new JsonSink(cb)
118 | .on 'error', cb
119 | return _dec
120 |
121 | exports.get_element = get_element
122 | exports.get_values = get_values
123 | exports.get_value = get_value
124 | exports.get_vr = get_vr
125 | exports.file2jsonstream = file2jsonstream
126 | exports.gunzip2jsonstream = gunzip2jsonstream
127 | exports.file2json = file2json
128 | exports.gunzip2json = gunzip2json
129 | exports.decoder2json = decoder2json
130 |
131 |
132 | _err_cb = (err) ->
133 | console.error "Error:", err.stack
134 | process.exit 1
135 |
136 | if require.main is module
137 | options = minimist(process.argv.slice(2),
138 | {boolean: ['gunzip', 'emit'], alias: {z: 'gunzip', 'e': 'emit'}})
139 | filename = options._[0]
140 | if options.gunzip
141 | input = gunzip2jsonstream(filename, _err_cb)
142 | else
143 | input = file2jsonstream(filename, _err_cb)
144 |
145 | if options.emit
146 | sink = new JsonSink (err, data) ->
147 | throw err if err
148 | log.info "setting up json source"
149 | source = new JsonSource(data)
150 | source.pipe process.stdout
151 | input.pipe sink
152 | else
153 | input.pipe process.stdout
154 |
155 |
--------------------------------------------------------------------------------
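The helpers above make one-shot reads fairly compact; a hedged sketch (the file name and tag paths are illustrative):

```coffee
json = require "./src/json"   # hypothetical require path

json.file2json "/tmp/report.dcm", (err, model) ->
  throw err if err
  # string path elements are tags.for_tag-ed, numbers select sequence items
  console.log json.get_value(model, "PatientID")
  console.log json.get_value(model, "OtherPatientIDsSequence", 0, "PatientID")
```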
/src/json/encoder.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | stream = require("stream")
4 | printf = require("printf")
5 |
6 | tags = require("../../lib/tags")
7 | log = require("../logger")("json", "encoder")
8 |
9 | ##
10 | # JsonEncoder
11 | #
12 | # takes a stream of DicomEvents and produces
13 | # JSON.
14 | #
15 | # * bulkdata_uri: uri for emitting bulkdata - ?offset=x&length=y will be appended
16 | ##
17 | class JsonEncoder extends stream.Transform
18 | constructor: (options)->
19 | if not (this instanceof JsonEncoder)
20 | return new JsonEncoder(options)
21 | super(options)
22 | @_bulkdata_uri = options?.bulkdata_uri
23 | @_writableState.objectMode = true
24 | @_readableState.objectMode = false
25 | @depth = 0
26 | @fresh = true
27 | @ignore = 0
28 |
29 | _transform: (event, encoding, cb) ->
30 | try
31 | log.debug({command: event.command, element: event.element?.name, depth: @depth},
32 | "Json:_transform received dicom event") if log.debug()
33 | command = event.command
34 | switch command
35 | when 'element' then @handle_element(event)
36 | when 'start_sequence' then @start_sequence(event)
37 | when 'end_sequence' then @end_sequence(event)
38 | when 'start_item' then @start_item(event)
39 | when 'end_item' then @end_item(event)
40 | when 'start_element' then @start_element(event)
41 | when 'end_element' then @end_element(event)
42 | else
43 | log.trace({command: command}, "_transform: ignoring") if log.trace()
44 | cb(null)
45 | catch err
46 | log.error err
47 | cb(err)
48 |
49 | _flush: (cb) ->
50 | @push "}\n"
51 | cb(null)
52 |
53 |
54 | handle_element: (event) ->
55 | return if @ignore
56 | key = printf '"%08X"', event.element.tag
57 | key = printf "%*s", key, key.length + @depth
58 | obj = {vr: event.vr.name}
59 | if event.vr.base64_values
60 | obj.InlineBinary = event.vr.values()
61 | else
62 | obj.Value = event.vr.values()
63 | start = ',\n'
64 | if @fresh
65 | start = '{\n'
66 | @fresh = false
67 | @push printf('%s%s: %s', start, key, JSON.stringify(obj))
68 |
69 | start_sequence: (event) ->
70 | return if @ignore
71 | key = printf '"%08X"', event.element.tag
72 | key = printf "%*s", key, key.length + @depth
73 | start = ',\n'
74 | if @fresh
75 | start = '{\n'
76 | @push printf('%s%s: {"vr":"SQ", "Value": [', start, key)
77 | @fresh = true
78 | @depth++
79 |
80 | end_sequence: (event) ->
81 | return if @ignore
82 | @fresh = false
83 | @push ']}'
84 | @depth--
85 |
86 | start_item: (event) ->
87 | return if @ignore
88 | if not @fresh
89 | @push ","
90 | @fresh = true
91 | if event.bulkdata_offset and event.bulkdata_length
92 | # encapsulated pixeldata
93 | bd_uri = @_bulkdata_uri + "?offset=" + event.bulkdata_offset + "&length=" + event.bulkdata_length
94 | @push printf('{"BulkDataURI":%s', JSON.stringify(bd_uri))
95 | @fresh = false
96 |
97 | end_item: (event) ->
98 | return if @ignore
99 | if @fresh
100 | @push "{}"
101 | else
102 | @push "}"
103 | @fresh = false
104 |
105 | # without a bulkdata_uri, ignore everything inside start_element / end_element
106 | start_element: (event) ->
107 | if @_bulkdata_uri
108 | key = printf '"%08X"', event.element.tag
109 | key = printf "%*s", key, key.length + @depth
110 | start = ',\n'
111 | if @fresh
112 | start = '{\n'
113 | if event.bulkdata_offset and event.bulkdata_length
114 | bd_uri = @_bulkdata_uri + "?offset=" + event.bulkdata_offset + "&length=" + event.bulkdata_length
115 | @push printf('%s%s: {"vr":"%s","BulkDataURI":%s', start, key, event.vr.name, JSON.stringify(bd_uri))
116 | else
117 | @push printf('%s%s: {"vr":"%s","DataFragment": [', start, key, event.vr.name)
118 | @fresh = true
119 | @depth++
120 | else
121 | @ignore++
122 |
123 | end_element: (event) ->
124 | if @ignore
125 | @ignore--
126 | return
127 | if @_bulkdata_uri
128 | @fresh = false
129 | if event.bulkdata_offset and event.bulkdata_length
130 | @push '}'
131 | else
132 | @push ']}'
133 | @depth--
134 |
135 | module.exports = JsonEncoder
136 |
--------------------------------------------------------------------------------
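To exercise the JSON encoder by itself, one can pipe decoder output straight into it; a sketch in which the paths and the bulkdata URI are placeholders:

```coffee
fs = require "fs"
decoder = require "./lib/decoder"           # hypothetical require paths
JsonEncoder = require "./src/json/encoder"

fs.createReadStream "/tmp/image.dcm"
  .pipe decoder {guess_header: true}
  # with bulkdata_uri set, large elements come out as
  # {"BulkDataURI": "...?offset=x&length=y"} instead of being skipped
  .pipe new JsonEncoder {bulkdata_uri: "http://example.com/bulk"}
  .pipe process.stdout
```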
/src/json/sink.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | ConcatStream = require("concat-stream")
4 | log = require("../logger")("json", "sink")
5 |
6 | ##
7 | #
8 | # Calls cb with JSON or error
9 | ##
10 | class JsonSink extends ConcatStream
11 | constructor: (cb) ->
12 | if not (this instanceof JsonSink)
13 | return new JsonSink(cb)
14 | super {}, (json_string) ->
15 | try
16 | json = JSON.parse(json_string)
17 | cb null, json
18 | catch err
19 | cb(err)
20 | undefined
21 | @on 'error', (err) ->
22 | log.debug {error: err}, "JsonSink: on error ... calling cb"
23 | cb(err)
24 |
25 | module.exports = JsonSink
26 |
--------------------------------------------------------------------------------
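JsonSink is just ConcatStream plus JSON.parse; a minimal sketch (the input file is a placeholder):

```coffee
fs = require "fs"
JsonSink = require "./src/json/sink"   # hypothetical require path

# the callback fires once, with either an error or the parsed object
fs.createReadStream "model.json"
  .pipe new JsonSink (err, data) ->
    return console.error err if err
    console.log "top-level keys:", Object.keys(data).length
```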
/src/json/source.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | stream = require("stream")
4 | util = require("util")
5 | tags = require("../../lib/tags")
6 | uids = require("../../lib/uids")
7 | vrs = require("../../lib/vrs")
8 | log = require("../logger")("json", "source")
9 |
10 | ##
11 | # store queue of keys and their data
12 | # _end_event is for emitting end_item
13 | ##
14 | class ItemEntry
15 | constructor: (data, @_end_event)->
16 | @_data = {}
17 | for k,v of data
18 | @_data[tags.for_tag(k).tag_str] = v
19 | @_queue = (k for k of @_data)
20 | @_queue.sort()
21 | @_queue.reverse()
22 | # ItemEntry unshifts strings containing 8-digit hex tags
23 | unshift: () ->
24 | return @_queue.pop()
25 | end_event: () ->
26 | return @_end_event
27 | data: (k) ->
28 | v = @_data[k]
29 | el = tags.for_tag(k)
30 | if util.isArray(v)
31 | v = {vr: el.vr, Value: v}
32 | else if typeof(v) in ['string', 'number']
33 | v = {vr: el.vr, Value: [v]}
34 | else if typeof(v) == 'object'
35 | if v.BulkDataURI
36 | throw new vrs.DicomError("can not emit json model with bulkdata uri: " + v)
37 | if not v.vr?
38 | v.vr = el.vr
39 | else
40 | throw new vrs.DicomError("can not recognize dicom json model: " + v)
41 | return [el, v]
42 |
43 | ## store queue of items
44 | class SeqEntry
45 | constructor: (items) ->
46 | @_queue = (x for x in items)
47 | @_queue.reverse()
48 | log.trace({length: @_queue.length}, "SeqEntry")
49 | # SeqEntry unshifts json models, which will be pushed
50 | # onto the stack in an ItemEntry
51 | unshift: () ->
52 | return @_queue.pop()
53 | end_event: () ->
54 | return new vrs.DicomEvent(tags.SequenceDelimitationItem, null, null, "end_sequence")
55 |
56 |
57 | ##
58 | # stack structure for traversal
59 | ##
60 | class EmitStack
61 | constructor: () ->
62 | @_len_1 = -1
63 | @_stack = []
64 | push: (data, end_event) ->
65 | @_len_1++
66 | @_stack.push(new ItemEntry(data, end_event))
67 | push_seq: (items) ->
68 | @_len_1++
69 | @_stack.push(new SeqEntry(items))
70 | pop: () ->
71 | @_len_1--
72 | return @_stack.pop()
73 | top: () ->
74 | return @_stack[@_len_1]
75 | unshift: () ->
76 | return @top().unshift()
77 | data: (k) ->
78 | return @top().data(k)
79 | eof: () ->
80 | return @_len_1 == -1
81 |
82 |
83 | ##
84 | #
85 | # Emits DicomEvents for JSON Model
86 | #
87 | # valid options: all stream.readable options
88 | # * transfer_syntax: transfer syntax, defaults to ExplicitVRLittleEndian
89 | ##
90 | class JsonSource extends stream.Readable
91 | constructor: (data, options) ->
92 | if not (this instanceof JsonSource)
93 | return new JsonSource(data, options)
94 | if not options?
95 | options = {}
96 | options.objectMode = true
97 | super(options)
98 | @_stack = new EmitStack()
99 | @_stack.push(data, null)
100 | ts_name = options?.transfer_syntax
101 | ts_name = 'ExplicitVRLittleEndian' if not ts_name
102 | ts = uids.for_uid(ts_name)
103 | @_context = new vrs.Context({}, ts.make_context())
104 | log.trace({context: @_context}, "JsonSource context")
105 | _read: (size) ->
106 | try
107 | log.trace({size: size}, "JsonSource _read")
108 | read_more = true
109 | while read_more
110 | if @_stack.eof()
111 | log.trace "_stack eof: we are done"
112 | @push(null)
113 | read_more = false
114 | return
115 | else
116 | k = @_stack.unshift()
117 | if k?
118 | if typeof(k) == 'string'
119 | [el, v] = @_stack.data(k)
120 | obj = @_dicom_event(el, v)
121 | log.trace obj.log_summary?(), "emitting"
122 | read_more = @push(obj)
123 | if obj.command == 'start_sequence'
124 | log.trace v, "pushing sequence items"
125 | @_stack.push_seq(v.Value)
126 | else
127 | # emitting an item in a sequence
128 | @_stack.push(k, new vrs.DicomEvent(tags.ItemDelimitationItem, null, null, "end_item"))
129 | obj = new vrs.DicomEvent(tags.Item, null, null, "start_item")
130 | log.trace obj.log_summary?(), "emitting start item"
131 | read_more = @push(obj)
132 | else
133 | entry = @_stack.pop()
134 | obj = entry.end_event()
135 | if obj
136 | log.trace obj.log_summary?(), "emitstack end event"
137 | read_more = @push(obj)
138 | return undefined
139 | catch err
140 | @emit 'error', err
141 |
142 | _dicom_event: (el, v) ->
143 | if @_is_seq_value(v)
144 | if (v.vr == 'UN' || v.vr =='SQ')
145 | return new vrs.DicomEvent(el, vrs.for_name(v.vr, @_context), null, "start_sequence")
146 | else
147 | throw new vrs.DicomError("can not emit sequence values for vr " + v.vr + " tag " + el.tag_str)
148 | else
149 | return new vrs.DicomEvent(el, vrs.for_name(v.vr, @_context, null, v.Value), null, "element")
150 |
151 | _is_seq_value: (v) ->
152 | is_seq = true
153 | for _v in v.Value
154 | if !((typeof(_v)=='object') && !util.isArray(_v))
155 | is_seq = false
156 | return is_seq
157 |
158 |
159 | module.exports = JsonSource
160 |
161 |
162 | if require.main is module
163 | tags = require "../../lib/tags"
164 | data = {
165 | "00100020": {vr: "LO", Value: ["007"]},
166 | "00100021": {vr: "LO", Value: ["MI6"]},
167 | "00101002": {vr: "SQ", Value: [{
168 | "00100020": {vr: "LO", Value: ["0815"]},
169 | "00100021": {vr: "LO", Value: ["BND"]}}
170 | ]}}
171 | simple_data = {
172 | "PatientID": "007",
173 | "IssuerOfPatientID": "MI6",
174 | "OtherPatientIDsSequence": [{
175 | "PatientID": "0815",
176 | "IssuerOfPatientID": "BND"}]}
177 | source = new JsonSource(data)
178 | source.pipe process.stdout
179 |
--------------------------------------------------------------------------------
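Combining JsonSource with the encoder gives the shortest path from a literal JSON model to a DICOM file; a sketch under the same assumptions as the demo above (require paths and output file are placeholders):

```coffee
fs = require "fs"
JsonSource = require "./src/json/source"   # hypothetical require paths
Encoder = require "./src/encoder"

data = "00100020": {vr: "LO", Value: ["007"]}
new JsonSource(data)
  .pipe new Encoder()
  .pipe fs.createWriteStream "/tmp/minimal.dcm"
```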
/src/logger.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 |
4 | ##
5 | #
6 | # configurable logging
7 | #
8 | #
9 | ##
10 |
11 | bunyan = require "bunyan"
12 |
13 | ##
14 | #
15 | # Prefix for ENV var based configuration
16 | #
17 | ##
18 | ENV_PREFIX = "NDCM"
19 | DEFAULTS =
20 | 'LEVEL': 'info'
21 | STREAM_DICT =
22 | 'process.stdout': process.stdout,
23 | 'process.stderr': process.stderr
24 |
25 | _env = (what, name) ->
26 | _what = what.toUpperCase()
27 | _name = name.toUpperCase()
28 | return (process.env["#{ENV_PREFIX}_#{_what}_#{_name}"] ?
29 | process.env["#{ENV_PREFIX}_#{_what}"] ?
30 | DEFAULTS[_what])
31 |
32 | logger = (names...) ->
33 | name = names.join("_")
34 | obj =
35 | 'name': name,
36 | 'level': _env('level', name)
37 | stream = _env('stream', name)
38 | if stream and STREAM_DICT[stream]
39 | obj.stream = STREAM_DICT[stream]
40 | path = _env('path', name)
41 | if path
42 | obj.path = path
43 | if not path? and not stream?
44 | obj.stream = process.stderr
45 | return bunyan.createLogger(obj)
46 |
47 | module.exports = logger
48 |
--------------------------------------------------------------------------------
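Configuration is picked up from `NDCM_*` environment variables, resolved per logger name and then globally; a sketch (the logger name "decoder" is just an example):

```coffee
# NDCM_LEVEL=trace             -> level for all loggers
# NDCM_LEVEL_DECODER=debug     -> level for the "decoder" logger only
# NDCM_STREAM=process.stdout   -> log to stdout (default: process.stderr)
# NDCM_PATH_DECODER=/tmp/d.log -> send the "decoder" logger to a file
log = require("./src/logger")("decoder")   # hypothetical require path
log.info "hello from the decoder logger"
```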
/src/pdu.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | stream = require("stream")
4 | printf = require("printf")
5 | readbuffer = require("./readbuffer")
6 | vrs = require("../lib/vrs")
7 |
8 | log = require("./logger")('pdu')
9 |
10 | # helper: total length for array of buffers
11 | total_length = (arr) ->
12 | arr.reduce((sum, buff) ->
13 | return sum + buff.length
14 | , 0)
15 |
16 | ##
17 | # PDUDecoder
18 | #
19 | # Transform-Stream reading from a socket
20 | # and emitting Dicom PDU events
21 | ##
22 | class PDUDecoder extends stream.Transform
23 | constructor: (options)->
24 | if not (this instanceof PDUDecoder)
25 | return new PDUDecoder(options)
26 | super(options)
27 | @_writableState.objectMode = false
28 | @_readableState.objectMode = true
29 | @__buffer = readbuffer()
30 | @__saved = @__buffer.copy()
31 |
32 | _transform: (chunk, encoding, cb) ->
33 | @__buffer.push chunk
34 | log.debug({buffer: @__buffer.log_summary()}, "_transform") if log.debug()
35 | @__consume_pdu()
36 | log.debug({buffer: @__buffer.log_summary()}, "_transform done, calling cb") if log.debug()
37 | cb()
38 |
39 | _flush: (cb) ->
40 | log.debug("_flush")
41 | cb()
42 |
43 | __consume_pdu: () ->
44 | try
45 | @__saved = @__buffer.copy()
46 | __header = @__buffer.consume(6)
47 | __type = __header[0]
48 | __length = __header.readUInt32BE(2)
49 | __pdubuff = @__buffer.consume(__length)
50 | __constr = pdu_constructor(__type)
51 | if not __constr?
52 | throw new vrs.DicomError("Unrecognized PDU: #{__type}")
53 | __pdu = new __constr(__pdubuff)
54 | log.trace({pdu: __pdu}, "__consume_pdu")
55 | # log.trace({header: __header, pdubuff: __pdubuff}, "pdu buffers")
56 | @push(__pdu)
57 | catch err
58 | if err?.needMoreInput
59 | @__buffer = @__saved
60 | log.debug({needMoreInput: err.needMoreInput, buffer: @__buffer.log_summary(), error: err},
61 | "_action_wrapper: restored buffer after NeedMoreInput")
62 | else
63 | log.error({error: err}, "__consume_pdu: error")
64 | @emit 'error', err
65 |
66 |
67 | class PDU
68 | _json_name: true
69 | _single_value: false
70 |
71 | constructor: (buff_or_json) ->
72 | if buff_or_json instanceof Buffer
73 | @_buff = buff_or_json
74 | @decode()
75 | delete @_buff
76 | else
77 | @from_json buff_or_json
78 | return this
79 |
80 | decode_var_items: (start, end) ->
81 | log.trace({start: start, end: end}, "PDU.decode_var_items") if log.trace()
82 | while start < end
83 | log.trace({start: start, end: end}, "PDU.decode_var_items") if log.trace()
84 | _item = @decode_item(start)
85 | _name = _item.name
86 | _cnt = @var_item_counts[_name]
87 | if _cnt is 1
88 | if this[_name]
89 | throw new vrs.DicomError("Only one #{_name} allowed")
90 | else
91 | this[_name] = _item
92 | else
93 | if not this[_name]?
94 | this[_name] = []
95 | this[_name].push _item
96 | start = _item._end
97 | return undefined
98 |
99 | decode_item: (start) ->
100 | _type = @_buff[start]
101 | _length = @_buff.readUInt16BE(start + 2)
102 | _constr = item_constructor(_type)
103 | if not _constr?
104 | log.warn({type: printf("%02X", _type), start: start, length: _length}, "PDU item not implemented")
105 | return {type: _type, name: 'unknown', _start: start, _end: start + 4 + _length}
106 | else
107 | _item = new _constr(@_buff, start, start + 4 + _length)
108 | log.trace(_item.log_summary(), "decoded item") if log.trace()
109 | return _item
110 |
111 | log_summary: () ->
112 | _summary = {}
113 | for k,v of this
114 | if k != '_buff'
115 | if v?.log_summary?
116 | v = v.log_summary()
117 | _summary[k] = v
118 | return _summary
119 |
120 | to_json: () ->
121 | if @value?
122 | return @value
123 | _json = if @_json_name then {name: @name} else {}
124 | _item_value = (item) ->
125 | if item?.to_json?
126 | item = item.to_json()
127 | return item
128 | for _k,_v of @var_item_counts
129 | if _v == 1
130 | _item = this[_k]
131 | _json[_k] = _item_value(_item)
132 | else if this[_k]?
133 | _json[_k] = for _item in this[_k]
134 | _item_value(_item)
135 | return _json
136 |
137 | from_json: (json) ->
138 | if @_single_value
139 | @value = json
140 | return
141 | for _k, _v of @var_item_counts
142 | _constr = ITEM_BY_NAME[_k]
143 | if not _constr?
144 | throw new vrs.DicomError "no such item: #{_k}"
145 | log.trace {name: _k, count: _v, constr: _constr}, "named item"
146 | if _v == 1
147 | this[_k] = new _constr(json[_k])
148 | else
149 | if json[_k]
150 | this[_k] = for _x in json[_k]
151 | new _constr(_x)
152 | log.trace {json: json[_k], result: this[_k]}, "from json result"
153 | return
154 |
155 | class PDUAssociateRq extends PDU
156 | type: 0x01
157 | name: 'association_rq'
158 | var_item_counts:
159 | application_context: 1
160 | presentation_context: -1
161 | user_information: 1
162 |
163 | decode: () ->
164 | @_protocol_version = @_buff.readUInt16BE(0)
165 | @called_aet_title = @_buff.slice(4, 20).toString().trim()
166 | @calling_aet_title = @_buff.slice(20, 36).toString().trim()
167 | @decode_var_items(68, @_buff.length)
168 |
169 | encode: () ->
170 | _buffers = ['']
171 | _buffers.push @application_context.encode()
172 | for item in @presentation_context
173 | _buffers.push item.encode()
174 | _buffers.push @user_information.encode()
175 | # _buffers.push @user_information.encode()
176 | _var_len = total_length(_buffers)
177 | _header = Buffer.concat([new Buffer([0x01, 0x00]), mk_uint32(68 + _var_len),
178 | # protocol version
179 | mk_uint16(1),
180 | # reserved
181 | new Buffer([0x00, 0x00]),
182 | # called & calling aet title
183 | new Buffer(printf("%-16s%-16s", @called_aet_title.substr(0,16),
184 | @calling_aet_title.substr(0,16)), 'binary'),
185 | # 32 reserved bytes
186 | ZERO_BUFF.slice(0, 32) ])
187 | _buffers[0] = _header
188 | return Buffer.concat(_buffers)
189 |
190 | to_json: () ->
191 | _json = super()
192 | _json.called_aet_title = @called_aet_title
193 | _json.calling_aet_title = @calling_aet_title
194 | return _json
195 |
196 | from_json: (json) ->
197 | @called_aet_title = json.called_aet_title
198 | @calling_aet_title = json.calling_aet_title
199 | return super(json)
200 |
201 | class PDUAssociateAc extends PDUAssociateRq
202 |
203 | class Item extends PDU
204 | _json_name: false
205 |
206 | constructor: (buff_or_json, start, end) ->
207 | if buff_or_json instanceof Buffer
208 | @_start = start
209 | @_end = end
210 | super(buff_or_json)
211 |
212 | ui_str: (offset) ->
213 | _start = @_start + offset
214 | _str = @_buff.toString('binary', _start, @_end)
215 | _ui = trim_ui(_str)
216 | # log.trace(start: _start, end: @_end, str: _str, ui:_ui, length: @_buff.length, buff: @_buff.slice(_start, @_end), "ui_str")
217 | return _ui
218 |
219 | encode_value_str: () ->
220 | # log.trace(length: @value.length, value: @value, "encode_value_str")
221 | return Buffer.concat [new Buffer([@type, 0]), mk_uint16(@value.length), new Buffer(@value, 'binary')]
222 |
223 | class ApplicationContextItem extends Item
224 | type: 0x10
225 | name: 'application_context'
226 | _single_value: true
227 | decode: () ->
228 | @value = @ui_str(4)
229 | encode: () ->
230 | return @encode_value_str()
231 |
232 | class PresentationContextItem extends Item
233 | type: 0x20
234 | name: 'presentation_context'
235 | var_item_counts:
236 | abstract_syntax: 1
237 | transfer_syntax: -1
238 | decode: () ->
239 | @id = @_buff[@_start + 4]
240 | @decode_var_items(@_start + 8, @_end)
241 | encode: () ->
242 | _buffers = [
243 | '',
244 | @fixed_fields(),
245 | @abstract_syntax.encode()
246 | Buffer.concat(_ts.encode() for _ts in @transfer_syntax)
247 | ]
248 | _len = total_length(_buffers)
249 | _header = Buffer.concat([new Buffer([0x20, 0]), mk_uint16(_len)])
250 | _buffers[0] = _header
251 | return Buffer.concat(_buffers)
252 |
253 | fixed_fields: () ->
254 | new Buffer([@id, 0, 0, 0])
255 |
256 | to_json: () ->
257 | _json = super()
258 | _json.id = @id
259 | return _json
260 |
261 | from_json: (json) ->
262 | @id = json.id
263 | return super(json)
264 |
265 | class PresentationContextItemAc extends PresentationContextItem
266 | type: 0x21
267 | name: 'presentation_context_ac'
268 | decode: () ->
269 | super()
270 | @resultReason = @_buff[@_start + 6]
271 | fixed_fields: () ->
272 | new Buffer([@id, 0, @resultReason, 0])
273 | to_json: () ->
274 | _json = super()
275 | _json.resultReason = @resultReason
276 | return _json
277 | from_json: (json) ->
278 | @resultReason = json.resultReason
279 | return super(json)
280 |
281 |
282 | class AbstractSyntaxItem extends Item
283 | type: 0x30
284 | name: 'abstract_syntax'
285 | _single_value: true
286 | decode: () ->
287 | @value = @ui_str(4)
288 | encode: () ->
289 | return @encode_value_str()
290 |
291 | class TransferSyntaxItem extends Item
292 | type: 0x40
293 | name: 'transfer_syntax'
294 | _single_value: true
295 | decode: () ->
296 | @value = @ui_str(4)
297 | encode: () ->
298 | return @encode_value_str()
299 |
300 | class UserInformationItem extends Item
301 | type: 0x50
302 | name: 'user_information'
303 | var_item_counts:
304 | maximum_length: 1
305 | asynchronous_operations_window: 1
306 | implementation_class_uid: 1
307 | implementation_version_name: 1
308 | scp_scu_role_selection: -1
309 | decode: () ->
310 | @decode_var_items(@_start + 4, @_end)
311 | encode: () ->
312 | _buffers = [
313 | '',
314 | @maximum_length.encode()
315 | @implementation_class_uid.encode()
316 | @asynchronous_operations_window.encode()
317 | ]
318 | if @scp_scu_role_selection
319 | _buffers.push Buffer.concat(_rs.encode() for _rs in @scp_scu_role_selection)
320 | _buffers.push @implementation_version_name.encode()
321 | _len = total_length(_buffers)
322 | _header = Buffer.concat([new Buffer([@type, 0]), mk_uint16(_len)])
323 | _buffers[0] = _header
324 | return Buffer.concat(_buffers)
325 |
326 |
327 | class MaximumLengthItem extends Item
328 | type: 0x51
329 | name: 'maximum_length'
330 | _single_value: true
331 | decode: () ->
332 | @value = @_buff.readUInt32BE(@_start + 4)
333 | encode: () ->
334 | _vbuff = new Buffer(4)
335 | _vbuff.writeUInt32BE(@value, 0)
336 | return Buffer.concat [new Buffer([@type, 0]), mk_uint16(4), _vbuff]
337 |
338 | class ImplementationClassUidItem extends Item
339 | type: 0x52
340 | name: 'implementation_class_uid'
341 | _single_value: true
342 | decode: () ->
343 | @value = @ui_str(4)
344 | encode: () ->
345 | return @encode_value_str()
346 |
347 | class AsynchronousOperationsWindowItem extends Item
348 | type: 0x53
349 | name: 'asynchronous_operations_window'
350 | decode: () ->
351 | @maximum_number_operations_invoked = @_buff.readUInt16BE(@_start + 4)
352 | @maximum_number_operations_performed = @_buff.readUInt16BE(@_start + 6)
353 | to_json: () ->
354 | _json = super()
355 | _json.maximum_number_operations_invoked = @maximum_number_operations_invoked
356 | _json.maximum_number_operations_performed = @maximum_number_operations_performed
357 | return _json
358 | from_json: (json) ->
359 | @maximum_number_operations_invoked = json.maximum_number_operations_invoked
360 | @maximum_number_operations_performed = json.maximum_number_operations_performed
361 | return super(json)
362 | encode: () ->
363 | _vbuff = new Buffer(4)
364 | _vbuff.writeUInt16BE(@maximum_number_operations_invoked, 0)
365 | _vbuff.writeUInt16BE(@maximum_number_operations_performed, 2)
366 | return Buffer.concat [new Buffer([@type, 0]), mk_uint16(4), _vbuff]
367 |
368 |
369 | class ScpScuRoleSelectionItem extends Item
370 | type: 0x54
371 | name: 'scp_scu_role_selection'
372 | decode: () ->
373 | _uid_length = @_buff.readUInt16BE(@_start + 4)
374 | _start = @_start + 6
375 | _end = @_start + 6 + _uid_length
376 | @sop_class_uid = trim_ui(@_buff.toString('binary', _start, _end))
377 | @scu_role = @_buff[_end]
378 | @scp_role = @_buff[_end + 1]
379 | to_json: () ->
380 | _json = super()
381 | _json.sop_class_uid = @sop_class_uid
382 | _json.scu_role = @scu_role
383 | _json.scp_role = @scp_role
384 | return _json
385 | from_json: (json) ->
386 | @sop_class_uid = json.sop_class_uid
387 | @scu_role = json.scu_role
388 | @scp_role = json.scp_role
389 | return super(json)
390 | encode: () ->
391 | _buffers = [
392 | '',
393 | mk_uint16(@sop_class_uid.length)
394 | new Buffer(@sop_class_uid, 'binary')
395 | new Buffer([@scu_role, @scp_role])
396 | ]
397 | _len = total_length(_buffers)
398 | _header = Buffer.concat([new Buffer([@type, 0]), mk_uint16(_len)])
399 | _buffers[0] = _header
400 | # log.trace(buffers: _buffers, "ScpScuRoleSelection>>encode")
401 | return Buffer.concat(_buffers)
402 |
403 |
404 |
405 | class ImplementationVersionNameItem extends Item
406 | type: 0x55
407 | name: 'implementation_version_name'
408 | _single_value: true
409 | decode: () ->
410 | @value = @ui_str(4)
411 | encode: () ->
412 | return @encode_value_str()
413 |
414 | trim_ui = (str) ->
415 | _len = str.length
416 | if _len > 0 and str[_len - 1] == '\x00'
417 | str.slice(0, -1)
418 | else
419 | str
420 |
421 | PDU_BY_TYPE =
422 | '01': PDUAssociateRq
423 | '02': PDUAssociateAc
424 |
425 | PDU_BY_NAME =
426 | 'association_rq': PDUAssociateRq
427 | 'association_ac': PDUAssociateAc
428 | pdu_constructor = (type) ->
429 | _hex = printf("%02X", type)
430 | return PDU_BY_TYPE[_hex]
431 | pdu_by_name = (arg) ->
432 | if arg instanceof PDU
433 | return arg
434 | return new (PDU_BY_NAME[arg.name])(arg)
435 |
436 |
437 | ITEM_BY_TYPE =
438 | '10': ApplicationContextItem
439 | '20': PresentationContextItem
440 | '21': PresentationContextItemAc
441 | '30': AbstractSyntaxItem
442 | '40': TransferSyntaxItem
443 | '50': UserInformationItem
444 | '51': MaximumLengthItem
445 | '52': ImplementationClassUidItem
446 | '53': AsynchronousOperationsWindowItem
447 | '54': ScpScuRoleSelectionItem
448 | '55': ImplementationVersionNameItem
449 |
450 | ITEM_BY_NAME =
451 | 'application_context': ApplicationContextItem
452 | 'presentation_context': PresentationContextItem
453 | 'presentation_context_ac': PresentationContextItemAc
454 | 'abstract_syntax': AbstractSyntaxItem
455 | 'transfer_syntax': TransferSyntaxItem
456 | 'user_information': UserInformationItem
457 | 'maximum_length': MaximumLengthItem
458 | 'implementation_class_uid': ImplementationClassUidItem
459 | 'asynchronous_operations_window': AsynchronousOperationsWindowItem
460 | 'scp_scu_role_selection': ScpScuRoleSelectionItem
461 | 'implementation_version_name': ImplementationVersionNameItem
462 |
463 | item_constructor = (type) ->
464 | _hex = printf("%02X", type)
465 | return ITEM_BY_TYPE[_hex]
466 |
467 | ZERO_BUFF = new Buffer(128)
468 |
469 | ##
470 | # PDUEncoder
471 | #
472 | # Transform-Stream reading PDU js objects
473 | # and emitting pdu buffer
474 | ##
475 | class PDUEncoder extends stream.Transform
476 | constructor: (options)->
477 | if not (this instanceof PDUEncoder)
478 | return new PDUEncoder(options)
479 | super(options)
480 | @_writableState.objectMode = true
481 | @_readableState.objectMode = false
482 |
483 | _transform: (pdu, _, cb) ->
484 | try
485 | __buff = pdu_by_name(pdu).encode()
486 | log.trace({length: __buff.length}, "_transform: emitting pdu buffer")
487 | @push __buff
488 | cb()
489 | catch err
490 | log.error({error: err}, "_transform: error")
491 | cb(err)
492 |
493 | _flush: () ->
494 | log.debug("_flush")
495 |
496 | mk_uint16 = (num) ->
497 | _buff = new Buffer(2)
498 | _buff.writeUInt16BE(num, 0)
499 | return _buff
500 | mk_uint32 = (num) ->
501 | _buff = new Buffer(4)
502 | _buff.writeUInt32BE(num, 0)
503 | return _buff
504 |
505 | exports.PDUDecoder = PDUDecoder
506 | exports.PDUEncoder = PDUEncoder
507 | exports.PDUAssociateRq = PDUAssociateRq
508 |
509 |
510 |
511 | net = require 'net'
512 |
513 | echo_scu = (opts, cb) ->
514 | _aet = opts.aet
515 | _local = opts.local_aet
516 | _conn = net.connect opts, () ->
517 | console.log "connected to", opts
518 | _enc.write {
519 | "name": "association_rq",
520 | "called_aet_title": _aet,
521 | "calling_aet_title": _local,
522 | "application_context": "1.2.840.10008.3.1.1.1"
523 | "presentation_context": [
524 | "id": 1,
525 | "abstract_syntax": "1.2.840.10008.1.1",
526 | "transfer_syntax": ["1.2.840.10008.1.2"]]
527 | "user_information":
528 | "maximum_length": 16384
529 | # "implementation_class_uid": "1.2.40.0.13.1.1" # dcm4chee
530 | "implementation_class_uid": "1.2.40.0.13.1.1.47"
531 | "asynchronous_operations_window":
532 | "maximum_number_operations_invoked": 0
533 | "maximum_number_operations_performed": 0
534 | "implementation_version_name": "node-dicom"
535 | }
536 |
537 | _enc = new PDUEncoder()
538 | _dec = new PDUDecoder()
539 | _enc.pipe _conn
540 | _conn.pipe _dec
541 | # _conn.on 'data', (data) ->
542 | # require('fs').writeFileSync("/tmp/x.x", data)
543 |
544 | _conn.on 'error', cb
545 | _enc.on 'error', cb
546 | _dec.on 'error', cb
547 |
548 | _dec.on 'data', (data) ->
549 | console.log "RECEIVED:", JSON.stringify(data, null, 2)
550 |
551 |
552 | if require.main is module
553 | if true
554 | echo_scu host: "192.168.2.19", port: 11112, aet: "TESTME", local_aet: "XXX", (err, data) ->
555 | if err
556 | console.log "ERROR: ", err
557 | console.log "stack:", err.stack
558 | process.exit(1)
559 | console.log "DATA:", data
560 | if false
561 | require('fs').createReadStream("/tmp/x.x")
562 | .pipe new PDUDecoder()
--------------------------------------------------------------------------------
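For offline experiments, the decoder also works on a captured byte stream, as the disabled branch of the `require.main` block above hints; the capture file is a placeholder:

```coffee
fs = require "fs"
{PDUDecoder} = require "./src/pdu"   # hypothetical require path

fs.createReadStream "/tmp/x.x"       # e.g. bytes captured off a socket
  .pipe new PDUDecoder()
  .on 'data', (pdu) -> console.log JSON.stringify(pdu, null, 2)
  .on 'error', (err) -> console.error err
```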
/src/readbuffer.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 |
4 | log = require("./logger")("readbuffer")
5 |
6 | ##
7 | # ReadBuffer for TransformStream
8 | #
9 | # can push buffers into it
10 | # maintains stream position
11 | ##
12 | class ReadBuffer
13 | constructor: () ->
14 | if not (this instanceof ReadBuffer)
15 | return new ReadBuffer()
16 | # current offset in the first buffer
17 | @offset = 0
18 | # read position in the stream of input buffers
19 | @stream_position = 0
20 | # additional queued buffers
21 | @buffers = []
22 | # sum of all buffers minus offset
23 | @length = 0
24 |
25 | # log summary for bunyan
26 | log_summary: () ->
27 | summary =
28 | offset: @offset,
29 | stream_position: @stream_position
30 | length: @length
31 | num_buffers: @buffers.length
32 | return summary
33 |
34 | # make a shallow copy - enough so we can restore state,
35 | # since the buffers themselves are never modified while reading
36 | copy: () ->
37 | rb = new ReadBuffer()
38 | rb.offset = @offset
39 | rb.stream_position = @stream_position
40 | rb.buffers = (_ for _ in @buffers)
41 | rb.length = @length
42 | return rb
43 |
44 | push: (buffer) ->
45 | rc = @buffers.push buffer
46 | @length += buffer.length
47 | rc
48 |
49 | has: (num_bytes) ->
50 | num_bytes <= @length
51 |
52 | # will consume exactly `bytes` bytes
53 | # only call this if the buffer has that many bytes
54 | consume: (bytes) ->
55 | # log.trace @log_summary(), "consume" if log.trace()
56 | if not @has(bytes)
57 | throw new NeedMoreInput(bytes)
58 | if bytes == 0
59 | return new Buffer(0)
60 | end = @offset + bytes
61 | buff = @buffers[0]
62 | # easy/fast case: first buffer sufficient
63 | if end <= buff.length
64 | dst = buff.slice(@offset, end)
65 | @offset += bytes
66 | else
67 | # more complicated case: have to combine multiple buffers
68 | dst = new Buffer(bytes)
69 | buff.copy(dst, 0, @offset, buff.length)
70 | dstPos = len = buff.length - @offset
71 | @offset = 0
72 | @buffers.shift()
73 | numBytes = bytes - len
74 | while numBytes > 0
75 | buff = @buffers[0]
76 | len = Math.min(numBytes, buff.length)
77 | buff.copy(dst, dstPos, 0, len)
78 | numBytes -= len
79 | dstPos += len
80 | if len == buff.length
81 | @buffers.shift()
82 | len = 0
83 | @offset = len
84 |
85 | @length -= bytes
86 | @stream_position += bytes
87 | if @offset == buff.length
88 | @offset = 0
89 | @buffers.shift()
90 | return dst
91 |
92 | # will consume at most `bytes` bytes, as much as we have right now
93 | # this avoids copying when streaming out bulk data
94 | # also this throws a special NeedMoreInput that
95 | # will not cause the buffer state to be restored.
96 | easy_consume: (bytes) ->
97 | if @length == 0
98 | throw new NeedMoreInput(0, true)
99 | end = @offset + bytes
100 | buff = @buffers[0]
101 | if end > buff.length
102 | end = buff.length
103 | bytes = buff.length - @offset
104 | dst = buff.slice(@offset, end)
105 | @offset += bytes
106 | @length -= bytes
107 | @stream_position += bytes
108 | if @offset == buff.length
109 | @offset = 0
110 | @buffers.shift()
111 | return dst
112 |
113 | # this only works for ascii range separators, probably
114 | # lf or cr should be safe
115 | indexOf: (needle) ->
116 | if @length == 0
117 | return -1
118 |
119 | what = (new Buffer(needle))[0]
120 | buffers = @buffers
121 | buffers_length = @buffers.length
122 | buff = buffers[0]
123 | buff_length = buff.length
124 | offset = @offset
125 |
126 | for i in [offset...buff_length]
127 | if buff[i] == what
128 | return i - offset
129 |
130 | dpos = buff_length - offset
131 | for j in [1...buffers_length]
132 | buff = buffers[j]
133 | buff_length = buff.length
134 |
135 | for i in [0...buff_length]
136 | if buff[i] == what
137 | return dpos + i
138 |
139 | dpos += buff_length
140 |
141 | return -1
142 |
143 | # is a full line present in the buffer?
144 | # returns line length (including newline)
145 | # 0 if no full line present
146 | has_line: () ->
147 | idx = @indexOf '\n'
148 | return if idx >= 0 then idx + 1 else 0
149 |
150 | class NeedMoreInput extends Error
151 | constructor: (@needMoreInput, @doNotRestore) ->
152 | super("Need #{@needMoreInput} more input.")
153 |
154 | module.exports = ReadBuffer
155 |
--------------------------------------------------------------------------------
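The consume/easy_consume split above is easiest to see with two pushed chunks; a sketch (require path and byte values are arbitrary):

```coffee
ReadBuffer = require "./src/readbuffer"   # hypothetical require path

rb = new ReadBuffer()
rb.push new Buffer([0x01, 0x02])
rb.push new Buffer([0x03, 0x04, 0x05])

rb.has 4                # true - 5 bytes are buffered
rb.consume 4            # <Buffer 01 02 03 04>, copied across both chunks
rb.stream_position      # 4
rb.easy_consume 10      # <Buffer 05> - at most what is available, no copy
```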
/src/vrs.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 | #
4 |
5 | log = require("./logger")("vrs")
6 | iconv = require("iconv-lite")
7 |
8 | UNDEFINED_LENGTH = 0xFFFFFFFF
9 |
10 | class DicomError extends Error
11 |
12 | class UnexpectedEofOfFile extends DicomError
13 |
14 | exports.UNDEFINED_LENGTH = UNDEFINED_LENGTH
15 | exports.DicomError = DicomError
16 | exports.UnexpectedEofOfFile = UnexpectedEofOfFile
17 |
18 | ##
19 | # little/big endian helpers
20 | #
21 | # only 2 instances will ever be used:
22 | # LITTLE_ENDIAN, BIG_ENDIAN
23 | ##
24 | class LittleEndian
25 | constructor: () ->
26 | @endianess = "LittleEndian" # for ease of debugging
27 | # without @endianess, you just see {} when dumping
28 | # the endianess object
29 | unpack_uint16: (buff) ->
30 | buff.readUInt16LE(0)
31 | unpack_uint16s: (buff, num) ->
32 | for i in [0...num]
33 | offset = 2*i
34 | buff.readUInt16LE(offset)
35 | unpack_int16s: (buff, num) ->
36 | for i in [0...num]
37 | offset = 2*i
38 | buff.readInt16LE(offset)
39 | unpack_uint32: (buff) ->
40 | buff.readUInt32LE(0)
41 | unpack_uint32s: (buff, num) ->
42 | for i in [0...num]
43 | offset = 4*i
44 | buff.readUInt32LE(offset)
45 | unpack_int32s: (buff, num) ->
46 | for i in [0...num]
47 | offset = 4*i
48 | buff.readInt32LE(offset)
49 | unpack_floats: (buff, num) ->
50 | for i in [0...num]
51 | offset = 4*i
52 | buff.readFloatLE(offset)
53 | unpack_doubles: (buff, num) ->
54 | for i in [0...num]
55 | offset = 8*i
56 | buff.readDoubleLE(offset)
57 |
58 | pack_uint16s: (values) ->
59 | buff = new Buffer(values.length * 2)
60 | for idx in [0...values.length]
61 | offset = 2 * idx
62 | buff.writeUInt16LE(values[idx], offset)
63 | buff
64 | pack_uint32s: (values) ->
65 | buff = new Buffer(values.length * 4)
66 | for idx in [0...values.length]
67 | offset = 4 * idx
68 | buff.writeUInt32LE(values[idx], offset)
69 | buff
70 | pack_int16s: (values) ->
71 | buff = new Buffer(values.length * 2)
72 | for idx in [0...values.length]
73 | offset = 2 * idx
74 | buff.writeInt16LE(values[idx], offset)
75 | buff
76 | pack_int32s: (values) ->
77 | buff = new Buffer(values.length * 4)
78 | for idx in [0...values.length]
79 | offset = 4 * idx
80 | buff.writeInt32LE(values[idx], offset)
81 | buff
82 | pack_floats: (values) ->
83 | buff = new Buffer(values.length * 4)
84 | for idx in [0...values.length]
85 | offset = 4 * idx
86 | buff.writeFloatLE(values[idx], offset)
87 | buff
88 | pack_doubles: (values) ->
89 | buff = new Buffer(values.length * 8)
90 | for idx in [0...values.length]
91 | offset = 8 * idx
92 | buff.writeDoubleLE(values[idx], offset)
93 | buff
94 |
95 | class BigEndian
96 | constructor: () ->
97 | @endianess = "BigEndian" # for ease of debugging
98 | unpack_uint16: (buff) ->
99 | buff.readUInt16BE(0)
100 | unpack_uint16s: (buff, num) ->
101 | for i in [0...num]
102 | offset = 2*i
103 | buff.readUInt16BE(offset)
104 | unpack_int16s: (buff, num) ->
105 | for i in [0...num]
106 | offset = 2*i
107 | buff.readInt16BE(offset)
108 | unpack_uint32: (buff) ->
109 | buff.readUInt32BE(0)
110 | unpack_uint32s: (buff, num) ->
111 | for i in [0...num]
112 | offset = 4*i
113 | buff.readUInt32BE(offset)
114 | unpack_int32s: (buff, num) ->
115 | for i in [0...num]
116 | offset = 4*i
117 | buff.readInt32BE(offset)
118 | unpack_floats: (buff, num) ->
119 | for i in [0...num]
120 | offset = 4*i
121 | buff.readFloatBE(offset)
122 | unpack_doubles: (buff, num) ->
123 | for i in [0...num]
124 | offset = 8*i
125 | buff.readDoubleBE(offset)
126 |
127 | pack_uint16s: (values) ->
128 | buff = new Buffer(values.length * 2)
129 | for idx in [0...values.length]
130 | offset = 2 * idx
131 | buff.writeUInt16BE(values[idx], offset)
132 | buff
133 | pack_uint32s: (values) ->
134 | buff = new Buffer(values.length * 4)
135 | for idx in [0...values.length]
136 | offset = 4 * idx
137 | buff.writeUInt32BE(values[idx], offset)
138 | buff
139 | pack_int16s: (values) ->
140 | buff = new Buffer(values.length * 2)
141 | for idx in [0...values.length]
142 | offset = 2 * idx
143 | buff.writeInt16BE(values[idx], offset)
144 | buff
145 | pack_int32s: (values) ->
146 | buff = new Buffer(values.length * 4)
147 | for idx in [0...values.length]
148 | offset = 4 * idx
149 | buff.writeInt32BE(values[idx], offset)
150 | buff
151 | pack_floats: (values) ->
152 | buff = new Buffer(values.length * 4)
153 | for idx in [0...values.length]
154 | offset = 4 * idx
155 | buff.writeFloatBE(values[idx], offset)
156 | buff
157 | pack_doubles: (values) ->
158 | buff = new Buffer(values.length * 8)
159 | for idx in [0...values.length]
160 | offset = 8 * idx
161 | buff.writeDoubleBE(values[idx], offset)
162 | buff
163 |
164 | LITTLE_ENDIAN = new LittleEndian()
165 | BIG_ENDIAN = new BigEndian()
166 |
167 | ##
168 | #
169 | # Context for coding/decoding.
170 | #
171 | # This keeps track of parameters needed for parsing.
172 | # After creation, the context can not be changed.
173 | # (for the javascript implementation - just leave it alone,
174 | # will you).
175 | # A new context can be created from an old one,
176 | # with some fields changed.
177 | #
178 | ##
179 | class Context
180 | ##
181 | # Attributes of ctx are copied but overruled by the arguments, if present
182 | # * endianess: LITTLE_ENDIAN or BIG_ENDIAN
183 | # * charset: "binary" (=latin1), "utf-8", not the 0008,0005 values
184 | # * explicit: explicit or implicit ts in effect
185 | # * encapsulated: inside encapsulated OB
186 | ##
187 | constructor: (ctx, obj) ->
188 | @endianess = obj.endianess ? ctx?.endianess ? LITTLE_ENDIAN
189 | @charset = obj.charset ? ctx?.charset ? "latin1"
190 | @explicit = obj.explicit ? ctx?.explicit ? true
191 | @encapsulated = obj.encapsulated ? ctx?.encapsulated ? false
192 |
193 | ##
194 | # Stack of Dicom Contexts.
195 | #
196 | # They keep track of end positions for nested data elements,
197 | # and actions to perform once that position is reached.
198 | #
199 | # Note that the actions will only be performed when
200 | # the context is autopopped, not when a Sequence/Item
201 | # Delimitation Item ends the context.
202 | #
203 | # This is because the end-action is used to emit
204 | # end-events, even without the end item from the dicom stream.
205 | ##
206 | class CSObj # an entry in the context stack
207 | constructor: (@context, @end_position, @action) ->
208 |
209 | class ContextStack
210 | constructor: () ->
211 | @_stack = []
212 |
213 | ##
214 | # push a new dicom context with optional end_position and action
215 | #
216 | ##
217 | push: (obj, end_position, action) ->
218 | context = new Context(@top() ? {}, obj)
219 | csobj = new CSObj(context, end_position, action)
220 | log.trace("pushing context: #{csobj}") if log.trace()
221 | rc = @_stack.push csobj
222 | log.trace({context: @log_summary()}, "pushed context, this is current now!") if log.trace()
223 |
224 | ##
225 | # replace the root dicom context
226 | #
227 | #
228 | ##
229 | replace_root: (obj) ->
230 | # if @_stack.length > 1
231 | # throw new DicomError("ContextStack:replace not allowed unless stack depth = 1: #{@_stack.length}")
232 | context = new Context(@_stack[0].context, obj)
233 | @_stack[0].context = context
234 | log.trace({context: @log_summary()}, "replaced root context") if log.trace()
235 |
236 | # replace top context
237 | replace_top: (obj) ->
238 | csobj = @_stack[@_stack.length - 1]
239 | context = new Context(csobj.context, obj)
240 | csobj.context = context
241 | log.trace({context: @log_summary()}, "replaced top context") if log.trace()
242 |
243 | pop: () ->
244 | csobj = @_stack.pop()
245 | log.trace({context: @log_summary()}, "popped context stack, this is current now!") if log.trace()
246 | csobj.context
247 |
248 | handle_autopops: (pos) ->
249 | top = @_stack[@_stack.length - 1]
250 | if top.end_position?
251 | if pos < top.end_position
252 | log.trace("handle_autopops: pos #{pos}, not reached end pos #{top.end_position}") if log.trace()
253 | else
254 | log.trace("handle_autopops: pos #{pos}, reached end pos #{top.end_position}") if log.trace()
255 | top.action()
256 | @_stack.pop()
257 | return @handle_autopops(pos)
258 | else
259 | log.trace("handle_autopops: stream position #{pos}, but no context with autopop on top") if log.trace()
260 | this
261 |
262 | top: () ->
263 | @_stack[@_stack.length - 1]?.context
264 |
265 | top_little_endian: () ->
266 | _top = @top()
267 | return new Context(_top, {endianess: LITTLE_ENDIAN})
268 |
269 | stack_depth: () ->
270 | @_stack.length
271 |
272 | log_summary: () ->
273 | context = @top()
274 | summary =
275 | endianess: context.endianess
276 | charset: context.charset
277 | explicit: context.explicit
278 | encapsulated: context.encapsulated
279 | stack_depth: @_stack.length
280 |
281 | ##
282 | #
283 | # DICOM / iconv character sets:
284 | #
285 | # Default: ISO-IR 6 - Default - ASCII (binary)
286 | # Single value but not ISO_IR 192 or GB18030:
287 | # one of the ISO-8859 8 bit character sets, no extensions
288 | # multi value but not ISO_IR 192 or GB18030:
289 | # one of the ISO-8859 8 bit character sets, character extensions
290 | # ISO-IR 192 or GB18030:
291 | # multi-value character sets, no extension, 0008,0005 must
292 | # be single valued
293 | #
294 | # List of isoir* encodings for iconv-lite:
295 | # dbcs-data.js: 'isoir58': 'gbk',
296 | # dbcs-data.js: 'isoir149': 'cp949',
297 | # sbcs-data.js: "isoir6": "ascii",
298 | # sbcs-data.js: "isoir14": "iso646jp",
299 | # sbcs-data.js: "isoir57": "iso646cn",
300 | # sbcs-data.js: "isoir100": "iso88591",
301 | # sbcs-data.js: "isoir101": "iso88592",
302 | # sbcs-data.js: "isoir109": "iso88593",
303 | # sbcs-data.js: "isoir110": "iso88594",
304 | # sbcs-data.js: "isoir144": "iso88595",
305 | # sbcs-data.js: "isoir127": "iso88596",
306 | # sbcs-data.js: "isoir126": "iso88597",
307 | # sbcs-data.js: "isoir138": "iso88598",
308 | # sbcs-data.js: "isoir148": "iso88599",
309 | # sbcs-data.js: "isoir157": "iso885910",
310 | # sbcs-data.js: "isoir166": "tis620",
311 | # sbcs-data.js: "isoir179": "iso885913",
312 | # sbcs-data.js: "isoir199": "iso885914",
313 | # sbcs-data.js: "isoir203": "iso885915",
314 | # sbcs-data.js: "isoir226": "iso885916",
315 | #
316 | ##
317 |
318 | _iconv_charset = (spec_cs) ->
319 | switch spec_cs
320 | when 'GB18030' then 'gb18030'
321 | when 'ISO_IR 192' then 'utf8'
322 | else
323 | if not spec_cs
324 | return "latin1"
325 | _match = spec_cs.match(/^ISO_IR (\d+)/)
326 | if _match
327 | return 'isoir' + _match[1]
328 | # iconv-lite does not seem to have the ISO 2022 encodings ... :-(
329 | return spec_cs
330 |
331 | ##
332 | #
333 | # DicomEvent for emitting
334 | #
335 | ##
336 | class DicomEvent
337 | constructor: (@element, @vr, @offset, @command, @raw, @bulkdata_offset, @bulkdata_length) ->
338 | log_summary: () ->
339 | summary =
340 | element: @element?.log_summary?(),
341 | vr: @vr?.log_summary?(),
342 | offset: @offset,
343 | command: @command,
344 | raw: @raw?.length,
345 | bulkdata_offset: @bulkdata_offset,
346 | bulkdata_length: @bulkdata_length
347 |
348 | return summary
349 |
350 | exports.DicomEvent = DicomEvent
351 |
352 | ##
353 | # VR base class.
354 | #
355 | # VR objects store bytes received from some file or network stream
356 | # and the context needed to interpret said bytes.
357 | ##
358 | class VR
359 | is_endian: false
360 |
361 | explicit_value_length_bytes: 2
362 | implicit_value_length_bytes: 4
363 |
364 | base64_values: false
365 |
366 | # Initialize the VR. Either a buffer or parsed values must be given
367 | constructor: (context, buffer, values) ->
368 | @context = context
369 | if values? and not buffer?
370 | @encode values
371 | else
372 | @buffer = buffer
373 | # get the first value
374 | value: () ->
375 | @values()[0]
376 |
377 | _value_length_bytes: () ->
378 | vlb = if @context.explicit
379 | @explicit_value_length_bytes
380 | else
381 | @implicit_value_length_bytes
382 | log.trace({length: vlb}, "value_length_bytes") if log.trace()
383 | return vlb
384 |
385 | # consume value length from a readbuffer, return value length
386 | consume_value_length: (readbuffer) ->
387 | vlb = @_value_length_bytes()
388 | switch vlb
389 | when 2 then length_element = new US(@context)
390 | when 4 then length_element = new UL(@context)
391 | when 6
392 | # skip 2 bytes
393 | readbuffer.consume 2
394 | length_element = new UL(@context)
395 | else
396 | throw new DicomError("incorrect value length bytes (not 2,4 or 6): " + vlb)
397 | value_length = length_element.consume_value(readbuffer)
398 | return value_length
399 |
400 | # encode value length
401 | _encode_value_length: (encoder, value_length) ->
402 | if not value_length?
403 | value_length = @buffer.length
404 | vlb = @_value_length_bytes()
405 | switch vlb
406 | when 2 then length_element = new US(@context, null, [value_length])
407 | when 4 then length_element = new UL(@context, null, [value_length])
408 | when 6
409 | log.trace("encode_value_length: 6 byte VR, emitting 0x0000")
410 | encoder.push(new Buffer([0,0]))
411 | length_element = new UL(@context, null, [value_length])
412 | else
413 | throw new DicomError("incorrect value length bytes (not 2,4 or 6): " + vlb)
414 | log.trace({length: length_element.buffer.length, value_length: value_length},
415 | "encode_value_length: emitting value length") if log.trace()
416 | encoder.push(length_element.buffer)
417 |
418 | # consume value length and then the values
419 | consume: (readbuffer) ->
420 | value_length = @consume_value_length(readbuffer)
421 | @buffer = readbuffer.consume(value_length)
422 |
423 | # consume and emit - allows us to override in subclasses
424 | consume_and_emit: (element, readbuffer, decoder, start_position) ->
425 | value_length = @consume_value_length(readbuffer)
426 | @_consume_and_emit_known_value_length element, readbuffer, decoder, start_position, value_length
427 |
428 | _consume_and_emit_known_value_length: (element, readbuffer, decoder, start_position, value_length) ->
429 | if value_length == UNDEFINED_LENGTH
430 | throw new DicomError("VR::consume_and_emit is not prepared to handle UNDEFINED_LENGTH")
431 | if value_length < (decoder.streaming_value_length_minimum ? 256)
432 | @buffer = readbuffer.consume(value_length)
433 | obj = new DicomEvent(element, this, start_position, "element")
434 | decoder.log_and_push obj
435 | else
436 | @stream_element(element, readbuffer, decoder, start_position, value_length)
437 |
438 | # stream the element out (well, the byte buffers anyway)
439 | stream_element: (element, readbuffer, decoder, start_position, value_length) ->
440 | bd_offset = decoder.buffer.stream_position
441 | bd_length = value_length
442 | obj = new DicomEvent(element, this, start_position, "start_element", null, bd_offset, bd_length)
443 | decoder.log_and_push obj
444 | obj = new DicomEvent(element, this, start_position, "end_element", null, bd_offset, bd_length)
445 | decoder._stream_bytes(value_length, obj)
446 |
447 | # encoding helpers
448 | _encode_and_emit: (element, encoder) ->
449 | @_encode_and_emit_tag_vr(element, encoder)
450 | @_encode_value_length(encoder)
451 | log.trace({length: @buffer.length}, "_encode_and_emit: emitting vr buffer")
452 | encoder.push(@buffer)
453 |
454 | _encode_and_emit_tag_vr: (element, encoder) ->
455 | log.trace({tag: element.tag}, "_encode_and_emit_tag_vr: emitting tag") if log.trace()
456 | tag = new AT(@context, null, [element.tag])
457 | encoder.push(tag.buffer)
458 | if @context.explicit
459 | log.trace({vr: @name}, "_encode_and_emit_tag_vr: emitting vr") if log.trace()
460 | encoder.push(new Buffer(@name, "binary"))
461 |
462 | # log summary for bunyan
463 | log_summary: () ->
464 | summary =
465 | if @buffer?.length < 64
466 | values: @values()
467 | else
468 | length: @buffer?.length
469 |
470 | # VR of fixed length
471 | # defaults to 4 bytes length per element.
472 | # these are usually endian.
473 | class FixedLength extends VR
474 | is_endian: true
475 | single_value_length: 4
476 |
477 | # consume a single value from the readbuffer
478 | consume_value: (rb) ->
479 | @buffer = rb.consume(@single_value_length)
480 | return @value()
481 |
482 | _vm: (buffer) ->
483 | buffer.length / @single_value_length
484 |
485 | ##
486 | #
487 | # Dicom AT (=dAta element Tag).
488 | #
489 | # encoded as 2 consecutive 16 bit unsigneds giving the group/element of
490 | # a dicom tag. The value is represented as a single tag number.
491 | ##
492 | class AT extends FixedLength
493 | values: () ->
494 | g_e = @context.endianess.unpack_uint16s(@buffer, @_vm(@buffer) * 2)
495 | for idx in [0...g_e.length] by 2
496 | (g_e[idx]<<16) ^ g_e[idx + 1]
497 | encode: (values) ->
498 | g_e = []
499 | for v in values
500 | g = (v >> 16) & 0xFFFF
501 | e = v & 0xFFFF
502 | g_e.push g
503 | g_e.push e
504 | @buffer = @context.endianess.pack_uint16s(g_e)
505 |
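# Usage sketch, mirroring test/test_vrs.coffee (DEF_CTX stands in for a
# default little-endian context):
#
#   DEF_CTX = new Context({}, {})
#   at = new AT(DEF_CTX, null, [0x00100012])
#   at.buffer    # => <Buffer 10 00 12 00>
#   at.values()  # => [ 0x00100012 ]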
506 | ##
507 | #
508 | # Dicom FD (=Float Double) 64bit floats
509 | #
510 | ##
511 | class FD extends FixedLength
512 | single_value_length: 8
513 | values: () ->
514 | @context.endianess.unpack_doubles(@buffer, @_vm(@buffer))
515 | encode: (values) ->
516 | @buffer = @context.endianess.pack_doubles(values)
517 |
518 |
519 | ##
520 | #
521 | # Dicom FL (=Float) IEEE 32bit floats
522 | #
523 | ##
524 | class FL extends FixedLength
525 | values: () ->
526 | @context.endianess.unpack_floats(@buffer, @_vm(@buffer))
527 | encode: (values) ->
528 | @buffer = @context.endianess.pack_floats(values)
529 |
530 |
531 | ##
532 | #
533 | # Dicom SL (=Signed Long) 32-bit signed integers
534 | #
535 | ##
536 | class SL extends FixedLength
537 | values: () ->
538 | @context.endianess.unpack_int32s(@buffer, @_vm(@buffer))
539 | encode: (values) ->
540 | @buffer = @context.endianess.pack_int32s(values)
541 |
542 |
543 | ##
544 | #
545 | # Dicom SS (=Signed Short) 16-bit signed integers
546 | #
547 | ##
548 | class SS extends FixedLength
549 | single_value_length: 2
550 | values: () ->
551 | @context.endianess.unpack_int16s(@buffer, @_vm(@buffer))
552 | encode: (values) ->
553 | @buffer = @context.endianess.pack_int16s(values)
554 |
555 |
556 | ##
557 | #
558 | # Dicom UL (=Unsigned Long) 32-bit unsigned integers
559 | #
560 | ##
561 | class UL extends FixedLength
562 | values: () ->
563 | @context.endianess.unpack_uint32s(@buffer, @_vm(@buffer))
564 | encode: (values) ->
565 | @buffer = @context.endianess.pack_uint32s(values)
566 |
567 |
568 | ##
569 | #
570 | # Dicom US (=Unsigned Short) 16-bit unsigned integers
571 | #
572 | ##
573 | class US extends FixedLength
574 | single_value_length: 2
575 | values: () ->
576 | @context.endianess.unpack_uint16s(@buffer, @_vm(@buffer))
577 | encode: (values) ->
578 | @buffer = @context.endianess.pack_uint16s(values)
579 |
580 | # base class for the 'other' VRs ... OB, OW, OF, OD, UN
581 | class OtherVR extends FixedLength
582 | base64_values: true
583 | explicit_value_length_bytes: 6
584 |
585 | values: () ->
586 | [@buffer.toString('base64')]
587 |
588 |
589 | ##
590 | #
591 | # Dicom OB (= Other Byte)
592 | #
593 | ##
594 | class OB extends OtherVR
595 | # consume and emit - handle encapsulated pixeldata
596 | consume_and_emit: (element, readbuffer, decoder, start_position) ->
597 | value_length = @consume_value_length(readbuffer)
598 | if value_length != UNDEFINED_LENGTH
599 | return @_consume_and_emit_known_value_length(element, readbuffer, decoder, start_position, value_length)
600 | # push encaps context
601 | context = decoder.context
602 | context.push {encapsulated: true}
603 | obj = new DicomEvent(element, this, start_position, "start_element")
604 | decoder.log_and_push obj
605 |
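# Note: with UNDEFINED_LENGTH the pixel data is encapsulated; the decoder
# keeps parsing under the pushed {encapsulated: true} context, so the
# individual fragments follow as further events rather than as one value.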
606 | ##
607 | #
608 | # Dicom UN (= UNknown)
609 | #
610 | ##
611 | class UN extends OtherVR
612 | # UN may be of undefined length if it is really a private sequence tag
613 | consume_and_emit: (element, readbuffer, decoder, start_position) ->
614 | value_length = @consume_value_length(readbuffer)
615 | log.debug({length: value_length}, "UN consume and emit") if log.debug()
616 | if value_length != UNDEFINED_LENGTH
617 | # just stream it out, like all other OtherVRs
618 | return @_consume_and_emit_known_value_length(element, readbuffer, decoder, start_position, value_length)
619 | end_cb = () ->
620 | _obj = new DicomEvent(element, this, start_position, "end_sequence")
621 | decoder.log_and_push _obj
622 | decoder.context.push({explicit: false}, null, end_cb)
623 |
624 | obj = new DicomEvent(element, this, start_position, "start_sequence")
625 | decoder.log_and_push obj
626 |
627 | # encoding helpers
628 | _encode_and_emit_seq: (element, encoder) ->
629 | @_encode_and_emit_tag_vr(element, encoder)
630 | @_encode_value_length(encoder, UNDEFINED_LENGTH)
631 | encoder.context.push({})
632 | ##
633 | #
634 | # Dicom OW (= Other Word)
635 | #
636 | #
637 | class OW extends OtherVR
638 |
639 | ##
640 | #
641 | # DICOM OF (= Other Float)
642 | #
643 | ##
644 | class OF extends OtherVR
645 |
646 | ##
647 | #
648 | # DICOM OD (= Other Double)
649 | #
650 | ##
651 | class OD extends OtherVR
652 |
653 |
654 | ##
655 | #
656 | # Dicom SQ (= SeQuence) VR
657 | #
658 | # this does not consume its values - they should be parsed.
659 | # Instead we push a new context, maybe with autopop,
660 | # and let the driving loop do its work.
661 | #
662 | ##
663 | class SQ extends VR
664 | explicit_value_length_bytes: 6
665 |
666 | values: () ->
667 | undefined
668 |
669 | consume_and_emit: (element, readbuffer, decoder, start_position) ->
670 | value_length = @consume_value_length(readbuffer)
671 | log.debug({length: value_length}, "SQ consume and emit") if log.debug()
672 | end_position = undefined
673 | if value_length != UNDEFINED_LENGTH
674 | end_position = readbuffer.stream_position + value_length
675 | end_cb = () ->
676 | _obj = new DicomEvent(element, this, start_position, "end_sequence")
677 | decoder.log_and_push _obj
678 | decoder.context.push({}, end_position, end_cb)
679 | obj = new DicomEvent(element, this, start_position, "start_sequence")
680 | decoder.log_and_push obj
681 |
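# A decoded sequence therefore surfaces as an event bracket (sketch):
#
#   start_sequence
#     ... events for the sequence items ...
#   end_sequence   (emitted by end_cb when the pushed context is popped)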
682 | # encoding helpers
683 | _encode_and_emit: (element, encoder) ->
684 | throw new DicomError("internal error: _encode_and_emit should not be called")
685 |
686 | _encode_and_emit_seq: (element, encoder) ->
687 | @_encode_and_emit_tag_vr(element, encoder)
688 | @_encode_value_length(encoder, UNDEFINED_LENGTH)
689 | encoder.context.push({})
690 |
691 | _ends_with = (str, char) ->
692 | len = str.length
693 | len > 0 and str[len-1] == char
694 |
695 | # String VR base class
696 | class Stringish extends VR
697 | allow_multiple_values: true
698 | padding_character: ' '
699 | split_str: '\\'
700 |
701 | values: () ->
702 | if not @buffer?
703 | return undefined
704 | s = iconv.decode(@buffer, @context.charset)
705 | if s.length == 0
706 | return []
707 | if _ends_with s, @padding_character
708 | s = s.slice(0, -1)
709 | if @allow_multiple_values
710 | return s.split(@split_str)
711 | return [s]
712 |
713 | encode: (values) ->
714 | s = values.join(@split_str) + @padding_character
715 | b = iconv.encode(s, @context.charset)
716 | if b.length % 2
717 | b = b.slice(0, -1)
718 | @buffer = b
719 |
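# Round-trip sketch, mirroring test/test_vrs.coffee: values are joined
# with backslash and padded to even length on encode, split again on decode.
#
#   lo = new LO(DEF_CTX, null, ["Juergen", "Gmeiner"])
#   lo.buffer    # => "Juergen\Gmeiner " (space padded to even length)
#   lo.values()  # => ["Juergen", "Gmeiner"]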
720 | # string encoding, number values
721 | class NumberString extends Stringish
722 | values: () ->
723 | super().map Number
724 |
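# E.g. IS (below) decodes the string "1\2" to the numbers [1, 2]
# (mirroring test/test_vrs.coffee):
#
#   _is = new IS(DEF_CTX, new Buffer("1\\2"))
#   _is.values()  # => [ 1, 2 ]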
725 |
726 | ##
727 | #
728 | # Dicom AE (= Application Entity).
729 | #
730 | # 16 characters max, space padded.
731 | ##
732 | class AE extends Stringish
733 |
734 | ##
735 | #
736 | # Dicom AS (= Age String).
737 | #
738 | # 4 bytes fixed, of the form nnnD, nnnW, nnnM or nnnY.
739 | # E.g. 003W would mean 3 weeks old.
740 | #
741 | ##
742 | class AS extends Stringish
743 |
744 | ##
745 | #
746 | # Dicom CS (= Code String).
747 | #
748 | # 16 bytes max, only uppercase letters, 0-9, space and underscore
749 | # allowed. Leading and trailing spaces are non-significant.
750 | ##
751 | class CS extends Stringish
752 | _consume_and_emit_known_value_length: (element, readbuffer, decoder, start_position, value_length) ->
753 | super(element, readbuffer, decoder, start_position, value_length)
754 | if element.tag == 0x00080005
755 | # specific character set
756 | spec_cs = @value()
757 | log.debug {charset: spec_cs}, "CS: detected 0008,0005 SpecificCharacterSet"
758 | iconv_cs = _iconv_charset(spec_cs)
759 | decoder.context.replace_top({charset: iconv_cs})
760 |
761 | ##
762 | #
763 | # Dicom DA (= DAte).
764 | #
765 | # A string of the form YYYYMMDD
766 | #
767 | # When range matching, -YYYYMMDD, YYYYMMDD- and YYYYMMDD-YYYYMMDD
768 | # are also possible.
769 | ##
770 | class DA extends Stringish
771 |
772 | ##
773 | #
774 | # Dicom DS (= Decimal String).
775 | #
776 | # A fixed or floating point number represented as a String.
777 | #
778 | # Note: we leave this as a String.
779 | #
780 | ##
781 | class DS extends NumberString
782 |
783 | ##
784 | #
785 | # Dicom DT (= Date and Time).
786 | #
787 | # A concatenated date-time character string in the format:
788 | # YYYYMMDDHHMMSS.FFFFFF&ZZXX
789 | ##
790 | class DT extends Stringish
791 |
792 | ##
793 | #
794 | # Dicom IS (= Integer String)
795 | #
796 | ##
797 | class IS extends NumberString
798 |
799 | ##
800 | #
801 | # Dicom LO (= LOng string).
802 | #
803 | # Despite being LOng, 64 characters maximum, space padded, no backslash.
804 | # => Multiple values are possible
805 | ##
806 | class LO extends Stringish
807 |
808 | ##
809 | #
810 | # Dicom LT (= Long Text).
811 | #
812 | # This cannot be multivalued, so it can contain backslashes.
813 | ##
814 | class LT extends Stringish
815 | allow_multiple_values: false
816 |
817 | ##
818 | #
819 | # Dicom PN (= Person Name).
820 | #
821 | # Limited to 64 characters per component group (not enforced)
822 | ##
823 | class PN extends Stringish
824 | values: () ->
825 | _values = super()
826 | for v in _values
827 | _groups = v?.split("=")
828 | obj = {}
829 | if _groups[0]?
830 | obj.Alphabetic = _groups[0]
831 | if _groups[1]?
832 | obj.Ideographic = _groups[1]
833 | if _groups[2]?
834 | obj.Phonetic = _groups[2]
835 | obj
836 |
837 | encode: (values) ->
838 | str_values = for v in values
839 | if (typeof v) == 'object'
840 | groups = []
841 | if v.Alphabetic?
842 | groups[0] = v.Alphabetic
843 | if v.Ideographic?
844 | groups[1] = v.Ideographic
845 | if v.Phonetic?
846 | groups[2] = v.Phonetic
847 | groups.join("=")
848 | else
849 | v
850 | super(str_values)
851 |
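# Component-group sketch, mirroring test/test_vrs.coffee:
#
#   pn = new PN(DEF_CTX, new Buffer("group1=group2=group3"))
#   pn.values()
#   # => [ {Alphabetic: "group1", Ideographic: "group2", Phonetic: "group3"} ]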
852 | ##
853 | #
854 | # Dicom SH (= Short String).
855 | #
856 | # 16 bytes max, no backslash ==> multiple values.
857 | #
858 | ##
859 | class SH extends Stringish
860 |
861 | ##
862 | #
863 | # Dicom ST (= Short Text).
864 | #
865 | # 1024 characters maximum, no multiple values ==> backslash is allowed in text.
866 | ##
867 | class ST extends Stringish
868 | allow_multiple_values: false
869 |
870 | ##
871 | #
872 | # Dicom TM (= TiMe).
873 | #
874 | # HHMMSS.FFFFFF
875 | ##
876 | class TM extends Stringish
877 |
878 | ##
879 | #
880 | # Dicom UI (= UId string).
881 | #
882 | # Max 64 characters, padded with zero-byte, only valid uids allowed.
883 | # I.e. only 0-9 and . allowed, must start and end with digit,
884 | # no consecutive dots, and no leading zeros in multi-digit components
885 | # (a component consisting of a single 0 is allowed).
886 | ##
887 | class UI extends Stringish
888 | padding_character: "\x00"
889 |
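# Zero-byte padding sketch, mirroring test/test_vrs.coffee:
#
#   ui = new UI(DEF_CTX, null, ["1.2"])
#   ui.buffer    # => "1.2\x00" (padded to even length with NUL, not space)
#   ui.values()  # => ["1.2"]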
890 | ##
891 | #
892 | # Dicom UT (= Unlimited Text).
893 | #
894 | # Size only limited by length field. No multiple values allowed,
895 | # so literal backslashes are OK.
896 | ##
897 | class UT extends Stringish
898 | allow_multiple_values: false
899 | explicit_value_length_bytes: 6
900 |
901 |
902 | _VR_DICT = {
903 | # fixed length vrs
904 | 'AT': AT,
905 | 'FD': FD,
906 | 'FL': FL,
907 | 'SL': SL,
908 | 'SS': SS,
909 | 'UL': UL,
910 | 'US': US,
911 |
912 | # other vrs
913 | 'OB': OB,
914 | 'UN': UN,
915 | 'OW': OW,
916 | 'OF': OF,
917 | 'OD': OD,
918 |
919 | # sequence
920 | 'SQ': SQ,
921 |
922 | # string vrs
923 | 'AE': AE,
924 | 'AS': AS,
925 | 'CS': CS,
926 | 'DA': DA,
927 | 'DS': DS,
928 | 'DT': DT,
929 | 'IS': IS,
930 | 'LO': LO,
931 | 'LT': LT,
932 | 'PN': PN,
933 | 'SH': SH,
934 | 'ST': ST,
935 | 'TM': TM,
936 | 'UI': UI,
937 | 'UT': UT,
938 | }
939 |
940 | _init_vr_names = () ->
941 | for name,vr of _VR_DICT
942 | vr::name = name
943 | undefined
944 | _init_vr_names()
945 |
946 | for_name = (name, ctx, buffer, values) ->
947 | if name == 'OB or OW'
948 | log.debug({vr: 'OW'}, "for_name: using OW for 'OB or OW'") if log.debug()
949 | name = 'OW'
950 | if name == 'US or SS'
951 | log.debug({vr: 'SS'}, "for_name: using SS for 'US or SS'") if log.debug()
952 | name = 'SS'
953 | constr_fn = _VR_DICT[name]
954 | if not constr_fn?
955 | throw new DicomError("Unknown VR: #{name}")
956 | return new constr_fn(ctx, buffer, values)
957 |
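# Lookup sketch (argument values are hypothetical; the signature is as above):
#
#   lo = for_name('LO', ctx, null, ["ASD"])   # instance of LO
#   for_name('XX', ctx)                       # throws DicomError("Unknown VR: XX")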
958 | exports.LITTLE_ENDIAN = LITTLE_ENDIAN
959 | exports.BIG_ENDIAN = BIG_ENDIAN
960 | exports.Context = Context
961 | exports.ContextStack = ContextStack
962 | exports.AT = AT
963 | exports.FD = FD
964 | exports.FL = FL
965 | exports.SL = SL
966 | exports.SS = SS
967 | exports.UL = UL
968 | exports.US = US
969 |
970 | exports.OB = OB
971 | exports.OW = OW
972 | exports.OF = OF
973 | exports.OD = OD
974 | exports.UN = UN
975 | exports.SQ = SQ
976 |
977 | exports.AE = AE
978 | exports.AS = AS
979 | exports.CS = CS
980 | exports.DA = DA
981 | exports.DS = DS
982 | exports.DT = DT
983 | exports.IS = IS
984 | exports.LO = LO
985 | exports.LT = LT
986 | exports.PN = PN
987 | exports.SH = SH
988 | exports.ST = ST
989 | exports.TM = TM
990 | exports.UI = UI
991 | exports.UT = UT
992 |
993 | exports.for_name = for_name
994 | exports._VR_DICT = _VR_DICT
995 |
--------------------------------------------------------------------------------
/test/README.md:
--------------------------------------------------------------------------------
1 | Source of the various test images
2 | =================================
3 |
4 | All test images have been compressed with gzip (yes, even the already deflated test images;
5 | this allows us to simply gunzip all test images, with no special handling necessary).
6 |
7 | * `charsettests`: `http://www.dclunie.com/images/charset/charsettests.20070405.tar.bz2`
8 | * `deflate_tests`: `http://www.dclunie.com/images/compressed/deflate_tests_release.tar.gz`
9 | * `report_undef_len.gz`: converted `deflate_tests/report.gz` to undefined
10 | length sequences / items
11 | * `report_default_ts.gz`: converted `deflate_tests/report.gz` to
12 | ImplicitVRLittleEndian
13 | * `private_report.gz`: `report.gz` with ContentSequence modified to a private Tag
14 | * `hebrew_ivrle.gz`: converted `charsettests/SCSHBRW.gz` to ImplicitVRLittleEndian
15 | * `quotes_jpls.dcm.gz`: converted `charsettests/SCSFREN.gz` to JpegLS,
16 | and a patientname with single and double quotes in it
17 | * `scsarab_be.gz`: converted `charsettests/SCSARAB.gz` to ExplicitVRBigEndian
18 |
--------------------------------------------------------------------------------
/test/charsettests/DICOMDIR.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/DICOMDIR.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSARAB.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSARAB.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSFREN.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSFREN.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSGERM.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSGERM.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSGREEK.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSGREEK.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSH31.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSH31.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSH32.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSH32.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSHBRW.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSHBRW.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSI2.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSI2.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSRUSS.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSRUSS.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSX1.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSX1.gz
--------------------------------------------------------------------------------
/test/charsettests/SCSX2.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/charsettests/SCSX2.gz
--------------------------------------------------------------------------------
/test/charsettests/dcmdump.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | for x in * ; do
4 | echo =============================================================
5 | echo
6 | echo "Dumping: $x"
7 | echo
8 | gunzip -c "$x" | dcmdump -
9 | done
10 |
--------------------------------------------------------------------------------
/test/deflate_tests/image.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/deflate_tests/image.gz
--------------------------------------------------------------------------------
/test/deflate_tests/image_dfl.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/deflate_tests/image_dfl.gz
--------------------------------------------------------------------------------
/test/deflate_tests/report.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/deflate_tests/report.gz
--------------------------------------------------------------------------------
/test/deflate_tests/report_dfl.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/deflate_tests/report_dfl.gz
--------------------------------------------------------------------------------
/test/deflate_tests/wave.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/deflate_tests/wave.gz
--------------------------------------------------------------------------------
/test/deflate_tests/wave_dfl.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/deflate_tests/wave_dfl.gz
--------------------------------------------------------------------------------
/test/hebrew_ivrle.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/hebrew_ivrle.gz
--------------------------------------------------------------------------------
/test/metainfo_tests/dodgy_metainfo_length.dcm.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/metainfo_tests/dodgy_metainfo_length.dcm.gz
--------------------------------------------------------------------------------
/test/private_report.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/private_report.gz
--------------------------------------------------------------------------------
/test/quotes_jpls.dcm.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/quotes_jpls.dcm.gz
--------------------------------------------------------------------------------
/test/report_default_ts.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/report_default_ts.gz
--------------------------------------------------------------------------------
/test/report_undef_len.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/report_undef_len.gz
--------------------------------------------------------------------------------
/test/scsarab_be.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/grmble/node-dicom/721360c3acd0aed619d8e78ccf4e2fdda2d2f88e/test/scsarab_be.gz
--------------------------------------------------------------------------------
/test/test_decoder.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 | # test the decoder for particular edge cases
4 | fs = require "fs"
5 | zlib = require "zlib"
6 |
7 | decoder = require "../lib/decoder"
8 |
9 | # in SCSARAB 0002,0001 starts at offset 0xf4
10 | # The first non-metadata element starts at 0x14c, ends at 0x15e
11 |
12 | exports.DecoderTest =
13 | "test first chuck does not contain TransferSyntaxUID": (test) ->
14 | test.expect 1
15 |
16 | failure_detected = false
17 |
18 | file = fs.createReadStream "test/charsettests/SCSARAB.gz"
19 | gunzip = zlib.createGunzip()
20 | file.pipe gunzip
21 |
22 | decoder = decoder { read_header: true }
23 | decoder.on 'data', (data) =>
24 | # console.log data
25 | return
26 |
27 | decoder.once 'error', (err) =>
28 | #console.log 'decoder error'
29 | failure_detected = true
30 | test.ok false, 'Decoder should not throw an error.'
31 | test.done()
32 | gunzip.end()
33 | file.destroy()
34 |
35 | decoder.on 'end', () =>
36 | console.log 'decoder end'
37 | test.ok true
38 | test.done()
39 |
40 | first_chunk_handled = false
41 | gunzip.on 'readable', (foo) =>
42 | if failure_detected
43 | return
44 |
45 | if not first_chunk_handled
46 | first_chunk = gunzip.read 0xf4
47 | if first_chunk
48 | decoder.write first_chunk
49 | first_chunk_handled = true
50 |
51 | while (chunk = gunzip.read())
52 | decoder.write chunk
53 |
54 | gunzip.on 'end', () =>
55 | console.log 'gunzip ended'
56 | decoder.end()
57 |
58 |
59 |
--------------------------------------------------------------------------------
/test/test_dicom2json.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 | # test the dicom decode / json pipeline
4 | fs = require "fs"
5 | zlib = require "zlib"
6 |
7 | tags = require "../lib/tags"
8 | decoder = require "../lib/decoder"
9 | json = require "../lib/json"
10 |
11 | exports.Dicom2JsonTest =
12 | "test defined length sequences/items": (test) ->
13 | test.expect 4
14 | json.gunzip2json "test/deflate_tests/report.gz", (err, data) ->
15 | if err
16 | console.log "Error:", err.stack
17 | test.equal 1111, json.get_value(data, tags.ConceptNameCodeSequence, 0, tags.CodeValue)
18 | test.equal "Consultation Report", json.get_value(data, tags.ConceptNameCodeSequence, 0, tags.CodeMeaning)
19 |
20 | # ContentSequence last item of 5 has BulkDataURI TextValue (among others)
21 | # offset 5562, length 268
22 | bd_elem = json.get_element(data, tags.ContentSequence, 4, tags.TextValue)
23 | bd = bd_elem.BulkDataURI
24 | test.ok /offset=5562\&/.test(bd)
25 | test.ok /length=268$/.test(bd)
26 | test.done()
27 |
28 | "test undefined length sequences/items": (test) ->
29 | test.expect 4
30 | json.gunzip2json "test/report_undef_len.gz", (err, data) ->
31 | if err
32 | console.log "Error:", err.stack
33 | test.equal 1111, json.get_value(data, tags.ConceptNameCodeSequence, 0, tags.CodeValue)
34 | test.equal "Consultation Report", json.get_value(data, tags.ConceptNameCodeSequence, 0, tags.CodeMeaning)
35 | #
36 | # ContentSequence last item of 5 has BulkDataURI TextValue (among others)
37 | # offset 6110, length 268
38 | bd_elem = json.get_element(data, tags.ContentSequence, 4, tags.TextValue)
39 | bd = bd_elem.BulkDataURI
40 | test.ok /offset=6110\&/.test(bd)
41 | test.ok /length=268$/.test(bd)
42 | test.done()
43 |
44 | "test implicit vr little endian": (test) ->
45 | test.expect 4
46 | json.gunzip2json "test/report_default_ts.gz", (err, data) ->
47 | if err
48 | console.log "Error:", err.stack
49 | test.equal 1111, json.get_value(data, tags.ConceptNameCodeSequence, 0, tags.CodeValue)
50 | test.equal "Consultation Report", json.get_value(data, tags.ConceptNameCodeSequence, 0, tags.CodeMeaning)
51 | #
52 | # ContentSequence last item of 5 has BulkDataURI TextValue (among others)
53 | # offset 5936, length 268
54 | bd_elem = json.get_element(data, tags.ContentSequence, 4, tags.TextValue)
55 | bd = bd_elem.BulkDataURI
56 | test.ok /offset=5936\&/.test(bd)
57 | test.ok /length=268$/.test(bd)
58 | test.done()
59 |
60 | "test greek charset (isoir126)": (test) ->
61 | test.expect 3
62 | json.gunzip2json "test/charsettests/SCSGREEK.gz", (err, data) ->
63 | if err
64 | console.log "Error:", err.stack
65 | test.equal "Διονυσιος", json.get_value(data, tags.PatientName).Alphabetic
66 | # PixelData native offset 866, length 262144
67 | bd_elem = json.get_element(data, tags.PixelData)
68 | bd = bd_elem.BulkDataURI
69 | test.ok /offset=866\&/.test(bd)
70 | test.ok /length=262144$/.test(bd)
71 | test.done()
72 |
73 | "test utf8 charset": (test) ->
74 | test.expect 4
75 | json.gunzip2json "test/charsettests/SCSX1.gz", (err, data) ->
76 | if err
77 | console.log "Error:", err.stack
78 | test.equal "Wang^XiaoDong", json.get_value(data, tags.PatientName).Alphabetic
79 | test.equal "王^小東", json.get_value(data, tags.PatientName).Ideographic
80 |
81 | # PixelData native offset 886, length 262144
82 | bd_elem = json.get_element(data, tags.PixelData)
83 | bd = bd_elem.BulkDataURI
84 | test.ok /offset=886\&/.test(bd)
85 | test.ok /length=262144$/.test(bd)
86 | test.done()
87 |
88 | "test gb18030 charset": (test) ->
89 | test.expect 4
90 | json.gunzip2json "test/charsettests/SCSX2.gz", (err, data) ->
91 | if err
92 | console.log "Error:", err.stack
93 | test.equal "Wang^XiaoDong", json.get_value(data, tags.PatientName).Alphabetic
94 | test.equal "王^小东", json.get_value(data, tags.PatientName).Ideographic
95 |
96 | # PixelData native offset 880, length 262144
97 | bd_elem = json.get_element(data, tags.PixelData)
98 | bd = bd_elem.BulkDataURI
99 | test.ok /offset=880\&/.test(bd)
100 | test.ok /length=262144$/.test(bd)
101 | test.done()
102 |
103 | "test quotes in json and encaps pixeldata": (test) ->
104 | test.expect 3
105 | json.gunzip2json {filename: "test/quotes_jpls.dcm.gz", bulkdata_uri: "\"D'Artagnan\""}, (err, data) ->
106 | if err
107 | console.log "Error:", err.stack
108 | test.deepEqual {Alphabetic: "\"D'Artagnan\"^asdf"}, json.get_value(data, tags.PatientName)
109 |
110 | # PixelData encapsulated fragment 1 offset 918, length 26272
111 | bd_elem = json.get_element(data, tags.PixelData)
112 | bd = bd_elem.DataFragment[1].BulkDataURI
113 | test.ok /offset=918\&/.test(bd)
114 | test.ok /length=26272$/.test(bd)
115 | test.done()
116 |
117 | "test inlinebinary ob": (test) ->
118 | test.expect 2
119 | json.gunzip2json "test/deflate_tests/report.gz", (err, data) ->
120 | if err
121 | console.log "Error:", err.stack
122 | elem = json.get_element(data, tags.FileMetaInformationVersion)
123 | test.ok not elem.Value?
124 | test.ok elem.InlineBinary
125 | test.done()
126 |
127 |
128 | "test decoding big endian": (test) ->
129 | test.expect 2
130 | json.gunzip2json "test/scsarab_be.gz", (err, data) ->
131 | if err
132 | console.log "Error:", err.stack
133 | test.equal 512, json.get_value(data, tags.Rows)
134 | test.equal 512, json.get_value(data, tags.Columns)
135 | test.done()
136 |
137 |
138 | "test decoding implicit vr with undefined length private sequence": (test) ->
139 | test.expect 4
140 | json.gunzip2json "test/private_report.gz", (err, data) ->
141 | if err
142 | console.log "Error:", err.stack
143 | elem = tags.for_tag(0x0041A730)
144 | test.equal 'UN', elem.vr
145 | priv_cont_sq = json.get_element(data, elem)
146 | # console.log "priv_cont_sq", priv_cont_sq
147 | test.ok priv_cont_sq
148 | test.equal 'SQ', priv_cont_sq.vr
149 | test.equal 5, priv_cont_sq.Value.length
150 | test.done()
151 |
152 | "test decoding implicit vr pixeldata": (test) ->
153 | test.expect 4
154 | json.gunzip2json "test/hebrew_ivrle.gz", (err, data) ->
155 | if err
156 | console.log "Error:", err.stack
157 |
158 | test.equal "שרון^דבורה", json.get_value(data, tags.PatientName).Alphabetic
159 |
160 | # PixelData native offset 848, length 262144
161 | bd_elem = json.get_element(data, tags.PixelData)
162 | bd = bd_elem.BulkDataURI
163 | test.ok /offset=848\&/.test(bd)
164 | test.ok /length=262144$/.test(bd)
165 |
166 | test.equal 'OW', bd_elem.vr
167 | test.done()
168 |
169 |
170 |
171 | EMPTY_AT_EOF = """CAAFAENTCgBJU09fSVIgMTkyCAAgAERBAAAIADAAVE0AAAgAUABTSAAACABSAENTBgBTVFVEWSAI
172 | AGEAQ1MAAAgAYgBVSQAACACQAFBOAAAIADAQTE8AABAAEABQTgAAEAAgAExPAAAQACEATE8AABAA
173 | MABEQQAAEAAyAFRNAAAQAEAAQ1MAACAADQBVSQAAIAAQAFNIAAAgAAASSVMAACAAAhJJUwAAIAAE
174 | EklTAAAgAAYSSVMAACAACBJJUwAA"""
175 |
176 | exports.EmptyElementAtBufferEndTest =
177 | "test empty element at buffer end": (test) ->
178 | test.expect 4
179 | _buff = new Buffer(EMPTY_AT_EOF, "base64")
180 | _dec = json.decoder2json transfer_syntax: 'ExplicitVRLittleEndian', (err, _json) ->
181 | throw err if err
182 | console.log "RESULT:", _json
183 | test.ok _json, "Test that we got a djm result"
184 | # test that we parsed a valueless element
185 | test.deepEqual [], json.get_values(_json, tags.NumberOfStudyRelatedInstances)
186 | test.equal null, json.get_value(_json, tags.NumberOfStudyRelatedInstances)
187 | test.equal null, json.get_value(_json, tags.PatientName)
188 | test.done()
189 | _dec.end(_buff)
--------------------------------------------------------------------------------
/test/test_json2json.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | json = require "../lib/json"
4 |
5 | DATA = {
6 | "00100020": {vr: "LO", Value: ["007"]},
7 | "00100021": {vr: "LO", Value: ["MI6"]},
8 | "00101002": {vr: "SQ", Value: [{
9 | "00100020": {vr: "LO", Value: ["0815"]},
10 | "00100021": {vr: "LO", Value: ["BND"]}}
11 | ]}}
12 |
13 | SIMPLE = {
14 | "PatientID": "007",
15 | "IssuerOfPatientID": "MI6",
16 | "OtherPatientIDsSequence": [{
17 | "PatientID": "0815",
18 | "IssuerOfPatientID": "BND"}]}
19 |
20 | exports.Json2JsonTest =
21 | "test simplified json model": (test) ->
22 | test.expect 1
23 |
24 | callback = (err, data) ->
25 | if err
26 | console.error "Error:", err
27 | console.error "stack trace:", err.stack
28 | throw err
29 | test.deepEqual DATA, data
30 | test.done()
31 |
32 | source = new json.JsonSource(SIMPLE)
33 | .on 'error', callback
34 | .pipe new json.JsonEncoder({})
35 | .on 'error', callback
36 | .pipe new json.JsonSink(callback)
37 |
38 |
--------------------------------------------------------------------------------
/test/test_metainfo.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | #
3 | # test parsing metainfo with dodgy metainfo group length
4 | #
5 |
6 | fs = require "fs"
7 | zlib = require "zlib"
8 |
9 | tags = require "../lib/tags"
10 | decoder = require "../lib/decoder"
11 | json = require "../lib/json"
12 |
13 | exports.MetainfoTest =
14 | "test decoding file": (test) ->
15 | test.expect 3
16 | json.gunzip2json "test/metainfo_tests/dodgy_metainfo_length.dcm.gz", (err, data) ->
17 | if err
18 | console.log "Error:", err.stack
19 | # test that file could be read - it used to fail before tags.ImplementationVersionName
20 | test.equal "E120", json.get_value(data, tags.ImplementationVersionName)
21 | test.equal "ALL_AbdomenSAFIRE", json.get_value(data, tags.ProtocolName)
22 |
23 | # test patient ids sequence has length one with one empty object
24 | id_seq = json.get_values(data, tags.OtherPatientIDsSequence)
25 | test.deepEqual [{}], id_seq
26 |
27 | test.done()
28 |
--------------------------------------------------------------------------------
/test/test_pdu.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | pdu = require "../lib/pdu"
4 |
5 | ECHO_RAW =[ 1, 0, 0, 0, 0, 197, 0, 1, 0, 0, 84, 69, 83,
6 | 84, 77, 69, 32, 32, 32, 32, 32, 32, 32, 32, 32,
7 | 32, 68, 67, 77, 69, 67, 72, 79, 32, 32, 32, 32,
8 | 32, 32, 32, 32, 32, 0, 0, 0, 0, 0, 0, 0, 0,
9 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10 | 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 21, 49, 46,
11 | 50, 46, 56, 52, 48, 46, 49, 48, 48, 48, 56, 46,
12 | 51, 46, 49, 46, 49, 46, 49, 32, 0, 0, 46, 1,
13 | 0, 0, 0, 48, 0, 0, 17, 49, 46, 50, 46, 56, 52,
14 | 48, 46, 49, 48, 48, 48, 56, 46, 49, 46, 49,
15 | 64, 0, 0, 17, 49, 46, 50, 46, 56, 52, 48, 46,
16 | 49, 48, 48, 48, 56, 46, 49, 46, 50, 80, 0,
17 | 0, 50, 81, 0, 0, 4, 0, 0, 64, 0, 82, 0, 0, 15,
18 | 49, 46, 50, 46, 52, 48, 46, 48, 46, 49,
19 | 51, 46, 49, 46, 49, 83, 0, 0, 4, 0, 0, 0, 0, 85,
20 | 0, 0, 11, 100, 99, 109, 52, 99, 104, 101,
21 | 45, 50, 46, 48 ]
22 | ECHO_PDU = {
23 | "name": "association_rq",
24 | "called_aet_title": "TESTME",
25 | "calling_aet_title": "DCMECHO",
26 | "application_context": "1.2.840.10008.3.1.1.1"
27 | "presentation_context": [
28 | "id": 1,
29 | "abstract_syntax": "1.2.840.10008.1.1",
30 | "transfer_syntax": ["1.2.840.10008.1.2"]]
31 | "user_information":
32 | "maximum_length": 16384
33 | "implementation_class_uid": "1.2.40.0.13.1.1"
34 | "asynchronous_operations_window":
35 | "maximum_number_operations_invoked": 0
36 | "maximum_number_operations_performed": 0
37 | "implementation_version_name": "dcm4che-2.0"
38 | }
39 |
40 | exports.PDUTest =
41 | "test decoding echo association request": (test) ->
42 | test.expect 1
43 |
44 | _decoder = new pdu.PDUDecoder()
45 | _decoder.on 'data', (pdu) ->
46 | test.deepEqual ECHO_PDU, pdu.to_json()
47 | test.done()
48 | _decoder.write(new Buffer(ECHO_RAW))
49 |
50 | "test encoding echo association request": (test) ->
51 | test.expect 1
52 |
53 | _encoder = new pdu.PDUEncoder()
54 | _encoder.on 'data', (buff) ->
55 | _decoder = new pdu.PDUDecoder()
56 | _decoder.on 'data', (pdu) ->
57 | test.deepEqual ECHO_PDU, pdu.to_json()
58 | test.done()
59 | _decoder.write buff
60 | _encoder.write new pdu.PDUAssociateRq(ECHO_PDU)
--------------------------------------------------------------------------------
/test/test_readbuffer.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 | readbuffer = require("../lib/readbuffer")
3 |
4 | exports.ReadBufferTest =
5 | "test push/consume": (test) ->
6 | test.expect 10
7 | rb = readbuffer()
8 | rb.push(new Buffer("0123"))
9 | rb.push(new Buffer("4567"))
10 | rb.push(new Buffer("89AB"))
11 | rb.push(new Buffer("CDEF"))
12 | rb.push(new Buffer("0123"))
13 | rb.push(new Buffer("4567"))
14 | rb.push(new Buffer("89AB"))
15 | rb.push(new Buffer("CDEF"))
16 | # one buffer beginning / middle
17 | test.equal(rb.consume(2).toString(), "01")
18 | # one buffer middle / end
19 | test.equal(rb.consume(2).toString(), "23")
20 | # one buffer beginning / end
21 | test.equal(rb.consume(4).toString(), "4567")
22 | # mult buffer, beginning / middle
23 | test.equal(rb.consume(6).toString(), "89ABCD")
24 | # mult buffer, middle / end
25 | test.equal(rb.consume(6).toString(), "EF0123")
26 | # mult buffer, beginning / end
27 | test.equal(rb.consume(8).toString(), "456789AB")
28 |
29 | test.equal(rb.length, 4)
30 | test.ok(rb.has(4))
31 | test.ok(rb.has(0))
32 | test.ok(!rb.has(5))
33 |
34 | test.done()
35 |
36 | "test indexOf": (test) ->
37 | test.expect(10)
38 | rb = readbuffer()
39 | rb.push(new Buffer("asdf"))
40 | rb.push(new Buffer("jkl"))
41 | test.equal(-1, rb.indexOf('\n'))
42 | test.equal(0, rb.indexOf('a'))
43 | test.equal(1, rb.indexOf('s'))
44 | test.equal(3, rb.indexOf('f'))
45 | test.equal(4, rb.indexOf('j'))
46 | rb.consume(2)
47 | test.equal(-1, rb.indexOf('a'))
48 | test.equal(-1, rb.indexOf('s'))
49 | test.equal(0, rb.indexOf('d'))
50 | test.equal(1, rb.indexOf('f'))
51 | test.equal(2, rb.indexOf('j'))
52 | test.done()
53 |
54 |
--------------------------------------------------------------------------------
/test/test_tags.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | tags = require "../lib/tags"
4 |
5 | exports.TagsTest =
6 | "test for_tag": (test) ->
7 | test.expect 7
8 | el1 = tags.for_tag(0x00100010)
9 | el2 = tags.for_tag('PatientName')
10 | el3 = tags.PatientName
11 | el4 = tags.for_tag(tags.PatientName)
12 | el5 = tags.for_tag('00100010')
13 | test.equal el1, el2
14 | test.equal el1, el3
15 | test.equal el1, el4
16 | test.equal el1, el5
17 | test.equal 'PatientName', el1.name
18 | test.equal 0x00100010, el1.tag
19 | test.equal 'PN', el1.vr
20 | test.done()
21 |
22 | "test private tag": (test) ->
23 | test.expect 4
24 | el = tags.for_tag(0x00090010)
25 | test.equal 'LO', el.vr
26 | el = tags.for_tag(0x000900FF)
27 | test.equal 'LO', el.vr
28 | el = tags.for_tag(0x00090100)
29 | test.equal 'UN', el.vr
30 | # private tags are only allowed in groups 9 and up
31 | # so there is no private tag creator of type LO
32 | el = tags.for_tag(0x00070010)
33 | test.equal 'UN', el.vr
34 | test.done()
35 |
36 | "test group length tag": (test) ->
37 | test.expect 1
38 | el = tags.for_tag(0x00090000)
39 | test.equal 'UL', el.vr
40 | test.done()
41 |
42 | "test masked": (test) ->
43 | test.expect 4
44 | el = tags.for_tag(0x60120010)
45 | test.equal el.tag, 0x60120010
46 | test.equal 'US', el.vr
47 | test.equal 'OverlayRows', el.name
48 |
49 | el = tags.for_tag('OverlayRows')
50 | test.equal 0x60000010, el.tag
51 | test.done()
52 |
53 | "test masked (unsupported)": (test) ->
54 | test.expect 1
55 | # we only support overlays/curves not the even older repeating stuff
56 | el = tags.for_tag(0x10002220)
57 | test.equal 'UN', el.vr
58 | test.done()
59 |
--------------------------------------------------------------------------------
/test/test_uids.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | uids = require "../lib/uids"
4 | vrs = require "../lib/vrs"
5 |
6 | exports.TagsTest =
7 | "test default ts": (test) ->
8 | test.expect 4
9 | dts = uids.ImplicitVRLittleEndian
10 | test.equal uids.ImplicitVRLittleEndian.uid, dts.uid
11 | test.equal 'ImplicitVRLittleEndian', dts.name
12 | test.deepEqual vrs.LITTLE_ENDIAN, dts.endianess()
13 | test.equal false, dts.is_explicit()
14 | test.done()
15 |
16 | "test for_uid": (test) ->
17 | test.expect 2
18 | dts = uids.for_uid(uids.ImplicitVRLittleEndian.uid)
19 | test.equal uids.ImplicitVRLittleEndian, dts
20 | dts = uids.for_uid('ImplicitVRLittleEndian')
21 | test.equal uids.ImplicitVRLittleEndian, dts
22 | test.done()
23 |
--------------------------------------------------------------------------------
/test/test_vrs.coffee:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env coffee
2 |
3 | vrs = require "../lib/vrs"
4 |
5 | b_1704 = new Buffer([0x17, 0x04])
6 | b_deadbeef = new Buffer([0xDE, 0xAD, 0xBE, 0xEF])
7 |
8 | exports.LittleEndianTest =
9 | "test unpacking": (test) ->
10 | test.expect 2
11 | test.equal 0x0417, vrs.LITTLE_ENDIAN.unpack_uint16(b_1704)
12 | test.deepEqual [0xADDE, 0xEFBE], vrs.LITTLE_ENDIAN.unpack_uint16s(b_deadbeef, 2)
13 | test.done()
14 | "test packing": (test) ->
15 | test.expect 1
16 | test.deepEqual b_deadbeef, vrs.LITTLE_ENDIAN.pack_uint16s([0xADDE, 0xEFBE])
17 | test.done()
18 |
19 | exports.BigEndianTest =
20 | "test unpacking": (test) ->
21 | test.expect 2
22 | test.equal 0x1704, vrs.BIG_ENDIAN.unpack_uint16(b_1704)
23 | test.deepEqual [0xDEAD, 0xBEEF], vrs.BIG_ENDIAN.unpack_uint16s(b_deadbeef, 2)
24 | test.done()
25 | "test packing": (test) ->
26 | test.expect 1
27 | test.deepEqual b_deadbeef, vrs.BIG_ENDIAN.pack_uint16s([0xDEAD, 0xBEEF])
28 | test.done()
29 |
30 | DEF_CTX = new vrs.Context({}, {})
31 |
32 | exports.ATTest =
33 | "test encoding": (test) ->
34 | test.expect 2
35 | at = new vrs.AT(DEF_CTX, null, [0x00100012, 0x0020001D])
36 | expect = new Buffer([0x10, 0x00, 0x12, 0x00, 0x20, 0x00, 0x1D, 0x00])
37 | test.deepEqual expect, at.buffer
38 | at = new vrs.AT(DEF_CTX, null, [0xfffee000])
39 | expect = new Buffer([0xfe, 0xff, 0x00, 0xe0])
40 | test.deepEqual expect, at.buffer
41 | test.done()
42 | "test decoding": (test) ->
43 | test.expect 1
44 | input = new Buffer([0x10, 0x00, 0x12, 0x00, 0x20, 0x00, 0x1D, 0x00])
45 | at = new vrs.AT(DEF_CTX, input)
46 | test.deepEqual [0x00100012, 0x0020001D], at.values()
47 | test.done()
48 |
49 |
50 | exports.FDTest =
51 | "test doubles": (test) ->
52 | test.expect 1
53 | _vals = [0.5, 1000.0]
54 | fd = new vrs.FD(DEF_CTX, null, _vals)
55 | # this relies on the fact that the values are not stored;
56 | # converting to buffer and back should yield the same values
57 | test.deepEqual _vals, fd.values()
58 | test.done()
59 |
60 |
61 | exports.FLTest =
62 | "test floats": (test) ->
63 | test.expect 1
64 | _vals = [0.5, 1000.0]
65 | fl = new vrs.FL(DEF_CTX, null, _vals)
66 | # this relies on the fact that the values are not stored;
67 | # converting to buffer and back should yield the same values
68 | test.deepEqual _vals, fl.values()
69 | test.done()
70 |
71 | exports.SLTest =
72 | "test encode": (test) ->
73 | test.expect 1
74 | sl = new vrs.SL(DEF_CTX, null, [0x01020304, 0x05060708])
75 | test.deepEqual sl.buffer, new Buffer([4..1].concat([8..5]))
76 | test.done()
77 | "test decode": (test) ->
78 | test.expect 1
79 | sl = new vrs.SL(DEF_CTX, new Buffer([4..1].concat([8..5])))
80 | test.deepEqual [0x01020304, 0x05060708], sl.values()
81 | test.done()
82 |
83 | exports.SSTest =
84 | "test encode": (test) ->
85 | test.expect 1
86 | ss = new vrs.SS(DEF_CTX, null, [0x0102, 0x0506])
87 | test.deepEqual ss.buffer, new Buffer([2..1].concat([6..5]))
88 | test.done()
89 | "test decode": (test) ->
90 | test.expect 1
91 | ss = new vrs.SS(DEF_CTX, new Buffer([2..1].concat([6..5])))
92 | test.deepEqual [0x0102, 0x0506], ss.values()
93 | test.done()
94 |
95 | exports.ULTest =
96 | "test encode": (test) ->
97 | test.expect 1
98 | ul = new vrs.UL(DEF_CTX, null, [0x01020304, 0x05060708])
99 | test.deepEqual ul.buffer, new Buffer([4..1].concat([8..5]))
100 | test.done()
101 | "test decode": (test) ->
102 | test.expect 1
103 | ul = new vrs.UL(DEF_CTX, new Buffer([4..1].concat([8..5])))
104 | test.deepEqual [0x01020304, 0x05060708], ul.values()
105 | test.done()
106 |
107 | exports.USTest =
108 | "test encode": (test) ->
109 | test.expect 1
110 | us = new vrs.US(DEF_CTX, null, [0x0102, 0x0506])
111 | test.deepEqual us.buffer, new Buffer([2..1].concat([6..5]))
112 | test.done()
113 | "test decode": (test) ->
114 | test.expect 1
115 | us = new vrs.US(DEF_CTX, new Buffer([2..1].concat([6..5])))
116 | test.deepEqual [0x0102, 0x0506], us.values()
117 | test.done()
118 |
119 |
120 | #
121 | # for string tests:
122 | #
123 | # * multi-values
124 | # * no multi-values, e.g. LT with backslashes in there
125 | # * space-padding
126 | # * zero-padding (UI)
127 |
128 | exports.StringMultiValuesTest =
129 | "test multivalue": (test) ->
130 | test.expect 2
131 | lo = new vrs.LO(DEF_CTX, null, ["Juergen", "Gmeiner"])
132 | test.deepEqual new Buffer("Juergen\\Gmeiner ", "binary"), lo.buffer
133 | test.deepEqual ["Juergen", "Gmeiner"], lo.values()
134 | test.done()
135 | "test no multivalue": (test) ->
136 | test.expect 2
137 | st = new vrs.ST(DEF_CTX, null, ["Some text with \\ in there"])
138 | test.deepEqual new Buffer("Some text with \\ in there ", "binary"), st.buffer
139 | test.deepEqual ["Some text with \\ in there"], st.values()
140 | test.done()
141 |
142 | exports.StringPaddingTest =
143 | "test space padding": (test) ->
144 | test.expect 2
145 | lo = new vrs.LO(DEF_CTX, null, ["ASD"])
146 | test.deepEqual new Buffer("ASD ", "binary"), lo.buffer
147 | test.deepEqual ["ASD"], lo.values()
148 | test.done()
149 | "test zerobyte padding": (test) ->
150 | test.expect 2
151 | lo = new vrs.UI(DEF_CTX, null, ["1.2"])
152 | test.deepEqual new Buffer("1.2\x00", "binary"), lo.buffer
153 | test.deepEqual ["1.2"], lo.values()
154 | test.done()
155 |
156 | #
157 | # binary vrs should produce base64 encoded values ...
158 | #
159 | exports.OBTest =
160 | "test base64 values": (test) ->
161 | test.expect 1
162 | ob = new vrs.OB(DEF_CTX, new Buffer("asdf"))
163 | test.deepEqual ["YXNkZg=="], ob.values()
164 | test.done()
165 |
166 | #
167 | # binary vrs should produce base64 encoded values ...
168 | #
169 | exports.ODTest =
170 | "test base64 values": (test) ->
171 | test.expect 1
172 | od = new vrs.OD(DEF_CTX, new Buffer("asdfasdf"))
173 | test.deepEqual ["YXNkZmFzZGY="], od.values()
174 | test.done()
175 |
176 | exports.PNTest =
177 | "test single component group value": (test) ->
178 | test.expect 1
179 | pn = new vrs.PN(DEF_CTX, new Buffer("group1"))
180 | test.deepEqual [{Alphabetic: "group1"}], pn.values()
181 | test.done()
182 | "test all component groups values": (test) ->
183 | test.expect 1
184 | pn = new vrs.PN(DEF_CTX, new Buffer("group1=group2=group3=x=y"))
185 | test.deepEqual [{Alphabetic: "group1", Ideographic: "group2", Phonetic: "group3"}], pn.values()
186 | test.done()
187 | "test encode alphabetic": (test) ->
188 | test.expect 1
189 | pn = new vrs.PN(DEF_CTX, null, [{Alphabetic: "group1"}])
190 | test.deepEqual new Buffer("group1"), pn.buffer
191 | test.done()
192 | "test encode all groups": (test) ->
193 | test.expect 1
194 | pn = new vrs.PN(DEF_CTX, null, [{Alphabetic: "group1", Ideographic: "group2", Phonetic: "group3", Xtra1: "x", Xtra2: "y"}])
195 | test.deepEqual new Buffer("group1=group2=group3"), pn.buffer
196 | test.done()
197 |
198 | #
199 | # IS and DS are encoded as strings, but represent numbers.
200 | # They are numbers in the DICOM model.
201 | #
202 | exports.ISTest =
203 | "test values": (test) ->
204 | test.expect 3
205 | _is = new vrs.IS(DEF_CTX, new Buffer("1\\2"))
206 | _vals = _is.values()
207 | test.deepEqual [1,2], _vals
208 | test.equal 'number', typeof _vals[0]
209 | test.equal 'number', typeof _vals[1]
210 | test.done()
211 | "test encode": (test) ->
212 | test.expect 1
213 | _is = new vrs.IS(DEF_CTX, null, [1,2])
214 | test.deepEqual new Buffer("1\\2 "), _is.buffer
215 | test.done()
216 |
--------------------------------------------------------------------------------