├── .gitignore ├── icon.png ├── doc ├── screenshot.png └── screenshot-values.png ├── .gitattributes ├── .editorconfig ├── .vscodeignore ├── .vscode ├── tasks.json ├── settings.json └── launch.json ├── .eslintrc.yaml ├── webpack.config.js ├── tsconfig.json ├── LICENSE.md ├── src ├── modules.d.ts ├── decorate.ts ├── extension.ts ├── contentProvider.ts ├── vrDict.ts ├── hoverProvider.ts ├── encConverter.ts ├── test │ └── encConverter.test.ts ├── extractor.ts └── valueDict.ts ├── CHANGELOG.md ├── syntaxes └── dicom-dump.tmLanguage.json ├── README.md └── package.json /.gitignore: -------------------------------------------------------------------------------- 1 | out 2 | node_modules 3 | .vscode-test/ 4 | *.vsix 5 | -------------------------------------------------------------------------------- /icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smikitky/vscode-dicom-dump/HEAD/icon.png -------------------------------------------------------------------------------- /doc/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smikitky/vscode-dicom-dump/HEAD/doc/screenshot.png -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Set default behavior to automatically normalize line endings. 2 | * text=auto 3 | 4 | -------------------------------------------------------------------------------- /doc/screenshot-values.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/smikitky/vscode-dicom-dump/HEAD/doc/screenshot-values.png -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | [*.{ts,js,json}] 2 | indent_style = space 3 | indent_size = 2 4 | end_of_line = lf 5 | 6 | [*] 7 | end_of_line = lf -------------------------------------------------------------------------------- /.vscodeignore: -------------------------------------------------------------------------------- 1 | .vscode/** 2 | .vscode-test/** 3 | out/test/** 4 | out/**/*.map 5 | src/** 6 | node_modules 7 | .gitignore 8 | tsconfig.json 9 | vsc-extension-quickstart.md 10 | .eslintrc.yaml 11 | webpack.config.js -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | // See https://go.microsoft.com/fwlink/?LinkId=733558 2 | // for the documentation about the tasks.json format 3 | { 4 | "version": "2.0.0", 5 | "tasks": [ 6 | { 7 | "type": "npm", 8 | "script": "watch", 9 | "problemMatcher": "$tsc-watch", 10 | "isBackground": true, 11 | "presentation": { 12 | "reveal": "never" 13 | }, 14 | "group": { 15 | "kind": "build", 16 | "isDefault": true 17 | } 18 | } 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | // Place your settings in this file to overwrite default and user settings. 
2 | { 3 | "files.exclude": { 4 | "out": false // set this to true to hide the "out" folder with the compiled JS files 5 | }, 6 | "search.exclude": { 7 | "out": true // set this to false to include "out" folder in search results 8 | }, 9 | // Turn off tsc task auto detection since we have the necessary tasks as npm scripts 10 | "typescript.tsc.autoDetect": "off", 11 | "editor.formatOnSave": true 12 | } 13 | -------------------------------------------------------------------------------- /.eslintrc.yaml: -------------------------------------------------------------------------------- 1 | extends: 2 | - eslint:recommended 3 | - plugin:@typescript-eslint/recommended 4 | parserOptions: 5 | sourceType: module 6 | env: 7 | es6: true 8 | node: true 9 | mocha: true 10 | parser: '@typescript-eslint/parser' 11 | plugins: ['@typescript-eslint'] 12 | rules: 13 | no-control-regex: off 14 | '@typescript-eslint/no-non-null-assertion': off 15 | '@typescript-eslint/no-explicit-any': off 16 | overrides: 17 | files: ['webpack.config.js'] 18 | rules: 19 | '@typescript-eslint/no-var-requires': off 20 | -------------------------------------------------------------------------------- /webpack.config.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const path = require('path'); 3 | 4 | module.exports = { 5 | target: 'node', 6 | entry: './src/extension.ts', 7 | output: { 8 | path: path.resolve(__dirname, 'out'), 9 | filename: 'extension.js', 10 | libraryTarget: 'commonjs2', 11 | devtoolModuleFilenameTemplate: '../[resource-path]' 12 | }, 13 | devtool: 'source-map', 14 | externals: { vscode: 'commonjs vscode' }, 15 | resolve: { extensions: ['.ts', '.js'] }, 16 | module: { 17 | rules: [ 18 | { 19 | test: /\.ts$/, 20 | exclude: /node_modules/, 21 | use: [{ loader: 'ts-loader' }] 22 | } 23 | ] 24 | } 25 | }; 26 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "module": "commonjs", 4 | "target": "es2019", 5 | "outDir": "out", 6 | "lib": ["es2019"], 7 | "sourceMap": true, 8 | "rootDir": "src", 9 | /* Strict Type-Checking Option */ 10 | "strict": true /* enable all strict type-checking options */, 11 | /* Additional Checks */ 12 | "noUnusedLocals": true /* Report errors on unused locals. */ 13 | // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ 14 | // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ 15 | // "noUnusedParameters": true, /* Report errors on unused parameters. 
*/ 16 | }, 17 | "exclude": ["node_modules", ".vscode-test"] 18 | } 19 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # The MIT License 2 | 3 | Copyright 2018 Soichiro Miki 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | // A launch configuration that compiles the extension and then opens it inside a new window 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | { 6 | "version": "0.2.0", 7 | "configurations": [ 8 | { 9 | "name": "Extension", 10 | "type": "extensionHost", 11 | "request": "launch", 12 | "runtimeExecutable": "${execPath}", 13 | "args": ["--extensionDevelopmentPath=${workspaceFolder}"], 14 | "outFiles": ["${workspaceFolder}/out/**/*.js"], 15 | "preLaunchTask": "npm: watch" 16 | }, 17 | { 18 | "name": "Extension Tests", 19 | "type": "extensionHost", 20 | "request": "launch", 21 | "runtimeExecutable": "${execPath}", 22 | "args": [ 23 | "--extensionDevelopmentPath=${workspaceFolder}", 24 | "--extensionTestsPath=${workspaceFolder}/out/test" 25 | ], 26 | "outFiles": ["${workspaceFolder}/out/test/**/*.js"], 27 | "preLaunchTask": "npm: watch" 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /src/modules.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'dicom-parser' { 2 | export interface Element { 3 | tag: string; 4 | vr: string; 5 | dataOffset: number; 6 | length: number; 7 | items?: { dataSet: DataSet }[]; 8 | fragments?: any; 9 | } 10 | 11 | interface NumberAccessor { 12 | (key: string, index?: number): number | undefined; 13 | } 14 | 15 | export interface DataSet { 16 | elements: { [key: string]: Element }; 17 | float: NumberAccessor; 18 | double: NumberAccessor; 19 | uint32: NumberAccessor; 20 | int32: NumberAccessor; 21 | uint16: NumberAccessor; 22 | int16: NumberAccessor; 23 | string: (key: string) => string | undefined; 24 | byteArray: Uint8Array; 25 | } 26 | 27 | export function parseDicom(byteArray: Uint8Array): DataSet; 28 | } 29 | 30 | declare module 'dicom-data-dictionary' { 31 | export interface TagInfo { 32 
| vr: string; 33 | // vm: string; 34 | name: string; 35 | } 36 | 37 | export interface DicomDataElements { 38 | [tag: string]: TagInfo | undefined; 39 | } 40 | export const standardDataElements: DicomDataElements; 41 | } 42 | 43 | declare module 'jconv'; 44 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | ## 1.4.0 (2020-06-23) 4 | 5 | - Increased the number of tags that provide hovers/decorations. 6 | - Minor bugfixes. 7 | 8 | ## 1.3.1 (2020-06-22) 9 | 10 | - Decorations are now enclosed in parentheses. 11 | - Introduced webpack to bundle the modules. 12 | 13 | ## 1.3.0 (2020-06-22) 14 | 15 | - Implemented inline decorations. 16 | - Added `alwaysShowMenu` option. 17 | - Added support for FileSystemProvider. You can now load DICOM files via [SSH FS](https://marketplace.visualstudio.com/items?itemName=Kelvin.vscode-sshfs), for example. 18 | 19 | ## 1.2.1 (2018-07-28) 20 | 21 | - Fixed: wrong keyword replacement on search 22 | 23 | ## 1.2.0 (2018-07-28) 24 | 25 | - Added new configuration `dicom.searches`. 26 | - Fixed bugs regarding ISO-2022-JP encoding (Japanese). 27 | - Added support for ISO-2022-KR encoding (Korean). 28 | 29 | ## 1.1.0 (2018-07-11) 30 | 31 | - Provides a hover for some DICOM keywords and reserved UIDs 32 | that are hard to remember, such as Transfer Syntax UID. 33 | - Support for (retired) Group Length element (gggg,0000). 34 | 35 | ## 1.0.3 (2018-07-08) 36 | 37 | - Documentation and changelog fix. 38 | 39 | ## 1.0.0 (2018-07-08) 40 | 41 | - Initial release. 42 | -------------------------------------------------------------------------------- /syntaxes/dicom-dump.tmLanguage.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": 3 | "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json", 4 | "name": "DICOM Dump", 5 | "patterns": [ 6 | { 7 | "include": "#entry" 8 | } 9 | ], 10 | "repository": { 11 | "entry": { 12 | "patterns": [ 13 | { 14 | "match": 15 | "^\\s*(\\([0-9A-F]{4}\\,[0-9A-F]{4}\\)) (..(\\|..)*) (\\w+|\\?) ?= ?(.*)", 16 | "captures": { 17 | "1": { "patterns": [{ "include": "#tag" }] }, 18 | "2": { "patterns": [{ "include": "#vr" }] }, 19 | "4": { "patterns": [{ "include": "#name" }] }, 20 | "5": { "patterns": [{ "include": "#val" }] } 21 | } 22 | } 23 | ] 24 | }, 25 | "tag": { 26 | "patterns": [{ "name": "markup.bold.dicom-dump", "match": ".+" }] 27 | }, 28 | "vr": { 29 | "patterns": [{ "name": "storage.type.dicom-dump", "match": ".+" }] 30 | }, 31 | "name": { 32 | "patterns": [ 33 | { "name": "invalid.illegal.dicom-dump", "match": "\\?" }, 34 | { "name": "keyword.control.dicom-dump", "match": ".+" } 35 | ] 36 | }, 37 | "val": { 38 | "patterns": [ 39 | { "name": "comment.other.dicom-dump", "match": "\\<.+\\>" }, 40 | { "name": "entity.other.dicom-dump", "match": ".+" } 41 | ] 42 | } 43 | }, 44 | "scopeName": "text.dicom-dump" 45 | } 46 | -------------------------------------------------------------------------------- /src/decorate.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | 3 | /** 4 | * The annotation type used to show our decorations. 
5 | */ 6 | const annotationDecoration = vscode.window.createTextEditorDecorationType({ 7 | after: { 8 | margin: '0 0 0 2em', 9 | color: new vscode.ThemeColor('editorCodeLens.foreground'), 10 | fontStyle: 'italic' 11 | } 12 | }); 13 | 14 | /** 15 | * Adds decorations to some DICOM keywords/codes. 16 | * @param editor The editor to add the decorations. 17 | */ 18 | const decorate = async (editor: vscode.TextEditor): Promise => { 19 | // The dictionary will be loaded lazily to minimize the performance impact 20 | const valueDict = (await import('./valueDict')).default; 21 | 22 | const decorations: vscode.DecorationOptions[] = []; 23 | const lineCount = editor.document.lineCount; 24 | 25 | for (let i = 0; i < lineCount; i++) { 26 | const line = editor.document.lineAt(i); 27 | const match = line.text.match(/(\s*)\(([0-9A-F]{4},[0-9A-F]{4})\)/); 28 | if (!match) continue; 29 | const tag = match[2]; 30 | if (!(tag in valueDict)) continue; 31 | const contentText = line.text 32 | .substr(line.text.indexOf(' = ') + 3) 33 | .split('\\') 34 | .map(item => valueDict[tag][item] ?? '') 35 | .join('\\'); 36 | if (contentText.length > 100 || contentText.length === 0) continue; 37 | decorations.push({ 38 | range: line.range, 39 | renderOptions: { after: { contentText: '(' + contentText + ')' } } 40 | }); 41 | } 42 | 43 | editor.setDecorations(annotationDecoration, decorations); 44 | }; 45 | 46 | export default decorate; 47 | -------------------------------------------------------------------------------- /src/extension.ts: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | import * as vscode from 'vscode'; 3 | import DicomHoverProvider from './hoverProvider'; 4 | import decorate from './decorate'; 5 | import * as qs from 'qs'; 6 | 7 | const scheme = 'dicom-dump'; 8 | 9 | export function activate(context: vscode.ExtensionContext): void { 10 | const r1 = vscode.workspace.registerTextDocumentContentProvider( 11 | scheme, 12 | new ContentProviderWrapper() 13 | ); 14 | 15 | const open = (mode: string) => { 16 | return async (uri: vscode.Uri) => { 17 | if (!(uri instanceof vscode.Uri)) return; 18 | const newUri = uri.with({ 19 | query: qs.stringify({ scheme: uri.scheme, mode }), 20 | scheme, 21 | path: uri.path + '.' + mode 22 | }); 23 | const doc = await vscode.workspace.openTextDocument(newUri); 24 | return vscode.window.showTextDocument(doc); 25 | }; 26 | }; 27 | 28 | const r2 = vscode.commands.registerCommand('dicom.showTags', open('dcmdump')); 29 | 30 | const r3 = vscode.commands.registerCommand('dicom.dumpAsJson', open('json')); 31 | 32 | const r4 = vscode.languages.registerHoverProvider( 33 | { language: 'dicom-dump' }, 34 | new DicomHoverProvider() 35 | ); 36 | 37 | // Decoration in text editor 38 | let activeEditor = vscode.window.activeTextEditor; 39 | const decorateActiveEditor = () => { 40 | if (activeEditor && activeEditor.document.languageId === 'dicom-dump') 41 | decorate(activeEditor); 42 | }; 43 | decorateActiveEditor(); 44 | const r5 = vscode.window.onDidChangeActiveTextEditor(editor => { 45 | activeEditor = editor; 46 | decorateActiveEditor(); 47 | }); 48 | 49 | // Dispose 50 | context.subscriptions.push(r1, r2, r3, r4, r5); 51 | } 52 | 53 | // export function deactivate() {} 54 | 55 | /** 56 | * Wraps the actual DicomContentProvider in order to load 57 | * the actual big module as late as possible. 
58 | */ 59 | class ContentProviderWrapper implements vscode.TextDocumentContentProvider { 60 | private _provider!: vscode.TextDocumentContentProvider; 61 | 62 | public async provideTextDocumentContent( 63 | uri: vscode.Uri, 64 | token: vscode.CancellationToken 65 | ): Promise { 66 | if (!this._provider) { 67 | const DicomContentProvider = (await import('./contentProvider')).default; 68 | this._provider = new DicomContentProvider(); 69 | } 70 | return (await this._provider.provideTextDocumentContent( 71 | uri, 72 | token 73 | )) as string; 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/contentProvider.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | import * as qs from 'qs'; 3 | import * as parser from 'dicom-parser'; 4 | import { standardDataElements, DicomDataElements } from 'dicom-data-dictionary'; 5 | import { EncConverter, createEncConverter } from './encConverter'; 6 | import { buildTreeFromDataSet, ParsedElement } from './extractor'; 7 | 8 | /** 9 | * Transforms the parsed elements into indented text. 10 | * @param elements 11 | * @param depth 12 | */ 13 | function parsedElementsToString(elements: ParsedElement[], depth = 0): string { 14 | const lines = elements.map(e => { 15 | const indent = ' '.repeat(depth); 16 | const print = e.desc ? `<${e.desc}>` : e.text; 17 | const main = `${indent}${e.tag} ${e.vr} ${e.name} = ${print}`; 18 | if (e.sequenceItems) { 19 | return ( 20 | main + 21 | '\n' + 22 | e.sequenceItems.map((sub, index) => { 23 | return ( 24 | `${indent} #${index}\n` + parsedElementsToString(sub, depth + 1) 25 | ); 26 | }) 27 | ); 28 | } else { 29 | return main; 30 | } 31 | }); 32 | return lines.join('\n'); 33 | } 34 | 35 | /** 36 | * DicomContentProvider is responsible for generating a virtual document 37 | * that contains the DICOM tags. 38 | */ 39 | export default class DicomContentProvider 40 | implements vscode.TextDocumentContentProvider { 41 | private async _prepareEncConverter( 42 | charSet: string | undefined 43 | ): Promise { 44 | const defaultEncConverter: EncConverter = buf => buf.toString('latin1'); 45 | if (!charSet) { 46 | // Empty tag means only 7-bit ASCII characters will be used. 47 | return defaultEncConverter; 48 | } 49 | const converter = await createEncConverter(charSet); 50 | if (converter) { 51 | // Found a good converter 52 | return converter; 53 | } 54 | vscode.window.showInformationMessage( 55 | `The character set ${charSet} is not supported. 
` + 56 | `Strings may be broken.` 57 | ); 58 | return defaultEncConverter; 59 | } 60 | 61 | public async provideTextDocumentContent(uri: vscode.Uri): Promise { 62 | const config = vscode.workspace.getConfiguration('dicom'); 63 | const additionalDict: DicomDataElements = config.get('dictionary') || {}; 64 | const dictionary = Object.assign({}, standardDataElements, additionalDict); 65 | const showPrivateTags = !!config.get('showPrivateTags'); 66 | 67 | if (!(uri instanceof vscode.Uri)) return ''; 68 | const query = qs.parse(uri.query); 69 | const scheme = query.scheme as string; 70 | const dumpMode = query.mode as string; 71 | 72 | let rootDataSet: parser.DataSet; 73 | const path = uri.fsPath.replace(/\.(dcmdump|json)$/, ''); 74 | const fileUri = uri.with({ scheme, path }); 75 | try { 76 | const fileContent = await vscode.workspace.fs.readFile(fileUri); 77 | // Clone as a 'pure' Uint8Array to avoid encoding issues 78 | const arr = new Uint8Array(fileContent); 79 | rootDataSet = parser.parseDicom(arr); 80 | } catch (e) { 81 | vscode.window.showErrorMessage( 82 | 'Error opening DICOM file. ' + (typeof e === 'string' ? e : e.message) 83 | ); 84 | return ''; 85 | } 86 | 87 | // Prepares a character encoding converter based on Specific Character Set. 88 | const specificCharacterSet = rootDataSet.string('x00080005'); 89 | const encConverter = await this._prepareEncConverter(specificCharacterSet); 90 | 91 | const parsedElements = buildTreeFromDataSet(rootDataSet, { 92 | rootDataSet, 93 | showPrivateTags, 94 | dictionary, 95 | encConverter 96 | }); 97 | 98 | return dumpMode === 'dcmdump' 99 | ? parsedElementsToString(parsedElements, 0) 100 | : JSON.stringify(parsedElements, null, ' '); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/vrDict.ts: -------------------------------------------------------------------------------- 1 | // Based on ftp://dicom.nema.org/MEDICAL/Dicom/2014a/output/chtml/part05/sect_6.2.html 2 | 3 | const dict: { [vr: string]: string | undefined } = { 4 | AE: 5 | '**Application Entity**. A string of characters that identifies an Application Entity with leading and trailing spaces (20H) being non-significant.', 6 | AS: 7 | '**Age String**. A string of characters with one of the following formats -- `nnnD`, `nnnW`, `nnnM`, `nnnY`; where `nnn` shall contain the number of days for `D`, weeks for `W`, months for `M`, or years for `Y`.', 8 | AT: 9 | '**Attribute Tag**. Ordered pair of 16-bit unsigned integers that is the value of a Data Element Tag.', 10 | CS: 11 | '**Code String**. A string of characters with leading or trailing spaces (20H) being non-significant. 16 bytes maximum.', 12 | DA: 13 | '**Date**. A string of characters of the format `YYYYMMDD`; where `YYYY` shall contain year, `MM` shall contain the month, and `DD` shall contain the day, interpreted as a date of the Gregorian calendar system.', 14 | DS: 15 | '**Decimal String**. A string of characters representing either a fixed point number or a floating point number.', 16 | DT: 17 | '**Date Time**. A concatenated date-time character string in the format: `YYYYMMDDHHMMSS.FFFFFF&ZZXX`', 18 | FL: 19 | '**Floating Point Single**. Single precision binary floating point number represented in IEEE 754:1985 32-bit Floating Point Number Format.', 20 | FD: 21 | '**Floating Point Double**. Double precision binary floating point number represented in IEEE 754:1985 64-bit Floating Point Number Format.', 22 | IS: 23 | '**Integer String**. 
A string of characters representing an Integer in base-10 (decimal).', 24 | LO: 25 | '**Long String**. A character string that may be padded with leading and/or trailing spaces. 64 chars maximum.', 26 | LT: 27 | '**Long Text**. A character string that may contain one or more paragraphs. 10240 chars maximum.', 28 | OB: 29 | '**Other Byte String**. A string of bytes where the encoding of the contents is specified by the negotiated Transfer Syntax.', 30 | OD: 31 | '**Other Double String**. A string of 64-bit IEEE 754:1985 floating point words.', 32 | OF: 33 | '**Other Float String**. A string of 32-bit IEEE 754:1985 floating point words.', 34 | OW: 35 | '**Other Word String**. A string of 16-bit words where the encoding of the contents is specified by the negotiated Transfer Syntax.', 36 | PN: 37 | '**Person Name**. A character string encoded using a 5 component convention. For human use, the five components in their order of occurrence are: family name complex, given name complex, middle name, name prefix, name suffix.', 38 | SH: 39 | '**Short String**. A character string that may be padded with leading and/or trailing spaces. 16 chars maximum.', 40 | SL: 41 | "**Signed Long**. Signed binary integer 32 bits long in 2's complement form.", 42 | SQ: '**Sequence of Items**. Value is a Sequence of zero or more Items.', 43 | SS: 44 | "**Signed Short**. Signed binary integer 16 bits long in 2's complement form.", 45 | ST: 46 | '**Short Text**. A character string that may contain one or more paragraphs. 1024 chars maximum.', 47 | TM: 48 | '**Time**. A string of characters of the format HHMMSS.FFFFFF; where HH contains hours (range "00" - "23"), MM contains minutes (range "00" - "59"), SS contains seconds (range "00" - "60"), and FFFFFF contains a fractional part of a second as small as 1 millionth of a second (range "000000" - "999999").', 49 | UI: 50 | '**Unique Identifier (UID)**. A character string containing a UID that is used to uniquely identify a wide variety of items. The UID is a series of numeric components separated by the period `.` character.', 51 | UL: '**Unsigned Long**. Unsigned binary integer 32 bits long.', 52 | UN: 53 | '**Unknown**. A string of bytes where the encoding of the contents is unknown.', 54 | US: '**Unsigned Short**. Unsigned binary integer 16 bits long.', 55 | UT: 56 | '**Unlimited Text**. A character string that may contain one or more paragraphs.' 57 | }; 58 | 59 | export default dict; 60 | -------------------------------------------------------------------------------- /src/hoverProvider.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | 3 | function extractKeyword( 4 | str: string, 5 | position: number, 6 | separator = '\\' 7 | ): { str: string; start: number } { 8 | const after = str.indexOf(separator, position); 9 | const before = str.lastIndexOf(separator, position); 10 | return { 11 | str: str.substring( 12 | before >= 0 ? before + 1 : 0, 13 | after >= 0 ? after : undefined 14 | ), 15 | start: before >= 0 ? 
before + 1 : 0 16 | }; 17 | } 18 | 19 | function replaceKeywords( 20 | str: string, 21 | keywords: { [key: string]: string } 22 | ): string { 23 | const regex = new RegExp('{(' + Object.keys(keywords).join('|') + ')}', 'g'); 24 | return str.replace(regex, (m, p1) => keywords[p1]); 25 | } 26 | 27 | interface SearchConfig { 28 | title: string; 29 | url: string; 30 | } 31 | 32 | export default class DicomHoverProvider implements vscode.HoverProvider { 33 | public async provideHover( 34 | document: vscode.TextDocument, 35 | position: vscode.Position, 36 | token: vscode.CancellationToken 37 | ): Promise { 38 | const makeHover = (pos: number, length: number, content: string) => 39 | new vscode.Hover( 40 | content, 41 | new vscode.Range( 42 | new vscode.Position(position.line, pos), 43 | new vscode.Position(position.line, pos + length) 44 | ) 45 | ); 46 | 47 | // The dictionary will be loaded lazily to minimize the performance impact 48 | const vrDict = (await import('./vrDict')).default; 49 | const valueDict = (await import('./valueDict')).default; 50 | 51 | // Since we cannot directly get the scope from here, 52 | // we need to reanalyze the line. 53 | const line = document.lineAt(position.line).text; 54 | const match = line.match( 55 | /(\s*)(\([0-9A-F]{4},[0-9A-F]{4}\)) ([A-Z]{2}(\|[A-Z])*)/ 56 | ); 57 | if (!match) return; 58 | const tag = match[2].replace(/\(|\)/g, ''); 59 | 60 | // Hover for Tag (gggg,eeee) 61 | const tagPos = match[1].length; 62 | if ( 63 | position.character >= tagPos && 64 | position.character <= tagPos + match[2].length 65 | ) { 66 | const searches = vscode.workspace 67 | .getConfiguration('dicom') 68 | .get('searches'); 69 | if (!Array.isArray(searches) || !searches.length) return; 70 | const [GGGG, EEEE] = tag.split(','); 71 | const keywords = { 72 | GGGG, 73 | EEEE, 74 | gggg: GGGG.toLowerCase(), 75 | eeee: EEEE.toLowerCase() 76 | }; 77 | 78 | const links = searches.map(search => ({ 79 | title: replaceKeywords(search.title, keywords), 80 | url: replaceKeywords(search.url, keywords) 81 | })); 82 | 83 | return makeHover( 84 | tagPos, 85 | 11, 86 | links 87 | .map(link => `[${link.title}](${encodeURI(link.url)})`) 88 | .join(' \n') 89 | ); 90 | } 91 | 92 | // Hover for VR 93 | const vrPos = match[0].length - match[3].length; 94 | if ( 95 | position.character >= vrPos && 96 | position.character <= vrPos + match[3].length 97 | ) { 98 | // The cursor is hovering on the VR part 99 | const posInVr = position.character - vrPos; 100 | const vr = extractKeyword(match[3], posInVr, '|'); 101 | const hover = vrDict[vr.str] || 'Unknown VR'; 102 | return makeHover(vrPos + vr.start, vr.str.length, hover); 103 | } 104 | 105 | // Hover for tag values after '=' 106 | const valuePos = line.indexOf(' = ') + 3; 107 | if (valuePos > 2 && position.character >= valuePos) { 108 | // The cursor is hovering on the value part 109 | const tagValue = line.substring(valuePos); 110 | const posInTagValue = position.character - valuePos; 111 | const word = extractKeyword(tagValue, posInTagValue); 112 | if (tag in valueDict && word.str in valueDict[tag]) { 113 | const description = valueDict[tag][word.str]; 114 | return makeHover(valuePos + word.start, word.str.length, description); 115 | } 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DICOM Dump for Visual Studio Code 2 | 3 | A [Visual Studio Code][vsc] (vscode) extension that dumps 
[DICOM][dicom] tag contents. DICOM is a standard file format for medical images. 4 | 5 | [vsc]: https://code.visualstudio.com/ 6 | [dicom]: https://www.dicomstandard.org/ 7 | 8 | ## Usage 9 | 10 | ![Screenshot](https://raw.githubusercontent.com/smikitky/vscode-dicom-dump/master/doc/screenshot.png) 11 | 12 | Open a context menu on a DICOM file and select "DICOM: Dump DICOM tags". 13 | 14 | ## Features 15 | 16 | - Dumps all DICOM tags in human-readable format (except binary data). 17 | - Understands value representation (VR) of most standard DICOM tags. 18 | - Provides a hover for some hard-to-remember DICOM keywords and UIDs.
19 | ![Screenshot](https://raw.githubusercontent.com/smikitky/vscode-dicom-dump/master/doc/screenshot-values.png) 20 | - Basic support for character encodings. 21 | - 100% JavaScript. Does not require any external binary dependencies like DCMTK. 22 | 23 | ## Configuration 24 | 25 | - `dicom.alwaysShowMenu` (default = `false`) controls the visibility of the 26 | dump menus. When set to true, the menus will show up regardless of the 27 | file extension. When set to false, the menus will show up when the 28 | extension of the file is `*.dcm` or `*.dicom`. 29 | 30 | - `dicom.showPrivateTags` (default = `false`) controls the 31 | visibility of DICOM private tags. Set this to `true` to dump everything. 32 | Note that many private tags have 'UN' (unknown) VR type, which means 33 | this extension does not know how to stringify them. 34 | 35 | - `dicom.dictionary` (default = `{}`) modifies or adds entries to 36 | the standard DICOM dictionary. Example: 37 | 38 | ```json 39 | { 40 | "dicom.dictionary": { 41 | "01F51247": { "vr": "US", "name": "myPrivateNumericalTag" }, 42 | "01F51248": { "vr": "LO", "name": "myPrivateTextTag", "forceVr": true } 43 | } 44 | } 45 | ``` 46 | 47 | `forceVr: true` will forcibly overwrite the VR type even if 48 | another type is explicitly specified in the DICOM file. 49 | This may allow you to sniff the contents of some private tags. 50 | 51 | - `dicom.searches` (default = `[]`) provides a quick link to your favorite 52 | DICOM search engine when the mouse hovers on a tag string. Example: 53 | 54 | ```json 55 | { 56 | "dicom.searches": [ 57 | { 58 | "title": "Search {GGGG},{EEEE} on Google", 59 | "url": "https://www.google.com/search?q=DICOM%20{GGGG},{EEEE}" 60 | } 61 | ] 62 | } 63 | ``` 64 | 65 | Four keywords (`{GGGG}`, `{gggg}`, `{EEEE}` and `{eeee}`) will be replaced. 66 | 67 | ## Troubleshooting 68 | 69 | **My DICOM file does not load at all!**: Can you open that file with [dicom-parser's online demo][demo]? If not, probably your DICOM file is not standard-compliant, and there is little I can do. Some DICOM implementations are tolerant enough to open mildly broken files. Just because you can view your file with <insert your favorite viewer here> does not mean the file is not corrupted. If you could open the file with the demo above and are still getting an error from this extension, feel free to report it as a bug. 70 | 71 | [demo]: https://github.com/cornerstonejs/dicomParser 72 | 73 | **The "Dump DICOM tags" context menu doesn't show up!**: By default, the menus will be displayed only when the file extension is `*.dcm` or `*.dicom`. Please check the `dicom.alwaysShowMenu` option. 74 | 75 | **Patient/institution names are garbled!**: Currently the character encoding support is limited and buggy, and it's partially due to the fact that DICOM uses rare character encodings not supported by iconv-lite. Also note that some DICOM implementations store multibyte strings with a totally wrong encoding (e.g., Japanese SJIS). I'd rather not support all sorts of malformed files "in the wild", but reasonable suggestions and PRs are welcome. 76 | 77 | ## Known Issues / Limitations 78 | 79 | **USE AT YOUR OWN RISK. DO NOT USE THIS FOR CLINICAL PURPOSES.** 80 | 81 | - Cannot display the image (pixel/voxel data) itself. 82 | - It's not possible to modify DICOM files. 83 | 84 | ## Bugs / PRs 85 | 86 | Please use GitHub's issue system. 87 | 88 | ## Acknowledgement 89 | 90 | This extension is based on the following awesome packages. 
91 | 92 | - [dicom-parser][parser] 93 | - [dicom-data-dictionary][dictionary] 94 | 95 | [parser]: https://www.npmjs.com/package/dicom-parser 96 | [dictionary]: https://www.npmjs.com/package/dicom-data-dictionary 97 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "vscode-dicom", 3 | "displayName": "DICOM Tag Dump", 4 | "description": "Shows the list of DICOM tags in the editor.", 5 | "icon": "icon.png", 6 | "version": "1.4.0", 7 | "publisher": "smikitky", 8 | "engines": { 9 | "vscode": "^1.46.0" 10 | }, 11 | "license": "MIT", 12 | "repository": { 13 | "type": "git", 14 | "url": "https://github.com/smikitky/vscode-dicom-dump" 15 | }, 16 | "bugs": { 17 | "url": "https://github.com/smikitky/vscode-dicom-dump/issues" 18 | }, 19 | "categories": [ 20 | "Other" 21 | ], 22 | "activationEvents": [ 23 | "onLanguage:dicom", 24 | "*" 25 | ], 26 | "main": "./out/extension", 27 | "contributes": { 28 | "configuration": [ 29 | { 30 | "type": "object", 31 | "properties": { 32 | "dicom.alwaysShowMenu": { 33 | "description": "When true, shows the DICOM dump menus regardless of the file extension.", 34 | "type": "boolean" 35 | }, 36 | "dicom.searches": { 37 | "description": "Provides a link to your favorite DICOM search engine with the tag name.", 38 | "type": "array", 39 | "items": { 40 | "type": "object", 41 | "properties": { 42 | "title": { 43 | "type": "string", 44 | "description": "Link text. Keywords {GGGG}, {gggg}, {EEEE}, {eeee} will be replaced." 45 | }, 46 | "url": { 47 | "type": "string", 48 | "description": "Link URL. Keywords {GGGG}, {gggg}, {EEEE}, {eeee} will be replaced." 49 | } 50 | }, 51 | "required": [ 52 | "title", 53 | "url" 54 | ] 55 | } 56 | }, 57 | "dicom.showPrivateTags": { 58 | "description": "Controls the visibility of DICOM private tags. Private tags without specific value representation may not render properly.", 59 | "type": "boolean" 60 | }, 61 | "dicom.dictionary": { 62 | "description": "The list of known DICOM tags that will be used along with standard tags. Each key must be an 8-digit uppercase hex string representing the DICOM group/element.", 63 | "type": "object", 64 | "patternProperties": { 65 | "^[0-9A-F]{8}$": { 66 | "type": "object", 67 | "properties": { 68 | "vr": { 69 | "description": "Value representation name, such as 'SL'.", 70 | "type": "string", 71 | "pattern": "^[A-Z]{2}$" 72 | }, 73 | "name": { 74 | "description": "Element name (camelCase recommended).", 75 | "type": "string" 76 | }, 77 | "forceVr": { 78 | "description": "Force this VR type for this element. 
Use with care.", 79 | "type": "boolean" 80 | } 81 | }, 82 | "required": [ 83 | "vr", 84 | "name" 85 | ] 86 | } 87 | }, 88 | "additionalProperties": false 89 | } 90 | } 91 | } 92 | ], 93 | "commands": [ 94 | { 95 | "command": "dicom.showTags", 96 | "title": "DICOM: Dump DICOM tags" 97 | }, 98 | { 99 | "command": "dicom.dumpAsJson", 100 | "title": "DICOM: Dump DICOM tags as JSON" 101 | } 102 | ], 103 | "menus": { 104 | "explorer/context": [ 105 | { 106 | "command": "dicom.showTags", 107 | "group": "dicom", 108 | "when": "resourceLangId == dicom || config.dicom.alwaysShowMenu" 109 | }, 110 | { 111 | "command": "dicom.dumpAsJson", 112 | "group": "dicom", 113 | "when": "resourceLangId == dicom || config.dicom.alwaysShowMenu" 114 | } 115 | ] 116 | }, 117 | "languages": [ 118 | { 119 | "id": "dicom", 120 | "extensions": [ 121 | ".dcm", 122 | ".dicom" 123 | ], 124 | "aliases": [ 125 | "DICOM", 126 | "dcm" 127 | ] 128 | }, 129 | { 130 | "id": "dicom-dump", 131 | "extensions": [ 132 | ".dcmdump" 133 | ], 134 | "aliases": [ 135 | "DICOM dump" 136 | ] 137 | } 138 | ], 139 | "grammars": [ 140 | { 141 | "language": "dicom-dump", 142 | "scopeName": "text.dicom-dump", 143 | "path": "./syntaxes/dicom-dump.tmLanguage.json" 144 | } 145 | ] 146 | }, 147 | "scripts": { 148 | "vscode:prepublish": "rimraf out && webpack --mode production", 149 | "compile": "webpack --mode development", 150 | "lint": "eslint \"src/**/*.ts\"", 151 | "watch": "webpack --mode development", 152 | "test": "tsc -p ./ && mocha --ui tdd out/test/encConverter.test.js" 153 | }, 154 | "devDependencies": { 155 | "@types/glob": "^7.1.2", 156 | "@types/mocha": "^7.0.2", 157 | "@types/node": "^14.0.13", 158 | "@types/pify": "^3.0.2", 159 | "@types/qs": "^6.9.3", 160 | "@types/vscode": "^1.46.0", 161 | "@typescript-eslint/eslint-plugin": "^3.3.0", 162 | "@typescript-eslint/parser": "^3.3.0", 163 | "eslint": "^7.3.0", 164 | "glob": "^7.1.6", 165 | "mocha": "^8.0.1", 166 | "prettier": "^2.0.5", 167 | "rimraf": "^3.0.2", 168 | "ts-loader": "^7.0.5", 169 | "typescript": "^3.9.5", 170 | "vscode-test": "^1.4.0", 171 | "webpack": "^4.43.0", 172 | "webpack-cli": "^3.3.12" 173 | }, 174 | "prettier": { 175 | "singleQuote": true, 176 | "arrowParens": "avoid", 177 | "trailingComma": "none" 178 | }, 179 | "dependencies": { 180 | "dicom-data-dictionary": "^0.3.1", 181 | "dicom-parser": "^1.8.5", 182 | "iconv-lite": "^0.6.0", 183 | "jconv": "^0.1.5", 184 | "qs": "^6.9.4" 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /src/encConverter.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * EncConverter takes byte arrays from DICOM file and returns an 3 | * ordinary JavaScript Unicode string. 4 | * Target VR types are SH (Short String), LO (Long String), ST (Short Text), 5 | * LT (Long Text), PN (Person Name) and UT (Unlimited Text). 6 | */ 7 | export interface EncConverter { 8 | (input: Buffer, vr: string): string; 9 | } 10 | 11 | type Decoder = (input: Buffer) => string; 12 | 13 | /** 14 | * A higher-order function that returns a new function, 15 | * which in turn returns a Decoder. 16 | * We use HoF because we want to load external modules as late as possible. 
17 | * @param encoding 18 | */ 19 | const iconvLite: (encoding: string) => () => Promise<Decoder> = encoding => { 20 | return async () => { 21 | const iconv = await import('iconv-lite'); 22 | return buffer => iconv.decode(buffer, encoding); 23 | }; 24 | }; 25 | 26 | // cf: 27 | // https://github.com/InsightSoftwareConsortium/DCMTK/blob/master/dcmdata/libsrc/dcspchrs.cc#L168 28 | 29 | const encMap: { [key: string]: () => Promise<Decoder> } = { 30 | 'IR 6': async () => b => b.toString('utf8'), // ASCII (but utf8 is compatible) 31 | 'IR 13': iconvLite('sjis'), // Japanese half-width kana (sjis is compatible) 32 | 'IR 87': async () => { 33 | // Japanese JIS kanji 34 | const jconv = await import('jconv'); 35 | return b => { 36 | // HACK: Replace the escape sequence 'ESC ( J' with 'ESC ( B'. 37 | // Both roughly mean "switch to ASCII", 38 | // but jconv currently does not support the former. 39 | const buf = Buffer.from( 40 | b.toString('binary').replace(/\x1b\x28\x4a/g, '\x1b\x28\x42'), 41 | 'binary' 42 | ); 43 | return jconv.decode(buf, 'iso-2022-jp'); 44 | }; 45 | // TODO: Many DICOM files in Japan actually store kanji in 'SJIS' 46 | // rather than JIS. We might do some guessing here. 47 | }, 48 | 'IR 149': async () => { 49 | // EUC-KR is basically the same as ISO-2022-KR 50 | // except that KS X 1001 is implicitly invoked to G1 without 51 | // the escape sequence 'ESC $ ) C'. So we can simply remove this sequence. 52 | const iconv = await iconvLite('euc-kr')(); 53 | return buffer => { 54 | const replaced = Buffer.from( 55 | buffer.toString('binary').replace(/\x1b\x24\x29\x43/g, ''), 56 | 'binary' 57 | ); 58 | return iconv(replaced); 59 | }; 60 | }, 61 | 'IR 100': iconvLite('iso-8859-1'), // Latin-1 62 | 'IR 101': iconvLite('iso-8859-2'), // Latin-2 63 | 'IR 109': iconvLite('iso-8859-3'), // Latin-3 64 | 'IR 110': iconvLite('iso-8859-4'), // Latin-4 65 | 'IR 144': iconvLite('iso-8859-5'), // Cyrillic 66 | 'IR 127': iconvLite('iso-8859-6'), // Arabic 67 | 'IR 126': iconvLite('iso-8859-7'), // Greek 68 | 'IR 138': iconvLite('iso-8859-8'), // Hebrew 69 | 'IR 148': iconvLite('iso-8859-9'), // Latin-5 70 | 'IR 192': iconvLite('utf-8'), // UTF-8 71 | GB18030: iconvLite('gb18030'), // Chinese 72 | GBK: iconvLite('gbk'), // Chinese 73 | 'IR 166': iconvLite('tis620') // Thai 74 | }; 75 | 76 | const createdDecoders = new Map(); 77 | 78 | /** 79 | * Examines the value of SpecificCharacterSet (0008,0005) and 80 | * creates the corresponding EncConverter. 81 | * @param charSet SpecificCharacterSet string from the DICOM file. 82 | */ 83 | export async function createEncConverter( 84 | charSet: string 85 | ): Promise<EncConverter | undefined> { 86 | // SpecificCharacterSet may have more than one value, delimited by '\'. 87 | // If the first value is omitted, it becomes default repertoire, i.e., ASCII. 88 | const charSets = charSet.split('\\').map(s => s.trim()); 89 | if (charSets[0] === '') charSets[0] = 'IR 6'; // ASCII 90 | const decoders: EncConverter[] = []; 91 | 92 | // Now asynchronously create decoders that correspond to each value. 93 | // External modules required for conversion will be lazily-loaded here. 94 | for (const cs of charSets) { 95 | const enc = Object.keys(encMap).find(k => cs.endsWith(k)); 96 | if (!enc) return undefined; 97 | const decoder = createdDecoders.get(enc) || (await encMap[enc]()); 98 | createdDecoders.set(enc, decoder); 99 | decoders.push(decoder); 100 | } 101 | 102 | // Creates the new EncConverter and returns it. 
103 | return (buffer: Buffer, vr: string) => { 104 | if (vr !== 'PN') { 105 | return decoders[0](buffer, vr); 106 | } 107 | // If VR is 'PN', we need to separately decode each 108 | // component delimited by '='. 109 | const components = splitPnComponents(buffer.toString('binary')).map(s => 110 | Buffer.from(s, 'binary') 111 | ); 112 | const decodedComponents = components.map((component, index) => { 113 | const decoder = 114 | index < decoders.length 115 | ? decoders[index] 116 | : decoders[charSets.length - 1]; 117 | return decoder(component, vr); 118 | }); 119 | return decodedComponents.join('='); 120 | }; 121 | } 122 | 123 | /** 124 | * Splits a string using the delimiter '=', 125 | * taking escape sequence into consideration. 126 | * https://en.wikipedia.org/wiki/ISO/IEC_2022 127 | */ 128 | export function splitPnComponents(input: string): string[] { 129 | // Note: this is only necessary for Japanese ISO-2022-JP, 130 | // where kanji characters are invoked to the G0 area. 131 | const len = input.length; 132 | const results: string[] = []; 133 | let escaped = false; 134 | let i = 0; 135 | let start = 0; 136 | while (i < len) { 137 | if (!escaped && input[i] === '=') { 138 | results.push(input.substring(start, i)); 139 | start = i + 1; 140 | i++; 141 | continue; 142 | } 143 | const substr = input.substr(i, 3); 144 | if (substr.match(/^\x1b\$(@|B|\(D)/)) { 145 | // Switch to kanji 146 | escaped = true; 147 | i += 3; 148 | } else if (substr.match(/^\x1b\([BJ]/)) { 149 | // Switch to ASCII / JIS X 0201 150 | escaped = false; 151 | i += 3; 152 | } else { 153 | i++; 154 | } 155 | } 156 | results.push(input.substr(start)); 157 | return results; 158 | } 159 | -------------------------------------------------------------------------------- /src/test/encConverter.test.ts: -------------------------------------------------------------------------------- 1 | import * as assert from 'assert'; 2 | import { createEncConverter } from '../encConverter'; 3 | 4 | // Performs encoding checks using DICOM spec sppendix H to K 5 | // http://dicom.nema.org/dicom/2013/output/chtml/part05/PS3.5.html 6 | suite('createEncConverter', function () { 7 | const encodeToBuffer = (str: string) => { 8 | const replaced = str 9 | .replace(/#.*$/gm, '') 10 | .replace(/(\d\d)\/(\d\d)/g, (m, p1, p2) => 11 | String.fromCharCode((parseInt(p1) << 4) + parseInt(p2)) 12 | ) 13 | .replace(/0x([0-9a-f]{2})/g, (m, p) => 14 | String.fromCharCode(parseInt(p, 16)) 15 | ) 16 | .replace(/\s+/g, ''); 17 | return Buffer.from(replaced, 'binary'); 18 | }; 19 | 20 | test('H: Japanese example 1', async function () { 21 | const ec = await createEncConverter('\\ISO 2022 IR 87'); 22 | const buf = encodeToBuffer(` 23 | 05/09 06/01 06/13 06/01 06/04 06/01 # Yamada 24 | 05/14 # ^ 25 | 05/04 06/01 07/02 06/15 07/05 # Tarou 26 | 03/13 # = 27 | 01/11 02/04 04/02 # ESC $ B 28 | 03/11 03/03 04/05 04/04 # 山田 29 | 01/11 02/08 04/02 # ESC ( B 30 | 05/14 # ^ 31 | 01/11 02/04 04/02 # ESC $ B 32 | 04/02 04/00 04/15 03/10 # 太郎 33 | 01/11 02/08 04/02 # ESC ( B 34 | 03/13 # = 35 | 01/11 02/04 04/02 # ESC $ B 36 | 02/04 06/04 02/04 05/14 02/04 04/00 # やまだ 37 | 01/11 02/08 04/02 # ESC ( B 38 | 05/14 # ^ 39 | 01/11 02/04 04/02 # ESC $ B 40 | 02/04 03/15 02/04 06/13 02/04 02/06 # たろう 41 | 01/11 02/08 04/02 # ESC ( B 42 | `); 43 | const result = ec!(buf, 'PN'); 44 | assert.equal(result, 'Yamada^Tarou=山田^太郎=やまだ^たろう'); 45 | }); 46 | 47 | test('H: Japanese example 2', async function () { 48 | const ec = await createEncConverter('ISO 2022 IR 13\\ISO 2022 IR 87'); 49 | const 
buf = encodeToBuffer(` 50 | 13/04 12/15 12/00 13/14 # ヤマダ 51 | 05/14 # ^ 52 | 12/00 13/11 11/03 # タロウ 53 | 03/13 # = 54 | 01/11 02/04 04/02 # ESC $ B 55 | 03/11 03/03 04/05 04/04 # 山田 56 | 01/11 02/08 04/10 # ESC ( J 57 | 05/14 # ^ 58 | 01/11 02/04 04/02 # ESC $ B 59 | 04/02 04/00 04/15 03/10 # 太郎 60 | 01/11 02/08 04/10 # ESC ( J 61 | 03/13 # = 62 | 01/11 02/04 04/02 # ESC $ B 63 | 02/04 06/04 02/04 05/14 02/04 04/00 # やまだ 64 | 01/11 02/08 04/10 # ESC ( J 65 | 05/14 # ^ 66 | 01/11 02/04 04/02 # ESC $ B 67 | 02/04 03/15 02/04 06/13 02/04 02/06 # たろう 68 | 01/11 02/08 04/10 # ESC ( J 69 | `); 70 | const result = ec!(buf, 'PN'); 71 | assert.equal(result, 'ヤマダ^タロウ=山田^太郎=やまだ^たろう'); 72 | }); 73 | 74 | test('Japanese, 0x3d in leading byte', async function () { 75 | // This is tricky because the kanji 秋 contains 0x3d as the leading byte. 76 | // Taken from JIRA's DICOM test suite 77 | // http://www.jira-net.or.jp/dicom/dicom_data_01_02.html 78 | // File: CR_JPG_IR87a.dcm 79 | const ec = await createEncConverter('\\ISO 2022 IR 87'); 80 | const buf = encodeToBuffer(` 81 | 0x41 0x4b 0x49 0x48 0x41 0x42 0x41 0x52 0x41 # AKIHABARA 82 | 0x5e # ^ 83 | 0x54 0x41 0x52 0x4f # TARO 84 | 0x3d # = 85 | 0x1b 0x24 0x42 # ESC $ B 86 | 0x3d 0x29 0x4d 0x55 0x38 0x36 # 秋葉原 87 | 0x1b 0x28 0x42 # ESC ( B 88 | 0x5e # ^ 89 | 0x1b 0x24 0x42 # ESC $ B 90 | 0x42 0x40 0x4f 0x3a # 太郎 91 | 0x1b 0x28 0x42 # ESC ( B 92 | 0x3d # = 93 | 0x1b 0x24 0x42 # ESC $ B 94 | 0x24 0x22 0x24 0x2d 0x24 0x4f 0x24 0x50 0x24 0x69 # あきはばら 95 | 0x1b 0x28 0x42 # ESC ( B 96 | 0x5e # ^ 97 | 0x1b 0x24 0x42 # ESC $ B 98 | 0x24 0x3f 0x24 0x6d 0x24 0x26 # たろう 99 | 0x1b 0x28 0x42 # ESC ( B 100 | `); 101 | const result = ec!(buf, 'PN'); 102 | assert.equal(result, 'AKIHABARA^TARO=秋葉原^太郎=あきはばら^たろう'); 103 | }); 104 | 105 | test('I: Korean', async function () { 106 | const ec = await createEncConverter('\\ISO 2022 IR 149'); 107 | const buf = encodeToBuffer(` 108 | 04/08 06/15 06/14 06/07 # Hong 109 | 05/14 # ^ 110 | 04/07 06/09 06/12 06/04 06/15 06/14 06/07 # Gildong 111 | 03/13 # = 112 | 01/11 02/04 02/09 04/03 # ESC $ ) C 113 | 15/11 15/03 # 洪 114 | 05/14 # ^ 115 | 01/11 02/04 02/09 04/03 # ESC $ ) C 116 | 13/01 12/14 13/04 13/07 # 吉洞 117 | 03/13 # = 118 | 01/11 02/04 02/09 04/03 # ESC $ ) C 119 | 12/08 10/11 # 홍 120 | 05/14 # ^ 121 | 01/11 02/04 02/09 04/03 # ESC $ ) C 122 | 11/01 14/06 11/05 11/15 # 길동 123 | `); 124 | const result = ec!(buf, 'PN'); 125 | assert.equal(result, 'Hong^Gildong=洪^吉洞=홍^길동'); 126 | }); 127 | 128 | test('J: Chinese with UTF8', async function () { 129 | const ec = await createEncConverter('ISO_IR 192'); 130 | const buf = encodeToBuffer(` 131 | 0x57 0x61 0x6e 0x67 # Wang 132 | 0x5e # ^ 133 | 0x58 0x69 0x61 0x6f 0x44 0x6f 0x6e 0x67 # XiaoDong 134 | 0x3d # = 135 | 0xe7 0x8e 0x8b # 王 136 | 0x5e # ^ 137 | 0xe5 0xb0 0x8f 0xe6 0x9d 0xb1 # 小東 138 | 0x3d # = 139 | `); 140 | const result = ec!(buf, 'PN'); 141 | assert.equal(result, 'Wang^XiaoDong=王^小東='); 142 | }); 143 | 144 | test('J: Chinese with GB18030', async function () { 145 | const ec = await createEncConverter('GB18030'); 146 | const buf = encodeToBuffer(` 147 | 0x57 0x61 0x6e 0x67 # Wang 148 | 0x5e # ^ 149 | 0x58 0x69 0x61 0x6f 0x44 0x6f 0x6e 0x67 # XiaoDong 150 | 0x3d # = 151 | 0xcd 0xf5 # 王 152 | 0x5e # ^ 153 | 0xd0 0xa1 0xb6 0xab # 小东 154 | 0x3d # = 155 | `); 156 | const result = ec!(buf, 'PN'); 157 | assert.equal(result, 'Wang^XiaoDong=王^小东='); 158 | }); 159 | }); 160 | -------------------------------------------------------------------------------- /src/extractor.ts: 
-------------------------------------------------------------------------------- 1 | import { DataSet } from 'dicom-parser'; 2 | import { DicomDataElements, TagInfo } from 'dicom-data-dictionary'; 3 | import { EncConverter } from './encConverter'; 4 | 5 | export interface ParsedElement { 6 | tag: string; // like '(0008,0060)' 7 | vr: string; // like 'CS' 8 | name: string; // like 'modality' 9 | desc?: string; // like 'binary data of length: 2' 10 | text?: string; // like 'MR' 11 | sequenceItems?: ParsedElement[][]; // Only used for 'SQ' element 12 | } 13 | 14 | /** 15 | * Converts tag key into more familiar format like `(0008,0060)` 16 | * @param tag dicom-parser's tag string like 'x00080060' 17 | */ 18 | function formatTag(tag: string): string { 19 | const group = tag.substring(1, 5).toUpperCase(); 20 | const element = tag.substring(5, 9).toUpperCase(); 21 | return `(${group},${element})`; 22 | } 23 | 24 | function numberListToText( 25 | dataSet: DataSet, 26 | key: string, 27 | accessor: string, 28 | valueBytes: number 29 | ): { desc?: string; text?: string } { 30 | // Each numerical value field may contain more than one number value 31 | // due to the value multiplicity (VM) mechanism. 32 | const numElements = dataSet.elements[key].length / valueBytes; 33 | if (!numElements) return { desc: 'empty value' }; 34 | const numbers: number[] = []; 35 | for (let i = 0; i < numElements; i++) { 36 | numbers.push((dataSet)[accessor](key, i) as number); 37 | } 38 | return { text: numbers.join('\\') }; 39 | } 40 | 41 | function elementToText( 42 | dataSet: DataSet, 43 | key: string, 44 | vr: string, 45 | rootDataSet: DataSet, 46 | encConverter: EncConverter 47 | ): { desc?: string; text?: string } { 48 | const element = dataSet.elements[key]; 49 | 50 | if (vr.indexOf('|') >= 0) { 51 | // This means the true VR type depends on other DICOM element. 52 | const vrs = vr.split('|'); 53 | if (vrs.every(v => ['OB', 'OW', 'OD', 'OF'].indexOf(v) >= 0)) { 54 | // This is a binary data, anyway, so treat it as such 55 | return elementToText(dataSet, key, 'OB', rootDataSet, encConverter); 56 | } else if (vrs.every(v => ['US', 'SS'].indexOf(v) >= 0)) { 57 | const pixelRepresentation = rootDataSet.uint16('x00280103'); 58 | switch (pixelRepresentation) { 59 | case 0: 60 | return elementToText(dataSet, key, 'US', rootDataSet, encConverter); 61 | case 1: 62 | return elementToText(dataSet, key, 'SS', rootDataSet, encConverter); 63 | default: 64 | return { desc: 'error: could not determine pixel representation' }; 65 | } 66 | } else { 67 | return { desc: 'error: could not guess VR of this tag' }; 68 | } 69 | } 70 | 71 | const asHexDump = () => { 72 | const bin = Buffer.from( 73 | dataSet.byteArray.buffer, 74 | element.dataOffset, 75 | element.length 76 | ); 77 | return `bin: 0x${bin.toString('hex')}`; 78 | }; 79 | 80 | switch (vr) { 81 | case 'OB': // Other Byte String 82 | case 'OW': // Other Word String 83 | case 'OD': // Other Double String 84 | case 'OF': // Other Float String 85 | case '??': // VR not provided at all. Should not happen. 86 | return element.length <= 16 87 | ? { desc: asHexDump() } 88 | : { desc: `binary data of length: ${element.length}` }; 89 | case 'SQ': { 90 | if (Array.isArray(element.items)) { 91 | const len = element.items.length; 92 | return { desc: `sequence of ${len} item${len !== 1 ? 
's' : ''}` }; 93 | } else return { desc: 'error: broken sequence' }; // should not happen 94 | } 95 | case 'AT': { 96 | // Attribute Tag 97 | const group = dataSet.uint16(key, 0) as number; 98 | const groupHexStr = ('0000' + group.toString(16)).substr(-4); 99 | const element = dataSet.uint16(key, 1) as number; 100 | const elementHexStr = ('0000' + element.toString(16)).substr(-4); 101 | return { text: '0x' + groupHexStr + elementHexStr }; 102 | } 103 | case 'FL': 104 | return numberListToText(dataSet, key, 'float', 4); 105 | case 'FD': 106 | return numberListToText(dataSet, key, 'double', 8); 107 | case 'UL': 108 | return numberListToText(dataSet, key, 'uint32', 4); 109 | case 'SL': 110 | return numberListToText(dataSet, key, 'int32', 4); 111 | case 'US': 112 | return numberListToText(dataSet, key, 'uint16', 2); 113 | case 'SS': 114 | return numberListToText(dataSet, key, 'int16', 2); 115 | case 'UN': { 116 | // "Unknown" VR. We do not know how to stringify this value, 117 | // but we try to interpret it as an ASCII string. 118 | const str = dataSet.string(key); 119 | const isAscii = typeof str === 'string' && /^[\x20-\x7E]+$/.test(str); 120 | if (isAscii) return { text: str }; 121 | return element.length <= 16 122 | ? { desc: asHexDump() } 123 | : { desc: `seemingly binary data (UN) of length: ${element.length}` }; 124 | } 125 | case 'SH': 126 | case 'LO': 127 | case 'ST': 128 | case 'LT': 129 | case 'PN': 130 | case 'UT': { 131 | // These are subject to Specific Character Set (0008,0005) 132 | const bin = Buffer.from( 133 | dataSet.byteArray.buffer, 134 | element.dataOffset, 135 | element.length 136 | ); 137 | return { text: encConverter(bin, vr) }; 138 | } 139 | default: { 140 | // Other string VRs which use ASCII chars, such as DT 141 | const text = dataSet.string(key); 142 | if (typeof text === 'undefined') return { desc: 'undefined' }; 143 | if (!text.length) return { desc: 'empty string' }; 144 | return { text }; 145 | } 146 | } 147 | } 148 | 149 | function findTagInfo( 150 | dictionary: DicomDataElements, 151 | tag: string 152 | ): (TagInfo & { forceVr?: string }) | undefined { 153 | const key = tag.substring(1, 9).toUpperCase(); 154 | if (key in dictionary) return dictionary[key]; 155 | if (/0000$/.test(key)) { 156 | // (gggg,0000) is a _retired_ Group Length tag 157 | // http://dicom.nema.org/dicom/2013/output/chtml/part05/sect_7.2.html 158 | return { name: 'GroupLength', vr: 'UL' }; 159 | } 160 | return undefined; 161 | } 162 | 163 | /** 164 | * Iterates over the DICOM dataSet from dicom-parser and creates a 165 | * human-readable tree, which then can be transformed into a text document. 166 | * @param dataSet The raw results from dicom-parser. 167 | * @param deps Various options and dependencies. 
168 | */ 169 | export function buildTreeFromDataSet( 170 | dataSet: DataSet, 171 | deps: { 172 | rootDataSet: DataSet; 173 | showPrivateTags: boolean; 174 | dictionary: DicomDataElements; 175 | encConverter: EncConverter; 176 | } 177 | ): ParsedElement[] { 178 | const { rootDataSet, showPrivateTags, dictionary, encConverter } = deps; 179 | const entries: ParsedElement[] = []; 180 | const keys = Object.keys(dataSet.elements).sort(); 181 | for (const key of keys) { 182 | const element = dataSet.elements[key]; 183 | 184 | // A tag is private if the group number is odd 185 | const isPrivateTag = /[13579bdf]/i.test(element.tag[4]); 186 | if (isPrivateTag && !showPrivateTags) continue; 187 | 188 | // "Item delimitation" tag in a sequence 189 | if (key === 'xfffee00d') continue; 190 | 191 | const tagInfo = findTagInfo(dictionary, element.tag); 192 | const vr: string = 193 | (tagInfo && tagInfo.forceVr && tagInfo.vr) || 194 | element.vr || 195 | (tagInfo ? tagInfo.vr : '??'); 196 | 197 | const textOrDesc = elementToText( 198 | dataSet, 199 | key, 200 | vr, 201 | rootDataSet, 202 | encConverter 203 | ); 204 | entries.push({ 205 | tag: formatTag(element.tag), 206 | name: tagInfo ? tagInfo.name : '?', 207 | vr, 208 | ...textOrDesc, 209 | sequenceItems: Array.isArray(element.items) 210 | ? element.items.map(item => buildTreeFromDataSet(item.dataSet, deps)) 211 | : undefined 212 | }); 213 | } 214 | return entries; 215 | } 216 | -------------------------------------------------------------------------------- /src/valueDict.ts: -------------------------------------------------------------------------------- 1 | interface ValueDict { 2 | [tag: string]: { 3 | [tagValue: string]: string; 4 | }; 5 | } 6 | 7 | const dict: ValueDict = { 8 | '0002,0010': { 9 | // Transfer Syntax UID 10 | '1.2.840.10008.1.2': 'Implicit VR Little Endian', 11 | '1.2.840.10008.1.2.1': 'Explicit VR Little Endian', 12 | '1.2.840.10008.1.2.1.99': 'Deflated Explicit VR Little Endian', 13 | '1.2.840.10008.1.2.2': 'Explicit VR Big Endian', 14 | '1.2.840.10008.1.2.4.50': 'JPEG Baseline (Process 1):', 15 | '1.2.840.10008.1.2.4.51': 'JPEG Baseline (Processes 2 & 4):', 16 | '1.2.840.10008.1.2.4.52': 'JPEG Extended (Processes 3 & 5)', 17 | '1.2.840.10008.1.2.4.53': 18 | 'JPEG Spectral Selection, Nonhierarchical (Processes 6 & 8)', 19 | '1.2.840.10008.1.2.4.54': 20 | 'JPEG Spectral Selection, Nonhierarchical (Processes 7 & 9)', 21 | '1.2.840.10008.1.2.4.55': 22 | 'JPEG Full Progression, Nonhierarchical (Processes 10 & 12)', 23 | '1.2.840.10008.1.2.4.56': 24 | 'JPEG Full Progression, Nonhierarchical (Processes 11 & 13)', 25 | '1.2.840.10008.1.2.4.57': 'JPEG Lossless, Nonhierarchical (Processes 14)', 26 | '1.2.840.10008.1.2.4.58': 'JPEG Lossless, Nonhierarchical (Processes 15)', 27 | '1.2.840.10008.1.2.4.59': 'JPEG Extended, Hierarchical (Processes 16 & 18)', 28 | '1.2.840.10008.1.2.4.60': 'JPEG Extended, Hierarchical (Processes 17 & 19)', 29 | '1.2.840.10008.1.2.4.61': 30 | 'JPEG Spectral Selection, Hierarchical (Processes 20 & 22)', 31 | '1.2.840.10008.1.2.4.62': 32 | 'JPEG Spectral Selection, Hierarchical (Processes 21 & 23)', 33 | '1.2.840.10008.1.2.4.63': 34 | 'JPEG Full Progression, Hierarchical (Processes 24 & 26)', 35 | '1.2.840.10008.1.2.4.64': 36 | 'JPEG Full Progression, Hierarchical (Processes 25 & 27)', 37 | '1.2.840.10008.1.2.4.65': 'JPEG Lossless, Nonhierarchical (Process 28)', 38 | '1.2.840.10008.1.2.4.66': 'JPEG Lossless, Nonhierarchical (Process 29)', 39 | '1.2.840.10008.1.2.4.70': 40 | 'JPEG Lossless, Nonhierarchical, 
First-Order Prediction', 41 | '1.2.840.10008.1.2.4.80': 'JPEG-LS Lossless Image Compression', 42 | '1.2.840.10008.1.2.4.81': 43 | 'JPEG-LS Lossy (Near-Lossless) Image Compression', 44 | '1.2.840.10008.1.2.4.90': 'JPEG 2000 Image Compression (Lossless Only)', 45 | '1.2.840.10008.1.2.4.91': 'JPEG 2000 Image Compression', 46 | '1.2.840.10008.1.2.4.92': 47 | 'JPEG 2000 Part 2 Multicomponent Image Compression (Lossless Only)', 48 | '1.2.840.10008.1.2.4.93': 49 | 'JPEG 2000 Part 2 Multicomponent Image Compression', 50 | '1.2.840.10008.1.2.4.94': 'JPIP Referenced', 51 | '1.2.840.10008.1.2.4.95': 'JPIP Referenced Deflate', 52 | '1.2.840.10008.1.2.5': 'RLE Lossless', 53 | '1.2.840.10008.1.2.6.1': 'RFC 2557 MIME Encapsulation', 54 | '1.2.840.10008.1.2.4.100': 'MPEG2 Main Profile / Main Level', 55 | '1.2.840.10008.1.2.4.102': 'MPEG-4 AVC/H.264 High Profile / Level 4.1', 56 | '1.2.840.10008.1.2.4.103': 57 | 'MPEG-4 AVC/H.264 BD-compatible High Profile / Level 4.1' 58 | }, 59 | '0008,0005': { 60 | // Specific Character Set 61 | 'ISO_IR 100': 'Latin alphabet No. 1', 62 | 'ISO_IR 101': 'Latin alphabet No. 2', 63 | 'ISO_IR 109': 'Latin alphabet No. 3', 64 | 'ISO_IR 110': 'Latin alphabet No. 4', 65 | 'ISO_IR 144': 'Cyrillic', 66 | 'ISO_IR 127': 'Arabic', 67 | 'ISO_IR 126': 'Greek', 68 | 'ISO_IR 138': 'Hebrew', 69 | 'ISO_IR 148': 'Latin alphabet No. 5', 70 | 'ISO_IR 13': 'Japanese (half-width katakana)', 71 | 'ISO_IR 166': 'Thai', 72 | 'ISO_IR 192': 'Unicode in UTF-8', 73 | 'ISO 2022 IR 6': 'Default repertoire', 74 | 'ISO 2022 IR 100': 'Latin alphabet No. 1', 75 | 'ISO 2022 IR 101': 'Latin alphabet No. 2', 76 | 'ISO 2022 IR 109': 'Latin alphabet No. 3', 77 | 'ISO 2022 IR 110': 'Latin alphabet No. 4', 78 | 'ISO 2022 IR 144': 'Cyrillic', 79 | 'ISO 2022 IR 127': 'Arabic', 80 | 'ISO 2022 IR 126': 'Greek', 81 | 'ISO 2022 IR 138': 'Hebrew', 82 | 'ISO 2022 IR 148': 'Latin alphabet No.
5', 83 | 'ISO 2022 IR 13': 'Japanese (half-width katakana)', 84 | 'ISO 2022 IR 166': 'Thai', 85 | 'ISO 2022 IR 87': 'Japanese (kanji)', 86 | 'ISO 2022 IR 159': 'Japanese (supplementary kanji)', 87 | 'ISO 2022 IR 149': 'Korean', 88 | GB18030: 'Chinese GB18030', 89 | GBK: 'Chinese GBK' 90 | }, 91 | '0008,0060': { 92 | // Modality 93 | AR: 'Autorefraction', 94 | AU: 'Audio', 95 | BDUS: 'Bone Densitometry (ultrasound)', 96 | BI: 'Biomagnetic imaging', 97 | BMD: 'Bone Densitometry (X-Ray)', 98 | CR: 'Computed Radiography', 99 | CT: 'Computed Tomography', 100 | DG: 'Diaphanography', 101 | DOC: 'Document', 102 | DX: 'Digital Radiography', 103 | ECG: 'Electrocardiography', 104 | EPS: 'Cardiac Electrophysiology', 105 | ES: 'Endoscopy', 106 | FID: 'Fiducials', 107 | GM: 'General Microscopy', 108 | HC: 'Hard Copy', 109 | HD: 'Hemodynamic Waveform', 110 | IO: 'Intra-Oral Radiography', 111 | IOL: 'Intraocular Lens Data', 112 | IVOCT: 'Intravascular Optical Coherence Tomography', 113 | IVUS: 'Intravascular Ultrasound', 114 | KER: 'Keratometry', 115 | KO: 'Key Object Selection', 116 | LEN: 'Lensometry', 117 | LS: 'Laser surface scan', 118 | MG: 'Mammography', 119 | MR: 'Magnetic Resonance', 120 | NM: 'Nuclear Medicine', 121 | OAM: 'Ophthalmic Axial Measurements', 122 | OCT: 'Optical Coherence Tomography (non-Ophthalmic)', 123 | OPM: 'Ophthalmic Mapping', 124 | OP: 'Ophthalmic Photography', 125 | OPT: 'Ophthalmic Tomography', 126 | OPV: 'Ophthalmic Visual Field', 127 | OSS: 'Optical Surface Scan', 128 | OT: 'Other', 129 | PLAN: 'Plan', 130 | PR: 'Presentation State', 131 | PT: 'Positron emission tomography (PET)', 132 | PX: 'Panoramic X-Ray', 133 | REG: 'Registration', 134 | RESP: 'Respiratory Waveform', 135 | RF: 'Radio Fluoroscopy', 136 | RG: 'Radiographic imaging (conventional film/screen)', 137 | RTDOSE: 'Radiotherapy Dose', 138 | RTIMAGE: 'Radiotherapy Image', 139 | RTPLAN: 'Radiotherapy Plan', 140 | RTRECORD: 'RT Treatment Record', 141 | RTSTRUCT: 'Radiotherapy Structure Set', 142 | RWV: 'Real World Value Map', 143 | SEG: 'Segmentation', 144 | SMR: 'Stereometric Relationship', 145 | SM: 'Slide Microscopy', 146 | SRF: 'Subjective Refraction', 147 | SR: 'SR Document', 148 | STAIN: 'Automated Slide Stainer', 149 | TG: 'Thermography', 150 | US: 'Ultrasound', 151 | VA: 'Visual Acuity', 152 | XA: 'X-Ray Angiography', 153 | XC: 'External-camera Photography' 154 | }, 155 | '0008,0064': { 156 | // Conversion Type 157 | DV: 'Digitized Video', 158 | DI: 'Digital Interface', 159 | DF: 'Digitized Film', 160 | WSD: 'Workstation', 161 | SD: 'Scanned Document', 162 | SI: 'Scanned Image', 163 | DRW: 'Drawing', 164 | SYN: 'Synthetic Image' 165 | }, 166 | '0018,0020': { 167 | // Scanning Sequence 168 | SE: 'Spin Echo', 169 | IR: 'Inversion Recovery', 170 | GR: 'Gradient Recalled', 171 | EP: 'Echo Planar', 172 | RM: 'Research Mode' 173 | }, 174 | '0018,0021': { 175 | // Sequence Variant 176 | SK: 'Segmented k-space', 177 | MTC: 'Magnetization transfer contrast', 178 | SS: 'Steady state', 179 | TRSS: 'Time reversed steady state', 180 | SP: 'Spoiled', 181 | MP: 'MAG prepared', 182 | OSP: 'Oversampling phase', 183 | NONE: 'No sequence variant' 184 | }, 185 | '0018,0022': { 186 | // Scan Options 187 | PER: 'Phase Encode Reordering', 188 | RG: 'Respiratory Gating', 189 | CG: 'Cardiac Gating', 190 | PPG: 'Peripheral Pulse Gating', 191 | FC: 'Flow Compensation', 192 | PFF: 'Partial Fourier - Frequency', 193 | PFP: 'Partial Fourier - Phase', 194 | SP: 'Spatial Presaturation', 195 | FS: 'Fat Saturation' 196 | }, 197 | 
'0018,0071': { 198 | // Acquisition Termination Condition 199 | CNTS: 'Counts', 200 | DENS: 'Density', 201 | MANU: 'Manual', 202 | OVFL: 'Data overflow', 203 | TIME: 'Time', 204 | TRIG: 'Physiological trigger' 205 | }, 206 | '0018,1140': { 207 | // Rotation Direction 208 | CW: 'Clockwise', 209 | CC: 'Counter clockwise' 210 | }, 211 | '0018,1155': { 212 | // Radiation Setting 213 | SC: 'Low dose exposure generally corresponding to fluoroscopic settings', 214 | GR: 'High dose for diagnostic quality image acquisition' 215 | }, 216 | '0018,1181': { 217 | // Collimator Type 218 | PARA: 'Parallel (default)', 219 | PINH: 'Pinhole', 220 | FANB: 'Fan-beam', 221 | CONE: 'Cone-beam', 222 | SLNT: 'Slant hole', 223 | ASTG: 'Astigmatic', 224 | DIVG: 'Diverging', 225 | NONE: 'No collimator', 226 | UNKN: 'Unknown' 227 | }, 228 | '0018,1301': { 229 | // Whole Body Technique 230 | '1PS': 'One pass', 231 | '2PS': 'Two pass', 232 | PCN: 'Patient contour following employed', 233 | MSP: 'Multiple static frames collected into a whole body frame' 234 | }, 235 | '0018,5100': { 236 | // Patient Position 237 | HFP: 'Head First-Prone', 238 | HFS: 'Head First-Supine', 239 | HFDR: 'Head First-Decubitus Right', 240 | HFDL: 'Head First-Decubitus Left', 241 | FFDR: 'Feet First-Decubitus Right', 242 | FFDL: 'Feet First-Decubitus Left', 243 | FFP: 'Feet First-Prone', 244 | FFS: 'Feet First-Supine' 245 | }, 246 | '0018,5101': { 247 | // View Position 248 | AP: 'Anterior/Posterior', 249 | PA: 'Posterior/Anterior', 250 | LL: 'Left Lateral', 251 | RL: 'Right Lateral', 252 | RLD: 'Right Lateral Decubitus', 253 | LLD: 'Left Lateral Decubitus', 254 | RLO: 'Right Lateral Oblique', 255 | LLO: 'Left Lateral Oblique' 256 | }, 257 | '0018,9372': { 258 | // Multi-energy Detector Type 259 | INTEGRATING: 'Physical detector integrates the full X-Ray spectrum.', 260 | MULTILAYER: 261 | 'Physical detector layers absorb different parts of the X-Ray spectrum.', 262 | PHOTON_COUNTING: 263 | 'Physical detector counts photons with energy discrimination capability.' 264 | }, 265 | '0028,0004': { 266 | // Photometric Interpretation 267 | MONOCHROME1: 268 | 'Pixel data represent a single monochrome image plane. The minimum sample value is intended to be displayed as white after any VOI gray scale transformations have been performed.', 269 | MONOCHROME2: 270 | 'Pixel data represent a single monochrome image plane. The minimum sample value is intended to be displayed as black after any VOI gray scale transformations have been performed.', 271 | 'PALETTE COLOR': 272 | 'Pixel data describe a color image with a single sample per pixel (single image plane). The pixel value is used as an index into each of the Red, Blue, and Green Palette Color Lookup Tables.', 273 | RGB: 274 | 'Pixel data represent a color image described by red, green, and blue image planes. 
The minimum sample value for each color plane represents minimum intensity of the color.', 275 | YBR_FULL: 276 | 'Pixel data represent a color image described by one luminance (Y) and two chrominance planes (CB and CR).', 277 | YBR_FULL_422: 278 | 'Pixel data represent a color image described by one luminance (Y) and two chrominance planes (CB and CR).', 279 | YBR_PARTIAL_422: 280 | 'Pixel data represent a color image described by one luminance (Y) and two chrominance planes (CB and CR).', 281 | YBR_PARTIAL_420: 282 | 'Pixel data represent a color image described by one luminance (Y) and two chrominance planes (CB and CR).', 283 | YBR_ICT: 284 | 'Irreversible Color Transformation: Pixel data represent a color image described by one luminance (Y) and two chrominance planes (CB and CR).', 285 | YBR_RCT: 286 | 'Reversible Color Transformation: Pixel data represent a color image described by one luminance (Y) and two chrominance planes (CB and CR).' 287 | }, 288 | '0028,0006': { 289 | // Planar Configuration 290 | '0': 291 | 'The sample values for the first pixel are followed by the sample values for the second pixel, etc. For RGB images, this means the order of the pixel values sent shall be R1, G1, B1, R2, G2, B2, …, etc.', 292 | '1': 293 | 'Each color plane shall be sent contiguously. For RGB images, this means the order of the pixel values sent is R1, R2, R3, …, G1, G2, G3, …, B1, B2, B3, etc.' 294 | }, 295 | '0028,0014': { 296 | // Ultrasound Color Data Present 297 | '00': 'Ultrasound color data not present in image', 298 | '01': 'Ultrasound color data is present in image' 299 | }, 300 | '0028,0051': { 301 | // Corrected Image 302 | UNIF: 'Flood corrected', 303 | COR: 'Center of rotation corrected', 304 | NCO: 'Non-circular orbit corrected', 305 | DECY: 'Decay corrected', 306 | ATTN: 'Attenuation corrected', 307 | SCAT: 'Scatter corrected', 308 | DTIM: 'Dead time corrected', 309 | NRGY: 'Energy corrected', 310 | LIN: 'Linearity corrected', 311 | MOTN: 'Motion corrected', 312 | CLN: 313 | 'Count loss normalization; Any type of normalization applied to correct for count loss in Time Slots.' 314 | }, 315 | '0028,0103': { 316 | // Pixel Representation 317 | '0': 'Pixel samples are represented in unsigned integer.', 318 | '1': "Pixel samples are represented in 2's complement (signed integer)." 319 | }, 320 | '0028,1040': { 321 | // Pixel Intensity Relationship 322 | LIN: 'Pixel samples are linearly proportional to X-Ray beam intensity.', 323 | LOG: 324 | 'Pixel samples are logarithmically proportional to X-Ray beam intensity.' 325 | }, 326 | '0028,2110': { 327 | // Lossy Image Compression 328 | '00': 'Image has NOT been subjected to lossy compression.', 329 | '01': 'Image has been subjected to lossy compression.' 330 | } 331 | }; 332 | 333 | export default dict; 334 | --------------------------------------------------------------------------------
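Note: the following is a minimal usage sketch, not part of the repository. It illustrates how a value dictionary like the one in src/valueDict.ts could be combined with dicom-parser to print a raw tag value alongside a human-readable meaning. The input file name sample.dcm, the import path ./valueDict, and the describe helper are illustrative assumptions, not part of the extension.

// sketch.ts -- hypothetical standalone script, not part of the extension
import * as fs from 'fs';
import * as dicomParser from 'dicom-parser';
import dict from './valueDict';

// Read and parse a DICOM file (the file name is a placeholder).
const byteArray = fs.readFileSync('sample.dcm');
const dataSet = dicomParser.parseDicom(byteArray);

// dicom-parser keys elements as 'xggggeeee'; the dictionary uses 'gggg,eeee'.
const describe = (tag: string): string | undefined => {
  const raw = dataSet.string('x' + tag.replace(',', '').toLowerCase());
  if (raw === undefined) return undefined;
  const meaning = dict[tag] && dict[tag][raw.trim()]; // trim trailing padding
  return meaning ? `${raw.trim()} (${meaning})` : raw.trim();
};

console.log(describe('0008,0060')); // e.g. "MR (Magnetic Resonance)"
console.log(describe('0002,0010')); // e.g. "1.2.840.10008.1.2.1 (Explicit VR Little Endian)"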